language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | test/fx/test_fx_param_shape_control_flow.py | {
"start": 2057,
"end": 5146
} | class ____(TestCase):
def verify_mm_relu_mods(self, mm_only_mod, relu_mod):
"""
Verify one module only does a mm op while the other
performs both mm and relu ops in cascade
"""
x = torch.randn(10, 5)
torch.testing.assert_close(
mm_only_mod(x), torch.mm(x, mm_only_mod.get_mul_matrix())
)
tracer = torch.fx.Tracer(param_shapes_constant=True)
traced_graph = tracer.trace(mm_only_mod)
# verify the graph module calculates the same result
graph_mod_mm = torch.fx.GraphModule(mm_only_mod, traced_graph)
torch.testing.assert_close(
graph_mod_mm(x), torch.mm(x, mm_only_mod.get_mul_matrix())
)
# Make a new module with different parameter shape to go down the different
# code path
x = torch.randn(10, 15)
torch.testing.assert_close(
relu_mod(x), torch.relu(torch.mm(x, relu_mod.get_mul_matrix()))
)
tracer2 = torch.fx.Tracer(param_shapes_constant=True)
traced_graph2 = tracer2.trace(relu_mod)
# verify the graph module calculates the same result
graph_mod_relu = torch.fx.GraphModule(relu_mod, traced_graph2)
torch.testing.assert_close(
graph_mod_relu(x), torch.relu(torch.mm(x, relu_mod.get_mul_matrix()))
)
graph1_node_targets = [n.target for n in traced_graph.nodes]
graph2_node_targets = [n.target for n in traced_graph2.nodes]
# the second graph has an extra relu function call node
assert torch.mm in graph1_node_targets and torch.mm in graph2_node_targets
assert (
torch.relu not in graph1_node_targets and torch.relu in graph2_node_targets
)
def test_param_shape_const(self):
mymod = MyModuleParamShape(in_channels=5)
mymod2 = MyModuleParamShape(in_channels=15)
self.verify_mm_relu_mods(mymod, mymod2)
def test_param_size_const(self):
mymod = MyModuleParamSize(in_channels=5)
mymod2 = MyModuleParamSize(in_channels=15)
self.verify_mm_relu_mods(mymod, mymod2)
def test_param_dim_const(self):
mymod = MyModuleParamDim(torch.nn.Parameter(torch.randn(2, 5, 3)))
mymod2 = MyModuleParamDim(torch.nn.Parameter(torch.randn(15, 3)))
self.verify_mm_relu_mods(mymod, mymod2)
def test_param_ndim_const(self):
mymod = MyModuleParamNDim(torch.nn.Parameter(torch.randn(2, 5, 3)))
mymod2 = MyModuleParamNDim(torch.nn.Parameter(torch.randn(15, 3)))
self.verify_mm_relu_mods(mymod, mymod2)
def test_param_numel_const(self):
mymod = MyModuleParamNumEl(in_channels=5)
mymod2 = MyModuleParamNumEl(in_channels=15)
self.verify_mm_relu_mods(mymod, mymod2)
def test_param_nelement_const(self):
mymod = MyModuleParamNElement(in_channels=5)
mymod2 = MyModuleParamNElement(in_channels=15)
self.verify_mm_relu_mods(mymod, mymod2)
if __name__ == "__main__":
raise_on_run_directly("test/test_fx.py")
| TestConstParamShapeInControlFlow |
python | zarr-developers__zarr-python | src/zarr/core/sync.py | {
"start": 881,
"end": 6323
} | class ____(Exception):
pass
def _get_lock() -> threading.Lock:
"""Allocate or return a threading lock.
The lock is allocated on first use to allow setting one lock per forked process.
"""
global _lock
if not _lock:
_lock = threading.Lock()
return _lock
def _get_executor() -> ThreadPoolExecutor:
"""Return Zarr Thread Pool Executor
The executor is allocated on first use.
"""
global _executor
if not _executor:
max_workers = config.get("threading.max_workers", None)
logger.debug("Creating Zarr ThreadPoolExecutor with max_workers=%s", max_workers)
_executor = ThreadPoolExecutor(max_workers=max_workers, thread_name_prefix="zarr_pool")
_get_loop().set_default_executor(_executor)
return _executor
def cleanup_resources() -> None:
global _executor
if _executor:
_executor.shutdown(wait=True, cancel_futures=True)
_executor = None
if loop[0] is not None:
with _get_lock():
# Stop the event loop safely
loop[0].call_soon_threadsafe(loop[0].stop) # Stop loop from another thread
if iothread[0] is not None:
iothread[0].join(timeout=0.2) # Add a timeout to avoid hanging
if iothread[0].is_alive():
logger.warning(
"Thread did not finish cleanly; forcefully closing the event loop."
)
# Forcefully close the event loop to release resources
loop[0].close()
# dereference the loop and iothread
loop[0] = None
iothread[0] = None
atexit.register(cleanup_resources)
def reset_resources_after_fork() -> None:
"""
Ensure that global resources are reset after a fork. Without this function,
forked processes will retain invalid references to the parent process's resources.
"""
global loop, iothread, _executor
# These lines are excluded from coverage because this function only runs in a child process,
# which is not observed by the test coverage instrumentation. Despite the apparent lack of
# test coverage, this function should be adequately tested by any test that uses Zarr IO with
# multiprocessing.
loop[0] = None # pragma: no cover
iothread[0] = None # pragma: no cover
_executor = None # pragma: no cover
# this is only available on certain operating systems
if hasattr(os, "register_at_fork"):
os.register_at_fork(after_in_child=reset_resources_after_fork)
async def _runner(coro: Coroutine[Any, Any, T]) -> T | BaseException:
"""
Await a coroutine and return the result of running it. If awaiting the coroutine raises an
exception, the exception will be returned.
"""
try:
return await coro
except Exception as ex:
return ex
def sync(
coro: Coroutine[Any, Any, T],
loop: asyncio.AbstractEventLoop | None = None,
timeout: float | None = None,
) -> T:
"""
Make loop run coroutine until it returns. Runs in other thread
"""
if loop is None:
# NB: if the loop is not running *yet*, it is OK to submit work
# and we will wait for it
loop = _get_loop()
if _executor is None and config.get("threading.max_workers", None) is not None:
# trigger executor creation and attach to loop
_ = _get_executor()
if not isinstance(loop, asyncio.AbstractEventLoop):
raise TypeError(f"loop cannot be of type {type(loop)}")
if loop.is_closed():
raise RuntimeError("Loop is not running")
try:
loop0 = asyncio.events.get_running_loop()
if loop0 is loop:
raise SyncError("Calling sync() from within a running loop")
except RuntimeError:
pass
future = asyncio.run_coroutine_threadsafe(_runner(coro), loop)
finished, unfinished = wait([future], return_when=asyncio.ALL_COMPLETED, timeout=timeout)
if len(unfinished) > 0:
raise TimeoutError(f"Coroutine {coro} failed to finish within {timeout} s")
assert len(finished) == 1
return_result = next(iter(finished)).result()
if isinstance(return_result, BaseException):
raise return_result
else:
return return_result
def _get_loop() -> asyncio.AbstractEventLoop:
"""Create or return the default fsspec IO loop
The loop will be running on a separate thread.
"""
if loop[0] is None:
with _get_lock():
# repeat the check just in case the loop got filled between the
# previous two calls from another thread
if loop[0] is None:
logger.debug("Creating Zarr event loop")
new_loop = asyncio.new_event_loop()
loop[0] = new_loop
iothread[0] = threading.Thread(target=new_loop.run_forever, name="zarr_io")
assert iothread[0] is not None
iothread[0].daemon = True
iothread[0].start()
assert loop[0] is not None
return loop[0]
async def _collect_aiterator(data: AsyncIterator[T]) -> tuple[T, ...]:
"""
Collect an entire async iterator into a tuple
"""
result = [x async for x in data]
return tuple(result)
def collect_aiterator(data: AsyncIterator[T]) -> tuple[T, ...]:
"""
Synchronously collect an entire async iterator into a tuple.
"""
return sync(_collect_aiterator(data))
| SyncError |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 23731,
"end": 24386
} | class ____(StructModel):
def __init__(self, dmm, fe_type):
# The fields are mutable but the payload is always manipulated
# by reference. This scheme allows mutations of an array to
# be seen by its iterators.
members = [
('size', types.intp),
('allocated', types.intp),
# This member is only used only for reflected lists
('dirty', types.boolean),
# Actually an inlined var-sized array
('data', fe_type.container.dtype),
]
super(ListPayloadModel, self).__init__(dmm, fe_type, members)
@register_default(types.List)
| ListPayloadModel |
python | pallets__jinja | tests/test_security.py | {
"start": 5678,
"end": 7078
} | class ____:
def test_basic_format_safety(self):
env = SandboxedEnvironment()
t = env.from_string('{{ "a{x.__class__}b".format_map({"x":42}) }}')
assert t.render() == "ab"
def test_basic_format_all_okay(self):
env = SandboxedEnvironment()
t = env.from_string('{{ "a{x.foo}b".format_map({"x":{"foo": 42}}) }}')
assert t.render() == "a42b"
def test_safe_format_all_okay(self):
env = SandboxedEnvironment()
t = env.from_string(
'{{ ("a{x.foo}b{y}"|safe).format_map({"x":{"foo": 42}, "y":"<foo>"}) }}'
)
assert t.render() == "a42b<foo>"
def test_indirect_call(self):
def run(value, arg):
return value.run(arg)
env = SandboxedEnvironment()
env.filters["run"] = run
t = env.from_string(
"""{% set
ns = namespace(run="{0.__call__.__builtins__[__import__]}".format)
%}
{{ ns | run(not_here) }}
"""
)
with pytest.raises(SecurityError):
t.render()
def test_attr_filter(self) -> None:
env = SandboxedEnvironment()
t = env.from_string(
"""{{ "{0.__call__.__builtins__[__import__]}"
| attr("format")(not_here) }}"""
)
with pytest.raises(SecurityError):
t.render()
| TestStringFormatMap |
python | django-import-export__django-import-export | tests/core/tests/test_mixins.py | {
"start": 14503,
"end": 15170
} | class ____(TestCase):
class TestExportMixin(admin.ExportMixin):
def __init__(self, export_form) -> None:
super().__init__()
self.export_form = export_form
def get_export_form(self):
return self.export_form
class TestExportForm(forms.ExportForm):
pass
def test_get_export_form(self):
m = admin.ExportMixin()
self.assertEqual(admin.ExportMixin.export_form_class, m.get_export_form_class())
def test_get_export_form_with_custom_form(self):
m = self.TestExportMixin(self.TestExportForm)
self.assertEqual(self.TestExportForm, m.get_export_form())
| ExportMixinTest |
python | coleifer__peewee | tests/postgres_helpers.py | {
"start": 6013,
"end": 14855
} | class ____(BaseJsonFieldTestCase):
def _create_test_data(self):
data = [
{'k1': 'v1', 'k2': 'v2', 'k3': {'k4': ['i1', 'i2'], 'k5': {}}},
['a1', 'a2', {'a3': 'a4'}],
{'a1': 'x1', 'a2': 'x2', 'k4': ['i1', 'i2']},
list(range(10)),
list(range(5, 15)),
['k4', 'k1']]
self._bjson_objects = []
for json_value in data:
self._bjson_objects.append(self.M.create(data=json_value))
def assertObjects(self, expr, *indexes):
query = (self.M
.select()
.where(expr)
.order_by(self.M.id))
self.assertEqual(
[bjson.data for bjson in query],
[self._bjson_objects[index].data for index in indexes])
def test_contained_by(self):
self._create_test_data()
item1 = ['a1', 'a2', {'a3': 'a4'}, 'a5']
self.assertObjects(self.M.data.contained_by(item1), 1)
item2 = {'a1': 'x1', 'a2': 'x2', 'k4': ['i0', 'i1', 'i2'], 'x': 'y'}
self.assertObjects(self.M.data.contained_by(item2), 2)
def test_equality(self):
data = {'k1': ['a1', 'a2'], 'k2': {'k3': 'v3'}}
j = self.M.create(data=data)
j_db = self.M.get(self.M.data == data)
self.assertEqual(j.id, j_db.id)
def test_subscript_contains(self):
self._create_test_data()
D = self.M.data
# 'k3' is mapped to another dictioary {'k4': [...]}. Therefore,
# 'k3' is said to contain 'k4', but *not* ['k4'] or ['k4', 'k5'].
self.assertObjects(D['k3'].has_key('k4'), 0)
self.assertObjects(D['k3'].contains(['k4']))
self.assertObjects(D['k3'].contains(['k4', 'k5']))
# We can check for the keys this way, though.
self.assertObjects(D['k3'].contains_all('k4', 'k5'), 0)
self.assertObjects(D['k3'].contains_any('k4', 'kx'), 0)
# However, in test object index=2, 'k4' can be said to contain
# both 'i1' and ['i1'].
self.assertObjects(D['k4'].contains('i1'), 2)
self.assertObjects(D['k4'].contains(['i1']), 2)
# Interestingly, we can also specify the list of contained values
# out-of-order.
self.assertObjects(D['k4'].contains(['i2', 'i1']), 2)
# We can test whether an object contains another JSON object fragment.
self.assertObjects(D['k3'].contains({'k4': ['i1']}), 0)
self.assertObjects(D['k3'].contains({'k4': ['i1', 'i2']}), 0)
# Check multiple levels of nesting / containment.
self.assertObjects(D['k3']['k4'].contains('i2'), 0)
self.assertObjects(D['k3']['k4'].contains_all('i1', 'i2'), 0)
self.assertObjects(D['k3']['k4'].contains_all('i0', 'i2'))
self.assertObjects(D['k4'].contains_all('i1', 'i2'), 2)
# Check array indexes.
self.assertObjects(D[2].has_key('a3'), 1)
self.assertObjects(D[2].contains('a3'))
self.assertObjects(D[0].contains('a1'), 1)
self.assertObjects(D[0].contains('k1'))
def test_contains(self):
self._create_test_data()
D = self.M.data
# Test for keys. 'k4' is both an object key and an array element.
self.assertObjects(D.has_key('k4'), 2, 5)
self.assertObjects(D.has_key('a1'), 1, 2)
self.assertObjects(D.contains('a1'), 1)
self.assertObjects(D.has_key('k3'), 0)
# We can test for multiple top-level keys/indexes.
self.assertObjects(D.contains_all('a1', 'a2'), 1, 2)
# If we test for both with .contains(), though, it is treated as
# an object match.
self.assertObjects(D.contains(['a1', 'a2']), 1)
# Check numbers.
self.assertObjects(D.contains([2, 5, 6, 7, 8]), 3)
self.assertObjects(D.contains([5, 6, 7, 8, 9]), 3, 4)
# We can check for partial objects.
self.assertObjects(D.contains({'a1': 'x1'}), 2)
self.assertObjects(D.contains({'k3': {'k4': []}}), 0)
self.assertObjects(D.contains([{'a3': 'a4'}]), 1)
# Check for simple keys.
self.assertObjects(D.contains(['a1']), 1)
self.assertObjects(D.contains('a1'), 1)
self.assertObjects(D.contains('k3'))
# Contains any.
self.assertObjects(D.contains_any('a1', 'k1'), 0, 1, 2, 5)
self.assertObjects(D.contains_any('k4', 'xx', 'yy', '2'), 2, 5)
self.assertObjects(D.contains_any('i1', 'i2', 'a3'))
# Contains all.
self.assertObjects(D.contains_all('k1', 'k2', 'k3'), 0)
self.assertObjects(D.contains_all('k1', 'k2', 'k3', 'k4'))
# Has key.
self.assertObjects(D.has_key('a1'), 1, 2)
self.assertObjects(D.has_key('k1'), 0, 5)
self.assertObjects(D.has_key('k4'), 2, 5)
self.assertObjects(D.has_key('a3'))
self.assertObjects(D['k3'].has_key('k4'), 0)
self.assertObjects(D['k4'].has_key('i2'), 2)
def test_contains_contained_by(self):
samples = (
{'k1': 'v1', 'k2': 'v2'},
{'k1': 'v10'},
['i1', 'i2', 'i3', 'test'],
'k1',
123,
1.5,
True,
False)
pks = []
for sample in samples:
pks.append(self.M.create(data=sample).id)
for i, sample in enumerate(samples):
q = self.M.select().where(self.M.data.contains(sample))
self.assertEqual([x.id for x in q], [pks[i]])
q = self.M.select().where(self.M.data.contained_by(sample))
self.assertEqual([x.id for x in q], [pks[i]])
def test_concat_data(self):
self.M.delete().execute()
self.M.create(data={'k1': {'x1': 'y1'}, 'k2': 'v2', 'k3': [0, 1]})
def assertData(exp, expected_data):
query = self.M.select(self.M.data.concat(exp)).tuples()
data = query[:][0][0]
self.assertEqual(data, expected_data)
D = self.M.data
assertData({'k2': 'v2-x', 'k1': {'x2': 'y2'}, 'k4': 'v4'}, {
'k1': {'x2': 'y2'}, # NB: not merged/patched!!
'k2': 'v2-x',
'k3': [0, 1],
'k4': 'v4'})
assertData({'k1': 'v1-x', 'k3': [2, 3, 4], 'k4': {'x4': 'y4'}}, {
'k1': 'v1-x',
'k2': 'v2',
'k3': [2, 3, 4],
'k4': {'x4': 'y4'}})
# We can update sub-keys.
query = self.M.select(D['k1'].concat({'x2': 'y2', 'x3': 'y3'}))
self.assertEqual(query.tuples()[0][0],
{'x1': 'y1', 'x2': 'y2', 'x3': 'y3'})
# Concat can be used to extend JSON arrays.
query = self.M.select(D['k3'].concat([2, 3]))
self.assertEqual(query.tuples()[0][0], [0, 1, 2, 3])
def test_update_data_inplace(self):
self.M.delete().execute()
b = self.M.create(data={'k1': {'x1': 'y1'}, 'k2': 'v2'})
self.M.update(data=self.M.data.concat({
'k1': {'x2': 'y2'},
'k3': 'v3'})).execute()
b2 = self.M.get(self.M.id == b.id)
self.assertEqual(b2.data, {'k1': {'x2': 'y2'}, 'k2': 'v2', 'k3': 'v3'})
def test_selecting(self):
self._create_test_data()
query = (self.M
.select(self.M.data['k3']['k4'].as_json().alias('k3k4'))
.order_by(self.M.id))
k3k4_data = [obj.k3k4 for obj in query]
self.assertEqual(k3k4_data, [
['i1', 'i2'],
None,
None,
None,
None,
None])
query = (self.M
.select(
self.M.data[0].as_json(),
self.M.data[2].as_json())
.order_by(self.M.id)
.tuples())
self.assertEqual(list(query), [
(None, None),
('a1', {'a3': 'a4'}),
(None, None),
(0, 2),
(5, 7),
('k4', None)])
def test_conflict_update(self):
b1 = self.M.create(data={'k1': 'v1'})
iq = (self.M
.insert(id=b1.id, data={'k1': 'v1-x'})
.on_conflict('update', conflict_target=[self.M.id],
update={self.M.data: {'k1': 'v1-z'}}))
b1_id_db = iq.execute()
self.assertEqual(b1.id, b1_id_db)
b1_db = self.M.get(self.M.id == b1.id)
self.assertEqual(self.M.data, {'k1': 'v1-z'})
iq = (self.M
.insert(id=b1.id, data={'k1': 'v1-y'})
.on_conflict('update', conflict_target=[self.M.id],
update={'data': {'k1': 'v1-w'}}))
b1_id_db = iq.execute()
self.assertEqual(b1.id, b1_id_db)
b1_db = self.M.get(self.M.id == b1.id)
self.assertEqual(self.M.data, {'k1': 'v1-w'})
self.assertEqual(self.M.select().count(), 1)
| BaseBinaryJsonFieldTestCase |
python | google__jax | jax/experimental/mosaic/gpu/constraints.py | {
"start": 2632,
"end": 2766
} | class ____:
expression: Expression
axes: tuple[int, ...]
shape: tuple[int, ...]
@dataclasses.dataclass(frozen=True)
| BroadcastInDim |
python | run-llama__llama_index | llama-index-integrations/indices/llama-index-indices-managed-postgresml/llama_index/indices/managed/postgresml/query.py | {
"start": 2343,
"end": 8263
} | class ____(BaseQueryEngine):
"""Retriever query engine for PostgresML."""
def __init__(
self,
retriever: PostgresMLRetriever,
streaming: Optional[bool] = False,
callback_manager: Optional[CallbackManager] = None,
pgml_query: Optional[Dict[str, Any]] = None,
vector_search_limit: Optional[int] = 4,
vector_search_rerank: Optional[Dict[str, Any]] = None,
vector_search_document: Optional[Dict[str, Any]] = {"keys": ["id", "metadata"]},
model: Optional[str] = "meta-llama/Meta-Llama-3-8B-Instruct",
model_parameters: Optional[Dict[str, Any]] = {"max_tokens": 2048},
**kwargs,
) -> None:
self._retriever = retriever
self._streaming = streaming
self._prompts = deepcopy(PROMPTS)
self._pgml_query = pgml_query
self._vector_search_limit = vector_search_limit
self._vector_search_rerank = vector_search_rerank
self._vector_search_document = vector_search_document
self._model = model
self._model_parameters = model_parameters
super().__init__(callback_manager=callback_manager)
@classmethod
def from_args(
cls,
retriever: PostgresMLRetriever,
**kwargs: Any,
) -> "PostgresMLQueryEngine":
"""
Initialize a PostgresMLQueryEngine object.".
Args:
retriever (PostgresMLRetriever): A PostgresML retriever object.
"""
return cls(retriever=retriever, **kwargs)
def with_retriever(self, retriever: PostgresMLRetriever) -> "PostgresMLQueryEngine":
return PostgresMLQueryEngine(
retriever=retriever,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
if self._streaming:
async_token_gen = run_async_tasks([self._do_query(query_bundle)])[0]
return StreamingResponse(response_gen=async_token_gen.response_gen)
else:
return run_async_tasks([self._do_query(query_bundle)])[0]
async def _aquery(self, query_bundle: QueryBundle, **kwargs) -> RESPONSE_TYPE:
"""Answer an async query."""
return await self._do_query(query_bundle, **kwargs)
async def _do_query(
self, query_bundle: Optional[QueryBundle] = None
) -> RESPONSE_TYPE:
query = self._pgml_query
if not query:
if not query_bundle:
raise Exception(
"Must provide either query or query_bundle to query and aquery"
)
# {CONTEXT} gets replaced with the correct context in the SQL query generated by the pgml SDK
messages = self._prompts["text_qa_template"].format_messages(
context_str="{CONTEXT}", query_str=query_bundle.query_str
)
messages = [
{"role": m["role"].value, "content": m["content"]}
for m in [m.dict() for m in messages]
]
model_parameters = deepcopy(self._model_parameters)
model_parameters["model"] = self._model
model_parameters["messages"] = messages
if self._vector_search_rerank is not None:
self._vector_search_rerank = self._vector_search_rerank | {
"query": query_bundle.query_str
}
query = {
"CONTEXT": {
"vector_search": {
"query": {
"fields": {
"content": {
"query": query_bundle.query_str,
"parameters": {"prompt": "query: "},
},
},
},
"document": self._vector_search_document,
"limit": self._vector_search_limit,
"rerank": self._vector_search_rerank,
},
"aggregate": {"join": "\n"},
},
"chat": model_parameters,
}
if self._streaming:
# The pgml SDK does not currently return sources for streaming
results = await self._retriever._index.collection.rag_stream(
query,
self._retriever._index.pipeline,
)
return AsyncStreamingResponse(response_gen=AsyncJsonGenerator(results))
else:
results = await self._retriever._index.collection.rag(
query,
self._retriever._index.pipeline,
)
source_nodes = [
NodeWithScore(
node=TextNode(
id_=r["document"]["id"],
text=r["chunk"],
metadata=r["document"]["metadata"],
),
score=r["score"],
)
if self._vector_search_rerank is None
else NodeWithScore(
node=TextNode(
id_=r["document"]["id"],
text=r["chunk"],
metadata=r["document"]["metadata"],
),
score=r["rerank_score"],
)
for r in results["sources"]["CONTEXT"]
]
return Response(response=results["rag"][0], source_nodes=source_nodes)
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt modules."""
return {}
def _get_prompts(self) -> Dict[str, BasePromptTemplate]:
"""Get prompts."""
return self._prompts
def _update_prompts(self, prompts_dict: PromptDictType):
"""Update prompts."""
for key in prompts_dict:
self._prompts[key] = prompts_dict[key]
| PostgresMLQueryEngine |
python | walkccc__LeetCode | solutions/1382. Balance a Binary Search Tree/1382.py | {
"start": 0,
"end": 594
} | class ____:
def balanceBST(self, root: TreeNode | None) -> TreeNode | None:
nums = []
def inorder(root: TreeNode | None) -> None:
if not root:
return
inorder(root.left)
nums.append(root.val)
inorder(root.right)
inorder(root)
# Same as 108. Convert Sorted Array to Binary Search Tree
def build(l: int, r: int) -> TreeNode | None:
if l > r:
return None
m = (l + r) // 2
return TreeNode(nums[m],
build(l, m - 1),
build(m + 1, r))
return build(0, len(nums) - 1)
| Solution |
python | sphinx-doc__sphinx | tests/roots/test-ext-inheritance_diagram/test.py | {
"start": 168,
"end": 213
} | class ____(object): # NoQA: UP004
pass
| Alice |
python | kubernetes-client__python | kubernetes/client/models/v1_linux_container_user.py | {
"start": 383,
"end": 5793
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'gid': 'int',
'supplemental_groups': 'list[int]',
'uid': 'int'
}
attribute_map = {
'gid': 'gid',
'supplemental_groups': 'supplementalGroups',
'uid': 'uid'
}
def __init__(self, gid=None, supplemental_groups=None, uid=None, local_vars_configuration=None): # noqa: E501
"""V1LinuxContainerUser - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._gid = None
self._supplemental_groups = None
self._uid = None
self.discriminator = None
self.gid = gid
if supplemental_groups is not None:
self.supplemental_groups = supplemental_groups
self.uid = uid
@property
def gid(self):
"""Gets the gid of this V1LinuxContainerUser. # noqa: E501
GID is the primary gid initially attached to the first process in the container # noqa: E501
:return: The gid of this V1LinuxContainerUser. # noqa: E501
:rtype: int
"""
return self._gid
@gid.setter
def gid(self, gid):
"""Sets the gid of this V1LinuxContainerUser.
GID is the primary gid initially attached to the first process in the container # noqa: E501
:param gid: The gid of this V1LinuxContainerUser. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and gid is None: # noqa: E501
raise ValueError("Invalid value for `gid`, must not be `None`") # noqa: E501
self._gid = gid
@property
def supplemental_groups(self):
"""Gets the supplemental_groups of this V1LinuxContainerUser. # noqa: E501
SupplementalGroups are the supplemental groups initially attached to the first process in the container # noqa: E501
:return: The supplemental_groups of this V1LinuxContainerUser. # noqa: E501
:rtype: list[int]
"""
return self._supplemental_groups
@supplemental_groups.setter
def supplemental_groups(self, supplemental_groups):
"""Sets the supplemental_groups of this V1LinuxContainerUser.
SupplementalGroups are the supplemental groups initially attached to the first process in the container # noqa: E501
:param supplemental_groups: The supplemental_groups of this V1LinuxContainerUser. # noqa: E501
:type: list[int]
"""
self._supplemental_groups = supplemental_groups
@property
def uid(self):
"""Gets the uid of this V1LinuxContainerUser. # noqa: E501
UID is the primary uid initially attached to the first process in the container # noqa: E501
:return: The uid of this V1LinuxContainerUser. # noqa: E501
:rtype: int
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this V1LinuxContainerUser.
UID is the primary uid initially attached to the first process in the container # noqa: E501
:param uid: The uid of this V1LinuxContainerUser. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and uid is None: # noqa: E501
raise ValueError("Invalid value for `uid`, must not be `None`") # noqa: E501
self._uid = uid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1LinuxContainerUser):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1LinuxContainerUser):
return True
return self.to_dict() != other.to_dict()
| V1LinuxContainerUser |
python | mwaskom__seaborn | seaborn/_statistics.py | {
"start": 15937,
"end": 18598
} | class ____:
def __init__(self, estimator, errorbar=None, **boot_kws):
"""
Data aggregator that produces an estimate and error bar interval.
Parameters
----------
estimator : callable or string
Function (or method name) that maps a vector to a scalar.
errorbar : string, (string, number) tuple, or callable
Name of errorbar method (either "ci", "pi", "se", or "sd"), or a tuple
with a method name and a level parameter, or a function that maps from a
vector to a (min, max) interval, or None to hide errorbar. See the
:doc:`errorbar tutorial </tutorial/error_bars>` for more information.
boot_kws
Additional keywords are passed to bootstrap when error_method is "ci".
"""
self.estimator = estimator
method, level = _validate_errorbar_arg(errorbar)
self.error_method = method
self.error_level = level
self.boot_kws = boot_kws
def __call__(self, data, var):
"""Aggregate over `var` column of `data` with estimate and error interval."""
vals = data[var]
if callable(self.estimator):
# You would think we could pass to vals.agg, and yet:
# https://github.com/mwaskom/seaborn/issues/2943
estimate = self.estimator(vals)
else:
estimate = vals.agg(self.estimator)
# Options that produce no error bars
if self.error_method is None:
err_min = err_max = np.nan
elif len(data) <= 1:
err_min = err_max = np.nan
# Generic errorbars from user-supplied function
elif callable(self.error_method):
err_min, err_max = self.error_method(vals)
# Parametric options
elif self.error_method == "sd":
half_interval = vals.std() * self.error_level
err_min, err_max = estimate - half_interval, estimate + half_interval
elif self.error_method == "se":
half_interval = vals.sem() * self.error_level
err_min, err_max = estimate - half_interval, estimate + half_interval
# Nonparametric options
elif self.error_method == "pi":
err_min, err_max = _percentile_interval(vals, self.error_level)
elif self.error_method == "ci":
units = data.get("units", None)
boots = bootstrap(vals, units=units, func=self.estimator, **self.boot_kws)
err_min, err_max = _percentile_interval(boots, self.error_level)
return pd.Series({var: estimate, f"{var}min": err_min, f"{var}max": err_max})
| EstimateAggregator |
python | streamlit__streamlit | lib/streamlit/runtime/caching/cache_errors.py | {
"start": 3885,
"end": 4779
} | class ____(MarkdownFormattedException, Generic[R]):
def __init__(self, func: Callable[..., R], return_value: R) -> None:
MarkdownFormattedException.__init__(
self,
f"""
Cannot serialize the return value (of type {get_return_value_type(return_value)})
in {get_cached_func_name_md(func)}. `st.cache_data` uses
[pickle](https://docs.python.org/3/library/pickle.html) to serialize the
function's return value and safely store it in the cache
without mutating the original object. Please convert the return value to a
pickle-serializable type. If you want to cache unserializable objects such
as database connections or Tensorflow sessions, use `st.cache_resource`
instead (see [our docs]({CACHE_DOCS_URL}) for differences).""",
)
| UnserializableReturnValueError |
python | uqfoundation__dill | dill/tests/test_classdef.py | {
"start": 443,
"end": 532
} | class ____:
def _method(self):
pass
def ok(self):
return True
| _class |
python | coleifer__peewee | peewee.py | {
"start": 25224,
"end": 25711
} | class ____(Source):
__and__ = __join__(JOIN.INNER)
__add__ = __join__(JOIN.LEFT_OUTER)
__sub__ = __join__(JOIN.RIGHT_OUTER)
__or__ = __join__(JOIN.FULL_OUTER)
__mul__ = __join__(JOIN.CROSS)
__rand__ = __join__(JOIN.INNER, inverted=True)
__radd__ = __join__(JOIN.LEFT_OUTER, inverted=True)
__rsub__ = __join__(JOIN.RIGHT_OUTER, inverted=True)
__ror__ = __join__(JOIN.FULL_OUTER, inverted=True)
__rmul__ = __join__(JOIN.CROSS, inverted=True)
| BaseTable |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 118629,
"end": 123186
} | class ____(ExprNode):
# Used as part of import statement implementation.
# Implements result =
# __import__(module_name, globals(), None, list(imported_names), level)
#
# module_name UnicodeNode dotted name of module. Empty module
# name means importing the parent package according
# to level
# imported_names [ExprNode] or None list of names to be imported
# level int relative import level:
# -1: attempt both relative import and absolute import;
# 0: absolute import;
# >0: the number of parent directories to search
# relative to the current module.
# None: decide the level according to language level and
# directives
# is_import_as_name boolean is imported in an 'as' block
type = py_object_type
is_temp = True
subexprs = ['module_name', 'imported_names']
def analyse_types(self, env):
if self.level is None:
# For modules in packages, and without 'absolute_import' enabled, try relative (Py2) import first.
if env.global_scope().parent_module and (
env.directives['py2_import'] or
Future.absolute_import not in env.context.future_directives):
self.level = -1
else:
self.level = 0
module_name = self.module_name.analyse_types(env)
self.module_name = module_name.coerce_to_pyobject(env)
assert self.module_name.is_string_literal
if self.imported_names is not None:
self.imported_names = [
name.analyse_types(env) for name in self.imported_names
]
if self.level != 0:
level = self.level if self.level > 0 else 1
qualname = env.global_scope().qualified_name
for _ in range(level):
qualname, _, _ = qualname.rpartition('.')
self.module_qualname = StringEncoding.EncodedString(f"{qualname}.{self.module_name.value}")
else:
self.module_qualname = None
return self
gil_message = "Python import"
def generate_result_code(self, code):
assert self.module_name.is_string_literal
module_qualname = 'NULL'
if self.module_qualname is not None:
module_qualname = code.get_py_string_const(self.module_qualname)
code.globalstate.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c"))
if self.imported_names is not None:
code.putln("{")
code.putln(
f"PyObject* const __pyx_imported_names[] = {{{','.join(n.result() for n in self.imported_names)}}};")
import_code = "__Pyx_Import(%s, %s, %d, %s, %d)" % (
self.module_name.py_result(),
'__pyx_imported_names' if self.imported_names is not None else '0',
len(self.imported_names) if self.imported_names is not None else 0,
module_qualname,
self.level)
tmp_submodule = code.funcstate.allocate_temp(self.type, manage_ref=False)
code.putln(
f"{tmp_submodule} = {import_code}; {code.error_goto_if_null(tmp_submodule, self.pos)}"
)
if self.imported_names is not None:
code.putln("}")
if self.is_import_as_name and "." in self.module_name.value:
# We need to get the submodules in this case
code.globalstate.use_utility_code(UtilityCode.load_cached("ImportFrom", "ImportExport.c"))
submodule = code.funcstate.allocate_temp(self.type, manage_ref=False)
modules = self.module_name.value.split(".")
for module in modules[1:]:
module_obj = code.get_py_string_const(StringEncoding.EncodedString(module))
code.putln(f"{submodule} = __Pyx_ImportFrom({tmp_submodule}, {module_obj});")
code.putln(f"Py_DECREF({tmp_submodule});")
code.putln(code.error_goto_if_null(submodule, self.pos))
code.putln(f"{tmp_submodule} = {submodule};")
code.funcstate.release_temp(submodule)
code.putln(f"{self.result()} = {tmp_submodule};")
code.funcstate.release_temp(tmp_submodule)
self.generate_gotref(code)
def get_known_standard_library_import(self):
return self.module_name.value
| ImportNode |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 120773,
"end": 121667
} | class ____(BaseModel, extra="forbid"):
"""
This data structure is used in API interface and applied across multiple shards
"""
payload: "Payload" = Field(
..., description="This data structure is used in API interface and applied across multiple shards"
)
points: Optional[List["ExtendedPointId"]] = Field(
default=None, description="Assigns payload to each point in this list"
)
filter: Optional["Filter"] = Field(
default=None, description="Assigns payload to each point that satisfy this filter condition"
)
shard_key: Optional["ShardKeySelector"] = Field(
default=None, description="This data structure is used in API interface and applied across multiple shards"
)
key: Optional[str] = Field(
default=None, description="Assigns payload to each point that satisfy this path of property"
)
| SetPayload |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/io_management/input_managers.py | {
"start": 3782,
"end": 4264
} | class ____(dg.InputManager):
def load_input(self, context: dg.InputContext):
return read_dataframe_from_table(name="table_1")
@dg.input_manager
def table_1_manager():
return Table1InputManager()
@dg.job(resource_defs={"load_input_manager": table_1_manager})
def load_table_job():
my_op()
# end_load_unconnected_input
# start_load_unconnected_io
# in this example, TableIOManager is defined elsewhere and we just want to override load_input
| Table1InputManager |
python | PyCQA__pylint | tests/functional/ext/no_self_use/no_self_use.py | {
"start": 3110,
"end": 3468
} | class ____:
"""Other false positive cases."""
@staticmethod
def a(self): # pylint: disable=unused-argument,bad-staticmethod-argument
...
@staticmethod
def b():
...
@classmethod
def c(self): # pylint: disable=bad-classmethod-argument
...
def d(): # pylint: disable=no-method-argument
...
| Foo4 |
python | explosion__spaCy | spacy/lang/th/__init__.py | {
"start": 1237,
"end": 1324
} | class ____(Language):
lang = "th"
Defaults = ThaiDefaults
__all__ = ["Thai"]
| Thai |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 517946,
"end": 518327
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "milestone")
field = sgqlc.types.Field(
sgqlc.types.non_null("ProjectV2FieldConfiguration"), graphql_name="field"
)
milestone = sgqlc.types.Field("Milestone", graphql_name="milestone")
| ProjectV2ItemFieldMilestoneValue |
python | davidhalter__parso | test/fuzz_diff_parser.py | {
"start": 2890,
"end": 3323
} | class ____:
def __init__(self, copy_line, insertion_line):
self._copy_line = copy_line
self._insertion_line = insertion_line
def apply(self, code_lines):
code_lines.insert(
self._insertion_line,
# Use some line from the file. This doesn't feel totally
# random, but for the diff parser it will feel like it.
code_lines[self._copy_line]
)
| LineCopy |
python | django__django | tests/admin_inlines/models.py | {
"start": 1972,
"end": 2218
} | class ____(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder, models.CASCADE)
readonly = models.CharField("Inner readonly label", max_length=1)
def get_absolute_url(self):
return "/inner/"
| Inner |
python | huggingface__transformers | tests/models/bert_japanese/test_tokenization_bert_japanese.py | {
"start": 18187,
"end": 19221
} | class ____(unittest.TestCase):
def test_tokenizer_mismatch_warning(self):
EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers", level="WARNING") as cm:
BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from."
)
)
EXAMPLE_BERT_ID = "google-bert/bert-base-cased"
with self.assertLogs("transformers", level="WARNING") as cm:
BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from."
)
)
| BertTokenizerMismatchTest |
python | getsentry__sentry | src/sentry/roles/manager.py | {
"start": 1372,
"end": 1994
} | class ____(Role):
is_global: bool = False
def get_minimum_team_role(self) -> TeamRole:
"""Return the minimum team role for this organization role.
A member with this organization role automatically receives the minimum role
when joining a new team, and can't be demoted below that team role for as
long as they hold the organization role.
"""
return self.parent.get_minimum_team_role(self.id)
def can_manage_team_role(self, other: TeamRole) -> bool:
return self.get_minimum_team_role().can_manage(other)
@dataclass(frozen=True, eq=True)
| OrganizationRole |
python | milvus-io__pymilvus | tests/test_grpc_handler.py | {
"start": 8141,
"end": 12702
} | class ____:
def test_register_state_change_callback(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.insecure_channel') as mock_channel:
mock_ch = MagicMock()
mock_channel.return_value = mock_ch
handler = GrpcHandler(uri="http://localhost:19530")
def callback(state: Any) -> None:
pass
handler.register_state_change_callback(callback)
assert callback in handler.callbacks
mock_ch.subscribe.assert_called_once_with(callback, try_to_connect=True)
def test_deregister_state_change_callbacks(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.insecure_channel') as mock_channel:
mock_ch = MagicMock()
mock_channel.return_value = mock_ch
handler = GrpcHandler(uri="http://localhost:19530")
def callback(state: Any) -> None:
pass
handler.register_state_change_callback(callback)
handler.deregister_state_change_callbacks()
assert len(handler.callbacks) == 0
mock_ch.unsubscribe.assert_called_once_with(callback)
def test_close(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.insecure_channel') as mock_channel:
mock_ch = MagicMock()
mock_channel.return_value = mock_ch
handler = GrpcHandler(uri="http://localhost:19530")
# Register a callback first
def callback(state: Any) -> None:
pass
handler.register_state_change_callback(callback)
handler.close()
# Verify close was called on the channel
mock_ch.close.assert_called_once()
def test_reset_db_name(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.insecure_channel') as mock_channel:
mock_channel.return_value = MagicMock()
handler = GrpcHandler(uri="http://localhost:19530")
# Add some dummy data to schema_cache
handler.schema_cache["test_collection"] = {"field": "value"}
with patch.object(handler, '_setup_identifier_interceptor'):
handler.reset_db_name("new_db")
assert len(handler.schema_cache) == 0
def test_set_onetime_loglevel(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.insecure_channel') as mock_channel:
mock_channel.return_value = MagicMock()
handler = GrpcHandler(uri="http://localhost:19530")
# Test passes if no exception is raised
handler.set_onetime_loglevel("DEBUG")
def test_wait_for_channel_ready_success(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.insecure_channel') as mock_channel:
mock_ch = MagicMock()
mock_channel.return_value = mock_ch
handler = GrpcHandler(uri="http://localhost:19530")
with patch('pymilvus.client.grpc_handler.grpc.channel_ready_future') as mock_future:
mock_result = MagicMock()
mock_result.result.return_value = None
mock_future.return_value = mock_result
with patch.object(handler, '_setup_identifier_interceptor'):
handler._wait_for_channel_ready(timeout=10)
mock_future.assert_called_once_with(mock_ch)
def test_wait_for_channel_ready_timeout(self) -> None:
with patch('pymilvus.client.grpc_handler.grpc.insecure_channel') as mock_channel:
mock_ch = MagicMock()
mock_channel.return_value = mock_ch
handler = GrpcHandler(uri="http://localhost:19530")
with patch('pymilvus.client.grpc_handler.grpc.channel_ready_future') as mock_future:
mock_result = MagicMock()
mock_result.result.side_effect = grpc.FutureTimeoutError()
mock_future.return_value = mock_result
with pytest.raises(MilvusException) as exc_info:
handler._wait_for_channel_ready(timeout=10)
assert "Fail connecting to server" in str(exc_info.value)
def test_wait_for_channel_ready_no_channel(self) -> None:
handler = GrpcHandler(channel=None)
# Manually set channel to None to test the error case
handler._channel = None
with pytest.raises(MilvusException) as exc_info:
handler._wait_for_channel_ready()
assert "No channel in handler" in str(exc_info.value)
| TestGrpcHandlerStateManagement |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/control_flow/scan_ops_test.py | {
"start": 7077,
"end": 11599
} | class ____(test.TestCase):
valid_dtypes = [
np.int32,
np.int64,
np.float16,
np.float32,
np.float64,
np.complex64,
np.complex128,
dtypes.bfloat16.as_numpy_dtype,
]
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
with self.cached_session():
tf_out = math_ops.cumprod(x, axis, exclusive, reverse).eval()
atol = rtol = 1e-6
if x.dtype == dtypes.bfloat16.as_numpy_dtype:
atol = rtol = 1e-2
self.assertAllClose(np_out, tf_out, atol=atol, rtol=rtol)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
@test_util.run_deprecated_v1
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def testAxisType(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session():
axis = constant_op.constant(0, axis_dtype)
tf_out = math_ops.cumprod(x, axis).eval()
@test_util.run_deprecated_v1
def testNaN(self):
for dtype in (np.float16, np.float32, np.float64):
for nan_idx in range(0, 5):
x = np.arange(1, 6).reshape([5]).astype(dtype)
x[nan_idx] = np.nan
for axis in (-1, 0):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 11).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(x)
with self.session():
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumprod(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumprod(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
math_ops.cumprod(input_tensor, [0]).eval()
def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(1, 9).reshape(shape).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
result = math_ops.cumprod(t, axis, exclusive, reverse)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, shape, result, shape, x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient(self):
for axis in (-1, 0):
self._compareGradient([8], axis, False, False)
@test_util.run_deprecated_v1
def testGradientReverse(self):
for axis in (-1, 0):
self._compareGradient([8], axis, False, True)
@test_util.run_deprecated_v1
def testGradientExclusive(self):
for axis in (-1, 0):
self._compareGradient([8], axis, True, False)
@test_util.run_deprecated_v1
def testGradientExclusiveReverse(self):
for axis in (-1, 0):
self._compareGradient([8], axis, True, True)
@test_util.run_deprecated_v1
def testGradient2D(self):
for axis in (-2, -1, 0, 1):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compareGradient([2, 4], axis, exclusive, reverse)
if __name__ == "__main__":
test.main()
| CumprodTest |
python | astropy__astropy | astropy/extern/configobj/configobj.py | {
"start": 32992,
"end": 83744
} | class ____(Section):
"""An object to read, create, and write config files."""
_keyword = re.compile(r'''^ # line start
(\s*) # indentation
( # keyword
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"=].*?) # no quotes
)
\s*=\s* # divider
(.*) # value (including list values and comments)
$ # line end
''',
re.VERBOSE)
_sectionmarker = re.compile(r'''^
(\s*) # 1: indentation
((?:\[\s*)+) # 2: section marker open
( # 3: section name open
(?:"\s*\S.*?\s*")| # at least one non-space with double quotes
(?:'\s*\S.*?\s*')| # at least one non-space with single quotes
(?:[^'"\s].*?) # at least one non-space unquoted
) # section name close
((?:\s*\])+) # 4: section marker close
\s*(\#.*)? # 5: optional comment
$''',
re.VERBOSE)
# this regexp pulls list values out as a single string
# or single values and comments
# FIXME: this regex adds a '' to the end of comma terminated lists
# workaround in ``_handle_value``
_valueexp = re.compile(r'''^
(?:
(?:
(
(?:
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#][^,\#]*?) # unquoted
)
\s*,\s* # comma
)* # match all list items ending in a comma (if any)
)
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#\s][^,]*?)| # unquoted
(?:(?<!,)) # Empty value
)? # last item in a list - or string value
)|
(,) # alternatively a single comma - empty list
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# use findall to get the members of a list value
_listvalueexp = re.compile(r'''
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#]?.*?) # unquoted
)
\s*,\s* # comma
''',
re.VERBOSE)
# this regexp is used for the value
# when lists are switched off
_nolistvalue = re.compile(r'''^
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"\#].*?)| # unquoted
(?:) # Empty value
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# regexes for finding triple quoted values on one line
_single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
_single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
_multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
_multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
_triple_quote = {
"'''": (_single_line_single, _multi_line_single),
'"""': (_single_line_double, _multi_line_double),
}
# Used by the ``istrue`` Section method
_bools = {
'yes': True, 'no': False,
'on': True, 'off': False,
'1': True, '0': False,
'true': True, 'false': False,
}
def __init__(self, infile=None, options=None, configspec=None, encoding=None,
interpolation=True, raise_errors=False, list_values=True,
create_empty=False, file_error=False, stringify=True,
indent_type=None, default_encoding=None, unrepr=False,
write_empty_values=False, _inspec=False):
"""
Parse a config file or create a config file object.
``ConfigObj(infile=None, configspec=None, encoding=None,
interpolation=True, raise_errors=False, list_values=True,
create_empty=False, file_error=False, stringify=True,
indent_type=None, default_encoding=None, unrepr=False,
write_empty_values=False, _inspec=False)``
"""
self._inspec = _inspec
# init the superclass
Section.__init__(self, self, 0, self)
infile = infile or []
_options = {'configspec': configspec,
'encoding': encoding, 'interpolation': interpolation,
'raise_errors': raise_errors, 'list_values': list_values,
'create_empty': create_empty, 'file_error': file_error,
'stringify': stringify, 'indent_type': indent_type,
'default_encoding': default_encoding, 'unrepr': unrepr,
'write_empty_values': write_empty_values}
if options is None:
options = _options
else:
import warnings
warnings.warn('Passing in an options dictionary to ConfigObj() is '
'deprecated. Use **options instead.',
DeprecationWarning)
# TODO: check the values too.
for entry in options:
if entry not in OPTION_DEFAULTS:
raise TypeError('Unrecognized option "%s".' % entry)
for entry, value in list(OPTION_DEFAULTS.items()):
if entry not in options:
options[entry] = value
keyword_value = _options[entry]
if value != keyword_value:
options[entry] = keyword_value
# XXXX this ignores an explicit list_values = True in combination
# with _inspec. The user should *never* do that anyway, but still...
if _inspec:
options['list_values'] = False
self._initialise(options)
configspec = options['configspec']
self._original_configspec = configspec
self._load(infile, configspec)
def _load(self, infile, configspec):
if isinstance(infile, str):
self.filename = infile
if os.path.isfile(infile):
with open(infile, 'rb') as h:
content = h.readlines() or []
elif self.file_error:
# raise an error if the file doesn't exist
raise IOError('Config file not found: "%s".' % self.filename)
else:
# file doesn't already exist
if self.create_empty:
# this is a good test that the filename specified
# isn't impossible - like on a non-existent device
with open(infile, 'w') as h:
h.write('')
content = []
elif isinstance(infile, (list, tuple)):
content = list(infile)
elif isinstance(infile, dict):
# initialise self
# the Section class handles creating subsections
if isinstance(infile, ConfigObj):
# get a copy of our ConfigObj
def set_section(in_section, this_section):
for entry in in_section.scalars:
this_section[entry] = in_section[entry]
for section in in_section.sections:
this_section[section] = {}
set_section(in_section[section], this_section[section])
set_section(infile, self)
else:
for entry in infile:
self[entry] = infile[entry]
del self._errors
if configspec is not None:
self._handle_configspec(configspec)
else:
self.configspec = None
return
elif getattr(infile, 'read', MISSING) is not MISSING:
# This supports file like objects
content = infile.read() or []
# needs splitting into lines - but needs doing *after* decoding
# in case it's not an 8 bit encoding
else:
raise TypeError('infile must be a filename, file like object, or list of lines.')
if content:
# don't do it for the empty ConfigObj
content = self._handle_bom(content)
# infile is now *always* a list
#
# Set the newlines attribute (first line ending it finds)
# and strip trailing '\n' or '\r' from lines
for line in content:
if (not line) or (line[-1] not in ('\r', '\n')):
continue
for end in ('\r\n', '\n', '\r'):
if line.endswith(end):
self.newlines = end
break
break
assert all(isinstance(line, str) for line in content), repr(content)
content = [line.rstrip('\r\n') for line in content]
self._parse(content)
# if we had any errors, now is the time to raise them
if self._errors:
info = "at line %s." % self._errors[0].line_number
if len(self._errors) > 1:
msg = "Parsing failed with several errors.\nFirst error %s" % info
error = ConfigObjError(msg)
else:
error = self._errors[0]
# set the errors attribute; it's a list of tuples:
# (error_type, message, line_number)
error.errors = self._errors
# set the config attribute
error.config = self
raise error
# delete private attributes
del self._errors
if configspec is None:
self.configspec = None
else:
self._handle_configspec(configspec)
def _initialise(self, options=None):
if options is None:
options = OPTION_DEFAULTS
# initialise a few variables
self.filename = None
self._errors = []
self.raise_errors = options['raise_errors']
self.interpolation = options['interpolation']
self.list_values = options['list_values']
self.create_empty = options['create_empty']
self.file_error = options['file_error']
self.stringify = options['stringify']
self.indent_type = options['indent_type']
self.encoding = options['encoding']
self.default_encoding = options['default_encoding']
self.BOM = False
self.newlines = None
self.write_empty_values = options['write_empty_values']
self.unrepr = options['unrepr']
self.initial_comment = []
self.final_comment = []
self.configspec = None
if self._inspec:
self.list_values = False
# Clear section attributes as well
Section._initialise(self)
def __repr__(self):
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return ('%s({%s})' % (self.__class__.__name__,
', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)])))
def _handle_bom(self, infile):
"""
Handle any BOM, and decode if necessary.
If an encoding is specified, that *must* be used - but the BOM should
still be removed (and the BOM attribute set).
(If the encoding is wrongly specified, then a BOM for an alternative
encoding won't be discovered or removed.)
If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
removed. The BOM attribute will be set. UTF16 will be decoded to
unicode.
NOTE: This method must not be called with an empty ``infile``.
Specifying the *wrong* encoding is likely to cause a
``UnicodeDecodeError``.
``infile`` must always be returned as a list of lines, but may be
passed in as a single string.
"""
if ((self.encoding is not None) and
(self.encoding.lower() not in BOM_LIST)):
# No need to check for a BOM
# the encoding specified doesn't have one
# just decode
return self._decode(infile, self.encoding)
if isinstance(infile, (list, tuple)):
line = infile[0]
else:
line = infile
if isinstance(line, str):
# it's already decoded and there's no need to do anything
# else, just use the _decode utility method to handle
# listifying appropriately
return self._decode(infile, self.encoding)
if self.encoding is not None:
# encoding explicitly supplied
# And it could have an associated BOM
# TODO: if encoding is just UTF16 - we ought to check for both
# TODO: big endian and little endian versions.
enc = BOM_LIST[self.encoding.lower()]
if enc == 'utf_16':
# For UTF16 we try big endian and little endian
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not final_encoding:
# skip UTF8
continue
if infile.startswith(BOM):
### BOM discovered
##self.BOM = True
# Don't need to remove BOM
return self._decode(infile, encoding)
# If we get this far, will *probably* raise a DecodeError
# As it doesn't appear to start with a BOM
return self._decode(infile, self.encoding)
# Must be UTF8
BOM = BOM_SET[enc]
if not line.startswith(BOM):
return self._decode(infile, self.encoding)
newline = line[len(BOM):]
# BOM removed
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
self.BOM = True
return self._decode(infile, self.encoding)
# No encoding specified - so we need to check for UTF8/UTF16
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not isinstance(line, bytes) or not line.startswith(BOM):
# didn't specify a BOM, or it's not a bytestring
continue
else:
# BOM discovered
self.encoding = final_encoding
if not final_encoding:
self.BOM = True
# UTF8
# remove BOM
newline = line[len(BOM):]
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
# UTF-8
if isinstance(infile, str):
return infile.splitlines(True)
elif isinstance(infile, bytes):
return infile.decode('utf-8').splitlines(True)
else:
return self._decode(infile, 'utf-8')
# UTF16 - have to decode
return self._decode(infile, encoding)
# No BOM discovered and no encoding specified, default to UTF-8
if isinstance(infile, bytes):
return infile.decode('utf-8').splitlines(True)
else:
return self._decode(infile, 'utf-8')
def _a_to_u(self, aString):
"""Decode ASCII strings to unicode if a self.encoding is specified."""
if isinstance(aString, bytes) and self.encoding:
return aString.decode(self.encoding)
else:
return aString
def _decode(self, infile, encoding):
"""
Decode infile to unicode. Using the specified encoding.
if is a string, it also needs converting to a list.
"""
if isinstance(infile, str):
return infile.splitlines(True)
if isinstance(infile, bytes):
# NOTE: Could raise a ``UnicodeDecodeError``
if encoding:
return infile.decode(encoding).splitlines(True)
else:
return infile.splitlines(True)
if encoding:
for i, line in enumerate(infile):
if isinstance(line, bytes):
# NOTE: The isinstance test here handles mixed lists of unicode/string
# NOTE: But the decode will break on any non-string values
# NOTE: Or could raise a ``UnicodeDecodeError``
infile[i] = line.decode(encoding)
return infile
def _decode_element(self, line):
"""Decode element to unicode if necessary."""
if isinstance(line, bytes) and self.default_encoding:
return line.decode(self.default_encoding)
else:
return line
# TODO: this may need to be modified
def _str(self, value):
"""
Used by ``stringify`` within validate, to turn non-string values
into strings.
"""
if not isinstance(value, str):
# intentially 'str' because it's just whatever the "normal"
# string type is for the python version we're dealing with
return str(value)
else:
return value
def _parse(self, infile):
"""Actually parse the config file."""
temp_list_values = self.list_values
if self.unrepr:
self.list_values = False
comment_list = []
done_start = False
this_section = self
maxline = len(infile) - 1
cur_index = -1
reset_comment = False
while cur_index < maxline:
if reset_comment:
comment_list = []
cur_index += 1
line = infile[cur_index]
sline = line.strip()
# do we have anything on the line ?
if not sline or sline.startswith('#'):
reset_comment = False
comment_list.append(line)
continue
if not done_start:
# preserve initial comment
self.initial_comment = comment_list
comment_list = []
done_start = True
reset_comment = True
# first we check if it's a section marker
mat = self._sectionmarker.match(line)
if mat is not None:
# is a section line
(indent, sect_open, sect_name, sect_close, comment) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
cur_depth = sect_open.count('[')
if cur_depth != sect_close.count(']'):
self._handle_error("Cannot compute the section depth",
NestingError, infile, cur_index)
continue
if cur_depth < this_section.depth:
# the new section is dropping back to a previous level
try:
parent = self._match_depth(this_section,
cur_depth).parent
except SyntaxError:
self._handle_error("Cannot compute nesting level",
NestingError, infile, cur_index)
continue
elif cur_depth == this_section.depth:
# the new section is a sibling of the current section
parent = this_section.parent
elif cur_depth == this_section.depth + 1:
# the new section is a child the current section
parent = this_section
else:
self._handle_error("Section too nested",
NestingError, infile, cur_index)
continue
sect_name = self._unquote(sect_name)
if sect_name in parent:
self._handle_error('Duplicate section name',
DuplicateError, infile, cur_index)
continue
# create the new section
this_section = Section(
parent,
cur_depth,
self,
name=sect_name)
parent[sect_name] = this_section
parent.inline_comments[sect_name] = comment
parent.comments[sect_name] = comment_list
continue
#
# it's not a section marker,
# so it should be a valid ``key = value`` line
mat = self._keyword.match(line)
if mat is None:
self._handle_error(
'Invalid line ({0!r}) (matched as neither section nor keyword)'.format(line),
ParseError, infile, cur_index)
else:
# is a keyword value
# value will include any inline comment
(indent, key, value) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
# check for a multiline value
if value[:3] in ['"""', "'''"]:
try:
value, comment, cur_index = self._multiline(
value, infile, cur_index, maxline)
except SyntaxError:
self._handle_error(
'Parse error in multiline value',
ParseError, infile, cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if type(e) == UnknownType:
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing multiline value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if isinstance(e, UnknownType):
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
# extract comment and lists
try:
(value, comment) = self._handle_value(value)
except SyntaxError:
self._handle_error(
'Parse error in value',
ParseError, infile, cur_index)
continue
#
key = self._unquote(key)
if key in this_section:
self._handle_error(
'Duplicate keyword name',
DuplicateError, infile, cur_index)
continue
# add the key.
# we set unrepr because if we have got this far we will never
# be creating a new section
this_section.__setitem__(key, value, unrepr=True)
this_section.inline_comments[key] = comment
this_section.comments[key] = comment_list
continue
#
if self.indent_type is None:
# no indentation used, set the type accordingly
self.indent_type = ''
# preserve the final comment
if not self and not self.initial_comment:
self.initial_comment = comment_list
elif not reset_comment:
self.final_comment = comment_list
self.list_values = temp_list_values
def _match_depth(self, sect, depth):
    """
    Walk up the parent chain of ``sect`` until a section whose depth
    equals ``depth`` is found, and return that section.

    Raises SyntaxError if the top level is reached (a section that is
    its own parent) without finding a match, or if no ancestor has
    exactly the requested depth.
    """
    current = sect
    while current.depth > depth:
        if current is current.parent:
            # already at the top level - no ancestor can match
            raise SyntaxError()
        current = current.parent
    if current.depth == depth:
        return current
    # depth skipped over an ancestor level - malformed nesting
    raise SyntaxError()
def _handle_error(self, text, ErrorClass, infile, cur_index):
    """
    Handle an error according to the error settings.

    Either raise the error (when ``raise_errors`` is set, stopping the
    parse immediately) or store it in ``self._errors`` to be re-raised
    once parsing has finished.  The error occurred at ``cur_index``.
    """
    offending_line = infile[cur_index]
    # report line numbers 1-based to the user
    cur_index += 1
    message = '{0} at line {1}.'.format(text, cur_index)
    error = ErrorClass(message, cur_index, offending_line)
    if self.raise_errors:
        # raise the error - parsing stops here
        raise error
    # store the error; reraise when parsing has finished
    self._errors.append(error)
def _unquote(self, value):
    """
    Return *value* with one matching pair of surrounding quotes removed.

    A value is only unquoted when its first and last characters are the
    same quote character (single or double); otherwise it is returned
    unchanged.  An empty value raises SyntaxError (should only happen
    while parsing lists).
    """
    if not value:
        # should only happen during parsing of lists
        raise SyntaxError
    first, last = value[0], value[-1]
    if first == last and first in ('"', "'"):
        return value[1:-1]
    return value
def _quote(self, value, multiline=True):
    """
    Return a safely quoted version of a value.
    Raise a ConfigObjError if the value cannot be safely quoted.
    If multiline is ``True`` (default) then use triple quotes
    if necessary.
    * Don't quote values that don't need it.
    * Recursively quote members of a list and return a comma joined list.
    * Multiline is ``False`` for lists.
    * Obey list syntax for empty and single member lists.
    If ``list_values=False`` then the value is only quoted if it contains
    a ``\\n`` (is multiline) or '#'.
    If ``write_empty_values`` is set, and the value is an empty string, it
    won't be quoted.
    """
    if multiline and self.write_empty_values and value == '':
        # Only if multiline is set, so that it is used for values not
        # keys, and not values that are part of a list
        return ''
    if multiline and isinstance(value, (list, tuple)):
        # List syntax: empty list -> bare comma, single member ->
        # trailing comma, otherwise comma-joined individually-quoted
        # members (multiline=False so members never use triple quotes).
        if not value:
            return ','
        elif len(value) == 1:
            return self._quote(value[0], multiline=False) + ','
        return ', '.join([self._quote(val, multiline=False)
                          for val in value])
    if not isinstance(value, str):
        if self.stringify:
            # intentionally 'str' because it's just whatever the "normal"
            # string type is for the python version we're dealing with
            value = str(value)
        else:
            raise TypeError('Value "%s" is not a string.' % value)
    if not value:
        return '""'
    # Decide which quoting style (none / single-line / triple) applies.
    no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
    need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
    hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
    check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
    if check_for_single:
        if not self.list_values:
            # we don't quote if ``list_values=False``
            quot = noquot
        # for normal values either single or double quotes will do
        elif '\n' in value:
            # will only happen if multiline is off - e.g. '\n' in key
            raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
        elif ((value[0] not in wspace_plus) and
              (value[-1] not in wspace_plus) and
              (',' not in value)):
            # no leading/trailing whitespace-ish chars and no comma:
            # safe to leave unquoted
            quot = noquot
        else:
            quot = self._get_single_quote(value)
    else:
        # if value has '\n' or "'" *and* '"', it will need triple quotes
        quot = self._get_triple_quote(value)
    if quot == noquot and '#' in value and self.list_values:
        # a bare '#' would start an inline comment, so force quoting
        quot = self._get_single_quote(value)
    return quot % value
def _get_single_quote(self, value):
    """
    Return the single-line quote template for *value*: single quotes
    when the value contains a double quote, double quotes otherwise.
    Raises ConfigObjError when the value contains both quote kinds.
    """
    has_single = "'" in value
    has_double = '"' in value
    if has_single and has_double:
        raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
    return squot if has_double else dquot
def _get_triple_quote(self, value):
    """
    Return the triple-quote template for *value*: triple-single when the
    value already contains a triple-double quote, triple-double
    otherwise.  Raises ConfigObjError when it contains both.
    """
    has_triple_double = '"""' in value
    has_triple_single = "'''" in value
    if has_triple_double and has_triple_single:
        raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
    return tsquot if has_triple_double else tdquot
def _handle_value(self, value):
    """
    Given a value string, unquote, remove comment,
    handle lists. (including empty and single member lists)

    Returns a ``(value, comment)`` tuple where ``value`` is either a
    string or a list of strings.  Raises SyntaxError for badly quoted
    values or invalid lists.
    """
    if self._inspec:
        # Parsing a configspec so don't handle comments
        return (value, '')
    # do we look for lists in values ?
    if not self.list_values:
        mat = self._nolistvalue.match(value)
        if mat is None:
            raise SyntaxError()
        # NOTE: we don't unquote here
        return mat.groups()
    #
    mat = self._valueexp.match(value)
    if mat is None:
        # the value is badly constructed, probably badly quoted,
        # or an invalid list
        raise SyntaxError()
    # groups: comma-separated leading members, final single member,
    # the lone-comma empty-list marker, and the trailing comment
    (list_values, single, empty_list, comment) = mat.groups()
    if (list_values == '') and (single is None):
        # change this if you want to accept empty values
        raise SyntaxError()
    # NOTE: note there is no error handling from here if the regex
    # is wrong: then incorrect values will slip through
    if empty_list is not None:
        # the single comma - meaning an empty list
        return ([], comment)
    if single is not None:
        # handle empty values
        if list_values and not single:
            # FIXME: the '' is a workaround because our regex now matches
            # '' at the end of a list if it has a trailing comma
            single = None
        else:
            single = single or '""'
            single = self._unquote(single)
    if list_values == '':
        # not a list value
        return (single, comment)
    the_list = self._listvalueexp.findall(list_values)
    the_list = [self._unquote(val) for val in the_list]
    if single is not None:
        # append the final member after the last comma
        the_list += [single]
    return (the_list, comment)
def _multiline(self, value, infile, cur_index, maxline):
    """
    Extract the value, where we are in a multiline situation.

    ``value`` is the remainder of the current line, starting with a
    triple quote.  Returns ``(value, comment, cur_index)`` where
    ``cur_index`` is the index of the line the value ends on.  Raises
    SyntaxError for an unterminated or badly formed multiline value.
    """
    quot = value[:3]
    newvalue = value[3:]
    # regexes for: value closed on the same line / the closing line of
    # a genuinely multi-line value, for this quote style
    single_line = self._triple_quote[quot][0]
    multi_line = self._triple_quote[quot][1]
    mat = single_line.match(value)
    if mat is not None:
        # opened and closed on the same line
        retval = list(mat.groups())
        retval.append(cur_index)
        return retval
    elif newvalue.find(quot) != -1:
        # somehow the triple quote is missing
        raise SyntaxError()
    #
    # accumulate following lines until one contains the closing quote
    while cur_index < maxline:
        cur_index += 1
        newvalue += '\n'
        line = infile[cur_index]
        if line.find(quot) == -1:
            newvalue += line
        else:
            # end of multiline, process it
            break
    else:
        # we've got to the end of the config, oops...
        raise SyntaxError()
    mat = multi_line.match(line)
    if mat is None:
        # a badly formed line
        raise SyntaxError()
    (value, comment) = mat.groups()
    return (newvalue + value, comment, cur_index)
def _handle_configspec(self, configspec):
    """Parse the configspec and store it on ``self.configspec``."""
    # FIXME: Should we check that the configspec was created with the
    # correct settings ? (i.e. ``list_values=False``)
    if not isinstance(configspec, ConfigObj):
        # accept a filename / sequence of lines / dict and parse it
        # ourselves in configspec mode (``_inspec=True``)
        try:
            configspec = ConfigObj(configspec,
                                   raise_errors=True,
                                   file_error=True,
                                   _inspec=True)
        except ConfigObjError as e:
            # FIXME: Should these errors have a reference
            # to the already parsed ConfigObj ?
            raise ConfigspecError('Parsing configspec failed: %s' % e)
        except IOError as e:
            raise IOError('Reading configspec failed: %s' % e)
    self.configspec = configspec
def _set_configspec(self, section, copy):
    """
    Called by validate. Handles setting the configspec on subsections
    including sections to be validated by __many__
    """
    configspec = section.configspec
    many = configspec.get('__many__')
    if isinstance(many, dict):
        # apply the __many__ spec to every subsection that has no
        # explicit spec of its own
        for entry in section.sections:
            if entry not in configspec:
                section[entry].configspec = many
    for entry in configspec.sections:
        if entry == '__many__':
            continue
        if entry not in section:
            # create missing sections (marked _created) so defaults can
            # be filled in during validation
            section[entry] = {}
            section[entry]._created = True
            if copy:
                # copy comments
                section.comments[entry] = configspec.comments.get(entry, [])
                section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
        # Could be a scalar when we expect a section
        if isinstance(section[entry], Section):
            section[entry].configspec = configspec[entry]
def _write_line(self, indent_string, entry, this_entry, comment):
    """Render one ``key = value`` output line for the write method."""
    # NOTE: self._quote handles non-string values for us.
    if self.unrepr:
        rendered_value = repr(this_entry)
    else:
        rendered_value = self._decode_element(self._quote(this_entry))
    key_part = self._decode_element(self._quote(entry, multiline=False))
    return '%s%s%s%s%s' % (indent_string,
                           key_part,
                           self._a_to_u(' = '),
                           rendered_value,
                           self._decode_element(comment))
def _write_marker(self, indent_string, depth, entry, comment):
    """Render a ``[section]`` marker line, nested ``depth`` brackets deep."""
    open_brackets = self._a_to_u('[' * depth)
    close_brackets = self._a_to_u(']' * depth)
    name = self._quote(self._decode_element(entry), multiline=False)
    return '%s%s%s%s%s' % (indent_string,
                           open_brackets,
                           name,
                           close_brackets,
                           self._decode_element(comment))
def _handle_comment(self, comment):
    """
    Return an inline comment formatted for output.

    Empty comments become '', and comments that do not already start
    with '#' get a ' # ' separator prepended (after the indent type).
    """
    if not comment:
        return ''
    prefix = self.indent_type
    if not comment.startswith('#'):
        prefix += self._a_to_u(' # ')
    return prefix + comment
# Public methods
def write(self, outfile=None, section=None):
    """
    Write the current ConfigObj as a file
    tekNico: FIXME: use StringIO instead of real files
    >>> filename = a.filename
    >>> a.filename = 'test.ini'
    >>> a.write()
    >>> a.filename = filename
    >>> a == ConfigObj('test.ini', raise_errors=True)
    1
    >>> import os
    >>> os.remove('test.ini')
    """
    # Recursive: the top-level call (section=None) walks subsections by
    # calling itself with section=<subsection>, which returns the lines
    # rather than writing them.
    if self.indent_type is None:
        # this can be true if initialised from a dictionary
        self.indent_type = DEFAULT_INDENT_TYPE
    out = []
    cs = self._a_to_u('#')
    csp = self._a_to_u('# ')
    if section is None:
        # top-level call: disable interpolation so raw values are written
        int_val = self.interpolation
        self.interpolation = False
        section = self
        for line in self.initial_comment:
            line = self._decode_element(line)
            stripped_line = line.strip()
            if stripped_line and not stripped_line.startswith(cs):
                line = csp + line
            out.append(line)
    indent_string = self.indent_type * section.depth
    for entry in (section.scalars + section.sections):
        if entry in section.defaults:
            # don't write out default values
            continue
        for comment_line in section.comments[entry]:
            comment_line = self._decode_element(comment_line.lstrip())
            if comment_line and not comment_line.startswith(cs):
                comment_line = csp + comment_line
            out.append(indent_string + comment_line)
        this_entry = section[entry]
        comment = self._handle_comment(section.inline_comments[entry])
        if isinstance(this_entry, Section):
            # a section
            out.append(self._write_marker(
                indent_string,
                this_entry.depth,
                entry,
                comment))
            out.extend(self.write(section=this_entry))
        else:
            out.append(self._write_line(
                indent_string,
                entry,
                this_entry,
                comment))
    if section is self:
        # top-level call only: trailing comments, restore interpolation
        for line in self.final_comment:
            line = self._decode_element(line)
            stripped_line = line.strip()
            if stripped_line and not stripped_line.startswith(cs):
                line = csp + line
            out.append(line)
        self.interpolation = int_val
    if section is not self:
        # recursive call for a subsection: hand the lines back
        return out
    if (self.filename is None) and (outfile is None):
        # output a list of lines
        # might need to encode
        # NOTE: This will *screw* UTF16, each line will start with the BOM
        if self.encoding:
            out = [l.encode(self.encoding) for l in out]
        if (self.BOM and ((self.encoding is None) or
                          (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
            # Add the UTF8 BOM
            if not out:
                out.append('')
            out[0] = BOM_UTF8 + out[0]
        return out
    # Turn the list to a string, joined with correct newlines
    newline = self.newlines or os.linesep
    if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w'
            and sys.platform == 'win32' and newline == '\r\n'):
        # Windows specific hack to avoid writing '\r\r\n'
        newline = '\n'
    output = self._a_to_u(newline).join(out)
    if not output.endswith(newline):
        output += newline
    if isinstance(output, bytes):
        output_bytes = output
    else:
        output_bytes = output.encode(self.encoding or
                                     self.default_encoding or
                                     'ascii')
    if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
        # Add the UTF8 BOM
        output_bytes = BOM_UTF8 + output_bytes
    if outfile is not None:
        outfile.write(output_bytes)
    else:
        with open(self.filename, 'wb') as h:
            h.write(output_bytes)
def validate(self, validator, preserve_errors=False, copy=False,
             section=None):
    """
    Test the ConfigObj against a configspec.
    It uses the ``validator`` object from *validate.py*.
    To run ``validate`` on the current ConfigObj, call: ::
        test = config.validate(validator)
    (Normally having previously passed in the configspec when the ConfigObj
    was created - you can dynamically assign a dictionary of checks to the
    ``configspec`` attribute of a section though).
    It returns ``True`` if everything passes, or a dictionary of
    pass/fails (True/False). If every member of a subsection passes, it
    will just have the value ``True``. (It also returns ``False`` if all
    members fail).
    In addition, it converts the values from strings to their native
    types if their checks pass (and ``stringify`` is set).
    If ``preserve_errors`` is ``True`` (``False`` is default) then instead
    of a marking a fail with a ``False``, it will preserve the actual
    exception object. This can contain info about the reason for failure.
    For example the ``VdtValueTooSmallError`` indicates that the value
    supplied was too small. If a value (or section) is missing it will
    still be marked as ``False``.
    You must have the validate module to use ``preserve_errors=True``.
    You can then use the ``flatten_errors`` function to turn your nested
    results dictionary into a flattened list of failures - useful for
    displaying meaningful error messages.
    """
    if section is None:
        # top-level call: set up once, then recurse per subsection
        if self.configspec is None:
            raise ValueError('No configspec supplied.')
        if preserve_errors:
            # We do this once to remove a top level dependency on the validate module
            # Which makes importing configobj faster
            from .validate import VdtMissingValue
            self._vdtMissingValue = VdtMissingValue
        section = self
        if copy:
            # carry over document-level attributes from the configspec
            section.initial_comment = section.configspec.initial_comment
            section.final_comment = section.configspec.final_comment
            section.encoding = section.configspec.encoding
            section.BOM = section.configspec.BOM
            section.newlines = section.configspec.newlines
            section.indent_type = section.configspec.indent_type
    #
    # section.default_values.clear() #??
    configspec = section.configspec
    self._set_configspec(section, copy)
    def validate_entry(entry, spec, val, missing, ret_true, ret_false):
        # Validate a single scalar entry; record the result in ``out``
        # and return the updated all-passed/all-failed flags.
        section.default_values.pop(entry, None)
        try:
            section.default_values[entry] = validator.get_default_value(configspec[entry])
        except (KeyError, AttributeError, validator.baseErrorClass):
            # No default, bad default or validator has no 'get_default_value'
            # (e.g. SimpleVal)
            pass
        try:
            check = validator.check(spec,
                                    val,
                                    missing=missing
                                    )
        except validator.baseErrorClass as e:
            if not preserve_errors or isinstance(e, self._vdtMissingValue):
                out[entry] = False
            else:
                # preserve the error
                out[entry] = e
                ret_false = False
            ret_true = False
        else:
            ret_false = False
            out[entry] = True
            if self.stringify or missing:
                # if we are doing type conversion
                # or the value is a supplied default
                if not self.stringify:
                    if isinstance(check, (list, tuple)):
                        # preserve lists
                        check = [self._str(item) for item in check]
                    elif missing and check is None:
                        # convert the None from a default to a ''
                        check = ''
                    else:
                        check = self._str(check)
                if (check != val) or missing:
                    section[entry] = check
            if not copy and missing and entry not in section.defaults:
                section.defaults.append(entry)
        return ret_true, ret_false
    #
    # ``out`` maps entry name -> True / False / preserved exception;
    # ret_true/ret_false track "everything passed" / "everything failed"
    out = {}
    ret_true = True
    ret_false = True
    unvalidated = [k for k in section.scalars if k not in configspec]
    incorrect_sections = [k for k in configspec.sections if k in section.scalars]
    incorrect_scalars = [k for k in configspec.scalars if k in section.sections]
    for entry in configspec.scalars:
        if entry in ('__many__', '___many___'):
            # reserved names
            continue
        if (not entry in section.scalars) or (entry in section.defaults):
            # missing entries
            # or entries from defaults
            missing = True
            val = None
            if copy and entry not in section.scalars:
                # copy comments
                section.comments[entry] = (
                    configspec.comments.get(entry, []))
                section.inline_comments[entry] = (
                    configspec.inline_comments.get(entry, ''))
            #
        else:
            missing = False
            val = section[entry]
        ret_true, ret_false = validate_entry(entry, configspec[entry], val,
                                             missing, ret_true, ret_false)
    many = None
    if '__many__' in configspec.scalars:
        many = configspec['__many__']
    elif '___many___' in configspec.scalars:
        many = configspec['___many___']
    if many is not None:
        # scalars not mentioned in the configspec are checked against
        # the __many__ spec instead of being left unvalidated
        for entry in unvalidated:
            val = section[entry]
            ret_true, ret_false = validate_entry(entry, many, val, False,
                                                 ret_true, ret_false)
        unvalidated = []
    for entry in incorrect_scalars:
        # configspec expects a scalar but the config has a section
        ret_true = False
        if not preserve_errors:
            out[entry] = False
        else:
            ret_false = False
            msg = 'Value %r was provided as a section' % entry
            out[entry] = validator.baseErrorClass(msg)
    for entry in incorrect_sections:
        # configspec expects a section but the config has a scalar
        ret_true = False
        if not preserve_errors:
            out[entry] = False
        else:
            ret_false = False
            msg = 'Section %r was provided as a single value' % entry
            out[entry] = validator.baseErrorClass(msg)
    # Missing sections will have been created as empty ones when the
    # configspec was read.
    for entry in section.sections:
        # FIXME: this means DEFAULT is not copied in copy mode
        if section is self and entry == 'DEFAULT':
            continue
        if section[entry].configspec is None:
            unvalidated.append(entry)
            continue
        if copy:
            section.comments[entry] = configspec.comments.get(entry, [])
            section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
        # Recurse into the subsection.  ``check`` may be True, False or
        # a nested results dict, hence the deliberate ``==`` comparisons
        # below (``is`` would misclassify a dict result).
        check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry])
        out[entry] = check
        if check == False:
            ret_true = False
        elif check == True:
            ret_false = False
        else:
            ret_true = False
    section.extra_values = unvalidated
    if preserve_errors and not section._created:
        # If the section wasn't created (i.e. it wasn't missing)
        # then we can't return False, we need to preserve errors
        ret_false = False
    #
    if ret_false and preserve_errors and out:
        # If we are preserving errors, but all
        # the failures are from missing sections / values
        # then we can return False. Otherwise there is a
        # real failure that we need to preserve.
        ret_false = not any(out.values())
    if ret_true:
        return True
    elif ret_false:
        return False
    return out
def reset(self):
    """Clear ConfigObj instance and restore to 'freshly created' state."""
    self.clear()
    self._initialise()
    # FIXME: '_initialise' ought to handle these, but the ConfigObj
    # constructor (and reload) requires an empty dictionary
    self.configspec = None
    # just to be sure ;-)
    self._original_configspec = None
def reload(self):
    """
    Reload a ConfigObj from file.

    This method raises a ``ReloadError`` if the ConfigObj doesn't have
    a filename attribute pointing to a file.
    """
    if not isinstance(self.filename, str):
        raise ReloadError()
    filename = self.filename
    # Snapshot the current options (configspec handled separately below)
    # so the fresh parse behaves exactly like the original one.
    preserved_options = {
        entry: getattr(self, entry)
        for entry in OPTION_DEFAULTS
        if entry != 'configspec'
    }
    configspec = self._original_configspec
    preserved_options['configspec'] = configspec
    self.clear()
    self._initialise(preserved_options)
    self._load(filename, configspec)
| ConfigObj |
python | getsentry__sentry | src/sentry/api/exceptions.py | {
"start": 4190,
"end": 4353
} | class ____(SentryAPIException):
status_code = status.HTTP_408_REQUEST_TIMEOUT
code = "request-timeout"
message = "Proxied request timed out"
| RequestTimeout |
python | ray-project__ray | rllib/utils/filter.py | {
"start": 9735,
"end": 15251
} | class ____(Filter):
"""Keeps track of a running mean for seen states"""
is_concurrent = False
def __init__(self, shape, demean=True, destd=True, clip=10.0):
self.shape = shape
# We don't have a preprocessor, if shape is None (Discrete) or
# flat_shape is Tuple[np.ndarray] or Dict[str, np.ndarray]
# (complex inputs).
flat_shape = tree.flatten(self.shape)
self.no_preprocessor = shape is None or (
isinstance(self.shape, (dict, tuple))
and len(flat_shape) > 0
and isinstance(flat_shape, np.ndarray)
)
# If preprocessing (flattening dicts/tuples), make sure shape
# is an np.ndarray, so we don't confuse it with a complex Tuple
# space's shape structure (which is a Tuple[np.ndarray, ...]).
if not self.no_preprocessor:
self.shape = np.array(self.shape)
self.demean = demean
self.destd = destd
self.clip = clip
# Running stats.
self.running_stats = tree.map_structure(lambda s: RunningStat(s), self.shape)
# In distributed rollouts, each worker sees different states.
# The buffer is used to keep track of deltas amongst all the
# observation filters.
self.buffer = None
self.reset_buffer()
def reset_buffer(self) -> None:
self.buffer = tree.map_structure(lambda s: RunningStat(s), self.shape)
def apply_changes(
self, other: "MeanStdFilter", with_buffer: bool = False, *args, **kwargs
) -> None:
"""Applies updates from the buffer of another filter.
Args:
other: Other filter to apply info from
with_buffer: Flag for specifying if the buffer should be
copied from other.
.. testcode::
:skipif: True
a = MeanStdFilter(())
a(1)
a(2)
print([a.running_stats.n, a.running_stats.mean, a.buffer.n])
.. testoutput::
[2, 1.5, 2]
.. testcode::
:skipif: True
b = MeanStdFilter(())
b(10)
a.apply_changes(b, with_buffer=False)
print([a.running_stats.n, a.running_stats.mean, a.buffer.n])
.. testoutput::
[3, 4.333333333333333, 2]
.. testcode::
:skipif: True
a.apply_changes(b, with_buffer=True)
print([a.running_stats.n, a.running_stats.mean, a.buffer.n])
.. testoutput::
[4, 5.75, 1]
"""
tree.map_structure(
lambda rs, other_rs: rs.update(other_rs), self.running_stats, other.buffer
)
if with_buffer:
self.buffer = tree.map_structure(lambda b: b.copy(), other.buffer)
def copy(self) -> "MeanStdFilter":
"""Returns a copy of `self`."""
other = MeanStdFilter(self.shape)
other.sync(self)
return other
def as_serializable(self) -> "MeanStdFilter":
return self.copy()
def sync(self, other: "MeanStdFilter") -> None:
"""Syncs all fields together from other filter.
.. testcode::
:skipif: True
a = MeanStdFilter(())
a(1)
a(2)
print([a.running_stats.n, a.running_stats.mean, a.buffer.n])
.. testoutput::
[2, array(1.5), 2]
.. testcode::
:skipif: True
b = MeanStdFilter(())
b(10)
print([b.running_stats.n, b.running_stats.mean, b.buffer.n])
.. testoutput::
[1, array(10.0), 1]
.. testcode::
:skipif: True
a.sync(b)
print([a.running_stats.n, a.running_stats.mean, a.buffer.n])
.. testoutput::
[1, array(10.0), 1]
"""
self.demean = other.demean
self.destd = other.destd
self.clip = other.clip
self.running_stats = tree.map_structure(
lambda rs: rs.copy(), other.running_stats
)
self.buffer = tree.map_structure(lambda b: b.copy(), other.buffer)
def __call__(self, x: TensorStructType, update: bool = True) -> TensorStructType:
if self.no_preprocessor:
x = tree.map_structure(lambda x_: np.asarray(x_), x)
else:
x = np.asarray(x)
def _helper(x, rs, buffer, shape):
# Discrete|MultiDiscrete spaces -> No normalization.
if shape is None:
return x
# Keep dtype as is througout this filter.
orig_dtype = x.dtype
if update:
if len(x.shape) == len(rs.shape) + 1:
# The vectorized case.
for i in range(x.shape):
rs.push(x[i])
buffer.push(x[i])
else:
# The unvectorized case.
rs.push(x)
buffer.push(x)
if self.demean:
x = x - rs.mean
if self.destd:
x = x / (rs.std + SMALL_NUMBER)
if self.clip:
x = np.clip(x, -self.clip, self.clip)
return x.astype(orig_dtype)
if self.no_preprocessor:
return tree.map_structure_up_to(
x, _helper, x, self.running_stats, self.buffer, self.shape
)
else:
return _helper(x, self.running_stats, self.buffer, self.shape)
@OldAPIStack
| MeanStdFilter |
python | mlflow__mlflow | tests/paddle/test_paddle_model_export.py | {
"start": 11465,
"end": 24013
} | class ____(paddle.nn.Layer):
def __init__(self):
super().__init__()
self.fc_ = paddle.nn.Linear(13, 1, None)
def forward(self, inputs):
return self.fc_(inputs)
@pytest.fixture
def pd_model_built_in_high_level_api(get_dataset_built_in_high_level_api):
train_dataset, test_dataset = get_dataset_built_in_high_level_api
model = paddle.Model(UCIHousing())
optim = paddle.optimizer.Adam(learning_rate=0.01, parameters=model.parameters())
model.prepare(optim, paddle.nn.MSELoss())
model.fit(train_dataset, epochs=6, batch_size=8, verbose=1)
return ModelWithData(model=model, inference_dataframe=test_dataset)
def test_model_save_load_built_in_high_level_api(pd_model_built_in_high_level_api, model_path):
model = pd_model_built_in_high_level_api.model
test_dataset = pd_model_built_in_high_level_api.inference_dataframe
mlflow.paddle.save_model(pd_model=model, path=model_path)
reloaded_pd_model = mlflow.paddle.load_model(model_uri=model_path)
reloaded_pyfunc = pyfunc.load_model(model_uri=model_path)
low_level_test_dataset = [x[0] for x in test_dataset]
np.testing.assert_array_almost_equal(
np.array(model.predict(test_dataset)).squeeze(),
np.array(reloaded_pyfunc.predict(np.array(low_level_test_dataset))).squeeze(),
decimal=5,
)
np.testing.assert_array_almost_equal(
np.array(reloaded_pd_model(np.array(low_level_test_dataset))).squeeze(),
np.array(reloaded_pyfunc.predict(np.array(low_level_test_dataset))).squeeze(),
decimal=5,
)
def test_model_built_in_high_level_api_load_from_remote_uri_succeeds(
pd_model_built_in_high_level_api, model_path, mock_s3_bucket
):
model = pd_model_built_in_high_level_api.model
test_dataset = pd_model_built_in_high_level_api.inference_dataframe
mlflow.paddle.save_model(pd_model=model, path=model_path)
artifact_root = f"s3://{mock_s3_bucket}"
artifact_path = "model"
artifact_repo = S3ArtifactRepository(artifact_root)
artifact_repo.log_artifacts(model_path, artifact_path=artifact_path)
model_uri = artifact_root + "/" + artifact_path
reloaded_model = mlflow.paddle.load_model(model_uri=model_uri)
low_level_test_dataset = [x[0] for x in test_dataset]
np.testing.assert_array_almost_equal(
np.array(model.predict(test_dataset)).squeeze(),
np.array(reloaded_model(np.array(low_level_test_dataset))).squeeze(),
decimal=5,
)
def test_model_built_in_high_level_api_log(pd_model_built_in_high_level_api, model_path, tmp_path):
model = pd_model_built_in_high_level_api.model
test_dataset = pd_model_built_in_high_level_api.inference_dataframe
try:
artifact_path = "model"
conda_env = os.path.join(tmp_path, "conda_env.yaml")
_mlflow_conda_env(conda_env, additional_pip_deps=["paddle"])
model_info = mlflow.paddle.log_model(model, name=artifact_path, conda_env=conda_env)
reloaded_pd_model = mlflow.paddle.load_model(model_uri=model_info.model_uri)
low_level_test_dataset = [x[0] for x in test_dataset]
np.testing.assert_array_almost_equal(
np.array(model.predict(test_dataset)).squeeze(),
np.array(reloaded_pd_model(np.array(low_level_test_dataset))).squeeze(),
decimal=5,
)
model_path = _download_artifact_from_uri(artifact_uri=model_info.model_uri)
model_config = Model.load(os.path.join(model_path, "MLmodel"))
assert pyfunc.FLAVOR_NAME in model_config.flavors
assert pyfunc.ENV in model_config.flavors[pyfunc.FLAVOR_NAME]
env_path = model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.ENV]["conda"]
assert os.path.exists(os.path.join(model_path, env_path))
finally:
mlflow.end_run()
@pytest.fixture
def model_retrain_path(tmp_path):
return os.path.join(tmp_path, "model_retrain")
@pytest.mark.allow_infer_pip_requirements_fallback
def test_model_retrain_built_in_high_level_api(
pd_model_built_in_high_level_api,
model_path,
model_retrain_path,
get_dataset_built_in_high_level_api,
):
model = pd_model_built_in_high_level_api.model
mlflow.paddle.save_model(pd_model=model, path=model_path, training=True)
training_dataset, test_dataset = get_dataset_built_in_high_level_api
model_retrain = paddle.Model(UCIHousing())
model_retrain = mlflow.paddle.load_model(model_uri=model_path, model=model_retrain)
optim = paddle.optimizer.Adam(learning_rate=0.015, parameters=model.parameters())
model_retrain.prepare(optim, paddle.nn.MSELoss())
model_retrain.fit(training_dataset, epochs=6, batch_size=8, verbose=1)
mlflow.paddle.save_model(pd_model=model_retrain, path=model_retrain_path, training=False)
with pytest.raises(TypeError, match="This model can't be loaded"):
mlflow.paddle.load_model(model_uri=model_retrain_path, model=model_retrain)
error_model = 0
error_model_type = type(error_model)
with pytest.raises(
TypeError,
match=f"Invalid object type `{error_model_type}` for `model`, must be `paddle.Model`",
):
mlflow.paddle.load_model(model_uri=model_retrain_path, model=error_model)
reloaded_pd_model = mlflow.paddle.load_model(model_uri=model_retrain_path)
reloaded_pyfunc = pyfunc.load_model(model_uri=model_retrain_path)
low_level_test_dataset = [x[0] for x in test_dataset]
np.testing.assert_array_almost_equal(
np.array(model_retrain.predict(test_dataset)).squeeze(),
np.array(reloaded_pyfunc.predict(np.array(low_level_test_dataset))).squeeze(),
decimal=5,
)
np.testing.assert_array_almost_equal(
np.array(reloaded_pd_model(np.array(low_level_test_dataset))).squeeze(),
np.array(reloaded_pyfunc.predict(np.array(low_level_test_dataset))).squeeze(),
decimal=5,
)
def test_log_model_built_in_high_level_api(
pd_model_built_in_high_level_api, model_path, tmp_path, get_dataset_built_in_high_level_api
):
model = pd_model_built_in_high_level_api.model
test_dataset = get_dataset_built_in_high_level_api[1]
try:
artifact_path = "model"
conda_env = os.path.join(tmp_path, "conda_env.yaml")
_mlflow_conda_env(conda_env, additional_pip_deps=["paddle"])
model_info = mlflow.paddle.log_model(
model, name=artifact_path, conda_env=conda_env, training=True
)
model_retrain = paddle.Model(UCIHousing())
optim = paddle.optimizer.Adam(learning_rate=0.015, parameters=model.parameters())
model_retrain.prepare(optim, paddle.nn.MSELoss())
model_retrain = mlflow.paddle.load_model(
model_uri=model_info.model_uri, model=model_retrain
)
np.testing.assert_array_almost_equal(
np.array(model.predict(test_dataset)).squeeze(),
np.array(model_retrain.predict(test_dataset)).squeeze(),
decimal=5,
)
model_path = _download_artifact_from_uri(artifact_uri=model_info.model_uri)
model_config = Model.load(os.path.join(model_path, "MLmodel"))
assert pyfunc.FLAVOR_NAME in model_config.flavors
assert pyfunc.ENV in model_config.flavors[pyfunc.FLAVOR_NAME]
env_path = model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.ENV]["conda"]
assert os.path.exists(os.path.join(model_path, env_path))
finally:
mlflow.end_run()
def test_log_model_with_pip_requirements(pd_model, tmp_path):
expected_mlflow_version = _mlflow_major_version_string()
req_file = tmp_path.joinpath("requirements.txt")
req_file.write_text("a")
with mlflow.start_run():
model_info = mlflow.paddle.log_model(
pd_model.model, name="model", pip_requirements=str(req_file)
)
_assert_pip_requirements(model_info.model_uri, [expected_mlflow_version, "a"], strict=True)
with mlflow.start_run():
model_info = mlflow.paddle.log_model(
pd_model.model, name="model", pip_requirements=[f"-r {req_file}", "b"]
)
_assert_pip_requirements(
model_info.model_uri, [expected_mlflow_version, "a", "b"], strict=True
)
with mlflow.start_run():
model_info = mlflow.paddle.log_model(
pd_model.model, name="model", pip_requirements=[f"-c {req_file}", "b"]
)
_assert_pip_requirements(
model_info.model_uri,
[expected_mlflow_version, "b", "-c constraints.txt"],
["a"],
strict=True,
)
def test_log_model_with_extra_pip_requirements(pd_model, tmp_path):
expected_mlflow_version = _mlflow_major_version_string()
default_reqs = mlflow.paddle.get_default_pip_requirements()
# Path to a requirements file
req_file = tmp_path.joinpath("requirements.txt")
req_file.write_text("a")
with mlflow.start_run():
model_info = mlflow.paddle.log_model(
pd_model.model, name="model", extra_pip_requirements=str(req_file)
)
_assert_pip_requirements(
model_info.model_uri, [expected_mlflow_version, *default_reqs, "a"]
)
# List of requirements
with mlflow.start_run():
model_info = mlflow.paddle.log_model(
pd_model.model, name="model", extra_pip_requirements=[f"-r {req_file}", "b"]
)
_assert_pip_requirements(
model_info.model_uri, [expected_mlflow_version, *default_reqs, "a", "b"]
)
# Constraints file
with mlflow.start_run():
model_info = mlflow.paddle.log_model(
pd_model.model, name="model", extra_pip_requirements=[f"-c {req_file}", "b"]
)
_assert_pip_requirements(
model_info.model_uri,
[expected_mlflow_version, *default_reqs, "b", "-c constraints.txt"],
["a"],
)
def test_pyfunc_serve_and_score(pd_model):
model, inference_dataframe = pd_model
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.paddle.log_model(
model,
name=artifact_path,
extra_pip_requirements=[PROTOBUF_REQUIREMENT]
if Version(paddle.__version__) < Version("2.5.0")
else None,
input_example=pd.DataFrame(inference_dataframe),
)
inference_payload = load_serving_example(model_info.model_uri)
resp = pyfunc_serve_and_score_model(
model_info.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
)
scores = pd.DataFrame(
data=json.loads(resp.content.decode("utf-8"))["predictions"]
).values.squeeze()
np.testing.assert_array_almost_equal(
scores, model(paddle.to_tensor(inference_dataframe)).squeeze()
)
def test_log_model_with_code_paths(pd_model):
artifact_path = "model"
with (
mlflow.start_run(),
mock.patch("mlflow.paddle._add_code_from_conf_to_system_path") as add_mock,
):
model_info = mlflow.paddle.log_model(
pd_model.model, name=artifact_path, code_paths=[__file__]
)
_compare_logged_code_paths(__file__, model_info.model_uri, mlflow.paddle.FLAVOR_NAME)
mlflow.paddle.load_model(model_info.model_uri)
add_mock.assert_called()
def test_model_save_load_with_metadata(pd_model, model_path):
mlflow.paddle.save_model(
pd_model.model, path=model_path, metadata={"metadata_key": "metadata_value"}
)
reloaded_model = mlflow.pyfunc.load_model(model_uri=model_path)
assert reloaded_model.metadata.metadata["metadata_key"] == "metadata_value"
def test_model_log_with_metadata(pd_model):
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.paddle.log_model(
pd_model.model, name=artifact_path, metadata={"metadata_key": "metadata_value"}
)
reloaded_model = mlflow.pyfunc.load_model(model_uri=model_info.model_uri)
assert reloaded_model.metadata.metadata["metadata_key"] == "metadata_value"
def test_model_log_with_signature_inference(pd_model, pd_model_signature):
artifact_path = "model"
test_dataset = pd_model.inference_dataframe
example = test_dataset[:3, :]
with mlflow.start_run():
model_info = mlflow.paddle.log_model(
pd_model.model, name=artifact_path, input_example=example
)
mlflow_model = Model.load(model_info.model_uri)
assert mlflow_model.signature == pd_model_signature
| UCIHousing |
python | django__django | tests/expressions_case/tests.py | {
"start": 670,
"end": 50868
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
o = CaseTestModel.objects.create(integer=1, integer2=1, string="1")
O2OCaseTestModel.objects.create(o2o=o, integer=1)
FKCaseTestModel.objects.create(fk=o, integer=1)
o = CaseTestModel.objects.create(integer=2, integer2=3, string="2")
O2OCaseTestModel.objects.create(o2o=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=3)
o = CaseTestModel.objects.create(integer=3, integer2=4, string="3")
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=2, integer2=2, string="2")
O2OCaseTestModel.objects.create(o2o=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=3)
o = CaseTestModel.objects.create(integer=3, integer2=4, string="3")
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=3, integer2=3, string="3")
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=4, integer2=5, string="4")
O2OCaseTestModel.objects.create(o2o=o, integer=1)
FKCaseTestModel.objects.create(fk=o, integer=5)
cls.group_by_fields = [
f.name
for f in CaseTestModel._meta.get_fields()
if not (f.is_relation and f.auto_created)
and (
connection.features.allows_group_by_lob
or not isinstance(f, (BinaryField, TextField))
)
]
def test_annotate(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=Value("one")),
When(integer=2, then=Value("two")),
default=Value("other"),
)
).order_by("pk"),
[
(1, "one"),
(2, "two"),
(3, "other"),
(2, "two"),
(3, "other"),
(3, "other"),
(4, "other"),
],
transform=attrgetter("integer", "test"),
)
def test_annotate_without_default(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=1),
When(integer=2, then=2),
)
).order_by("pk"),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "test"),
)
def test_annotate_with_expression_as_value(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
f_test=Case(
When(integer=1, then=F("integer") + 1),
When(integer=2, then=F("integer") + 3),
default="integer",
)
).order_by("pk"),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)],
transform=attrgetter("integer", "f_test"),
)
def test_annotate_with_expression_as_condition(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
f_test=Case(
When(integer2=F("integer"), then=Value("equal")),
When(integer2=F("integer") + 1, then=Value("+1")),
)
).order_by("pk"),
[
(1, "equal"),
(2, "+1"),
(3, "+1"),
(2, "equal"),
(3, "+1"),
(3, "equal"),
(4, "+1"),
],
transform=attrgetter("integer", "f_test"),
)
def test_annotate_with_join_in_value(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
join_test=Case(
When(integer=1, then=F("o2o_rel__integer") + 1),
When(integer=2, then=F("o2o_rel__integer") + 3),
default="o2o_rel__integer",
)
).order_by("pk"),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 1)],
transform=attrgetter("integer", "join_test"),
)
def test_annotate_with_in_clause(self):
fk_rels = FKCaseTestModel.objects.filter(integer__in=[5])
self.assertQuerySetEqual(
CaseTestModel.objects.only("pk", "integer")
.annotate(
in_test=Sum(
Case(
When(fk_rel__in=fk_rels, then=F("fk_rel__integer")),
default=Value(0),
)
)
)
.order_by("pk"),
[(1, 0), (2, 0), (3, 0), (2, 0), (3, 0), (3, 0), (4, 5)],
transform=attrgetter("integer", "in_test"),
)
def test_annotate_with_join_in_condition(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
join_test=Case(
When(integer2=F("o2o_rel__integer"), then=Value("equal")),
When(integer2=F("o2o_rel__integer") + 1, then=Value("+1")),
default=Value("other"),
)
).order_by("pk"),
[
(1, "equal"),
(2, "+1"),
(3, "+1"),
(2, "equal"),
(3, "+1"),
(3, "equal"),
(4, "other"),
],
transform=attrgetter("integer", "join_test"),
)
def test_annotate_with_join_in_predicate(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
join_test=Case(
When(o2o_rel__integer=1, then=Value("one")),
When(o2o_rel__integer=2, then=Value("two")),
When(o2o_rel__integer=3, then=Value("three")),
default=Value("other"),
)
).order_by("pk"),
[
(1, "one"),
(2, "two"),
(3, "three"),
(2, "two"),
(3, "three"),
(3, "three"),
(4, "one"),
],
transform=attrgetter("integer", "join_test"),
)
def test_annotate_with_annotation_in_value(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F("integer") + 1,
f_plus_3=F("integer") + 3,
)
.annotate(
f_test=Case(
When(integer=1, then="f_plus_1"),
When(integer=2, then="f_plus_3"),
default="integer",
),
)
.order_by("pk"),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)],
transform=attrgetter("integer", "f_test"),
)
def test_annotate_with_annotation_in_condition(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F("integer") + 1,
)
.annotate(
f_test=Case(
When(integer2=F("integer"), then=Value("equal")),
When(integer2=F("f_plus_1"), then=Value("+1")),
),
)
.order_by("pk"),
[
(1, "equal"),
(2, "+1"),
(3, "+1"),
(2, "equal"),
(3, "+1"),
(3, "equal"),
(4, "+1"),
],
transform=attrgetter("integer", "f_test"),
)
def test_annotate_with_annotation_in_predicate(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
f_minus_2=F("integer") - 2,
)
.annotate(
test=Case(
When(f_minus_2=-1, then=Value("negative one")),
When(f_minus_2=0, then=Value("zero")),
When(f_minus_2=1, then=Value("one")),
default=Value("other"),
),
)
.order_by("pk"),
[
(1, "negative one"),
(2, "zero"),
(3, "one"),
(2, "zero"),
(3, "one"),
(3, "one"),
(4, "other"),
],
transform=attrgetter("integer", "test"),
)
def test_annotate_with_aggregation_in_value(self):
self.assertQuerySetEqual(
CaseTestModel.objects.values(*self.group_by_fields)
.annotate(
min=Min("fk_rel__integer"),
max=Max("fk_rel__integer"),
)
.annotate(
test=Case(
When(integer=2, then="min"),
When(integer=3, then="max"),
),
)
.order_by("pk"),
[
(1, None, 1, 1),
(2, 2, 2, 3),
(3, 4, 3, 4),
(2, 2, 2, 3),
(3, 4, 3, 4),
(3, 4, 3, 4),
(4, None, 5, 5),
],
transform=itemgetter("integer", "test", "min", "max"),
)
def test_annotate_with_aggregation_in_condition(self):
self.assertQuerySetEqual(
CaseTestModel.objects.values(*self.group_by_fields)
.annotate(
min=Min("fk_rel__integer"),
max=Max("fk_rel__integer"),
)
.annotate(
test=Case(
When(integer2=F("min"), then=Value("min")),
When(integer2=F("max"), then=Value("max")),
),
)
.order_by("pk"),
[
(1, 1, "min"),
(2, 3, "max"),
(3, 4, "max"),
(2, 2, "min"),
(3, 4, "max"),
(3, 3, "min"),
(4, 5, "min"),
],
transform=itemgetter("integer", "integer2", "test"),
)
def test_annotate_with_aggregation_in_predicate(self):
self.assertQuerySetEqual(
CaseTestModel.objects.values(*self.group_by_fields)
.annotate(
max=Max("fk_rel__integer"),
)
.annotate(
test=Case(
When(max=3, then=Value("max = 3")),
When(max=4, then=Value("max = 4")),
default=Value(""),
),
)
.order_by("pk"),
[
(1, 1, ""),
(2, 3, "max = 3"),
(3, 4, "max = 4"),
(2, 3, "max = 3"),
(3, 4, "max = 4"),
(3, 4, "max = 4"),
(4, 5, ""),
],
transform=itemgetter("integer", "max", "test"),
)
def test_annotate_exclude(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=Value("one")),
When(integer=2, then=Value("two")),
default=Value("other"),
)
)
.exclude(test="other")
.order_by("pk"),
[(1, "one"), (2, "two"), (2, "two")],
transform=attrgetter("integer", "test"),
)
def test_annotate_filter_decimal(self):
obj = CaseTestModel.objects.create(integer=0, decimal=Decimal("1"))
qs = CaseTestModel.objects.annotate(
x=Case(When(integer=0, then=F("decimal"))),
y=Case(When(integer=0, then=Value(Decimal("1")))),
)
self.assertSequenceEqual(qs.filter(Q(x=1) & Q(x=Decimal("1"))), [obj])
self.assertSequenceEqual(qs.filter(Q(y=1) & Q(y=Decimal("1"))), [obj])
def test_annotate_values_not_in_order_by(self):
self.assertEqual(
list(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=Value("one")),
When(integer=2, then=Value("two")),
When(integer=3, then=Value("three")),
default=Value("other"),
)
)
.order_by("test")
.values_list("integer", flat=True)
),
[1, 4, 3, 3, 3, 2, 2],
)
def test_annotate_with_empty_when(self):
objects = CaseTestModel.objects.annotate(
selected=Case(
When(pk__in=[], then=Value("selected")),
default=Value("not selected"),
)
)
self.assertEqual(len(objects), CaseTestModel.objects.count())
self.assertTrue(all(obj.selected == "not selected" for obj in objects))
def test_annotate_with_full_when(self):
objects = CaseTestModel.objects.annotate(
selected=Case(
When(~Q(pk__in=[]), then=Value("selected")),
default=Value("not selected"),
)
)
self.assertEqual(len(objects), CaseTestModel.objects.count())
self.assertTrue(all(obj.selected == "selected" for obj in objects))
def test_combined_expression(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
)
+ 1,
).order_by("pk"),
[(1, 3), (2, 2), (3, 4), (2, 2), (3, 4), (3, 4), (4, 4)],
transform=attrgetter("integer", "test"),
)
def test_in_subquery(self):
self.assertQuerySetEqual(
CaseTestModel.objects.filter(
pk__in=CaseTestModel.objects.annotate(
test=Case(
When(integer=F("integer2"), then="pk"),
When(integer=4, then="pk"),
),
).values("test")
).order_by("pk"),
[(1, 1), (2, 2), (3, 3), (4, 5)],
transform=attrgetter("integer", "integer2"),
)
def test_condition_with_lookups(self):
qs = CaseTestModel.objects.annotate(
test=Case(
When(Q(integer2=1), string="2", then=Value(False)),
When(Q(integer2=1), string="1", then=Value(True)),
default=Value(False),
output_field=BooleanField(),
),
)
self.assertIs(qs.get(integer=1).test, True)
def test_case_reuse(self):
SOME_CASE = Case(
When(pk=0, then=Value("0")),
default=Value("1"),
)
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(somecase=SOME_CASE).order_by("pk"),
CaseTestModel.objects.annotate(somecase=SOME_CASE)
.order_by("pk")
.values_list("pk", "somecase"),
lambda x: (x.pk, x.somecase),
)
def test_aggregate(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
one=Sum(
Case(
When(integer=1, then=1),
)
),
two=Sum(
Case(
When(integer=2, then=1),
)
),
three=Sum(
Case(
When(integer=3, then=1),
)
),
four=Sum(
Case(
When(integer=4, then=1),
)
),
),
{"one": 1, "two": 2, "three": 3, "four": 1},
)
def test_aggregate_with_expression_as_value(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
one=Sum(Case(When(integer=1, then="integer"))),
two=Sum(Case(When(integer=2, then=F("integer") - 1))),
three=Sum(Case(When(integer=3, then=F("integer") + 1))),
),
{"one": 1, "two": 2, "three": 12},
)
def test_aggregate_with_expression_as_condition(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
equal=Sum(
Case(
When(integer2=F("integer"), then=1),
)
),
plus_one=Sum(
Case(
When(integer2=F("integer") + 1, then=1),
)
),
),
{"equal": 3, "plus_one": 4},
)
def test_filter(self):
self.assertQuerySetEqual(
CaseTestModel.objects.filter(
integer2=Case(
When(integer=2, then=3),
When(integer=3, then=4),
default=1,
)
).order_by("pk"),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_without_default(self):
self.assertQuerySetEqual(
CaseTestModel.objects.filter(
integer2=Case(
When(integer=2, then=3),
When(integer=3, then=4),
)
).order_by("pk"),
[(2, 3), (3, 4), (3, 4)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_expression_as_value(self):
self.assertQuerySetEqual(
CaseTestModel.objects.filter(
integer2=Case(
When(integer=2, then=F("integer") + 1),
When(integer=3, then=F("integer")),
default="integer",
)
).order_by("pk"),
[(1, 1), (2, 3), (3, 3)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_expression_as_condition(self):
self.assertQuerySetEqual(
CaseTestModel.objects.filter(
string=Case(
When(integer2=F("integer"), then=Value("2")),
When(integer2=F("integer") + 1, then=Value("3")),
)
).order_by("pk"),
[(3, 4, "3"), (2, 2, "2"), (3, 4, "3")],
transform=attrgetter("integer", "integer2", "string"),
)
def test_filter_with_join_in_value(self):
self.assertQuerySetEqual(
CaseTestModel.objects.filter(
integer2=Case(
When(integer=2, then=F("o2o_rel__integer") + 1),
When(integer=3, then=F("o2o_rel__integer")),
default="o2o_rel__integer",
)
).order_by("pk"),
[(1, 1), (2, 3), (3, 3)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_join_in_condition(self):
self.assertQuerySetEqual(
CaseTestModel.objects.filter(
integer=Case(
When(integer2=F("o2o_rel__integer") + 1, then=2),
When(integer2=F("o2o_rel__integer"), then=3),
)
).order_by("pk"),
[(2, 3), (3, 3)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_join_in_predicate(self):
self.assertQuerySetEqual(
CaseTestModel.objects.filter(
integer2=Case(
When(o2o_rel__integer=1, then=1),
When(o2o_rel__integer=2, then=3),
When(o2o_rel__integer=3, then=4),
)
).order_by("pk"),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_annotation_in_value(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
f=F("integer"),
f_plus_1=F("integer") + 1,
)
.filter(
integer2=Case(
When(integer=2, then="f_plus_1"),
When(integer=3, then="f"),
),
)
.order_by("pk"),
[(2, 3), (3, 3)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_annotation_in_condition(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F("integer") + 1,
)
.filter(
integer=Case(
When(integer2=F("integer"), then=2),
When(integer2=F("f_plus_1"), then=3),
),
)
.order_by("pk"),
[(3, 4), (2, 2), (3, 4)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_annotation_in_predicate(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F("integer") + 1,
)
.filter(
integer2=Case(
When(f_plus_1=3, then=3),
When(f_plus_1=4, then=4),
default=1,
),
)
.order_by("pk"),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter("integer", "integer2"),
)
def test_filter_with_aggregation_in_value(self):
self.assertQuerySetEqual(
CaseTestModel.objects.values(*self.group_by_fields)
.annotate(
min=Min("fk_rel__integer"),
max=Max("fk_rel__integer"),
)
.filter(
integer2=Case(
When(integer=2, then="min"),
When(integer=3, then="max"),
),
)
.order_by("pk"),
[(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)],
transform=itemgetter("integer", "integer2", "min", "max"),
)
def test_filter_with_aggregation_in_condition(self):
self.assertQuerySetEqual(
CaseTestModel.objects.values(*self.group_by_fields)
.annotate(
min=Min("fk_rel__integer"),
max=Max("fk_rel__integer"),
)
.filter(
integer=Case(
When(integer2=F("min"), then=2),
When(integer2=F("max"), then=3),
),
)
.order_by("pk"),
[(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)],
transform=itemgetter("integer", "integer2", "min", "max"),
)
def test_filter_with_aggregation_in_predicate(self):
self.assertQuerySetEqual(
CaseTestModel.objects.values(*self.group_by_fields)
.annotate(
max=Max("fk_rel__integer"),
)
.filter(
integer=Case(
When(max=3, then=2),
When(max=4, then=3),
),
)
.order_by("pk"),
[(2, 3, 3), (3, 4, 4), (2, 2, 3), (3, 4, 4), (3, 3, 4)],
transform=itemgetter("integer", "integer2", "max"),
)
def test_update(self):
CaseTestModel.objects.update(
string=Case(
When(integer=1, then=Value("one")),
When(integer=2, then=Value("two")),
default=Value("other"),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, "one"),
(2, "two"),
(3, "other"),
(2, "two"),
(3, "other"),
(3, "other"),
(4, "other"),
],
transform=attrgetter("integer", "string"),
)
def test_update_without_default(self):
CaseTestModel.objects.update(
integer2=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "integer2"),
)
def test_update_with_expression_as_value(self):
CaseTestModel.objects.update(
integer=Case(
When(integer=1, then=F("integer") + 1),
When(integer=2, then=F("integer") + 3),
default="integer",
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[("1", 2), ("2", 5), ("3", 3), ("2", 5), ("3", 3), ("3", 3), ("4", 4)],
transform=attrgetter("string", "integer"),
)
def test_update_with_expression_as_condition(self):
CaseTestModel.objects.update(
string=Case(
When(integer2=F("integer"), then=Value("equal")),
When(integer2=F("integer") + 1, then=Value("+1")),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, "equal"),
(2, "+1"),
(3, "+1"),
(2, "equal"),
(3, "+1"),
(3, "equal"),
(4, "+1"),
],
transform=attrgetter("integer", "string"),
)
def test_update_with_join_in_condition_raise_field_error(self):
with self.assertRaisesMessage(
FieldError, "Joined field references are not permitted in this query"
):
CaseTestModel.objects.update(
integer=Case(
When(integer2=F("o2o_rel__integer") + 1, then=2),
When(integer2=F("o2o_rel__integer"), then=3),
),
)
def test_update_with_join_in_predicate_raise_field_error(self):
with self.assertRaisesMessage(
FieldError, "Joined field references are not permitted in this query"
):
CaseTestModel.objects.update(
string=Case(
When(o2o_rel__integer=1, then=Value("one")),
When(o2o_rel__integer=2, then=Value("two")),
When(o2o_rel__integer=3, then=Value("three")),
default=Value("other"),
),
)
def test_update_big_integer(self):
CaseTestModel.objects.update(
big_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "big_integer"),
)
def test_update_binary(self):
CaseTestModel.objects.update(
binary=Case(
When(integer=1, then=b"one"),
When(integer=2, then=b"two"),
default=b"",
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, b"one"),
(2, b"two"),
(3, b""),
(2, b"two"),
(3, b""),
(3, b""),
(4, b""),
],
transform=lambda o: (o.integer, bytes(o.binary)),
)
def test_update_boolean(self):
CaseTestModel.objects.update(
boolean=Case(
When(integer=1, then=True),
When(integer=2, then=True),
default=False,
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, True),
(2, True),
(3, False),
(2, True),
(3, False),
(3, False),
(4, False),
],
transform=attrgetter("integer", "boolean"),
)
def test_update_date(self):
CaseTestModel.objects.update(
date=Case(
When(integer=1, then=date(2015, 1, 1)),
When(integer=2, then=date(2015, 1, 2)),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, date(2015, 1, 1)),
(2, date(2015, 1, 2)),
(3, None),
(2, date(2015, 1, 2)),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "date"),
)
def test_update_date_time(self):
CaseTestModel.objects.update(
date_time=Case(
When(integer=1, then=datetime(2015, 1, 1)),
When(integer=2, then=datetime(2015, 1, 2)),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, datetime(2015, 1, 1)),
(2, datetime(2015, 1, 2)),
(3, None),
(2, datetime(2015, 1, 2)),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "date_time"),
)
def test_update_decimal(self):
CaseTestModel.objects.update(
decimal=Case(
When(integer=1, then=Decimal("1.1")),
When(
integer=2, then=Value(Decimal("2.2"), output_field=DecimalField())
),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, Decimal("1.1")),
(2, Decimal("2.2")),
(3, None),
(2, Decimal("2.2")),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "decimal"),
)
def test_update_duration(self):
CaseTestModel.objects.update(
duration=Case(
When(integer=1, then=timedelta(1)),
When(integer=2, then=timedelta(2)),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, timedelta(1)),
(2, timedelta(2)),
(3, None),
(2, timedelta(2)),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "duration"),
)
def test_update_email(self):
CaseTestModel.objects.update(
email=Case(
When(integer=1, then=Value("1@example.com")),
When(integer=2, then=Value("2@example.com")),
default=Value(""),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, "1@example.com"),
(2, "2@example.com"),
(3, ""),
(2, "2@example.com"),
(3, ""),
(3, ""),
(4, ""),
],
transform=attrgetter("integer", "email"),
)
def test_update_file(self):
CaseTestModel.objects.update(
file=Case(
When(integer=1, then=Value("~/1")),
When(integer=2, then=Value("~/2")),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, "~/1"), (2, "~/2"), (3, ""), (2, "~/2"), (3, ""), (3, ""), (4, "")],
transform=lambda o: (o.integer, str(o.file)),
)
def test_update_file_path(self):
CaseTestModel.objects.update(
file_path=Case(
When(integer=1, then=Value("~/1")),
When(integer=2, then=Value("~/2")),
default=Value(""),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, "~/1"), (2, "~/2"), (3, ""), (2, "~/2"), (3, ""), (3, ""), (4, "")],
transform=attrgetter("integer", "file_path"),
)
def test_update_float(self):
CaseTestModel.objects.update(
float=Case(
When(integer=1, then=1.1),
When(integer=2, then=2.2),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, 1.1), (2, 2.2), (3, None), (2, 2.2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "float"),
)
@unittest.skipUnless(Image, "Pillow not installed")
def test_update_image(self):
CaseTestModel.objects.update(
image=Case(
When(integer=1, then=Value("~/1")),
When(integer=2, then=Value("~/2")),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, "~/1"), (2, "~/2"), (3, ""), (2, "~/2"), (3, ""), (3, ""), (4, "")],
transform=lambda o: (o.integer, str(o.image)),
)
def test_update_generic_ip_address(self):
CaseTestModel.objects.update(
generic_ip_address=Case(
When(integer=1, then=Value("1.1.1.1")),
When(integer=2, then=Value("2.2.2.2")),
output_field=GenericIPAddressField(),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, "1.1.1.1"),
(2, "2.2.2.2"),
(3, None),
(2, "2.2.2.2"),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "generic_ip_address"),
)
def test_update_null_boolean(self):
CaseTestModel.objects.update(
null_boolean=Case(
When(integer=1, then=True),
When(integer=2, then=False),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, True),
(2, False),
(3, None),
(2, False),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "null_boolean"),
)
def test_update_positive_big_integer(self):
CaseTestModel.objects.update(
positive_big_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "positive_big_integer"),
)
def test_update_positive_integer(self):
CaseTestModel.objects.update(
positive_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "positive_integer"),
)
def test_update_positive_small_integer(self):
CaseTestModel.objects.update(
positive_small_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "positive_small_integer"),
)
def test_update_slug(self):
CaseTestModel.objects.update(
slug=Case(
When(integer=1, then=Value("1")),
When(integer=2, then=Value("2")),
default=Value(""),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, "1"), (2, "2"), (3, ""), (2, "2"), (3, ""), (3, ""), (4, "")],
transform=attrgetter("integer", "slug"),
)
def test_update_small_integer(self):
CaseTestModel.objects.update(
small_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter("integer", "small_integer"),
)
def test_update_string(self):
CaseTestModel.objects.filter(string__in=["1", "2"]).update(
string=Case(
When(integer=1, then=Value("1")),
When(integer=2, then=Value("2")),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.filter(string__in=["1", "2"]).order_by("pk"),
[(1, "1"), (2, "2"), (2, "2")],
transform=attrgetter("integer", "string"),
)
def test_update_text(self):
CaseTestModel.objects.update(
text=Case(
When(integer=1, then=Value("1")),
When(integer=2, then=Value("2")),
default=Value(""),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[(1, "1"), (2, "2"), (3, ""), (2, "2"), (3, ""), (3, ""), (4, "")],
transform=attrgetter("integer", "text"),
)
def test_update_time(self):
CaseTestModel.objects.update(
time=Case(
When(integer=1, then=time(1)),
When(integer=2, then=time(2)),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, time(1)),
(2, time(2)),
(3, None),
(2, time(2)),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "time"),
)
def test_update_url(self):
CaseTestModel.objects.update(
url=Case(
When(integer=1, then=Value("http://1.example.com/")),
When(integer=2, then=Value("http://2.example.com/")),
default=Value(""),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, "http://1.example.com/"),
(2, "http://2.example.com/"),
(3, ""),
(2, "http://2.example.com/"),
(3, ""),
(3, ""),
(4, ""),
],
transform=attrgetter("integer", "url"),
)
def test_update_uuid(self):
CaseTestModel.objects.update(
uuid=Case(
When(integer=1, then=UUID("11111111111111111111111111111111")),
When(integer=2, then=UUID("22222222222222222222222222222222")),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, UUID("11111111111111111111111111111111")),
(2, UUID("22222222222222222222222222222222")),
(3, None),
(2, UUID("22222222222222222222222222222222")),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "uuid"),
)
def test_update_fk(self):
obj1, obj2 = CaseTestModel.objects.all()[:2]
CaseTestModel.objects.update(
fk=Case(
When(integer=1, then=obj1.pk),
When(integer=2, then=obj2.pk),
),
)
self.assertQuerySetEqual(
CaseTestModel.objects.order_by("pk"),
[
(1, obj1.pk),
(2, obj2.pk),
(3, None),
(2, obj2.pk),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter("integer", "fk_id"),
)
def test_lookup_in_condition(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer__lt=2, then=Value("less than 2")),
When(integer__gt=2, then=Value("greater than 2")),
default=Value("equal to 2"),
),
).order_by("pk"),
[
(1, "less than 2"),
(2, "equal to 2"),
(3, "greater than 2"),
(2, "equal to 2"),
(3, "greater than 2"),
(3, "greater than 2"),
(4, "greater than 2"),
],
transform=attrgetter("integer", "test"),
)
def test_lookup_different_fields(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=2, integer2=3, then=Value("when")),
default=Value("default"),
),
).order_by("pk"),
[
(1, 1, "default"),
(2, 3, "when"),
(3, 4, "default"),
(2, 2, "default"),
(3, 4, "default"),
(3, 3, "default"),
(4, 5, "default"),
],
transform=attrgetter("integer", "integer2", "test"),
)
def test_combined_q_object(self):
self.assertQuerySetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(Q(integer=2) | Q(integer2=3), then=Value("when")),
default=Value("default"),
),
).order_by("pk"),
[
(1, 1, "default"),
(2, 3, "when"),
(3, 4, "default"),
(2, 2, "when"),
(3, 4, "default"),
(3, 3, "when"),
(4, 5, "default"),
],
transform=attrgetter("integer", "integer2", "test"),
)
def test_order_by_conditional_implicit(self):
self.assertQuerySetEqual(
CaseTestModel.objects.filter(integer__lte=2)
.annotate(
test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
)
)
.order_by("test", "pk"),
[(2, 1), (2, 1), (1, 2)],
transform=attrgetter("integer", "test"),
)
def test_order_by_conditional_explicit(self):
self.assertQuerySetEqual(
CaseTestModel.objects.filter(integer__lte=2)
.annotate(
test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
)
)
.order_by(F("test").asc(), "pk"),
[(2, 1), (2, 1), (1, 2)],
transform=attrgetter("integer", "test"),
)
def test_join_promotion(self):
o = CaseTestModel.objects.create(integer=1, integer2=1, string="1")
# Testing that:
# 1. There isn't any object on the remote side of the fk_rel
# relation. If the query used inner joins, then the join to fk_rel
# would remove o from the results. So, in effect we are testing that
# we are promoting the fk_rel join to a left outer join here.
# 2. The default value of 3 is generated for the case expression.
self.assertQuerySetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__pk=1, then=2),
default=3,
),
),
[(o, 3)],
lambda x: (x, x.foo),
)
# Now 2 should be generated, as the fk_rel is null.
self.assertQuerySetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__isnull=True, then=2),
default=3,
),
),
[(o, 2)],
lambda x: (x, x.foo),
)
def test_join_promotion_multiple_annotations(self):
o = CaseTestModel.objects.create(integer=1, integer2=1, string="1")
# Testing that:
# 1. There isn't any object on the remote side of the fk_rel
# relation. If the query used inner joins, then the join to fk_rel
# would remove o from the results. So, in effect we are testing that
# we are promoting the fk_rel join to a left outer join here.
# 2. The default value of 3 is generated for the case expression.
self.assertQuerySetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__pk=1, then=2),
default=3,
),
bar=Case(
When(fk_rel__pk=1, then=4),
default=5,
),
),
[(o, 3, 5)],
lambda x: (x, x.foo, x.bar),
)
# Now 2 should be generated, as the fk_rel is null.
self.assertQuerySetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__isnull=True, then=2),
default=3,
),
bar=Case(
When(fk_rel__isnull=True, then=4),
default=5,
),
),
[(o, 2, 4)],
lambda x: (x, x.foo, x.bar),
)
def test_m2m_exclude(self):
CaseTestModel.objects.create(integer=10, integer2=1, string="1")
qs = (
CaseTestModel.objects.values_list("id", "integer")
.annotate(
cnt=Sum(
Case(When(~Q(fk_rel__integer=1), then=1), default=2),
),
)
.order_by("integer")
)
# The first o has 2 as its fk_rel__integer=1, thus it hits the
# default=2 case. The other ones have 2 as the result as they have 2
# fk_rel objects, except for integer=4 and integer=10 (created above).
# The integer=4 case has one integer, thus the result is 1, and
# integer=10 doesn't have any and this too generates 1 (instead of 0)
# as ~Q() also matches nulls.
self.assertQuerySetEqual(
qs,
[(1, 2), (2, 2), (2, 2), (3, 2), (3, 2), (3, 2), (4, 1), (10, 1)],
lambda x: x[1:],
)
def test_m2m_reuse(self):
CaseTestModel.objects.create(integer=10, integer2=1, string="1")
# Need to use values before annotate so that Oracle will not group
# by fields it isn't capable of grouping by.
qs = (
CaseTestModel.objects.values_list("id", "integer")
.annotate(
cnt=Sum(
Case(When(~Q(fk_rel__integer=1), then=1), default=2),
),
)
.annotate(
cnt2=Sum(
Case(When(~Q(fk_rel__integer=1), then=1), default=2),
),
)
.order_by("integer")
)
self.assertEqual(str(qs.query).count(" JOIN "), 1)
self.assertQuerySetEqual(
qs,
[
(1, 2, 2),
(2, 2, 2),
(2, 2, 2),
(3, 2, 2),
(3, 2, 2),
(3, 2, 2),
(4, 1, 1),
(10, 1, 1),
],
lambda x: x[1:],
)
def test_aggregation_empty_cases(self):
tests = [
# Empty cases and default.
(Case(output_field=IntegerField()), None),
# Empty cases and a constant default.
(Case(default=Value("empty")), "empty"),
# Empty cases and column in the default.
(Case(default=F("url")), ""),
]
for case, value in tests:
with self.subTest(case=case):
self.assertQuerySetEqual(
CaseTestModel.objects.values("string")
.annotate(
case=case,
integer_sum=Sum("integer"),
)
.order_by("string"),
[
("1", value, 1),
("2", value, 4),
("3", value, 9),
("4", value, 4),
],
transform=itemgetter("string", "case", "integer_sum"),
)
| CaseExpressionTests |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/shard_test.py | {
"start": 4534,
"end": 5538
} | class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_dataset(self, num_elements, num_shards, index, options=None):
dataset = dataset_ops.Dataset.range(num_elements).shard(num_shards, index)
if options:
dataset = dataset.with_options(options)
return dataset
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(symbolic_checkpoint=[False, True]),
combinations.combine(
elems=[10, 100], num_shards=[2, 5], index=[0, 1])))
def test(self, verify_fn, symbolic_checkpoint, elems, num_shards, index):
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
verify_fn(
self,
lambda: self._build_dataset(elems, num_shards, index, options),
num_outputs=elems // num_shards)
| ShardCheckpointTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassTransform3.py | {
"start": 975,
"end": 1239
} | class ____(ModelBase, frozen=True):
id: int = model_field()
name: str = model_field()
name2: str = model_field(alias="other_name", default="None")
# This should generate an error because a non-frozen dataclass cannot
# derive from a frozen one.
| Customer1 |
python | walkccc__LeetCode | solutions/2181. Merge Nodes in Between Zeros/2181-2.py | {
"start": 0,
"end": 346
} | class ____:
def mergeNodes(self, head: ListNode | None) -> ListNode | None:
curr = head.next
while curr:
running = curr
summ = 0
while running.val > 0:
summ += running.val
running = running.next
curr.val = summ
curr.next = running.next
curr = running.next
return head.next
| Solution |
python | getsentry__sentry | tests/sentry/api/serializers/test_commit.py | {
"start": 401,
"end": 4762
} | class ____(TestCase):
def test_simple(self) -> None:
user = self.create_user()
project = self.create_project()
release = Release.objects.create(
organization_id=project.organization_id, version=uuid4().hex
)
release.add_project(project)
repository = Repository.objects.create(
organization_id=project.organization_id, name="test/test"
)
commit_author = CommitAuthor.objects.create(
name="stebe", email="stebe@sentry.io", organization_id=project.organization_id
)
commit = Commit.objects.create(
organization_id=project.organization_id,
repository_id=repository.id,
key="abc",
author=commit_author,
message="waddap",
)
ReleaseCommit.objects.create(
organization_id=project.organization_id,
project_id=project.id,
release=release,
commit=commit,
order=1,
)
result = serialize(commit, user)
assert result["message"] == "waddap"
assert result["repository"]["name"] == "test/test"
assert result["author"] == {"name": "stebe", "email": "stebe@sentry.io"}
def test_no_author(self) -> None:
user = self.create_user()
project = self.create_project()
release = Release.objects.create(
organization_id=project.organization_id, version=uuid4().hex
)
release.add_project(project)
repository = Repository.objects.create(
organization_id=project.organization_id, name="test/test"
)
commit = Commit.objects.create(
organization_id=project.organization_id,
repository_id=repository.id,
key="abc",
message="waddap",
)
ReleaseCommit.objects.create(
organization_id=project.organization_id,
project_id=project.id,
release=release,
commit=commit,
order=1,
)
result = serialize(commit, user)
assert result["author"] == {}
def test_pull_requests(self) -> None:
"""Test we can correctly match pull requests to commits."""
user = self.create_user()
project = self.create_project()
release = Release.objects.create(
organization_id=project.organization_id, version=uuid4().hex
)
release.add_project(project)
repository = Repository.objects.create(
organization_id=project.organization_id, name="test/test"
)
commit1 = Commit.objects.create(
organization_id=project.organization_id,
repository_id=repository.id,
key="abc",
message="waddap",
)
ReleaseCommit.objects.create(
organization_id=project.organization_id,
project_id=project.id,
release=release,
commit=commit1,
order=1,
)
commit2 = Commit.objects.create(
organization_id=project.organization_id,
repository_id=repository.id,
key="def",
message="waddap2",
)
ReleaseCommit.objects.create(
organization_id=project.organization_id,
project_id=project.id,
release=release,
commit=commit2,
order=2,
)
PullRequest.objects.create(
organization_id=project.organization_id,
repository_id=repository.id,
key="pr1",
merge_commit_sha=commit1.key,
)
PullRequest.objects.create(
organization_id=project.organization_id,
repository_id=repository.id,
key="pr2",
merge_commit_sha=commit2.key,
)
PullRequest.objects.create(
organization_id=project.organization_id,
repository_id=repository.id,
key="pr3",
merge_commit_sha=commit1.key,
)
results = serialize([commit1, commit2], user)
# In the case of multiple pull requests, one is chosen arbitrarily.
assert results[0]["pullRequest"]["id"] == "pr1" or results[0]["pullRequest"]["id"] == "pr3"
assert results[1]["pullRequest"]["id"] == "pr2"
| CommitSerializerTest |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_events_monitors.py | {
"start": 4400,
"end": 11389
} | class ____(BaseMonitor):
"""Monitor for Infinity and NaN in tensor values."""
def __init__(self, debug_events_reader, limit=0):
super(InfNanMonitor, self).__init__(debug_events_reader)
self._limit = limit # Track only the first _ alert events, for efficiency.
self._alerts = []
def _check_full_tensor_value(self,
tensor_value,
wall_time,
op_type,
output_slot,
execution_index=None,
graph_execution_trace_index=None):
"""Check a full tensor value.
Appends to the list of alerts if any inf or nan is found in the full tensor
value.
Args:
tensor_value: The full tensor value as a `np.ndarray`.
wall_time: Wall timestamp for the execution event that generated the
tensor value.
op_type: Op type executed.
output_slot: The output slot of the op.
execution_index: Index to the top-level execution event.
graph_execution_trace_index: Index to the intra-graph execution trace
(if applicable.)
"""
size = np.size(tensor_value)
if not size or not np.issubdtype(tensor_value.dtype, np.floating):
return
is_inf = np.isinf(tensor_value)
num_neg_inf = np.count_nonzero(
np.logical_and(is_inf, np.less(tensor_value, 0.0)))
num_pos_inf = np.count_nonzero(
np.logical_and(is_inf, np.greater(tensor_value, 0.0)))
num_nan = np.count_nonzero(np.isnan(tensor_value))
if num_neg_inf or num_pos_inf or num_nan:
self._alerts.append(InfNanAlert(
wall_time,
op_type,
output_slot,
size=size,
num_neg_inf=num_neg_inf,
num_pos_inf=num_pos_inf,
num_nan=num_nan,
execution_index=execution_index,
graph_execution_trace_index=graph_execution_trace_index))
def _check_debug_tensor_value(self,
tensor_debug_mode,
debug_tensor_value,
wall_time,
op_type,
output_slot,
execution_index=None,
graph_execution_trace_index=None):
"""Check for bad numerical values based on debug summary of tensor value.
If tensor_debug_mode is one in which debug_tensor_value does not carry
information about the presence or count of inf / nan values (e.g., SHAPE),
this method is a no-op.
When infs and/or nans are found, `InfNanAlert` objects are created and
appended to `self._alerts`.
Args:
tensor_debug_mode: TensorDebugMode proto enum.
debug_tensor_value: Debug tensor value as a list of numbers.
wall_time: Wall timestamp for the tensor event.
op_type: Type of the op that generated the tensor (e.g., "Conv2D").
output_slot: Output slot index of the tensor for the op.
execution_index: Top-level execution index.
graph_execution_trace_index: Intra-graph execution index.
"""
# FULL_TENSOR mode is handled by a separate code path.
assert tensor_debug_mode != debug_event_pb2.TensorDebugMode.FULL_TENSOR
if not debug_tensor_value:
return
if tensor_debug_mode == debug_event_pb2.TensorDebugMode.CURT_HEALTH:
_, any_nan_inf = debug_tensor_value
if any_nan_inf:
self._alerts.append(InfNanAlert(
wall_time,
op_type,
output_slot,
execution_index=execution_index,
graph_execution_trace_index=graph_execution_trace_index))
elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.CONCISE_HEALTH:
_, size, num_neg_inf, num_pos_inf, num_nan = debug_tensor_value
if num_neg_inf or num_pos_inf or num_nan:
self._alerts.append(InfNanAlert(
wall_time,
op_type,
output_slot,
size=size,
num_neg_inf=num_neg_inf,
num_pos_inf=num_pos_inf,
num_nan=num_nan,
execution_index=execution_index,
graph_execution_trace_index=graph_execution_trace_index))
elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_HEALTH:
(_, _, _, _, size, num_neg_inf, num_pos_inf, num_nan,
_, _, _) = debug_tensor_value
if num_neg_inf or num_pos_inf or num_nan:
self._alerts.append(InfNanAlert(
wall_time,
op_type,
output_slot,
size=size,
num_neg_inf=num_neg_inf,
num_pos_inf=num_pos_inf,
num_nan=num_nan,
execution_index=execution_index,
graph_execution_trace_index=graph_execution_trace_index))
def on_execution(self,
execution_index,
execution):
if self._limit > 0 and len(self._alerts) >= self._limit:
return
if (execution.tensor_debug_mode ==
debug_event_pb2.TensorDebugMode.FULL_TENSOR):
tensor_values = self._debug_data_reader.execution_to_tensor_values(
execution)
for output_slot, tensor_value in enumerate(tensor_values):
self._check_full_tensor_value(
tensor_value, execution.wall_time, execution.op_type, output_slot,
execution_index=execution_index)
elif execution.debug_tensor_values:
for output_slot, debug_tensor_value in enumerate(
execution.debug_tensor_values):
self._check_debug_tensor_value(
execution.tensor_debug_mode,
debug_tensor_value,
execution.wall_time,
execution.op_type,
output_slot,
execution_index=execution_index)
def on_graph_execution_trace(self,
graph_execution_trace_index,
graph_execution_trace):
"""Monitor method for GraphExecutionTrace data object."""
if self._limit > 0 and len(self._alerts) >= self._limit:
return
if (graph_execution_trace.tensor_debug_mode ==
debug_event_pb2.TensorDebugMode.FULL_TENSOR):
tensor_value = (
self._debug_data_reader.graph_execution_trace_to_tensor_value(
graph_execution_trace))
self._check_full_tensor_value(
tensor_value, graph_execution_trace.wall_time,
graph_execution_trace.op_type, graph_execution_trace.output_slot,
graph_execution_trace_index=graph_execution_trace_index)
elif graph_execution_trace.debug_tensor_value:
self._check_debug_tensor_value(
graph_execution_trace.tensor_debug_mode,
graph_execution_trace.debug_tensor_value,
graph_execution_trace.wall_time,
graph_execution_trace.op_type,
graph_execution_trace.output_slot,
graph_execution_trace_index=graph_execution_trace_index)
def alerts(self):
return self._alerts
| InfNanMonitor |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/deprecated6.py | {
"start": 416,
"end": 977
} | class ____(Generic[P, R]):
def __init__(self, cb: Callable[P, R]) -> None:
self.cb = cb
def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
return self.cb(*args, **kwargs)
@B
@deprecated("Don't use this.")
def func1(x: int) -> None:
pass
# This should generate an error if reportDeprecated is enabled.
func1(3)
def deco1(cb: Callable[P, R]) -> B[P, R]:
return B(cb)
@deco1
@deprecated("Don't use this.")
def func2(x: int) -> None:
pass
# This should generate an error if reportDeprecated is enabled.
func2(3)
| B |
python | django__django | tests/forms_tests/field_tests/test_splitdatetimefield.py | {
"start": 212,
"end": 3868
} | class ____(SimpleTestCase):
def test_splitdatetimefield_1(self):
f = SplitDateTimeField()
self.assertIsInstance(f.widget, SplitDateTimeWidget)
self.assertEqual(
datetime.datetime(2006, 1, 10, 7, 30),
f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)]),
)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean("")
with self.assertRaisesMessage(ValidationError, "'Enter a list of values.'"):
f.clean("hello")
with self.assertRaisesMessage(
ValidationError, "'Enter a valid date.', 'Enter a valid time.'"
):
f.clean(["hello", "there"])
with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
f.clean(["2006-01-10", "there"])
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean(["hello", "07:30"])
def test_splitdatetimefield_2(self):
f = SplitDateTimeField(required=False)
self.assertEqual(
datetime.datetime(2006, 1, 10, 7, 30),
f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)]),
)
self.assertEqual(
datetime.datetime(2006, 1, 10, 7, 30), f.clean(["2006-01-10", "07:30"])
)
self.assertIsNone(f.clean(None))
self.assertIsNone(f.clean(""))
self.assertIsNone(f.clean([""]))
self.assertIsNone(f.clean(["", ""]))
with self.assertRaisesMessage(ValidationError, "'Enter a list of values.'"):
f.clean("hello")
with self.assertRaisesMessage(
ValidationError, "'Enter a valid date.', 'Enter a valid time.'"
):
f.clean(["hello", "there"])
with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
f.clean(["2006-01-10", "there"])
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean(["hello", "07:30"])
with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
f.clean(["2006-01-10", ""])
with self.assertRaisesMessage(ValidationError, "'Enter a valid time.'"):
f.clean(["2006-01-10"])
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean(["", "07:30"])
def test_splitdatetimefield_changed(self):
f = SplitDateTimeField(input_date_formats=["%d/%m/%Y"])
self.assertFalse(
f.has_changed(["11/01/2012", "09:18:15"], ["11/01/2012", "09:18:15"])
)
self.assertTrue(
f.has_changed(
datetime.datetime(2008, 5, 6, 12, 40, 00), ["2008-05-06", "12:40:00"]
)
)
self.assertFalse(
f.has_changed(
datetime.datetime(2008, 5, 6, 12, 40, 00), ["06/05/2008", "12:40"]
)
)
self.assertTrue(
f.has_changed(
datetime.datetime(2008, 5, 6, 12, 40, 00), ["06/05/2008", "12:41"]
)
)
def test_form_as_table(self):
class TestForm(Form):
datetime = SplitDateTimeField()
f = TestForm()
self.assertHTMLEqual(
f.as_table(),
"<tr><th><label>Datetime:</label></th><td>"
'<input type="text" name="datetime_0" required id="id_datetime_0">'
'<input type="text" name="datetime_1" required id="id_datetime_1">'
"</td></tr>",
)
| SplitDateTimeFieldTest |
python | ansible__ansible | test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py | {
"start": 8610,
"end": 109198
} | class ____(Validator):
REJECTLIST_PATTERNS = ('.git*', '*.pyc', '*.pyo', '.*', '*.md', '*.rst', '*.txt')
REJECTLIST_FILES = frozenset(('.git', '.gitignore', '.travis.yml',
'.gitattributes', '.gitmodules', 'COPYING',
'__init__.py', 'VERSION', 'test-docs.sh'))
REJECTLIST = REJECTLIST_FILES.union(REJECTLIST['module'])
# win_dsc is a dynamic arg spec, the docs won't ever match
PS_ARG_VALIDATE_REJECTLIST = frozenset(('win_dsc.ps1', ))
def __init__(self, path, git_cache: GitCache, analyze_arg_spec=False, collection=None, collection_version=None,
reporter=None, routing=None, plugin_type='module'):
super(ModuleValidator, self).__init__(reporter=reporter or Reporter())
self.path = path
self.basename = os.path.basename(self.path)
self.name = os.path.splitext(self.basename)[0]
self.plugin_type = plugin_type
self.analyze_arg_spec = analyze_arg_spec and plugin_type == 'module'
self._Version: type[LooseVersion | SemanticVersion] = LooseVersion
self._StrictVersion: type[StrictVersion | SemanticVersion] = StrictVersion
self.collection = collection
self.collection_name = 'ansible.builtin'
if self.collection:
self._Version = SemanticVersion
self._StrictVersion = SemanticVersion
collection_namespace_path, collection_name = os.path.split(self.collection)
self.collection_name = '%s.%s' % (os.path.basename(collection_namespace_path), collection_name)
self.routing = routing
self.collection_version = None
if collection_version is not None:
self.collection_version_str = collection_version
self.collection_version = SemanticVersion(collection_version)
self.git_cache = git_cache
self.base_module = self.git_cache.get_original_path(self.path)
with open(path) as f:
self.text = f.read()
self.length = len(self.text.splitlines())
try:
self.ast = ast.parse(self.text)
except Exception:
self.ast = None
def _create_version(self, v, collection_name=None):
if not v:
raise ValueError('Empty string is not a valid version')
if collection_name == 'ansible.builtin':
return LooseVersion(v)
if collection_name is not None:
return SemanticVersion(v)
return self._Version(v)
def _create_strict_version(self, v, collection_name=None):
if not v:
raise ValueError('Empty string is not a valid version')
if collection_name == 'ansible.builtin':
return StrictVersion(v)
if collection_name is not None:
return SemanticVersion(v)
return self._StrictVersion(v)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
@property
def object_name(self):
return self.basename
@property
def object_path(self):
return self.path
def _get_collection_meta(self):
"""Implement if we need this for version_added comparisons
"""
pass
def _python_module(self):
if self.path.endswith('.py'):
return True
return False
def _powershell_module(self):
if self.path.endswith('.ps1'):
return True
return False
def _sidecar_doc(self):
if self.path.endswith('.yml') or self.path.endswith('.yaml'):
return True
return False
def _just_docs(self):
"""Module can contain just docs and from __future__ boilerplate
"""
try:
for child in self.ast.body:
if not isinstance(child, ast.Assign):
# allow string constant expressions (these are docstrings)
if isinstance(child, ast.Expr) and isinstance(child.value, ast.Constant) and isinstance(child.value.value, str):
continue
# allow __future__ imports (the specific allowed imports are checked by other sanity tests)
if isinstance(child, ast.ImportFrom) and child.module == '__future__':
continue
return False
return True
except AttributeError:
return False
def _is_new_module(self) -> bool | None:
"""Return True if the content is new, False if it is not and None if the information is not available."""
return self.git_cache.is_new(self.path)
def _check_interpreter(self, powershell=False):
if self._powershell_module():
if not self.text.startswith('#!powershell\n'):
self.reporter.error(
path=self.object_path,
code='missing-powershell-interpreter',
msg='Interpreter line is not "#!powershell"'
)
return
if self._python_module():
missing_python_interpreter = False
if not self.text.startswith('#!/usr/bin/python'):
if NEW_STYLE_PYTHON_MODULE_RE.search(to_bytes(self.text)):
missing_python_interpreter = self.text.startswith('#!') # shebang optional, but if present must match
else:
missing_python_interpreter = True # shebang required
if missing_python_interpreter:
self.reporter.error(
path=self.object_path,
code='missing-python-interpreter',
msg='Interpreter line is not "#!/usr/bin/python"',
)
def _check_for_sys_exit(self):
# Optimize out the happy path
if 'sys.exit' not in self.text:
return
for line_no, line in enumerate(self.text.splitlines()):
sys_exit_usage = SYS_EXIT_REGEX.match(line)
if sys_exit_usage:
# TODO: add column
self.reporter.error(
path=self.object_path,
code='use-fail-json-not-sys-exit',
msg='sys.exit() call found. Should be exit_json/fail_json',
line=line_no + 1
)
def _check_gpl3_header(self):
header = '\n'.join(self.text.split('\n')[:20])
if ('GNU General Public License' not in header or
('version 3' not in header and 'v3.0' not in header)):
self.reporter.error(
path=self.object_path,
code='missing-gplv3-license',
msg='GPLv3 license header not found in the first 20 lines of the module'
)
elif self._is_new_module():
if len([line for line in header
if 'GNU General Public License' in line]) > 1:
self.reporter.error(
path=self.object_path,
code='use-short-gplv3-license',
msg='Found old style GPLv3 license header: '
'https://docs.ansible.com/ansible-core/devel/dev_guide/developing_modules_documenting.html#copyright'
)
def _check_for_subprocess(self):
for child in self.ast.body:
if isinstance(child, ast.Import):
if child.names[0].name == 'subprocess':
for line_no, line in enumerate(self.text.splitlines()):
sp_match = SUBPROCESS_REGEX.search(line)
if sp_match:
self.reporter.error(
path=self.object_path,
code='use-run-command-not-popen',
msg=('subprocess.Popen call found. Should be module.run_command'),
line=(line_no + 1),
column=(sp_match.span()[0] + 1)
)
def _check_for_os_call(self):
if 'os.call' in self.text:
for line_no, line in enumerate(self.text.splitlines()):
os_call_match = OS_CALL_REGEX.search(line)
if os_call_match:
self.reporter.error(
path=self.object_path,
code='use-run-command-not-os-call',
msg=('os.call() call found. Should be module.run_command'),
line=(line_no + 1),
column=(os_call_match.span()[0] + 1)
)
def _find_rejectlist_imports(self):
for child in self.ast.body:
names = []
if isinstance(child, ast.Import):
names.extend(child.names)
elif isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, ast.Import):
names.extend(grandchild.names)
for name in names:
# TODO: Add line/col
for rejectlist_import, options in REJECTLIST_IMPORTS.items():
if re.search(rejectlist_import, name.name):
new_only = options['new_only']
if self._is_new_module() and new_only:
self.reporter.error(
path=self.object_path,
**options['error']
)
elif not new_only:
self.reporter.error(
path=self.object_path,
**options['error']
)
def _find_module_utils(self):
linenos = []
found_basic = False
for child in self.ast.body:
if isinstance(child, (ast.Import, ast.ImportFrom)):
names = []
try:
names.append(child.module)
if child.module.endswith('.basic'):
found_basic = True
except AttributeError:
pass
names.extend([n.name for n in child.names])
if [n for n in names if n.startswith('ansible.module_utils')]:
linenos.append(child.lineno)
for name in child.names:
if ('module_utils' in getattr(child, 'module', '') and
isinstance(name, ast.alias) and
name.name == '*'):
msg = (
'module-utils-specific-import',
('module_utils imports should import specific '
'components, not "*"')
)
if self._is_new_module():
self.reporter.error(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=child.lineno
)
else:
self.reporter.warning(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=child.lineno
)
if (isinstance(name, ast.alias) and
name.name == 'basic'):
found_basic = True
if not found_basic:
self.reporter.warning(
path=self.object_path,
code='missing-module-utils-basic-import',
msg='Did not find "ansible.module_utils.basic" import'
)
return linenos
def _get_first_callable(self):
linenos = []
for child in self.ast.body:
if isinstance(child, (ast.FunctionDef, ast.ClassDef)):
linenos.append(child.lineno)
return min(linenos) if linenos else None
def _find_has_import(self):
for child in self.ast.body:
found_try_except_import = False
found_has = False
if isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, ast.Import):
found_try_except_import = True
if isinstance(grandchild, ast.Assign):
for target in grandchild.targets:
if not isinstance(target, ast.Name):
continue
if target.id.lower().startswith('has_'):
found_has = True
if found_try_except_import and not found_has:
# TODO: Add line/col
self.reporter.warning(
path=self.object_path,
code='try-except-missing-has',
msg='Found Try/Except block without HAS_ assignment'
)
def _ensure_imports_below_docs(self, doc_info, first_callable):
doc_line_numbers = [lineno for lineno in (doc_info[key]['lineno'] for key in doc_info) if lineno > 0]
min_doc_line = min(doc_line_numbers) if doc_line_numbers else None
max_doc_line = max(doc_info[key]['end_lineno'] for key in doc_info)
import_lines = []
for child in self.ast.body:
if isinstance(child, (ast.Import, ast.ImportFrom)):
# allow __future__ imports (the specific allowed imports are checked by other sanity tests)
if isinstance(child, ast.ImportFrom) and child.module == '__future__':
continue
import_lines.append(child.lineno)
if min_doc_line and child.lineno < min_doc_line:
self.reporter.error(
path=self.object_path,
code='import-before-documentation',
msg=('Import found before documentation variables. '
'All imports must appear below '
'DOCUMENTATION/EXAMPLES/RETURN.'),
line=child.lineno
)
break
elif isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, (ast.Import, ast.ImportFrom)):
import_lines.append(grandchild.lineno)
if min_doc_line and grandchild.lineno < min_doc_line:
self.reporter.error(
path=self.object_path,
code='import-before-documentation',
msg=('Import found before documentation '
'variables. All imports must appear below '
'DOCUMENTATION/EXAMPLES/RETURN.'),
line=child.lineno
)
break
for import_line in import_lines:
if not (max_doc_line < import_line < first_callable):
msg = (
'import-placement',
('Imports should be directly below DOCUMENTATION/EXAMPLES/'
'RETURN.')
)
if self._is_new_module():
self.reporter.error(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=import_line
)
else:
self.reporter.warning(
path=self.object_path,
code=msg[0],
msg=msg[1],
line=import_line
)
def _validate_ps_replacers(self):
# loop all (for/else + error)
# get module list for each
# check "shape" of each module name
legacy_ps_requires = r'(?im)^#\s*Requires\s+\-Module(?:s?)\s+(Ansible\.ModuleUtils\..+)'
ps_requires = r"""(?imx)
^\#\s*AnsibleRequires\s+-PowerShell\s+
(
# Builtin PowerShell module
(Ansible\.ModuleUtils\.[\w\.]+)
|
# Fully qualified collection PowerShell module
(ansible_collections\.\w+\.\w+\.plugins\.module_utils\.[\w\.]+)
|
# Relative collection PowerShell module
(\.[\w\.]+)
)
(\s+-Optional)?"""
csharp_requires = r"""(?imx)
^\#\s*AnsibleRequires\s+-CSharpUtil\s+
(
# Builtin C# util
(Ansible\.[\w\.]+)
|
# Fully qualified collection C# util
(ansible_collections\.\w+\.\w+\.plugins\.module_utils\.[\w\.]+)
|
# Relative collection C# util
(\.[\w\.]+)
)
(\s+-Optional)?"""
found_requires = False
for pattern, required_type in [(legacy_ps_requires, "Requires"), (ps_requires, "AnsibleRequires")]:
for req_stmt in re.finditer(pattern, self.text):
found_requires = True
# this will bomb on dictionary format - "don't do that"
module_list = [x.strip() for x in req_stmt.group(1).split(',')]
if len(module_list) > 1:
self.reporter.error(
path=self.object_path,
code='multiple-utils-per-requires',
msg='Ansible.ModuleUtils requirements do not support multiple modules per statement: "%s"' % req_stmt.group(0)
)
continue
module_name = module_list[0]
if module_name.lower().endswith('.psm1'):
self.reporter.error(
path=self.object_path,
code='invalid-requires-extension',
msg='Module #%s should not end in .psm1: "%s"' % (required_type, module_name)
)
for req_stmt in re.finditer(csharp_requires, self.text):
found_requires = True
# this will bomb on dictionary format - "don't do that"
module_list = [x.strip() for x in req_stmt.group(1).split(',')]
if len(module_list) > 1:
self.reporter.error(
path=self.object_path,
code='multiple-csharp-utils-per-requires',
msg='Ansible C# util requirements do not support multiple utils per statement: "%s"' % req_stmt.group(0)
)
continue
module_name = module_list[0]
if module_name.lower().endswith('.cs'):
self.reporter.error(
path=self.object_path,
code='illegal-extension-cs',
msg='Module #AnsibleRequires -CSharpUtil should not end in .cs: "%s"' % module_name
)
# also accept the legacy #POWERSHELL_COMMON replacer signal
if not found_requires and REPLACER_WINDOWS not in self.text:
self.reporter.error(
path=self.object_path,
code='missing-module-utils-import-csharp-requirements',
msg='No Ansible.ModuleUtils or C# Ansible util requirements/imports found'
)
def _find_ps_docs_file(self):
sidecar = self._find_sidecar_docs()
if sidecar:
return sidecar
py_path = self.path.replace('.ps1', '.py')
if not os.path.isfile(py_path):
self.reporter.error(
path=self.object_path,
code='missing-documentation',
msg='No DOCUMENTATION provided'
)
return py_path
def _find_sidecar_docs(self):
base_path = os.path.splitext(self.path)[0]
for ext in ('.yml', '.yaml'):
doc_path = f"{base_path}{ext}"
if os.path.isfile(doc_path):
return doc_path
def _get_py_docs(self):
docs = {
'DOCUMENTATION': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
'EXAMPLES': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
'RETURN': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
}
for child in self.ast.body:
if isinstance(child, ast.Assign):
for grandchild in child.targets:
if not isinstance(grandchild, ast.Name):
continue
if grandchild.id == 'DOCUMENTATION':
docs['DOCUMENTATION']['value'] = child.value.value
docs['DOCUMENTATION']['lineno'] = child.lineno
docs['DOCUMENTATION']['end_lineno'] = (
child.lineno + len(child.value.value.splitlines())
)
elif grandchild.id == 'EXAMPLES':
docs['EXAMPLES']['value'] = child.value.value
docs['EXAMPLES']['lineno'] = child.lineno
docs['EXAMPLES']['end_lineno'] = (
child.lineno + len(child.value.value.splitlines())
)
elif grandchild.id == 'RETURN':
docs['RETURN']['value'] = child.value.value
docs['RETURN']['lineno'] = child.lineno
docs['RETURN']['end_lineno'] = (
child.lineno + len(child.value.value.splitlines())
)
return docs
def _validate_docs_schema(self, doc, schema, name, error_code):
# TODO: Add line/col
errors = []
try:
schema(doc)
except Exception as e:
for error in e.errors:
error.data = doc
errors.extend(e.errors)
for error in errors:
path = [str(p) for p in error.path]
local_error_code = getattr(error, 'ansible_error_code', error_code)
if isinstance(error.data, dict):
error_message = humanize_error(error.data, error)
else:
error_message = error
if path:
combined_path = '%s.%s' % (name, '.'.join(path))
else:
combined_path = name
self.reporter.error(
path=self.object_path,
code=local_error_code,
msg='%s: %s' % (combined_path, error_message)
)
def _validate_option_docs(self, options, context=None):
if not isinstance(options, dict):
return
if context is None:
context = []
normalized_option_alias_names = dict()
def add_option_alias_name(name, option_name):
normalized_name = str(name).lower()
normalized_option_alias_names.setdefault(normalized_name, {}).setdefault(option_name, set()).add(name)
for option, data in options.items():
if 'suboptions' in data:
self._validate_option_docs(data.get('suboptions'), context + [option])
add_option_alias_name(option, option)
if 'aliases' in data and isinstance(data['aliases'], list):
for alias in data['aliases']:
add_option_alias_name(alias, option)
for normalized_name, options in normalized_option_alias_names.items():
if len(options) < 2:
continue
what = []
for option_name, names in sorted(options.items()):
if option_name in names:
what.append("option '%s'" % option_name)
else:
what.append("alias '%s' of option '%s'" % (sorted(names)[0], option_name))
msg = "Multiple options/aliases"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " are equal up to casing: %s" % ", ".join(what)
self.reporter.error(
path=self.object_path,
code='option-equal-up-to-casing',
msg=msg,
)
def _validate_return_docs(self, returns: object, context: list[str] | None = None) -> None:
if not isinstance(returns, dict):
return
if context is None:
context = []
for rv, data in returns.items():
if isinstance(data, dict) and "contains" in data:
self._validate_return_docs(data["contains"], context + [rv])
if str(rv) in FORBIDDEN_DICTIONARY_KEYS or not str(rv).isidentifier():
msg = f"Return value key {rv!r}"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " should not be used for return values since it cannot be accessed with dot notation in Jinja"
self.reporter.error(
path=self.object_path,
code='bad-return-value-key',
msg=msg,
)
    def _validate_docs(self):
        """Parse and validate the plugin's DOCUMENTATION, EXAMPLES and RETURN docs.

        Docs come either from a YAML sidecar file or from the Python source's
        doc string-assignments; each section is YAML-parsed, doc fragments are
        merged in, schemas are applied, and deprecation markers (filename
        prefix, DOCUMENTATION.deprecated, meta/runtime.yml routing) are
        cross-checked for agreement.

        Returns:
            tuple: (doc_info, doc) — the raw per-section extraction from a
            Python module (None for non-Python plugins) and the parsed
            DOCUMENTATION dict (None if missing/unparsable).
        """
        doc = None
        # We have three ways of marking deprecated/removed files. Have to check each one
        # individually and then make sure they all agree
        filename_deprecated_or_removed = False
        deprecated = False
        doc_deprecated = None  # doc legally might not exist
        routing_says_deprecated = False
        # Leading underscore marks deprecation/removal unless it is just a symlinked alias.
        if self.object_name.startswith('_') and not os.path.islink(self.object_path):
            filename_deprecated_or_removed = True
        # We are testing a collection
        if self.routing:
            # Walk plugin_routing -> <plugin type> -> <name> -> deprecation.
            routing_deprecation = self.routing.get('plugin_routing', {})
            routing_deprecation = routing_deprecation.get('modules' if self.plugin_type == 'module' else self.plugin_type, {})
            routing_deprecation = routing_deprecation.get(self.name, {}).get('deprecation', {})
            if routing_deprecation:
                # meta/runtime.yml says this is deprecated
                routing_says_deprecated = True
                deprecated = True
        if self._python_module():
            doc_info = self._get_py_docs()
        else:
            doc_info = None
        # Sidecar docs: either this object *is* the sidecar file, or a
        # .yml/.yaml file sits next to the plugin source.
        sidecar_text = None
        if self._sidecar_doc():
            sidecar_text = self.text
        elif sidecar_path := self._find_sidecar_docs():
            with open(sidecar_path, mode='r', encoding='utf-8') as fd:
                sidecar_text = fd.read()
        # NOTE(review): if neither sidecar_text nor doc_info is available,
        # examples_raw/examples_lineno/returns stay unbound and the EXAMPLES
        # check below would raise NameError — confirm that path is unreachable.
        if sidecar_text:
            sidecar_doc, errors, traces = parse_yaml(sidecar_text, 0, self.name, 'DOCUMENTATION')
            for error in errors:
                self.reporter.error(
                    path=self.object_path,
                    code='documentation-syntax-error',
                    **error
                )
            for trace in traces:
                self.reporter.trace(
                    path=self.object_path,
                    tracebk=trace
                )
            doc = sidecar_doc.get('DOCUMENTATION', None)
            examples_raw = sidecar_doc.get('EXAMPLES', None)
            examples_lineno = 1
            returns = sidecar_doc.get('RETURN', None)
        elif doc_info:
            if bool(doc_info['DOCUMENTATION']['value']):
                doc, errors, traces = parse_yaml(
                    doc_info['DOCUMENTATION']['value'],
                    doc_info['DOCUMENTATION']['lineno'],
                    self.name, 'DOCUMENTATION'
                )
                for error in errors:
                    self.reporter.error(
                        path=self.object_path,
                        code='documentation-syntax-error',
                        **error
                    )
                for trace in traces:
                    self.reporter.trace(
                        path=self.object_path,
                        tracebk=trace
                    )
            examples_raw = doc_info['EXAMPLES']['value']
            examples_lineno = doc_info['EXAMPLES']['lineno']
            returns = None
            if bool(doc_info['RETURN']['value']):
                returns, errors, traces = parse_yaml(doc_info['RETURN']['value'],
                                                     doc_info['RETURN']['lineno'],
                                                     self.name, 'RETURN')
                for error in errors:
                    self.reporter.error(
                        path=self.object_path,
                        code='return-syntax-error',
                        **error
                    )
                for trace in traces:
                    self.reporter.trace(
                        path=self.object_path,
                        tracebk=trace
                    )
        if doc:
            # Tag versions/dates with the collection name so schema checks can
            # compare them properly.
            add_collection_to_versions_and_dates(doc, self.collection_name,
                                                 is_module=self.plugin_type == 'module')
            # Dry-run the real docstring loader to surface fragment problems early.
            with CaptureStd():
                try:
                    get_docstring(os.path.abspath(self.path), fragment_loader=fragment_loader,
                                  verbose=True,
                                  collection_name=self.collection_name,
                                  plugin_type=self.plugin_type)
                except AnsibleFragmentError:
                    # Will be re-triggered below when explicitly calling add_fragments()
                    pass
                except Exception as e:
                    self.reporter.trace(
                        path=self.object_path,
                        tracebk=traceback.format_exc()
                    )
                    self.reporter.error(
                        path=self.object_path,
                        code='documentation-error',
                        msg='Unknown DOCUMENTATION error, see TRACE: %s' % e
                    )
            try:
                add_fragments(doc, os.path.abspath(self.object_path), fragment_loader=fragment_loader,
                              is_module=self.plugin_type == 'module', section='DOCUMENTATION')
            except AnsibleFragmentError as exc:
                error = str(exc).replace(os.path.abspath(self.object_path), self.object_path)
                self.reporter.error(
                    path=self.object_path,
                    code='doc-fragment-error',
                    msg=f'Error while adding fragments: {error}'
                )
            if 'options' in doc and doc['options'] is None:
                self.reporter.error(
                    path=self.object_path,
                    code='invalid-documentation-options',
                    msg='DOCUMENTATION.options must be a dictionary/hash when used',
                )
            if 'deprecated' in doc and doc.get('deprecated'):
                doc_deprecated = True
                doc_deprecation = doc['deprecated']
                documentation_collection = doc_deprecation.get('removed_from_collection')
                if documentation_collection != self.collection_name:
                    self.reporter.error(
                        path=self.object_path,
                        code='deprecation-wrong-collection',
                        msg='"DOCUMENTATION.deprecation.removed_from_collection must be the current collection name: %r vs. %r' % (
                            documentation_collection, self.collection_name)
                    )
            else:
                doc_deprecated = False
            if os.path.islink(self.object_path):
                # This module has an alias, which we can tell as it's a symlink
                # Rather than checking for `module: $filename` we need to check against the true filename
                self._validate_docs_schema(
                    doc,
                    doc_schema(
                        os.readlink(self.object_path).split('.')[0],
                        for_collection=bool(self.collection),
                        deprecated_module=deprecated,
                        plugin_type=self.plugin_type,
                    ),
                    'DOCUMENTATION',
                    'invalid-documentation',
                )
            else:
                # This is the normal case
                self._validate_docs_schema(
                    doc,
                    doc_schema(
                        self.object_name.split('.')[0],
                        for_collection=bool(self.collection),
                        deprecated_module=deprecated,
                        plugin_type=self.plugin_type,
                    ),
                    'DOCUMENTATION',
                    'invalid-documentation',
                )
            # (inner 'if doc' is redundant with the enclosing guard)
            if doc:
                self._validate_option_docs(doc.get('options'))
            self._validate_all_semantic_markup(doc, returns)
            if not self.collection:
                # Core modules: verify version_added against the previously
                # released documentation.
                existing_doc = self._check_for_new_args(doc)
                self._check_version_added(doc, existing_doc)
        else:
            self.reporter.error(
                path=self.object_path,
                code='missing-documentation',
                msg='No DOCUMENTATION provided',
            )
        if not examples_raw and self.plugin_type in PLUGINS_WITH_EXAMPLES:
            # (inner check is redundant with the guard above)
            if self.plugin_type in PLUGINS_WITH_EXAMPLES:
                self.reporter.error(
                    path=self.object_path,
                    code='missing-examples',
                    msg='No EXAMPLES provided'
                )
        elif self.plugin_type in PLUGINS_WITH_YAML_EXAMPLES:
            # EXAMPLES may contain multiple YAML documents and Ansible tags.
            dummy, errors, traces = parse_yaml(examples_raw,
                                               examples_lineno,
                                               self.name, 'EXAMPLES',
                                               load_all=True,
                                               ansible_loader=True)
            for error in errors:
                self.reporter.error(
                    path=self.object_path,
                    code='invalid-examples',
                    **error
                )
            for trace in traces:
                self.reporter.trace(
                    path=self.object_path,
                    tracebk=trace
                )
        if returns:
            # (inner 'if returns' is redundant with the enclosing guard)
            if returns:
                add_collection_to_versions_and_dates(
                    returns,
                    self.collection_name,
                    is_module=self.plugin_type == 'module',
                    return_docs=True)
            try:
                add_fragments(returns, os.path.abspath(self.object_path), fragment_loader=fragment_loader,
                              is_module=self.plugin_type == 'module', section='RETURN')
            except AnsibleFragmentError as exc:
                error = str(exc).replace(os.path.abspath(self.object_path), self.object_path)
                self.reporter.error(
                    path=self.object_path,
                    code='return-fragment-error',
                    msg=f'Error while adding fragments: {error}'
                )
            self._validate_docs_schema(
                returns,
                return_schema(for_collection=bool(self.collection), plugin_type=self.plugin_type),
                'RETURN', 'return-syntax-error')
            self._validate_return_docs(returns)
        elif self.plugin_type in PLUGINS_WITH_RETURN_VALUES:
            # Missing RETURN is an error for new modules, a warning for legacy ones.
            if self._is_new_module():
                self.reporter.error(
                    path=self.object_path,
                    code='missing-return',
                    msg='No RETURN provided'
                )
            else:
                self.reporter.warning(
                    path=self.object_path,
                    code='missing-return-legacy',
                    msg='No RETURN provided'
                )
        # Check for mismatched deprecation
        if not self.collection:
            mismatched_deprecation = True
            if not (filename_deprecated_or_removed or deprecated or doc_deprecated):
                mismatched_deprecation = False
            else:
                if (filename_deprecated_or_removed and doc_deprecated):
                    mismatched_deprecation = False
                if (filename_deprecated_or_removed and not doc):
                    mismatched_deprecation = False
            if mismatched_deprecation:
                self.reporter.error(
                    path=self.object_path,
                    code='deprecation-mismatch',
                    msg='Module deprecation/removed must agree in documentation, by prepending filename with'
                        ' "_", and setting DOCUMENTATION.deprecated for deprecation or by removing all'
                        ' documentation for removed'
                )
        else:
            if not (doc_deprecated == routing_says_deprecated):
                # DOCUMENTATION.deprecated and meta/runtime.yml disagree
                self.reporter.error(
                    path=self.object_path,
                    code='deprecation-mismatch',
                    msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree.'
                )
            elif routing_says_deprecated:
                # Both DOCUMENTATION.deprecated and meta/runtime.yml agree that the module is deprecated.
                # Make sure they give the same version or date.
                routing_date = routing_deprecation.get('removal_date')
                routing_version = routing_deprecation.get('removal_version')
                # The versions and dates in the module documentation are auto-tagged, so remove the tag
                # to make comparison possible and to avoid confusing the user.
                documentation_date = doc_deprecation.get('removed_at_date')
                documentation_version = doc_deprecation.get('removed_in')
                if not compare_dates(routing_date, documentation_date):
                    self.reporter.error(
                        path=self.object_path,
                        code='deprecation-mismatch',
                        msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal date: %r vs. %r' % (
                            routing_date, documentation_date)
                    )
                if routing_version != documentation_version:
                    self.reporter.error(
                        path=self.object_path,
                        code='deprecation-mismatch',
                        msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal version: %r vs. %r' % (
                            routing_version, documentation_version)
                    )
        # In the future we should error if ANSIBLE_METADATA exists in a collection
        return doc_info, doc
def _check_sem_option(self, part: dom.OptionNamePart, current_plugin: dom.PluginIdentifier) -> None:
if part.plugin is None or part.plugin != current_plugin:
return
if part.entrypoint is not None:
return
if tuple(part.link) not in self._all_options:
self.reporter.error(
path=self.object_path,
code='invalid-documentation-markup',
msg='Directive "%s" contains a non-existing option "%s"' % (part.source, part.name)
)
def _check_sem_return_value(self, part: dom.ReturnValuePart, current_plugin: dom.PluginIdentifier) -> None:
if part.plugin is None or part.plugin != current_plugin:
return
if part.entrypoint is not None:
return
if tuple(part.link) not in self._all_return_values:
self.reporter.error(
path=self.object_path,
code='invalid-documentation-markup',
msg='Directive "%s" contains a non-existing return value "%s"' % (part.source, part.name)
)
def _validate_semantic_markup(self, object) -> None:
# Make sure we operate on strings
if is_iterable(object):
for entry in object:
self._validate_semantic_markup(entry)
return
if not isinstance(object, str):
return
if self.collection:
fqcn = f'{self.collection_name}.{self.name}'
else:
fqcn = f'ansible.builtin.{self.name}'
current_plugin = dom.PluginIdentifier(fqcn=fqcn, type=self.plugin_type)
for par in parse(object, Context(current_plugin=current_plugin), errors='message', add_source=True):
for part in par:
# Errors are already covered during schema validation, we only check for option and
# return value references
if part.type == dom.PartType.OPTION_NAME:
self._check_sem_option(part, current_plugin)
if part.type == dom.PartType.RETURN_VALUE:
self._check_sem_return_value(part, current_plugin)
def _validate_semantic_markup_collect(self, destination, sub_key, data, all_paths):
if not isinstance(data, dict):
return
for key, value in data.items():
if not isinstance(value, dict):
continue
keys = {key}
if is_iterable(value.get('aliases')):
keys.update(value['aliases'])
new_paths = [path + [key] for path in all_paths for key in keys]
destination.update([tuple(path) for path in new_paths])
self._validate_semantic_markup_collect(destination, sub_key, value.get(sub_key), new_paths)
def _validate_semantic_markup_options(self, options):
if not isinstance(options, dict):
return
for key, value in options.items():
if isinstance(value, dict):
self._validate_semantic_markup(value.get('description'))
self._validate_semantic_markup_options(value.get('suboptions'))
def _validate_semantic_markup_return_values(self, return_vars):
if not isinstance(return_vars, dict):
return
for key, value in return_vars.items():
if isinstance(value, dict):
self._validate_semantic_markup(value.get('description'))
self._validate_semantic_markup(value.get('returned'))
self._validate_semantic_markup_return_values(value.get('contains'))
def _validate_all_semantic_markup(self, docs, return_docs):
if not isinstance(docs, dict):
docs = {}
if not isinstance(return_docs, dict):
return_docs = {}
self._all_options = set()
self._all_return_values = set()
self._validate_semantic_markup_collect(self._all_options, 'suboptions', docs.get('options'), [[]])
self._validate_semantic_markup_collect(self._all_return_values, 'contains', return_docs, [[]])
for string_keys in ('short_description', 'description', 'notes', 'requirements', 'todo'):
self._validate_semantic_markup(docs.get(string_keys))
if is_iterable(docs.get('seealso')):
for entry in docs.get('seealso'):
if isinstance(entry, dict):
self._validate_semantic_markup(entry.get('description'))
if isinstance(docs.get('attributes'), dict):
for entry in docs.get('attributes').values():
if isinstance(entry, dict):
for key in ('description', 'details'):
self._validate_semantic_markup(entry.get(key))
if isinstance(docs.get('deprecated'), dict):
for key in ('why', 'alternative', 'alternatives'):
self._validate_semantic_markup(docs.get('deprecated').get(key))
self._validate_semantic_markup_options(docs.get('options'))
self._validate_semantic_markup_return_values(return_docs)
def _check_version_added(self, doc, existing_doc):
version_added_raw = doc.get('version_added')
try:
collection_name = doc.get('version_added_collection')
version_added = self._create_strict_version(
str(version_added_raw or '0.0'),
collection_name=collection_name)
except ValueError as e:
version_added = version_added_raw or '0.0'
if self._is_new_module() or version_added != 'historical':
# already reported during schema validation, except:
if version_added == 'historical':
self.reporter.error(
path=self.object_path,
code='module-invalid-version-added',
msg='version_added is not a valid version number: %r. Error: %s' % (version_added, e)
)
return
if existing_doc and str(version_added_raw) != str(existing_doc.get('version_added')):
self.reporter.error(
path=self.object_path,
code='module-incorrect-version-added',
msg='version_added should be %r. Currently %r' % (existing_doc.get('version_added'), version_added_raw)
)
if not self._is_new_module():
return
should_be = '.'.join(ansible_version.split('.')[:2])
strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
if (version_added < strict_ansible_version or
strict_ansible_version < version_added):
self.reporter.error(
path=self.object_path,
code='module-incorrect-version-added',
msg='version_added should be %r. Currently %r' % (should_be, version_added_raw)
)
    def _validate_ansible_module_call(self, docs):
        """Introspect the module's AnsibleModule(...) invocation and validate it.

        Extracts the runtime argument_spec and AnsibleModule kwargs (Python or
        PowerShell), checks the kwargs against the schema, cross-validates the
        argument spec against *docs*, and verifies the documented check_mode
        attribute agrees with supports_check_mode.
        """
        try:
            if self._python_module():
                spec, kwargs = get_py_argument_spec(self.path, self.collection)
            elif self._powershell_module():
                spec, kwargs = get_ps_argument_spec(self.path, self.collection)
            else:
                # Only Python and PowerShell modules can be introspected.
                raise NotImplementedError()
        except AnsibleModuleNotInitialized:
            self.reporter.error(
                path=self.object_path,
                code='ansible-module-not-initialized',
                msg="Execution of the module did not result in initialization of AnsibleModule",
            )
            return
        except AnsibleModuleImportError as e:
            self.reporter.error(
                path=self.object_path,
                code='import-error',
                msg="Exception attempting to import module for argument_spec introspection, '%s'" % e
            )
            self.reporter.trace(
                path=self.object_path,
                tracebk=traceback.format_exc()
            )
            return
        schema = ansible_module_kwargs_schema(self.object_name.split('.')[0], for_collection=bool(self.collection))
        self._validate_docs_schema(kwargs, schema, 'AnsibleModule', 'invalid-ansiblemodule-schema')
        self._validate_argument_spec(docs, spec, kwargs)
        # Cross-check the documented check_mode attribute against the actual
        # supports_check_mode flag passed to AnsibleModule.
        if isinstance(docs, Mapping) and isinstance(docs.get('attributes'), Mapping):
            if isinstance(docs['attributes'].get('check_mode'), Mapping):
                support_value = docs['attributes']['check_mode'].get('support')
                if not kwargs.get('supports_check_mode', False):
                    # Module does not support check mode: docs must say 'none'.
                    if support_value != 'none':
                        self.reporter.error(
                            path=self.object_path,
                            code='attributes-check-mode',
                            msg="The module does not declare support for check mode, but the check_mode attribute's"
                                " support value is '%s' and not 'none'" % support_value
                        )
                else:
                    # Module supports check mode: docs must claim (at least partial) support.
                    if support_value not in ('full', 'partial', 'N/A'):
                        self.reporter.error(
                            path=self.object_path,
                            code='attributes-check-mode',
                            msg="The module does declare support for check mode, but the check_mode attribute's support value is '%s'" % support_value
                        )
                    # Partial/N-A support needs an explanation in 'details'.
                    if support_value in ('partial', 'N/A') and docs['attributes']['check_mode'].get('details') in (None, '', []):
                        self.reporter.error(
                            path=self.object_path,
                            code='attributes-check-mode-details',
                            msg="The module declares it does not fully support check mode, but has no details on what exactly that means"
                        )
def _validate_list_of_module_args(self, name, terms, spec, context):
if terms is None:
return
if not isinstance(terms, (list, tuple)):
# This is already reported by schema checking
return
for check in terms:
if not isinstance(check, (list, tuple)):
# This is already reported by schema checking
continue
bad_term = False
for term in check:
if not isinstance(term, str):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must contain strings in the lists or tuples; found value %r" % (term, )
self.reporter.error(
path=self.object_path,
code=name + '-type',
msg=msg,
)
bad_term = True
if bad_term:
continue
if len(set(check)) != len(check):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has repeated terms"
self.reporter.error(
path=self.object_path,
code=name + '-collision',
msg=msg,
)
if not set(check) <= set(spec):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(check).difference(set(spec))))
self.reporter.error(
path=self.object_path,
code=name + '-unknown',
msg=msg,
)
    def _validate_required_if(self, terms, spec, context, module):
        """Validate AnsibleModule's required_if entries against the argument spec.

        Each entry is ``[key, value, requirements]`` or
        ``[key, value, requirements, is_one_of]``. Reports malformed entries,
        unknown argument names, a key listed in its own requirements, and a
        trigger value that does not fit the key's declared type.
        (``module`` is not referenced in this body; kept for interface stability.)
        """
        if terms is None:
            return
        if not isinstance(terms, (list, tuple)):
            # This is already reported by schema checking
            return
        for check in terms:
            if not isinstance(check, (list, tuple)) or len(check) not in [3, 4]:
                # This is already reported by schema checking
                continue
            # Optional fourth element (is_one_of semantics) must be a bool.
            if len(check) == 4 and not isinstance(check[3], bool):
                msg = "required_if"
                if context:
                    msg += " found in %s" % " -> ".join(context)
                # NOTE(review): 'forth' looks like a typo for 'fourth' in this
                # user-facing message — confirm before changing reported output.
                msg += " must have forth value omitted or of type bool; got %r" % (check[3], )
                self.reporter.error(
                    path=self.object_path,
                    code='required_if-is_one_of-type',
                    msg=msg,
                )
            requirements = check[2]
            if not isinstance(requirements, (list, tuple)):
                msg = "required_if"
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " must have third value (requirements) being a list or tuple; got type %r" % (requirements, )
                self.reporter.error(
                    path=self.object_path,
                    code='required_if-requirements-type',
                    msg=msg,
                )
                continue
            # Report every non-string requirement, then skip the remaining
            # checks for this entry (they need hashable string terms).
            bad_term = False
            for term in requirements:
                if not isinstance(term, str):
                    msg = "required_if"
                    if context:
                        msg += " found in %s" % " -> ".join(context)
                    msg += " must have only strings in third value (requirements); got %r" % (term, )
                    self.reporter.error(
                        path=self.object_path,
                        code='required_if-requirements-type',
                        msg=msg,
                    )
                    bad_term = True
            if bad_term:
                continue
            if len(set(requirements)) != len(requirements):
                msg = "required_if"
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " has repeated terms in requirements"
                self.reporter.error(
                    path=self.object_path,
                    code='required_if-requirements-collision',
                    msg=msg,
                )
            if not set(requirements) <= set(spec):
                msg = "required_if"
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " contains terms in requirements which are not part of argument_spec: %s" % ", ".join(sorted(set(requirements).difference(set(spec))))
                self.reporter.error(
                    path=self.object_path,
                    code='required_if-requirements-unknown',
                    msg=msg,
                )
            key = check[0]
            if key not in spec:
                msg = "required_if"
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " must have its key %s in argument_spec" % key
                self.reporter.error(
                    path=self.object_path,
                    code='required_if-unknown-key',
                    msg=msg,
                )
                continue
            if key in requirements:
                msg = "required_if"
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " contains its key %s in requirements" % key
                self.reporter.error(
                    path=self.object_path,
                    code='required_if-key-in-requirements',
                    msg=msg,
                )
            value = check[1]
            if value is not None:
                # Verify the trigger value can be produced by the key's type
                # checker; otherwise the condition could never match at runtime.
                _type = spec[key].get('type', 'str')
                if callable(_type):
                    _type_checker = _type
                else:
                    _type_checker = DEFAULT_TYPE_VALIDATORS.get(_type)
                try:
                    with CaptureStd():
                        dummy = _type_checker(value)
                except (Exception, SystemExit):
                    msg = "required_if"
                    if context:
                        msg += " found in %s" % " -> ".join(context)
                    msg += " has value %r which does not fit to %s's parameter type %r" % (value, key, _type)
                    self.reporter.error(
                        path=self.object_path,
                        code='required_if-value-type',
                        msg=msg,
                    )
def _validate_required_by(self, terms, spec, context):
if terms is None:
return
if not isinstance(terms, Mapping):
# This is already reported by schema checking
return
for key, value in terms.items():
if isinstance(value, str):
value = [value]
if not isinstance(value, (list, tuple)):
# This is already reported by schema checking
continue
for term in value:
if not isinstance(term, str):
# This is already reported by schema checking
continue
if len(set(value)) != len(value) or key in value:
msg = "required_by"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has repeated terms"
self.reporter.error(
path=self.object_path,
code='required_by-collision',
msg=msg,
)
if not set(value) <= set(spec) or key not in spec:
msg = "required_by"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(value).difference(set(spec))))
self.reporter.error(
path=self.object_path,
code='required_by-unknown',
msg=msg,
)
def _validate_argument_spec(self, docs, spec, kwargs, context=None, last_context_spec=None):
if not self.analyze_arg_spec:
return
if docs is None:
docs = {}
if context is None:
context = []
if last_context_spec is None:
last_context_spec = kwargs
try:
if not context:
add_fragments(docs, os.path.abspath(self.object_path), fragment_loader=fragment_loader,
is_module=self.plugin_type == 'module', section='DOCUMENTATION')
except Exception:
# Cannot merge fragments
return
# Use this to access type checkers later
module = NoArgsAnsibleModule({})
self._validate_list_of_module_args('mutually_exclusive', last_context_spec.get('mutually_exclusive'), spec, context)
self._validate_list_of_module_args('required_together', last_context_spec.get('required_together'), spec, context)
self._validate_list_of_module_args('required_one_of', last_context_spec.get('required_one_of'), spec, context)
self._validate_required_if(last_context_spec.get('required_if'), spec, context, module)
self._validate_required_by(last_context_spec.get('required_by'), spec, context)
provider_args = set()
args_from_argspec = set()
deprecated_args_from_argspec = set()
doc_options = docs.get('options', {})
if doc_options is None:
doc_options = {}
for arg, data in spec.items():
restricted_argument_names = ('message', 'syslog_facility')
if arg.lower() in restricted_argument_names:
msg = "Argument '%s' in argument_spec " % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += "must not be one of %s as it is used " \
"internally by Ansible Core Engine" % (",".join(restricted_argument_names))
self.reporter.error(
path=self.object_path,
code='invalid-argument-name',
msg=msg,
)
continue
if 'aliases' in data:
for al in data['aliases']:
if al.lower() in restricted_argument_names:
msg = "Argument alias '%s' in argument_spec " % al
if context:
msg += " found in %s" % " -> ".join(context)
msg += "must not be one of %s as it is used " \
"internally by Ansible Core Engine" % (",".join(restricted_argument_names))
self.reporter.error(
path=self.object_path,
code='invalid-argument-name',
msg=msg,
)
continue
# Could this a place where secrets are leaked?
# If it is type: path we know it's not a secret key as it's a file path.
# If it is type: bool it is more likely a flag indicating that something is secret, than an actual secret.
if all((
data.get('no_log') is None, is_potential_secret_option(arg),
data.get('type') not in ("path", "bool"), data.get('choices') is None,
)):
msg = "Argument '%s' in argument_spec could be a secret, though doesn't have `no_log` set" % arg
if context:
msg += " found in %s" % " -> ".join(context)
self.reporter.error(
path=self.object_path,
code='no-log-needed',
msg=msg,
)
if not isinstance(data, dict):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must be a dictionary/hash when used"
self.reporter.error(
path=self.object_path,
code='invalid-argument-spec',
msg=msg,
)
continue
removed_at_date = data.get('removed_at_date', None)
if removed_at_date is not None:
try:
if parse_isodate(removed_at_date, allow_date=False) < datetime.date.today():
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has a removed_at_date '%s' before today" % removed_at_date
self.reporter.error(
path=self.object_path,
code='deprecated-date',
msg=msg,
)
except ValueError:
# This should only happen when removed_at_date is not in ISO format. Since schema
# validation already reported this as an error, don't report it a second time.
pass
deprecated_aliases = data.get('deprecated_aliases', None)
if deprecated_aliases is not None:
for deprecated_alias in deprecated_aliases:
if 'name' in deprecated_alias and 'date' in deprecated_alias:
try:
date = deprecated_alias['date']
if parse_isodate(date, allow_date=False) < datetime.date.today():
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with removal date '%s' before today" % (
deprecated_alias['name'], deprecated_alias['date'])
self.reporter.error(
path=self.object_path,
code='deprecated-date',
msg=msg,
)
except ValueError:
# This should only happen when deprecated_alias['date'] is not in ISO format. Since
# schema validation already reported this as an error, don't report it a second
# time.
pass
has_version = False
if self.collection and self.collection_version is not None:
compare_version = self.collection_version
version_of_what = "this collection (%s)" % self.collection_version_str
code_prefix = 'collection'
has_version = True
elif not self.collection:
compare_version = LOOSE_ANSIBLE_VERSION
version_of_what = "Ansible (%s)" % ansible_version
code_prefix = 'ansible'
has_version = True
removed_in_version = data.get('removed_in_version', None)
if removed_in_version is not None:
try:
collection_name = data.get('removed_from_collection')
removed_in = self._create_version(str(removed_in_version), collection_name=collection_name)
if has_version and collection_name == self.collection_name and compare_version >= removed_in:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has a deprecated removed_in_version %r," % removed_in_version
msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code=code_prefix + '-deprecated-version',
msg=msg,
)
except ValueError as e:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has an invalid removed_in_version number %r: %s" % (removed_in_version, e)
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
except TypeError:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has an invalid removed_in_version number %r: " % (removed_in_version, )
msg += " error while comparing to version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
if deprecated_aliases is not None:
for deprecated_alias in deprecated_aliases:
if 'name' in deprecated_alias and 'version' in deprecated_alias:
try:
collection_name = deprecated_alias.get('collection_name')
version = self._create_version(str(deprecated_alias['version']), collection_name=collection_name)
if has_version and collection_name == self.collection_name and compare_version >= version:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with removal in version %r," % (
deprecated_alias['name'], deprecated_alias['version'])
msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code=code_prefix + '-deprecated-version',
msg=msg,
)
except ValueError as e:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with invalid removal version %r: %s" % (
deprecated_alias['name'], deprecated_alias['version'], e)
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
except TypeError:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has deprecated aliases '%s' with invalid removal version %r:" % (
deprecated_alias['name'], deprecated_alias['version'])
msg += " error while comparing to version of %s" % version_of_what
self.reporter.error(
path=self.object_path,
code='invalid-deprecated-version',
msg=msg,
)
aliases = data.get('aliases', [])
if arg in aliases:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is specified as its own alias"
self.reporter.error(
path=self.object_path,
code='parameter-alias-self',
msg=msg
)
if len(aliases) > len(set(aliases)):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has at least one alias specified multiple times in aliases"
self.reporter.error(
path=self.object_path,
code='parameter-alias-repeated',
msg=msg
)
if not context and arg == 'state':
bad_states = set(['list', 'info', 'get']) & set(data.get('choices', set()))
for bad_state in bad_states:
self.reporter.error(
path=self.object_path,
code='parameter-state-invalid-choice',
msg="Argument 'state' includes the value '%s' as a choice" % bad_state)
if not data.get('removed_in_version', None) and not data.get('removed_at_date', None):
args_from_argspec.add(arg)
args_from_argspec.update(aliases)
else:
deprecated_args_from_argspec.add(arg)
deprecated_args_from_argspec.update(aliases)
if arg == 'provider' and self.object_path.startswith('lib/ansible/modules/network/'):
if data.get('options') is not None and not isinstance(data.get('options'), Mapping):
self.reporter.error(
path=self.object_path,
code='invalid-argument-spec-options',
msg="Argument 'options' in argument_spec['provider'] must be a dictionary/hash when used",
)
elif data.get('options'):
# Record provider options from network modules, for later comparison
for provider_arg, provider_data in data.get('options', {}).items():
provider_args.add(provider_arg)
provider_args.update(provider_data.get('aliases', []))
if data.get('required') and data.get('default', object) != object:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is marked as required but specifies a default. Arguments with a" \
" default should not be marked as required"
self.reporter.error(
path=self.object_path,
code='no-default-for-required-parameter',
msg=msg
)
if arg in provider_args:
# Provider args are being removed from network module top level
# don't validate docs<->arg_spec checks below
continue
_type = data.get('type', 'str')
if callable(_type):
_type_checker = _type
else:
_type_checker = DEFAULT_TYPE_VALIDATORS.get(_type)
_elements = data.get('elements')
if (_type == 'list') and not _elements:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines type as list but elements is not defined"
self.reporter.error(
path=self.object_path,
code='parameter-list-no-elements',
msg=msg
)
if _elements:
if not callable(_elements):
DEFAULT_TYPE_VALIDATORS.get(_elements)
if _type != 'list':
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines elements as %s but it is valid only when value of parameter type is list" % _elements
self.reporter.error(
path=self.object_path,
code='parameter-invalid-elements',
msg=msg
)
arg_default = None
if 'default' in data and data['default'] is not None:
try:
with CaptureStd():
arg_default = _type_checker(data['default'])
except (Exception, SystemExit):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines default as (%r) but this is incompatible with parameter type %r" % (data['default'], _type)
self.reporter.error(
path=self.object_path,
code='incompatible-default-type',
msg=msg
)
continue
doc_options_args = []
for alias in sorted(set([arg] + list(aliases))):
if alias in doc_options:
doc_options_args.append(alias)
if len(doc_options_args) == 0:
# Undocumented arguments will be handled later (search for undocumented-parameter)
doc_options_arg = {}
doc_option_name = None
else:
doc_option_name = doc_options_args[0]
doc_options_arg = doc_options[doc_option_name]
if len(doc_options_args) > 1:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " with aliases %s is documented multiple times, namely as %s" % (
", ".join([("'%s'" % alias) for alias in aliases]),
", ".join([("'%s'" % alias) for alias in doc_options_args])
)
self.reporter.error(
path=self.object_path,
code='parameter-documented-multiple-times',
msg=msg
)
all_aliases = set(aliases + [arg])
all_docs_aliases = set(
([doc_option_name] if doc_option_name is not None else [])
+
(doc_options_arg['aliases'] if isinstance(doc_options_arg.get('aliases'), list) else [])
)
if all_docs_aliases and all_aliases != all_docs_aliases:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has names %s, but its documentation has names %s" % (
", ".join([("'%s'" % alias) for alias in sorted(all_aliases)]),
", ".join([("'%s'" % alias) for alias in sorted(all_docs_aliases)])
)
self.reporter.error(
path=self.object_path,
code='parameter-documented-aliases-differ',
msg=msg
)
try:
doc_default = None
if 'default' in doc_options_arg and doc_options_arg['default'] is not None:
with CaptureStd():
doc_default = _type_checker(doc_options_arg['default'])
except (Exception, SystemExit):
msg = "Argument '%s' in documentation" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines default as (%r) but this is incompatible with parameter type %r" % (doc_options_arg.get('default'), _type)
self.reporter.error(
path=self.object_path,
code='doc-default-incompatible-type',
msg=msg
)
continue
if arg_default != doc_default:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines default as (%r) but documentation defines default as (%r)" % (arg_default, doc_default)
self.reporter.error(
path=self.object_path,
code='doc-default-does-not-match-spec',
msg=msg
)
doc_type = doc_options_arg.get('type')
if 'type' in data and data['type'] is not None:
if doc_type is None:
if not arg.startswith('_'): # hidden parameter, for example _raw_params
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines type as %r but documentation doesn't define type" % (data['type'])
self.reporter.error(
path=self.object_path,
code='parameter-type-not-in-doc',
msg=msg
)
elif data['type'] != doc_type:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines type as %r but documentation defines type as %r" % (data['type'], doc_type)
self.reporter.error(
path=self.object_path,
code='doc-type-does-not-match-spec',
msg=msg
)
else:
if doc_type is None:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " uses default type ('str') but documentation doesn't define type"
self.reporter.error(
path=self.object_path,
code='doc-missing-type',
msg=msg
)
elif doc_type != 'str':
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " implies type as 'str' but documentation defines as %r" % doc_type
self.reporter.error(
path=self.object_path,
code='implied-parameter-type-mismatch',
msg=msg
)
doc_choices = []
try:
for choice in doc_options_arg.get('choices', []):
try:
with CaptureStd():
doc_choices.append(_type_checker(choice))
except (Exception, SystemExit):
msg = "Argument '%s' in documentation" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
self.reporter.error(
path=self.object_path,
code='doc-choices-incompatible-type',
msg=msg
)
raise StopIteration()
except StopIteration:
continue
arg_choices = []
try:
for choice in data.get('choices', []):
try:
with CaptureStd():
arg_choices.append(_type_checker(choice))
except (Exception, SystemExit):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
self.reporter.error(
path=self.object_path,
code='incompatible-choices',
msg=msg
)
raise StopIteration()
except StopIteration:
continue
if not compare_unordered_lists(arg_choices, doc_choices):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " defines choices as (%r) but documentation defines choices as (%r)" % (arg_choices, doc_choices)
self.reporter.error(
path=self.object_path,
code='doc-choices-do-not-match-spec',
msg=msg
)
doc_required = doc_options_arg.get('required', False)
data_required = data.get('required', False)
if (doc_required or data_required) and not (doc_required and data_required):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
if doc_required:
msg += " is not required, but is documented as being required"
else:
msg += " is required, but is not documented as being required"
self.reporter.error(
path=self.object_path,
code='doc-required-mismatch',
msg=msg
)
doc_elements = doc_options_arg.get('elements', None)
doc_type = doc_options_arg.get('type', 'str')
data_elements = data.get('elements', None)
if (doc_elements or data_elements) and not (doc_elements == data_elements):
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
if data_elements:
msg += " specifies elements as %s," % data_elements
else:
msg += " does not specify elements,"
if doc_elements:
msg += "but elements is documented as being %s" % doc_elements
else:
msg += "but elements is not documented"
self.reporter.error(
path=self.object_path,
code='doc-elements-mismatch',
msg=msg
)
spec_suboptions = data.get('options')
doc_suboptions = doc_options_arg.get('suboptions', {})
if spec_suboptions:
if not doc_suboptions:
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has sub-options but documentation does not define it"
self.reporter.error(
path=self.object_path,
code='missing-suboption-docs',
msg=msg
)
self._validate_argument_spec({'options': doc_suboptions}, spec_suboptions, kwargs,
context=context + [arg], last_context_spec=data)
for arg in args_from_argspec:
if not str(arg).isidentifier():
msg = "Argument '%s' in argument_spec" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is not a valid python identifier"
self.reporter.error(
path=self.object_path,
code='parameter-invalid',
msg=msg
)
if docs:
args_from_docs = set()
for arg, data in doc_options.items():
args_from_docs.add(arg)
args_from_docs.update(data.get('aliases', []))
args_missing_from_docs = args_from_argspec.difference(args_from_docs)
docs_missing_from_args = args_from_docs.difference(args_from_argspec | deprecated_args_from_argspec)
for arg in args_missing_from_docs:
if arg in provider_args:
# Provider args are being removed from network module top level
# So they are likely not documented on purpose
continue
msg = "Argument '%s'" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is listed in the argument_spec, but not documented in the module documentation"
self.reporter.error(
path=self.object_path,
code='undocumented-parameter',
msg=msg
)
for arg in docs_missing_from_args:
msg = "Argument '%s'" % arg
if context:
msg += " found in %s" % " -> ".join(context)
msg += " is listed in DOCUMENTATION.options, but not accepted by the module argument_spec"
self.reporter.error(
path=self.object_path,
code='nonexistent-parameter-documented',
msg=msg
)
def _check_for_new_args(self, doc):
if not self.base_module:
return
with CaptureStd():
try:
existing_doc, dummy_examples, dummy_return, existing_metadata = get_docstring(
os.path.abspath(self.base_module), fragment_loader, verbose=True, collection_name=self.collection_name,
is_module=self.plugin_type == 'module')
existing_options = existing_doc.get('options', {}) or {}
except AssertionError:
fragment = doc['extends_documentation_fragment']
self.reporter.warning(
path=self.object_path,
code='missing-existing-doc-fragment',
msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment
)
return
except Exception as e:
self.reporter.warning_trace(
path=self.object_path,
tracebk=e
)
self.reporter.warning(
path=self.object_path,
code='unknown-doc-fragment',
msg=('Unknown pre-existing DOCUMENTATION error, see TRACE. Submodule refs may need updated')
)
return
try:
mod_collection_name = existing_doc.get('version_added_collection')
mod_version_added = self._create_strict_version(
str(existing_doc.get('version_added', '0.0')),
collection_name=mod_collection_name)
except ValueError:
mod_collection_name = self.collection_name
mod_version_added = self._create_strict_version('0.0')
options = doc.get('options', {}) or {}
should_be = '.'.join(ansible_version.split('.')[:2])
strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
for option, details in options.items():
try:
names = [option] + details.get('aliases', [])
except (TypeError, AttributeError):
# Reporting of this syntax error will be handled by schema validation.
continue
if any(name in existing_options for name in names):
# The option already existed. Make sure version_added didn't change.
for name in names:
existing_collection_name = existing_options.get(name, {}).get('version_added_collection')
existing_version = existing_options.get(name, {}).get('version_added')
if existing_version:
break
current_collection_name = details.get('version_added_collection')
current_version = details.get('version_added')
if current_collection_name != existing_collection_name:
self.reporter.error(
path=self.object_path,
code='option-incorrect-version-added-collection',
msg=('version_added for existing option (%s) should '
'belong to collection %r. Currently belongs to %r' %
(option, current_collection_name, existing_collection_name))
)
elif str(current_version) != str(existing_version):
self.reporter.error(
path=self.object_path,
code='option-incorrect-version-added',
msg=('version_added for existing option (%s) should '
'be %r. Currently %r' %
(option, existing_version, current_version))
)
continue
try:
collection_name = details.get('version_added_collection')
version_added = self._create_strict_version(
str(details.get('version_added', '0.0')),
collection_name=collection_name)
except ValueError as e:
# already reported during schema validation
continue
builtin = self.collection_name == 'ansible.builtin' and collection_name in ('ansible.builtin', None)
if not builtin and collection_name != self.collection_name:
continue
if (strict_ansible_version != mod_version_added and
(version_added < strict_ansible_version or
strict_ansible_version < version_added)):
self.reporter.error(
path=self.object_path,
code='option-incorrect-version-added',
msg=('version_added for new option (%s) should '
'be %r. Currently %r' %
(option, should_be, version_added))
)
return existing_doc
@staticmethod
def is_on_rejectlist(path):
base_name = os.path.basename(path)
file_name = os.path.splitext(base_name)[0]
if file_name.startswith('_') and os.path.islink(path):
return True
if not frozenset((base_name, file_name)).isdisjoint(ModuleValidator.REJECTLIST):
return True
for pat in ModuleValidator.REJECTLIST_PATTERNS:
if fnmatch(base_name, pat):
return True
return False
def validate(self):
super(ModuleValidator, self).validate()
if not self._python_module() and not self._powershell_module() and not self._sidecar_doc():
self.reporter.error(
path=self.object_path,
code='invalid-extension',
msg=('Official Ansible modules must have a .py '
'extension for python modules or a .ps1 '
'for powershell modules')
)
if self._python_module() and self.ast is None:
self.reporter.error(
path=self.object_path,
code='python-syntax-error',
msg='Python SyntaxError while parsing module'
)
try:
compile(self.text, self.path, 'exec')
except Exception:
self.reporter.trace(
path=self.object_path,
tracebk=traceback.format_exc()
)
return
end_of_deprecation_should_be_removed_only = False
doc_info = None
if self._python_module() or self._sidecar_doc():
doc_info, docs = self._validate_docs()
# See if current version => deprecated.removed_in, ie, should be docs only
if docs and docs.get('deprecated', False):
if 'removed_in' in docs['deprecated']:
removed_in = None
collection_name = docs['deprecated'].get('removed_from_collection')
version = docs['deprecated']['removed_in']
if collection_name != self.collection_name:
self.reporter.error(
path=self.object_path,
code='invalid-module-deprecation-source',
msg=('The deprecation version for a module must be added in this collection')
)
else:
try:
removed_in = self._create_strict_version(str(version), collection_name=collection_name)
except ValueError as e:
self.reporter.error(
path=self.object_path,
code='invalid-module-deprecation-version',
msg=('The deprecation version %r cannot be parsed: %s' % (version, e))
)
if removed_in:
if not self.collection:
strict_ansible_version = self._create_strict_version(
'.'.join(ansible_version.split('.')[:2]), self.collection_name)
end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
if end_of_deprecation_should_be_removed_only:
self.reporter.error(
path=self.object_path,
code='ansible-deprecated-module',
msg='Module is marked for removal in version %s of Ansible when the current version is %s' % (
version, ansible_version),
)
elif self.collection_version:
strict_ansible_version = self.collection_version
end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
if end_of_deprecation_should_be_removed_only:
self.reporter.error(
path=self.object_path,
code='collection-deprecated-module',
msg='Module is marked for removal in version %s of this collection when the current version is %s' % (
version, self.collection_version_str),
)
# handle deprecation by date
if 'removed_at_date' in docs['deprecated']:
try:
removed_at_date = docs['deprecated']['removed_at_date']
if parse_isodate(removed_at_date, allow_date=True) < datetime.date.today():
msg = "Module's deprecated.removed_at_date date '%s' is before today" % removed_at_date
self.reporter.error(path=self.object_path, code='deprecated-date', msg=msg)
except ValueError:
# This happens if the date cannot be parsed. This is already checked by the schema.
pass
if self._python_module() and not self._just_docs() and not end_of_deprecation_should_be_removed_only:
if self.plugin_type == 'module':
self._validate_ansible_module_call(docs)
self._check_for_sys_exit()
self._find_rejectlist_imports()
if self.plugin_type == 'module':
self._find_module_utils()
self._find_has_import()
if doc_info:
first_callable = self._get_first_callable() or 1000000 # use a bogus "high" line number if no callable exists
self._ensure_imports_below_docs(doc_info, first_callable)
if self.plugin_type == 'module':
self._check_for_subprocess()
self._check_for_os_call()
if self._powershell_module():
self._validate_ps_replacers()
docs_path = self._find_ps_docs_file()
# We can only validate PowerShell arg spec if it is using the new Ansible.Basic.AnsibleModule util
pattern = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*Ansible\.Basic'
if re.search(pattern, self.text) and self.object_name not in self.PS_ARG_VALIDATE_REJECTLIST:
with ModuleValidator(docs_path, git_cache=self.git_cache) as docs_mv:
docs = docs_mv._validate_docs()[1]
self._validate_ansible_module_call(docs)
self._check_gpl3_header()
if not self._just_docs() and not self._sidecar_doc() and not end_of_deprecation_should_be_removed_only:
if self.plugin_type == 'module':
self._check_interpreter()
| ModuleValidator |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_superfences.py | {
"start": 3308,
"end": 4125
} | class ____(util.MdCase):
"""Test Highlight's stripping of new lines."""
extension = ['pymdownx.highlight', 'pymdownx.superfences']
extension_configs = {'pymdownx.highlight': {'stripnl': False}}
def test_no_stripnl(self):
"""Test no stripping of leading and trailing new lines."""
self.check_markdown(
r'''
```py
import foo
import bar
```
''',
r'''
<div class="highlight"><pre><span></span><code>
<span class="kn">import</span><span class="w"> </span><span class="nn">foo</span>
<span class="kn">import</span><span class="w"> </span><span class="nn">bar</span>
</code></pre></div>
''',
True
)
| TestHighlightStrip |
python | eventlet__eventlet | tests/dagpool_test.py | {
"start": 19429,
"end": 23091
} | class ____(Exception):
pass
def raiser(key, results, exc):
raise exc
def consumer(key, results):
for k, v in results:
pass
return True
def test_waitall_exc():
pool = DAGPool()
pool.spawn("a", (), raiser, BogusError("bogus"))
try:
pool.waitall()
except PropagateError as err:
assert_equal(err.key, "a")
assert isinstance(err.exc, BogusError), \
"exc attribute is {}, not BogusError".format(err.exc)
assert_equal(str(err.exc), "bogus")
msg = str(err)
assert_in("PropagateError(a)", msg)
assert_in("BogusError", msg)
assert_in("bogus", msg)
def test_propagate_exc():
pool = DAGPool()
pool.spawn("a", (), raiser, BogusError("bogus"))
pool.spawn("b", "a", consumer)
pool.spawn("c", "b", consumer)
try:
pool["c"]
except PropagateError as errc:
assert_equal(errc.key, "c")
errb = errc.exc
assert_equal(errb.key, "b")
erra = errb.exc
assert_equal(erra.key, "a")
assert isinstance(erra.exc, BogusError), \
"exc attribute is {}, not BogusError".format(erra.exc)
assert_equal(str(erra.exc), "bogus")
msg = str(errc)
assert_in("PropagateError(a)", msg)
assert_in("PropagateError(b)", msg)
assert_in("PropagateError(c)", msg)
assert_in("BogusError", msg)
assert_in("bogus", msg)
def test_wait_each_exc():
pool = DAGPool()
pool.spawn("a", (), raiser, BogusError("bogus"))
with assert_raises(PropagateError):
for k, v in pool.wait_each("a"):
pass
with assert_raises(PropagateError):
for k, v in pool.wait_each():
pass
def test_post_get_exc():
pool = DAGPool()
bogua = BogusError("bogua")
pool.post("a", bogua)
assert isinstance(pool.get("a"), BogusError), \
"should have delivered BogusError instead of raising"
bogub = PropagateError("b", BogusError("bogub"))
pool.post("b", bogub)
with assert_raises(PropagateError):
pool.get("b")
# Notice that although we have both "a" and "b" keys, items() is
# guaranteed to raise PropagateError because one of them is
# PropagateError. Other values don't matter.
with assert_raises(PropagateError):
pool.items()
# Similar remarks about waitall() and wait().
with assert_raises(PropagateError):
pool.waitall()
with assert_raises(PropagateError):
pool.wait()
with assert_raises(PropagateError):
pool.wait("b")
with assert_raises(PropagateError):
pool.wait("ab")
# but if we're only wait()ing for success results, no exception
assert isinstance(pool.wait("a")["a"], BogusError), \
"should have delivered BogusError instead of raising"
# wait_each() is guaranteed to eventually raise PropagateError, though you
# may obtain valid values before you hit it.
with assert_raises(PropagateError):
for k, v in pool.wait_each():
pass
# wait_each_success() filters
assert_equal(dict(pool.wait_each_success()), dict(a=bogua))
assert_equal(dict(pool.wait_each_success("ab")), dict(a=bogua))
assert_equal(dict(pool.wait_each_success("a")), dict(a=bogua))
assert_equal(dict(pool.wait_each_success("b")), {})
# wait_each_exception() filters the other way
assert_equal(dict(pool.wait_each_exception()), dict(b=bogub))
assert_equal(dict(pool.wait_each_exception("ab")), dict(b=bogub))
assert_equal(dict(pool.wait_each_exception("a")), {})
assert_equal(dict(pool.wait_each_exception("b")), dict(b=bogub))
| BogusError |
python | neetcode-gh__leetcode | python/0160-intersection-of-two-linked-lists.py | {
"start": 136,
"end": 413
} | class ____:
def getIntersectionNode(
self, headA: ListNode, headB: ListNode
) -> Optional[ListNode]:
l1, l2 = headA, headB
while l1 != l2:
l1 = l1.next if l1 else headB
l2 = l2.next if l2 else headA
return l1
| Solution |
python | pydantic__pydantic | tests/typechecking/computed_field.py | {
"start": 88,
"end": 799
} | class ____(BaseModel):
side: float
# mypy limitation, see:
# https://mypy.readthedocs.io/en/stable/error_code_list.html#decorator-preceding-property-not-supported-prop-decorator
@computed_field # type: ignore[prop-decorator]
@property
def area(self) -> float:
return self.side**2
@computed_field # type: ignore[prop-decorator]
@cached_property
def area_cached(self) -> float:
return self.side**2
sq = Square(side=10)
y = 12.4 + sq.area
z = 'x' + sq.area # type: ignore[operator] # pyright: ignore[reportOperatorIssue]
y_cached = 12.4 + sq.area_cached
z_cached = 'x' + sq.area_cached # type: ignore[operator] # pyright: ignore[reportOperatorIssue]
| Square |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 17388,
"end": 17455
} | class ____(AtomicRule, ABC):
a: Expr
b: Expr
@dataclass
| IRule |
python | pandas-dev__pandas | asv_bench/benchmarks/io/csv.py | {
"start": 16363,
"end": 17348
} | class ____:
fname = "__test__.csv"
number = 5
def setup(self):
lines = []
line_length = 128
start_char = " "
end_char = "\U00010080"
# This for loop creates a list of 128-char strings
# consisting of consecutive Unicode chars
for lnum in range(ord(start_char), ord(end_char), line_length):
line = "".join([chr(c) for c in range(lnum, lnum + 0x80)]) + "\n"
try:
line.encode("utf-8")
except UnicodeEncodeError:
# Some 16-bit words are not valid Unicode chars and must be skipped
continue
lines.append(line)
df = DataFrame(lines)
df = concat([df for n in range(100)], ignore_index=True)
df.to_csv(self.fname, index=False, header=False, encoding="utf-8")
def time_read_memmapped_utf8(self):
read_csv(self.fname, header=None, memory_map=True, encoding="utf-8", engine="c")
| ReadCSVMemMapUTF8 |
python | sqlalchemy__sqlalchemy | examples/performance/__init__.py | {
"start": 7702,
"end": 13076
} | class ____:
tests = []
_setup = None
_setup_once = None
name = None
num = 0
def __init__(self, options):
self.test = options.test
self.dburl = options.dburl
self.profile = options.profile
self.dump = options.dump
self.raw = options.raw
self.callers = options.callers
self.num = options.num
self.echo = options.echo
self.sort = options.sort
self.gc = options.gc
self.stats = []
@classmethod
def init(cls, name, num):
cls.name = name
cls.num = num
@classmethod
def profile(cls, fn):
if cls.name is None:
raise ValueError(
"Need to call Profile.init(<suitename>, <default_num>) first."
)
cls.tests.append(fn)
return fn
@classmethod
def setup(cls, fn):
if cls._setup is not None:
raise ValueError("setup function already set to %s" % cls._setup)
cls._setup = staticmethod(fn)
return fn
@classmethod
def setup_once(cls, fn):
if cls._setup_once is not None:
raise ValueError(
"setup_once function already set to %s" % cls._setup_once
)
cls._setup_once = staticmethod(fn)
return fn
def run(self):
if self.test:
tests = [fn for fn in self.tests if fn.__name__ in self.test]
if not tests:
raise ValueError("No such test(s): %s" % self.test)
else:
tests = self.tests
if self._setup_once:
print("Running setup once...")
self._setup_once(self.dburl, self.echo, self.num)
print("Tests to run: %s" % ", ".join([t.__name__ for t in tests]))
for test in tests:
self._run_test(test)
self.stats[-1].report()
def _run_with_profile(self, fn, sort):
pr = cProfile.Profile()
pr.enable()
try:
result = fn(self.num)
finally:
pr.disable()
stats = pstats.Stats(pr)
self.stats.append(TestResult(self, fn, stats=stats, sort=sort))
return result
def _run_with_time(self, fn):
now = time.time()
try:
return fn(self.num)
finally:
total = time.time() - now
self.stats.append(TestResult(self, fn, total_time=total))
def _run_test(self, fn):
if self._setup:
self._setup(self.dburl, self.echo, self.num)
if self.gc:
# gc.set_debug(gc.DEBUG_COLLECTABLE)
gc.set_debug(gc.DEBUG_STATS)
if self.profile or self.dump:
self._run_with_profile(fn, self.sort)
else:
self._run_with_time(fn)
if self.gc:
gc.set_debug(0)
@classmethod
def main(cls):
parser = argparse.ArgumentParser("python -m examples.performance")
if cls.name is None:
parser.add_argument(
"name", choices=cls._suite_names(), help="suite to run"
)
if len(sys.argv) > 1:
potential_name = sys.argv[1]
try:
__import__(__name__ + "." + potential_name)
except ImportError:
pass
parser.add_argument(
"--test", nargs="+", type=str, help="run specific test(s)"
)
parser.add_argument(
"--dburl",
type=str,
default="sqlite:///profile.db",
help="database URL, default sqlite:///profile.db",
)
parser.add_argument(
"--num",
type=int,
default=cls.num,
help="Number of iterations/items/etc for tests; "
"default is %d module-specific" % cls.num,
)
parser.add_argument(
"--profile",
action="store_true",
help="run profiling and dump call counts",
)
parser.add_argument(
"--sort",
type=str,
default="cumulative",
help="profiling sort, defaults to cumulative",
)
parser.add_argument(
"--dump",
action="store_true",
help="dump full call profile (implies --profile)",
)
parser.add_argument(
"--raw",
type=str,
help="dump raw profile data to file (implies --profile)",
)
parser.add_argument(
"--callers",
action="store_true",
help="print callers as well (implies --dump)",
)
parser.add_argument(
"--gc", action="store_true", help="turn on GC debug stats"
)
parser.add_argument(
"--echo", action="store_true", help="Echo SQL output"
)
args = parser.parse_args()
args.dump = args.dump or args.callers
args.profile = args.profile or args.dump or args.raw
if cls.name is None:
__import__(__name__ + "." + args.name)
Profiler(args).run()
@classmethod
def _suite_names(cls):
suites = []
for file_ in os.listdir(os.path.dirname(__file__)):
match = re.match(r"^([a-z].*).py$", file_)
if match:
suites.append(match.group(1))
return suites
| Profiler |
python | mlflow__mlflow | examples/rest_api/mlflow_tracking_rest_api.py | {
"start": 631,
"end": 4353
} | class ____:
def __init__(self, hostname, port, experiment_id):
self.base_url = "http://" + hostname + ":" + str(port) + "/api/2.0/mlflow"
self.experiment_id = experiment_id
self.run_id = self.create_run()
def create_run(self):
"""Create a new run for tracking."""
url = self.base_url + "/runs/create"
# user_id is deprecated and will be removed from the API in a future release
payload = {
"experiment_id": self.experiment_id,
"start_time": get_current_time_millis(),
"user_id": _get_user_id(),
}
r = requests.post(url, json=payload)
run_id = None
if r.status_code == 200:
run_id = r.json()["run"]["info"]["run_uuid"]
else:
print("Creating run failed!")
return run_id
def search_experiments(self):
"""Get all experiments."""
url = self.base_url + "/experiments/search"
r = requests.get(url)
experiments = None
if r.status_code == 200:
experiments = r.json()["experiments"]
return experiments
def log_param(self, param):
"""Log a parameter dict for the given run."""
url = self.base_url + "/runs/log-parameter"
payload = {"run_id": self.run_id, "key": param["key"], "value": param["value"]}
r = requests.post(url, json=payload)
return r.status_code
def log_metric(self, metric):
"""Log a metric dict for the given run."""
url = self.base_url + "/runs/log-metric"
payload = {
"run_id": self.run_id,
"key": metric["key"],
"value": metric["value"],
"timestamp": metric["timestamp"],
"step": metric["step"],
}
r = requests.post(url, json=payload)
return r.status_code
def _get_user_id():
"""Get the ID of the user for the current run."""
try:
return pwd.getpwuid(os.getuid())[0]
except ImportError:
return _DEFAULT_USER_ID
if __name__ == "__main__":
# Command-line arguments
parser = argparse.ArgumentParser(description="MLflow REST API Example")
parser.add_argument(
"--hostname",
type=str,
default="localhost",
dest="hostname",
help="MLflow server hostname/ip (default: localhost)",
)
parser.add_argument(
"--port",
type=int,
default=5000,
dest="port",
help="MLflow server port number (default: 5000)",
)
parser.add_argument(
"--experiment-id",
type=int,
default=0,
dest="experiment_id",
help="Experiment ID (default: 0)",
)
print("Running mlflow_tracking_rest_api.py")
args = parser.parse_args()
mlflow_rest = MlflowTrackingRestApi(args.hostname, args.port, args.experiment_id)
# Parameter is a key/val pair (str types)
param = {"key": "alpha", "value": "0.1980"}
status_code = mlflow_rest.log_param(param)
if status_code == 200:
print(
"Successfully logged parameter: {} with value: {}".format(param["key"], param["value"])
)
else:
print("Logging parameter failed!")
# Metric is a key/val pair (key/val have str/float types)
metric = {
"key": "precision",
"value": 0.769,
"timestamp": get_current_time_millis(),
"step": 1,
}
status_code = mlflow_rest.log_metric(metric)
if status_code == 200:
print(
"Successfully logged parameter: {} with value: {}".format(
metric["key"], metric["value"]
)
)
else:
print("Logging metric failed!")
| MlflowTrackingRestApi |
python | pypa__pipenv | pipenv/cli/options.py | {
"start": 2330,
"end": 16352
} | class ____:
def __init__(self):
self.dev_only = False
pass_state = make_pass_decorator(State, ensure=True)
def index_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.index = value
return value
return option(
"-i",
"--index",
expose_value=False,
envvar="PIP_INDEX_URL",
help="Specify target package index by url or index name from Pipfile.",
nargs=1,
callback=callback,
)(f)
def editable_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.installstate.editables.extend(value)
return value
return option(
"-e",
"--editable",
expose_value=False,
multiple=True,
callback=callback,
type=click_types.Path(file_okay=False),
help="An editable Python package URL or path, often to a VCS repository.",
)(f)
def ignore_pipfile_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.installstate.ignore_pipfile = value
return value
return option(
"--ignore-pipfile",
is_flag=True,
default=False,
expose_value=False,
help="Ignore Pipfile when installing, using the Pipfile.lock.",
callback=callback,
type=click_types.BOOL,
show_envvar=True,
)(f)
def _dev_option(f, help_text):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.installstate.dev = value
return value
return option(
"--dev",
"-d",
is_flag=True,
default=False,
type=click_types.BOOL,
help=help_text,
callback=callback,
expose_value=False,
show_envvar=True,
)(f)
def categories_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
if value:
state.installstate.categories += re.split(r", *| ", value)
return value
return option(
"--categories",
nargs=1,
required=False,
callback=callback,
expose_value=True,
type=click_types.STRING,
)(f)
def install_dev_option(f):
return _dev_option(f, "Install both develop and default packages")
def lock_dev_option(f):
return _dev_option(f, "Generate both develop and default requirements")
def uninstall_dev_option(f):
return _dev_option(
f, "Deprecated (as it has no effect). May be removed in a future release."
)
def pre_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.installstate.pre = value
return value
return option(
"--pre",
is_flag=True,
default=False,
help="Allow pre-releases.",
callback=callback,
type=click_types.BOOL,
expose_value=False,
)(f)
def package_arg(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.installstate.packages.extend(value)
return value
return argument(
"packages",
nargs=-1,
callback=callback,
expose_value=True,
type=click_types.STRING,
)(f)
def extra_pip_args(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
if value:
state.installstate.extra_pip_args += value.split(" ")
return value
return option(
"--extra-pip-args",
nargs=1,
required=False,
callback=callback,
expose_value=True,
type=click_types.STRING,
)(f)
def python_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
if value is not None:
state.python = validate_python_path(ctx, param, value)
return value
return option(
"--python",
default="",
nargs=1,
callback=callback,
help="Specify which version of Python virtualenv should use.",
expose_value=False,
allow_from_autoenv=False,
type=click_types.STRING,
)(f)
def pypi_mirror_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
value = value or state.project.s.PIPENV_PYPI_MIRROR
if value is not None:
state.pypi_mirror = validate_pypi_mirror(ctx, param, value)
return value
return option(
"--pypi-mirror",
nargs=1,
callback=callback,
help="Specify a PyPI mirror.",
expose_value=False,
)(f)
def verbose_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
if value:
if state.quiet:
raise BadArgumentUsage(
"--verbose and --quiet are mutually exclusive! Please choose one!",
ctx=ctx,
)
state.verbose = True
setup_verbosity(ctx, param, 1)
return option(
"--verbose",
"-v",
is_flag=True,
expose_value=False,
callback=callback,
help="Verbose mode.",
type=click_types.BOOL,
)(f)
def quiet_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
if value:
if state.verbose:
raise BadArgumentUsage(
"--verbose and --quiet are mutually exclusive! Please choose one!",
ctx=ctx,
)
state.quiet = True
setup_verbosity(ctx, param, -1)
return option(
"--quiet",
"-q",
is_flag=True,
expose_value=False,
callback=callback,
help="Quiet mode.",
type=click_types.BOOL,
)(f)
def site_packages_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
validate_bool_or_none(ctx, param, value)
state.site_packages = value
return value
return option(
"--site-packages/--no-site-packages",
is_flag=True,
default=None,
help="Enable site-packages for the virtualenv.",
callback=callback,
expose_value=False,
show_envvar=True,
)(f)
def clear_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.clear = value
return value
return option(
"--clear",
is_flag=True,
callback=callback,
type=click_types.BOOL,
help="Clears caches (pipenv, pip).",
expose_value=False,
show_envvar=True,
)(f)
def system_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
if value is not None:
state.system = value
return value
return option(
"--system",
is_flag=True,
default=False,
help="System pip management.",
callback=callback,
type=click_types.BOOL,
expose_value=False,
show_envvar=True,
)(f)
def requirementstxt_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
if value:
state.installstate.requirementstxt = value
return value
return option(
"--requirements",
"-r",
nargs=1,
default="",
expose_value=False,
help="Import a requirements.txt file.",
callback=callback,
type=click_types.Path(dir_okay=False),
)(f)
def dev_only_flag(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
if value:
state.lockoptions.dev_only = value
return value
return option(
"--dev-only",
default=False,
is_flag=True,
expose_value=False,
help="Emit development dependencies *only* (overrides --dev)",
callback=callback,
)(f)
def deploy_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.installstate.deploy = value
return value
return option(
"--deploy",
is_flag=True,
default=False,
type=click_types.BOOL,
help="Abort if the Pipfile.lock is out-of-date, or Python version is wrong.",
callback=callback,
expose_value=False,
)(f)
def lock_only_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.installstate.lock_only = value
return value
return option(
"--lock-only",
is_flag=True,
default=False,
help="Only update lock file (specifiers not added to Pipfile).",
callback=callback,
type=click_types.BOOL,
expose_value=False,
)(f)
def setup_verbosity(ctx, param, value):
if not value:
return
ctx.ensure_object(State).project.s.PIPENV_VERBOSITY = value
def validate_python_path(ctx, param, value):
# Validating the Python path is complicated by accepting a number of
# friendly options: the default will be boolean False to enable
# autodetection but it may also be a value which will be searched in
# the path or an absolute path. To report errors as early as possible
# we'll report absolute paths which do not exist:
if isinstance(value, (str, bytes)):
path = Path(value)
if path.is_absolute() and not path.is_file():
raise BadParameter(f"Expected Python at path {value} does not exist")
return value
def validate_bool_or_none(ctx, param, value):
if value is not None:
return click_types.BOOL(value)
return False
def validate_pypi_mirror(ctx, param, value):
if value and not is_valid_url(value):
raise BadParameter(f"Invalid PyPI mirror URL: {value}")
return value
def skip_lock_option(f):
def callback(ctx, param, value):
if value:
err.print(
"The flag --skip-lock has been reintroduced (but is not recommended). "
"Without the lock resolver it is difficult to manage multiple package indexes, and hash checking is not provided. "
"However it can help manage installs with current deficiencies in locking across platforms.",
style="yellow bold",
)
state = ctx.ensure_object(State)
state.installstate.skip_lock = value
return value
return option(
"--skip-lock",
is_flag=True,
default=False,
expose_value=True,
envvar="PIPENV_SKIP_LOCK",
help="Install from Pipfile bypassing lock mechanisms.",
callback=callback,
type=click_types.BOOL,
show_envvar=True,
)(f)
# OLD REMOVED COMMANDS THAT WE STILL DISPLAY HELP TEXT FOR WHEN USED #
def keep_outdated_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.installstate.keep_outdated = value
if value:
err.print(
"The flag --keep-outdated has been removed. "
"The flag did not respect package resolver results and lead to inconsistent lock files. "
"Consider using the `pipenv upgrade` command to selectively upgrade packages.",
style="yellow bold",
)
raise ValueError("The flag --keep-outdated flag has been removed.")
return value
return option(
"--keep-outdated",
is_flag=True,
default=False,
expose_value=False,
callback=callback,
type=click_types.BOOL,
show_envvar=True,
hidden=True, # This hides the option from the help text.
)(f)
def selective_upgrade_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.installstate.selective_upgrade = value
if value:
err.print(
"The flag --selective-upgrade has been removed. "
"The flag was buggy and lead to inconsistent lock files. "
"Consider using the `pipenv upgrade` command to selectively upgrade packages.",
style="yellow bold",
)
raise ValueError("The flag --selective-upgrade flag has been removed.")
return value
return option(
"--selective-upgrade",
is_flag=True,
default=False,
type=click_types.BOOL,
help="Update specified packages.",
callback=callback,
expose_value=False,
)(f)
def common_options(f):
f = pypi_mirror_option(f)
f = verbose_option(f)
f = quiet_option(f)
f = clear_option(f)
f = python_option(f)
return f
def install_base_options(f):
f = common_options(f)
f = pre_option(f)
f = extra_pip_args(f)
f = keep_outdated_option(f) # Removed, but still displayed in help text.
return f
def uninstall_options(f):
f = install_base_options(f)
f = categories_option(f)
f = uninstall_dev_option(f)
f = editable_option(f)
f = package_arg(f)
f = skip_lock_option(f) # Removed, but still displayed in help text.
return f
def lock_options(f):
f = install_base_options(f)
f = lock_dev_option(f)
f = dev_only_flag(f)
f = categories_option(f)
return f
def sync_options(f):
f = install_base_options(f)
f = install_dev_option(f)
f = categories_option(f)
return f
def install_options(f):
f = sync_options(f)
f = index_option(f)
f = requirementstxt_option(f)
f = ignore_pipfile_option(f)
f = editable_option(f)
f = package_arg(f)
f = skip_lock_option(f) # Removed, but still display help text.
f = selective_upgrade_option(f) # Removed, but still display help text.
return f
def upgrade_options(f):
f = lock_only_option(f)
return f
def general_options(f):
f = common_options(f)
f = site_packages_option(f)
return f
| LockOptions |
python | doocs__leetcode | solution/0600-0699/0691.Stickers to Spell Word/Solution.py | {
"start": 0,
"end": 812
} | class ____:
def minStickers(self, stickers: List[str], target: str) -> int:
n = len(target)
q = deque([0])
vis = [False] * (1 << n)
vis[0] = True
ans = 0
while q:
for _ in range(len(q)):
cur = q.popleft()
if cur == (1 << n) - 1:
return ans
for s in stickers:
cnt = Counter(s)
nxt = cur
for i, c in enumerate(target):
if (cur >> i & 1) == 0 and cnt[c] > 0:
cnt[c] -= 1
nxt |= 1 << i
if not vis[nxt]:
vis[nxt] = True
q.append(nxt)
ans += 1
return -1
| Solution |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_map_metrics/column_values_non_null.py | {
"start": 1446,
"end": 3195
} | class ____(MetricProvider):
"""A convenience class to provide an alias for easier access to the null count in a column."""
metric_name = "column_values.nonnull.count"
@metric_value(engine=PandasExecutionEngine)
def _pandas(*, metrics, **kwargs):
return metrics[
f"column_values.null.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
]
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(*, metrics, **kwargs):
return metrics[
f"column_values.null.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
]
@metric_value(engine=SparkDFExecutionEngine)
def _spark(*, metrics, **kwargs):
return metrics[
f"column_values.null.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
]
@classmethod
@override
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
dependencies[
f"column_values.null.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
] = MetricConfiguration(
metric_name=f"column_values.null.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_domain_kwargs=metric.metric_domain_kwargs,
)
return dependencies
| ColumnValuesNonNullCount |
python | wandb__wandb | wandb/vendor/pygments/lexers/robotframework.py | {
"start": 10198,
"end": 10524
} | class ____(Tokenizer):
def __init__(self):
Tokenizer.__init__(self)
self._in_arguments = False
def _tokenize(self, value, index):
token = self._in_arguments and ARGUMENT or SYNTAX
if value.upper() in ('IN', 'IN RANGE'):
self._in_arguments = True
return token
| ForLoop |
python | django__django | tests/queries/tests.py | {
"start": 55542,
"end": 56420
} | class ____(TestCase):
def test_ticket7107(self):
# This shouldn't create an infinite loop.
self.assertSequenceEqual(Valid.objects.all(), [])
def test_datetimes_invalid_field(self):
# An error should be raised when QuerySet.datetimes() is passed the
# wrong type of field.
msg = "'name' isn't a DateField, TimeField, or DateTimeField."
with self.assertRaisesMessage(TypeError, msg):
Item.objects.datetimes("name", "month")
def test_ticket22023(self):
with self.assertRaisesMessage(
TypeError, "Cannot call only() after .values() or .values_list()"
):
Valid.objects.values().only()
with self.assertRaisesMessage(
TypeError, "Cannot call defer() after .values() or .values_list()"
):
Valid.objects.values().defer()
| Queries3Tests |
python | doocs__leetcode | solution/0700-0799/0727.Minimum Window Subsequence/Solution.py | {
"start": 0,
"end": 651
} | class ____:
def minWindow(self, s1: str, s2: str) -> str:
m, n = len(s1), len(s2)
f = [[0] * (n + 1) for _ in range(m + 1)]
for i, a in enumerate(s1, 1):
for j, b in enumerate(s2, 1):
if a == b:
f[i][j] = i if j == 1 else f[i - 1][j - 1]
else:
f[i][j] = f[i - 1][j]
p, k = 0, m + 1
for i, a in enumerate(s1, 1):
if a == s2[n - 1] and f[i][n]:
j = f[i][n] - 1
if i - j < k:
k = i - j
p = j
return "" if k > m else s1[p : p + k]
| Solution |
python | weaviate__weaviate-python-client | weaviate/collections/classes/internal.py | {
"start": 17271,
"end": 22946
} | class ____:
"""Dataclass to be used when annotating a generic cross reference property with options for retrieving data from the cross referenced object when querying.
Example:
>>> import typing
>>> import weaviate.classes as wvc
>>>
>>> class One(typing.TypedDict):
... prop: str
>>>
>>> class Two(typing.TypedDict):
... one: typing.Annotated[
... wvc.CrossReference[One],
... wvc.CrossReferenceAnnotation(include_vector=True)
... ]
"""
include_vector: bool = field(default=False)
metadata: Optional[MetadataQuery] = field(default=None)
target_collection: Optional[str] = field(default=None)
def _extract_types_from_reference(
type_: CrossReference[Properties, "References"], field: str
) -> Tuple[Type[Properties], Type["References"]]:
"""Extract first inner type from CrossReference[Properties, References]."""
if get_origin(type_) == _CrossReference:
return cast(Tuple[Type[Properties], Type[References]], get_args(type_))
raise WeaviateInvalidInputError(
f"Type: {type_} of field: {field} is not CrossReference[Properties, References]"
)
def _extract_types_from_annotated_reference(
type_: Annotated[CrossReference[Properties, "References"], CrossReferenceAnnotation],
field: str,
) -> Tuple[Type[Properties], Type["References"]]:
"""Extract inner type from Annotated[CrossReference[Properties, References]]."""
assert get_origin(type_) is Annotated, f"field: {field} with type: {type_} must be annotated"
args = get_args(type_)
inner_type = cast(CrossReference[Properties, References], args[0])
return _extract_types_from_reference(inner_type, field)
def __is_annotated_reference(value: Any) -> bool:
return (
get_origin(value) is Annotated
and len(get_args(value)) == 2
and get_origin(get_args(value)[0]) is _CrossReference
)
def __create_link_to_from_annotated_reference(
link_on: str,
value: Annotated[CrossReference[Properties, "References"], CrossReferenceAnnotation],
) -> Union[_QueryReference, _QueryReferenceMultiTarget]:
"""Create FromReference or FromReferenceMultiTarget from Annotated[CrossReference[Properties], ReferenceAnnotation]."""
assert get_origin(value) is Annotated, (
f"field: {link_on} with type: {value} must be Annotated[CrossReference]"
)
args = cast(List[CrossReference[Properties, References]], get_args(value))
inner_type = args[0]
assert get_origin(inner_type) is _CrossReference, (
f"field: {link_on} with inner_type: {inner_type} must be CrossReference"
)
inner_type_metadata = cast(
Tuple[CrossReferenceAnnotation], getattr(value, "__metadata__", None)
)
annotation = inner_type_metadata[0]
types = _extract_types_from_annotated_reference(value, link_on)
if annotation.target_collection is not None:
return _QueryReferenceMultiTarget(
link_on=link_on,
include_vector=annotation.include_vector,
return_metadata=annotation.metadata,
return_properties=_extract_properties_from_data_model(types[0]),
return_references=_extract_references_from_data_model(types[1]),
target_collection=annotation.target_collection,
)
else:
return _QueryReference(
link_on=link_on,
include_vector=annotation.include_vector,
return_metadata=annotation.metadata,
return_properties=_extract_properties_from_data_model(types[0]),
return_references=_extract_references_from_data_model(types[1]),
)
def __create_link_to_from_reference(
link_on: str,
value: CrossReference[Properties, "References"],
) -> _QueryReference:
"""Create FromReference from CrossReference[Properties]."""
types = _extract_types_from_reference(value, link_on)
return _QueryReference(
link_on=link_on,
return_properties=_extract_properties_from_data_model(types[0]),
return_references=_extract_references_from_data_model(types[1]),
)
def _extract_properties_from_data_model(type_: Type[Properties]) -> PROPERTIES:
"""Extract properties of Properties recursively from Properties.
Checks to see if there is a _Reference[Properties], Annotated[_Reference[Properties]], or _Nested[Properties]
in the data model and lists out the properties as classes readily consumable by the underlying API.
"""
return [
__create_nested_property_from_nested(key, value) if __is_nested(value) else key
for key, value in get_type_hints(type_, include_extras=True).items()
]
def _extract_references_from_data_model(
type_: Type["References"],
) -> Optional[REFERENCES]:
"""Extract references of References recursively from References.
Checks to see if there is a _Reference[References], Annotated[_Reference[References]], or _Nested[References]
in the data model and lists out the references as classes readily consumable by the underlying API.
"""
refs = [
(
__create_link_to_from_annotated_reference(key, value)
if __is_annotated_reference(value)
else __create_link_to_from_reference(key, value)
)
for key, value in get_type_hints(type_, include_extras=True).items()
]
return refs if len(refs) > 0 else None
ReturnProperties: TypeAlias = Union[PROPERTIES, bool, Type[TProperties]]
ReturnReferences: TypeAlias = Union[
Union[_QueryReference, Sequence[_QueryReference]], Type[TReferences]
]
@dataclass
| CrossReferenceAnnotation |
python | doocs__leetcode | solution/3100-3199/3110.Score of a String/Solution.py | {
"start": 0,
"end": 125
} | class ____:
def scoreOfString(self, s: str) -> int:
return sum(abs(a - b) for a, b in pairwise(map(ord, s)))
| Solution |
python | charliermarsh__ruff | crates/ruff_python_parser/resources/valid/statement/class.py | {
"start": 331,
"end": 376
} | class ____[T](): ...
# TypeVar with default
| Test |
python | sqlalchemy__sqlalchemy | examples/dogpile_caching/local_session_caching.py | {
"start": 705,
"end": 3518
} | class ____(CacheBackend):
"""A dogpile backend which will cache objects locally on
the current session.
When used with the query_cache system, the effect is that the objects
in the cache are the same as that within the session - the merge()
is a formality that doesn't actually create a second instance.
This makes it safe to use for updates of data from an identity
perspective (still not ideal for deletes though).
When the session is removed, the cache is gone too, so the cache
is automatically disposed upon session.remove().
"""
def __init__(self, arguments):
self.scoped_session = arguments["scoped_session"]
def get(self, key):
return self._cache_dictionary.get(key, NO_VALUE)
def set(self, key, value):
self._cache_dictionary[key] = value
def delete(self, key):
self._cache_dictionary.pop(key, None)
@property
def _cache_dictionary(self):
"""Return the cache dictionary linked to the current Session."""
sess = self.scoped_session()
try:
cache_dict = sess._cache_dictionary
except AttributeError:
sess._cache_dictionary = cache_dict = {}
return cache_dict
register_backend("sqlalchemy.session", __name__, "ScopedSessionBackend")
if __name__ == "__main__":
# set up a region based on the ScopedSessionBackend,
# pointing to the scoped_session declared in the example
# environment.
regions["local_session"] = make_region().configure(
"sqlalchemy.session", arguments={"scoped_session": Session}
)
from .model import Person
# query to load Person by name, with criterion
# of "person 10"
q = (
select(Person)
.filter(Person.name == "person 10")
.options(FromCache("local_session"))
)
# load from DB
person10 = Session.scalars(q).one()
# next call, the query is cached.
person10 = Session.scalars(q).one()
# clear out the Session. The "_cache_dictionary" dictionary
# disappears with it.
Session.remove()
# query calls from DB again
person10 = Session.scalars(q).one()
# identity is preserved - person10 is the *same* object that's
# ultimately inside the cache. So it is safe to manipulate
# the not-queried-for attributes of objects when using such a
# cache without the need to invalidate - however, any change
# that would change the results of a cached query, such as
# inserts, deletes, or modification to attributes that are
# part of query criterion, still require careful invalidation.
cache_key = FromCache("local_session")._generate_cache_key(
q, {}, environment.cache
)
assert person10 is regions["local_session"].get(cache_key)().scalar()
| ScopedSessionBackend |
python | pandas-dev__pandas | pandas/core/arrays/interval.py | {
"start": 3735,
"end": 73815
} | class ____(IntervalMixin, ExtensionArray):
"""
Pandas array for interval data that are closed on the same side.
Parameters
----------
data : array-like (1-dimensional)
Array-like (ndarray, :class:`DateTimeArray`, :class:`TimeDeltaArray`) containing
Interval objects from which to build the IntervalArray.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both or
neither.
dtype : dtype or None, default None
If None, dtype will be inferred.
copy : bool, default False
Copy the input data.
verify_integrity : bool, default True
Verify that the IntervalArray is valid.
Attributes
----------
left
right
closed
mid
length
is_empty
is_non_overlapping_monotonic
Methods
-------
from_arrays
from_tuples
from_breaks
contains
overlaps
set_closed
to_tuples
See Also
--------
Index : The base pandas Index type.
Interval : A bounded slice-like interval; the elements of an IntervalArray.
interval_range : Function to create a fixed frequency IntervalIndex.
cut : Bin values into discrete Intervals.
qcut : Bin values into equal-sized Intervals based on rank or sample quantiles.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#intervalindex>`__
for more.
Examples
--------
A new ``IntervalArray`` can be constructed directly from an array-like of
``Interval`` objects:
>>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
<IntervalArray>
[(0, 1], (1, 5]]
Length: 2, dtype: interval[int64, right]
It may also be constructed using one of the constructor
methods: :meth:`IntervalArray.from_arrays`,
:meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`.
"""
can_hold_na = True
_na_value = _fill_value = np.nan
@property
def ndim(self) -> Literal[1]:
return 1
# To make mypy recognize the fields
_left: IntervalSide
_right: IntervalSide
_dtype: IntervalDtype
# ---------------------------------------------------------------------
# Constructors
def __new__(
cls,
data,
closed: IntervalClosedType | None = None,
dtype: Dtype | None = None,
copy: bool = False,
verify_integrity: bool = True,
) -> Self:
data = extract_array(data, extract_numpy=True)
if isinstance(data, cls):
left: IntervalSide = data._left
right: IntervalSide = data._right
closed = closed or data.closed
dtype = IntervalDtype(left.dtype, closed=closed)
else:
# don't allow scalars
if is_scalar(data):
msg = (
f"{cls.__name__}(...) must be called with a collection "
f"of some kind, {data} was passed"
)
raise TypeError(msg)
# might need to convert empty or purely na data
data = _maybe_convert_platform_interval(data)
left, right, infer_closed = intervals_to_interval_bounds(
data, validate_closed=closed is None
)
if left.dtype == object:
left = lib.maybe_convert_objects(left)
right = lib.maybe_convert_objects(right)
closed = closed or infer_closed
left, right, dtype = cls._ensure_simple_new_inputs(
left,
right,
closed=closed,
copy=copy,
dtype=dtype,
)
if verify_integrity:
cls._validate(left, right, dtype=dtype)
return cls._simple_new(
left,
right,
dtype=dtype,
)
@classmethod
def _simple_new(
cls,
left: IntervalSide,
right: IntervalSide,
dtype: IntervalDtype,
) -> Self:
result = IntervalMixin.__new__(cls)
result._left = left
result._right = right
result._dtype = dtype
return result
@classmethod
def _ensure_simple_new_inputs(
cls,
left,
right,
closed: IntervalClosedType | None = None,
copy: bool = False,
dtype: Dtype | None = None,
) -> tuple[IntervalSide, IntervalSide, IntervalDtype]:
"""Ensure correctness of input parameters for cls._simple_new."""
from pandas.core.indexes.base import ensure_index
left = ensure_index(left, copy=copy)
left = maybe_upcast_numeric_to_64bit(left)
right = ensure_index(right, copy=copy)
right = maybe_upcast_numeric_to_64bit(right)
if closed is None and isinstance(dtype, IntervalDtype):
closed = dtype.closed
closed = closed or "right"
if dtype is not None:
# GH 19262: dtype must be an IntervalDtype to override inferred
dtype = pandas_dtype(dtype)
if isinstance(dtype, IntervalDtype):
if dtype.subtype is not None:
left = left.astype(dtype.subtype)
right = right.astype(dtype.subtype)
else:
msg = f"dtype must be an IntervalDtype, got {dtype}"
raise TypeError(msg)
if dtype.closed is None:
# possibly loading an old pickle
dtype = IntervalDtype(dtype.subtype, closed)
elif closed != dtype.closed:
raise ValueError("closed keyword does not match dtype.closed")
# coerce dtypes to match if needed
if is_float_dtype(left.dtype) and is_integer_dtype(right.dtype):
right = right.astype(left.dtype)
elif is_float_dtype(right.dtype) and is_integer_dtype(left.dtype):
left = left.astype(right.dtype)
if type(left) != type(right):
msg = (
f"must not have differing left [{type(left).__name__}] and "
f"right [{type(right).__name__}] types"
)
raise ValueError(msg)
if isinstance(left.dtype, CategoricalDtype) or is_string_dtype(left.dtype):
# GH 19016
msg = (
"category, object, and string subtypes are not supported "
"for IntervalArray"
)
raise TypeError(msg)
if isinstance(left, ABCPeriodIndex):
msg = "Period dtypes are not supported, use a PeriodIndex instead"
raise ValueError(msg)
if isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz):
msg = (
"left and right must have the same time zone, got "
f"'{left.tz}' and '{right.tz}'"
)
raise ValueError(msg)
elif needs_i8_conversion(left.dtype) and left.unit != right.unit:
# e.g. m8[s] vs m8[ms], try to cast to a common dtype GH#55714
left_arr, right_arr = left._data._ensure_matching_resos(right._data)
left = ensure_index(left_arr)
right = ensure_index(right_arr)
# For dt64/td64 we want DatetimeArray/TimedeltaArray instead of ndarray
left = ensure_wrapped_if_datetimelike(left)
left = extract_array(left, extract_numpy=True)
right = ensure_wrapped_if_datetimelike(right)
right = extract_array(right, extract_numpy=True)
if isinstance(left, ArrowExtensionArray) or isinstance(
right, ArrowExtensionArray
):
pass
else:
lbase = getattr(left, "_ndarray", left)
lbase = getattr(lbase, "_data", lbase).base
rbase = getattr(right, "_ndarray", right)
rbase = getattr(rbase, "_data", rbase).base
if lbase is not None and lbase is rbase:
# If these share data, then setitem could corrupt our IA
right = right.copy()
dtype = IntervalDtype(left.dtype, closed=closed)
# Check for mismatched signed/unsigned integer dtypes after casting
left_dtype = left.dtype
right_dtype = right.dtype
if (
left_dtype.kind in "iu"
and right_dtype.kind in "iu"
and left_dtype.kind != right_dtype.kind
):
raise TypeError(
f"Left and right arrays must have matching signedness. "
f"Got {left_dtype} and {right_dtype}."
)
return left, right, dtype
@classmethod
def _from_sequence(
cls,
scalars,
*,
dtype: Dtype | None = None,
copy: bool = False,
) -> Self:
return cls(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_factorized(cls, values: np.ndarray, original: IntervalArray) -> Self:
return cls._from_sequence(values, dtype=original.dtype)
_interval_shared_docs["from_breaks"] = textwrap.dedent(
"""
Construct an %(klass)s from an array of splits.
Parameters
----------
breaks : array-like (1-dimensional)
Left and right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.\
%(name)s
copy : bool, default False
Copy the data.
dtype : dtype or None, default None
If None, dtype will be inferred.
Returns
-------
%(klass)s
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_arrays : Construct from a left and right array.
%(klass)s.from_tuples : Construct from a sequence of tuples.
%(examples)s\
"""
)
@classmethod
def from_breaks(
cls,
breaks,
closed: IntervalClosedType | None = "right",
copy: bool = False,
dtype: Dtype | None = None,
) -> Self:
"""
Construct an IntervalArray from an array of splits.
Parameters
----------
breaks : array-like (1-dimensional)
Left and right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : bool, default False
Copy the data.
dtype : dtype or None, default None
If None, dtype will be inferred.
Returns
-------
IntervalArray
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
IntervalArray.from_arrays : Construct from a left and right array.
IntervalArray.from_tuples : Construct from a sequence of tuples.
Examples
--------
>>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])
<IntervalArray>
[(0, 1], (1, 2], (2, 3]]
Length: 3, dtype: interval[int64, right]
"""
breaks = _maybe_convert_platform_interval(breaks)
return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype)
_interval_shared_docs["from_arrays"] = textwrap.dedent(
"""
Construct from two arrays defining the left and right bounds.
Parameters
----------
left : array-like (1-dimensional)
Left bounds for each interval.
right : array-like (1-dimensional)
Right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.\
%(name)s
copy : bool, default False
Copy the data.
dtype : dtype, optional
If None, dtype will be inferred.
Returns
-------
%(klass)s
Raises
------
ValueError
When a value is missing in only one of `left` or `right`.
When a value in `left` is greater than the corresponding value
in `right`.
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits.
%(klass)s.from_tuples : Construct an %(klass)s from an
array-like of tuples.
Notes
-----
Each element of `left` must be less than or equal to the `right`
element at the same position. If an element is missing, it must be
missing in both `left` and `right`. A TypeError is raised when
using an unsupported type for `left` or `right`. At the moment,
'category', 'object', and 'string' subtypes are not supported.
%(examples)s\
"""
)
@classmethod
def from_arrays(
    cls,
    left,
    right,
    closed: IntervalClosedType | None = "right",
    copy: bool = False,
    dtype: Dtype | None = None,
) -> Self:
    """
    Construct from two arrays defining the left and right bounds.

    Parameters
    ----------
    left : array-like (1-dimensional)
        Left bounds for each interval.
    right : array-like (1-dimensional)
        Right bounds for each interval.
    closed : {'left', 'right', 'both', 'neither'}, default 'right'
        Whether the intervals are closed on the left-side, right-side, both
        or neither.
    copy : bool, default False
        Copy the data.
    dtype : dtype, optional
        If None, dtype will be inferred.

    Returns
    -------
    IntervalArray

    Raises
    ------
    ValueError
        When a value is missing in only one of `left` or `right`.
        When a value in `left` is greater than the corresponding value
        in `right`.

    See Also
    --------
    interval_range : Function to create a fixed frequency IntervalIndex.
    IntervalArray.from_breaks : Construct an IntervalArray from an array of
        splits.
    IntervalArray.from_tuples : Construct an IntervalArray from an
        array-like of tuples.

    Notes
    -----
    Each element of `left` must be less than or equal to the `right`
    element at the same position. If an element is missing, it must be
    missing in both `left` and `right`. A TypeError is raised when
    using an unsupported type for `left` or `right`. At the moment,
    'category', 'object', and 'string' subtypes are not supported.

    Examples
    --------
    >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])
    <IntervalArray>
    [(0, 1], (1, 2], (2, 3]]
    Length: 3, dtype: interval[int64, right]
    """
    # Normalize raw inputs to platform-consistent array types first.
    left = _maybe_convert_platform_interval(left)
    right = _maybe_convert_platform_interval(right)

    # Resolve dtype/closed and coerce both sides to a common subtype
    # BEFORE validating the interval invariants; _validate relies on the
    # coerced form.
    left, right, dtype = cls._ensure_simple_new_inputs(
        left,
        right,
        closed=closed,
        copy=copy,
        dtype=dtype,
    )
    cls._validate(left, right, dtype=dtype)
    return cls._simple_new(left, right, dtype=dtype)
_interval_shared_docs["from_tuples"] = textwrap.dedent(
"""
Construct an %(klass)s from an array-like of tuples.
Parameters
----------
data : array-like (1-dimensional)
Array of tuples.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.\
%(name)s
copy : bool, default False
By-default copy the data, this is compat only and ignored.
dtype : dtype or None, default None
If None, dtype will be inferred.
Returns
-------
%(klass)s
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_arrays : Construct an %(klass)s from a left and
right array.
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits.
%(examples)s\
"""
)
@classmethod
def from_tuples(
    cls,
    data,
    closed: IntervalClosedType | None = "right",
    copy: bool = False,
    dtype: Dtype | None = None,
) -> Self:
    """
    Construct an IntervalArray from an array-like of tuples.

    Parameters
    ----------
    data : array-like (1-dimensional)
        Array of ``(left, right)`` tuples; NA entries are also accepted.
    closed : {'left', 'right', 'both', 'neither'}, default 'right'
        Whether the intervals are closed on the left-side, right-side, both
        or neither.
    copy : bool, default False
        By-default copy the data, this is compat only and ignored.
    dtype : dtype or None, default None
        If None, dtype will be inferred.

    Returns
    -------
    IntervalArray

    See Also
    --------
    IntervalArray.from_arrays : Construct from a left and right array.
    IntervalArray.from_breaks : Construct from an array of splits.

    Examples
    --------
    >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
    <IntervalArray>
    [(0, 1], (1, 2]]
    Length: 2, dtype: interval[int64, right]
    """
    if not len(data):
        # ensure that empty data keeps input dtype
        left = right = data
    else:
        left, right = [], []

    cls_name = cls.__name__
    for item in data:
        if not isinstance(item, tuple) and isna(item):
            lo = hi = np.nan
        else:
            try:
                # need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...]
                lo, hi = item
            except ValueError as err:
                msg = f"{cls_name}.from_tuples requires tuples of length 2, got {item}"
                raise ValueError(msg) from err
            except TypeError as err:
                msg = f"{cls_name}.from_tuples received an invalid item, {item}"
                raise TypeError(msg) from err
        left.append(lo)
        right.append(hi)

    return cls.from_arrays(left, right, closed, copy=False, dtype=dtype)
@classmethod
def _validate(cls, left, right, dtype: IntervalDtype) -> None:
    """
    Verify that the IntervalArray is valid.

    Checks that the dtype is an IntervalDtype, the two endpoint arrays
    have matching lengths, NA positions agree on both sides, and every
    present left bound is <= its right bound.
    """
    if not isinstance(dtype, IntervalDtype):
        raise ValueError(f"invalid dtype: {dtype}")
    if len(left) != len(right):
        raise ValueError("left and right must have the same length")

    present = notna(left)
    if not (present == notna(right)).all():
        raise ValueError(
            "missing values must be missing in the same "
            "location both left and right sides"
        )
    # Compare only where both endpoints are present.
    if not (left[present] <= right[present]).all():
        raise ValueError("left side of interval must be <= right side")
def _shallow_copy(self, left, right) -> Self:
    """
    Return a new IntervalArray with the replacement attributes.

    Parameters
    ----------
    left : Index
        Values to be used for the left-side of the intervals.
    right : Index
        Values to be used for the right-side of the intervals.
    """
    # Keep the current closed side; the subtype follows the new data.
    new_dtype = IntervalDtype(left.dtype, closed=self.closed)
    left, right, new_dtype = self._ensure_simple_new_inputs(
        left, right, dtype=new_dtype
    )
    return self._simple_new(left, right, dtype=new_dtype)
# ---------------------------------------------------------------------
# Descriptive

@property
def dtype(self) -> IntervalDtype:
    # The parametrized IntervalDtype (subtype + closed side) stored at
    # construction time.
    return self._dtype

@property
def nbytes(self) -> int:
    # Total bytes consumed by the two endpoint containers.
    return self.left.nbytes + self.right.nbytes

@property
def size(self) -> int:
    # Avoid materializing self.values
    return self.left.size
# ---------------------------------------------------------------------
# EA Interface

def __iter__(self) -> Iterator:
    # Iterate over object-dtype scalars (Interval or np.nan) produced
    # by __array__.
    return iter(np.asarray(self))

def __len__(self) -> int:
    # Both endpoint arrays always have equal length; use the left one.
    return len(self._left)

@overload
def __getitem__(self, key: ScalarIndexer) -> IntervalOrNA: ...

@overload
def __getitem__(self, key: SequenceIndexer) -> Self: ...
def __getitem__(self, key: PositionalIndexer) -> Self | IntervalOrNA:
    # Scalar keys return an Interval (or the NA fill value); array-like
    # keys return a new IntervalArray built from the sliced endpoints.
    key = check_array_indexer(self, key)
    left = self._left[key]
    right = self._right[key]

    if not isinstance(left, (np.ndarray, ExtensionArray)):
        # scalar
        if is_scalar(left) and isna(left):
            return self._fill_value
        return Interval(left, right, self.closed)
    if np.ndim(left) > 1:
        # GH#30588 multi-dimensional indexer disallowed
        raise ValueError("multi-dimensional indexing not allowed")
    # Argument 2 to "_simple_new" of "IntervalArray" has incompatible type
    # "Union[Period, Timestamp, Timedelta, NaTType, DatetimeArray, TimedeltaArray,
    # ndarray[Any, Any]]"; expected "Union[Union[DatetimeArray, TimedeltaArray],
    # ndarray[Any, Any]]"
    result = self._simple_new(left, right, dtype=self.dtype)  # type: ignore[arg-type]
    if getitem_returns_view(self, key):
        # A view over a read-only parent must itself be read-only.
        result._readonly = self._readonly
    return result
def __setitem__(self, key, value) -> None:
    """Set one or more positions to an Interval or NA value."""
    # Writes are rejected on read-only (view-flagged) arrays.
    if self._readonly:
        raise ValueError("Cannot modify read-only array")
    left_component, right_component = self._validate_setitem_value(value)
    indexer = check_array_indexer(self, key)
    self._left[indexer] = left_component
    self._right[indexer] = right_component
def _cmp_method(self, other, op):
    # Core comparison dispatcher shared by all six rich-comparison dunders.
    # Ordering between intervals is lexicographic: by left endpoint first,
    # then right endpoint.

    # ensure pandas array for list-like and eliminate non-interval scalars
    if is_list_like(other):
        if len(self) != len(other):
            raise ValueError("Lengths must match to compare")
        other = pd_array(other)
    elif not isinstance(other, Interval):
        # non-interval scalar -> no matches
        if other is NA:
            # GH#31882
            from pandas.core.arrays import BooleanArray

            # All-masked BooleanArray: every comparison with NA is NA.
            arr = np.empty(self.shape, dtype=bool)
            mask = np.ones(self.shape, dtype=bool)
            return BooleanArray(arr, mask)
        return invalid_comparison(self, other, op)

    # determine the dtype of the elements we want to compare
    if isinstance(other, Interval):
        other_dtype = pandas_dtype("interval")
    elif not isinstance(other.dtype, CategoricalDtype):
        other_dtype = other.dtype
    else:
        # for categorical defer to categories for dtype
        other_dtype = other.categories.dtype

        # extract intervals if we have interval categories with matching closed
        if isinstance(other_dtype, IntervalDtype):
            if self.closed != other.categories.closed:
                return invalid_comparison(self, other, op)

            other = other.categories._values.take(
                other.codes, allow_fill=True, fill_value=other.categories._na_value
            )

    # interval-like -> need same closed and matching endpoints
    if isinstance(other_dtype, IntervalDtype):
        if self.closed != other.closed:
            return invalid_comparison(self, other, op)
        elif not isinstance(other, Interval):
            other = type(self)(other)

        if op is operator.eq:
            return (self._left == other.left) & (self._right == other.right)
        elif op is operator.ne:
            return (self._left != other.left) | (self._right != other.right)
        elif op is operator.gt:
            return (self._left > other.left) | (
                (self._left == other.left) & (self._right > other.right)
            )
        elif op is operator.ge:
            return (self == other) | (self > other)
        elif op is operator.lt:
            return (self._left < other.left) | (
                (self._left == other.left) & (self._right < other.right)
            )
        else:
            # operator.le (only remaining operator; expressed as eq-or-lt)
            return (self == other) | (self < other)

    # non-interval/non-object dtype -> no matches
    if not is_object_dtype(other_dtype):
        return invalid_comparison(self, other, op)

    # object dtype -> iteratively check for intervals
    result = np.zeros(len(self), dtype=bool)
    for i, obj in enumerate(other):
        try:
            result[i] = op(self[i], obj)
        except TypeError:
            if obj is NA:
                # comparison with np.nan returns NA
                # github.com/pandas-dev/pandas/pull/37124#discussion_r509095092
                result = result.astype(object)
                result[i] = NA
            else:
                raise
    return result
@unpack_zerodim_and_defer("__eq__")
def __eq__(self, other):
return self._cmp_method(other, operator.eq)
@unpack_zerodim_and_defer("__ne__")
def __ne__(self, other):
return self._cmp_method(other, operator.ne)
@unpack_zerodim_and_defer("__gt__")
def __gt__(self, other):
return self._cmp_method(other, operator.gt)
@unpack_zerodim_and_defer("__ge__")
def __ge__(self, other):
return self._cmp_method(other, operator.ge)
@unpack_zerodim_and_defer("__lt__")
def __lt__(self, other):
return self._cmp_method(other, operator.lt)
@unpack_zerodim_and_defer("__le__")
def __le__(self, other):
return self._cmp_method(other, operator.le)
def argsort(
    self,
    *,
    ascending: bool = True,
    kind: SortKind = "quicksort",
    na_position: str = "last",
    **kwargs,
) -> np.ndarray:
    """Return the indices that would sort this IntervalArray."""
    ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs)

    fast_path = ascending and kind == "quicksort" and na_position == "last"
    if fast_path:
        # Primary key is the left endpoint; ties broken by the right one.
        # TODO: in an IntervalIndex we can reuse the cached
        # IntervalTree.left_sorter
        return np.lexsort((self.right, self.left))

    # TODO: other cases we can use lexsort for? much more performant.
    return super().argsort(
        ascending=ascending, kind=kind, na_position=na_position, **kwargs
    )
def min(self, *, axis: AxisInt | None = None, skipna: bool = True) -> IntervalOrNA:
    """Return the smallest Interval, or NA for empty / unskipped-NA input."""
    nv.validate_minmax_axis(axis, self.ndim)

    if len(self) == 0:
        return self._na_value

    mask = self.isna()
    if not mask.any():
        candidates = self
    elif skipna:
        candidates = self[~mask]
    else:
        # NA present and not skipped -> result is NA.
        return self._na_value

    return candidates[candidates.argsort()[0]]
def max(self, *, axis: AxisInt | None = None, skipna: bool = True) -> IntervalOrNA:
    """Return the largest Interval, or NA for empty / unskipped-NA input."""
    nv.validate_minmax_axis(axis, self.ndim)

    if len(self) == 0:
        return self._na_value

    mask = self.isna()
    if not mask.any():
        candidates = self
    elif skipna:
        candidates = self[~mask]
    else:
        # NA present and not skipped -> result is NA.
        return self._na_value

    return candidates[candidates.argsort()[-1]]
def fillna(self, value, limit: int | None = None, copy: bool = True) -> Self:
    """
    Fill NA/NaN values using the specified method.

    Parameters
    ----------
    value : scalar, dict, Series
        Value used to fill all missing entries. Should be an Interval
        object or NA/NaN; lists are not accepted.
    limit : int, default None
        Not implemented for IntervalArray; must be None.
    copy : bool, default True
        Must be True; filling without a copy (copy=False) is not
        implemented for this array type.

    Returns
    -------
    filled : IntervalArray with NA/NaN filled
    """
    if copy is False:
        raise NotImplementedError
    if limit is not None:
        raise ValueError("limit must be None")

    fill_left, fill_right = self._validate_scalar(value)

    # Fill each endpoint side independently, then rebuild the array.
    return self._shallow_copy(
        self.left.fillna(value=fill_left),
        self.right.fillna(value=fill_right),
    )
def astype(self, dtype, copy: bool = True):
    """
    Cast to an ExtensionArray or NumPy array with dtype 'dtype'.

    Parameters
    ----------
    dtype : str or dtype
        Typecode or data-type to which the array is cast.
    copy : bool, default True
        Whether to copy the data, even if not necessary. If False,
        a copy is made only if the old dtype does not match the
        new dtype.

    Returns
    -------
    array : ExtensionArray or ndarray
        ExtensionArray or NumPy ndarray with 'dtype' for its dtype.

    Raises
    ------
    TypeError
        When the subtype conversion is not supported or the target
        dtype cannot represent the data.
    """
    from pandas import Index

    if dtype is not None:
        dtype = pandas_dtype(dtype)

    if isinstance(dtype, IntervalDtype):
        if dtype == self.dtype:
            return self.copy() if copy else self

        if is_float_dtype(self.dtype.subtype) and needs_i8_conversion(
            dtype.subtype
        ):
            # This is allowed on the Index.astype but we disallow it here
            msg = (
                f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible"
            )
            raise TypeError(msg)

        # need to cast to different subtype
        try:
            # We need to use Index rules for astype to prevent casting
            # np.nan entries to int subtypes
            new_left = Index(self._left, copy=False).astype(dtype.subtype)
            new_right = Index(self._right, copy=False).astype(dtype.subtype)
        except IntCastingNaNError:
            # e.g test_subtype_integer
            raise
        except (TypeError, ValueError) as err:
            # e.g. test_subtype_integer_errors f8->u8 can be lossy
            # and raises ValueError
            msg = (
                f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible"
            )
            raise TypeError(msg) from err
        return self._shallow_copy(new_left, new_right)
    else:
        # Non-interval target: defer to the base ExtensionArray cast.
        try:
            return super().astype(dtype, copy=copy)
        except (TypeError, ValueError) as err:
            msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
            raise TypeError(msg) from err
def equals(self, other) -> bool:
    """Return True if `other` is an IntervalArray with identical contents."""
    if type(other) is not type(self):
        return False

    # Same closed side and elementwise-equal endpoints on both sides.
    return bool(
        self.closed == other.closed
        and self.left.equals(other.left)
        and self.right.equals(other.right)
    )
@classmethod
def _concat_same_type(cls, to_concat: Sequence[IntervalArray]) -> Self:
    """
    Concatenate multiple IntervalArray.

    Parameters
    ----------
    to_concat : sequence of IntervalArray

    Returns
    -------
    IntervalArray
    """
    # All inputs must agree on the closed side.
    closed_values = {arr.closed for arr in to_concat}
    if len(closed_values) != 1:
        raise ValueError("Intervals must all be closed on the same side.")
    closed = closed_values.pop()

    left: IntervalSide = np.concatenate([arr.left for arr in to_concat])
    right: IntervalSide = np.concatenate([arr.right for arr in to_concat])

    left, right, dtype = cls._ensure_simple_new_inputs(left, right, closed=closed)
    return cls._simple_new(left, right, dtype=dtype)
def copy(self) -> Self:
    """
    Return a copy of the array.

    Returns
    -------
    IntervalArray
    """
    # Copy both endpoint containers; dtype is shared (immutable).
    return self._simple_new(
        self._left.copy(), self._right.copy(), dtype=self.dtype
    )
def isna(self) -> np.ndarray:
    """Boolean mask of missing intervals."""
    # By construction NA positions match on both sides, so checking the
    # left endpoints suffices.
    return isna(self._left)
def shift(self, periods: int = 1, fill_value: object = None) -> IntervalArray:
    """
    Shift the values by `periods` positions, filling vacated slots.

    Parameters
    ----------
    periods : int, default 1
        Number of positions to shift; the sign determines the direction.
    fill_value : object, default None
        Scalar (Interval or NA-like) used for the vacated positions.

    Returns
    -------
    IntervalArray
    """
    if not len(self) or periods == 0:
        return self.copy()

    self._validate_scalar(fill_value)

    # ExtensionArray.shift doesn't work for two reasons
    # 1. IntervalArray.dtype.na_value may not be correct for the dtype.
    # 2. IntervalArray._from_sequence only accepts NaN for missing values,
    #    not other values like NaT

    empty_len = min(abs(periods), len(self))
    if isna(fill_value):
        from pandas import Index

        fill_value = Index(self._left, copy=False)._na_value
        # from_breaks with N+1 identical NA breaks yields N all-NA intervals.
        empty = IntervalArray.from_breaks(
            [fill_value] * (empty_len + 1), closed=self.closed
        )
    else:
        empty = self._from_sequence([fill_value] * empty_len, dtype=self.dtype)

    if periods > 0:
        a = empty
        b = self[:-periods]
    else:
        a = self[abs(periods) :]
        b = empty
    return self._concat_same_type([a, b])
def take(
    self,
    indices,
    *,
    allow_fill: bool = False,
    fill_value=None,
    axis=None,
    **kwargs,
) -> Self:
    """
    Take elements from the IntervalArray.

    Parameters
    ----------
    indices : sequence of integers
        Indices to be taken.
    allow_fill : bool, default False
        How to handle negative values in `indices`.

        * False: negative values index from the right, like
          :func:`numpy.take`.
        * True: negative values (only ``-1``) indicate missing positions,
          which are set to `fill_value`; any other negative value raises
          a ``ValueError``.
    fill_value : Interval or NA, optional
        Fill value for NA positions when `allow_fill` is True; ``None``
        means the type's default NA value (``self.dtype.na_value``).
    axis : any, default None
        Present for compat with IntervalIndex; does nothing.

    Returns
    -------
    IntervalArray

    Raises
    ------
    IndexError
        When the indices are out of bounds for the array.
    ValueError
        When `indices` contains negative values other than ``-1``
        and `allow_fill` is True.
    """
    nv.validate_take((), kwargs)

    fill_left = fill_right = fill_value
    if allow_fill:
        # Translate the user-facing fill scalar into per-side endpoints.
        fill_left, fill_right = self._validate_scalar(fill_value)

    taken_left = take(
        self._left, indices, allow_fill=allow_fill, fill_value=fill_left
    )
    taken_right = take(
        self._right, indices, allow_fill=allow_fill, fill_value=fill_right
    )

    return self._shallow_copy(taken_left, taken_right)
def _validate_listlike(self, value):
    """
    Coerce a list-like of interval values to (left, right) endpoint arrays.

    Raises TypeError when the values are not interval-like, when the
    closed sides do not match this array, or when the endpoints are
    incompatible with this array's subtype.
    """
    # list-like of intervals
    try:
        array = IntervalArray(value)
        self._check_closed_matches(array, name="value")
        value_left, value_right = array.left, array.right
    except TypeError as err:
        # wrong type: not interval or NA
        msg = f"'value' should be an interval type, got {type(value)} instead."
        raise TypeError(msg) from err

    try:
        # Ensure the candidate left endpoints can be held by our subtype.
        self.left._validate_fill_value(value_left)
    except (LossySetitemError, TypeError) as err:
        msg = (
            "'value' should be a compatible interval type, "
            f"got {type(value)} instead."
        )
        raise TypeError(msg) from err

    return value_left, value_right
def _validate_scalar(self, value):
    """Map a scalar Interval or NA value to a (left, right) endpoint pair."""
    if isinstance(value, Interval):
        self._check_closed_matches(value, name="value")
        # TODO: check subdtype match like _validate_setitem_value?
        return value.left, value.right

    if is_valid_na_for_dtype(value, self.left.dtype):
        # GH#18295
        na = self.left._na_value
        return na, na

    raise TypeError(
        "can only insert Interval objects and NA into an IntervalArray"
    )
def _validate_setitem_value(self, value):
    """
    Coerce a __setitem__ value into (left, right) components.

    Scalar Interval / NA values become scalar endpoint pairs; list-like
    values are delegated to _validate_listlike.
    """
    if is_valid_na_for_dtype(value, self.left.dtype):
        # na value: need special casing to set directly on numpy arrays
        value = self.left._na_value
        if is_integer_dtype(self.dtype.subtype):
            # can't set NaN on a numpy integer array
            # GH#45484 TypeError, not ValueError, matches what we get with
            # non-NA un-holdable value.
            raise TypeError("Cannot set float NaN to integer-backed IntervalArray")
        value_left, value_right = value, value

    elif isinstance(value, Interval):
        # scalar interval
        self._check_closed_matches(value, name="value")
        value_left, value_right = value.left, value.right
        # Both endpoints must be holdable by the left Index's subtype.
        self.left._validate_fill_value(value_left)
        self.left._validate_fill_value(value_right)

    else:
        return self._validate_listlike(value)

    return value_left, value_right
# ---------------------------------------------------------------------
# Rendering Methods

def _formatter(self, boxed: bool = False) -> Callable[[object], str]:
    # returning 'str' here causes us to render as e.g. "(0, 1]" instead of
    # "Interval(0, 1, closed='right')"
    return str
# ---------------------------------------------------------------------
# Vectorized Interval Properties/Attributes

@property
def left(self) -> Index:
    """
    Return the left endpoints of each Interval in the IntervalArray as an Index.

    This property provides access to the left endpoints of the intervals
    contained within the IntervalArray. This can be useful for analyses where
    the starting point of each interval is of interest, such as in histogram
    creation, data aggregation, or any scenario requiring the identification
    of the beginning of defined ranges. This property returns a ``pandas.Index``
    object containing the left endpoint for each interval.

    See Also
    --------
    arrays.IntervalArray.right : Return the right endpoints of each Interval in
        the IntervalArray as an Index.
    arrays.IntervalArray.mid : Return the midpoint of each Interval in the
        IntervalArray as an Index.
    arrays.IntervalArray.contains : Check elementwise if the Intervals contain
        the value.

    Examples
    --------
    >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)])
    >>> interv_arr
    <IntervalArray>
    [(0, 1], (2, 5]]
    Length: 2, dtype: interval[int64, right]
    >>> interv_arr.left
    Index([0, 2], dtype='int64')
    """
    from pandas import Index

    # Zero-copy wrap of the underlying left-endpoint data.
    return Index(self._left, copy=False)
@property
def right(self) -> Index:
    """
    Return the right endpoints of each Interval in the IntervalArray as an Index.

    Useful whenever only the upper bounds of the intervals matter, such
    as range-based filtering, overlap determination, or visualizing the
    end boundaries of data segments.

    See Also
    --------
    arrays.IntervalArray.left : Return the left endpoints of each Interval in
        the IntervalArray as an Index.
    arrays.IntervalArray.mid : Return the midpoint of each Interval in the
        IntervalArray as an Index.
    arrays.IntervalArray.contains : Check elementwise if the Intervals contain
        the value.

    Examples
    --------
    >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)])
    >>> interv_arr
    <IntervalArray>
    [(0, 1], (2, 5]]
    Length: 2, dtype: interval[int64, right]
    >>> interv_arr.right
    Index([1, 5], dtype='int64')
    """
    from pandas import Index

    # Zero-copy wrap of the underlying right-endpoint data.
    return Index(self._right, copy=False)
@property
def length(self) -> Index:
    """
    Return an Index with entries denoting the length of each Interval.

    The length of an interval is the difference between its `right` and
    `left` bounds — a natural measure of interval size for time-series
    or spatial analyses.

    See Also
    --------
    arrays.IntervalArray.left : Return the left endpoints of each Interval in
        the IntervalArray as an Index.
    arrays.IntervalArray.right : Return the right endpoints of each Interval in
        the IntervalArray as an Index.
    arrays.IntervalArray.mid : Return the midpoint of each Interval in the
        IntervalArray as an Index.

    Examples
    --------
    >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
    >>> interv_arr
    <IntervalArray>
    [(0, 1], (1, 5]]
    Length: 2, dtype: interval[int64, right]
    >>> interv_arr.length
    Index([1, 4], dtype='int64')
    """
    # Elementwise difference of the two endpoint Indexes.
    return self.right - self.left
@property
def mid(self) -> Index:
    """
    Return the midpoint of each Interval in the IntervalArray as an Index.

    The midpoint of an interval is the average of its ``left`` and
    ``right`` bounds, returned as a ``pandas.Index``.

    See Also
    --------
    Interval.left : Return left bound for the interval.
    Interval.right : Return right bound for the interval.
    Interval.length : Return the length of each interval.

    Examples
    --------
    >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
    >>> interv_arr
    <IntervalArray>
    [(0, 1], (1, 5]]
    Length: 2, dtype: interval[int64, right]
    >>> interv_arr.mid
    Index([0.5, 3.0], dtype='float64')
    """
    try:
        return 0.5 * (self.left + self.right)
    except TypeError:
        # datetime safe version: datetime-like endpoints cannot be added
        # together, so offset left by half the length instead.
        return self.left + 0.5 * self.length
_interval_shared_docs["overlaps"] = textwrap.dedent(
"""
Check elementwise if an Interval overlaps the values in the %(klass)s.
Two intervals overlap if they share a common point, including closed
endpoints. Intervals that only have an open endpoint in common do not
overlap.
Parameters
----------
other : Interval
Interval to check against for an overlap.
Returns
-------
ndarray
Boolean array positionally indicating where an overlap occurs.
See Also
--------
Interval.overlaps : Check whether two Interval objects overlap.
Examples
--------
%(examples)s
>>> intervals.overlaps(pd.Interval(0.5, 1.5))
array([ True, True, False])
Intervals that share closed endpoints overlap:
>>> intervals.overlaps(pd.Interval(1, 3, closed='left'))
array([ True, True, True])
Intervals that only have an open endpoint in common do not overlap:
>>> intervals.overlaps(pd.Interval(1, 2, closed='right'))
array([False, True, False])
"""
)
def overlaps(self, other):
    """
    Check elementwise if an Interval overlaps the values in the IntervalArray.

    Two intervals overlap if they share a common point, including closed
    endpoints. Intervals that only have an open endpoint in common do not
    overlap.

    Parameters
    ----------
    other : Interval
        Interval to check against for an overlap.

    Returns
    -------
    ndarray
        Boolean array positionally indicating where an overlap occurs.

    Raises
    ------
    NotImplementedError
        If `other` is an IntervalArray or IntervalIndex (elementwise
        array-vs-array overlap is not supported).
    TypeError
        If `other` is not an Interval.

    See Also
    --------
    Interval.overlaps : Check whether two Interval objects overlap.

    Examples
    --------
    >>> data = [(0, 1), (1, 3), (2, 4)]
    >>> intervals = pd.arrays.IntervalArray.from_tuples(data)
    >>> intervals
    <IntervalArray>
    [(0, 1], (1, 3], (2, 4]]
    Length: 3, dtype: interval[int64, right]
    >>> intervals.overlaps(pd.Interval(0.5, 1.5))
    array([ True,  True, False])

    Intervals that share closed endpoints overlap:

    >>> intervals.overlaps(pd.Interval(1, 3, closed="left"))
    array([ True,  True,  True])

    Intervals that only have an open endpoint in common do not overlap:

    >>> intervals.overlaps(pd.Interval(1, 2, closed="right"))
    array([False,  True, False])
    """
    if isinstance(other, (IntervalArray, ABCIntervalIndex)):
        raise NotImplementedError
    if not isinstance(other, Interval):
        msg = f"`other` must be Interval-like, got {type(other).__name__}"
        raise TypeError(msg)

    # equality is okay if both endpoints are closed (overlap at a point)
    op1 = le if (self.closed_left and other.closed_right) else lt
    op2 = le if (other.closed_left and self.closed_right) else lt

    # overlaps is equivalent negation of two interval being disjoint:
    # disjoint = (A.left > B.right) or (B.left > A.right)
    # (simplifying the negation allows this to be done in less operations)
    return op1(self.left, other.right) & op2(other.left, self.right)
# ---------------------------------------------------------------------

@property
def closed(self) -> IntervalClosedType:
    """
    String describing the inclusive side the intervals.

    Either ``left``, ``right``, ``both`` or ``neither``.

    See Also
    --------
    IntervalArray.closed : Returns inclusive side of the IntervalArray.
    Interval.closed : Returns inclusive side of the Interval.
    IntervalIndex.closed : Returns inclusive side of the IntervalIndex.

    Examples
    --------
    For arrays:

    >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
    >>> interv_arr
    <IntervalArray>
    [(0, 1], (1, 5]]
    Length: 2, dtype: interval[int64, right]
    >>> interv_arr.closed
    'right'

    For Interval Index:

    >>> interv_idx = pd.interval_range(start=0, end=2)
    >>> interv_idx
    IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')
    >>> interv_idx.closed
    'right'
    """
    # The closed side is carried on the parametrized IntervalDtype.
    return self.dtype.closed
_interval_shared_docs["set_closed"] = textwrap.dedent(
"""
Return an identical %(klass)s closed on the specified side.
Parameters
----------
closed : {'left', 'right', 'both', 'neither'}
Whether the intervals are closed on the left-side, right-side, both
or neither.
Returns
-------
%(klass)s
%(examples)s\
"""
)
def set_closed(self, closed: IntervalClosedType) -> Self:
    """
    Return an identical IntervalArray closed on the specified side.

    Parameters
    ----------
    closed : {'left', 'right', 'both', 'neither'}
        Whether the intervals are closed on the left-side, right-side, both
        or neither.

    Returns
    -------
    IntervalArray
        A new IntervalArray with the specified side closures.

    See Also
    --------
    IntervalArray.closed : Returns inclusive side of the Interval.
    arrays.IntervalArray.closed : Returns inclusive side of the IntervalArray.

    Examples
    --------
    >>> index = pd.arrays.IntervalArray.from_breaks(range(4))
    >>> index
    <IntervalArray>
    [(0, 1], (1, 2], (2, 3]]
    Length: 3, dtype: interval[int64, right]
    >>> index.set_closed("both")
    <IntervalArray>
    [[0, 1], [1, 2], [2, 3]]
    Length: 3, dtype: interval[int64, both]
    """
    if closed not in VALID_CLOSED:
        raise ValueError(f"invalid option for 'closed': {closed}")

    # Endpoint data is reused as-is; only the dtype's closed side changes.
    new_dtype = IntervalDtype(self._left.dtype, closed=closed)
    return self._simple_new(self._left, self._right, dtype=new_dtype)
_interval_shared_docs["is_non_overlapping_monotonic"] = """
Return a boolean whether the %(klass)s is non-overlapping and monotonic.
Non-overlapping means (no Intervals share points), and monotonic means
either monotonic increasing or monotonic decreasing.
Examples
--------
For arrays:
>>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
>>> interv_arr
<IntervalArray>
[(0, 1], (1, 5]]
Length: 2, dtype: interval[int64, right]
>>> interv_arr.is_non_overlapping_monotonic
True
>>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1),
... pd.Interval(-1, 0.1)])
>>> interv_arr
<IntervalArray>
[(0.0, 1.0], (-1.0, 0.1]]
Length: 2, dtype: interval[float64, right]
>>> interv_arr.is_non_overlapping_monotonic
False
For Interval Index:
>>> interv_idx = pd.interval_range(start=0, end=2)
>>> interv_idx
IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')
>>> interv_idx.is_non_overlapping_monotonic
True
>>> interv_idx = pd.interval_range(start=0, end=2, closed='both')
>>> interv_idx
IntervalIndex([[0, 1], [1, 2]], dtype='interval[int64, both]')
>>> interv_idx.is_non_overlapping_monotonic
False
"""
@property
def is_non_overlapping_monotonic(self) -> bool:
    """
    Return a boolean whether the IntervalArray/IntervalIndex is
    non-overlapping and monotonic.

    Non-overlapping means no two Intervals share a point, and monotonic
    means the intervals are either monotonically increasing or
    monotonically decreasing.

    See Also
    --------
    overlaps : Check if two IntervalIndex objects overlap.

    Examples
    --------
    >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
    >>> interv_arr.is_non_overlapping_monotonic
    True

    >>> interv_arr = pd.arrays.IntervalArray(
    ...     [pd.Interval(0, 1), pd.Interval(-1, 0.1)]
    ... )
    >>> interv_arr.is_non_overlapping_monotonic
    False
    """
    # Monotone increasing (e.g., [0, 1), [1, 2), ...) or decreasing
    # (e.g., [-1, 0), [-2, -1), ...); left <= right is already enforced.
    #
    # When both sides are closed, adjacent intervals that merely touch at
    # a shared endpoint DO overlap at that point, so the comparison must
    # be strict; otherwise touching endpoints are allowed.
    if self.closed == "both":
        increasing = (self._right[:-1] < self._left[1:]).all()
        decreasing = (self._left[:-1] > self._right[1:]).all()
    else:
        increasing = (self._right[:-1] <= self._left[1:]).all()
        decreasing = (self._left[:-1] >= self._right[1:]).all()
    return bool(increasing or decreasing)
# ---------------------------------------------------------------------
# Conversion

def __array__(
    self, dtype: NpDtype | None = None, copy: bool | None = None
) -> np.ndarray:
    """
    Return the IntervalArray's data as a numpy array of Interval
    objects (with dtype='object'); NA slots become np.nan.
    """
    # An object array of fresh Interval scalars is always a copy.
    if copy is False:
        raise ValueError(
            "Unable to avoid copy while creating an array as requested."
        )

    lefts = self._left
    rights = self._right
    mask = self.isna()
    closed = self.closed

    out = np.empty(len(lefts), dtype=object)
    for i in range(len(lefts)):
        out[i] = np.nan if mask[i] else Interval(lefts[i], rights[i], closed)
    return out
def __arrow_array__(self, type=None):
"""
Convert myself into a pyarrow Array.
"""
import pyarrow
from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
try:
subtype = pyarrow.from_numpy_dtype(self.dtype.subtype)
except TypeError as err:
raise TypeError(
f"Conversion to arrow with subtype '{self.dtype.subtype}' "
"is not supported"
) from err
interval_type = ArrowIntervalType(subtype, self.closed)
storage_array = pyarrow.StructArray.from_arrays(
[
pyarrow.array(self._left, type=subtype, from_pandas=True),
pyarrow.array(self._right, type=subtype, from_pandas=True),
],
names=["left", "right"],
)
mask = self.isna()
if mask.any():
# if there are missing values, set validity bitmap also on the array level
null_bitmap = pyarrow.array(~mask).buffers()[1]
storage_array = pyarrow.StructArray.from_buffers(
storage_array.type,
len(storage_array),
[null_bitmap],
children=[storage_array.field(0), storage_array.field(1)],
)
if type is not None:
if type.equals(interval_type.storage_type):
return storage_array
elif isinstance(type, ArrowIntervalType):
# ensure we have the same subtype and closed attributes
if not type.equals(interval_type):
raise TypeError(
"Not supported to convert IntervalArray to type with "
f"different 'subtype' ({self.dtype.subtype} vs {type.subtype}) "
f"and 'closed' ({self.closed} vs {type.closed}) attributes"
)
else:
raise TypeError(
f"Not supported to convert IntervalArray to '{type}' type"
)
return pyarrow.ExtensionArray.from_storage(interval_type, storage_array)
_interval_shared_docs["to_tuples"] = textwrap.dedent(
"""
Return an %(return_type)s of tuples of the form (left, right).
Parameters
----------
na_tuple : bool, default True
If ``True``, return ``NA`` as a tuple ``(nan, nan)``. If ``False``,
just return ``NA`` as ``nan``.
Returns
-------
tuples: %(return_type)s
%(examples)s\
"""
)
def to_tuples(self, na_tuple: bool = True) -> np.ndarray:
"""
Return an ndarray (if self is IntervalArray) or Index \
(if self is IntervalIndex) of tuples of the form (left, right).
Parameters
----------
na_tuple : bool, default True
If ``True``, return ``NA`` as a tuple ``(nan, nan)``. If ``False``,
just return ``NA`` as ``nan``.
Returns
-------
ndarray or Index
An ndarray of tuples representing the intervals
if `self` is an IntervalArray.
An Index of tuples representing the intervals
if `self` is an IntervalIndex.
See Also
--------
IntervalArray.to_list : Convert IntervalArray to a list of tuples.
IntervalArray.to_numpy : Convert IntervalArray to a numpy array.
IntervalArray.unique : Find unique intervals in an IntervalArray.
Examples
--------
For :class:`pandas.IntervalArray`:
>>> idx = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
>>> idx
<IntervalArray>
[(0, 1], (1, 2]]
Length: 2, dtype: interval[int64, right]
>>> idx.to_tuples()
array([(np.int64(0), np.int64(1)), (np.int64(1), np.int64(2))],
dtype=object)
For :class:`pandas.IntervalIndex`:
>>> idx = pd.interval_range(start=0, end=2)
>>> idx
IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')
>>> idx.to_tuples()
Index([(0, 1), (1, 2)], dtype='object')
"""
tuples = com.asarray_tuplesafe(zip(self._left, self._right, strict=True))
if not na_tuple:
# GH 18756
tuples = np.where(~self.isna(), tuples, np.nan)
return tuples
# ---------------------------------------------------------------------
def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
value_left, value_right = self._validate_setitem_value(value)
if isinstance(self._left, np.ndarray):
np.putmask(self._left, mask, value_left)
assert isinstance(self._right, np.ndarray)
np.putmask(self._right, mask, value_right)
else:
self._left._putmask(mask, value_left)
assert not isinstance(self._right, np.ndarray)
self._right._putmask(mask, value_right)
def insert(self, loc: int, item: Interval) -> Self:
"""
Return a new IntervalArray inserting new item at location. Follows
Python numpy.insert semantics for negative values. Only Interval
objects and NA can be inserted into an IntervalIndex
Parameters
----------
loc : int
item : Interval
Returns
-------
IntervalArray
"""
left_insert, right_insert = self._validate_scalar(item)
new_left = self.left.insert(loc, left_insert)
new_right = self.right.insert(loc, right_insert)
return self._shallow_copy(new_left, new_right)
def delete(self, loc) -> Self:
new_left: np.ndarray | DatetimeArray | TimedeltaArray
new_right: np.ndarray | DatetimeArray | TimedeltaArray
if isinstance(self._left, np.ndarray):
new_left = np.delete(self._left, loc)
assert isinstance(self._right, np.ndarray)
new_right = np.delete(self._right, loc)
else:
new_left = self._left.delete(loc)
assert not isinstance(self._right, np.ndarray)
new_right = self._right.delete(loc)
return self._shallow_copy(left=new_left, right=new_right)
def repeat(
self,
repeats: int | Sequence[int],
axis: AxisInt | None = None,
) -> Self:
"""
Repeat elements of an IntervalArray.
Returns a new IntervalArray where each element of the current IntervalArray
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
IntervalArray.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
IntervalArray
Newly created IntervalArray with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series.
Index.repeat : Equivalent function for Index.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
ExtensionArray.take : Take arbitrary positions.
Examples
--------
>>> cat = pd.Categorical(["a", "b", "c"])
>>> cat
['a', 'b', 'c']
Categories (3, str): ['a', 'b', 'c']
>>> cat.repeat(2)
['a', 'a', 'b', 'b', 'c', 'c']
Categories (3, str): ['a', 'b', 'c']
>>> cat.repeat([1, 2, 3])
['a', 'b', 'b', 'c', 'c', 'c']
Categories (3, str): ['a', 'b', 'c']
"""
nv.validate_repeat((), {"axis": axis})
left_repeat = self.left.repeat(repeats)
right_repeat = self.right.repeat(repeats)
return self._shallow_copy(left=left_repeat, right=right_repeat)
_interval_shared_docs["contains"] = textwrap.dedent(
"""
Check elementwise if the Intervals contain the value.
Return a boolean mask whether the value is contained in the Intervals
of the %(klass)s.
Parameters
----------
other : scalar
The value to check whether it is contained in the Intervals.
Returns
-------
boolean array
See Also
--------
Interval.contains : Check whether Interval object contains value.
%(klass)s.overlaps : Check if an Interval overlaps the values in the
%(klass)s.
Examples
--------
%(examples)s
>>> intervals.contains(0.5)
array([ True, False, False])
"""
)
def contains(self, other):
"""
Check elementwise if the Intervals contain the value.
Return a boolean mask whether the value is contained in the Intervals
of the IntervalArray.
Parameters
----------
other : scalar
The value to check whether it is contained in the Intervals.
Returns
-------
boolean array
A boolean mask whether the value is contained in the Intervals.
See Also
--------
Interval.contains : Check whether Interval object contains value.
IntervalArray.overlaps : Check if an Interval overlaps the values in the
IntervalArray.
Examples
--------
>>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)])
>>> intervals
<IntervalArray>
[(0, 1], (1, 3], (2, 4]]
Length: 3, dtype: interval[int64, right]
>>> intervals.contains(0.5)
array([ True, False, False])
"""
if isinstance(other, Interval):
raise NotImplementedError("contains not implemented for two intervals")
return (self._left < other if self.open_left else self._left <= other) & (
other < self._right if self.open_right else other <= self._right
)
def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
if isinstance(values, IntervalArray):
if self.closed != values.closed:
# not comparable -> no overlap
return np.zeros(self.shape, dtype=bool)
if self.dtype == values.dtype:
left = self._combined
right = values._combined
return np.isin(left, right).ravel()
elif needs_i8_conversion(self.left.dtype) ^ needs_i8_conversion(
values.left.dtype
):
# not comparable -> no overlap
return np.zeros(self.shape, dtype=bool)
return isin(self.astype(object), values.astype(object))
@property
def _combined(self) -> IntervalSide:
# error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]"
# has no attribute "reshape" [union-attr]
left = self.left._values.reshape(-1, 1) # type: ignore[union-attr]
right = self.right._values.reshape(-1, 1) # type: ignore[union-attr]
# GH#38353 instead of casting to object, operating on a
# complex128 ndarray is much more performant.
if needs_i8_conversion(left.dtype):
# error: Item "ndarray[Any, Any]" of "Any | ndarray[Any, Any]" has
# no attribute "_concat_same_type"
comb = left._concat_same_type( # type: ignore[union-attr]
[left, right], axis=1
)
comb = comb.view("complex128")[:, 0]
else:
comb = (np.array(left.ravel(), dtype="complex128")) + (
1j * np.array(right.ravel(), dtype="complex128")
)
return comb
def _from_combined(self, combined: np.ndarray) -> IntervalArray:
"""
Create a new IntervalArray with our dtype from a 1D complex128 ndarray.
"""
dtype = self._left.dtype
if needs_i8_conversion(dtype):
nc = combined.view("i8").reshape(-1, 2)
assert isinstance(self._left, (DatetimeArray, TimedeltaArray))
new_left: DatetimeArray | TimedeltaArray | np.ndarray = type(
self._left
)._from_sequence(nc[:, 0], dtype=dtype)
assert isinstance(self._right, (DatetimeArray, TimedeltaArray))
new_right: DatetimeArray | TimedeltaArray | np.ndarray = type(
self._right
)._from_sequence(nc[:, 1], dtype=dtype)
else:
assert isinstance(dtype, np.dtype)
new_left = np.real(combined).astype(dtype).ravel()
new_right = np.imag(combined).astype(dtype).ravel()
return self._shallow_copy(left=new_left, right=new_right)
def unique(self) -> IntervalArray:
nc = unique(self._combined)
return self._from_combined(np.asarray(nc)[:, None])
def _maybe_convert_platform_interval(values) -> ArrayLike:
"""
Try to do platform conversion, with special casing for IntervalArray.
Wrapper around maybe_convert_platform that alters the default return
dtype in certain cases to be compatible with IntervalArray. For example,
empty lists return with integer dtype instead of object dtype, which is
prohibited for IntervalArray.
Parameters
----------
values : array-like
Returns
-------
array
"""
if isinstance(values, (list, tuple)) and len(values) == 0:
# GH 19016
# empty lists/tuples get object dtype by default, but this is
# prohibited for IntervalArray, so coerce to integer instead
return np.array([], dtype=np.int64)
elif not is_list_like(values) or isinstance(values, ABCDataFrame):
# This will raise later, but we avoid passing to maybe_convert_platform
return values
elif isinstance(getattr(values, "dtype", None), CategoricalDtype):
values = np.asarray(values)
elif not hasattr(values, "dtype") and not isinstance(values, (list, tuple, range)):
# TODO: should we just cast these to list?
return values
else:
values = extract_array(values, extract_numpy=True)
if not hasattr(values, "dtype"):
values = np.asarray(values)
if values.dtype.kind in "iu" and values.dtype != np.int64:
values = values.astype(np.int64)
return values
| IntervalArray |
python | aimacode__aima-python | probability4e.py | {
"start": 8621,
"end": 13823
} | class ____:
"""
A conditional probability distribution for a boolean variable,
P(X | parents). Part of a BayesNet.
"""
def __init__(self, X, parents, cpt):
"""
:param X: variable name,
:param parents: a sequence of variable names or a space-separated string. Representing the names of parent nodes
:param cpt: the conditional probability table, takes one of these forms:
* A number, the unconditional probability P(X=true). You can
use this form when there are no parents.
* A dict {v: p, ...}, the conditional probability distribution
P(X=true | parent=v) = p. When there's just one parent.
* A dict {(v1, v2, ...): p, ...}, the distribution P(X=true |
parent1=v1, parent2=v2, ...) = p. Each key must have as many
values as there are parents. You can use this form always;
the first two are just conveniences.
In all cases the probability of X being false is left implicit,
since it follows from P(X=true).
>>> X = BayesNode('X', '', 0.2)
>>> Y = BayesNode('Y', 'P', {T: 0.2, F: 0.7})
>>> Z = BayesNode('Z', 'P Q',
... {(T, T): 0.2, (T, F): 0.3, (F, T): 0.5, (F, F): 0.7})
"""
if isinstance(parents, str):
parents = parents.split()
# We store the table always in the third form above.
if isinstance(cpt, (float, int)): # no parents, 0-tuple
cpt = {(): cpt}
elif isinstance(cpt, dict):
# one parent, 1-tuple
if cpt and isinstance(list(cpt.keys())[0], bool):
cpt = {(v,): p for v, p in cpt.items()}
assert isinstance(cpt, dict)
for vs, p in cpt.items():
assert isinstance(vs, tuple) and len(vs) == len(parents)
assert all(isinstance(v, bool) for v in vs)
assert 0 <= p <= 1
self.variable = X
self.parents = parents
self.cpt = cpt
self.children = []
def p(self, value, event):
"""
Return the conditional probability
P(X=value | parents=parent_values), where parent_values
are the values of parents in event. (event must assign each
parent a value.)
>>> bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625})
>>> bn.p(False, {'Burglary': False, 'Earthquake': True})
0.375
"""
assert isinstance(value, bool)
ptrue = self.cpt[event_values(event, self.parents)]
return ptrue if value else 1 - ptrue
def sample(self, event):
"""
Sample from the distribution for this variable conditioned
on event's values for parent_variables. That is, return True/False
at random according with the conditional probability given the
parents.
"""
return probability(self.p(True, event))
def __repr__(self):
return repr((self.variable, ' '.join(self.parents)))
# Burglary example [Figure 13 .2]
T, F = True, False
burglary = BayesNet([
('Burglary', '', 0.001),
('Earthquake', '', 0.002),
('Alarm', 'Burglary Earthquake',
{(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
])
# ______________________________________________________________________________
# Section 13.2. The Semantics of Bayesian Networks
# Bayesian nets with continuous variables
def gaussian_probability(param, event, value):
"""
Gaussian probability of a continuous Bayesian network node on condition of
certain event and the parameters determined by the event
:param param: parameters determined by discrete parent events of current node
:param event: a dict, continuous event of current node, the values are used
as parameters in calculating distribution
:param value: float, the value of current continuous node
:return: float, the calculated probability
>>> param = {'sigma':0.5, 'b':1, 'a':{'h1':0.5, 'h2': 1.5}}
>>> event = {'h1':0.6, 'h2': 0.3}
>>> gaussian_probability(param, event, 1)
0.2590351913317835
"""
assert isinstance(event, dict)
assert isinstance(param, dict)
buff = 0
for k, v in event.items():
# buffer varianle to calculate h1*a_h1 + h2*a_h2
buff += param['a'][k] * v
res = 1 / (param['sigma'] * np.sqrt(2 * np.pi)) * np.exp(-0.5 * ((value - buff - param['b']) / param['sigma']) ** 2)
return res
def logistic_probability(param, event, value):
"""
Logistic probability of a discrete node in Bayesian network with continuous parents,
:param param: a dict, parameters determined by discrete parents of current node
:param event: a dict, names and values of continuous parent variables of current node
:param value: boolean, True or False
:return: int, probability
"""
buff = 1
for _, v in event.items():
# buffer variable to calculate (value-mu)/sigma
buff *= (v - param['mu']) / param['sigma']
p = 1 - 1 / (1 + np.exp(-4 / np.sqrt(2 * np.pi) * buff))
return p if value else 1 - p
| BayesNode |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 134245,
"end": 135047
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, api_token: str, domain_name: str, email: str):
"""Airbyte Source for Confluence.
Args:
name (str): The name of the destination.
api_token (str): Please follow the Jira confluence for generating an API token: https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/
domain_name (str): Your Confluence domain name
email (str): Your Confluence login email
"""
self.api_token = check.str_param(api_token, "api_token")
self.domain_name = check.str_param(domain_name, "domain_name")
self.email = check.str_param(email, "email")
super().__init__("Confluence", name)
| ConfluenceSource |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-lantern/llama_index/vector_stores/lantern/base.py | {
"start": 3675,
"end": 22822
} | class ____(BasePydanticVectorStore):
"""
Latern vector store.
Examples:
`pip install llama-index-vector-stores-lantern`
```python
from llama_index.vector_stores.lantern import LanternVectorStore
# Set up connection details
connection_string = "postgresql://postgres:postgres@localhost:5432"
db_name = "postgres"
url = make_url(connection_string)
# Create an instance of LanternVectorStore
vector_store = LanternVectorStore.from_params(
database=db_name,
host=url.host,
password=url.password,
port=url.port,
user=url.username,
table_name="your_table_name",
embed_dim=1536, # openai embedding dimension
m=16, # HNSW M parameter
ef_construction=128, # HNSW ef construction parameter
ef=64, # HNSW ef search parameter
)
```
"""
stores_text: bool = True
flat_metadata: bool = False
connection_string: str
async_connection_string: str
table_name: str
schema_name: str
embed_dim: int
hybrid_search: bool
text_search_config: str
cache_ok: bool
perform_setup: bool
debug: bool
_base: Any = PrivateAttr()
_table_class: Any = PrivateAttr()
_engine: Any = PrivateAttr()
_session: Any = PrivateAttr()
_async_engine: Any = PrivateAttr()
_async_session: Any = PrivateAttr()
_is_initialized: bool = PrivateAttr(default=False)
def __init__(
self,
connection_string: str,
async_connection_string: str,
table_name: str,
schema_name: str,
hybrid_search: bool = False,
text_search_config: str = "english",
embed_dim: int = 1536,
m: int = 16,
ef_construction: int = 128,
ef: int = 64,
cache_ok: bool = False,
perform_setup: bool = True,
debug: bool = False,
) -> None:
table_name = table_name.lower()
schema_name = schema_name.lower()
if hybrid_search and text_search_config is None:
raise ValueError(
"Sparse vector index creation requires "
"a text search configuration specification."
)
from sqlalchemy.orm import declarative_base
super().__init__(
connection_string=connection_string,
async_connection_string=async_connection_string,
table_name=table_name,
schema_name=schema_name,
hybrid_search=hybrid_search,
text_search_config=text_search_config,
embed_dim=embed_dim,
cache_ok=cache_ok,
perform_setup=perform_setup,
debug=debug,
)
# sqlalchemy model
self._base = declarative_base()
self._table_class = get_data_model(
self._base,
table_name,
schema_name,
hybrid_search,
text_search_config,
cache_ok,
embed_dim=embed_dim,
m=m,
ef_construction=ef_construction,
ef=ef,
)
async def close(self) -> None:
if not self._is_initialized:
return
self._session.close_all()
self._engine.dispose()
await self._async_engine.dispose()
@classmethod
def class_name(cls) -> str:
return "LanternStore"
@classmethod
def from_params(
cls,
host: Optional[str] = None,
port: Optional[str] = None,
database: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
table_name: str = "llamaindex",
schema_name: str = "public",
connection_string: Optional[str] = None,
async_connection_string: Optional[str] = None,
hybrid_search: bool = False,
text_search_config: str = "english",
embed_dim: int = 1536,
m: int = 16,
ef_construction: int = 128,
ef: int = 64,
cache_ok: bool = False,
perform_setup: bool = True,
debug: bool = False,
) -> "LanternVectorStore":
"""Return connection string from database parameters."""
conn_str = (
connection_string
or f"postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}"
)
async_conn_str = async_connection_string or (
f"postgresql+asyncpg://{user}:{password}@{host}:{port}/{database}"
)
return cls(
connection_string=conn_str,
async_connection_string=async_conn_str,
table_name=table_name,
schema_name=schema_name,
hybrid_search=hybrid_search,
text_search_config=text_search_config,
embed_dim=embed_dim,
m=m,
ef_construction=ef_construction,
ef=ef,
cache_ok=cache_ok,
perform_setup=perform_setup,
debug=debug,
)
@property
def client(self) -> Any:
if not self._is_initialized:
return None
return self._engine
def _connect(self) -> Any:
from sqlalchemy import create_engine
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
self._engine = create_engine(self.connection_string, echo=self.debug)
self._session = sessionmaker(self._engine)
self._async_engine = create_async_engine(self.async_connection_string)
self._async_session = sessionmaker(self._async_engine, class_=AsyncSession) # type: ignore
def _create_schema_if_not_exists(self) -> None:
with self._session() as session, session.begin():
from sqlalchemy import text
statement = text(f"CREATE SCHEMA IF NOT EXISTS {self.schema_name}")
session.execute(statement)
session.commit()
def _create_tables_if_not_exists(self) -> None:
with self._session() as session, session.begin():
self._base.metadata.create_all(session.connection())
def _create_extension(self) -> None:
import sqlalchemy
with self._session() as session, session.begin():
statement = sqlalchemy.text("CREATE EXTENSION IF NOT EXISTS lantern")
session.execute(statement)
session.commit()
def _initialize(self) -> None:
if not self._is_initialized:
self._connect()
if self.perform_setup:
self._create_extension()
self._create_schema_if_not_exists()
self._create_tables_if_not_exists()
self._is_initialized = True
def _node_to_table_row(self, node: BaseNode) -> Any:
return self._table_class(
node_id=node.node_id,
embedding=node.get_embedding(),
text=node.get_content(metadata_mode=MetadataMode.NONE),
metadata_=node_to_metadata_dict(
node,
remove_text=True,
flat_metadata=self.flat_metadata,
),
)
def add(self, nodes: List[BaseNode]) -> List[str]:
self._initialize()
ids = []
with self._session() as session, session.begin():
for node in nodes:
ids.append(node.node_id)
item = self._node_to_table_row(node)
session.add(item)
session.commit()
return ids
async def async_add(self, nodes: List[BaseNode], **kwargs: Any) -> List[str]:
self._initialize()
ids = []
async with self._async_session() as session, session.begin():
for node in nodes:
ids.append(node.node_id)
item = self._node_to_table_row(node)
session.add(item)
await session.commit()
return ids
def _apply_filters_and_limit(
self,
stmt: "Select",
limit: int,
metadata_filters: Optional[MetadataFilters] = None,
) -> Any:
import sqlalchemy
if metadata_filters:
for filter_ in metadata_filters.legacy_filters():
bind_parameter = f"value_{filter_.key}"
stmt = stmt.where( # type: ignore
sqlalchemy.text(f"metadata_->>'{filter_.key}' = :{bind_parameter}")
)
stmt = stmt.params( # type: ignore
**{bind_parameter: str(filter_.value)}
)
return stmt.limit(limit) # type: ignore
def _build_query(
self,
embedding: Optional[List[float]],
limit: int = 10,
metadata_filters: Optional[MetadataFilters] = None,
) -> Any:
from sqlalchemy import func, select
stmt = select( # type: ignore
self._table_class,
func.cos_dist(self._table_class.embedding, embedding),
).order_by(self._table_class.embedding.op("<=>")(embedding))
return self._apply_filters_and_limit(stmt, limit, metadata_filters)
def _prepare_query(self, session: Any, limit: int) -> None:
from sqlalchemy import text
session.execute(text("SET enable_seqscan=OFF")) # always use index
session.execute(text(f"SET hnsw.init_k={limit}")) # always use index
async def _aprepare_query(self, session: Any, limit: int) -> None:
from sqlalchemy import text
await session.execute(text("SET enable_seqscan=OFF")) # always use index
await session.execute(text(f"SET hnsw.init_k={limit}")) # always use index
def _query_with_score(
self,
embedding: Optional[List[float]],
limit: int = 10,
metadata_filters: Optional[MetadataFilters] = None,
) -> List[DBEmbeddingRow]:
stmt = self._build_query(embedding, limit, metadata_filters)
with self._session() as session, session.begin():
self._prepare_query(session, limit)
res = session.execute(
stmt,
)
return [
DBEmbeddingRow(
node_id=item.node_id,
text=item.text,
metadata=item.metadata_,
similarity=(1 - distance) if distance is not None else 0,
)
for item, distance in res.all()
]
async def _aquery_with_score(
self,
embedding: Optional[List[float]],
limit: int = 10,
metadata_filters: Optional[MetadataFilters] = None,
) -> List[DBEmbeddingRow]:
stmt = self._build_query(embedding, limit, metadata_filters)
async with self._async_session() as async_session, async_session.begin():
await self._aprepare_query(async_session, limit)
res = await async_session.execute(stmt)
return [
DBEmbeddingRow(
node_id=item.node_id,
text=item.text,
metadata=item.metadata_,
similarity=(1 - distance) if distance is not None else 0,
)
for item, distance in res.all()
]
def _build_sparse_query(
self,
query_str: Optional[str],
limit: int,
metadata_filters: Optional[MetadataFilters] = None,
) -> Any:
from sqlalchemy import select, type_coerce
from sqlalchemy.sql import func, text
from sqlalchemy.types import UserDefinedType
class REGCONFIG(UserDefinedType):
# The TypeDecorator.cache_ok class-level flag indicates if this custom TypeDecorator is safe to be used as part of a cache key.
# If the TypeDecorator is not guaranteed to produce the same bind/result behavior and SQL generation every time,
# this flag should be set to False; otherwise if the class produces the same behavior each time, it may be set to True.
cache_ok = True
def get_col_spec(self, **kw: Any) -> str:
return "regconfig"
if query_str is None:
raise ValueError("query_str must be specified for a sparse vector query.")
ts_query = func.plainto_tsquery(
type_coerce(self.text_search_config, REGCONFIG), query_str
)
stmt = (
select( # type: ignore
self._table_class,
func.ts_rank(self._table_class.text_search_tsv, ts_query).label("rank"),
)
.where(self._table_class.text_search_tsv.op("@@")(ts_query))
.order_by(text("rank desc"))
)
# type: ignore
return self._apply_filters_and_limit(stmt, limit, metadata_filters)
async def _async_sparse_query_with_rank(
self,
query_str: Optional[str] = None,
limit: int = 10,
metadata_filters: Optional[MetadataFilters] = None,
) -> List[DBEmbeddingRow]:
stmt = self._build_sparse_query(query_str, limit, metadata_filters)
async with self._async_session() as async_session, async_session.begin():
res = await async_session.execute(stmt)
return [
DBEmbeddingRow(
node_id=item.node_id,
text=item.text,
metadata=item.metadata_,
similarity=rank,
)
for item, rank in res.all()
]
def _sparse_query_with_rank(
self,
query_str: Optional[str] = None,
limit: int = 10,
metadata_filters: Optional[MetadataFilters] = None,
) -> List[DBEmbeddingRow]:
stmt = self._build_sparse_query(query_str, limit, metadata_filters)
with self._session() as session, session.begin():
res = session.execute(stmt)
return [
DBEmbeddingRow(
node_id=item.node_id,
text=item.text,
metadata=item.metadata_,
similarity=rank,
)
for item, rank in res.all()
]
async def _async_hybrid_query(
self, query: VectorStoreQuery
) -> List[DBEmbeddingRow]:
import asyncio
if query.alpha is not None:
_logger.warning("postgres hybrid search does not support alpha parameter.")
sparse_top_k = query.sparse_top_k or query.similarity_top_k
results = await asyncio.gather(
self._aquery_with_score(
query.query_embedding, query.similarity_top_k, query.filters
),
self._async_sparse_query_with_rank(
query.query_str, sparse_top_k, query.filters
),
)
dense_results, sparse_results = results
all_results = dense_results + sparse_results
return _dedup_results(all_results)
def _hybrid_query(self, query: VectorStoreQuery) -> List[DBEmbeddingRow]:
if query.alpha is not None:
_logger.warning("postgres hybrid search does not support alpha parameter.")
sparse_top_k = query.sparse_top_k or query.similarity_top_k
dense_results = self._query_with_score(
query.query_embedding, query.similarity_top_k, query.filters
)
sparse_results = self._sparse_query_with_rank(
query.query_str, sparse_top_k, query.filters
)
all_results = dense_results + sparse_results
return _dedup_results(all_results)
def _db_rows_to_query_result(
self, rows: List[DBEmbeddingRow]
) -> VectorStoreQueryResult:
nodes = []
similarities = []
ids = []
for db_embedding_row in rows:
try:
node = metadata_dict_to_node(db_embedding_row.metadata)
node.set_content(str(db_embedding_row.text))
except Exception:
# NOTE: deprecated legacy logic for backward compatibility
node = TextNode(
id_=db_embedding_row.node_id,
text=db_embedding_row.text,
metadata=db_embedding_row.metadata,
)
similarities.append(db_embedding_row.similarity)
ids.append(db_embedding_row.node_id)
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=similarities,
ids=ids,
)
async def aquery(
self, query: VectorStoreQuery, **kwargs: Any
) -> VectorStoreQueryResult:
self._initialize()
if query.mode == VectorStoreQueryMode.HYBRID:
results = await self._async_hybrid_query(query)
elif query.mode in [
VectorStoreQueryMode.SPARSE,
VectorStoreQueryMode.TEXT_SEARCH,
]:
sparse_top_k = query.sparse_top_k or query.similarity_top_k
results = await self._async_sparse_query_with_rank(
query.query_str, sparse_top_k, query.filters
)
elif query.mode == VectorStoreQueryMode.DEFAULT:
results = await self._aquery_with_score(
query.query_embedding, query.similarity_top_k, query.filters
)
else:
raise ValueError(f"Invalid query mode: {query.mode}")
return self._db_rows_to_query_result(results)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
self._initialize()
if query.mode == VectorStoreQueryMode.HYBRID:
results = self._hybrid_query(query)
elif query.mode in [
VectorStoreQueryMode.SPARSE,
VectorStoreQueryMode.TEXT_SEARCH,
]:
sparse_top_k = query.sparse_top_k or query.similarity_top_k
results = self._sparse_query_with_rank(
query.query_str, sparse_top_k, query.filters
)
elif query.mode == VectorStoreQueryMode.DEFAULT:
results = self._query_with_score(
query.query_embedding, query.similarity_top_k, query.filters
)
else:
raise ValueError(f"Invalid query mode: {query.mode}")
return self._db_rows_to_query_result(results)
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
from sqlalchemy import text
self._initialize()
with self._session() as session, session.begin():
# Use parameterized query with bind parameters
stmt = text(
f"DELETE FROM {self.schema_name}.data_{self.table_name} "
"WHERE (metadata_->>'doc_id')::text = :ref_doc_id"
).bindparams(ref_doc_id=ref_doc_id)
session.execute(stmt)
session.commit()
def _dedup_results(results: List[DBEmbeddingRow]) -> List[DBEmbeddingRow]:
seen_ids = set()
deduped_results = []
for result in results:
if result.node_id not in seen_ids:
deduped_results.append(result)
seen_ids.add(result.node_id)
return deduped_results
| LanternVectorStore |
python | getsentry__sentry | tests/sentry/workflow_engine/handlers/condition/test_event_attribute_handler.py | {
"start": 628,
"end": 43656
} | class ____(ConditionTestCase):
condition = Condition.EVENT_ATTRIBUTE
payload = {
"id": EventAttributeCondition.id,
"match": MatchType.EQUAL,
"value": "php",
"attribute": "platform",
}
def get_event(self, **kwargs):
    """Store and return a fixture event for attribute-matching tests.

    The payload covers every attribute family exercised by this test class
    (message, request, user, exception/stacktrace, tags, extra, platform,
    sdk, contexts, threads); any top-level key can be overridden via
    *kwargs* before the event is stored.
    """
    data = {
        "message": "hello world",
        "request": {"method": "GET", "url": "http://example.com/"},
        "user": {
            "id": "1",
            "ip_address": "127.0.0.1",
            "email": "foo@example.com",
            "username": "foo",
        },
        "exception": {
            "values": [
                {
                    "type": "SyntaxError",
                    "value": "hello world",
                    "stacktrace": {
                        "frames": [
                            {
                                "filename": "example.php",
                                "module": "example",
                                "context_line": 'echo "hello";',
                                "abs_path": "path/to/example.php",
                            }
                        ]
                    },
                    # Links the exception to the thread declared below.
                    "thread_id": 1,
                }
            ]
        },
        "tags": [("environment", "production")],
        "extra": {"foo": {"bar": "baz"}, "biz": ["baz"], "bar": "foo"},
        "platform": "php",
        "sdk": {"name": "sentry.javascript.react", "version": "6.16.1"},
        "contexts": {
            "response": {
                "type": "response",
                "status_code": 500,
            },
            "device": {
                "screen_width_pixels": 1920,
                "screen_height_pixels": 1080,
                "screen_dpi": 123,
                "screen_density": 2.5,
            },
            "app": {
                "in_foreground": True,
            },
            "unreal": {
                "crash_type": "crash",
            },
            "os": {"distribution_name": "ubuntu", "distribution_version": "22.04"},
            "ota_updates": {
                "channel": "production",
                "runtime_version": "1.0.0",
                "update_id": "123",
            },
        },
        "threads": {
            "values": [
                {
                    "id": 1,
                    "main": True,
                },
            ],
        },
    }
    data.update(kwargs)
    event = self.store_event(data, project_id=self.project.id)
    return event
def setup_group_event_and_job(self):
    """Rebuild ``self.group_event`` / ``self.event_data`` from the current ``self.event``."""
    group_event = self.event.for_group(self.group)
    state = GroupState(
        {
            "id": 1,
            "is_regression": False,
            "is_new": False,
            "is_new_group_environment": False,
        }
    )
    self.group_event = group_event
    self.event_data = WorkflowEventData(
        event=group_event,
        group=self.group,
        group_state=state,
    )
def error_setup(self):
    """Swap in an event whose exception mechanism is explicitly unhandled."""
    exception_payload = {
        "values": [
            {
                "type": "Generic",
                "value": "hello world",
                "mechanism": {"type": "UncaughtExceptionHandler", "handled": False},
            }
        ],
    }
    self.event = self.get_event(exception=exception_payload)
    self.setup_group_event_and_job()
def setUp(self) -> None:
    # Build the default fixture event plus a data condition matching
    # platform == "php"; individual tests then mutate the condition via
    # self.dc.comparison.update(...).
    self.event = self.get_event()
    self.setup_group_event_and_job()
    self.dc = self.create_data_condition(
        type=self.condition,
        comparison={"match": MatchType.EQUAL, "attribute": "platform", "value": "php"},
        condition_result=True,
    )
def test_dual_write(self) -> None:
dcg = self.create_data_condition_group()
dc = self.translate_to_data_condition(self.payload, dcg)
assert dc.type == self.condition
assert dc.comparison == {
"match": MatchType.EQUAL,
"value": "php",
"attribute": "platform",
}
assert dc.condition_result is True
assert dc.condition_group == dcg
payload = {
"id": EventAttributeCondition.id,
"match": MatchType.IS_SET,
"attribute": "platform",
}
dc = self.translate_to_data_condition(payload, dcg)
assert dc.type == self.condition
assert dc.comparison == {
"match": MatchType.IS_SET,
"attribute": "platform",
}
assert dc.condition_result is True
assert dc.condition_group == dcg
def test_dual_write_filter(self) -> None:
self.payload["id"] = EventAttributeFilter.id
dcg = self.create_data_condition_group()
dc = self.translate_to_data_condition(self.payload, dcg)
assert dc.type == self.condition
assert dc.comparison == {
"match": MatchType.EQUAL,
"value": "php",
"attribute": "platform",
}
assert dc.condition_result is True
assert dc.condition_group == dcg
def test_json_schema(self) -> None:
self.dc.comparison.update(
{"match": MatchType.EQUAL, "attribute": "platform", "value": "php"}
)
self.dc.save()
self.dc.comparison.update(
{"match": "invalid_match", "attribute": "platform", "value": "php"}
)
with pytest.raises(ValidationError):
self.dc.save()
self.dc.comparison.update({"match": MatchType.EQUAL, "attribute": 0, "value": "php"})
with pytest.raises(ValidationError):
self.dc.save()
self.dc.comparison.update(
{"match": MatchType.EQUAL, "attribute": "platform", "value": 2000}
)
with pytest.raises(ValidationError):
self.dc.save()
self.dc.comparison.update({"attribute": "platform", "value": 2000})
with pytest.raises(ValidationError):
self.dc.save()
self.dc.comparison.update({"match": MatchType.EQUAL, "value": 2000})
with pytest.raises(ValidationError):
self.dc.save()
self.dc.comparison.update({"match": MatchType.EQUAL, "attribute": "platform"})
with pytest.raises(ValidationError):
self.dc.save()
self.dc.comparison.update(
{"match": MatchType.EQUAL, "attribute": "platform", "value": 2000, "extra": "extra"}
)
with pytest.raises(ValidationError):
self.dc.save()
self.dc.comparison.update(
{"match": MatchType.IS_SET, "attribute": "platform", "value": 2000}
)
with pytest.raises(ValidationError):
self.dc.save()
self.dc.comparison.update({"match": MatchType.EQUAL, "attribute": "asdf", "value": 2000})
with pytest.raises(ValidationError):
self.dc.save()
def test_not_in_registry(self) -> None:
with pytest.raises(NoRegistrationExistsError):
attribute_registry.get("transaction")
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "transaction",
"value": "asdf",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_equals(self) -> None:
    """EQUAL matches only the exact platform value."""

    def set_value(value: str) -> None:
        self.dc.comparison.update(
            {"match": MatchType.EQUAL, "attribute": "platform", "value": value}
        )

    set_value("php")
    self.assert_passes(self.dc, self.event_data)

    set_value("python")
    self.assert_does_not_pass(self.dc, self.event_data)
def test_not_equals(self) -> None:
self.dc.comparison.update(
{"match": MatchType.NOT_EQUAL, "attribute": "platform", "value": "php"}
)
self.assert_does_not_pass(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.NOT_EQUAL,
"attribute": "platform",
"value": "python",
}
)
self.assert_passes(self.dc, self.event_data)
def test_starts_with(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.STARTS_WITH,
"attribute": "platform",
"value": "ph",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.STARTS_WITH,
"attribute": "platform",
"value": "py",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_does_not_start_with(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.NOT_STARTS_WITH,
"attribute": "platform",
"value": "ph",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.NOT_STARTS_WITH,
"attribute": "platform",
"value": "py",
}
)
self.assert_passes(self.dc, self.event_data)
def test_ends_with(self) -> None:
self.dc.comparison.update(
{"match": MatchType.ENDS_WITH, "attribute": "platform", "value": "hp"}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.ENDS_WITH,
"attribute": "platform",
"value": "thon",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_does_not_end_with(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.NOT_ENDS_WITH,
"attribute": "platform",
"value": "hp",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.NOT_ENDS_WITH,
"attribute": "platform",
"value": "thon",
}
)
self.assert_passes(self.dc, self.event_data)
def test_contains(self) -> None:
self.dc.comparison.update(
{"match": MatchType.CONTAINS, "attribute": "platform", "value": "p"}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{"match": MatchType.CONTAINS, "attribute": "platform", "value": "z"}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_contains_message(self) -> None:
self.dc.comparison.update(
{"match": MatchType.CONTAINS, "attribute": "message", "value": "hello"}
)
self.assert_passes(self.dc, self.event_data)
# Validate that this searches message in the same way that snuba does
self.event = self.get_event(message="")
self.setup_group_event_and_job()
# This should still pass, even though the message is now empty
self.dc.comparison.update(
{"match": MatchType.CONTAINS, "attribute": "message", "value": "hello"}
)
self.assert_passes(self.dc, self.event_data)
# The search should also include info from the exception if present
self.dc.comparison.update(
{
"match": MatchType.CONTAINS,
"attribute": "message",
"value": "SyntaxError",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.CONTAINS,
"attribute": "message",
"value": "not present",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_does_not_contain(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.NOT_CONTAINS,
"attribute": "platform",
"value": "p",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.NOT_CONTAINS,
"attribute": "platform",
"value": "z",
}
)
self.assert_passes(self.dc, self.event_data)
def test_message(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "message",
"value": "hello world",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{"match": MatchType.EQUAL, "attribute": "message", "value": "php"}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_environment(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "environment",
"value": "production",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "environment",
"value": "staging",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_compares_case_insensitive(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "environment",
"value": "PRODUCTION",
}
)
self.assert_passes(self.dc, self.event_data)
def test_compare_int_value(self) -> None:
self.event.data["extra"]["number"] = 1
self.setup_group_event_and_job()
self.dc.comparison.update(
{"match": MatchType.EQUAL, "attribute": "extra.number", "value": "1"}
)
self.assert_passes(self.dc, self.event_data)
def test_http_method(self) -> None:
self.dc.comparison.update(
{"match": MatchType.EQUAL, "attribute": "http.method", "value": "GET"}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{"match": MatchType.EQUAL, "attribute": "http.method", "value": "POST"}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_http_url(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "http.url",
"value": "http://example.com/",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "http.url",
"value": "http://foo.com/",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_http_status_code(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "http.status_code",
"value": "500",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "http.status_code",
"value": "400",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_user_id(self) -> None:
self.dc.comparison.update({"match": MatchType.EQUAL, "attribute": "user.id", "value": "1"})
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update({"match": MatchType.EQUAL, "attribute": "user.id", "value": "2"})
self.assert_does_not_pass(self.dc, self.event_data)
def test_user_ip_address(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "user.ip_address",
"value": "127.0.0.1",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "user.ip_address",
"value": "2",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_user_email(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "user.email",
"value": "foo@example.com",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{"match": MatchType.EQUAL, "attribute": "user.email", "value": "2"}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_user_username(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "user.username",
"value": "foo",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{"match": MatchType.EQUAL, "attribute": "user.username", "value": "2"}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_exception_type(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "exception.type",
"value": "SyntaxError",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "exception.type",
"value": "TypeError",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
@patch("sentry.services.eventstore.models.get_interfaces", return_value={})
def test_exception_type_keyerror(self, mock_get_interfaces: MagicMock) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "exception.type",
"value": "SyntaxError",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_error_handled(self) -> None:
self.error_setup()
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "error.handled",
"value": "False",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "error.handled",
"value": "True",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_error_handled_not_defined(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "error.handled",
"value": "True",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
@patch("sentry.services.eventstore.models.get_interfaces", return_value={})
def test_error_handled_keyerror(self, mock_get_interfaces: MagicMock) -> None:
self.error_setup()
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "error.handled",
"value": "False",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_error_unhandled(self) -> None:
self.error_setup()
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "error.unhandled",
"value": "True",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "error.unhandled",
"value": "False",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_exception_value(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "exception.value",
"value": "hello world",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "exception.value",
"value": "foo bar",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_sdk_name(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "sdk.name",
"value": "sentry.javascript.react",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "sdk.name",
"value": "sentry.python",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_stacktrace_filename(self) -> None:
"""Stacktrace.filename should match frames anywhere in the stack."""
self.event = self.get_event(
exception={
"values": [
{
"type": "SyntaxError",
"value": "hello world",
"stacktrace": {
"frames": [
{"filename": "example.php", "module": "example"},
{"filename": "somecode.php", "module": "somecode"},
{"filename": "othercode.php", "module": "othercode"},
]
},
}
]
}
)
self.setup_group_event_and_job()
# correctly matching filenames, at various locations in the stacktrace
for value in ["example.php", "somecode.php", "othercode.php"]:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "stacktrace.filename",
"value": value,
}
)
self.assert_passes(self.dc, self.event_data)
# non-matching filename
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "stacktrace.filename",
"value": "foo.php",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_stacktrace_attributeerror(self) -> None:
self.event = self.get_event(
exception={
"values": [
{
"type": "SyntaxError",
"value": "hello world",
}
]
}
)
# hack to trigger attributeerror
self.event.interfaces["exception"]._data["values"][0] = None
self.setup_group_event_and_job()
for value in ["example.php", "somecode.php", "othercode.php"]:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "stacktrace.filename",
"value": value,
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_stacktrace_module(self) -> None:
"""Stacktrace.module should match frames anywhere in the stack."""
self.event = self.get_event(
exception={
"values": [
{
"type": "SyntaxError",
"value": "hello world",
"stacktrace": {
"frames": [
{"filename": "example.php", "module": "example"},
{"filename": "somecode.php", "module": "somecode"},
{"filename": "othercode.php", "module": "othercode"},
]
},
}
]
}
)
self.setup_group_event_and_job()
# correctly matching modules, at various locations in the stacktrace
for value in ["example", "somecode", "othercode"]:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "stacktrace.module",
"value": value,
}
)
self.assert_passes(self.dc, self.event_data)
# non-matching module
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "stacktrace.module",
"value": "foo",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_stacktrace_code(self) -> None:
"""Stacktrace.code should match frames anywhere in the stack."""
self.event = self.get_event(
exception={
"values": [
{
"type": "NameError",
"value": "name 'hi' is not defined",
"stacktrace": {
"frames": [
{
"filename": "example.py",
"module": "example",
"function": "foo",
"context_line": "somecode.bar()",
},
{
"filename": "somecode.py",
"module": "somecode",
"function": "bar",
"context_line": "othercode.baz()",
},
{
"filename": "othercode.py",
"module": "othercode",
"function": "baz",
"context_line": "hi()",
},
]
},
}
]
}
)
self.setup_group_event_and_job()
# correctly matching code, at various locations in the stacktrace
for value in ["somecode.bar()", "othercode.baz()", "hi()"]:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "stacktrace.code",
"value": value,
}
)
self.assert_passes(self.dc, self.event_data)
# non-matching code
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "stacktrace.code",
"value": "foo",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_stacktrace_abs_path(self) -> None:
"""Stacktrace.abs_path should match frames anywhere in the stack."""
self.event = self.get_event(
exception={
"values": [
{
"type": "SyntaxError",
"value": "hello world",
"stacktrace": {
"frames": [
{
"filename": "example.php",
"module": "example",
"abs_path": "path/to/example.php",
},
{
"filename": "somecode.php",
"module": "somecode",
"abs_path": "path/to/somecode.php",
},
{
"filename": "othercode.php",
"module": "othercode",
"abs_path": "path/to/othercode.php",
},
]
},
}
]
}
)
self.setup_group_event_and_job()
# correctly matching abs_paths, at various locations in the stacktrace
for value in ["path/to/example.php", "path/to/somecode.php", "path/to/othercode.php"]:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "stacktrace.abs_path",
"value": value,
}
)
self.assert_passes(self.dc, self.event_data)
# non-matching abs_path
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "stacktrace.abs_path",
"value": "path/to/foo.php",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_stacktrace_package(self) -> None:
"""Stacktrace.package should match frames anywhere in the stack."""
self.event = self.get_event(
exception={
"values": [
{
"type": "SyntaxError",
"value": "hello world",
"stacktrace": {
"frames": [
{"filename": "example.php", "package": "package/example.lib"},
{
"filename": "somecode.php",
"package": "package/otherpackage.lib",
},
{
"filename": "othercode.php",
"package": "package/somepackage.lib",
},
]
},
}
]
}
)
self.setup_group_event_and_job()
# correctly matching filenames, at various locations in the stacktrace
for value in ["package/example.lib", "package/otherpackage.lib", "package/somepackage.lib"]:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "stacktrace.package",
"value": value,
}
)
self.assert_passes(self.dc, self.event_data)
# non-matching filename
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "stacktrace.package",
"value": "package/otherotherpackage.lib",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_extra_simple_value(self) -> None:
self.dc.comparison.update(
{"match": MatchType.EQUAL, "attribute": "extra.bar", "value": "foo"}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{"match": MatchType.EQUAL, "attribute": "extra.bar", "value": "bar"}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_extra_nested_value(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "extra.foo.bar",
"value": "baz",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "extra.foo.bar",
"value": "bar",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_extra_nested_list(self) -> None:
self.dc.comparison.update(
{"match": MatchType.EQUAL, "attribute": "extra.biz", "value": "baz"}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{"match": MatchType.EQUAL, "attribute": "extra.biz", "value": "bar"}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_event_type(self) -> None:
self.event.data["type"] = "error"
self.setup_group_event_and_job()
self.dc.comparison.update({"match": MatchType.EQUAL, "attribute": "type", "value": "error"})
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update({"match": MatchType.EQUAL, "attribute": "type", "value": "csp"})
self.assert_does_not_pass(self.dc, self.event_data)
def test_device_screen_width_pixels(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "device.screen_width_pixels",
"value": "1920",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "device.screen_width_pixels",
"value": "400",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_device_screen_height_pixels(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "device.screen_height_pixels",
"value": "1080",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "device.screen_height_pixels",
"value": "400",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_device_screen_dpi(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "device.screen_dpi",
"value": "123",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "device.screen_dpi",
"value": "400",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_device_screen_density(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "device.screen_density",
"value": "2.5",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "device.screen_density",
"value": "400",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_app_in_foreground(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "app.in_foreground",
"value": "True",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "app.in_foreground",
"value": "False",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_os_distribution_name_and_version(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "os.distribution_name",
"value": "ubuntu",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "os.distribution_name",
"value": "slackware",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "os.distribution_version",
"value": "22.04",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "os.distribution_version",
"value": "20.04",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_ota_updates(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "ota_updates.channel",
"value": "production",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "ota_updates.channel",
"value": "development",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "ota_updates.runtime_version",
"value": "1.0.0",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "ota_updates.runtime_version",
"value": "2.0.0",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "ota_updates.update_id",
"value": "123",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "ota_updates.update_id",
"value": "876",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "ota_updates.non_existent",
"value": "876",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_unreal_crash_type(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "unreal.crash_type",
"value": "Crash",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "unreal.crash_type",
"value": "NoCrash",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_does_not_error_with_none(self) -> None:
self.event = self.get_event(
exception={
"values": [
None,
{
"type": "SyntaxError",
"value": "hello world",
"stacktrace": {
"frames": [
{
"filename": "example.php",
"module": "example",
"context_line": 'echo "hello";',
"abs_path": "path/to/example.php",
}
]
},
"thread_id": 1,
},
]
}
)
self.setup_group_event_and_job()
self.dc.comparison.update(
{
"match": MatchType.EQUAL,
"attribute": "exception.type",
"value": "SyntaxError",
}
)
self.assert_passes(self.dc, self.event_data)
def test_is_set(self) -> None:
    """IS_SET passes when the attribute exists on the event, fails otherwise."""
    for attribute, should_pass in (("platform", True), ("missing", False)):
        self.dc.comparison.update({"match": MatchType.IS_SET, "attribute": attribute})
        if should_pass:
            self.assert_passes(self.dc, self.event_data)
        else:
            self.assert_does_not_pass(self.dc, self.event_data)
def test_not_set(self) -> None:
    """NOT_SET is the inverse of IS_SET: passes only for absent attributes."""
    for attribute, should_pass in (("platform", False), ("missing", True)):
        self.dc.comparison.update({"match": MatchType.NOT_SET, "attribute": attribute})
        if should_pass:
            self.assert_passes(self.dc, self.event_data)
        else:
            self.assert_does_not_pass(self.dc, self.event_data)
def test_attr_is_in(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.IS_IN,
"attribute": "platform",
"value": "php, python",
}
)
self.assert_passes(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.IS_IN,
"attribute": "platform",
"value": "python, ruby",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
def test_attr_not_in(self) -> None:
self.dc.comparison.update(
{
"match": MatchType.NOT_IN,
"attribute": "platform",
"value": "php, python",
}
)
self.assert_does_not_pass(self.dc, self.event_data)
self.dc.comparison.update(
{
"match": MatchType.NOT_IN,
"attribute": "platform",
"value": "python, ruby",
}
)
self.assert_passes(self.dc, self.event_data)
| TestEventAttributeCondition |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_numeric.py | {
"start": 10981,
"end": 11456
} | class ____(TestCase):
def test_isscalar(self):
assert_(np.isscalar(3.1))
assert_(np.isscalar(np.int16(12345)))
assert_(np.isscalar(False))
assert_(np.isscalar("numpy"))
assert_(not np.isscalar([3.1]))
assert_(not np.isscalar(None))
# PEP 3141
from fractions import Fraction
assert_(np.isscalar(Fraction(5, 17)))
from numbers import Number
assert_(np.isscalar(Number()))
| TestIsscalar |
python | sqlalchemy__sqlalchemy | test/ext/test_mutable.py | {
"start": 28667,
"end": 29739
class ____(_MutableDictTestBase, fixtures.MappedTest):
    # Exercises MutableDict change-tracking layered over a JSON-serializing
    # TypeDecorator column, alongside an untracked column of the same type.

    @classmethod
    def define_tables(cls, metadata):
        import json

        class JSONEncodedDict(TypeDecorator):
            # Persists Python dicts as JSON text in a VARCHAR(50) column.
            impl = VARCHAR(50)
            cache_ok = True

            def process_bind_param(self, value, dialect):
                # dict -> JSON string on the way into the database.
                if value is not None:
                    value = json.dumps(value)
                return value

            def process_result_value(self, value, dialect):
                # JSON string -> dict on the way out of the database.
                if value is not None:
                    value = json.loads(value)
                return value

        MutableDict = cls._type_fixture()

        Table(
            "foo",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            # Only "data" gets mutation tracking; "non_mutable_data" uses the
            # raw decorator so tests can contrast the two behaviors.
            Column("data", MutableDict.as_mutable(JSONEncodedDict)),
            Column("non_mutable_data", JSONEncodedDict),
            Column("unrelated_data", String(50)),
        )

    def test_non_mutable(self):
        self._test_non_mutable()
| MutableWithScalarJSONTest |
python | numba__numba | numba/core/typed_passes.py | {
"start": 15559,
"end": 19603
class ____(abc.ABC, LoweringPass):
    """The base class for a lowering pass. The lowering functionality must be
    specified in inheriting classes by providing an appropriate lowering class
    implementation in the overridden `lowering_class` property."""

    _name = None

    def __init__(self):
        LoweringPass.__init__(self)

    @property
    @abc.abstractmethod
    def lowering_class(self):
        """Returns the class that performs the lowering of the IR describing the
        function that is the target of the current compilation."""
        pass

    def run_pass(self, state):
        # Lazily create the code library on first use so it can be shared
        # across subsequent passes via `state.library`.
        if state.library is None:
            codegen = state.targetctx.codegen()
            state.library = codegen.create_library(state.func_id.func_qualname)
            # Enable object caching upfront, so that the library can
            # be later serialized.
            state.library.enable_object_caching()
        library = state.library
        targetctx = state.targetctx
        interp = state.func_ir  # why is it called this?!
        typemap = state.typemap
        restype = state.return_type
        calltypes = state.calltypes
        flags = state.flags
        metadata = state.metadata
        # Snapshot refprune stats now so only this function's delta is
        # recorded into `metadata['prune_stats']` at the end.
        pre_stats = llvm.newpassmanagers.dump_refprune_stats()

        # Add reload functions to library
        library._reload_init.update(state.reload_init)

        msg = ("Function %s failed at nopython "
               "mode lowering" % (state.func_id.func_name,))
        with fallback_context(state, msg):
            # Lowering
            fndesc = \
                funcdesc.PythonFunctionDescriptor.from_specialized_function(
                    interp, typemap, restype, calltypes,
                    mangler=targetctx.mangler, inline=flags.forceinline,
                    noalias=flags.noalias, abi_tags=[flags.get_mangle_string()])

            with targetctx.push_code_library(library):
                lower = self.lowering_class(targetctx, library, fndesc, interp,
                                            metadata=metadata)
                lower.lower()
                if not flags.no_cpython_wrapper:
                    lower.create_cpython_wrapper(flags.release_gil)

                if not flags.no_cfunc_wrapper:
                    # skip cfunc wrapper generation if unsupported
                    # argument or return types are used
                    for t in state.args:
                        if isinstance(t, (types.Omitted, types.Generator)):
                            break
                    else:
                        if isinstance(restype,
                                      (types.Optional, types.Generator)):
                            pass
                        else:
                            lower.create_cfunc_wrapper()

                env = lower.env
                call_helper = lower.call_helper
                # Drop the lowerer promptly; only env/call_helper are needed.
                del lower

            from numba.core.compiler import _LowerResult  # TODO: move this
            if flags.no_compile:
                state['cr'] = _LowerResult(fndesc, call_helper,
                                           cfunc=None, env=env)
            else:
                # Prepare for execution
                # Insert native function for use by other jitted-functions.
                # We also register its library to allow for inlining.
                cfunc = targetctx.get_executable(library, fndesc, env)
                targetctx.insert_user_function(cfunc, fndesc, [library])
                state.reload_init.extend(library._reload_init)
                state['cr'] = _LowerResult(fndesc, call_helper,
                                           cfunc=cfunc, env=env)

            # capture pruning stats
            post_stats = llvm.newpassmanagers.dump_refprune_stats()
            metadata['prune_stats'] = post_stats - pre_stats

            # Save the LLVM pass timings
            metadata['llvm_pass_timings'] = library.recorded_timings
        return True
@register_pass(mutates_CFG=True, analysis_only=False)
| BaseNativeLowering |
python | spyder-ide__spyder | spyder/plugins/run/api.py | {
"start": 2326,
"end": 2996
} | class ____(TypedDict):
    """Run input information schema."""

    # The output format to produce after executing the input. Each entry on the
    # set must belong to the `RunResultFormat` dict. The executor is
    # responsible for producing the correct format. This field will be
    # available on a RunExecutor but it is not necessary for a
    # RunConfigurationProvider to include it.
    output_formats: NotRequired[Set[str]]

    # Input to process by the executor. The executor is responsible for the
    # correct interpretation of the input type.
    run_input: Any

    # Run input metadata information.
    metadata: RunConfigurationMetadata
| RunConfiguration |
python | ethereum__web3.py | web3/beacon/async_beacon.py | {
"start": 1278,
"end": 9706
} | class ____:
is_async = True
def __init__(
self,
base_url: str,
request_timeout: float = 10.0,
) -> None:
self.base_url = base_url
self.request_timeout = request_timeout
self._request_session_manager = HTTPSessionManager()
async def _async_make_get_request(
self, endpoint_uri: str, params: dict[str, str] | None = None
) -> dict[str, Any]:
uri = URI(self.base_url + endpoint_uri)
return await self._request_session_manager.async_json_make_get_request(
uri, params=params, timeout=ClientTimeout(self.request_timeout)
)
async def _async_make_post_request(
self, endpoint_uri: str, body: list[str] | dict[str, Any]
) -> dict[str, Any]:
uri = URI(self.base_url + endpoint_uri)
return await self._request_session_manager.async_json_make_post_request(
uri, json=body, timeout=self.request_timeout
)
# [ BEACON endpoints ]
# states
async def get_genesis(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_GENESIS)
async def get_hash_root(self, state_id: str = "head") -> dict[str, Any]:
return await self._async_make_get_request(GET_HASH_ROOT.format(state_id))
async def get_fork_data(self, state_id: str = "head") -> dict[str, Any]:
return await self._async_make_get_request(GET_FORK_DATA.format(state_id))
async def get_finality_checkpoint(self, state_id: str = "head") -> dict[str, Any]:
return await self._async_make_get_request(
GET_FINALITY_CHECKPOINT.format(state_id)
)
async def get_validators(self, state_id: str = "head") -> dict[str, Any]:
return await self._async_make_get_request(GET_VALIDATORS.format(state_id))
async def get_validator(
self, validator_id: str, state_id: str = "head"
) -> dict[str, Any]:
return await self._async_make_get_request(
GET_VALIDATOR.format(state_id, validator_id)
)
async def get_validator_balances(self, state_id: str = "head") -> dict[str, Any]:
return await self._async_make_get_request(
GET_VALIDATOR_BALANCES.format(state_id)
)
async def get_epoch_committees(self, state_id: str = "head") -> dict[str, Any]:
return await self._async_make_get_request(GET_EPOCH_COMMITTEES.format(state_id))
async def get_epoch_sync_committees(self, state_id: str = "head") -> dict[str, Any]:
return await self._async_make_get_request(
GET_EPOCH_SYNC_COMMITTEES.format(state_id)
)
async def get_epoch_randao(self, state_id: str = "head") -> dict[str, Any]:
return await self._async_make_get_request(GET_EPOCH_RANDAO.format(state_id))
# headers
async def get_block_headers(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_BLOCK_HEADERS)
async def get_block_header(self, block_id: str) -> dict[str, Any]:
return await self._async_make_get_request(GET_BLOCK_HEADER.format(block_id))
# block
async def get_block(self, block_id: str) -> dict[str, Any]:
return await self._async_make_get_request(GET_BLOCK.format(block_id))
async def get_block_root(self, block_id: str) -> dict[str, Any]:
return await self._async_make_get_request(GET_BLOCK_ROOT.format(block_id))
async def get_block_attestations(self, block_id: str) -> dict[str, Any]:
return await self._async_make_get_request(
GET_BLOCK_ATTESTATIONS.format(block_id)
)
async def get_blinded_blocks(self, block_id: str) -> dict[str, Any]:
return await self._async_make_get_request(GET_BLINDED_BLOCKS.format(block_id))
# rewards
async def get_rewards(self, block_id: str) -> dict[str, Any]:
return await self._async_make_get_request(GET_REWARDS.format(block_id))
# light client (untested but follows spec)
async def get_light_client_bootstrap_structure(
self, block_root: HexStr
) -> dict[str, Any]:
return await self._async_make_get_request(
GET_LIGHT_CLIENT_BOOTSTRAP_STRUCTURE.format(block_root)
)
async def get_light_client_updates(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_LIGHT_CLIENT_UPDATES)
async def get_light_client_finality_update(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_LIGHT_CLIENT_FINALITY_UPDATE)
async def get_light_client_optimistic_update(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_LIGHT_CLIENT_OPTIMISTIC_UPDATE)
# pool
async def get_attestations(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_ATTESTATIONS)
async def get_attester_slashings(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_ATTESTER_SLASHINGS)
async def get_proposer_slashings(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_PROPOSER_SLASHINGS)
async def get_voluntary_exits(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_VOLUNTARY_EXITS)
async def get_bls_to_execution_changes(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_BLS_TO_EXECUTION_CHANGES)
# [ CONFIG endpoints ]
async def get_fork_schedule(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_FORK_SCHEDULE)
async def get_spec(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_SPEC)
async def get_deposit_contract(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_DEPOSIT_CONTRACT)
# [ DEBUG endpoints ]
async def get_beacon_state(self, state_id: str = "head") -> dict[str, Any]:
return await self._async_make_get_request(GET_BEACON_STATE.format(state_id))
async def get_beacon_heads(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_BEACON_HEADS)
# [ NODE endpoints ]
async def get_node_identity(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_NODE_IDENTITY)
async def get_peers(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_PEERS)
async def get_peer(self, peer_id: str) -> dict[str, Any]:
return await self._async_make_get_request(GET_PEER.format(peer_id))
async def get_peer_count(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_PEER_COUNT)
async def get_health(self) -> int:
url = URI(self.base_url + GET_HEALTH)
response = (
await self._request_session_manager.async_get_response_from_get_request(url)
)
return response.status
async def get_version(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_VERSION)
async def get_syncing(self) -> dict[str, Any]:
return await self._async_make_get_request(GET_SYNCING)
# [ BLOB endpoints ]
async def get_blob_sidecars(
self, block_id: str, indices: list[int] | None = None
) -> dict[str, Any]:
indices_param = {"indices": ",".join(map(str, indices))} if indices else None
return await self._async_make_get_request(
GET_BLOB_SIDECARS.format(block_id),
params=indices_param,
)
# [ VALIDATOR endpoints ]
async def get_attester_duties(
self, epoch: str, validator_indices: list[str]
) -> dict[str, Any]:
return await self._async_make_post_request(
GET_ATTESTER_DUTIES.format(epoch), validator_indices
)
async def get_block_proposer_duties(self, epoch: str) -> dict[str, Any]:
return await self._async_make_get_request(
GET_BLOCK_PROPOSERS_DUTIES.format(epoch)
)
async def get_sync_committee_duties(
self, epoch: str, validator_indices: list[str]
) -> dict[str, Any]:
return await self._async_make_post_request(
GET_SYNC_COMMITTEE_DUTIES.format(epoch), validator_indices
)
# [ REWARDS endpoints ]
async def get_attestations_rewards(
self, epoch: str, validator_indices: list[str]
) -> dict[str, Any]:
return await self._async_make_post_request(
GET_ATTESTATIONS_REWARDS.format(epoch), validator_indices
)
| AsyncBeacon |
python | skorch-dev__skorch | examples/optuna/skorch_example.py | {
"start": 673,
"end": 4050
} | class ____(nn.Module):
    """Fully-connected MNIST classifier.

    Stacks ``n_layers`` blocks of Linear -> ReLU -> Dropout, then a final
    Linear projection to the 10 MNIST classes.
    """

    def __init__(self, n_layers: int, dropout: float, hidden_units: list[int]) -> None:
        super().__init__()
        layers = []
        input_dim = 28 * 28  # Assuming flattened MNIST input
        for i in range(n_layers):
            layers.append(nn.Linear(input_dim, hidden_units[i]))
            layers.append(nn.ReLU())
            layers.append(nn.Dropout(dropout))
            # Next block consumes this block's output width.
            input_dim = hidden_units[i]
        # Output head: one logit per MNIST class.
        layers.append(nn.Linear(input_dim, 10))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        # A dict batch may be passed (presumably by a skorch Dataset with the
        # tensor stored under "data" — confirm against the data pipeline).
        if isinstance(x, dict):
            x = x["data"]
        return self.model(x)
def convert_to_numpy(X_train, X_test, y_train, y_test):
    """Convert pandas train/test splits into NumPy arrays.

    Feature frames are cast to ``float32`` (the dtype the torch model
    consumes); label series keep their original dtype.

    Returns:
        Tuple of (X_train, X_test, y_train, y_test) as NumPy arrays.
    """
    X_train_np, X_test_np = (
        frame.to_numpy().astype(np.float32) for frame in (X_train, X_test)
    )
    y_train_np, y_test_np = (labels.to_numpy() for labels in (y_train, y_test))
    return X_train_np, X_test_np, y_train_np, y_test_np
def objective(trial: optuna.Trial, X_train, X_test, y_train, y_test) -> float:
    """Optuna objective: sample an architecture + training config, fit a
    skorch classifier, and return its test-set accuracy (maximized)."""
    n_layers = trial.suggest_int("n_layers", 1, 3)
    dropout = trial.suggest_float("dropout", 0.0, 0.5)
    # One hidden-layer width per layer, sampled log-uniformly in [4, 128].
    hidden_units = [trial.suggest_int(f"n_units_l{i}", 4, 128, log=True) for i in range(n_layers)]

    model = ClassifierModule(n_layers, dropout, hidden_units)

    X_train_np, X_test_np, y_train_np, y_test_np = convert_to_numpy(X_train, X_test, y_train, y_test)

    net = skorch.NeuralNetClassifier(
        model,
        criterion=torch.nn.CrossEntropyLoss,
        max_epochs=trial.suggest_int("max_epochs", 10, 50),
        lr=trial.suggest_float("lr", 1e-4, 1e-1, log=True),
        device=device,
        verbose=0,
        # Reports "valid_acc" to Optuna each epoch so the pruner can stop
        # unpromising trials early.
        callbacks=[SkorchPruningCallback(trial, "valid_acc")],
    )

    net.fit(X_train_np, y_train_np)

    return accuracy_score(y_test_np, net.predict(X_test_np))
def main(args):
    """Load a subset of MNIST, run the Optuna study, and print the best trial."""
    # Load and preprocess data
    subset_ratio = 0.4
    X, y = load_mnist_data(subset_ratio)
    # NOTE(review): the data appears to be subsampled twice — subset_ratio is
    # passed to load_mnist_data above AND applied again below; confirm this
    # double reduction is intentional.
    indices = np.random.permutation(len(X))
    N = int(len(X) * subset_ratio)
    X, y = X.iloc[indices][:N], y.iloc[indices][:N]
    X /= 255.0  # scale pixel values into [0, 1]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)

    # Run optimization
    pruner = optuna.pruners.MedianPruner() if args.pruning else optuna.pruners.NopPruner()
    study = optuna.create_study(direction="maximize", pruner=pruner)
    study.optimize(lambda trial: objective(trial, X_train, X_test, y_train, y_test), n_trials=args.n_trials, timeout=args.timeout)

    # Print results
    print(f"Number of finished trials: {len(study.trials)}")
    print(f"Best trial value: {study.best_trial.value}")
    print("Best trial parameters:")
    for key, value in study.best_trial.params.items():
        print(f"  {key}: {value}")
if __name__ == "__main__":
    # CLI entry point: parse study options and launch the optimization.
    parser = argparse.ArgumentParser(description="skorch example.")
    parser.add_argument(
        "--pruning",
        "-p",
        action="store_true",
        help="Activate the pruning feature. MedianPruner stops unpromising trials early.",
    )
    parser.add_argument(
        "--n_trials",
        "-n",
        type=int,
        default=100,
        help="Number of trials to run in the study (default: 100).",
    )
    parser.add_argument(
        "--timeout",
        "-t",
        type=int,
        default=600,
        help="Timeout in seconds for the study (default: 600).",
    )
    args = parser.parse_args()
    main(args)
| ClassifierModule |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/elements.py | {
"start": 140146,
"end": 141851
} | class ____(Grouping[_T]):
    """used by cache_key->_apply_params_to_element to allow compilation /
    execution of a SQL element that's been cached, using an alternate set of
    bound parameter values.

    This is used by the ORM to swap new parameter values into expressions
    that are embedded into loader options like with_expression(),
    selectinload(). Previously, this task was accomplished using the
    .params() method which would perform a deep-copy instead. This deep
    copy proved to be too expensive for more complex expressions.

    See #11085

    """

    __visit_name__ = "override_binds"

    def __init__(
        self,
        element: ColumnElement[_T],
        bindparams: Sequence[BindParameter[Any]],
        replaces_params: Sequence[BindParameter[Any]],
    ):
        self.element = element
        # Map each replaced parameter's key to the new value to substitute;
        # replaces_params and bindparams pair up positionally.
        self.translate = {
            k.key: v.value for k, v in zip(replaces_params, bindparams)
        }

    def _gen_cache_key(
        self, anon_map: anon_map, bindparams: List[BindParameter[Any]]
    ) -> Optional[typing_Tuple[Any, ...]]:
        """generate a cache key for the given element, substituting its bind
        values for the translation values present."""

        # Generate the wrapped element's cache key, collecting the bind
        # parameters it contributes into existing_bps.
        existing_bps: List[BindParameter[Any]] = []
        ck = self.element._gen_cache_key(anon_map, existing_bps)

        # Propagate the collected params outward, substituting the
        # translated value wherever a replacement was registered for the
        # parameter's key; untranslated params pass through unchanged.
        bindparams.extend(
            (
                bp._with_value(
                    self.translate[bp.key], maintain_key=True, required=False
                )
                if bp.key in self.translate
                else bp
            )
            for bp in existing_bps
        )

        return ck
_FrameIntTuple = tuple[int | None, int | None]
| _OverrideBinds |
python | doocs__leetcode | solution/1600-1699/1628.Design an Expression Tree With Evaluate Function/Solution.py | {
"start": 185,
"end": 298
} | class ____(ABC):
    """Abstract base for expression-tree nodes.

    Concrete subclasses hold their own state (e.g. an operator or operand
    value plus child nodes) and implement :meth:`evaluate`.
    """

    # define your fields here (in concrete subclasses)
    @abstractmethod
    def evaluate(self) -> int:
        """Return the integer value of the subtree rooted at this node."""
        pass
| Node |
python | google__pytype | pytype/typegraph/typegraph_serializer.py | {
"start": 1017,
"end": 1209
} | class ____:
    """JSON-serializable record for one control-flow-graph node.

    NOTE(review): presumably decorated as a dataclass like its siblings —
    the decorator sits above this view; confirm.
    """

    # Unique id of this CFG node.
    id: CFGNodeId
    name: str
    # Ids of predecessor CFG nodes.
    incoming: list[CFGNodeId]
    # Ids of successor CFG nodes.
    outgoing: list[CFGNodeId]
    # Ids of the bindings attached at this node.
    bindings: list[BindingId]
    # Binding guarding this node, if any.
    condition: BindingId | None
@dataclasses.dataclass
| SerializedCFGNode |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 143463,
"end": 145606
} | class ____(AtomicExprNode):
    # Used as part of for statement implementation.
    # Implements result = next(iterator)
    # Created during analyse_types phase.
    # The iterator is not owned by this node.
    #
    #  iterator   IteratorNode

    is_temp = True

    def __init__(self, iterator):
        AtomicExprNode.__init__(self, iterator.pos)
        self.iterator = iterator

    def nogil_check(self, env):
        # ignore - errors (if any) are already handled by IteratorNode
        pass

    def type_dependencies(self, env):
        return self.iterator.type_dependencies(env)

    def infer_type(self, env, iterator_type=None):
        if iterator_type is None:
            iterator_type = self.iterator.infer_type(env)
        if iterator_type.is_ptr or iterator_type.is_array:
            # Pointer/array iteration yields the pointed-to element type.
            return iterator_type.base_type
        elif iterator_type.is_cpp_class:
            # C++ iterators: the item type is operator*()'s return type,
            # stripped of const/reference wrappers.
            item_type = env.lookup_operator_for_types(self.pos, "*", [iterator_type]).type.return_type
            item_type = PyrexTypes.remove_cv_ref(item_type, remove_fakeref=True)
            return item_type
        else:
            # Avoid duplication of complicated logic.
            fake_index_node = IndexNode(
                self.pos,
                base=self.iterator.sequence,
                index=IntNode(self.pos, value='PY_SSIZE_T_MAX',
                              type=PyrexTypes.c_py_ssize_t_type))
            return fake_index_node.infer_type(env)

    def analyse_types(self, env):
        item_type = self.infer_type(env, self.iterator.type)
        if self.iterator.type.is_pyobject and not item_type.is_pyobject:
            # We definitely read a Python object from the iterable but inferred a C type for it,
            # probably by anticipating to unpack it. Do the coercion outside to allow undoing it later.
            self.type = item_type.equivalent_type or py_object_type
            return self.coerce_to(item_type, env)
        else:
            self.type = item_type
            return self

    def generate_result_code(self, code):
        self.iterator.generate_iter_next_result_code(self.result(), code)
| NextNode |
python | pytorch__pytorch | test/dynamo/cpython/3_13/typinganndata/ann_module.py | {
"start": 210,
"end": 325
} | class ____:
    # Fixture class: one plain class attribute and one annotated-with-default
    # attribute, exercised by tests that inspect __annotations__.
    x = 5; y: Optional['C'] = None

from typing import Tuple
# Module-level annotated assignments for the same annotation tests.
# NOTE: y's value intentionally mismatches its `str` annotation —
# annotations are not enforced at runtime; f is annotated but unassigned.
x: int = 5; y: str = x; f: Tuple[int, int]
| C |
python | catalyst-team__catalyst | catalyst/data/sampler.py | {
"start": 16898,
"end": 18763
} | class ____(DistributedSampler):
    """
    Wrapper over `Sampler` for distributed training.
    Allows you to use any sampler in distributed mode.

    It is especially useful in conjunction with
    `torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSamplerWrapper instance as a DataLoader
    sampler, and load a subset of subsampled data of the original dataset
    that is exclusive to it.

    .. note::
        Sampler is assumed to be of constant size.
    """

    def __init__(
        self,
        sampler,
        num_replicas: Optional[int] = None,
        rank: Optional[int] = None,
        shuffle: bool = True,
    ):
        """
        Args:
            sampler: Sampler used for subsampling
            num_replicas (int, optional): Number of processes participating in
                distributed training
            rank (int, optional): Rank of the current process
                within ``num_replicas``
            shuffle (bool, optional): If true (default),
                sampler will shuffle the indices
        """
        # Expose the wrapped sampler's indices as a Dataset so the parent
        # DistributedSampler can partition positions across processes.
        super(DistributedSamplerWrapper, self).__init__(
            DatasetFromSampler(sampler),
            num_replicas=num_replicas,
            rank=rank,
            shuffle=shuffle,
        )
        self.sampler = sampler

    def __iter__(self) -> Iterator[int]:
        """Iterate over sampler.

        Returns:
            python iterator
        """
        # Re-materialize the wrapped sampler each epoch so a fresh index
        # sequence is drawn, then map this rank's positions back to the
        # sampler's actual indices.
        # NOTE(review): itemgetter(*idx) returns a bare value, not a tuple,
        # when there is exactly one index — a per-rank subset of length 1
        # would not iterate as intended; confirm this case cannot occur.
        self.dataset = DatasetFromSampler(self.sampler)
        indexes_of_indexes = super().__iter__()
        subsampler_indexes = self.dataset
        return iter(itemgetter(*indexes_of_indexes)(subsampler_indexes))
__all__ = [
"BalanceClassSampler",
"BatchBalanceClassSampler",
"DistributedSamplerWrapper",
"DynamicBalanceClassSampler",
"MiniEpochSampler",
]
| DistributedSamplerWrapper |
python | celery__celery | celery/security/certificate.py | {
"start": 2765,
"end": 3520
} | class ____:
"""Base class for certificate stores."""
def __init__(self) -> None:
self._certs: dict[str, Certificate] = {}
def itercerts(self) -> Iterator[Certificate]:
"""Return certificate iterator."""
yield from self._certs.values()
def __getitem__(self, id: str) -> Certificate:
"""Get certificate by id."""
try:
return self._certs[bytes_to_str(id)]
except KeyError:
raise SecurityError(f'Unknown certificate: {id!r}')
def add_cert(self, cert: Certificate) -> None:
cert_id = bytes_to_str(cert.get_id())
if cert_id in self._certs:
raise SecurityError(f'Duplicate certificate: {id!r}')
self._certs[cert_id] = cert
| CertStore |
python | pytorch__pytorch | torch/_streambase.py | {
"start": 395,
"end": 435
} | class ____(torch.Event):
    # Thin subclass adding no behavior — NOTE(review): presumably kept as a
    # common base / legacy alias for device event types; confirm against
    # the subclasses that inherit from it.
    pass
| _EventBase |
python | streamlit__streamlit | lib/tests/streamlit/elements/text_area_test.py | {
"start": 1205,
"end": 12261
} | class ____(DeltaGeneratorTestCase):
"""Test ability to marshall text_area protos."""
def test_just_label(self):
"""Test that it can be called with no value."""
st.text_area("the label")
c = self.get_delta_from_queue().new_element.text_area
assert c.label == "the label"
assert (
c.label_visibility.value
== LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE
)
assert c.default == ""
assert c.HasField("default")
assert not c.disabled
def test_just_disabled(self):
"""Test that it can be called with disabled param."""
st.text_area("the label", disabled=True)
c = self.get_delta_from_queue().new_element.text_area
assert c.disabled
def test_value_types(self):
"""Test that it supports different types of values."""
arg_values = ["some str", 123, {}, SomeObj()]
proto_values = ["some str", "123", "{}", ".*SomeObj.*"]
for arg_value, proto_value in zip(arg_values, proto_values, strict=False):
st.text_area("the label", arg_value)
c = self.get_delta_from_queue().new_element.text_area
assert c.label == "the label"
assert re.match(proto_value, c.default)
def test_none_value(self):
"""Test that it can be called with None as initial value."""
st.text_area("the label", value=None)
c = self.get_delta_from_queue().new_element.text_area
assert c.label == "the label"
# If a proto property is null, it is not determined by
# this value, but by the check via the HasField method:
assert c.default == ""
assert not c.HasField("default")
def test_placeholder(self):
"""Test that it can be called with placeholder"""
st.text_area("the label", "", placeholder="testing")
c = self.get_delta_from_queue().new_element.text_area
assert c.label == "the label"
assert c.default == ""
assert c.placeholder == "testing"
def test_outside_form(self):
"""Test that form id is marshalled correctly outside of a form."""
st.text_area("foo")
proto = self.get_delta_from_queue().new_element.color_picker
assert proto.form_id == ""
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
def test_inside_form(self):
"""Test that form id is marshalled correctly inside of a form."""
with st.form("form"):
st.text_area("foo")
# 2 elements will be created: form block, widget
assert len(self.get_all_deltas_from_queue()) == 2
form_proto = self.get_delta_from_queue(0).add_block
text_area_proto = self.get_delta_from_queue(1).new_element.text_area
assert text_area_proto.form_id == form_proto.form.form_id
def test_inside_column(self):
"""Test that it works correctly inside of a column."""
col1, _col2, _col3 = st.columns([2.5, 1.5, 8.3])
with col1:
st.text_area("foo")
all_deltas = self.get_all_deltas_from_queue()
# 5 elements will be created: 1 horizontal block, 3 columns, 1 widget
assert len(all_deltas) == 5
text_area_proto = self.get_delta_from_queue().new_element.text_area
assert text_area_proto.label == "foo"
@parameterized.expand(
[
("visible", LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE),
("hidden", LabelVisibilityMessage.LabelVisibilityOptions.HIDDEN),
("collapsed", LabelVisibilityMessage.LabelVisibilityOptions.COLLAPSED),
]
)
def test_label_visibility(self, label_visibility_value, proto_value):
"""Test that it can be called with label_visibility param."""
st.text_area("the label", label_visibility=label_visibility_value)
c = self.get_delta_from_queue().new_element.text_area
assert c.label_visibility.value == proto_value
def test_label_visibility_wrong_value(self):
with pytest.raises(StreamlitAPIException) as e:
st.text_area("the label", label_visibility="wrong_value")
assert (
str(e.value)
== "Unsupported label_visibility option 'wrong_value'. Valid values are 'visible', 'hidden' or 'collapsed'."
)
def test_width_config_default(self):
"""Test that default width is 'stretch'."""
st.text_area("the label")
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert c.width_config.use_stretch
def test_width_config_pixel(self):
"""Test that pixel width works properly."""
st.text_area("the label", width=100)
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert c.width_config.pixel_width == 100
def test_width_config_stretch(self):
"""Test that 'stretch' width works properly."""
st.text_area("the label", width="stretch")
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert c.width_config.use_stretch
@parameterized.expand(
[
"invalid",
-100,
0,
100.5,
None,
]
)
def test_invalid_width(self, width):
"""Test that invalid width values raise exceptions."""
with pytest.raises(StreamlitInvalidWidthError):
st.text_area("the label", width=width)
def test_height_config_default(self):
"""Test that default height is 122 pixels."""
st.text_area("the label")
c = self.get_delta_from_queue().new_element
# Default height should be set to 122 pixels.
assert c.height_config.pixel_height == 122
def test_height_config_pixel(self):
"""Test that pixel height works properly."""
st.text_area("the label", height=150)
c = self.get_delta_from_queue().new_element
assert (
c.height_config.WhichOneof("height_spec")
== HeightConfigFields.PIXEL_HEIGHT.value
)
assert c.height_config.pixel_height == 150
def test_height_config_content(self):
"""Test that 'content' height works properly."""
st.text_area("the label", height="content")
c = self.get_delta_from_queue().new_element
assert (
c.height_config.WhichOneof("height_spec")
== HeightConfigFields.USE_CONTENT.value
)
assert c.height_config.use_content
def test_height_config_stretch(self):
"""Test that 'stretch' height works properly."""
st.text_area("the label", height="stretch")
c = self.get_delta_from_queue().new_element
assert (
c.height_config.WhichOneof("height_spec")
== HeightConfigFields.USE_STRETCH.value
)
assert c.height_config.use_stretch
@parameterized.expand(
[
100.5,
"invalid",
0,
-100,
]
)
def test_invalid_height(self, height):
"""Test that invalid height values raise exceptions."""
with pytest.raises(StreamlitInvalidHeightError):
st.text_area("the label", height=height)
def test_help_dedents(self):
"""Test that help properly dedents"""
st.text_area(
"the label",
value="TESTING",
help="""\
Hello World!
This is a test
""",
)
c = self.get_delta_from_queue().new_element.text_area
assert c.label == "the label"
assert c.default == "TESTING"
assert c.help == """Hello World!\nThis is a test\n\n\n"""
def test_shows_cached_widget_replay_warning(self):
"""Test that a warning is shown when this widget is used inside a cached function."""
st.cache_data(lambda: st.text_area("the label"))()
# The widget itself is still created, so we need to go back one element more:
el = self.get_delta_from_queue(-2).new_element.exception
assert el.type == "CachedWidgetWarning"
assert el.is_warning
def test_stable_id_with_key(self):
"""Test that the widget ID is stable when a stable key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
# First render with certain params
st.text_area(
label="Label 1",
key="text_area_key",
value="abc",
help="Help 1",
disabled=False,
width="stretch",
on_change=lambda: None,
args=("arg1", "arg2"),
kwargs={"kwarg1": "kwarg1"},
label_visibility="visible",
placeholder="placeholder 1",
max_chars=50,
height=200,
)
c1 = self.get_delta_from_queue().new_element.text_area
id1 = c1.id
# Second render with different params but same key (keep max_chars the same)
st.text_area(
label="Label 2",
key="text_area_key",
value="def",
help="Help 2",
disabled=True,
width=200,
on_change=lambda: None,
args=("arg_1", "arg_2"),
kwargs={"kwarg_1": "kwarg_1"},
label_visibility="hidden",
placeholder="placeholder 2",
max_chars=50,
height="content",
)
c2 = self.get_delta_from_queue().new_element.text_area
id2 = c2.id
assert id1 == id2
@parameterized.expand(
[
("max_chars", 100, 200),
]
)
def test_whitelisted_stable_key_kwargs(
self, kwarg_name: str, value1: object, value2: object
):
"""Test that the widget ID changes when a whitelisted kwarg changes even when the key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
st.text_area(
label="Label 1",
key="text_area_key",
**{kwarg_name: value1},
)
c1 = self.get_delta_from_queue().new_element.text_area
id1 = c1.id
st.text_area(
label="Label 2",
key="text_area_key",
**{kwarg_name: value2},
)
c2 = self.get_delta_from_queue().new_element.text_area
id2 = c2.id
assert id1 != id2
| TextAreaTest |
python | getsentry__sentry | src/sentry/api/endpoints/organization_stats_summary.py | {
"start": 1415,
"end": 4817
} | class ____(serializers.Serializer):
# time params
statsPeriod = serializers.CharField(
help_text=(
"This defines the range of the time series, relative to now. "
"The range is given in a `<number><unit>` format. "
"For example `1d` for a one day range. Possible units are `m` for minutes, `h` for hours, `d` for days and `w` for weeks."
"You must either provide a `statsPeriod`, or a `start` and `end`."
),
required=False,
)
interval = serializers.CharField(
help_text=(
"This is the resolution of the time series, given in the same format as `statsPeriod`. "
"The default resolution is `1h` and the minimum resolution is currently restricted to `1h` as well. "
"Intervals larger than `1d` are not supported, and the interval has to cleanly divide one day."
),
required=False,
)
start = serializers.DateTimeField(
help_text="This defines the start of the time series range as an explicit datetime, either in UTC ISO8601 or epoch seconds."
"Use along with `end` instead of `statsPeriod`.",
required=False,
)
end = serializers.DateTimeField(
help_text=(
"This defines the inclusive end of the time series range as an explicit datetime, either in UTC ISO8601 or epoch seconds."
"Use along with `start` instead of `statsPeriod`."
),
required=False,
)
field = serializers.ChoiceField(
list(COLUMN_MAP.keys()),
help_text=(
"the `sum(quantity)` field is bytes for attachments, and all others the 'event' count for those types of events.\n\n"
"`sum(times_seen)` sums the number of times an event has been seen. "
"For 'normal' event types, this will be equal to `sum(quantity)` for now. "
"For sessions, quantity will sum the total number of events seen in a session, while `times_seen` will be the unique number of sessions. "
"and for attachments, `times_seen` will be the total number of attachments, while quantity will be the total sum of attachment bytes."
),
)
# filter parameters
project = serializers.ListField(
required=False,
help_text="The ID of the projects to filter by.",
)
category = serializers.ChoiceField(
("error", "transaction", "attachment", "replays", "profiles"),
required=False,
help_text=(
"If filtering by attachments, you cannot filter by any other category due to quantity values becoming nonsensical (combining bytes and event counts).\n\n"
"If filtering by `error`, it will automatically add `default` and `security` as we currently roll those two categories into `error` for displaying."
),
)
outcome = serializers.ChoiceField(
[o.name.lower() for o in Outcome],
required=False,
help_text="See https://docs.sentry.io/product/stats/ for more information on outcome statuses.",
)
reason = serializers.CharField(
required=False, help_text="The reason field will contain why an event was filtered/dropped."
)
# download the file
download = serializers.BooleanField(
help_text=("Download the API response in as a csv file"),
required=False,
)
| OrgStatsSummaryQueryParamsSerializer |
python | spack__spack | lib/spack/spack/solver/asp.py | {
"start": 20672,
"end": 30736
} | class ____:
"""Store for Spack concretization results and statistics
Serializes solver result objects and statistics to json and stores
at a given endpoint in a cache associated by the sha256 of the
asp problem and the involved control files.
"""
def __init__(self, root: Union[str, None] = None):
root = root or spack.config.get("concretizer:concretization_cache:url", None)
if root is None:
root = os.path.join(spack.caches.misc_cache_location(), "concretization")
self.root = pathlib.Path(spack.util.path.canonicalize_path(root))
self.root.mkdir(parents=True, exist_ok=True)
self._lockfile = self.root / ".cc_lock"
def cleanup(self):
"""Prunes the concretization cache according to configured entry
count limits. Cleanup is done in LRU ordering."""
entry_limit = spack.config.get("concretizer:concretization_cache:entry_limit", 1000)
# determine if we even need to clean up
entries = list(self.cache_entries())
if len(entries) <= entry_limit:
return
# collect stat info for mod time about all entries
removal_queue = []
for entry in entries:
try:
entry_stat_info = entry.stat()
# mtime will always be time of last use as we update it after
# each read and obviously after each write
mod_time = entry_stat_info.st_mtime
removal_queue.append((mod_time, entry))
except FileNotFoundError:
# don't need to cleanup the file, it's not there!
pass
removal_queue.sort() # sort items for removal, ascending, so oldest first
# Try to remove the oldest half of the cache.
for _, entry_to_rm in removal_queue[: entry_limit // 2]:
# cache bucket was removed by another process -- that's fine; move on
if not entry_to_rm.exists():
continue
try:
with self.write_transaction(entry_to_rm, timeout=1e-6):
self._safe_remove(entry_to_rm)
except lk.LockTimeoutError:
# if we can't get a lock, it's either
# 1) being read, so it's been used recently, i.e. not a good candidate for LRU,
# 2) it's already being removed by another process, so we don't care, or
# 3) system is busy, but we don't really need to wait just for cache cleanup.
pass # so skip it
def cache_entries(self):
"""Generator producing cache entries within a bucket"""
for cache_entry in self.root.iterdir():
# Lockfile starts with "."
# old style concretization cache entries are in directories
if not cache_entry.name.startswith(".") and cache_entry.is_file():
yield cache_entry
def _results_from_cache(self, cache_entry_file: str) -> Union[Result, None]:
"""Returns a Results object from the concretizer cache
Reads the cache hit and uses `Result`'s own deserializer
to produce a new Result object
"""
cache_entry = json.loads(cache_entry_file)
result_json = cache_entry["results"]
return Result.from_dict(result_json)
def _stats_from_cache(self, cache_entry_file: str) -> Union[Dict, None]:
"""Returns concretization statistic from the
concretization associated with the cache.
Deserialzes the the json representation of the
statistics covering the cached concretization run
and returns the Python data structures
"""
return json.loads(cache_entry_file)["statistics"]
def _prefix_digest(self, problem: str) -> str:
"""Return the first two characters of, and the full, sha256 of the given asp problem"""
return spack.util.hash.b32_hash(problem)
def _cache_path_from_problem(self, problem: str) -> pathlib.Path:
"""Returns a Path object representing the path to the cache
entry for the given problem where the problem is the sha256 of the given asp problem"""
prefix = self._prefix_digest(problem)
return self.root / prefix
def _safe_remove(self, cache_dir: pathlib.Path) -> bool:
"""Removes cache entries with handling for the case where the entry has been
removed already or there are multiple cache entries in a directory"""
try:
cache_dir.unlink()
return True
except FileNotFoundError:
# That's fine, removal is idempotent
pass
except OSError as e:
# Catch other timing/access related issues
tty.debug(
f"Exception occured while attempting to remove Concretization Cache entry, {e}"
)
pass
return False
def _lock(self, path: pathlib.Path) -> lk.Lock:
"""Returns a lock over the byte range correspnding to the hash of the asp problem.
``path`` is a path to a file in the cache, and its basename is the hash of the problem.
Args:
path: absolute or relative path to concretization cache entry to be locked
"""
return lk.Lock(
str(self._lockfile),
start=spack.util.hash.base32_prefix_bits(
path.name, spack.util.crypto.bit_length(sys.maxsize)
),
length=1,
desc=f"Concretization cache lock for {path}",
)
def read_transaction(
self, path: pathlib.Path, timeout: Optional[float] = None
) -> lk.ReadTransaction:
"""Read transactions for concretization cache entries.
Args:
path: absolute or relative path to the concretization cache entry to be locked
timeout: give up after this many seconds
"""
return lk.ReadTransaction(self._lock(path), timeout=timeout)
def write_transaction(
self, path: pathlib.Path, timeout: Optional[float] = None
) -> lk.WriteTransaction:
"""Write transactions for concretization cache entries
Args:
path: absolute or relative path to the concretization cache entry to be locked
timeout: give up after this many seconds
"""
return lk.WriteTransaction(self._lock(path), timeout=timeout)
def store(self, problem: str, result: Result, statistics: List) -> None:
"""Creates entry in concretization cache for problem if none exists,
storing the concretization Result object and statistics in the cache
as serialized json joined as a single file.
Hash membership is computed based on the sha256 of the provided asp
problem.
"""
cache_path = self._cache_path_from_problem(problem)
with self.write_transaction(cache_path, timeout=30):
if cache_path.exists():
# if cache path file exists, we already have a cache entry, likely created
# by another process. Exit early.
return
with gzip.open(cache_path, "xb", compresslevel=6) as cache_entry:
cache_dict = {"results": result.to_dict(), "statistics": statistics}
cache_entry.write(json.dumps(cache_dict).encode())
def fetch(self, problem: str) -> Union[Tuple[Result, Dict], Tuple[None, None]]:
"""Returns the concretization cache result for a lookup based on the given problem.
Checks the concretization cache for the given problem, and either returns the
Python objects cached on disk representing the concretization results and statistics
or returns none if no cache entry was found.
"""
cache_path = self._cache_path_from_problem(problem)
if not cache_path.exists():
return None, None # if exists is false, then there's no chance of a hit
cache_content = None
try:
with self.read_transaction(cache_path, timeout=2):
try:
with gzip.open(cache_path, "rb", compresslevel=6) as f:
f.peek(1) # Try to read at least one byte
f.seek(0)
cache_content = f.read().decode("utf-8")
except OSError:
# Cache may have been created pre compression check if gzip, and if not,
# read from plaintext otherwise re raise
with open(cache_path, "rb") as f:
# raise if this is a gzip file we failed to open
if GZipFileType().matches_magic(f):
raise
cache_content = f.read().decode()
except FileNotFoundError:
pass # cache miss, already cleaned up
except lk.LockTimeoutError:
pass # if the lock times, out skip the cache
if not cache_content:
return None, None
# update mod/access time for use w/ LRU cleanup
os.utime(cache_path)
return (
self._results_from_cache(cache_content),
self._stats_from_cache(cache_content),
) # type: ignore
def _is_checksummed_git_version(v):
return isinstance(v, vn.GitVersion) and v.is_commit
def _is_checksummed_version(version_info: Tuple[GitOrStandardVersion, dict]):
"""Returns true iff the version is not a moving target"""
version, info = version_info
if isinstance(version, spack.version.StandardVersion):
if any(h in info for h in spack.util.crypto.hashes.keys()) or "checksum" in info:
return True
return "commit" in info and len(info["commit"]) == 40
return _is_checksummed_git_version(version)
def _spec_with_default_name(spec_str, name):
"""Return a spec with a default name if none is provided, used for requirement specs"""
spec = spack.spec.Spec(spec_str)
if not spec.name:
spec.name = name
return spec
| ConcretizationCache |
python | doocs__leetcode | solution/2100-2199/2138.Divide a String Into Groups of Size k/Solution.py | {
"start": 0,
"end": 158
} | class ____:
def divideString(self, s: str, k: int, fill: str) -> List[str]:
return [s[i : i + k].ljust(k, fill) for i in range(0, len(s), k)]
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/color.py | {
"start": 2625,
"end": 13486
} | class ____:
"""
A class to represent an Excel color.
"""
def __init__(self, color: Union[str, int, Tuple[int, int]]) -> None:
"""
Initialize a Color instance.
Args:
color (Union[str, int, Tuple[int, int]]): The value of the color
(e.g., a hex string, an integer, or a tuple of two integers).
"""
self._rgb_value: int = 0x000000
self._type: ColorTypes = ColorTypes.RGB
self._theme_color: Tuple[int, int] = (0, 0)
self._is_automatic: bool = False
if isinstance(color, str):
self._parse_string_color(color)
self._type = ColorTypes.RGB
elif isinstance(color, int):
if color > 0xFFFFFF:
raise ValueError("RGB color must be in the range 0x000000 - 0xFFFFFF.")
self._rgb_value = color
self._type = ColorTypes.RGB
elif (
isinstance(color, tuple)
and len(color) == 2
and all(isinstance(v, int) for v in color)
):
if color[0] > 9:
raise ValueError("Theme color must be in the range 0-9.")
if color[1] > 5:
raise ValueError("Theme shade must be in the range 0-5.")
self._theme_color = color
self._type = ColorTypes.THEME
else:
raise ValueError(
"Invalid color value. Must be a string, integer, or tuple."
)
def __repr__(self) -> str:
"""
Return a string representation of the Color instance.
"""
if self._type == ColorTypes.RGB:
value = f"0x{self._rgb_value:06X}"
else:
value = f"Theme({self._theme_color[0]}, {self._theme_color[1]})"
return (
f"Color("
f"value={value}, "
f"type={self._type.name}, "
f"is_automatic={self._is_automatic})"
)
@staticmethod
def _from_value(value: Union["Color", str]) -> "Color":
"""
Internal method to convert a string to a Color instance or return the
Color instance if already provided. This is mainly used for backward
compatibility support in the XlsxWriter API.
Args:
value (Union[Color, str]): A Color instance or a string representing
a color.
Returns:
Color: A Color instance.
"""
if isinstance(value, Color):
return value
if isinstance(value, str):
return Color(value)
raise TypeError("Value must be a Color instance or a string.")
@staticmethod
def rgb(color: str) -> "Color":
"""
Create a user-defined RGB color from a Html color string.
Args:
color (int): An RGB value in the range 0x000000 (black) to 0xFFFFFF (white).
Returns:
Color: A Color object representing an Excel RGB color.
"""
return Color(color)
@staticmethod
def rgb_integer(color: int) -> "Color":
"""
Create a user-defined RGB color from an integer value.
Args:
color (int): An RGB value in the range 0x000000 (black) to 0xFFFFFF (white).
Returns:
Color: A Color object representing an Excel RGB color.
"""
if color > 0xFFFFFF:
raise ValueError("RGB color must be in the range 0x000000 - 0xFFFFFF.")
return Color(color)
@staticmethod
def theme(color: int, shade: int) -> "Color":
"""
Create a theme color.
Args:
color (int): The theme color index (0-9).
shade (int): The theme shade index (0-5).
Returns:
Color: A Color object representing an Excel Theme color.
"""
if color > 9:
raise ValueError("Theme color must be in the range 0-9.")
if shade > 5:
raise ValueError("Theme shade must be in the range 0-5.")
return Color((color, shade))
@staticmethod
def automatic() -> "Color":
"""
Create an Excel color representing an "Automatic" color.
The Automatic color for an Excel property is usually the same as the
Default color but can vary according to system settings. This method and
color type are rarely used in practice but are included for completeness.
Returns:
Color: A Color object representing an Excel Automatic color.
"""
color = Color(0x000000)
color._is_automatic = True
return color
def _parse_string_color(self, value: str) -> None:
"""
Convert a hex string or named color to an RGB value.
Returns:
int: The RGB value.
"""
# Named colors used in conjunction with various set_xxx_color methods to
# convert a color name into an RGB value. These colors are for backward
# compatibility with older versions of Excel.
named_colors = {
"red": 0xFF0000,
"blue": 0x0000FF,
"cyan": 0x00FFFF,
"gray": 0x808080,
"lime": 0x00FF00,
"navy": 0x000080,
"pink": 0xFF00FF,
"black": 0x000000,
"brown": 0x800000,
"green": 0x008000,
"white": 0xFFFFFF,
"orange": 0xFF6600,
"purple": 0x800080,
"silver": 0xC0C0C0,
"yellow": 0xFFFF00,
"magenta": 0xFF00FF,
}
color = value.lstrip("#").lower()
if color == "automatic":
self._is_automatic = True
self._rgb_value = 0x000000
elif color in named_colors:
self._rgb_value = named_colors[color]
else:
try:
self._rgb_value = int(color, 16)
except ValueError as e:
raise ValueError(f"Invalid color value: {value}") from e
def _rgb_hex_value(self) -> str:
"""
Get the RGB hex value for the color.
Returns:
str: The RGB hex value as a string.
"""
if self._is_automatic:
# Default to black for automatic colors.
return "000000"
if self._type == ColorTypes.THEME:
# Default to black for theme colors.
return "000000"
return f"{self._rgb_value:06X}"
def _vml_rgb_hex_value(self) -> str:
"""
Get the RGB hex value for a VML fill color in "#rrggbb" format.
Returns:
str: The RGB hex value as a string.
"""
if self._is_automatic:
# Default VML color for non-RGB colors.
return "#ffffe1"
return f"#{self._rgb_hex_value().lower()}"
def _argb_hex_value(self) -> str:
"""
Get the ARGB hex value for the color. The alpha channel is always FF.
Returns:
str: The ARGB hex value as a string.
"""
return f"FF{self._rgb_hex_value()}"
def _attributes(self) -> List[Tuple[str, str]]:
"""
Convert the color into a set of "rgb" or "theme/tint" attributes used in
color-related Style XML elements.
Returns:
list[tuple[str, str]]: A list of key-value pairs representing the
attributes.
"""
# pylint: disable=too-many-return-statements
# pylint: disable=no-else-return
if self._type == ColorTypes.THEME:
color, shade = self._theme_color
# The first 3 columns of colors in the theme palette are different
# from the others.
if color == 0:
if shade == 1:
return [("theme", str(color)), ("tint", "-4.9989318521683403E-2")]
elif shade == 2:
return [("theme", str(color)), ("tint", "-0.14999847407452621")]
elif shade == 3:
return [("theme", str(color)), ("tint", "-0.249977111117893")]
elif shade == 4:
return [("theme", str(color)), ("tint", "-0.34998626667073579")]
elif shade == 5:
return [("theme", str(color)), ("tint", "-0.499984740745262")]
else:
return [("theme", str(color))]
elif color == 1:
if shade == 1:
return [("theme", str(color)), ("tint", "0.499984740745262")]
elif shade == 2:
return [("theme", str(color)), ("tint", "0.34998626667073579")]
elif shade == 3:
return [("theme", str(color)), ("tint", "0.249977111117893")]
elif shade == 4:
return [("theme", str(color)), ("tint", "0.14999847407452621")]
elif shade == 5:
return [("theme", str(color)), ("tint", "4.9989318521683403E-2")]
else:
return [("theme", str(color))]
elif color == 2:
if shade == 1:
return [("theme", str(color)), ("tint", "-9.9978637043366805E-2")]
elif shade == 2:
return [("theme", str(color)), ("tint", "-0.249977111117893")]
elif shade == 3:
return [("theme", str(color)), ("tint", "-0.499984740745262")]
elif shade == 4:
return [("theme", str(color)), ("tint", "-0.749992370372631")]
elif shade == 5:
return [("theme", str(color)), ("tint", "-0.89999084444715716")]
else:
return [("theme", str(color))]
else:
if shade == 1:
return [("theme", str(color)), ("tint", "0.79998168889431442")]
elif shade == 2:
return [("theme", str(color)), ("tint", "0.59999389629810485")]
elif shade == 3:
return [("theme", str(color)), ("tint", "0.39997558519241921")]
elif shade == 4:
return [("theme", str(color)), ("tint", "-0.249977111117893")]
elif shade == 5:
return [("theme", str(color)), ("tint", "-0.499984740745262")]
else:
return [("theme", str(color))]
# Handle RGB color.
elif self._type == ColorTypes.RGB:
return [("rgb", self._argb_hex_value())]
# Default case for other colors.
return []
def _chart_scheme(self) -> Tuple[str, int, int]:
"""
Return the chart theme based on color and shade.
Returns:
Tuple[str, int, int]: The corresponding tuple of values from CHART_THEMES.
"""
return CHART_THEMES[self._theme_color[0]][self._theme_color[1]]
| Color |
python | palantir__python-language-server | pyls/config/flake8_conf.py | {
"start": 1028,
"end": 1702
} | class ____(ConfigSource):
"""Parse flake8 configurations."""
def user_config(self):
config_file = self._user_config_file()
config = self.read_config_from_files([config_file])
return self.parse_config(config, CONFIG_KEY, OPTIONS)
def _user_config_file(self):
if self.is_windows:
return os.path.expanduser('~\\.flake8')
return os.path.join(self.xdg_home, 'flake8')
def project_config(self, document_path):
files = find_parents(self.root_path, document_path, PROJECT_CONFIGS)
config = self.read_config_from_files(files)
return self.parse_config(config, CONFIG_KEY, OPTIONS)
| Flake8Config |
python | wandb__wandb | wandb/sdk/artifacts/_generated/artifact_version_files.py | {
"start": 1075,
"end": 1530
} | class ____(GQLResult):
node: Optional[FileFragment]
ArtifactVersionFiles.model_rebuild()
ArtifactVersionFilesProject.model_rebuild()
ArtifactVersionFilesProjectArtifactType.model_rebuild()
ArtifactVersionFilesProjectArtifactTypeArtifact.model_rebuild()
ArtifactVersionFilesProjectArtifactTypeArtifactFiles.model_rebuild()
ArtifactVersionFilesProjectArtifactTypeArtifactFilesEdges.model_rebuild()
| ArtifactVersionFilesProjectArtifactTypeArtifactFilesEdges |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/declared_attr_one.py | {
"start": 1574,
"end": 2483
} | class ____(Employee):
__mapper_args__ = {
"polymorphic_identity": "manager",
}
@declared_attr
def start_date(cls) -> Mapped[datetime]:
"Start date column, if not present already."
assert Employee.__table__ is not None
return getattr(
Employee.__table__.c,
"start date",
mapped_column("start date", DateTime),
)
def do_something_with_mapped_class(
cls_: MappedClassProtocol[Employee],
) -> None:
assert_type(cls_.__table__.select(), Select[Unpack[tuple[Any, ...]]])
assert_type(cls_.__mapper__, Mapper[Employee])
assert_type(cls_(), Employee)
do_something_with_mapped_class(Manager)
do_something_with_mapped_class(Engineer)
if typing.TYPE_CHECKING:
assert_type(Engineer.start_date, InstrumentedAttribute[datetime])
assert_type(Manager.start_date, InstrumentedAttribute[datetime])
| Manager |
python | pypa__hatch | tests/backend/metadata/test_core.py | {
"start": 546,
"end": 1195
} | class ____:
def test_default(self, isolation):
metadata = ProjectMetadata(str(isolation), None)
assert metadata.config == metadata.config == {}
def test_reuse(self, isolation):
config = {}
metadata = ProjectMetadata(str(isolation), None, config)
assert metadata.config is metadata.config is config
def test_read(self, temp_dir):
project_file = temp_dir / "pyproject.toml"
project_file.write_text("foo = 5")
with temp_dir.as_cwd():
metadata = ProjectMetadata(str(temp_dir), None)
assert metadata.config == metadata.config == {"foo": 5}
| TestConfig |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 4174,
"end": 4339
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneRunEvent)
name = "AlertStartEvent"
| GrapheneAlertStartEvent |
python | openai__openai-python | src/openai/types/beta/chatkit/chatkit_thread_user_message_item.py | {
"start": 966,
"end": 1069
} | class ____(BaseModel):
id: str
"""Identifier of the requested tool."""
| InferenceOptionsToolChoice |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 82183,
"end": 84592
} | class ____:
def test_get_values_for_type(self):
gns = x509.GeneralNames([x509.DNSName("cryptography.io")])
names = gns.get_values_for_type(x509.DNSName)
assert names == ["cryptography.io"]
def test_iter_names(self):
gns = x509.GeneralNames(
[x509.DNSName("cryptography.io"), x509.DNSName("crypto.local")]
)
assert len(gns) == 2
assert list(gns) == [
x509.DNSName("cryptography.io"),
x509.DNSName("crypto.local"),
]
def test_iter_input(self):
names = [
x509.DNSName("cryptography.io"),
x509.DNSName("crypto.local"),
]
gns = x509.GeneralNames(iter(names))
assert list(gns) == names
def test_indexing(self):
gn = x509.GeneralNames(
[
x509.DNSName("cryptography.io"),
x509.DNSName("crypto.local"),
x509.DNSName("another.local"),
x509.RFC822Name("email@another.local"),
x509.UniformResourceIdentifier("http://another.local"),
]
)
assert gn[-1] == gn[4]
assert gn[2:6:2] == [gn[2], gn[4]]
def test_invalid_general_names(self):
with pytest.raises(TypeError):
x509.GeneralNames(
[
x509.DNSName("cryptography.io"),
"invalid", # type:ignore[list-item]
]
)
def test_repr(self):
gns = x509.GeneralNames([x509.DNSName("cryptography.io")])
assert repr(gns) == (
"<GeneralNames([<DNSName(value='cryptography.io')>])>"
)
def test_eq(self):
gns = x509.GeneralNames([x509.DNSName("cryptography.io")])
gns2 = x509.GeneralNames([x509.DNSName("cryptography.io")])
assert gns == gns2
def test_ne(self):
gns = x509.GeneralNames([x509.DNSName("cryptography.io")])
gns2 = x509.GeneralNames([x509.RFC822Name("admin@cryptography.io")])
assert gns != gns2
assert gns != object()
def test_hash(self):
gns = x509.GeneralNames([x509.DNSName("cryptography.io")])
gns2 = x509.GeneralNames([x509.DNSName("cryptography.io")])
gns3 = x509.GeneralNames([x509.RFC822Name("admin@cryptography.io")])
assert hash(gns) == hash(gns2)
assert hash(gns) != hash(gns3)
| TestGeneralNames |
python | scipy__scipy | scipy/interpolate/_fitpack2.py | {
"start": 47180,
"end": 48238
} | class ____(_BivariateSplineBase):
"""Bivariate spline constructed from the coefficients and knots of another
spline.
Notes
-----
The class is not meant to be instantiated directly from the data to be
interpolated or smoothed. As a result, its ``fp`` attribute and
``get_residual`` method are inherited but overridden; ``AttributeError`` is
raised when they are accessed.
The other inherited attributes can be used as usual.
"""
_invalid_why = ("is unavailable, because _DerivedBivariateSpline"
" instance is not constructed from data that are to be"
" interpolated or smoothed, but derived from the"
" underlying knots and coefficients of another spline"
" object")
@property
def fp(self):
raise AttributeError(f"attribute \"fp\" {self._invalid_why}")
def get_residual(self):
raise AttributeError(f"method \"get_residual\" {self._invalid_why}")
@xp_capabilities(out_of_scope=True)
| _DerivedBivariateSpline |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 157022,
"end": 159375
} | class ____(TestCase):
def setUp(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is False)
assert_(c.flags.writebackifcopy is False)
assert_(d.flags.writebackifcopy is False)
assert_(e.flags.writebackifcopy is False)
assert_(f.flags.writebackifcopy is False)
@skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount(self):
# includes regression test for reference count error gh-13165
inds = [np.intp(0), np.array([True] * self.a.size), np.array([0]), None]
indtype = np.dtype(np.intp)
rc_indtype = sys.getrefcount(indtype)
for ind in inds:
rc_ind = sys.getrefcount(ind)
for _ in range(100):
try:
self.a.flat[ind]
except IndexError:
pass
assert_(abs(sys.getrefcount(ind) - rc_ind) < 50)
assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50)
def test_index_getset(self):
it = np.arange(10).reshape(2, 1, 5).flat
with pytest.raises(AttributeError):
it.index = 10
for _ in it:
pass
# Check the value of `.index` is updated correctly (see also gh-19153)
# If the type was incorrect, this would show up on big-endian machines
assert it.index == it.base.size
| TestFlat |
python | keon__algorithms | algorithms/graph/transitive_closure_dfs.py | {
"start": 127,
"end": 1577
} | class ____:
"""
This class represents a directed graph using adjacency lists
"""
def __init__(self, vertices):
# No. of vertices
self.vertex_count = vertices
# default dictionary to store graph
self.graph = {}
# To store transitive closure
self.closure = [[0 for j in range(vertices)] for i in range(vertices)]
def add_edge(self, source, target):
"""
Adds a directed edge to the graph
"""
if source in self.graph:
self.graph[source].append(target)
else:
self.graph[source] = [target]
def dfs_util(self, source, target):
"""
A recursive DFS traversal function that finds
all reachable vertices for source
"""
# Mark reachability from source to target as true.
self.closure[source][target] = 1
# Find all the vertices reachable through target
for adjacent in self.graph[target]:
if self.closure[source][adjacent] == 0:
self.dfs_util(source, adjacent)
def transitive_closure(self):
"""
The function to find transitive closure. It uses
recursive dfs_util()
"""
# Call the recursive helper function to print DFS
# traversal starting from all vertices one by one
for i in range(self.vertex_count):
self.dfs_util(i, i)
return self.closure
| Graph |
python | ansible__ansible | test/units/_internal/templating/test_access.py | {
"start": 603,
"end": 882
} | class ____(NotifiableAccessContextBase, metaclass=abc.ABCMeta):
def __init__(self, access_list: list):
self._access_list: list = access_list
def _log(self, o: t.Any) -> t.Any:
self._access_list.append(LoggedAccess(ctx=self, obj=o))
| LoggingTagAccessNotifier |
python | django__django | django/db/models/fields/json.py | {
"start": 21092,
"end": 23029
} | class ____(JSONExact):
# RemovedInDjango70Warning: When deprecation period ends, uncomment the
# flag below.
# can_use_none_as_rhs = True
def process_rhs(self, compiler, connection):
if isinstance(self.rhs, KeyTransform):
return super(lookups.Exact, self).process_rhs(compiler, connection)
rhs, rhs_params = super().process_rhs(compiler, connection)
if connection.vendor == "oracle":
func = []
sql = "%s(JSON_OBJECT('value' VALUE %%s FORMAT JSON), '$.value')"
for value in rhs_params:
value = json.loads(value)
if isinstance(value, (list, dict)):
func.append(sql % "JSON_QUERY")
else:
func.append(sql % "JSON_VALUE")
rhs %= tuple(func)
elif connection.vendor == "sqlite":
func = []
for value in rhs_params:
if value in connection.ops.jsonfield_datatype_values:
func.append("%s")
else:
func.append("JSON_EXTRACT(%s, '$')")
rhs %= tuple(func)
return rhs, rhs_params
def as_oracle(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
if rhs_params and (*rhs_params,) == ("null",):
# Field has key and it's NULL.
has_key_expr = HasKeyOrArrayIndex(self.lhs.lhs, self.lhs.key_name)
has_key_sql, has_key_params = has_key_expr.as_oracle(compiler, connection)
is_null_expr = self.lhs.get_lookup("isnull")(self.lhs, True)
is_null_sql, is_null_params = is_null_expr.as_sql(compiler, connection)
return (
"%s AND %s" % (has_key_sql, is_null_sql),
tuple(has_key_params) + tuple(is_null_params),
)
return super().as_sql(compiler, connection)
| KeyTransformExact |
python | tensorflow__tensorflow | tensorflow/python/ops/factory_ops_test.py | {
"start": 1907,
"end": 2929
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.parameters(
(sparse_int64,),
(sparse_str,),
)
@test_util.run_gpu_only
def testSparseWithDistributedDataset(self, sparse_factory):
@def_function.function
def distributed_dataset_producer(t):
strategy = mirrored_strategy.MirroredStrategy(['GPU:0', 'GPU:1'])
sparse_ds = dataset_ops.Dataset.from_tensor_slices(t).batch(2)
dist_dataset = strategy.experimental_distribute_dataset(sparse_ds)
ds = iter(dist_dataset)
result = strategy.experimental_local_results(next(ds))[0]
# Reach the end of the iterator
for ignore in ds: # pylint: disable=unused-variable
pass
return result
t = sparse_factory()
result = distributed_dataset_producer(t)
self.assertAllEqual(
self.evaluate(sparse_ops.sparse_tensor_to_dense(t)[0]),
self.evaluate(sparse_ops.sparse_tensor_to_dense(result)[0]))
if __name__ == '__main__':
test.main()
| FactoryOpsTest |
python | plotly__plotly.py | plotly/graph_objs/streamtube/_colorbar.py | {
"start": 233,
"end": 61532
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "streamtube"
_path_str = "streamtube.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
    """
    Replacement text for specific tick or hover labels, e.g.
    ``{US: 'USA', CA: 'Canada'}``. Keys must match the rendered labels
    exactly (after any tickprefix/ticksuffix; negative numbers use the
    U+2212 minus sign, not the ascii dash). Keys and values may contain
    html-like tags or MathJax.

    Returns
    -------
    Any
    """
    return self["labelalias"]

@labelalias.setter
def labelalias(self, val):
    self["labelalias"] = val

@property
def len(self):
    """
    Length of the color bar, excluding the padding at both ends, in the
    units selected by `lenmode`.

    Returns
    -------
    int|float
        A number in the interval [0, inf].
    """
    return self["len"]

@len.setter
def len(self, val):
    self["len"] = val

@property
def lenmode(self):
    """
    Units in which the color bar's length (`len`) is measured: plot
    "fraction" or "pixels".

    Returns
    -------
    Any
        One of ['fraction', 'pixels'].
    """
    return self["lenmode"]

@lenmode.setter
def lenmode(self, val):
    self["lenmode"] = val

@property
def minexponent(self):
    """
    Hide the SI prefix for 10^n when |n| is below this number. Only has
    an effect when `tickformat` is "SI" or "B".

    Returns
    -------
    int|float
        A number in the interval [0, inf].
    """
    return self["minexponent"]

@minexponent.setter
def minexponent(self, val):
    self["minexponent"] = val
@property
def nticks(self):
    """
    Maximum number of ticks for this axis; the actual number is chosen
    automatically to be <= `nticks`. Only effective when `tickmode` is
    "auto".

    Returns
    -------
    int
        An int in the interval [0, 9223372036854775807].
    """
    return self["nticks"]

@nticks.setter
def nticks(self, val):
    self["nticks"] = val

@property
def orientation(self):
    """
    Orientation of the colorbar.

    Returns
    -------
    Any
        One of ['h', 'v'].
    """
    return self["orientation"]

@orientation.setter
def orientation(self, val):
    self["orientation"] = val

@property
def outlinecolor(self):
    """
    Color of the axis line. Accepts hex, rgb/rgba, hsl/hsla, hsv/hsva
    strings or a named CSS color
    (https://plotly.com/python/css-colors/).

    Returns
    -------
    str
    """
    return self["outlinecolor"]

@outlinecolor.setter
def outlinecolor(self, val):
    self["outlinecolor"] = val

@property
def outlinewidth(self):
    """
    Width (in px) of the axis line.

    Returns
    -------
    int|float
        A number in the interval [0, inf].
    """
    return self["outlinewidth"]

@outlinewidth.setter
def outlinewidth(self, val):
    self["outlinewidth"] = val

@property
def separatethousands(self):
    """
    If True, even 4-digit integers are thousands-separated.

    Returns
    -------
    bool
    """
    return self["separatethousands"]

@separatethousands.setter
def separatethousands(self, val):
    self["separatethousands"] = val
@property
def showexponent(self):
    """
    Controls which tick exponents are shown beside their significands:
    "all", only the "first", only the "last", or "none".

    Returns
    -------
    Any
        One of ['all', 'first', 'last', 'none'].
    """
    return self["showexponent"]

@showexponent.setter
def showexponent(self, val):
    self["showexponent"] = val

@property
def showticklabels(self):
    """
    Whether the tick labels are drawn.

    Returns
    -------
    bool
    """
    return self["showticklabels"]

@showticklabels.setter
def showticklabels(self, val):
    self["showticklabels"] = val

@property
def showtickprefix(self):
    """
    Controls which tick labels get the prefix: "all", only the "first",
    only the "last", or "none" (prefixes hidden).

    Returns
    -------
    Any
        One of ['all', 'first', 'last', 'none'].
    """
    return self["showtickprefix"]

@showtickprefix.setter
def showtickprefix(self, val):
    self["showtickprefix"] = val

@property
def showticksuffix(self):
    """
    Same as `showtickprefix` but for tick suffixes.

    Returns
    -------
    Any
        One of ['all', 'first', 'last', 'none'].
    """
    return self["showticksuffix"]

@showticksuffix.setter
def showticksuffix(self, val):
    self["showticksuffix"] = val
@property
def thickness(self):
    """
    Thickness of the color bar, excluding the size of padding, ticks and
    labels, in the units selected by `thicknessmode`.

    Returns
    -------
    int|float
        A number in the interval [0, inf].
    """
    return self["thickness"]

@thickness.setter
def thickness(self, val):
    self["thickness"] = val

@property
def thicknessmode(self):
    """
    Units in which the color bar's thickness (the measure in the
    constant-color direction) is set: plot "fraction" or "pixels".

    Returns
    -------
    Any
        One of ['fraction', 'pixels'].
    """
    return self["thicknessmode"]

@thicknessmode.setter
def thicknessmode(self, val):
    self["thicknessmode"] = val

@property
def tick0(self):
    """
    Placement of the first tick; use together with `dtick`. For "log"
    axes supply the log of the starting tick (except with
    `dtick`=*L<f>*); for "date" axes supply a date string; for
    "category" axes supply the category's serial number (counted from
    zero in order of appearance).

    Returns
    -------
    Any
    """
    return self["tick0"]

@tick0.setter
def tick0(self, val):
    self["tick0"] = val
@property
def tickangle(self):
    """
    Angle of the tick labels relative to the horizontal; e.g. -90 draws
    them vertically. Values outside [-180, 180] are wrapped to the
    equivalent angle (270 -> -90).

    Returns
    -------
    int|float
    """
    return self["tickangle"]

@tickangle.setter
def tickangle(self, val):
    self["tickangle"] = val

@property
def tickcolor(self):
    """
    Tick color. Accepts hex, rgb/rgba, hsl/hsla, hsv/hsva strings or a
    named CSS color (https://plotly.com/python/css-colors/).

    Returns
    -------
    str
    """
    return self["tickcolor"]

@tickcolor.setter
def tickcolor(self, val):
    self["tickcolor"] = val

@property
def tickfont(self):
    """
    Font of the color bar's tick labels; a
    :class:`plotly.graph_objs.streamtube.colorbar.Tickfont` instance or
    a dict of compatible properties.

    Returns
    -------
    plotly.graph_objs.streamtube.colorbar.Tickfont
    """
    return self["tickfont"]

@tickfont.setter
def tickfont(self, val):
    self["tickfont"] = val

@property
def tickformat(self):
    """
    d3-style tick label format string. Numbers:
    https://github.com/d3/d3-format/tree/v1.4.5#d3-format; dates:
    https://github.com/d3/d3-time-format/tree/v2.2.3#locale_format, with
    two plotly extensions: "%h" (half-year) and "%{n}f" (fractional
    seconds, n digits) — e.g. *2016-10-13 09:15:23.456* with
    "%H~%M~%S.%2f" renders "09~15~23.46".

    Returns
    -------
    str
    """
    return self["tickformat"]

@tickformat.setter
def tickformat(self, val):
    self["tickformat"] = val
@property
def tickformatstops(self):
    """
    Tuple of
    :class:`plotly.graph_objs.streamtube.colorbar.Tickformatstop`
    instances (or dicts of compatible properties).

    Returns
    -------
    tuple[plotly.graph_objs.streamtube.colorbar.Tickformatstop]
    """
    return self["tickformatstops"]

@tickformatstops.setter
def tickformatstops(self, val):
    self["tickformatstops"] = val

@property
def tickformatstopdefaults(self):
    """
    Template-level defaults (layout.template.data.streamtube.colorbar.
    tickformatstopdefaults) applied to elements of
    streamtube.colorbar.tickformatstops; a Tickformatstop instance or a
    dict of compatible properties.

    Returns
    -------
    plotly.graph_objs.streamtube.colorbar.Tickformatstop
    """
    return self["tickformatstopdefaults"]

@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
    self["tickformatstopdefaults"] = val

@property
def ticklabeloverflow(self):
    """
    How to handle tick labels that would overflow the graph div or the
    axis domain. Defaults: *hide past domain* for inside tick labels,
    otherwise *hide past div*.

    Returns
    -------
    Any
        One of ['allow', 'hide past div', 'hide past domain'].
    """
    return self["ticklabeloverflow"]

@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
    self["ticklabeloverflow"] = val

@property
def ticklabelposition(self):
    """
    Where tick labels are drawn relative to the ticks. Left/right
    options apply when `orientation` is "h"; top/bottom when it is "v".

    Returns
    -------
    Any
        One of ['outside', 'inside', 'outside top', 'inside top',
        'outside left', 'inside left', 'outside right', 'inside right',
        'outside bottom', 'inside bottom'].
    """
    return self["ticklabelposition"]

@ticklabelposition.setter
def ticklabelposition(self, val):
    self["ticklabelposition"] = val

@property
def ticklabelstep(self):
    """
    Spacing between tick labels relative to tick spacing: 1 (default)
    labels every tick, 2 every 2nd, n every nth; `tick0` picks which
    labels are shown. Not implemented for "log"/"multicategory" axes or
    when `tickmode` is "array".

    Returns
    -------
    int
        An int in the interval [1, 9223372036854775807].
    """
    return self["ticklabelstep"]

@ticklabelstep.setter
def ticklabelstep(self, val):
    self["ticklabelstep"] = val

@property
def ticklen(self):
    """
    Tick length (in px).

    Returns
    -------
    int|float
        A number in the interval [0, inf].
    """
    return self["ticklen"]

@ticklen.setter
def ticklen(self, val):
    self["ticklen"] = val
@property
def tickmode(self):
    """
    Tick mode for this axis. "auto": tick count follows `nticks`.
    "linear": ticks start at `tick0` and step by `dtick` (default when
    `tick0` and `dtick` are provided). "array": tick positions come from
    `tickvals` and labels from `ticktext` (default when `tickvals` is
    provided).

    Returns
    -------
    Any
        One of ['auto', 'linear', 'array'].
    """
    return self["tickmode"]

@tickmode.setter
def tickmode(self, val):
    self["tickmode"] = val

@property
def tickprefix(self):
    """
    Tick label prefix.

    Returns
    -------
    str
    """
    return self["tickprefix"]

@tickprefix.setter
def tickprefix(self, val):
    self["tickprefix"] = val

@property
def ticks(self):
    """
    Whether and where ticks are drawn: "" (not drawn), "outside" or
    "inside" the axis lines.

    Returns
    -------
    Any
        One of ['outside', 'inside', ''].
    """
    return self["ticks"]

@ticks.setter
def ticks(self, val):
    self["ticks"] = val

@property
def ticksuffix(self):
    """
    Tick label suffix.

    Returns
    -------
    str
    """
    return self["ticksuffix"]

@ticksuffix.setter
def ticksuffix(self, val):
    self["ticksuffix"] = val

@property
def ticktext(self):
    """
    Text displayed at the positions given by `tickvals`. Only effective
    when `tickmode` is "array". Accepts a tuple, list, numpy array, or
    pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["ticktext"]

@ticktext.setter
def ticktext(self, val):
    self["ticktext"] = val

@property
def ticktextsrc(self):
    """
    Chart Studio Cloud source reference for `ticktext`.

    Returns
    -------
    str
    """
    return self["ticktextsrc"]

@ticktextsrc.setter
def ticktextsrc(self, val):
    self["ticktextsrc"] = val
@property
def tickvals(self):
    """
    Values at which ticks on this axis appear. Only effective when
    `tickmode` is "array"; used with `ticktext`. Accepts a tuple, list,
    numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    return self["tickvals"]

@tickvals.setter
def tickvals(self, val):
    self["tickvals"] = val

@property
def tickvalssrc(self):
    """
    Chart Studio Cloud source reference for `tickvals`.

    Returns
    -------
    str
    """
    return self["tickvalssrc"]

@tickvalssrc.setter
def tickvalssrc(self, val):
    self["tickvalssrc"] = val

@property
def tickwidth(self):
    """
    Tick width (in px).

    Returns
    -------
    int|float
        A number in the interval [0, inf].
    """
    return self["tickwidth"]

@tickwidth.setter
def tickwidth(self, val):
    self["tickwidth"] = val

@property
def title(self):
    """
    Color bar title; a
    :class:`plotly.graph_objs.streamtube.colorbar.Title` instance or a
    dict of compatible properties.

    Returns
    -------
    plotly.graph_objs.streamtube.colorbar.Title
    """
    return self["title"]

@title.setter
def title(self, val):
    self["title"] = val
@property
def x(self):
    """
    X position of the color bar relative to `xref` (in plot fraction).
    Defaults when `xref` is "paper": 1.02 ("v" orientation) / 0.5 ("h");
    when `xref` is "container": 1 ("v") / 0.5 ("h"). Must lie in [0, 1]
    for "container" and in [-2, 3] for "paper".

    Returns
    -------
    int|float
    """
    return self["x"]

@x.setter
def x(self, val):
    self["x"] = val

@property
def xanchor(self):
    """
    Horizontal anchor: binds `x` to the "left", "center" or "right" of
    the color bar. Defaults to "left" when `orientation` is "v" and
    "center" when it is "h".

    Returns
    -------
    Any
        One of ['left', 'center', 'right'].
    """
    return self["xanchor"]

@xanchor.setter
def xanchor(self, val):
    self["xanchor"] = val

@property
def xpad(self):
    """
    Padding (in px) along the x direction.

    Returns
    -------
    int|float
        A number in the interval [0, inf].
    """
    return self["xpad"]

@xpad.setter
def xpad(self, val):
    self["xpad"] = val

@property
def xref(self):
    """
    Container `x` refers to: "container" spans the entire `width` of the
    plot, "paper" only the plotting area's width.

    Returns
    -------
    Any
        One of ['container', 'paper'].
    """
    return self["xref"]

@xref.setter
def xref(self, val):
    self["xref"] = val
@property
def y(self):
    """
    Y position of the color bar relative to `yref` (in plot fraction).
    Defaults when `yref` is "paper": 0.5 ("v" orientation) / 1.02 ("h");
    when `yref` is "container": 0.5 ("v") / 1 ("h"). Must lie in [0, 1]
    for "container" and in [-2, 3] for "paper".

    Returns
    -------
    int|float
    """
    return self["y"]

@y.setter
def y(self, val):
    self["y"] = val

@property
def yanchor(self):
    """
    Vertical anchor: binds `y` to the "top", "middle" or "bottom" of the
    color bar. Defaults to "middle" when `orientation` is "v" and
    "bottom" when it is "h".

    Returns
    -------
    Any
        One of ['top', 'middle', 'bottom'].
    """
    return self["yanchor"]

@yanchor.setter
def yanchor(self, val):
    self["yanchor"] = val

@property
def ypad(self):
    """
    Padding (in px) along the y direction.

    Returns
    -------
    int|float
        A number in the interval [0, inf].
    """
    return self["ypad"]

@ypad.setter
def ypad(self, val):
    self["ypad"] = val

@property
def yref(self):
    """
    Container `y` refers to: "container" spans the entire `height` of
    the plot, "paper" only the plotting area's height.

    Returns
    -------
    Any
        One of ['container', 'paper'].
    """
    return self["yref"]

@yref.setter
def yref(self, val):
    self["yref"] = val
@property
def _prop_descriptions(self):
    # Canonical per-property description text; consumed verbatim when
    # assembling the constructor docstring, so the literal must not change.
    return """\
        bgcolor
            Sets the color of padded area.
        bordercolor
            Sets the axis line color.
        borderwidth
            Sets the width (in px) or the border enclosing this
            color bar.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
            from "femto" f (10^-15) to "tera" T (10^12). *SI
            extended* covers instead the full SI range from
            "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
            *SI extended* is used and the exponent is beyond the
            above ranges, the formatting rule will automatically be
            switched to the power notation.
        labelalias
            Replacement text for specific tick or hover labels. For
            example using {US: 'USA', CA: 'Canada'} changes US to
            USA and CA to Canada. The labels we would have shown
            must match the keys exactly, after adding any
            tickprefix or ticksuffix. For negative numbers the
            minus sign symbol used (U+2212) is wider than the
            regular ascii dash. That means you need to use −1
            instead of -1. labelalias can be used with any axis
            type, and both keys (if needed) and values (if desired)
            can include html-like tags or MathJax.
        len
            Sets the length of the color bar This measure excludes
            the padding of both ends. That is, the color bar length
            is this length minus the padding on both ends.
        lenmode
            Determines whether this color bar's length (i.e. the
            measure in the color variation direction) is set in
            units of plot "fraction" or in *pixels. Use `len` to
            set the value.
        minexponent
            Hide SI prefix for 10^n if |n| is below this number.
            This only has an effect when `tickformat` is "SI" or
            "B".
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        orientation
            Sets the orientation of the colorbar.
        outlinecolor
            Sets the axis line color.
        outlinewidth
            Sets the width (in px) of the axis line.
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        thickness
            Sets the thickness of the color bar This measure
            excludes the size of the padding, ticks and labels.
        thicknessmode
            Determines whether this color bar's thickness (i.e. the
            measure in the constant color direction) is set in
            units of plot "fraction" or in "pixels". Use
            `thickness` to set the value.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the color bar's tick label font
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            A tuple of :class:`plotly.graph_objects.streamtube.colo
            rbar.Tickformatstop` instances or dicts with compatible
            properties
        tickformatstopdefaults
            When used in a template (as layout.template.data.stream
            tube.colorbar.tickformatstopdefaults), sets the default
            property values to use for elements of
            streamtube.colorbar.tickformatstops
        ticklabeloverflow
            Determines how we handle tick labels that would
            overflow either the graph div or the domain of the
            axis. The default value for inside tick labels is *hide
            past domain*. In other cases the default is *hide past
            div*.
        ticklabelposition
            Determines where tick labels are drawn relative to the
            ticks. Left and right options are used when
            `orientation` is "h", top and bottom when `orientation`
            is "v".
        ticklabelstep
            Sets the spacing between tick labels as compared to the
            spacing between ticks. A value of 1 (default) means
            each tick gets a label. A value of 2 means shows every
            2nd label. A larger value n means only every nth tick
            is labeled. `tick0` determines which labels are shown.
            Not implemented for axes with `type` "log" or
            "multicategory", or when `tickmode` is "array".
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on Chart Studio Cloud for
            `ticktext`.
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on Chart Studio Cloud for
            `tickvals`.
        tickwidth
            Sets the tick width (in px).
        title
            :class:`plotly.graph_objects.streamtube.colorbar.Title`
            instance or dict with compatible properties
        x
            Sets the x position with respect to `xref` of the color
            bar (in plot fraction). When `xref` is "paper",
            defaults to 1.02 when `orientation` is "v" and 0.5 when
            `orientation` is "h". When `xref` is "container",
            defaults to 1 when `orientation` is "v" and 0.5 when
            `orientation` is "h". Must be between 0 and 1 if `xref`
            is "container" and between "-2" and 3 if `xref` is
            "paper".
        xanchor
            Sets this color bar's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the color bar. Defaults to "left" when
            `orientation` is "v" and "center" when `orientation` is
            "h".
        xpad
            Sets the amount of padding (in px) along the x
            direction.
        xref
            Sets the container `x` refers to. "container" spans the
            entire `width` of the plot. "paper" refers to the width
            of the plotting area only.
        y
            Sets the y position with respect to `yref` of the color
            bar (in plot fraction). When `yref` is "paper",
            defaults to 0.5 when `orientation` is "v" and 1.02 when
            `orientation` is "h". When `yref` is "container",
            defaults to 0.5 when `orientation` is "v" and 1 when
            `orientation` is "h". Must be between 0 and 1 if `yref`
            is "container" and between "-2" and 3 if `yref` is
            "paper".
        yanchor
            Sets this color bar's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the color bar. Defaults to "middle" when
            `orientation` is "v" and "bottom" when `orientation` is
            "h".
        ypad
            Sets the amount of padding (in px) along the y
            direction.
        yref
            Sets the container `y` refers to. "container" spans the
            entire `height` of the plot. "paper" refers to the
            height of the plotting area only.
        """
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
labelalias=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
orientation=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
x=None,
xanchor=None,
xpad=None,
xref=None,
y=None,
yanchor=None,
ypad=None,
yref=None,
**kwargs,
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.streamtube.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.streamtube.colo
rbar.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.stream
tube.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
streamtube.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.streamtube.colorbar.Title`
instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
Returns
-------
ColorBar
"""
super().__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.streamtube.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.streamtube.ColorBar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("len", arg, len)
self._set_property("lenmode", arg, lenmode)
self._set_property("minexponent", arg, minexponent)
self._set_property("nticks", arg, nticks)
self._set_property("orientation", arg, orientation)
self._set_property("outlinecolor", arg, outlinecolor)
self._set_property("outlinewidth", arg, outlinewidth)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("thickness", arg, thickness)
self._set_property("thicknessmode", arg, thicknessmode)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
self._set_property("ticklabelposition", arg, ticklabelposition)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("x", arg, x)
self._set_property("xanchor", arg, xanchor)
self._set_property("xpad", arg, xpad)
self._set_property("xref", arg, xref)
self._set_property("y", arg, y)
self._set_property("yanchor", arg, yanchor)
self._set_property("ypad", arg, ypad)
self._set_property("yref", arg, yref)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| ColorBar |
python | wandb__wandb | wandb/vendor/pygments/lexers/javascript.py | {
"start": 4582,
"end": 8960
} | class ____(RegexLexer):
"""
For `Kal`_ source code.
.. _Kal: http://rzimmerman.github.io/kal
.. versionadded:: 2.0
"""
name = 'Kal'
aliases = ['kal']
filenames = ['*.kal']
mimetypes = ['text/kal', 'application/kal']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'###[^#].*?###', Comment.Multiline),
(r'#(?!##[^#]).*?\n', Comment.Single),
],
'functiondef': [
(r'[$a-zA-Z_][\w$]*\s*', Name.Function, '#pop'),
include('commentsandwhitespace'),
],
'classdef': [
(r'\binherits\s+from\b', Keyword),
(r'[$a-zA-Z_][\w$]*\s*\n', Name.Class, '#pop'),
(r'[$a-zA-Z_][\w$]*\s*', Name.Class),
include('commentsandwhitespace'),
],
'listcomprehension': [
(r'\]', Punctuation, '#pop'),
(r'\b(property|value)\b', Keyword),
include('root'),
],
'waitfor': [
(r'\n', Punctuation, '#pop'),
(r'\bfrom\b', Keyword),
include('root'),
],
'root': [
include('commentsandwhitespace'),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex),
(r'\?|:|_(?=\n)|==?|!=|-(?!>)|[<>+*/-]=?',
Operator),
(r'\b(and|or|isnt|is|not|but|bitwise|mod|\^|xor|exists|'
r'doesnt\s+exist)\b', Operator.Word),
(r'(?:\([^()]+\))?\s*>', Name.Function),
(r'[{(]', Punctuation),
(r'\[', Punctuation, 'listcomprehension'),
(r'[})\].,]', Punctuation),
(r'\b(function|method|task)\b', Keyword.Declaration, 'functiondef'),
(r'\bclass\b', Keyword.Declaration, 'classdef'),
(r'\b(safe\s+)?wait\s+for\b', Keyword, 'waitfor'),
(r'\b(me|this)(\.[$a-zA-Z_][\w.$]*)?\b', Name.Variable.Instance),
(r'(?<![.$])(for(\s+(parallel|series))?|in|of|while|until|'
r'break|return|continue|'
r'when|if|unless|else|otherwise|except\s+when|'
r'throw|raise|fail\s+with|try|catch|finally|new|delete|'
r'typeof|instanceof|super|run\s+in\s+parallel|'
r'inherits\s+from)\b', Keyword),
(r'(?<![.$])(true|false|yes|no|on|off|null|nothing|none|'
r'NaN|Infinity|undefined)\b',
Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|isSafeInteger|parseFloat|parseInt|document|'
r'window|'
r'print)\b',
Name.Builtin),
(r'[$a-zA-Z_][\w.$]*\s*(:|[+\-*/]?\=)?\b', Name.Variable),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
],
'strings': [
(r'[^#\\\'"]+', String),
# note that all kal strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string': [
(r'\}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
(r'\\.|\'', String), # double-quoted string don't need ' escapes
(r'#\{', String.Interpol, "interpoling_string"),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r'#|\\.|"', String), # single quoted strings don't need " escapses
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-string
(r'#\{', String.Interpol, "interpoling_string"),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
| KalLexer |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_format15.py | {
"start": 314,
"end": 1497
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("format15.xlsx")
def test_create_file_zero_number_format(self):
"""Test the creation of a simple XlsxWriter file 0 number format."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({"bold": 1})
format2 = workbook.add_format({"bold": 1, "num_format": 0})
worksheet.write("A1", 1, format1)
worksheet.write("A2", 2, format2)
workbook.close()
self.assertExcelEqual()
def test_create_file_zero_number_format_string(self):
"""Test the creation of a simple XlsxWriter file 0 number format."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({"bold": 1})
format2 = workbook.add_format({"bold": 1, "num_format": "0"})
worksheet.write("A1", 1, format1)
worksheet.write("A2", 2, format2)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | wandb__wandb | wandb/filesync/dir_watcher.py | {
"start": 6011,
"end": 16335
} | class ____:
def __init__(
self,
settings: "SettingsStatic",
file_pusher: "FilePusher",
file_dir: Optional[PathStr] = None,
) -> None:
self._file_count = 0
self._dir = file_dir or settings.files_dir
self._settings = settings
self._savename_file_policies: MutableMapping[LogicalPath, PolicyName] = {}
self._user_file_policies: Mapping[PolicyName, MutableSet[GlobStr]] = {
"end": set(),
"live": set(),
"now": set(),
}
self._file_pusher = file_pusher
self._file_event_handlers: MutableMapping[LogicalPath, FileEventHandler] = {}
self._file_observer = wd_polling.PollingObserver()
self._file_observer.schedule(
self._per_file_event_handler(), self._dir, recursive=True
)
self._file_observer.start()
logger.info("watching files in: %s", settings.files_dir)
@property
def emitter(self) -> Optional["wd_api.EventEmitter"]:
try:
return next(iter(self._file_observer.emitters))
except StopIteration:
return None
def update_policy(self, path: GlobStr, policy: "PolicyName") -> None:
# When we're dealing with one of our own media files, there's no need
# to store the policy in memory. _get_file_event_handler will always
# return PolicyNow. Using the path makes syncing historic runs much
# faster if the name happens to include glob escapable characters. In
# the future we may add a flag to "files" records that indicates it's
# policy is not dynamic and doesn't need to be stored / checked.
save_name = LogicalPath(
os.path.relpath(os.path.join(self._dir, path), self._dir)
)
if save_name.startswith("media/"):
pass
elif path == glob.escape(path):
self._savename_file_policies[save_name] = policy
else:
self._user_file_policies[policy].add(path)
for src_path in glob.glob(os.path.join(self._dir, path)):
save_name = LogicalPath(os.path.relpath(src_path, self._dir))
feh = self._get_file_event_handler(src_path, save_name)
# handle the case where the policy changed
if feh.policy != policy:
try:
del self._file_event_handlers[save_name]
except KeyError:
# TODO: probably should do locking, but this handles moved files for now
pass
feh = self._get_file_event_handler(src_path, save_name)
feh.on_modified(force=True)
def _per_file_event_handler(self) -> "wd_events.FileSystemEventHandler":
"""Create a Watchdog file event handler that does different things for every file."""
file_event_handler = wd_events.PatternMatchingEventHandler()
file_event_handler.on_created = self._on_file_created
file_event_handler.on_modified = self._on_file_modified
file_event_handler.on_moved = self._on_file_moved
file_event_handler._patterns = [os.path.join(self._dir, os.path.normpath("*"))]
# Ignore hidden files/folders
# TODO: what other files should we skip?
file_event_handler._ignore_patterns = [
"*.tmp",
"*.wandb",
"wandb-summary.json",
os.path.join(self._dir, ".*"),
os.path.join(self._dir, "*/.*"),
]
for glb in self._settings.ignore_globs:
file_event_handler._ignore_patterns.append(os.path.join(self._dir, glb))
return file_event_handler
def _on_file_created(self, event: "wd_events.FileCreatedEvent") -> None:
logger.info("file/dir created: %s", event.src_path)
if os.path.isdir(event.src_path):
return None
self._file_count += 1
# We do the directory scan less often as it grows
if self._file_count % 100 == 0:
emitter = self.emitter
if emitter:
emitter._timeout = int(self._file_count / 100) + 1
save_name = LogicalPath(os.path.relpath(event.src_path, self._dir))
self._get_file_event_handler(event.src_path, save_name).on_modified()
# TODO(spencerpearson): this pattern repeats so many times we should have a method/function for it
# def _save_name(self, path: PathStr) -> LogicalPath:
# return LogicalPath(os.path.relpath(path, self._dir))
def _on_file_modified(self, event: "wd_events.FileModifiedEvent") -> None:
logger.info(f"file/dir modified: {event.src_path}")
if os.path.isdir(event.src_path):
return None
save_name = LogicalPath(os.path.relpath(event.src_path, self._dir))
self._get_file_event_handler(event.src_path, save_name).on_modified()
def _on_file_moved(self, event: "wd_events.FileMovedEvent") -> None:
# TODO: test me...
logger.info(f"file/dir moved: {event.src_path} -> {event.dest_path}")
if os.path.isdir(event.dest_path):
return None
old_save_name = LogicalPath(os.path.relpath(event.src_path, self._dir))
new_save_name = LogicalPath(os.path.relpath(event.dest_path, self._dir))
# We have to move the existing file handler to the new name
handler = self._get_file_event_handler(event.src_path, old_save_name)
self._file_event_handlers[new_save_name] = handler
del self._file_event_handlers[old_save_name]
handler.on_renamed(event.dest_path, new_save_name)
def _get_file_event_handler(
self, file_path: PathStr, save_name: LogicalPath
) -> FileEventHandler:
"""Get or create an event handler for a particular file.
file_path: the file's actual path
save_name: its path relative to the run directory (aka the watch directory)
"""
# Always return PolicyNow for any of our media files.
if save_name.startswith("media/"):
return PolicyNow(file_path, save_name, self._file_pusher, self._settings)
if save_name not in self._file_event_handlers:
# TODO: we can use PolicyIgnore if there are files we never want to sync
if "tfevents" in save_name or "graph.pbtxt" in save_name:
self._file_event_handlers[save_name] = PolicyLive(
file_path, save_name, self._file_pusher, self._settings
)
elif save_name in self._savename_file_policies:
policy_name = self._savename_file_policies[save_name]
make_handler = (
PolicyLive
if policy_name == "live"
else PolicyNow
if policy_name == "now"
else PolicyEnd
)
self._file_event_handlers[save_name] = make_handler(
file_path, save_name, self._file_pusher, self._settings
)
else:
make_handler = PolicyEnd
for policy, globs in self._user_file_policies.items():
if policy == "end":
continue
# Convert set to list to avoid RuntimeError's
# TODO: we may need to add locks
for g in list(globs):
paths = glob.glob(os.path.join(self._dir, g))
if any(save_name in p for p in paths):
if policy == "live":
make_handler = PolicyLive
elif policy == "now":
make_handler = PolicyNow
self._file_event_handlers[save_name] = make_handler(
file_path, save_name, self._file_pusher, self._settings
)
return self._file_event_handlers[save_name]
def finish(self) -> None:
logger.info("shutting down directory watcher")
try:
# avoid hanging if we crashed before the observer was started
if self._file_observer.is_alive():
# rather unfortunately we need to manually do a final scan of the dir
# with `queue_events`, then iterate through all events before stopping
# the observer to catch all files written. First we need to prevent the
# existing thread from consuming our final events, then we process them
self._file_observer._timeout = 0
self._file_observer._stopped_event.set()
self._file_observer.join()
self.emitter.queue_events(0) # type: ignore[union-attr]
while True:
try:
self._file_observer.dispatch_events(
self._file_observer.event_queue, 0
)
except queue.Empty:
break
# Calling stop unschedules any inflight events so we handled them above
self._file_observer.stop()
# TODO: py2 TypeError: PyCObject_AsVoidPtr called with null pointer
except TypeError:
pass
# TODO: py3 SystemError: <built-in function stop> returned an error
except SystemError:
pass
# Ensure we've at least noticed every file in the run directory. Sometimes
# we miss things because asynchronously watching filesystems isn't reliable.
logger.info("scan: %s", self._dir)
for dirpath, _, filenames in os.walk(self._dir):
for fname in filenames:
file_path = os.path.join(dirpath, fname)
save_name = LogicalPath(os.path.relpath(file_path, self._dir))
ignored = False
for glb in self._settings.ignore_globs:
if len(fnmatch.filter([save_name], glb)) > 0:
ignored = True
logger.info("ignored: %s matching glob %s", save_name, glb)
break
if ignored:
continue
logger.info("scan save: %s %s", file_path, save_name)
self._get_file_event_handler(file_path, save_name).finish()
| DirWatcher |
python | google__pytype | pytype/state_test.py | {
"start": 2750,
"end": 3413
} | class ____(ConditionTestBase):
def new_binding(self, value=AMBIGUOUS):
var = self._program.NewVariable()
return var.AddBinding(value)
def test_no_parent(self):
x = self.new_binding()
y = self.new_binding()
z = self.new_binding()
c = frame_state.Condition(self._node, [[x, y], [z]])
self.check_binding("x=? y=? | z=?", c.binding, x=x, y=y, z=z)
def test_parent_combination(self):
p = self.new_binding()
x = self.new_binding()
y = self.new_binding()
z = self.new_binding()
c = frame_state.Condition(self._node, [[x, y], [z]])
self.check_binding("x=? y=? | z=?", c.binding, p=p, x=x, y=y, z=z)
| ConditionTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.