language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kennethreitz__tablib | tests/test_tablib.py | {
"start": 35645,
"end": 37249
} | class ____(BaseTestCase):
def test_yaml_format_detect(self):
"""Test YAML format detection."""
_yaml = '- {age: 90, first_name: John, last_name: Adams}'
_tsv = 'foo\tbar'
_bunk = (
'¡¡¡¡¡¡---///\n\n\n¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†'
'ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
)
fmt = registry.get_format('yaml')
self.assertTrue(fmt.detect(_yaml))
self.assertFalse(fmt.detect(_bunk))
self.assertFalse(fmt.detect(_tsv))
def test_yaml_import_book(self):
"""Generate and import YAML book serialization."""
data.append(self.john)
data.append(self.george)
data.headers = self.headers
book.add_sheet(data)
_yaml = book.yaml
book.yaml = _yaml
self.assertEqual(_yaml, book.yaml)
# Same with the load interface
book2 = tablib.Databook().load(_yaml, None)
self.assertEqual(_yaml, book2.yaml)
def test_yaml_import_set(self):
"""Generate and import YAML set serialization."""
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_yaml = data.yaml
data.yaml = _yaml
self.assertEqual(_yaml, data.yaml)
def test_yaml_export(self):
"""YAML export"""
expected = """\
- {first_name: John, gpa: 90, last_name: Adams}
- {first_name: George, gpa: 67, last_name: Washington}
- {first_name: Thomas, gpa: 50, last_name: Jefferson}
"""
output = self.founders.yaml
self.assertEqual(output, expected)
| YAMLTests |
python | walkccc__LeetCode | solutions/3243. Shortest Distance After Road Addition Queries I/3243.py | {
"start": 0,
"end": 924
} | class ____:
def shortestDistanceAfterQueries(
self,
n: int,
queries: list[list[int]],
) -> list[int]:
ans = []
dist = list(range(n))
graph = [[] for _ in range(n)]
for i in range(n - 1):
graph[i].append(i + 1)
for u, v in queries:
graph[u].append(v)
if dist[u] + 1 < dist[v]:
dist[v] = dist[u] + 1
self._bfs(graph, v, dist)
ans.append(dist[n - 1])
return ans
def _bfs(self, graph: list[list[int]], start: int, dist: list[int]) -> None:
"""
Performs a BFS to update the shortest distances from the given `start` node
to all other reachable nodes in the graph. It updates the `dist` vector
with the new shortest distances.
"""
q = collections.deque([start])
while q:
u = q.popleft()
for v in graph[u]:
if dist[u] + 1 < dist[v]:
dist[v] = dist[u] + 1
q.append(v)
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_print_area07.py | {
"start": 315,
"end": 1196
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("print_area07.xlsx")
self.ignore_files = [
"xl/printerSettings/printerSettings1.bin",
"xl/worksheets/_rels/sheet1.xml.rels",
]
self.ignore_elements = {
"[Content_Types].xml": ['<Default Extension="bin"'],
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"],
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with a print area."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.print_area("A1:XFD1048576")
worksheet.write("A1", "Foo")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django-import-export__django-import-export | tests/core/tests/admin_integration/test_views.py | {
"start": 331,
"end": 456
} | class ____(ImportExportMixinBase, ModelAdmin):
change_list_template = "admin/import_export/change_list.html"
| MockModelAdmin |
python | facelessuser__soupsieve | soupsieve/css_types.py | {
"start": 7368,
"end": 7666
} | class ____(Immutable):
"""Selector contains rule."""
__slots__ = ("text", "own", "_hash")
text: tuple[str, ...]
own: bool
def __init__(self, text: Iterable[str], own: bool) -> None:
"""Initialize."""
super().__init__(text=tuple(text), own=own)
| SelectorContains |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py | {
"start": 44823,
"end": 47697
} | class ____(RNNCell):
"""Base class for cells wrappers V1 compatibility.
This class along with `_RNNCellWrapperV2` allows to define cells wrappers that
are compatible with V1 and V2, and defines helper methods for this purpose.
"""
def __init__(self, cell, *args, **kwargs):
super(_RNNCellWrapperV1, self).__init__(*args, **kwargs)
assert_like_rnncell("cell", cell)
self.cell = cell
if isinstance(cell, trackable.Trackable):
self._track_trackable(self.cell, name="cell")
def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
"""Calls the wrapped cell and performs the wrapping logic.
This method is called from the wrapper's `call` or `__call__` methods.
Args:
inputs: A tensor with wrapped cell's input.
state: A tensor or tuple of tensors with wrapped cell's state.
cell_call_fn: Wrapped cell's method to use for step computation (cell's
`__call__` or 'call' method).
**kwargs: Additional arguments.
Returns:
A pair containing:
- Output: A tensor with cell's output.
- New state: A tensor or tuple of tensors with new wrapped cell's state.
"""
raise NotImplementedError
def __call__(self, inputs, state, scope=None):
"""Runs the RNN cell step computation.
We assume that the wrapped RNNCell is being built within its `__call__`
method. We directly use the wrapped cell's `__call__` in the overridden
wrapper `__call__` method.
This allows to use the wrapped cell and the non-wrapped cell equivalently
when using `__call__`.
Args:
inputs: A tensor with wrapped cell's input.
state: A tensor or tuple of tensors with wrapped cell's state.
scope: VariableScope for the subgraph created in the wrapped cells'
`__call__`.
Returns:
A pair containing:
- Output: A tensor with cell's output.
- New state: A tensor or tuple of tensors with new wrapped cell's state.
"""
return self._call_wrapped_cell(
inputs, state, cell_call_fn=self.cell.__call__, scope=scope)
def get_config(self):
config = {
"cell": {
"class_name": self.cell.__class__.__name__,
"config": self.cell.get_config()
},
}
base_config = super(_RNNCellWrapperV1, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy()
cell = config.pop("cell")
try:
assert_like_rnncell("cell", cell)
return cls(cell, **config)
except TypeError:
raise ValueError("RNNCellWrapper cannot reconstruct the wrapped cell. "
"Please overwrite the cell in the config with a RNNCell "
"instance.")
@tf_export(v1=["nn.rnn_cell.DropoutWrapper"])
| _RNNCellWrapperV1 |
python | pytorch__pytorch | benchmarks/operator_benchmark/benchmark_pytorch.py | {
"start": 530,
"end": 5822
} | class ____(torch.nn.Module):
"""This is a base class used to create Pytorch operator benchmark.
module_name is the name of the operator being benchmarked.
test_name is the name (it's created by concatenating all the
inputs) of a specific test
"""
def __init__(self):
super().__init__()
self.user_given_name = None
self._pass_count = 0
self._num_inputs_require_grads = 0
def _set_backward_test(self, is_backward):
self._is_backward = is_backward
def auto_set(self):
"""This is used to automatically set the require_grad for the backward patch.
It is implemented based on two counters. One counter to save the number of
times init has been called. The other counter to save the number of times
this function itself has been called. In the very first time init is called,
this function counts how many inputs require gradient. In each of the
following init calls, this function will return only one true value.
Here is an example:
...
self.v1 = torch.rand(M, N, K, requires_grad=self.auto_set())
self.v2 = torch.rand(M, N, K, requires_grad=self.auto_set())
...
"""
if not self._is_backward:
return False
if self._pass_count == 0:
self._num_inputs_require_grads += 1
return True
else:
self._auto_set_counter += 1
return self._pass_count == self._auto_set_counter
def extract_inputs_tuple(self):
self.inputs_tuple = tuple(self.inputs.values())
@torch.jit.export
def get_inputs(self):
# Need to convert the inputs to tuple outside of JIT so that
# JIT can infer the size of the inputs.
return self.inputs_tuple
@torch.jit.export
def forward_impl(self):
# This is to supply the inputs to the forward function which
# will be called in both the eager and JIT mode of local runs
return self.forward(*self.get_inputs())
@torch.jit.export
def forward_consume(self, iters: int):
# _consume is used to avoid the dead-code-elimination optimization
for _ in range(iters):
torch.ops.operator_benchmark._consume(self.forward_impl())
def forward_impl_eager(self):
# This is to supply the inputs to the forward function which
# will be called in both the eager and compile mode of local runs
return self.forward(*self.get_inputs())
def forward_consume_eager(self, iters: int):
# Eager version of forward_consume without decorators (compilation handled by torch.compile)
for _ in range(iters):
torch.ops.operator_benchmark._consume(self.forward_impl_eager())
def module_name(self):
"""this is used to label the operator being benchmarked"""
if self.user_given_name:
return self.user_given_name
return self.__class__.__name__
def set_module_name(self, name):
self.user_given_name = name
def test_name(self, **kargs):
"""this is a globally unique name which can be used to
label a specific test
"""
# This is a list of attributes which will not be included
# in the test name.
skip_key_list = ["device"]
test_name_str = []
for key in kargs:
value = kargs[key]
test_name_str.append(
("" if key in skip_key_list else key)
+ str(value if type(value) is not bool else int(value))
)
name = (self.module_name() + "_" + "_".join(test_name_str)).replace(" ", "")
return name
def get_memory_traffic_bytes(self):
"""Return the number of bytes read/written by this operator.
Override this method in subclasses for operations with non-standard memory patterns
(e.g., matmul which is compute-bound rather than memory-bound).
The framework will use this value along with execution time to compute
and report memory bandwidth in GB/s.
Default implementation assumes a pointwise-like operation:
- Reads: all input tensors
- Writes: output tensor (estimated as size of largest input)
This default works correctly for:
- Element-wise operations (add, mul, relu, etc.)
- Activations (gelu, sigmoid, etc.)
- Optimizers (SGD, Adam, etc.)
- Reductions (sum, mean, etc. - may underestimate writes)
Returns:
int or None: Total bytes transferred (reads + writes), or None if not applicable
"""
if not hasattr(self, "inputs") or not self.inputs:
return None
input_tensors = [v for v in self.inputs.values() if isinstance(v, torch.Tensor)]
if not input_tensors:
return None
# Calculate total bytes read from all inputs
bytes_read = sum(t.numel() * t.element_size() for t in input_tensors)
# Estimate output size as the largest input (common for pointwise ops)
largest_input = max(input_tensors, key=lambda t: t.numel())
bytes_written = largest_input.numel() * largest_input.element_size()
return bytes_read + bytes_written
| TorchBenchmarkBase |
python | openai__openai-python | src/openai/resources/evals/runs/runs.py | {
"start": 21377,
"end": 22119
} | class ____:
def __init__(self, runs: Runs) -> None:
self._runs = runs
self.create = _legacy_response.to_raw_response_wrapper(
runs.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
runs.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
runs.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
runs.delete,
)
self.cancel = _legacy_response.to_raw_response_wrapper(
runs.cancel,
)
@cached_property
def output_items(self) -> OutputItemsWithRawResponse:
return OutputItemsWithRawResponse(self._runs.output_items)
| RunsWithRawResponse |
python | django__django | tests/check_framework/urls/cbv_as_view.py | {
"start": 131,
"end": 389
} | class ____:
def __call__(self, request, *args, **kwargs):
return HttpResponse()
urlpatterns = [
path("missing_as_view", EmptyCBV),
path("has_as_view", EmptyCBV.as_view()),
path("callable_class", EmptyCallableView()),
]
| EmptyCallableView |
python | jina-ai__jina | jina/checker.py | {
"start": 112,
"end": 2794
} | class ____:
"""Check if a Deployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina.clients import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.servers import BaseServer
try:
total_time = 0
total_success = 0
timeout = args.timeout / 1000 if args.timeout != -1 else None
for j in range(args.attempts):
with TimeContext(
f'ping {args.target} on {args.host} at {j} round', default_logger
) as tc:
if args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=timeout)
else:
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = BaseServer.is_ready(
ctrl_address=f'{hostname}:{port}',
timeout=timeout,
protocol=protocol,
)
if not r:
default_logger.warning(
'not responding, attempt (%d/%d) in 1s'
% (j + 1, args.attempts)
)
else:
total_success += 1
total_time += tc.duration
if args.attempts > 0:
time.sleep(1)
if total_success < args.attempts:
default_logger.debug(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.attempts) * 100,
args.attempts - total_success,
args.attempts,
)
)
if total_success > 0:
default_logger.debug(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
if total_success >= args.min_successful_attempts:
default_logger.debug(
f'readiness check succeeded {total_success} times!!!'
)
exit(0)
else:
default_logger.debug(
f'readiness check succeeded {total_success} times, less than {args.min_successful_attempts}'
)
except KeyboardInterrupt:
pass
# returns 1 (anomaly) when it comes to here
exit(1)
| NetworkChecker |
python | encode__starlette | starlette/middleware/wsgi.py | {
"start": 2705,
"end": 5350
} | class ____:
stream_send: ObjectSendStream[MutableMapping[str, Any]]
stream_receive: ObjectReceiveStream[MutableMapping[str, Any]]
def __init__(self, app: Callable[..., Any], scope: Scope) -> None:
self.app = app
self.scope = scope
self.status = None
self.response_headers = None
self.stream_send, self.stream_receive = anyio.create_memory_object_stream(math.inf)
self.response_started = False
self.exc_info: Any = None
async def __call__(self, receive: Receive, send: Send) -> None:
body = b""
more_body = True
while more_body:
message = await receive()
body += message.get("body", b"")
more_body = message.get("more_body", False)
environ = build_environ(self.scope, body)
async with anyio.create_task_group() as task_group:
task_group.start_soon(self.sender, send)
async with self.stream_send:
await anyio.to_thread.run_sync(self.wsgi, environ, self.start_response)
if self.exc_info is not None:
raise self.exc_info[0].with_traceback(self.exc_info[1], self.exc_info[2])
async def sender(self, send: Send) -> None:
async with self.stream_receive:
async for message in self.stream_receive:
await send(message)
def start_response(
self,
status: str,
response_headers: list[tuple[str, str]],
exc_info: Any = None,
) -> None:
self.exc_info = exc_info
if not self.response_started: # pragma: no branch
self.response_started = True
status_code_string, _ = status.split(" ", 1)
status_code = int(status_code_string)
headers = [
(name.strip().encode("ascii").lower(), value.strip().encode("ascii"))
for name, value in response_headers
]
anyio.from_thread.run(
self.stream_send.send,
{
"type": "http.response.start",
"status": status_code,
"headers": headers,
},
)
def wsgi(
self,
environ: dict[str, Any],
start_response: Callable[..., Any],
) -> None:
for chunk in self.app(environ, start_response):
anyio.from_thread.run(
self.stream_send.send,
{"type": "http.response.body", "body": chunk, "more_body": True},
)
anyio.from_thread.run(self.stream_send.send, {"type": "http.response.body", "body": b""})
| WSGIResponder |
python | fsspec__filesystem_spec | fsspec/implementations/arrow.py | {
"start": 6549,
"end": 7014
} | class ____(io.IOBase):
def __init__(self, fs, stream, path, mode, block_size=None, **kwargs):
self.path = path
self.mode = mode
self.fs = fs
self.stream = stream
self.blocksize = self.block_size = block_size
self.kwargs = kwargs
def __enter__(self):
return self
@property
def size(self):
return self.stream.size()
def __exit__(self, *args):
return self.close()
| ArrowFile |
python | huggingface__transformers | src/transformers/models/fnet/modeling_fnet.py | {
"start": 12814,
"end": 13286
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = FNetPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
| FNetLMPredictionHead |
python | hynek__structlog | src/structlog/twisted.py | {
"start": 5935,
"end": 6558
} | class ____:
"""
Write only the plain message without timestamps or anything else.
Great to just print JSON to stdout where you catch it with something like
runit.
Args:
file: File to print to.
.. versionadded:: 0.2.0
"""
def __init__(self, file: TextIO) -> None:
self._write = file.write
self._flush = file.flush
def __call__(self, eventDict: EventDict) -> None:
self._write(
textFromEventDict(eventDict) # type: ignore[arg-type, operator]
+ "\n",
)
self._flush()
@implementer(ILogObserver)
| PlainFileLogObserver |
python | python__mypy | mypyc/ir/ops.py | {
"start": 49864,
"end": 50605
} | class ____(RegisterOp):
"""Get the address of a struct element.
Note that you may need to use KeepAlive to avoid the struct
being freed, if it's reference counted, such as PyObject *.
"""
error_kind = ERR_NEVER
def __init__(self, src: Value, src_type: RType, field: str, line: int = -1) -> None:
super().__init__(line)
self.type = pointer_rprimitive
self.src = src
self.src_type = src_type
self.field = field
def sources(self) -> list[Value]:
return [self.src]
def set_sources(self, new: list[Value]) -> None:
(self.src,) = new
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_get_element_ptr(self)
@final
| GetElementPtr |
python | sympy__sympy | sympy/physics/optics/medium.py | {
"start": 4484,
"end": 7124
} | class ____(Medium):
"""
Represents an optical medium for which the permittivity and permeability are known.
This class should never be instantiated directly. Instead it should be
instantiated indirectly by instantiating Medium with any two of
permittivity, permeability, and n specified, or by not specifying any
of permittivity, permeability, or n, in which case default values for
permittivity and permeability will be used.
Examples
========
>>> from sympy.physics.optics import Medium
>>> from sympy.abc import epsilon, mu
>>> m1 = Medium('m1', permittivity=epsilon, permeability=mu)
>>> m1
MediumPP(Str('m1'), epsilon, mu)
>>> m2 = Medium('m2')
>>> m2
MediumPP(Str('m2'), 625000*ampere**2*second**4/(22468879468420441*pi*kilogram*meter**3), pi*kilogram*meter/(2500000*ampere**2*second**2))
"""
def __new__(cls, name, permittivity, permeability):
obj = super(Medium, cls).__new__(cls, name, permittivity, permeability)
return obj
@property
def intrinsic_impedance(self):
"""
Returns intrinsic impedance of the medium.
Explanation
===========
The intrinsic impedance of a medium is the ratio of the
transverse components of the electric and magnetic fields
of the electromagnetic wave travelling in the medium.
In a region with no electrical conductivity it simplifies
to the square root of ratio of magnetic permeability to
electric permittivity.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.intrinsic_impedance
149896229*pi*kilogram*meter**2/(1250000*ampere**2*second**3)
"""
return sqrt(self.permeability / self.permittivity)
@property
def permittivity(self):
"""
Returns electric permittivity of the medium.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.permittivity
625000*ampere**2*second**4/(22468879468420441*pi*kilogram*meter**3)
"""
return self.args[1]
@property
def permeability(self):
"""
Returns magnetic permeability of the medium.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.permeability
pi*kilogram*meter/(2500000*ampere**2*second**2)
"""
return self.args[2]
@property
def n(self):
return c*sqrt(self.permittivity*self.permeability)
| MediumPP |
python | scipy__scipy | scipy/interpolate/tests/test_fitpack.py | {
"start": 10551,
"end": 11762
} | class ____:
def test_len_c(self):
n, k = 7, 3
x = np.arange(n)
y = x**3
t, c, k = splrep(x, y, s=0)
# note that len(c) == len(t) == 11 (== len(x) + 2*(k-1))
assert len(t) == len(c) == n + 2*(k-1)
# integrate directly: $\int_0^6 x^3 dx = 6^4 / 4$
res = splint(0, 6, (t, c, k))
expected = 6**4 / 4
assert abs(res - expected) < 1e-13
# check that the coefficients past len(t) - k - 1 are ignored
c0 = c.copy()
c0[len(t) - k - 1:] = np.nan
res0 = splint(0, 6, (t, c0, k))
assert abs(res0 - expected) < 1e-13
# however, all other coefficients *are* used
c0[6] = np.nan
assert np.isnan(splint(0, 6, (t, c0, k)))
# check that the coefficient array can have length `len(t) - k - 1`
c1 = c[:len(t) - k - 1]
res1 = splint(0, 6, (t, c1, k))
assert (res1 - expected) < 1e-13
# however shorter c arrays raise. The error from f2py is a
# `dftipack.error`, which is an Exception but not ValueError etc.
with assert_raises(Exception, match=r">=n-k-1"):
splint(0, 1, (np.ones(10), np.ones(5), 3))
| TestSplint |
python | scipy__scipy | scipy/stats/tests/test_survival.py | {
"start": 1540,
"end": 18764
} | class ____:
@staticmethod
def get_random_sample(rng, n_unique):
# generate random sample
unique_times = rng.random(n_unique)
# convert to `np.int32` to resolve `np.repeat` failure in 32-bit CI
repeats = rng.integers(1, 4, n_unique).astype(np.int32)
times = rng.permuted(np.repeat(unique_times, repeats))
censored = rng.random(size=times.size) > rng.random()
sample = stats.CensoredData.right_censored(times, censored)
return sample, times, censored
def test_input_validation(self):
message = '`sample` must be a one-dimensional sequence.'
with pytest.raises(ValueError, match=message):
stats.ecdf([[1]])
with pytest.raises(ValueError, match=message):
stats.ecdf(1)
message = '`sample` must not contain nan'
with pytest.raises(ValueError, match=message):
stats.ecdf([np.nan])
message = 'Currently, only uncensored and right-censored data...'
with pytest.raises(NotImplementedError, match=message):
stats.ecdf(stats.CensoredData.left_censored([1], censored=[True]))
message = 'method` must be one of...'
res = stats.ecdf([1, 2, 3])
with pytest.raises(ValueError, match=message):
res.cdf.confidence_interval(method='ekki-ekki')
with pytest.raises(ValueError, match=message):
res.sf.confidence_interval(method='shrubbery')
message = 'confidence_level` must be a scalar between 0 and 1'
with pytest.raises(ValueError, match=message):
res.cdf.confidence_interval(-1)
with pytest.raises(ValueError, match=message):
res.sf.confidence_interval([0.5, 0.6])
message = 'The confidence interval is undefined at some observations.'
with pytest.warns(RuntimeWarning, match=message):
ci = res.cdf.confidence_interval()
message = 'Confidence interval bounds do not implement...'
with pytest.raises(NotImplementedError, match=message):
ci.low.confidence_interval()
with pytest.raises(NotImplementedError, match=message):
ci.high.confidence_interval()
def test_edge_cases(self):
res = stats.ecdf([])
assert_equal(res.cdf.quantiles, [])
assert_equal(res.cdf.probabilities, [])
res = stats.ecdf([1])
assert_equal(res.cdf.quantiles, [1])
assert_equal(res.cdf.probabilities, [1])
def test_unique(self):
# Example with unique observations; `stats.ecdf` ref. [1] page 80
sample = [6.23, 5.58, 7.06, 6.42, 5.20]
res = stats.ecdf(sample)
ref_x = np.sort(np.unique(sample))
ref_cdf = np.arange(1, 6) / 5
ref_sf = 1 - ref_cdf
assert_equal(res.cdf.quantiles, ref_x)
assert_equal(res.cdf.probabilities, ref_cdf)
assert_equal(res.sf.quantiles, ref_x)
assert_equal(res.sf.probabilities, ref_sf)
def test_nonunique(self):
# Example with non-unique observations; `stats.ecdf` ref. [1] page 82
sample = [0, 2, 1, 2, 3, 4]
res = stats.ecdf(sample)
ref_x = np.sort(np.unique(sample))
ref_cdf = np.array([1/6, 2/6, 4/6, 5/6, 1])
ref_sf = 1 - ref_cdf
assert_equal(res.cdf.quantiles, ref_x)
assert_equal(res.cdf.probabilities, ref_cdf)
assert_equal(res.sf.quantiles, ref_x)
assert_equal(res.sf.probabilities, ref_sf)
def test_evaluate_methods(self):
# Test CDF and SF `evaluate` methods
rng = np.random.default_rng(1162729143302572461)
sample, _, _ = self.get_random_sample(rng, 15)
res = stats.ecdf(sample)
x = res.cdf.quantiles
xr = x + np.diff(x, append=x[-1]+1)/2 # right shifted points
assert_equal(res.cdf.evaluate(x), res.cdf.probabilities)
assert_equal(res.cdf.evaluate(xr), res.cdf.probabilities)
assert_equal(res.cdf.evaluate(x[0]-1), 0) # CDF starts at 0
assert_equal(res.cdf.evaluate([-np.inf, np.inf]), [0, 1])
assert_equal(res.sf.evaluate(x), res.sf.probabilities)
assert_equal(res.sf.evaluate(xr), res.sf.probabilities)
assert_equal(res.sf.evaluate(x[0]-1), 1) # SF starts at 1
assert_equal(res.sf.evaluate([-np.inf, np.inf]), [1, 0])
# ref. [1] page 91
t1 = [37, 43, 47, 56, 60, 62, 71, 77, 80, 81] # times
d1 = [0, 0, 1, 1, 0, 0, 0, 1, 1, 1] # 1 means deaths (not censored)
r1 = [1, 1, 0.875, 0.75, 0.75, 0.75, 0.75, 0.5, 0.25, 0] # reference SF
# https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html
t2 = [8, 12, 26, 14, 21, 27, 8, 32, 20, 40]
d2 = [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]
r2 = [0.9, 0.788, 0.675, 0.675, 0.54, 0.405, 0.27, 0.27, 0.27]
t3 = [33, 28, 41, 48, 48, 25, 37, 48, 25, 43]
d3 = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
r3 = [1, 0.875, 0.75, 0.75, 0.6, 0.6, 0.6]
# https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/bs704_survival4.html
t4 = [24, 3, 11, 19, 24, 13, 14, 2, 18, 17,
24, 21, 12, 1, 10, 23, 6, 5, 9, 17]
d4 = [0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1]
r4 = [0.95, 0.95, 0.897, 0.844, 0.844, 0.844, 0.844, 0.844, 0.844,
0.844, 0.76, 0.676, 0.676, 0.676, 0.676, 0.507, 0.507]
# https://www.real-statistics.com/survival-analysis/kaplan-meier-procedure/confidence-interval-for-the-survival-function/
t5 = [3, 5, 8, 10, 5, 5, 8, 12, 15, 14, 2, 11, 10, 9, 12, 5, 8, 11]
d5 = [1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1]
r5 = [0.944, 0.889, 0.722, 0.542, 0.542, 0.542, 0.361, 0.181, 0.181, 0.181]
@pytest.mark.parametrize("case", [(t1, d1, r1), (t2, d2, r2), (t3, d3, r3),
(t4, d4, r4), (t5, d5, r5)])
def test_right_censored_against_examples(self, case):
# test `ecdf` against other implementations on example problems
times, died, ref = case
sample = stats.CensoredData.right_censored(times, np.logical_not(died))
res = stats.ecdf(sample)
assert_allclose(res.sf.probabilities, ref, atol=1e-3)
assert_equal(res.sf.quantiles, np.sort(np.unique(times)))
# test reference implementation against other implementations
res = _kaplan_meier_reference(times, np.logical_not(died))
assert_equal(res[0], np.sort(np.unique(times)))
assert_allclose(res[1], ref, atol=1e-3)
@pytest.mark.parametrize('seed', [182746786639392128, 737379171436494115,
576033618403180168, 308115465002673650])
def test_right_censored_against_reference_implementation(self, seed):
# test `ecdf` against reference implementation on random problems
rng = np.random.default_rng(seed)
n_unique = rng.integers(10, 100)
sample, times, censored = self.get_random_sample(rng, n_unique)
res = stats.ecdf(sample)
ref = _kaplan_meier_reference(times, censored)
assert_allclose(res.sf.quantiles, ref[0])
assert_allclose(res.sf.probabilities, ref[1])
# If all observations are uncensored, the KM estimate should match
# the usual estimate for uncensored data
sample = stats.CensoredData(uncensored=times)
res = _survival._ecdf_right_censored(sample) # force Kaplan-Meier
ref = stats.ecdf(times)
assert_equal(res[0], ref.sf.quantiles)
assert_allclose(res[1], ref.cdf.probabilities, rtol=1e-14)
assert_allclose(res[2], ref.sf.probabilities, rtol=1e-14)
def test_right_censored_ci(self):
# test "greenwood" confidence interval against example 4 (URL above).
times, died = self.t4, self.d4
sample = stats.CensoredData.right_censored(times, np.logical_not(died))
res = stats.ecdf(sample)
ref_allowance = [0.096, 0.096, 0.135, 0.162, 0.162, 0.162, 0.162,
0.162, 0.162, 0.162, 0.214, 0.246, 0.246, 0.246,
0.246, 0.341, 0.341]
sf_ci = res.sf.confidence_interval()
cdf_ci = res.cdf.confidence_interval()
allowance = res.sf.probabilities - sf_ci.low.probabilities
assert_allclose(allowance, ref_allowance, atol=1e-3)
assert_allclose(sf_ci.low.probabilities,
np.clip(res.sf.probabilities - allowance, 0, 1))
assert_allclose(sf_ci.high.probabilities,
np.clip(res.sf.probabilities + allowance, 0, 1))
assert_allclose(cdf_ci.low.probabilities,
np.clip(res.cdf.probabilities - allowance, 0, 1))
assert_allclose(cdf_ci.high.probabilities,
np.clip(res.cdf.probabilities + allowance, 0, 1))
# test "log-log" confidence interval against Mathematica
# e = {24, 3, 11, 19, 24, 13, 14, 2, 18, 17, 24, 21, 12, 1, 10, 23, 6, 5,
# 9, 17}
# ci = {1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0}
# R = EventData[e, ci]
# S = SurvivalModelFit[R]
# S["PointwiseIntervals", ConfidenceLevel->0.95,
# ConfidenceTransform->"LogLog"]
ref_low = [0.694743, 0.694743, 0.647529, 0.591142, 0.591142, 0.591142,
0.591142, 0.591142, 0.591142, 0.591142, 0.464605, 0.370359,
0.370359, 0.370359, 0.370359, 0.160489, 0.160489]
ref_high = [0.992802, 0.992802, 0.973299, 0.947073, 0.947073, 0.947073,
0.947073, 0.947073, 0.947073, 0.947073, 0.906422, 0.856521,
0.856521, 0.856521, 0.856521, 0.776724, 0.776724]
sf_ci = res.sf.confidence_interval(method='log-log')
assert_allclose(sf_ci.low.probabilities, ref_low, atol=1e-6)
assert_allclose(sf_ci.high.probabilities, ref_high, atol=1e-6)
def test_right_censored_ci_example_5(self):
# test "exponential greenwood" confidence interval against example 5
times, died = self.t5, self.d5
sample = stats.CensoredData.right_censored(times, np.logical_not(died))
res = stats.ecdf(sample)
lower = np.array([0.66639, 0.624174, 0.456179, 0.287822, 0.287822,
0.287822, 0.128489, 0.030957, 0.030957, 0.030957])
upper = np.array([0.991983, 0.970995, 0.87378, 0.739467, 0.739467,
0.739467, 0.603133, 0.430365, 0.430365, 0.430365])
sf_ci = res.sf.confidence_interval(method='log-log')
cdf_ci = res.cdf.confidence_interval(method='log-log')
assert_allclose(sf_ci.low.probabilities, lower, atol=1e-5)
assert_allclose(sf_ci.high.probabilities, upper, atol=1e-5)
assert_allclose(cdf_ci.low.probabilities, 1-upper, atol=1e-5)
assert_allclose(cdf_ci.high.probabilities, 1-lower, atol=1e-5)
# Test against R's `survival` library `survfit` function, 90%CI
# library(survival)
# options(digits=16)
# time = c(3, 5, 8, 10, 5, 5, 8, 12, 15, 14, 2, 11, 10, 9, 12, 5, 8, 11)
# status = c(1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1)
# res = survfit(Surv(time, status)
# ~1, conf.type = "log-log", conf.int = 0.90)
# res$time; res$lower; res$upper
low = [0.74366748406861172, 0.68582332289196246, 0.50596835651480121,
0.32913131413336727, 0.32913131413336727, 0.32913131413336727,
0.15986912028781664, 0.04499539918147757, 0.04499539918147757,
0.04499539918147757]
high = [0.9890291867238429, 0.9638835422144144, 0.8560366823086629,
0.7130167643978450, 0.7130167643978450, 0.7130167643978450,
0.5678602982997164, 0.3887616766886558, 0.3887616766886558,
0.3887616766886558]
sf_ci = res.sf.confidence_interval(method='log-log',
confidence_level=0.9)
assert_allclose(sf_ci.low.probabilities, low)
assert_allclose(sf_ci.high.probabilities, high)
# And with conf.type = "plain"
low = [0.8556383113628162, 0.7670478794850761, 0.5485720663578469,
0.3441515412527123, 0.3441515412527123, 0.3441515412527123,
0.1449184105424544, 0., 0., 0.]
high = [1., 1., 0.8958723780865975, 0.7391817920806210,
0.7391817920806210, 0.7391817920806210, 0.5773038116797676,
0.3642270254596720, 0.3642270254596720, 0.3642270254596720]
sf_ci = res.sf.confidence_interval(confidence_level=0.9)
assert_allclose(sf_ci.low.probabilities, low)
assert_allclose(sf_ci.high.probabilities, high)
def test_right_censored_ci_nans(self):
# test `ecdf` confidence interval on a problem that results in NaNs
times, died = self.t1, self.d1
sample = stats.CensoredData.right_censored(times, np.logical_not(died))
res = stats.ecdf(sample)
# Reference values generated with Matlab
# format long
# t = [37 43 47 56 60 62 71 77 80 81];
# d = [0 0 1 1 0 0 0 1 1 1];
# censored = ~d1;
# [f, x, flo, fup] = ecdf(t, 'Censoring', censored, 'Alpha', 0.05);
x = [37, 47, 56, 77, 80, 81]
flo = [np.nan, 0, 0, 0.052701464070711, 0.337611126231790, np.nan]
fup = [np.nan, 0.35417230377, 0.5500569798, 0.9472985359, 1.0, np.nan]
i = np.searchsorted(res.cdf.quantiles, x)
message = "The confidence interval is undefined at some observations"
with pytest.warns(RuntimeWarning, match=message):
ci = res.cdf.confidence_interval()
# Matlab gives NaN as the first element of the CIs. Mathematica agrees,
# but R's survfit does not. It makes some sense, but it's not what the
# formula gives, so skip that element.
assert_allclose(ci.low.probabilities[i][1:], flo[1:])
assert_allclose(ci.high.probabilities[i][1:], fup[1:])
# [f, x, flo, fup] = ecdf(t, 'Censoring', censored, 'Function',
# 'survivor', 'Alpha', 0.05);
flo = [np.nan, 0.64582769623, 0.449943020228, 0.05270146407, 0, np.nan]
fup = [np.nan, 1.0, 1.0, 0.947298535929289, 0.662388873768210, np.nan]
i = np.searchsorted(res.cdf.quantiles, x)
with pytest.warns(RuntimeWarning, match=message):
ci = res.sf.confidence_interval()
assert_allclose(ci.low.probabilities[i][1:], flo[1:])
assert_allclose(ci.high.probabilities[i][1:], fup[1:])
# With the same data, R's `survival` library `survfit` function
# doesn't produce the leading NaN
# library(survival)
# options(digits=16)
# time = c(37, 43, 47, 56, 60, 62, 71, 77, 80, 81)
# status = c(0, 0, 1, 1, 0, 0, 0, 1, 1, 1)
# res = survfit(Surv(time, status)
# ~1, conf.type = "plain", conf.int = 0.95)
# res$time
# res$lower
# res$upper
low = [1., 1., 0.64582769623233816, 0.44994302022779326,
0.44994302022779326, 0.44994302022779326, 0.44994302022779326,
0.05270146407071086, 0., np.nan]
high = [1., 1., 1., 1., 1., 1., 1., 0.9472985359292891,
0.6623888737682101, np.nan]
assert_allclose(ci.low.probabilities, low)
assert_allclose(ci.high.probabilities, high)
# It does with conf.type="log-log", as do we
with pytest.warns(RuntimeWarning, match=message):
ci = res.sf.confidence_interval(method='log-log')
low = [np.nan, np.nan, 0.38700001403202522, 0.31480711370551911,
0.31480711370551911, 0.31480711370551911, 0.31480711370551911,
0.08048821148507734, 0.01049958986680601, np.nan]
high = [np.nan, np.nan, 0.9813929658789660, 0.9308983170906275,
0.9308983170906275, 0.9308983170906275, 0.9308983170906275,
0.8263946341076415, 0.6558775085110887, np.nan]
assert_allclose(ci.low.probabilities, low)
assert_allclose(ci.high.probabilities, high)
def test_right_censored_against_uncensored(self):
rng = np.random.default_rng(7463952748044886637)
sample = rng.integers(10, 100, size=1000)
censored = np.zeros_like(sample)
censored[np.argmax(sample)] = True
res = stats.ecdf(sample)
ref = stats.ecdf(stats.CensoredData.right_censored(sample, censored))
assert_equal(res.sf.quantiles, ref.sf.quantiles)
assert_equal(res.sf._n, ref.sf._n)
assert_equal(res.sf._d[:-1], ref.sf._d[:-1]) # difference @ [-1]
assert_allclose(res.sf._sf[:-1], ref.sf._sf[:-1], rtol=1e-14)
def test_plot_iv(self):
rng = np.random.default_rng(1769658657308472721)
n_unique = rng.integers(10, 100)
sample, _, _ = self.get_random_sample(rng, n_unique)
res = stats.ecdf(sample)
try:
import matplotlib.pyplot as plt # noqa: F401
res.sf.plot() # no other errors occur
except (ModuleNotFoundError, ImportError):
message = r"matplotlib must be installed to use method `plot`."
with pytest.raises(ModuleNotFoundError, match=message):
res.sf.plot()
| TestSurvival |
python | huggingface__transformers | src/transformers/models/bridgetower/modeling_bridgetower.py | {
"start": 49901,
"end": 64569
} | class ____(BridgeTowerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
vision_config = config.vision_config
text_config = config.text_config
if config.share_cross_modal_transformer_layers:
self.cross_modal_text_transform = nn.Linear(text_config.hidden_size, config.hidden_size)
self.cross_modal_image_transform = nn.Linear(vision_config.hidden_size, config.hidden_size)
else:
self.cross_modal_text_transform = nn.ModuleList(
[nn.Linear(text_config.hidden_size, config.hidden_size) for _ in range(config.num_hidden_layers)]
)
self.cross_modal_image_transform = nn.ModuleList(
[nn.Linear(vision_config.hidden_size, config.hidden_size) for _ in range(config.num_hidden_layers)]
)
self.token_type_embeddings = nn.Embedding(2, config.hidden_size)
self.vision_model = BridgeTowerVisionModel(vision_config)
self.text_model = BridgeTowerTextModel(text_config)
if not vision_config.share_layernorm and config.init_layernorm_from_vision_encoder:
for ln in self.vision_model.visual.cross_modal_ln_separate:
ln.weight.data = self.vision_model.visual.ln_post.weight.data
ln.bias.data = self.vision_model.visual.ln_post.bias.data
self.cross_modal_image_layers = nn.ModuleList(
[BridgeTowerBertCrossLayer(text_config) for _ in range(config.num_hidden_layers)]
)
self.cross_modal_text_layers = nn.ModuleList(
[BridgeTowerBertCrossLayer(text_config) for _ in range(config.num_hidden_layers)]
)
# Class token => Linear => Tanh
self.cross_modal_image_pooler = BridgeTowerPooler(config)
self.cross_modal_text_pooler = BridgeTowerPooler(config)
# Initialize BridgeTower Components
self.cross_modal_text_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.cross_modal_image_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if config.share_link_tower_layers:
self.cross_modal_text_link_tower = BridgeTowerLinkTower(config)
self.cross_modal_image_link_tower = BridgeTowerLinkTower(config)
else:
self.cross_modal_text_link_tower = nn.ModuleList(
[BridgeTowerLinkTower(config) for _ in range(config.num_hidden_layers - 1)]
)
self.cross_modal_image_link_tower = nn.ModuleList(
[BridgeTowerLinkTower(config) for _ in range(config.num_hidden_layers - 1)]
)
self.post_init()
def get_input_embeddings(self):
return self.text_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.text_model.set_input_embeddings(value)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_mask: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_embeds: Optional[torch.FloatTensor] = None,
image_token_type_idx: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.LongTensor] = None,
interpolate_pos_encoding: bool = False,
) -> Union[tuple[torch.Tensor], BridgeTowerModelOutput]:
r"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
image_token_type_idx (`int`, *optional*):
- The token type ids for images.
output_hidden_states (`bool`, *optional*):
If set to `True`, hidden states are returned as a list containing the hidden states of text, image, and
cross-modal components respectively. i.e. `(hidden_states_text, hidden_states_image,
hidden_states_cross_modal)` where each element is a list of the hidden states of the corresponding
modality. `hidden_states_txt/img` are a list of tensors corresponding to unimodal hidden states and
`hidden_states_cross_modal` is a list of tuples containing `cross_modal_text_hidden_states` and
`cross_modal_image_hidden_states` of each brdige layer.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels are currently not supported.
Examples:
```python
>>> from transformers import BridgeTowerProcessor, BridgeTowerModel
>>> from PIL import Image
>>> import requests
>>> # prepare image and text
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "hello world"
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
>>> model = BridgeTowerModel.from_pretrained("BridgeTower/bridgetower-base")
>>> inputs = processor(image, text, return_tensors="pt")
>>> outputs = model(**inputs)
>>> outputs.keys()
odict_keys(['text_features', 'image_features', 'pooler_output'])
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
all_hidden_states_text = () if output_hidden_states else None
all_hidden_states_image = () if output_hidden_states else None
all_hidden_states_cross = () if output_hidden_states else None
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if inputs_embeds is not None and input_ids is None:
raise NotImplementedError(
"BridgeTowerModel does not use `inputs_embeds`. Make sure to pass in `input_ids` instead."
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
image_token_type_idx = image_token_type_idx if image_token_type_idx else 1
input_shape = input_ids.size()
text_embeds = self.text_model.embeddings(input_ids=input_ids)
if output_hidden_states:
all_hidden_states_text += (text_embeds,)
if attention_mask is None:
attention_mask = torch.ones(input_shape, dtype=torch.long, device=input_ids.device)
extend_text_masks = self.text_model.get_extended_attention_mask(attention_mask, input_shape).to(
input_ids.device
)
# The split_index determines how many layers of the uni-modal encoder are applied before the cross-modal encoder
split_index = len(self.text_model.encoder.layer) - self.config.num_hidden_layers + 1
# Run the first 'split_index' layers of the textual encoder
for layer in self.text_model.encoder.layer[:split_index]:
text_embeds = layer(text_embeds, extend_text_masks)[0]
if output_hidden_states:
all_hidden_states_text += (text_embeds,)
if image_embeds is None:
image_embeds = self.vision_model.visual.forward_pre(
pixel_values.type(self.vision_model.dtype), interpolate_pos_encoding=interpolate_pos_encoding
)
else:
# Permute as BridgeTowerResidualAttention has batch_first=True
image_embeds = image_embeds.permute(1, 0, 2)
if output_hidden_states:
all_hidden_states_image += (image_embeds,)
# Run the first 'split_index' layers of the visual encoder
for block in self.vision_model.visual.transformer.resblocks[:split_index]:
image_embeds = block(image_embeds)
if output_hidden_states:
all_hidden_states_image += (image_embeds,)
image_embeds_with_ln = self.vision_model.visual.forward_post(image_embeds.type(self.vision_model.dtype))
# first layer is a special case because we don't have the output from the cross-encoder yet
cross_modal_text = self.cross_modal_text_transform(text_embeds)
text_token_type_embeddings = self.token_type_embeddings(
torch.zeros(1, dtype=torch.long, device=input_ids.device)
).expand_as(cross_modal_text)
cross_modal_text = self.cross_modal_text_layernorm(cross_modal_text + text_token_type_embeddings)
image_embeds_with_ln = self.cross_modal_image_transform(image_embeds_with_ln)
image_token_type_embeddings = self.token_type_embeddings(
torch.full((1,), image_token_type_idx, dtype=torch.long, device=input_ids.device)
).expand_as(image_embeds_with_ln)
image_embeds_with_ln = image_embeds_with_ln + image_token_type_embeddings
cross_modal_image = self.cross_modal_image_layernorm(image_embeds_with_ln)
pixel_mask = torch.ones(
(cross_modal_image.size(0), cross_modal_image.size(1)),
dtype=torch.long,
device=input_ids.device,
)
extend_image_masks = self.text_model.get_extended_attention_mask(pixel_mask, pixel_mask.size()).to(
input_ids.device
)
layer_outputs_text = self.cross_modal_text_layers[0](
cross_modal_text,
cross_modal_image,
attention_mask=extend_text_masks,
encoder_attention_mask=extend_image_masks,
output_attentions=output_attentions,
)
cross_text_features = layer_outputs_text[0]
layer_outputs_image = self.cross_modal_image_layers[0](
cross_modal_image,
cross_modal_text,
attention_mask=extend_image_masks,
encoder_attention_mask=extend_text_masks,
output_attentions=output_attentions,
)
cross_image_features = layer_outputs_image[0]
if output_hidden_states:
all_hidden_states_cross += ((cross_text_features, cross_image_features),)
if output_attentions:
all_self_attentions += ((layer_outputs_text[1], layer_outputs_image[1]),)
link_layer_index = 0
# Each of the top 6 layers of the visual and textual encoders ([split_index:]) is connected to each layer of
# the cross-modal encoder via bridge layers, which brings bottom-up alignment and fusion to the cross-modal encoder.
for i in range(split_index, len(self.text_model.encoder.layer)):
text_embeds = self.text_model.encoder.layer[i](text_embeds, extend_text_masks)[0]
image_embeds = self.vision_model.visual.transformer.resblocks[i](image_embeds).type(
self.vision_model.dtype
)
image_embeds_with_ln = (
self.cross_modal_image_transform(self.vision_model.visual.forward_post(image_embeds))
+ image_token_type_embeddings
)
text_link_tower = self.cross_modal_text_link_tower[link_layer_index]
image_link_tower = self.cross_modal_image_link_tower[link_layer_index]
# Bridge layers for textual and visual encoders
cross_text_features_ = text_link_tower(
self.cross_modal_text_transform(text_embeds) + text_token_type_embeddings,
cross_text_features,
extend_text_masks,
)
cross_image_features_ = image_link_tower(image_embeds_with_ln, cross_image_features, extend_image_masks)
# Cross-modal encoder via bridge layers of textual and visual encoders
layer_outputs_text = self.cross_modal_text_layers[link_layer_index + 1](
cross_text_features_,
cross_image_features_,
attention_mask=extend_text_masks,
encoder_attention_mask=extend_image_masks,
output_attentions=output_attentions,
)
cross_text_features = layer_outputs_text[0]
layer_outputs_image = self.cross_modal_image_layers[link_layer_index + 1](
cross_image_features_,
cross_text_features_,
attention_mask=extend_image_masks,
encoder_attention_mask=extend_text_masks,
output_attentions=output_attentions,
)
cross_image_features = layer_outputs_image[0]
link_layer_index += 1
if output_hidden_states:
all_hidden_states_text += (text_embeds,)
all_hidden_states_image += (image_embeds,)
all_hidden_states_cross += ((cross_text_features, cross_image_features),)
if output_attentions:
all_self_attentions += ((layer_outputs_text[1], layer_outputs_image[1]),)
# Concatenate the cls token of the text and image features to get the final represtation
text_features, image_features = cross_text_features, cross_image_features
cls_features = self.get_cls_features(text_features, image_features)
if output_hidden_states:
all_hidden_states = (all_hidden_states_text, all_hidden_states_image, all_hidden_states_cross)
if not return_dict:
return tuple(
v
for v in [text_features, image_features, cls_features, all_hidden_states, all_self_attentions]
if v is not None
)
return BridgeTowerModelOutput(
text_features=text_features,
image_features=image_features,
pooler_output=cls_features,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
def get_cls_features(self, text_features, image_features):
cls_features_text = self.cross_modal_text_pooler(text_features)
cls_features_image = self.cross_modal_image_pooler(image_features)
return torch.cat([cls_features_text, cls_features_image], dim=-1)
# Copied from transformers.models.vilt.modeling_vilt.ViltPredictionHeadTransform with Vilt->BridgeTower
| BridgeTowerModel |
python | ray-project__ray | python/ray/llm/_internal/serve/engines/vllm/vllm_models.py | {
"start": 1081,
"end": 1491
} | class ____(BaseModelExtended):
"""Configuration for placement group bundle.
Note: Counts are floats to align with Ray resource typing.
"""
CPU: float = Field(default=0.0, ge=0.0, description="Number of CPUs per bundle")
GPU: float = Field(default=1.0, ge=0.0, description="Number of GPUs per bundle")
class Config:
extra = "allow" # Allow arbitrary resource types
| BundleConfig |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed3.py | {
"start": 1402,
"end": 1452
} | class ____(ParentClosed4):
b: int
| ChildClosed4_1 |
python | lxml__lxml | benchmark/bench_xpath.py | {
"start": 229,
"end": 2659
} | class ____(benchbase.TreeBenchMark):
@nochange
@onlylib('lxe')
@children
def bench_xpath_class(self, children):
xpath = self.etree.XPath("./*[1]")
for child in children:
xpath(child)
@nochange
@onlylib('lxe')
@children
def bench_xpath_class_repeat(self, children):
for child in children:
xpath = self.etree.XPath("./*[1]")
xpath(child)
@nochange
@onlylib('lxe')
def bench_xpath_element(self, root):
xpath = self.etree.XPathElementEvaluator(root)
for child in root:
xpath("./*[1]")
@nochange
@onlylib('lxe')
@children
def bench_xpath_method(self, children):
for child in children:
child.xpath("./*[1]")
@nochange
@onlylib('lxe')
@children
def bench_multiple_xpath_or(self, children):
xpath = self.etree.XPath(".//p:a00001|.//p:b00001|.//p:c00001",
namespaces={'p':'cdefg'})
for child in children:
xpath(child)
@nochange
@onlylib('lxe')
@children
def bench_multiple_iter_tag(self, children):
for child in children:
list(child.iter("{cdefg}a00001"))
list(child.iter("{cdefg}b00001"))
list(child.iter("{cdefg}c00001"))
@nochange
@onlylib('lxe')
@children
def bench_xpath_old_extensions(self, children):
def return_child(_, elements):
if elements:
return elements[0][0]
else:
return ()
extensions = {("test", "child") : return_child}
xpath = self.etree.XPath("t:child(.)", namespaces={"t":"test"},
extensions=extensions)
for child in children:
xpath(child)
@nochange
@onlylib('lxe')
@children
def bench_xpath_extensions(self, children):
def return_child(_, elements):
if elements:
return elements[0][0]
else:
return ()
self.etree.FunctionNamespace("testns")["t"] = return_child
try:
xpath = self.etree.XPath("test:t(.)", namespaces={"test":"testns"})
for child in children:
xpath(child)
finally:
del self.etree.FunctionNamespace("testns")["t"]
if __name__ == '__main__':
benchbase.main(XPathBenchMark)
| XPathBenchMark |
python | MongoEngine__mongoengine | tests/document/test_class_methods.py | {
"start": 196,
"end": 11530
} | class ____(unittest.TestCase):
def setUp(self):
connect(db="mongoenginetest")
self.db = get_db()
class Person(Document):
name = StringField()
age = IntField()
non_field = True
meta = {"allow_inheritance": True}
self.Person = Person
def tearDown(self):
for collection in list_collection_names(self.db):
self.db.drop_collection(collection)
def test_definition(self):
"""Ensure that document may be defined using fields."""
assert ["_cls", "age", "id", "name"] == sorted(self.Person._fields.keys())
assert ["IntField", "ObjectIdField", "StringField", "StringField"] == sorted(
x.__class__.__name__ for x in self.Person._fields.values()
)
def test_get_db(self):
"""Ensure that get_db returns the expected db."""
db = self.Person._get_db()
assert self.db == db
def test_get_collection_name(self):
"""Ensure that get_collection_name returns the expected collection
name.
"""
collection_name = "person"
assert collection_name == self.Person._get_collection_name()
def test_get_collection(self):
"""Ensure that get_collection returns the expected collection."""
collection_name = "person"
collection = self.Person._get_collection()
assert self.db[collection_name] == collection
def test_drop_collection(self):
"""Ensure that the collection may be dropped from the database."""
collection_name = "person"
self.Person(name="Test").save()
assert collection_name in list_collection_names(self.db)
self.Person.drop_collection()
assert collection_name not in list_collection_names(self.db)
def test_register_delete_rule(self):
"""Ensure that register delete rule adds a delete rule to the document
meta.
"""
class Job(Document):
employee = ReferenceField(self.Person)
assert self.Person._meta.get("delete_rules") is None
self.Person.register_delete_rule(Job, "employee", NULLIFY)
assert self.Person._meta["delete_rules"] == {(Job, "employee"): NULLIFY}
def test_compare_indexes(self):
"""Ensure that the indexes are properly created and that
compare_indexes identifies the missing/extra indexes
"""
class BlogPost(Document):
author = StringField()
title = StringField()
description = StringField()
tags = StringField()
meta = {"indexes": [("author", "title")]}
BlogPost.drop_collection()
BlogPost.ensure_indexes()
assert BlogPost.compare_indexes() == {"missing": [], "extra": []}
BlogPost.create_index(["author", "description"])
assert BlogPost.compare_indexes() == {
"missing": [],
"extra": [[("author", 1), ("description", 1)]],
}
BlogPost._get_collection().drop_index("author_1_description_1")
assert BlogPost.compare_indexes() == {"missing": [], "extra": []}
BlogPost._get_collection().drop_index("author_1_title_1")
assert BlogPost.compare_indexes() == {
"missing": [[("author", 1), ("title", 1)]],
"extra": [],
}
def test_compare_indexes_inheritance(self):
"""Ensure that the indexes are properly created and that
compare_indexes identifies the missing/extra indexes for subclassed
documents (_cls included)
"""
class BlogPost(Document):
author = StringField()
title = StringField()
description = StringField()
meta = {"allow_inheritance": True}
class BlogPostWithTags(BlogPost):
tags = StringField()
tag_list = ListField(StringField())
meta = {"indexes": [("author", "tags")]}
BlogPost.drop_collection()
BlogPost.ensure_indexes()
BlogPostWithTags.ensure_indexes()
assert BlogPost.compare_indexes() == {"missing": [], "extra": []}
BlogPostWithTags.create_index(["author", "tag_list"])
assert BlogPost.compare_indexes() == {
"missing": [],
"extra": [[("_cls", 1), ("author", 1), ("tag_list", 1)]],
}
BlogPostWithTags._get_collection().drop_index("_cls_1_author_1_tag_list_1")
assert BlogPost.compare_indexes() == {"missing": [], "extra": []}
BlogPostWithTags._get_collection().drop_index("_cls_1_author_1_tags_1")
assert BlogPost.compare_indexes() == {
"missing": [[("_cls", 1), ("author", 1), ("tags", 1)]],
"extra": [],
}
def test_compare_indexes_multiple_subclasses(self):
"""Ensure that compare_indexes behaves correctly if called from a
class, which base class has multiple subclasses
"""
class BlogPost(Document):
author = StringField()
title = StringField()
description = StringField()
meta = {"allow_inheritance": True}
class BlogPostWithTags(BlogPost):
tags = StringField()
tag_list = ListField(StringField())
meta = {"indexes": [("author", "tags")]}
class BlogPostWithCustomField(BlogPost):
custom = DictField()
meta = {"indexes": [("author", "custom")]}
BlogPost.ensure_indexes()
BlogPostWithTags.ensure_indexes()
BlogPostWithCustomField.ensure_indexes()
assert BlogPost.compare_indexes() == {"missing": [], "extra": []}
assert BlogPostWithTags.compare_indexes() == {"missing": [], "extra": []}
assert BlogPostWithCustomField.compare_indexes() == {"missing": [], "extra": []}
def test_compare_indexes_for_text_indexes(self):
"""Ensure that compare_indexes behaves correctly for text indexes"""
class Doc(Document):
a = StringField()
b = StringField()
meta = {
"indexes": [
{
"fields": ["$a", "$b"],
"default_language": "english",
"weights": {"a": 10, "b": 2},
}
]
}
Doc.drop_collection()
Doc.ensure_indexes()
actual = Doc.compare_indexes()
expected = {"missing": [], "extra": []}
assert actual == expected
def test_list_indexes_inheritance(self):
"""ensure that all of the indexes are listed regardless of the super-
or sub-class that we call it from
"""
class BlogPost(Document):
author = StringField()
title = StringField()
description = StringField()
meta = {"allow_inheritance": True}
class BlogPostWithTags(BlogPost):
tags = StringField()
meta = {"indexes": [("author", "tags")]}
class BlogPostWithTagsAndExtraText(BlogPostWithTags):
extra_text = StringField()
meta = {"indexes": [("author", "tags", "extra_text")]}
BlogPost.drop_collection()
BlogPost.ensure_indexes()
BlogPostWithTags.ensure_indexes()
BlogPostWithTagsAndExtraText.ensure_indexes()
assert BlogPost.list_indexes() == BlogPostWithTags.list_indexes()
assert BlogPost.list_indexes() == BlogPostWithTagsAndExtraText.list_indexes()
assert BlogPost.list_indexes() == [
[("_cls", 1), ("author", 1), ("tags", 1)],
[("_cls", 1), ("author", 1), ("tags", 1), ("extra_text", 1)],
[("_id", 1)],
[("_cls", 1)],
]
def test_register_delete_rule_inherited(self):
class Vaccine(Document):
name = StringField(required=True)
meta = {"indexes": ["name"]}
class Animal(Document):
family = StringField(required=True)
vaccine_made = ListField(
ReferenceField("Vaccine", reverse_delete_rule=PULL)
)
meta = {"allow_inheritance": True, "indexes": ["family"]}
class Cat(Animal):
name = StringField(required=True)
assert Vaccine._meta["delete_rules"][(Animal, "vaccine_made")] == PULL
assert Vaccine._meta["delete_rules"][(Cat, "vaccine_made")] == PULL
def test_collection_naming(self):
"""Ensure that a collection with a specified name may be used."""
class DefaultNamingTest(Document):
pass
assert "default_naming_test" == DefaultNamingTest._get_collection_name()
class CustomNamingTest(Document):
meta = {"collection": "pimp_my_collection"}
assert "pimp_my_collection" == CustomNamingTest._get_collection_name()
class DynamicNamingTest(Document):
meta = {"collection": lambda c: "DYNAMO"}
assert "DYNAMO" == DynamicNamingTest._get_collection_name()
# Use Abstract class to handle backwards compatibility
class BaseDocument(Document):
meta = {"abstract": True, "collection": lambda c: c.__name__.lower()}
class OldNamingConvention(BaseDocument):
pass
assert "oldnamingconvention" == OldNamingConvention._get_collection_name()
class InheritedAbstractNamingTest(BaseDocument):
meta = {"collection": "wibble"}
assert "wibble" == InheritedAbstractNamingTest._get_collection_name()
# Mixin tests
class BaseMixin:
meta = {"collection": lambda c: c.__name__.lower()}
class OldMixinNamingConvention(Document, BaseMixin):
pass
assert (
"oldmixinnamingconvention"
== OldMixinNamingConvention._get_collection_name()
)
class BaseMixin:
meta = {"collection": lambda c: c.__name__.lower()}
class BaseDocument(Document, BaseMixin):
meta = {"allow_inheritance": True}
class MyDocument(BaseDocument):
pass
assert "basedocument" == MyDocument._get_collection_name()
def test_custom_collection_name_operations(self):
"""Ensure that a collection with a specified name is used as expected."""
collection_name = "personCollTest"
class Person(Document):
name = StringField()
meta = {"collection": collection_name}
Person(name="Test User").save()
assert collection_name in list_collection_names(self.db)
user_obj = self.db[collection_name].find_one()
assert user_obj["name"] == "Test User"
user_obj = Person.objects[0]
assert user_obj.name == "Test User"
Person.drop_collection()
assert collection_name not in list_collection_names(self.db)
def test_collection_name_and_primary(self):
"""Ensure that a collection with a specified name may be used."""
class Person(Document):
name = StringField(primary_key=True)
meta = {"collection": "app"}
Person(name="Test User").save()
user_obj = Person.objects.first()
assert user_obj.name == "Test User"
Person.drop_collection()
if __name__ == "__main__":
unittest.main()
| TestClassMethods |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassConverter1.py | {
"start": 811,
"end": 1022
} | class ____:
@overload
def __init__(self, val: str) -> None: ...
@overload
def __init__(self, val: bytes) -> None: ...
def __init__(self, val: str | bytes) -> None:
pass
| ConverterClass |
python | streamlit__streamlit | lib/streamlit/elements/widgets/number_input.py | {
"start": 2584,
"end": 25189
} | class ____:
# If "min_value: int" is given and all other numerical inputs are
# "int"s or not provided (value optionally being "min"), return "int"
# If "min_value: int, value: None" is given and all other numerical inputs
# are "int"s or not provided, return "int | None"
@overload
def number_input(
self,
label: str,
min_value: int,
max_value: int | None = None,
value: IntOrNone | Literal["min"] = "min",
step: int | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*,
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
icon: str | None = None,
width: WidthWithoutContent = "stretch",
) -> int | IntOrNone: ...
# If "max_value: int" is given and all other numerical inputs are
# "int"s or not provided (value optionally being "min"), return "int"
# If "max_value: int, value=None" is given and all other numerical inputs
# are "int"s or not provided, return "int | None"
@overload
def number_input(
self,
label: str,
min_value: None = None,
*,
max_value: int,
value: IntOrNone | Literal["min"] = "min",
step: int | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
icon: str | None = None,
width: WidthWithoutContent = "stretch",
) -> int | IntOrNone: ...
# If "value=int" is given and all other numerical inputs are "int"s
# or not provided, return "int"
@overload
def number_input(
self,
label: str,
min_value: int | None = None,
max_value: int | None = None,
*,
value: int,
step: int | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
icon: str | None = None,
width: WidthWithoutContent = "stretch",
) -> int: ...
# If "step=int" is given and all other numerical inputs are "int"s
# or not provided (value optionally being "min"), return "int"
# If "step=int, value=None" is given and all other numerical inputs
# are "int"s or not provided, return "int | None"
@overload
def number_input(
self,
label: str,
min_value: None = None,
max_value: None = None,
value: IntOrNone | Literal["min"] = "min",
*,
step: int,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
icon: str | None = None,
width: WidthWithoutContent = "stretch",
) -> int | IntOrNone: ...
# If all numerical inputs are floats (with value optionally being "min")
# or are not provided, return "float"
# If only "value=None" is given and none of the other numerical inputs
# are "int"s, return "float | None"
@overload
def number_input(
self,
label: str,
min_value: float | None = None,
max_value: float | None = None,
value: FloatOrNone | Literal["min"] = "min",
step: float | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*,
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
icon: str | None = None,
width: WidthWithoutContent = "stretch",
) -> float | FloatOrNone: ...
@gather_metrics("number_input")
def number_input(
self,
label: str,
min_value: Number | None = None,
max_value: Number | None = None,
value: Number | Literal["min"] | None = "min",
step: Number | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
icon: str | None = None,
width: WidthWithoutContent = "stretch",
) -> Number | None:
r"""Display a numeric input widget.
.. note::
Integer values exceeding +/- ``(1<<53) - 1`` cannot be accurately
stored or returned by the widget due to serialization constraints
between the Python server and JavaScript client. You must handle
such numbers as floats, leading to a loss in precision.
Parameters
----------
label : str
A short label explaining to the user what this input is for.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
For accessibility reasons, you should never set an empty label, but
you can hide it with ``label_visibility`` if needed. In the future,
we may disallow empty labels by raising an exception.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
min_value : int, float, or None
The minimum permitted value.
If this is ``None`` (default), there will be no minimum for float
values and a minimum of ``- (1<<53) + 1`` for integer values.
max_value : int, float, or None
The maximum permitted value.
If this is ``None`` (default), there will be no maximum for float
values and a maximum of ``(1<<53) - 1`` for integer values.
value : int, float, "min" or None
The value of this widget when it first renders. If this is
``"min"`` (default), the initial value is ``min_value`` unless
``min_value`` is ``None``. If ``min_value`` is ``None``, the widget
initializes with a value of ``0.0`` or ``0``.
If ``value`` is ``None``, the widget will initialize with no value
and return ``None`` until the user provides input.
step : int, float, or None
The stepping interval.
Defaults to 1 if the value is an int, 0.01 otherwise.
If the value is not specified, the format parameter will be used.
format : str or None
A printf-style format string controlling how the interface should
display numbers. The output must be purely numeric. This does not
impact the return value of the widget. For more information about
the formatting specification, see `sprintf.js
<https://github.com/alexei/sprintf.js?tab=readme-ov-file#format-specification>`_.
For example, ``format="%0.1f"`` adjusts the displayed decimal
precision to only show one digit after the decimal.
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. No two widgets may have the same key.
help : str or None
A tooltip that gets displayed next to the widget label. Streamlit
only displays the tooltip when ``label_visibility="visible"``. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
on_change : callable
An optional callback invoked when this number_input's value changes.
args : list or tuple
An optional list or tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
placeholder : str or None
An optional string displayed when the number input is empty.
If None, no placeholder is displayed.
disabled : bool
An optional boolean that disables the number input if set to
``True``. The default is ``False``.
label_visibility : "visible", "hidden", or "collapsed"
The visibility of the label. The default is ``"visible"``. If this
is ``"hidden"``, Streamlit displays an empty spacer instead of the
label, which can help keep the widget aligned with other widgets.
If this is ``"collapsed"``, Streamlit displays no label or spacer.
icon : str, None
An optional emoji or icon to display within the input field to the
left of the value. If ``icon`` is ``None`` (default), no icon is
displayed. If ``icon`` is a string, the following options are
valid:
- A single-character emoji. For example, you can set ``icon="🚨"``
or ``icon="🔥"``. Emoji short codes are not supported.
- An icon from the Material Symbols library (rounded style) in the
format ``":material/icon_name:"`` where "icon_name" is the name
of the icon in snake case.
For example, ``icon=":material/thumb_up:"`` will display the
Thumb Up icon. Find additional icons in the `Material Symbols \
<https://fonts.google.com/icons?icon.set=Material+Symbols&icon.style=Rounded>`_
font library.
- ``"spinner"``: Displays a spinner as an icon.
width : "stretch" or int
The width of the number input widget. This can be one of the
following:
- ``"stretch"`` (default): The width of the widget matches the
width of the parent container.
- An integer specifying the width in pixels: The widget has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the widget matches the width
of the parent container.
Returns
-------
int or float or None
The current value of the numeric input widget or ``None`` if the widget
is empty. The return type will match the data type of the value parameter.
Example
-------
>>> import streamlit as st
>>>
>>> number = st.number_input("Insert a number")
>>> st.write("The current number is ", number)
.. output::
https://doc-number-input.streamlit.app/
height: 260px
To initialize an empty number input, use ``None`` as the value:
>>> import streamlit as st
>>>
>>> number = st.number_input(
... "Insert a number", value=None, placeholder="Type a number..."
... )
>>> st.write("The current number is ", number)
.. output::
https://doc-number-input-empty.streamlit.app/
height: 260px
"""
ctx = get_script_run_ctx()
return self._number_input(
label=label,
min_value=min_value,
max_value=max_value,
value=value,
step=step,
format=format,
key=key,
help=help,
on_change=on_change,
args=args,
kwargs=kwargs,
placeholder=placeholder,
disabled=disabled,
label_visibility=label_visibility,
icon=icon,
width=width,
ctx=ctx,
)
def _number_input(
self,
label: str,
min_value: Number | None = None,
max_value: Number | None = None,
value: Number | Literal["min"] | None = "min",
step: Number | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
placeholder: str | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
icon: str | None = None,
width: WidthWithoutContent = "stretch",
ctx: ScriptRunContext | None = None,
) -> Number | None:
key = to_key(key)
check_widget_policies(
self.dg,
key,
on_change,
default_value=value if value != "min" else None,
)
maybe_raise_label_warnings(label, label_visibility)
element_id = compute_and_register_element_id(
"number_input",
user_key=key,
# Ensure stable ID when key is provided; explicitly whitelist parameters
# that might invalidate the current widget state.
key_as_main_identity={"min_value", "max_value", "step"},
dg=self.dg,
label=label,
min_value=min_value,
max_value=max_value,
value=value,
step=step,
format=format,
help=help,
placeholder=None if placeholder is None else str(placeholder),
icon=icon,
width=width,
)
# Ensure that all arguments are of the same type.
number_input_args = [min_value, max_value, value, step]
all_int_args = all(
isinstance(a, (numbers.Integral, type(None), str))
for a in number_input_args
)
all_float_args = all(
isinstance(a, (float, type(None), str)) for a in number_input_args
)
if not all_int_args and not all_float_args:
raise StreamlitMixedNumericTypesError(
value=value, min_value=min_value, max_value=max_value, step=step
)
session_state = get_session_state().filtered_state
if key is not None and key in session_state and session_state[key] is None:
value = None
if value == "min":
if min_value is not None:
value = min_value
elif all_int_args and all_float_args:
value = 0.0 # if no values are provided, defaults to float
elif all_int_args:
value = 0
else:
value = 0.0
int_value = isinstance(value, numbers.Integral)
float_value = isinstance(value, float)
if value is None:
if all_int_args and not all_float_args:
# Select int type if all relevant args are ints:
int_value = True
else:
# Otherwise, defaults to float:
float_value = True
# Use default format depending on value type if format was not provided:
number_format = ("%d" if int_value else "%0.2f") if format is None else format
# Warn user if they format an int type as a float or vice versa.
if number_format in ["%d", "%u", "%i"] and float_value:
import streamlit as st
st.warning(
"Warning: NumberInput value below has type float,"
f" but format {number_format} displays as integer."
)
elif number_format[-1] == "f" and int_value:
import streamlit as st
st.warning(
"Warning: NumberInput value below has type int so is"
f" displayed as int despite format string {number_format}."
)
if step is None:
step = 1 if int_value else 0.01
try:
float(number_format % 2)
except (TypeError, ValueError):
raise StreamlitInvalidNumberFormatError(number_format)
# Ensure that the value matches arguments' types.
all_ints = int_value and all_int_args
if min_value is not None and value is not None and min_value > value:
raise StreamlitValueBelowMinError(value=value, min_value=min_value)
if max_value is not None and value is not None and max_value < value:
raise StreamlitValueAboveMaxError(value=value, max_value=max_value)
# Bounds checks. JSNumber produces human-readable exceptions that
# we simply re-package as StreamlitAPIExceptions.
try:
if all_ints:
if min_value is not None:
JSNumber.validate_int_bounds(int(min_value), "`min_value`")
else:
# Issue 6740: If min_value not provided, set default to minimum safe integer
# to avoid JS issues from smaller numbers entered via UI
min_value = JSNumber.MIN_SAFE_INTEGER
if max_value is not None:
JSNumber.validate_int_bounds(int(max_value), "`max_value`")
else:
# See note above - set default to max safe integer
max_value = JSNumber.MAX_SAFE_INTEGER
if step is not None:
JSNumber.validate_int_bounds(int(step), "`step`")
if value is not None:
JSNumber.validate_int_bounds(int(value), "`value`")
else:
if min_value is not None:
JSNumber.validate_float_bounds(min_value, "`min_value`")
else:
# See note above
min_value = JSNumber.MIN_NEGATIVE_VALUE
if max_value is not None:
JSNumber.validate_float_bounds(max_value, "`max_value`")
else:
# See note above
max_value = JSNumber.MAX_VALUE
if step is not None:
JSNumber.validate_float_bounds(step, "`step`")
if value is not None:
JSNumber.validate_float_bounds(value, "`value`")
except JSNumberBoundsException as e:
raise StreamlitJSNumberBoundsError(str(e))
data_type = NumberInputProto.INT if all_ints else NumberInputProto.FLOAT
number_input_proto = NumberInputProto()
number_input_proto.id = element_id
number_input_proto.data_type = data_type
number_input_proto.label = label
if value is not None:
number_input_proto.default = value
if placeholder is not None:
number_input_proto.placeholder = str(placeholder)
number_input_proto.form_id = current_form_id(self.dg)
number_input_proto.disabled = disabled
number_input_proto.label_visibility.value = get_label_visibility_proto_value(
label_visibility
)
if help is not None:
number_input_proto.help = dedent(help)
if min_value is not None:
number_input_proto.min = min_value
number_input_proto.has_min = True
if max_value is not None:
number_input_proto.max = max_value
number_input_proto.has_max = True
if step is not None:
number_input_proto.step = step
number_input_proto.format = number_format
if icon is not None:
number_input_proto.icon = validate_icon_or_emoji(icon)
serde = NumberInputSerde(value, data_type)
widget_state = register_widget(
number_input_proto.id,
on_change_handler=on_change,
args=args,
kwargs=kwargs,
deserializer=serde.deserialize,
serializer=serde.serialize,
ctx=ctx,
value_type="double_value",
)
if widget_state.value_changed:
if widget_state.value is not None:
# Min/Max bounds checks when the value is updated.
if (
number_input_proto.has_min
and widget_state.value < number_input_proto.min
):
raise StreamlitValueBelowMinError(
value=widget_state.value, min_value=number_input_proto.min
)
if (
number_input_proto.has_max
and widget_state.value > number_input_proto.max
):
raise StreamlitValueAboveMaxError(
value=widget_state.value, max_value=number_input_proto.max
)
number_input_proto.value = widget_state.value
number_input_proto.set_value = True
validate_width(width)
layout_config = LayoutConfig(width=width)
self.dg._enqueue(
"number_input", number_input_proto, layout_config=layout_config
)
return widget_state.value
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
| NumberInputMixin |
python | streamlit__streamlit | lib/tests/streamlit/write_test.py | {
"start": 16567,
"end": 20380
} | class ____(unittest.TestCase):
"""Test st.write_stream."""
@patch("streamlit.type_util.is_type")
def test_with_openai_chunk(self, is_type):
"""Test st.write_stream with openai Chunks."""
is_type.side_effect = make_is_type_mock(type_util._OPENAI_CHUNK_RE)
# Create a mock for ChatCompletionChunk
mock_chunk = MagicMock()
def openai_stream():
mock_chunk.choices = []
yield mock_chunk # should also support empty chunks
mock_chunk.choices = [MagicMock()]
mock_chunk.choices[0].delta.content = "Hello "
yield mock_chunk
mock_chunk.choices[0].delta.content = "World"
yield mock_chunk
stream_return = st.write_stream(openai_stream)
assert stream_return == "Hello World"
def test_with_generator_text(self):
"""Test st.write_stream with generator text content."""
def test_stream():
yield "Hello "
yield "World"
stream_return = st.write_stream(test_stream)
assert stream_return == "Hello World"
def test_with_async_generator_text(self):
"""Test st.write_stream with async generator text content."""
async def test_stream():
yield "Hello "
yield "World"
stream_return = st.write_stream(test_stream)
assert stream_return == "Hello World"
stream_return = st.write_stream(test_stream())
assert stream_return == "Hello World"
def test_with_empty_chunks(self):
"""Test st.write_stream with generator that returns empty chunks."""
def test_stream():
yield ""
yield ""
stream_return = st.write_stream(test_stream)
assert stream_return == ""
def test_with_empty_stream(self):
"""Test st.write_stream with generator that returns empty chunks."""
def test_stream():
if False:
yield "Hello"
stream_return = st.write_stream(test_stream)
assert stream_return == ""
def test_with_wrong_input(self):
"""Test st.write_stream with string or dataframe input generates exception."""
with pytest.raises(StreamlitAPIException):
st.write_stream("Hello World")
with pytest.raises(StreamlitAPIException):
st.write_stream(pd.DataFrame([[1, 2], [3, 4]]))
def test_with_generator_misc(self):
"""Test st.write_stream with generator with different content."""
def test_stream():
yield "This is "
yield "a dataframe:"
yield pd.DataFrame([[1, 2], [3, 4]])
yield "Text under dataframe"
with patch("streamlit.delta_generator.DeltaGenerator.dataframe") as p_dataframe:
stream_return = st.write_stream(test_stream)
p_dataframe.assert_called_once()
assert str(stream_return) == str(
[
"This is a dataframe:",
pd.DataFrame([[1, 2], [3, 4]]),
"Text under dataframe",
]
)
def test_with_list_output(self):
"""Test st.write_stream with a list."""
data = [
"This is ",
"a dataframe:",
pd.DataFrame([[1, 2], [3, 4]]),
"Text under dataframe",
]
with patch("streamlit.delta_generator.DeltaGenerator.dataframe") as p_dataframe:
stream_return = st.write_stream(data)
p_dataframe.assert_called_once()
assert str(stream_return) == str(
[
"This is a dataframe:",
pd.DataFrame([[1, 2], [3, 4]]),
"Text under dataframe",
]
)
| StreamlitStreamTest |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/models.py | {
"start": 1534,
"end": 1591
} | class ____(models.Model):
charm = CharmField()
| Charming |
python | pypa__warehouse | tests/unit/manage/test_tasks.py | {
"start": 339,
"end": 1544
} | class ____:
def test_update_invitation_status(self, db_request):
project = ProjectFactory.create()
user = UserFactory.create()
invite = RoleInvitationFactory(user=user, project=project)
token_service = pretend.stub(loads=pretend.raiser(TokenExpired))
db_request.find_service = pretend.call_recorder(lambda *a, **kw: token_service)
update_role_invitation_status(db_request)
assert db_request.find_service.calls == [
pretend.call(ITokenService, name="email")
]
assert invite.invite_status == RoleInvitationStatus.Expired
def test_no_updates(self, db_request):
project = ProjectFactory.create()
user = UserFactory.create()
invite = RoleInvitationFactory(user=user, project=project)
token_service = pretend.stub(loads=lambda token: {})
db_request.find_service = pretend.call_recorder(lambda *a, **kw: token_service)
update_role_invitation_status(db_request)
assert db_request.find_service.calls == [
pretend.call(ITokenService, name="email")
]
assert invite.invite_status == RoleInvitationStatus.Pending
| TestUpdateInvitationStatus |
python | django__django | django/contrib/postgres/fields/array.py | {
"start": 10599,
"end": 11027
} | class ____(Transform):
lookup_name = "len"
output_field = IntegerField()
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
# Distinguish NULL and empty arrays
return (
"CASE WHEN %(lhs)s IS NULL THEN NULL ELSE "
"coalesce(array_length(%(lhs)s, 1), 0) END"
) % {"lhs": lhs}, params * 2
@ArrayField.register_lookup
| ArrayLenTransform |
python | django-extensions__django-extensions | tests/management/commands/shell_plus_tests/test_collision_resolver.py | {
"start": 724,
"end": 771
} | class ____(AppsOrderCR):
pass
| TestAppsOrderCR |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard_python3/django/polls/test_polls.py | {
"start": 736,
"end": 1923
} | class ____(TestCase):
def setUp(self):
question = Question(
question_text="This is a test question", pub_date=timezone.now()
)
question.save()
self.question = question
choice = Choice(choice_text="This is a test choice", votes=0)
choice.question = question
choice.save()
self.choice = choice
self.client = Client()
def test_index_view(self):
response = self.client.get("/")
assert response.status_code == 200
assert self.question.question_text in str(response.content)
def test_detail_view(self):
response = self.client.get(reverse("polls:detail", args=(self.question.id,)))
assert response.status_code == 200
assert self.question.question_text in str(response.content)
assert self.choice.choice_text in str(response.content)
def test_results_view(self):
response = self.client.get(reverse("polls:results", args=(self.question.id,)))
assert response.status_code == 200
assert self.question.question_text in str(response.content)
assert self.choice.choice_text in str(response.content)
| PollViewTests |
python | TheAlgorithms__Python | data_structures/binary_tree/binary_tree_path_sum.py | {
"start": 504,
"end": 2468
} | class ____:
r"""
The below tree looks like this
10
/ \
5 -3
/ \ \
3 2 11
/ \ \
3 -2 1
>>> tree = Node(10)
>>> tree.left = Node(5)
>>> tree.right = Node(-3)
>>> tree.left.left = Node(3)
>>> tree.left.right = Node(2)
>>> tree.right.right = Node(11)
>>> tree.left.left.left = Node(3)
>>> tree.left.left.right = Node(-2)
>>> tree.left.right.right = Node(1)
>>> BinaryTreePathSum().path_sum(tree, 8)
3
>>> BinaryTreePathSum().path_sum(tree, 7)
2
>>> tree.right.right = Node(10)
>>> BinaryTreePathSum().path_sum(tree, 8)
2
>>> BinaryTreePathSum().path_sum(None, 0)
0
>>> BinaryTreePathSum().path_sum(tree, 0)
0
The second tree looks like this
0
/ \
5 5
>>> tree2 = Node(0)
>>> tree2.left = Node(5)
>>> tree2.right = Node(5)
>>> BinaryTreePathSum().path_sum(tree2, 5)
4
>>> BinaryTreePathSum().path_sum(tree2, -1)
0
>>> BinaryTreePathSum().path_sum(tree2, 0)
1
"""
target: int
def __init__(self) -> None:
self.paths = 0
def depth_first_search(self, node: Node | None, path_sum: int) -> None:
if node is None:
return
if path_sum == self.target:
self.paths += 1
if node.left:
self.depth_first_search(node.left, path_sum + node.left.value)
if node.right:
self.depth_first_search(node.right, path_sum + node.right.value)
def path_sum(self, node: Node | None, target: int | None = None) -> int:
if node is None:
return 0
if target is not None:
self.target = target
self.depth_first_search(node, node.value)
self.path_sum(node.left)
self.path_sum(node.right)
return self.paths
if __name__ == "__main__":
import doctest
doctest.testmod()
| BinaryTreePathSum |
python | boto__boto3 | tests/functional/test_collection.py | {
"start": 683,
"end": 1307
} | class ____(unittest.TestCase):
def setUp(self):
self.session = Session(
aws_access_key_id='dummy',
aws_secret_access_key='dummy',
region_name='us-east-1',
)
# Pick an arbitrary resource.
self.ec2_resource = self.session.resource('ec2')
def test_can_use_collection_methods(self):
assert isinstance(
self.ec2_resource.instances.all(), ResourceCollection
)
def test_can_chain_methods(self):
assert isinstance(
self.ec2_resource.instances.all().page_size(5), ResourceCollection
)
| TestCollection |
python | ipython__ipython | IPython/core/formatters.py | {
"start": 8840,
"end": 9522
} | class ____(UserWarning):
"""Warning class for errors in formatters"""
@decorator
def catch_format_error(method, self, *args, **kwargs):
"""show traceback on failed format call"""
try:
r = method(self, *args, **kwargs)
except NotImplementedError:
# don't warn on NotImplementedErrors
return self._check_return(None, args[0])
except Exception:
exc_info = sys.exc_info()
ip = get_ipython()
if ip is not None:
ip.showtraceback(exc_info)
else:
traceback.print_exception(*exc_info)
return self._check_return(None, args[0])
return self._check_return(r, args[0])
| FormatterWarning |
python | joke2k__faker | tests/providers/test_credit_card.py | {
"start": 3532,
"end": 5519
} | class ____:
"""Test ru_RU credit card provider methods"""
visa_pattern: Pattern = re.compile(r"4[0-9]{15}")
mastercard_pattern: Pattern = re.compile(
r"(?:5[1-5][0-9]{2}|222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}",
)
mir_pattern: Pattern = re.compile(r"220[0-4][0-9]{12}")
maestro_pattern: Pattern = re.compile(r"(?:50|5[6-9]|6[0-9])[0-9]{14}")
amex_pattern: Pattern = re.compile(r"3[4|7][0-9]{13}")
unionpay_pattern: Pattern = re.compile(r"(?:62|81)[0-9]{14}")
def test_visa(self, faker, num_samples):
for _ in range(num_samples):
number = faker.credit_card_number("visa")
assert self.visa_pattern.fullmatch(number)
def test_mastercard(self, faker, num_samples):
for _ in range(num_samples):
number = faker.credit_card_number("mastercard")
assert self.mastercard_pattern.fullmatch(number)
def test_mir(self, faker, num_samples):
for _ in range(num_samples):
number = faker.credit_card_number("mir")
assert self.mir_pattern.fullmatch(number)
def test_maestro(self, faker, num_samples):
for _ in range(num_samples):
number = faker.credit_card_number("maestro")
assert self.maestro_pattern.fullmatch(number)
def test_amex(self, faker, num_samples):
for _ in range(num_samples):
number = faker.credit_card_number("amex")
assert self.amex_pattern.fullmatch(number)
def test_unionpay(self, faker, num_samples):
for _ in range(num_samples):
number = faker.credit_card_number("unionpay")
assert self.unionpay_pattern.fullmatch(number)
def test_credit_card_full(self, faker, num_samples):
for _ in range(num_samples):
card_data = faker.credit_card_full().split("\n")
assert re.match("[A-Za-z]+", card_data[1])
assert card_data[4] in RuRuBankProvider.banks
| TestRuRu |
python | allegroai__clearml | clearml/backend_api/services/v2_20/organization.py | {
"start": 2971,
"end": 5225
} | class ____(Response):
"""
Response of organization.get_tags endpoint.
:param tags: The list of unique tag values
:type tags: Sequence[str]
:param system_tags: The list of unique system tag values. Returned only if
'include_system' is set to 'true' in the request
:type system_tags: Sequence[str]
"""
_service = "organization"
_action = "get_tags"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"system_tags": {
"description": "The list of unique system tag values. Returned only if 'include_system' is set to 'true' in the request",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "The list of unique tag values",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self, tags: Optional[List[str]] = None, system_tags: Optional[List[str]] = None, **kwargs: Any
) -> None:
super(GetTagsResponse, self).__init__(**kwargs)
self.tags = tags
self.system_tags = system_tags
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
response_mapping = {GetTagsRequest: GetTagsResponse}
| GetTagsResponse |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 208319,
"end": 209654
} | class ____(TestCase):
class Producer:
def __init__(self, exc, die_early=False):
self.exc = exc
self.pos = 0
self.die_early = die_early
def __iter__(self):
if self.die_early:
raise self.exc
return self
def __next__(self):
ret = self.pos
if self.pos >= 5:
raise self.exc
self.pos += 1
return ret
def test_no_error(self):
iterator = range(5)
actual = list(mi.iter_suppress(iterator, RuntimeError))
expected = [0, 1, 2, 3, 4]
self.assertEqual(actual, expected)
def test_raises_error(self):
iterator = self.Producer(ValueError)
with self.assertRaises(ValueError):
list(mi.iter_suppress(iterator, RuntimeError))
def test_suppression(self):
iterator = self.Producer(ValueError)
actual = list(mi.iter_suppress(iterator, RuntimeError, ValueError))
expected = [0, 1, 2, 3, 4]
self.assertEqual(actual, expected)
def test_early_suppression(self):
iterator = self.Producer(ValueError, die_early=True)
actual = list(mi.iter_suppress(iterator, RuntimeError, ValueError))
expected = []
self.assertEqual(actual, expected)
| IterSuppressTests |
python | scipy__scipy | scipy/signal/tests/test_peak_finding.py | {
"start": 16430,
"end": 23501
} | class ____:
def test_empty(self):
"""
Test if an empty array is returned if no peaks are provided.
"""
widths = peak_widths([], [])[0]
assert isinstance(widths, np.ndarray)
assert widths.size == 0
widths = peak_widths([1, 2, 3], [])[0]
assert isinstance(widths, np.ndarray)
assert widths.size == 0
out = peak_widths([], [])
for arr in out:
assert isinstance(arr, np.ndarray)
assert arr.size == 0
@pytest.mark.filterwarnings("ignore:some peaks have a width of 0")
def test_basic(self):
"""
Test a simple use case with easy to verify results at different relative
heights.
"""
x = np.array([1, 0, 1, 2, 1, 0, -1])
prominence = 2
for rel_height, width_true, lip_true, rip_true in [
(0., 0., 3., 3.), # raises warning
(0.25, 1., 2.5, 3.5),
(0.5, 2., 2., 4.),
(0.75, 3., 1.5, 4.5),
(1., 4., 1., 5.),
(2., 5., 1., 6.),
(3., 5., 1., 6.)
]:
width_calc, height, lip_calc, rip_calc = peak_widths(
x, [3], rel_height)
xp_assert_close(width_calc, np.asarray([width_true]))
xp_assert_close(height, np.asarray([2 - rel_height * prominence]))
xp_assert_close(lip_calc, np.asarray([lip_true]))
xp_assert_close(rip_calc, np.asarray([rip_true]))
def test_non_contiguous(self):
"""
Test with non-C-contiguous input arrays.
"""
x = np.repeat([0, 100, 50], 4)
peaks = np.repeat([1], 3)
result = peak_widths(x[::4], peaks[::3])
xp_assert_equal(result,
np.asarray([[0.75], [75], [0.75], [1.5]])
)
def test_exceptions(self):
"""
Verify that argument validation works as intended.
"""
with raises(ValueError, match='1-D array'):
# x with dimension > 1
peak_widths(np.zeros((3, 4)), np.ones(3))
with raises(ValueError, match='1-D array'):
# x with dimension < 1
peak_widths(3, [0])
with raises(ValueError, match='1-D array'):
# peaks with dimension > 1
peak_widths(np.arange(10), np.ones((3, 2), dtype=np.intp))
with raises(ValueError, match='1-D array'):
# peaks with dimension < 1
peak_widths(np.arange(10), 3)
with raises(ValueError, match='not a valid index'):
# peak pos exceeds x.size
peak_widths(np.arange(10), [8, 11])
with raises(ValueError, match='not a valid index'):
# empty x with peaks supplied
peak_widths([], [1, 2])
with raises(TypeError, match='cannot safely cast'):
# peak cannot be safely cast to intp
peak_widths(np.arange(10), [1.1, 2.3])
with raises(ValueError, match='rel_height'):
# rel_height is < 0
peak_widths([0, 1, 0, 1, 0], [1, 3], rel_height=-1)
with raises(TypeError, match='None'):
# prominence data contains None
peak_widths([1, 2, 1], [1], prominence_data=(None, None, None))
def test_warnings(self):
"""
Verify that appropriate warnings are raised.
"""
msg = "some peaks have a width of 0"
with warns(PeakPropertyWarning, match=msg):
# Case: rel_height is 0
peak_widths([0, 1, 0], [1], rel_height=0)
with warns(PeakPropertyWarning, match=msg):
# Case: prominence is 0 and bases are identical
peak_widths(
[0, 1, 1, 1, 0], [2],
prominence_data=(np.array([0.], np.float64),
np.array([2], np.intp),
np.array([2], np.intp))
)
def test_mismatching_prominence_data(self):
"""Test with mismatching peak and / or prominence data."""
x = [0, 1, 0]
peak = [1]
for i, (prominences, left_bases, right_bases) in enumerate([
((1.,), (-1,), (2,)), # left base not in x
((1.,), (0,), (3,)), # right base not in x
((1.,), (2,), (0,)), # swapped bases same as peak
((1., 1.), (0, 0), (2, 2)), # array shapes don't match peaks
((1., 1.), (0,), (2,)), # arrays with different shapes
((1.,), (0, 0), (2,)), # arrays with different shapes
((1.,), (0,), (2, 2)) # arrays with different shapes
]):
# Make sure input is matches output of signal.peak_prominences
prominence_data = (np.array(prominences, dtype=np.float64),
np.array(left_bases, dtype=np.intp),
np.array(right_bases, dtype=np.intp))
# Test for correct exception
if i < 3:
match = "prominence data is invalid for peak"
else:
match = "arrays in `prominence_data` must have the same shape"
with raises(ValueError, match=match):
peak_widths(x, peak, prominence_data=prominence_data)
@pytest.mark.filterwarnings("ignore:some peaks have a width of 0")
def test_intersection_rules(self):
"""Test if x == eval_height counts as an intersection."""
# Flatt peak with two possible intersection points if evaluated at 1
x = [0, 1, 2, 1, 3, 3, 3, 1, 2, 1, 0]
# relative height is 0 -> width is 0 as well, raises warning
xp_assert_close(peak_widths(x, peaks=[5], rel_height=0),
[(0.,), (3.,), (5.,), (5.,)])
# width_height == x counts as intersection -> nearest 1 is chosen
xp_assert_close(peak_widths(x, peaks=[5], rel_height=2/3),
[(4.,), (1.,), (3.,), (7.,)])
def test_unpack_condition_args():
"""
Verify parsing of condition arguments for `scipy.signal.find_peaks` function.
"""
x = np.arange(10)
amin_true = x
amax_true = amin_true + 10
peaks = amin_true[1::2]
# Test unpacking with None or interval
assert (None, None) == _unpack_condition_args((None, None), x, peaks)
assert (1, None) == _unpack_condition_args(1, x, peaks)
assert (1, None) == _unpack_condition_args((1, None), x, peaks)
assert (None, 2) == _unpack_condition_args((None, 2), x, peaks)
assert (3., 4.5) == _unpack_condition_args((3., 4.5), x, peaks)
# Test if borders are correctly reduced with `peaks`
amin_calc, amax_calc = _unpack_condition_args((amin_true, amax_true), x, peaks)
xp_assert_equal(amin_calc, amin_true[peaks])
xp_assert_equal(amax_calc, amax_true[peaks])
# Test raises if array borders don't match x
with raises(ValueError, match="array size of lower"):
_unpack_condition_args(amin_true, np.arange(11), peaks)
with raises(ValueError, match="array size of upper"):
_unpack_condition_args((None, amin_true), np.arange(11), peaks)
| TestPeakWidths |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_S.py | {
"start": 34578,
"end": 35770
} | class ____(Benchmark):
r"""
Step objective function.
This class defines the Step [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Step}}(x) = \sum_{i=1}^{n} \left ( \lfloor x_i
+ 0.5 \rfloor \right )^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-100, 100]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0.5` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-100.0] * self.N,
[100.0] * self.N))
self.custom_bounds = ([-5, 5], [-5, 5])
self.global_optimum = [[0. for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return sum(floor(abs(x)))
| Step |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/cloud_build.py | {
"start": 1506,
"end": 1711
} | class ____(BaseGoogleLink):
"""Helper class for constructing Cloud Build List link."""
name = "Cloud Builds List"
key = "cloud_build_list_key"
format_str = BUILD_LIST_LINK
| CloudBuildListLink |
python | keras-team__keras | keras/src/applications/applications_test.py | {
"start": 4427,
"end": 11513
} | class ____(testing.TestCase):
@classmethod
def setUpClass(cls):
cls.original_image_data_format = backend.image_data_format()
@classmethod
def tearDownClass(cls):
backend.set_image_data_format(cls.original_image_data_format)
def skip_if_invalid_image_data_format_for_model(
self, app, image_data_format
):
does_not_support_channels_first = any(
[
unsupported_name.lower() in app.__name__.lower()
for unsupported_name in MODELS_UNSUPPORTED_CHANNELS_FIRST
]
)
if (
image_data_format == "channels_first"
and does_not_support_channels_first
):
self.skipTest(
"{} does not support channels first".format(app.__name__)
)
@parameterized.named_parameters(test_parameters)
def test_application_notop_variable_input_channels(
self, app, last_dim, _, image_data_format
):
if app == nasnet.NASNetMobile and backend.backend() == "torch":
self.skipTest(
"NASNetMobile pretrained incorrect with torch backend."
)
self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
backend.set_image_data_format(image_data_format)
# Test compatibility with 1 channel
if image_data_format == "channels_first":
input_shape = (1, None, None)
correct_output_shape = [None, last_dim, None, None]
else:
input_shape = (None, None, 1)
correct_output_shape = [None, None, None, last_dim]
model = app(weights=None, include_top=False, input_shape=input_shape)
output_shape = list(model.outputs[0].shape)
self.assertEqual(output_shape, correct_output_shape)
# Test compatibility with 4 channels
if image_data_format == "channels_first":
input_shape = (4, None, None)
else:
input_shape = (None, None, 4)
model = app(weights=None, include_top=False, input_shape=input_shape)
output_shape = list(model.outputs[0].shape)
self.assertEqual(output_shape, correct_output_shape)
@parameterized.named_parameters(test_parameters)
@pytest.mark.skipif(PIL is None, reason="Requires PIL.")
def test_application_base(self, app, _, app_module, image_data_format):
import tensorflow as tf
if app == nasnet.NASNetMobile and backend.backend() == "torch":
self.skipTest(
"NASNetMobile pretrained incorrect with torch backend."
)
if (
image_data_format == "channels_first"
and len(tf.config.list_physical_devices("GPU")) == 0
and backend.backend() == "tensorflow"
):
self.skipTest(
"Conv2D doesn't support channels_first using CPU with "
"tensorflow backend"
)
self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
backend.set_image_data_format(image_data_format)
# Can be instantiated with default arguments
model = app(weights="imagenet")
# Can run a correct inference on a test image
if image_data_format == "channels_first":
shape = model.input_shape[2:4]
else:
shape = model.input_shape[1:3]
x = _get_elephant(shape)
x = app_module.preprocess_input(x)
preds = model.predict(x)
names = [p[1] for p in app_module.decode_predictions(preds)[0]]
# Test correct label is in top 3 (weak correctness test).
self.assertIn("African_elephant", names[:3])
# Can be serialized and deserialized
config = serialization_lib.serialize_keras_object(model)
reconstructed_model = serialization_lib.deserialize_keras_object(config)
self.assertEqual(len(model.weights), len(reconstructed_model.weights))
@parameterized.named_parameters(test_parameters)
def test_application_notop_custom_input_shape(
self, app, last_dim, _, image_data_format
):
if app == nasnet.NASNetMobile and backend.backend() == "torch":
self.skipTest(
"NASNetMobile pretrained incorrect with torch backend."
)
self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
backend.set_image_data_format(image_data_format)
if image_data_format == "channels_first":
input_shape = (3, 123, 123)
last_dim_axis = 1
else:
input_shape = (123, 123, 3)
last_dim_axis = -1
model = app(weights=None, include_top=False, input_shape=input_shape)
output_shape = list(model.outputs[0].shape)
self.assertEqual(output_shape[last_dim_axis], last_dim)
@parameterized.named_parameters(test_parameters)
def test_application_notop_custom_input_tensor(
self, app, last_dim, _, image_data_format
):
if app == nasnet.NASNetMobile and backend.backend() == "torch":
self.skipTest(
"NASNetMobile pretrained incorrect with torch backend."
)
self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
backend.set_image_data_format(image_data_format)
if image_data_format == "channels_first":
input_shape = (4, 123, 123)
last_dim_axis = 1
else:
input_shape = (123, 123, 4)
last_dim_axis = -1
inputs_custom = Input(shape=input_shape, name="custom_input")
inputs_custom = Conv2D(3, (2, 2), padding="valid", strides=(2, 2))(
inputs_custom
)
model = app(weights=None, include_top=False, input_tensor=inputs_custom)
output_shape = list(model.outputs[0].shape)
self.assertEqual(output_shape[last_dim_axis], last_dim)
@parameterized.named_parameters(test_parameters)
def test_application_pooling(self, app, last_dim, _, image_data_format):
if app == nasnet.NASNetMobile and backend.backend() == "torch":
self.skipTest(
"NASNetMobile pretrained incorrect with torch backend."
)
self.skip_if_invalid_image_data_format_for_model(app, image_data_format)
backend.set_image_data_format(image_data_format)
model = app(weights=None, include_top=False, pooling="max")
output_shape = list(model.outputs[0].shape)
self.assertEqual(output_shape, [None, last_dim])
@parameterized.named_parameters(test_parameters)
def test_application_classifier_activation(self, app, *_):
if app == nasnet.NASNetMobile and backend.backend() == "torch":
self.skipTest(
"NASNetMobile pretrained incorrect with torch backend."
)
model = app(
weights=None, include_top=True, classifier_activation="softmax"
)
last_layer_act = model.layers[-1].activation.__name__
self.assertEqual(last_layer_act, "softmax")
| ApplicationsTest |
python | huggingface__transformers | tests/models/tapas/test_modeling_tapas.py | {
"start": 1894,
"end": 15848
} | class ____:
"""You can also import this e.g from .test_modeling_tapas import TapasModelTester"""
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
max_position_embeddings=512,
type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
type_sequence_label_size=2,
positive_weight=10.0,
num_aggregation_labels=4,
num_labels=2,
aggregation_loss_importance=0.8,
use_answer_as_supervision=True,
answer_loss_importance=0.001,
use_normalized_answer_loss=False,
huber_loss_delta=25.0,
temperature=1.0,
agg_temperature=1.0,
use_gumbel_for_cells=False,
use_gumbel_for_agg=False,
average_approximation_function="ratio",
cell_selection_preference=0.5,
answer_loss_cutoff=100,
max_num_rows=64,
max_num_columns=32,
average_logits_per_cell=True,
select_one_column=True,
allow_empty_column_selection=False,
init_cell_selection_weights_to_zero=True,
reset_position_index_per_cell=True,
disable_per_token_loss=False,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.max_position_embeddings = max_position_embeddings
self.type_vocab_sizes = type_vocab_sizes
self.type_sequence_label_size = type_sequence_label_size
self.positive_weight = positive_weight
self.num_aggregation_labels = num_aggregation_labels
self.num_labels = num_labels
self.aggregation_loss_importance = aggregation_loss_importance
self.use_answer_as_supervision = use_answer_as_supervision
self.answer_loss_importance = answer_loss_importance
self.use_normalized_answer_loss = use_normalized_answer_loss
self.huber_loss_delta = huber_loss_delta
self.temperature = temperature
self.agg_temperature = agg_temperature
self.use_gumbel_for_cells = use_gumbel_for_cells
self.use_gumbel_for_agg = use_gumbel_for_agg
self.average_approximation_function = average_approximation_function
self.cell_selection_preference = cell_selection_preference
self.answer_loss_cutoff = answer_loss_cutoff
self.max_num_rows = max_num_rows
self.max_num_columns = max_num_columns
self.average_logits_per_cell = average_logits_per_cell
self.select_one_column = select_one_column
self.allow_empty_column_selection = allow_empty_column_selection
self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
self.reset_position_index_per_cell = reset_position_index_per_cell
self.disable_per_token_loss = disable_per_token_loss
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).to(torch_device)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length]).to(torch_device)
token_type_ids = []
for type_vocab_size in self.type_vocab_sizes:
token_type_ids.append(ids_tensor(shape=[self.batch_size, self.seq_length], vocab_size=type_vocab_size))
token_type_ids = torch.stack(token_type_ids, dim=2).to(torch_device)
sequence_labels = None
token_labels = None
labels = None
numeric_values = None
numeric_values_scale = None
float_answer = None
aggregation_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size).to(torch_device)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels).to(torch_device)
labels = ids_tensor([self.batch_size, self.seq_length], vocab_size=2).to(torch_device)
numeric_values = floats_tensor([self.batch_size, self.seq_length]).to(torch_device)
numeric_values_scale = floats_tensor([self.batch_size, self.seq_length]).to(torch_device)
float_answer = floats_tensor([self.batch_size]).to(torch_device)
aggregation_labels = ids_tensor([self.batch_size], self.num_aggregation_labels).to(torch_device)
config = self.get_config()
return (
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
)
def get_config(self):
return TapasConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_sizes=self.type_vocab_sizes,
initializer_range=self.initializer_range,
positive_weight=self.positive_weight,
num_aggregation_labels=self.num_aggregation_labels,
num_labels=self.num_labels,
aggregation_loss_importance=self.aggregation_loss_importance,
use_answer_as_supervision=self.use_answer_as_supervision,
answer_loss_importance=self.answer_loss_importance,
use_normalized_answer_loss=self.use_normalized_answer_loss,
huber_loss_delta=self.huber_loss_delta,
temperature=self.temperature,
agg_temperature=self.agg_temperature,
use_gumbel_for_cells=self.use_gumbel_for_cells,
use_gumbel_for_agg=self.use_gumbel_for_agg,
average_approximation_function=self.average_approximation_function,
cell_selection_preference=self.cell_selection_preference,
answer_loss_cutoff=self.answer_loss_cutoff,
max_num_rows=self.max_num_rows,
max_num_columns=self.max_num_columns,
average_logits_per_cell=self.average_logits_per_cell,
select_one_column=self.select_one_column,
allow_empty_column_selection=self.allow_empty_column_selection,
init_cell_selection_weights_to_zero=self.init_cell_selection_weights_to_zero,
reset_position_index_per_cell=self.reset_position_index_per_cell,
disable_per_token_loss=self.disable_per_token_loss,
)
def create_and_check_model(
self,
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
):
model = TapasModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_masked_lm(
self,
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
):
model = TapasForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_question_answering(
self,
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
):
# inference: without aggregation head (SQA). Model only returns logits
sqa_config = copy.copy(config)
sqa_config.num_aggregation_labels = 0
sqa_config.use_answer_as_supervision = False
model = TapasForQuestionAnswering(config=sqa_config)
model.to(torch_device)
model.eval()
result = model(
input_ids=input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
# inference: with aggregation head (WTQ, WikiSQL-supervised). Model returns logits and aggregation logits
model = TapasForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids=input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels))
# training: can happen in 3 main ways
# case 1: conversational (SQA)
model = TapasForQuestionAnswering(config=sqa_config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=labels,
)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
# case 2: weak supervision for aggregation (WTQ)
model = TapasForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids=input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=labels,
numeric_values=numeric_values,
numeric_values_scale=numeric_values_scale,
float_answer=float_answer,
)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels))
# case 3: strong supervision for aggregation (WikiSQL-supervised)
wikisql_config = copy.copy(config)
wikisql_config.use_answer_as_supervision = False
model = TapasForQuestionAnswering(config=wikisql_config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=labels,
aggregation_labels=aggregation_labels,
)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels))
def create_and_check_for_sequence_classification(
self,
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
):
config.num_labels = self.num_labels
model = TapasForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| TapasModelTester |
python | gevent__gevent | src/gevent/tests/test__server_pywsgi.py | {
"start": 1106,
"end": 2480
} | class ____(test__server.Settings):
ServerClass = pywsgi.WSGIServer
ServerSubClass = SimpleWSGIServer
close_socket_detected = True
restartable = False
close_socket_detected = False
@staticmethod
def assert500(inst):
with inst.makefile() as conn:
conn.write(b'GET / HTTP/1.0\r\n\r\n')
result = conn.read()
inst.assertTrue(result.startswith(internal_error_start),
(result, internal_error_start))
inst.assertTrue(result.endswith(internal_error_end),
(result, internal_error_end))
@staticmethod
def assert503(inst):
with inst.makefile() as conn:
conn.write(b'GET / HTTP/1.0\r\n\r\n')
result = conn.read()
inst.assertEqual(result, internal_error503)
@staticmethod
def assertPoolFull(inst):
with inst.assertRaises(socket.timeout):
inst.assertRequestSucceeded()
@staticmethod
def assertAcceptedConnectionError(inst):
with inst.makefile() as conn:
result = conn.read()
inst.assertFalse(result)
@staticmethod
def fill_default_server_args(inst, kwargs):
kwargs = test__server.Settings.fill_default_server_args(inst, kwargs)
kwargs.setdefault('log', pywsgi._NoopLog())
return kwargs
| Settings |
python | gevent__gevent | src/gevent/tests/test__pool.py | {
"start": 14948,
"end": 14992
} | class ____(TestPool):
size = 10
| TestPool10 |
python | python__mypy | mypy/nodes.py | {
"start": 1514,
"end": 5870
} | class ____:
"""Base type for objects that are valid as error message locations."""
__slots__ = ("line", "column", "end_line", "end_column")
def __init__(self, line: int = -1, column: int = -1) -> None:
self.line = line
self.column = column
self.end_line: int | None = None
self.end_column: int | None = None
def set_line(
self,
target: Context | int,
column: int | None = None,
end_line: int | None = None,
end_column: int | None = None,
) -> None:
"""If target is a node, pull line (and column) information
into this node. If column is specified, this will override any column
information coming from a node.
"""
if isinstance(target, int):
self.line = target
else:
self.line = target.line
self.column = target.column
self.end_line = target.end_line
self.end_column = target.end_column
if column is not None:
self.column = column
if end_line is not None:
self.end_line = end_line
if end_column is not None:
self.end_column = end_column
if TYPE_CHECKING:
# break import cycle only needed for mypy
import mypy.types
T = TypeVar("T")
JsonDict: _TypeAlias = dict[str, Any]
# Symbol table node kinds
#
# TODO rename to use more descriptive names
LDEF: Final = 0
GDEF: Final = 1
MDEF: Final = 2
# Placeholder for a name imported via 'from ... import'. Second phase of
# semantic will replace this the actual imported reference. This is
# needed so that we can detect whether a name has been imported during
# XXX what?
UNBOUND_IMPORTED: Final = 3
# RevealExpr node kinds
REVEAL_TYPE: Final = 0
REVEAL_LOCALS: Final = 1
# Kinds of 'literal' expressions.
#
# Use the function mypy.literals.literal to calculate these.
#
# TODO: Can we make these less confusing?
LITERAL_YES: Final = 2 # Value of expression known statically
LITERAL_TYPE: Final = 1 # Type of expression can be narrowed (e.g. variable reference)
LITERAL_NO: Final = 0 # None of the above
node_kinds: Final = {LDEF: "Ldef", GDEF: "Gdef", MDEF: "Mdef", UNBOUND_IMPORTED: "UnboundImported"}
inverse_node_kinds: Final = {_kind: _name for _name, _kind in node_kinds.items()}
implicit_module_attrs: Final = {
"__name__": "__builtins__.str",
"__doc__": None, # depends on Python version, see semanal.py
"__path__": None, # depends on if the module is a package
"__file__": "__builtins__.str",
"__package__": "__builtins__.str",
"__annotations__": None, # dict[str, Any] bounded in add_implicit_module_attrs()
"__spec__": None, # importlib.machinery.ModuleSpec bounded in add_implicit_module_attrs()
}
# These aliases exist because built-in class objects are not subscriptable.
# For example `list[int]` fails at runtime. Instead List[int] should be used.
type_aliases: Final = {
"typing.List": "builtins.list",
"typing.Dict": "builtins.dict",
"typing.Set": "builtins.set",
"typing.FrozenSet": "builtins.frozenset",
"typing.ChainMap": "collections.ChainMap",
"typing.Counter": "collections.Counter",
"typing.DefaultDict": "collections.defaultdict",
"typing.Deque": "collections.deque",
"typing.OrderedDict": "collections.OrderedDict",
# HACK: a lie in lieu of actual support for PEP 675
"typing.LiteralString": "builtins.str",
}
# This keeps track of the oldest supported Python version where the corresponding
# alias source is available.
type_aliases_source_versions: Final = {"typing.LiteralString": (3, 11)}
# This keeps track of aliases in `typing_extensions`, which we treat specially.
typing_extensions_aliases: Final = {
# See: https://github.com/python/mypy/issues/11528
"typing_extensions.OrderedDict": "collections.OrderedDict",
# HACK: a lie in lieu of actual support for PEP 675
"typing_extensions.LiteralString": "builtins.str",
}
reverse_builtin_aliases: Final = {
"builtins.list": "typing.List",
"builtins.dict": "typing.Dict",
"builtins.set": "typing.Set",
"builtins.frozenset": "typing.FrozenSet",
}
RUNTIME_PROTOCOL_DECOS: Final = (
"typing.runtime_checkable",
"typing_extensions.runtime",
"typing_extensions.runtime_checkable",
)
LAMBDA_NAME: Final = "<lambda>"
| Context |
python | openai__openai-python | src/openai/types/responses/response_function_shell_call_output_content.py | {
"start": 341,
"end": 451
} | class ____(BaseModel):
type: Literal["timeout"]
"""The outcome type. Always `timeout`."""
| OutcomeTimeout |
python | getsentry__sentry | src/sentry/incidents/endpoints/serializers/workflow_engine_action.py | {
"start": 1079,
"end": 5112
} | class ____(Serializer):
def get_alert_rule_trigger_id(self, action: Action) -> int | None:
"""
Fetches the alert rule trigger id for the detector trigger related to the given action
"""
action_filter_data_condition = DataCondition.objects.filter(
condition_group__in=Subquery(
DataConditionGroupAction.objects.filter(action=action).values("condition_group")
),
type=Condition.ISSUE_PRIORITY_GREATER_OR_EQUAL,
condition_result=True,
)
detector_dcg = DetectorWorkflow.objects.filter(
workflow__in=Subquery(
WorkflowDataConditionGroup.objects.filter(
condition_group__in=Subquery(
action_filter_data_condition.values("condition_group")
)
).values("workflow")
)
).values("detector__workflow_condition_group")
detector_trigger = DataCondition.objects.get(
condition_result__in=Subquery(action_filter_data_condition.values("comparison")),
condition_group__in=detector_dcg,
)
try:
alert_rule_trigger_id = DataConditionAlertRuleTrigger.objects.values_list(
"alert_rule_trigger_id", flat=True
).get(data_condition=detector_trigger)
return alert_rule_trigger_id
except DataConditionAlertRuleTrigger.DoesNotExist:
# this data condition does not have an analog in the old system,
# but we need to return *something*
return get_fake_id_from_object_id(detector_trigger.id)
def serialize(
self, obj: Action, attrs: Mapping[str, Any], user: User | RpcUser | AnonymousUser, **kwargs
) -> dict[str, Any]:
"""
Temporary serializer to take an Action and serialize it for the old metric alert rule endpoints
"""
from sentry.incidents.serializers import ACTION_TARGET_TYPE_TO_STRING
try:
aarta = ActionAlertRuleTriggerAction.objects.get(action=obj.id)
except ActionAlertRuleTriggerAction.DoesNotExist:
aarta = None
priority = obj.data.get("priority")
type_value = ActionService.get_value(obj.type)
target = MetricAlertRegistryHandler.target(obj)
target_type = obj.config.get("target_type")
target_identifier = obj.config.get("target_identifier")
target_display = obj.config.get("target_display")
sentry_app_id = None
sentry_app_config = None
if obj.type == Action.Type.SENTRY_APP.value:
sentry_app_id = int(obj.config.get("target_identifier"))
sentry_app_config = obj.data.get("settings")
result = {
"id": (
str(aarta.alert_rule_trigger_action_id)
if aarta is not None
else str(get_fake_id_from_object_id(obj.id))
),
"alertRuleTriggerId": str(self.get_alert_rule_trigger_id(obj)),
"type": obj.type,
"targetType": ACTION_TARGET_TYPE_TO_STRING[ActionTarget(target_type)],
"targetIdentifier": get_identifier_from_action(
type_value, str(target_identifier), target_display
),
"inputChannelId": get_input_channel_id(type_value, target_identifier),
"integrationId": obj.integration_id,
"sentryAppId": sentry_app_id,
"dateCreated": obj.date_added,
"desc": human_desc(
type_value,
target_type,
target_identifier,
target,
target_display,
target_identifier,
priority,
),
"priority": priority,
}
# Check if action is a Sentry App that has Alert Rule UI Component settings
if sentry_app_id and sentry_app_config:
result["settings"] = sentry_app_config
return result
| WorkflowEngineActionSerializer |
python | pytorch__pytorch | test/inductor/test_gpu_cpp_wrapper.py | {
"start": 1448,
"end": 2694
} | class ____(InductorTestCase):
device = GPU_TYPE
def test_aoti_debug_printer_works_on_constants(self):
batch_size = 32
seq_length = 50
hidden_size = 768
def test_fn():
inp = torch.randn(batch_size, seq_length, hidden_size, device=self.device)
weight = torch.randn(hidden_size, hidden_size, device=self.device)
matmul_output = inp @ weight
torch.nn.LayerNorm(hidden_size, device=self.device)(matmul_output)
return True
comp = torch.compile(
options={
"cpp_wrapper": True,
"aot_inductor.debug_intermediate_value_printer": "2",
}
)(test_fn)
comp()
def test_non_tensor_args_wrapped_on_cpu(self):
if not RUN_GPU:
self.skipTest("GPU not available")
def test_fn(x, s):
return (x + s).sum()
compiled = torch.compile(options={"cpp_wrapper": True})(test_fn)
x = torch.randn(4, device=self.device)
with torch.utils._device.DeviceContext(self.device):
_, code = test_torchinductor.run_and_get_cpp_code(compiled, x, 3)
self.assertIn("torch.tensor(arg, device='cpu')", code)
| TestGpuWrapper |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 823665,
"end": 825102
} | class ____(
sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"restored_custom_email_routings_count",
"restored_issue_assignments_count",
"restored_memberships",
"restored_memberships_count",
"restored_repositories_count",
"restored_repository_stars_count",
"restored_repository_watches_count",
)
restored_custom_email_routings_count = sgqlc.types.Field(
Int, graphql_name="restoredCustomEmailRoutingsCount"
)
restored_issue_assignments_count = sgqlc.types.Field(
Int, graphql_name="restoredIssueAssignmentsCount"
)
restored_memberships = sgqlc.types.Field(
sgqlc.types.list_of(
sgqlc.types.non_null("OrgRestoreMemberAuditEntryMembership")
),
graphql_name="restoredMemberships",
)
restored_memberships_count = sgqlc.types.Field(
Int, graphql_name="restoredMembershipsCount"
)
restored_repositories_count = sgqlc.types.Field(
Int, graphql_name="restoredRepositoriesCount"
)
restored_repository_stars_count = sgqlc.types.Field(
Int, graphql_name="restoredRepositoryStarsCount"
)
restored_repository_watches_count = sgqlc.types.Field(
Int, graphql_name="restoredRepositoryWatchesCount"
)
| OrgRestoreMemberAuditEntry |
python | wandb__wandb | wandb/sdk/artifacts/_generated/delete_aliases.py | {
"start": 180,
"end": 256
} | class ____(GQLResult):
result: Optional[DeleteAliasesResult]
| DeleteAliases |
python | walkccc__LeetCode | solutions/3094. Guess the Number Using Bitwise Questions II/3094-2.py | {
"start": 68,
"end": 290
} | class ____:
def findNumber(self) -> int:
return functools.reduce(lambda x, i: x | (1 << i)
if commonBits(1 << i) > commonBits(1 << i)
else x, range(31), 0)
| Solution |
python | django__django | tests/validation/models.py | {
"start": 4792,
"end": 5081
} | class ____(models.Model):
field = models.CharField(max_length=255)
class Meta:
required_db_features = {"supports_expression_indexes"}
constraints = [
models.UniqueConstraint(Lower("field"), name="func_lower_field_uq"),
]
| UniqueFuncConstraintModel |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py | {
"start": 14865,
"end": 17926
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testComputeBatchSizeKnown(self):
# When drop_remainder=True, batch size can be inferred from the type spec.
dataset = dataset_ops.Dataset.range(32).batch(4, drop_remainder=True)
dataset = dataset_ops.Dataset.zip((dataset, dataset))
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(4, self.evaluate(batch_size))
@combinations.generate(test_base.default_test_combinations())
def testComputeBatchSizeKnownAndMismatched(self):
# Return -1 when different components have different batch sizes.
dataset = dataset_ops.Dataset.range(32)
dataset = dataset_ops.Dataset.zip((dataset.batch(4, drop_remainder=True),
dataset.batch(8, drop_remainder=True)))
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(-1, self.evaluate(batch_size))
@combinations.generate(test_base.default_test_combinations())
def testComputeBatchSizeUnknown(self):
dataset = dataset_ops.Dataset.range(32).batch(4)
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(4, self.evaluate(batch_size))
@combinations.generate(test_base.default_test_combinations())
def testComputeBatchSizeWithPassthrough(self):
dataset = dataset_ops.Dataset.range(32).batch(4)
dataset = dataset.take(5)
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(4, self.evaluate(batch_size))
@combinations.generate(test_base.default_test_combinations())
def testComputeBatchSizeWithPassthroughInvalid(self):
dataset = dataset_ops.Dataset.range(32).batch(4)
dataset = dataset.map(lambda x: x + 1)
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(-1, self.evaluate(batch_size))
@combinations.generate(test_base.default_test_combinations())
def testComputeBatchSizeWithZip(self):
dataset = dataset_ops.Dataset.range(32).batch(4)
dataset = dataset_ops.Dataset.zip((dataset, dataset))
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(4, self.evaluate(batch_size))
@combinations.generate(test_base.default_test_combinations())
def testComputeBatchSizeWithZipMismatched(self):
dataset = dataset_ops.Dataset.range(32)
dataset = dataset_ops.Dataset.zip((dataset.batch(4), dataset.batch(8)))
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(-1, self.evaluate(batch_size))
@combinations.generate(test_base.default_test_combinations())
def testNoneDataset(self):
# Some datasets, e.g. datasets with None tensors, have components without
# output shapes. Test that this doesn't break computing batch size logic.
dataset = dataset_ops.Dataset.range(4)
dataset = dataset.map(lambda x: (x, None))
dataset = dataset.batch(4, drop_remainder=True)
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(4, self.evaluate(batch_size))
| ComputeBatchSizeTest |
python | sqlalchemy__sqlalchemy | test/sql/test_deprecations.py | {
"start": 1026,
"end": 1401
} | class ____(fixtures.TestBase):
def test_deprecate_tometadata(self):
m1 = MetaData()
t1 = Table("t", m1, Column("q", Integer))
with testing.expect_deprecated(
r"Table.tometadata\(\) is renamed to Table.to_metadata\(\)"
):
m2 = MetaData()
t2 = t1.tometadata(m2)
eq_(t2.name, "t")
| ToMetaDataTest |
python | skorch-dev__skorch | skorch/tests/test_probabilistic.py | {
"start": 21599,
"end": 24474
} | class ____(BaseProbabilisticTests):
"""Tests for GPBinaryClassifier."""
##########################
# constants and fixtures #
##########################
n_samples = 50
n_targets = 2
supports_predict_proba = True
supports_return_std = False
supports_return_cov = False
settable_params = {'gp__module__eps': 1e-5}
scoring = 'neg_mean_squared_error'
@pytest.fixture
def data(self):
X = np.linspace(-8, 8.01, self.n_samples).astype(np.float32)
y = (np.sin(X) + np.random.randn(len(X)) * 0.2 > 0).astype(np.int64)
return X, y
@pytest.fixture
def gp_cls(self):
from skorch.probabilistic import GPBinaryClassifier
return GPBinaryClassifier
@pytest.fixture
def module_cls(self):
return VariationalBinaryClassificationModule
@pytest.fixture
def gp(self, gp_cls, module_cls, data):
X, y = data
gpc = gp_cls(
module_cls,
module__inducing_points=torch.from_numpy(X[:10]),
likelihood=MyBernoulliLikelihood,
criterion=gpytorch.mlls.VariationalELBO,
criterion__num_data=int(0.8 * len(y)),
batch_size=24,
)
# we want to make sure batching is properly tested
assert gpc.batch_size < self.n_samples
return gpc
# Since GPyTorch v1.10, GPBinaryClassifier is the only estimator left that
# still has issues with pickling/deepcopying.
@pytest.mark.xfail(strict=True)
def test_pickling(self, gp_fit, data):
# Currently fails because of issues outside of our control, this test
# should alert us to when the issue has been fixed. Some issues have
# been fixed in https://github.com/cornellius-gp/gpytorch/pull/1336 but
# not all.
pickle.dumps(gp_fit)
def test_pickle_error_msg(self, gp_fit, data):
# Should eventually be replaced by a test that saves and loads the model
# using pickle and checks that the predictions are identical
msg = ("This GPyTorch model cannot be pickled. The reason is probably this:"
" https://github.com/pytorch/pytorch/issues/38137. "
"Try using 'dill' instead of 'pickle'.")
with pytest.raises(pickle.PicklingError, match=msg):
pickle.dumps(gp_fit)
def test_deepcopy(self, gp_fit, data):
# Should eventually be replaced by a test that saves and loads the model
# using deepcopy and checks that the predictions are identical
msg = ("This GPyTorch model cannot be pickled. The reason is probably this:"
" https://github.com/pytorch/pytorch/issues/38137. "
"Try using 'dill' instead of 'pickle'.")
with pytest.raises(pickle.PicklingError, match=msg):
copy.deepcopy(gp_fit) # doesn't raise
| TestGPBinaryClassifier |
python | python-attrs__attrs | typing-examples/mypy.py | {
"start": 587,
"end": 636
} | class ____:
x: list[int] = attr.ib()
@attr.s
| DD |
python | huggingface__transformers | src/transformers/models/idefics3/configuration_idefics3.py | {
"start": 829,
"end": 4889
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Idefics3VisionModel`]. It is used to instantiate a
Idefics3 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
[google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) used in the Idefics3 model
[HuggingFaceM4/Idefics3-8B-Llama3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1152):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input images.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers.models.idefics3.modeling_idefics3 import Idefics3VisionTransformer
>>> from transformers.models.idefics3.configuration_idefics3 import Idefics3VisionConfig
>>> # Initializing a Idefics3VisionConfig with google/siglip-base-patch16-224 style configuration
>>> configuration = Idefics3VisionConfig()
>>> # Initializing a Idefics3VisionTransformer (with random weights) from the google/siglip-base-patch16-224 style configuration
>>> model = Idefics3VisionTransformer(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "idefics3_vision"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=1152,
intermediate_size=3072,
num_hidden_layers=12,
num_attention_heads=16,
num_channels=3,
image_size=224,
patch_size=32,
hidden_act="gelu_pytorch_tanh",
layer_norm_eps=1e-6,
attention_dropout=0.0,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
| Idefics3VisionConfig |
python | TheAlgorithms__Python | data_structures/linked_list/floyds_cycle_detection.py | {
"start": 889,
"end": 4220
} | class ____:
"""
A class representing a singly linked list.
"""
head: Node | None = None
def __iter__(self) -> Iterator:
"""
Iterates through the linked list.
Returns:
Iterator: An iterator over the linked list.
Examples:
>>> linked_list = LinkedList()
>>> list(linked_list)
[]
>>> linked_list.add_node(1)
>>> tuple(linked_list)
(1,)
"""
visited = []
node = self.head
while node:
# Avoid infinite loop in there's a cycle
if node in visited:
return
visited.append(node)
yield node.data
node = node.next_node
def add_node(self, data: Any) -> None:
"""
Adds a new node to the end of the linked list.
Args:
data (Any): The data to be stored in the new node.
Examples:
>>> linked_list = LinkedList()
>>> linked_list.add_node(1)
>>> linked_list.add_node(2)
>>> linked_list.add_node(3)
>>> linked_list.add_node(4)
>>> tuple(linked_list)
(1, 2, 3, 4)
"""
new_node = Node(data)
if self.head is None:
self.head = new_node
return
current_node = self.head
while current_node.next_node is not None:
current_node = current_node.next_node
current_node.next_node = new_node
def detect_cycle(self) -> bool:
"""
Detects if there is a cycle in the linked list using
Floyd's cycle detection algorithm.
Returns:
bool: True if there is a cycle, False otherwise.
Examples:
>>> linked_list = LinkedList()
>>> linked_list.add_node(1)
>>> linked_list.add_node(2)
>>> linked_list.add_node(3)
>>> linked_list.add_node(4)
>>> linked_list.detect_cycle()
False
# Create a cycle in the linked list
>>> linked_list.head.next_node.next_node.next_node = linked_list.head.next_node
>>> linked_list.detect_cycle()
True
"""
if self.head is None:
return False
slow_pointer: Node | None = self.head
fast_pointer: Node | None = self.head
while fast_pointer is not None and fast_pointer.next_node is not None:
slow_pointer = slow_pointer.next_node if slow_pointer else None
fast_pointer = fast_pointer.next_node.next_node
if slow_pointer == fast_pointer:
return True
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
linked_list = LinkedList()
linked_list.add_node(1)
linked_list.add_node(2)
linked_list.add_node(3)
linked_list.add_node(4)
# Create a cycle in the linked list
# It first checks if the head, next_node, and next_node.next_node attributes of the
# linked list are not None to avoid any potential type errors.
if (
linked_list.head
and linked_list.head.next_node
and linked_list.head.next_node.next_node
):
linked_list.head.next_node.next_node.next_node = linked_list.head.next_node
has_cycle = linked_list.detect_cycle()
print(has_cycle) # Output: True
| LinkedList |
python | django__django | tests/model_fields/models.py | {
"start": 4269,
"end": 4340
} | class ____(models.Model):
bfield = models.BooleanField()
| BooleanModel |
python | pytorch__pytorch | torch/cuda/__init__.py | {
"start": 55711,
"end": 55926
} | class ____(_CudaLegacyStorage):
@classproperty
def dtype(self):
_warn_typed_storage_removal()
return self._dtype
@classproperty
def _dtype(self):
return torch.uint8
| ByteStorage |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dlp.py | {
"start": 61542,
"end": 64858
} | class ____(GoogleCloudBaseOperator):
"""
Gets a job trigger.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPGetDLPJobTriggerOperator`
:param job_trigger_id: The ID of the DLP job trigger to be read.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"job_trigger_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobTriggerDetailsLink(),)
def __init__(
self,
*,
job_trigger_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_trigger_id = job_trigger_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
trigger = hook.get_job_trigger(
job_trigger_id=self.job_trigger_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobTriggerDetailsLink.persist(
context=context,
project_id=project_id,
trigger_name=self.job_trigger_id,
)
return JobTrigger.to_dict(trigger)
| CloudDLPGetDLPJobTriggerOperator |
python | django-extensions__django-extensions | django_extensions/management/commands/dumpscript.py | {
"start": 3301,
"end": 5790
} | class ____(BaseCommand):
help = "Dumps the data as a customised python script."
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument("appname", nargs="+")
parser.add_argument(
"--autofield",
action="store_false",
dest="skip_autofield",
default=True,
help="Include Autofields (like pk fields)",
)
@signalcommand
def handle(self, *args, **options):
app_labels = options["appname"]
# Get the models we want to export
models = get_models(app_labels)
# A dictionary is created to keep track of all the processed objects,
# so that foreign key references can be made using python variable names.
# This variable "context" will be passed around like the town bicycle.
context = {}
# Create a dumpscript object and let it format itself as a string
script = Script(
models=models,
context=context,
stdout=self.stdout,
stderr=self.stderr,
options=options,
)
self.stdout.write(str(script))
self.stdout.write("\n")
def get_models(app_labels):
"""
Get a list of models for the given app labels, with some exceptions.
TODO: If a required model is referenced, it should also be included.
Or at least discovered with a get_or_create() call.
"""
# These models are not to be outputted,
# e.g. because they can be generated automatically
# TODO: This should be "appname.modelname" string
EXCLUDED_MODELS = (ContentType,)
models = []
# If no app labels are given, return all
if not app_labels:
for app in apps.get_app_configs():
models += [
m
for m in apps.get_app_config(app.label).get_models()
if m not in EXCLUDED_MODELS
]
return models
# Get all relevant apps
for app_label in app_labels:
# If a specific model is mentioned, get only that model
if "." in app_label:
app_label, model_name = app_label.split(".", 1)
models.append(apps.get_model(app_label, model_name))
# Get all models for a given app
else:
models += [
m
for m in apps.get_app_config(app_label).get_models()
if m not in EXCLUDED_MODELS
]
return models
| Command |
python | getsentry__sentry | tests/sentry/users/api/endpoints/test_user_password.py | {
"start": 263,
"end": 4516
} | class ____(APITestCase):
endpoint = "sentry-api-0-user-password"
method = "put"
def setUp(self) -> None:
self.user = self.create_user(email="a@example.com", is_managed=False, name="example name")
self.user.set_password("helloworld!")
self.user.save()
self.login_as(self.user)
def test_change_password(self) -> None:
old_password = self.user.password
self.get_success_response(
"me",
status_code=204,
**{
"password": "helloworld!",
"passwordNew": "testpassword",
"passwordVerify": "testpassword",
},
)
user = User.objects.get(id=self.user.id)
assert old_password != user.password
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
"OPTIONS": {"min_length": 8},
},
]
)
def test_password_too_short(self) -> None:
self.get_error_response(
"me",
status_code=400,
**{
"password": "helloworld!",
"passwordNew": "hi",
"passwordVerify": "hi",
},
)
def test_no_password(self) -> None:
self.get_error_response("me", status_code=400, **{"password": "helloworld!"})
self.get_error_response("me", status_code=400)
def test_require_current_password(self) -> None:
self.get_error_response(
"me",
status_code=400,
**{
"password": "wrongpassword",
"passwordNew": "testpassword",
"passwordVerify": "testpassword",
},
)
def test_verifies_mismatch_password(self) -> None:
self.get_error_response(
"me",
status_code=400,
**{
"password": "helloworld!",
"passwordNew": "testpassword",
"passwordVerify": "passworddoesntmatch",
},
)
def test_managed_unable_change_password(self) -> None:
user = self.create_user(email="new@example.com", is_managed=True)
self.login_as(user)
self.get_error_response(
user.id,
status_code=400,
**{"passwordNew": "newpassword", "passwordVerify": "newpassword"},
)
def test_unusable_password_unable_change_password(self) -> None:
user = self.create_user(email="new@example.com")
user.set_unusable_password()
user.save()
self.login_as(user)
self.get_error_response(
user.id,
status_code=400,
**{"passwordNew": "newpassword", "passwordVerify": "newpassword"},
)
def test_cannot_change_other_user_password(self) -> None:
user = self.create_user(email="new@example.com", is_superuser=False)
self.login_as(user)
self.get_error_response(
self.user.id,
status_code=403,
**{
"password": "helloworld!",
"passwordNew": "newpassword",
"passwordVerify": "newpassword",
},
)
def test_superuser_can_change_other_user_password(self) -> None:
user = self.create_user(email="new@example.com", is_superuser=True)
self.login_as(user, superuser=True)
self.get_success_response(
self.user.id,
status_code=204,
**{
"password": "helloworld!",
"passwordNew": "newpassword",
"passwordVerify": "newpassword",
},
)
@override_settings(SENTRY_SELF_HOSTED=False)
def test_rate_limit(self) -> None:
with freeze_time("2024-05-21"):
for _ in range(5):
self.test_require_current_password()
self.get_error_response(
"me",
status_code=429,
**{
"password": "wrongguess",
"passwordNew": "newpassword",
"passwordVerify": "newpassword",
},
)
| UserPasswordTest |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/properties.py | {
"start": 6074,
"end": 6127
} | class ____:
def foo(self) -> None:
...
| Base |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_queried_table_row_count_to_be.py | {
"start": 544,
"end": 5170
} | class ____(QueryExpectation):
"""Expect the expect the number of rows returned from a queried table to equal a specified value.
expect_queried_table_row_count_to_be is a \
[Query Expectation](https://docs.greatexpectations.io/docs/oss/guides/expectations/creating_custom_expectations/how_to_create_custom_query_expectations)
Args:
value (int): \
Expected number of returned rows
query (str): \
SQL query to be executed (default will perform a SELECT COUNT(*) on the table)
Keyword Args:
mostly (None or a float between 0 and 1): \
Successful if at least mostly fraction of values match the expectation. \
For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly).
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
"""
value: int
query: str = """
SELECT COUNT(*)
FROM {batch}
"""
metric_dependencies: ClassVar[Tuple[str, ...]] = ("query.table",)
success_keys: ClassVar[Tuple[str, ...]] = (
"value",
"query",
)
domain_keys: ClassVar[Tuple[str, ...]] = (
"batch_id",
"row_condition",
"condition_parser",
)
examples: ClassVar[List[dict]] = [
{
"data": [
{
"data": {
"col1": [1, 2, 2, 3, 4],
"col2": ["a", "a", "b", "b", "a"],
},
},
],
"suppress_test_for": ["snowflake"],
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"value": 5,
},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"value": 2,
},
"out": {"success": False},
},
{
"title": "positive_test_static_data_asset",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"value": 5,
"query": """
SELECT COUNT(*)
FROM test
""",
},
"out": {"success": True},
},
{
"title": "positive_test_row_condition",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"value": 2,
"row_condition": 'col("col1")==2',
"condition_parser": "great_expectations",
},
"out": {"success": True},
},
],
},
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"tags": ["query-based"],
"contributors": ["@austiezr"],
}
def _validate(
self,
metrics: dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
) -> Union[ExpectationValidationResult, dict]:
configuration = self.configuration
metrics = convert_to_json_serializable(data=metrics)
query_result = list(metrics.get("query.table")[0].values())[0]
value = configuration["kwargs"].get("value")
success = query_result == value
return {
"success": success,
"result": {"observed_value": query_result},
}
if __name__ == "__main__":
ExpectQueriedTableRowCountToBe().print_diagnostic_checklist()
| ExpectQueriedTableRowCountToBe |
python | pypa__pip | src/pip/_vendor/packaging/markers.py | {
"start": 943,
"end": 1058
} | class ____(ValueError):
"""
An invalid marker was found, users should refer to PEP 508.
"""
| InvalidMarker |
python | python__mypy | mypy/test/testsemanal.py | {
"start": 2547,
"end": 3510
} | class ____(DataSuite):
files = ["semanal-errors.test", "semanal-errors-python310.test"]
def run_case(self, testcase: DataDrivenTestCase) -> None:
test_semanal_error(testcase)
def test_semanal_error(testcase: DataDrivenTestCase) -> None:
"""Perform a test case."""
try:
src = "\n".join(testcase.input)
res = build.build(
sources=[BuildSource("main", None, src)],
options=get_semanal_options(src, testcase),
alt_lib_path=test_temp_dir,
)
a = res.errors
except CompileError as e:
# Verify that there was a compile error and that the error messages
# are equivalent.
a = e.messages
if testcase.normalize_output:
a = normalize_error_messages(a)
assert_string_arrays_equal(
testcase.output, a, f"Invalid compiler output ({testcase.file}, line {testcase.line})"
)
# SymbolNode table export test cases
| SemAnalErrorSuite |
python | pypa__pipenv | pipenv/patched/pip/_internal/utils/misc.py | {
"start": 15953,
"end": 19116
} | class ____:
secret: str
redacted: str
def __repr__(self) -> str:
return f"<HiddenText {str(self)!r}>"
def __str__(self) -> str:
return self.redacted
# This is useful for testing.
def __eq__(self, other: Any) -> bool:
if type(self) is not type(other):
return False
# The string being used for redaction doesn't also have to match,
# just the raw, original string.
return self.secret == other.secret
def hide_value(value: str) -> HiddenText:
return HiddenText(value, redacted="****")
def hide_url(url: str) -> HiddenText:
redacted = redact_auth_from_url(url)
return HiddenText(url, redacted=redacted)
def protect_pip_from_modification_on_windows(modifying_pip: bool) -> None:
"""Protection of pip.exe from modification on Windows
On Windows, any operation modifying pip should be run as:
python -m pip ...
"""
pip_names = [
"pip",
f"pip{sys.version_info.major}",
f"pip{sys.version_info.major}.{sys.version_info.minor}",
]
# See https://github.com/pypa/pip/issues/1299 for more discussion
should_show_use_python_msg = (
modifying_pip and WINDOWS and os.path.basename(sys.argv[0]) in pip_names
)
if should_show_use_python_msg:
new_command = [sys.executable, "-m", "pip"] + sys.argv[1:]
raise CommandError(
"To modify pip, please run the following command:\n{}".format(
" ".join(new_command)
)
)
def check_externally_managed() -> None:
"""Check whether the current environment is externally managed.
If the ``EXTERNALLY-MANAGED`` config file is found, the current environment
is considered externally managed, and an ExternallyManagedEnvironment is
raised.
"""
if running_under_virtualenv():
return
marker = os.path.join(sysconfig.get_path("stdlib"), "EXTERNALLY-MANAGED")
if not os.path.isfile(marker):
return
raise ExternallyManagedEnvironment.from_config(marker)
def is_console_interactive() -> bool:
"""Is this console interactive?"""
return sys.stdin is not None and sys.stdin.isatty()
def hash_file(path: str, blocksize: int = 1 << 20) -> Tuple[Any, int]:
"""Return (hash, length) for path using hashlib.sha256()"""
h = hashlib.sha256()
length = 0
with open(path, "rb") as f:
for block in read_chunks(f, size=blocksize):
length += len(block)
h.update(block)
return h, length
def pairwise(iterable: Iterable[Any]) -> Iterator[Tuple[Any, Any]]:
"""
Return paired elements.
For example:
s -> (s0, s1), (s2, s3), (s4, s5), ...
"""
iterable = iter(iterable)
return zip_longest(iterable, iterable)
def partition(
pred: Callable[[T], bool], iterable: Iterable[T]
) -> Tuple[Iterable[T], Iterable[T]]:
"""
Use a predicate to partition entries into false entries and true entries,
like
partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
"""
t1, t2 = tee(iterable)
return filterfalse(pred, t1), filter(pred, t2)
| HiddenText |
python | mlflow__mlflow | mlflow/utils/autologging_utils/client.py | {
"start": 1350,
"end": 1441
} | class ____(NamedTuple):
status: str | None
end_time: int | None
| _PendingSetTerminated |
python | numba__numba | numba/core/typeinfer.py | {
"start": 18329,
"end": 20786
} | class ____(object):
def __init__(self, target, value, dtype, index, loc):
self.target = target
self.value = value
self.dtype = dtype
self.index = index
self.loc = loc
def __call__(self, typeinfer):
with new_error_context("typing of typed-get-item at {loc}",
loc=self.loc):
typevars = typeinfer.typevars
idx_ty = typevars[self.index.name].get()
ty = typevars[self.value.name].get()
self.signature = Signature(self.dtype, ty + idx_ty, None)
typeinfer.add_type(self.target, self.dtype, loc=self.loc)
def get_call_signature(self):
return self.signature
def fold_arg_vars(typevars, args, vararg, kws):
"""
Fold and resolve the argument variables of a function call.
"""
# Fetch all argument types, bail if any is unknown
n_pos_args = len(args)
kwds = [kw for (kw, var) in kws]
argtypes = [typevars[a.name] for a in args]
argtypes += [typevars[var.name] for (kw, var) in kws]
if vararg is not None:
argtypes.append(typevars[vararg.name])
if not all(a.defined for a in argtypes):
return
args = tuple(a.getone() for a in argtypes)
pos_args = args[:n_pos_args]
if vararg is not None:
errmsg = "*args in function call should be a tuple, got %s"
# Handle constant literal used for `*args`
if isinstance(args[-1], types.Literal):
const_val = args[-1].literal_value
# Is the constant value a tuple?
if not isinstance(const_val, tuple):
raise TypingError(errmsg % (args[-1],))
# Append the elements in the const tuple to the positional args
pos_args += const_val
# Handle non-constant
elif not isinstance(args[-1], types.BaseTuple):
# Unsuitable for *args
# (Python is more lenient and accepts all iterables)
raise TypingError(errmsg % (args[-1],))
else:
# Append the elements in the tuple to the positional args
pos_args += args[-1].types
# Drop the last arg
args = args[:-1]
kw_args = dict(zip(kwds, args[n_pos_args:]))
return pos_args, kw_args
def _is_array_not_precise(arrty):
"""Check type is array and it is not precise
"""
return isinstance(arrty, types.Array) and not arrty.is_precise()
| TypedGetItemConstraint |
python | kamyu104__LeetCode-Solutions | Python/minimum-time-to-transport-all-individuals.py | {
"start": 200,
"end": 1999
} | class ____(object):
def minTime(self, n, k, m, time, mul):
"""
:type n: int
:type k: int
:type m: int
:type time: List[int]
:type mul: List[float]
:rtype: float
"""
def update(d, r, s, mask, submask):
t = lookup[submask]*mul[s]
nr = r^1
ns = (s+int(t))%m
new_mask = mask^submask
nd = d+t
if dist[nr][ns][new_mask] > nd:
dist[nr][ns][new_mask] = nd
heapq.heappush(min_heap, (nd, nr, ns, new_mask))
popcount = [0]*(1<<n) # for better performance
for i in xrange(1, (1<<n)):
popcount[i] = popcount[i>>1]+(i&1)
lookup = [max(time[i] for i in xrange(n) if mask&(1<<i)) if mask else 0 for mask in xrange(1<<n)] # Time: O(n * 2^n)
INF = float("inf")
dist = [[[INF]*(1<<n) for _ in xrange(m)] for _ in xrange(2)]
dist[0][0][(1<<n)-1] = 0.0
min_heap = [(0.0, 0, 0, (1<<n)-1)]
while min_heap:
d, r, s, mask = heapq.heappop(min_heap) # Total Time: O((n * m * 2^n + m * 3^n) * log(n * m * 2^n + m * 3^n))
if d != dist[r][s][mask]:
continue
if mask == 0:
assert(r == 1)
return d
if r == 0:
submask = mask
while submask: # Total Time: O(m * 3^n)
if popcount[submask] <= k:
update(d, r, s, mask, submask)
submask = (submask-1)&mask
else:
for i in xrange(n): # Total Time: O(n * m * 2^n)
if mask&(1<<i):
continue
update(d, r, s, mask, 1<<i)
return -1.0
| Solution |
python | tensorflow__tensorflow | tensorflow/python/autograph/utils/misc_test.py | {
"start": 1081,
"end": 2347
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def test_alias_single_tensor(self):
a = constant(1)
new_a = misc.alias_tensors(a)
self.assertFalse(new_a is a)
with self.cached_session() as sess:
self.assertEqual(1, self.evaluate(new_a))
@test_util.run_deprecated_v1
def test_alias_tensors(self):
a = constant(1)
v = Variable(2)
s = 'a'
l = [1, 2, 3]
new_a, new_v, new_s, new_l = misc.alias_tensors(a, v, s, l)
self.assertFalse(new_a is a)
self.assertTrue(new_v is v)
self.assertTrue(new_s is s)
self.assertTrue(new_l is l)
with self.cached_session() as sess:
self.assertEqual(1, self.evaluate(new_a))
def test_get_range_len(self):
get_range_as_graph = def_function.function(misc.get_range_len)
test_range = [(i, constant_op.constant(i)) for i in range(-3, 3)]
results = []
for i, ti in test_range:
for j, tj in test_range:
for k, tk in test_range:
if k == 0:
continue
results.append(((i, j, k), get_range_as_graph(ti, tj, tk)))
for (i, j, k), result_tensor in results:
self.assertEqual(
len(list(range(i, j, k))), self.evaluate(result_tensor))
if __name__ == '__main__':
test.main()
| MiscTest |
python | pytorch__pytorch | torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py | {
"start": 1120,
"end": 1618
} | class ____(torch.nn.Module):
def __init__(self, spec=None, group=None, init_rrefs=True) -> None:
super().__init__()
if spec is not None:
self.sharded_tensor1 = sharded_tensor.rand(
spec, 10, 20, process_group=group, init_rrefs=init_rrefs
)
else:
self.sharded_tensor1 = None
self.random_tensor1 = torch.nn.Parameter(torch.rand(2, 2))
self.submodule = MyShardedModel2(spec, group, init_rrefs)
| MyShardedModel1 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess6.py | {
"start": 248,
"end": 627
} | class ____(Generic[_T]):
def __init__(self, type: type[_T]) -> None: ...
@overload
def __get__(self: "Column[_T]", instance: None, type: Any) -> "Column[_T]": ...
@overload
def __get__(self: "Column[_T]", instance: ParentA, type: Any) -> _T: ...
def __get__(
self, instance: ParentA | None, type: Any
) -> _T | None | "Column[_T]": ...
| Column |
python | pydantic__pydantic | tests/mypy/modules/plugin_fail_baseConfig.py | {
"start": 4720,
"end": 4865
} | class ____(FrozenModel):
class Config:
frozen = False
inheriting2 = InheritingModel2(x=1, y='c')
inheriting2.y = 'd'
| InheritingModel2 |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/properties.py | {
"start": 3127,
"end": 3538
} | class ____:
def __init__(self, y):
self.underlying = 0
self.x = y
@property
def x(self) -> int:
return self.underlying
@x.setter
def x(self, x_value) -> None:
self.underlying = x_value
def property_setter_in_constructor():
obj = PropertySetterInConstructor(_test_source())
_test_sink(obj.x)
_test_sink(obj.underlying)
| PropertySetterInConstructor |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_header_image12.py | {
"start": 315,
"end": 986
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("header_image12.xlsx")
self.ignore_elements = {
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"]
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_header("&L&G", {"image_left": self.image_dir + "black_300e.png"})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | crytic__slither | slither/solc_parsing/declarations/structure_top_level.py | {
"start": 566,
"end": 2610
} | class ____(CallerContextExpression): # pylint: disable=too-few-public-methods
"""
Structure class
"""
# elems = [(type, name)]
def __init__( # pylint: disable=too-many-arguments
self,
st: StructureTopLevel,
struct: Dict,
slither_parser: "SlitherCompilationUnitSolc",
) -> None:
if slither_parser.is_compact_ast:
name = struct["name"]
attributes = struct
else:
name = struct["attributes"][slither_parser.get_key()]
attributes = struct["attributes"]
if "canonicalName" in attributes:
canonicalName = attributes["canonicalName"]
else:
canonicalName = name
children = struct["members"] if "members" in struct else struct.get("children", [])
self._structure = st
st.name = name
st.canonical_name = canonicalName
self._slither_parser = slither_parser
self._elemsNotParsed = children
def analyze(self) -> None:
for elem_to_parse in self._elemsNotParsed:
elem = StructureVariable()
elem.set_structure(self._structure)
elem.set_offset(elem_to_parse["src"], self._slither_parser.compilation_unit)
elem_parser = StructureVariableSolc(elem, elem_to_parse)
elem_parser.analyze(self)
self._structure.elems[elem.name] = elem
self._structure.add_elem_in_order(elem.name)
self._elemsNotParsed = []
@property
def is_compact_ast(self) -> bool:
return self._slither_parser.is_compact_ast
@property
def compilation_unit(self) -> SlitherCompilationUnit:
return self._slither_parser.compilation_unit
def get_key(self) -> str:
return self._slither_parser.get_key()
@property
def slither_parser(self) -> "SlitherCompilationUnitSolc":
return self._slither_parser
@property
def underlying_structure(self) -> StructureTopLevel:
return self._structure
| StructureTopLevelSolc |
python | walkccc__LeetCode | solutions/1647. Minimum Deletions to Make Character Frequencies Unique/1647.py | {
"start": 0,
"end": 298
} | class ____:
def minDeletions(self, s: str) -> int:
ans = 0
count = collections.Counter(s)
usedFreq = set()
for freq in count.values():
while freq > 0 and freq in usedFreq:
freq -= 1 # Delete ('a' + i).
ans += 1
usedFreq.add(freq)
return ans
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_23/frames.py | {
"start": 353200,
"end": 381346
} | class ____(Response):
"""
Response of frames.get_snippets_for_dataview endpoint.
:param frames: List of frames for the requested page. The amount of frames
returned is not guaranteed to be equal to the requested page size.
:type frames: Sequence[Snippet]
:param frames_total: The total number of first frames per unique URI
:type frames_total: int
:param pages_total: The total number of pages
:type pages_total: int
:param page: The currently requested page
:type page: int
:param paging_id: Paging session id to be provided in order to get the next
page of frames
:type paging_id: str
:param total_in_versions: The total number of snippets for the dataview
versions (without applying the dataview filters)
:type total_in_versions: int
:param versions_updated: The list of versions whose frames were updated since
the creation of the paging iterator. If a version was updated after the
iteration was started you may not receive all the updated snippets. To make
sure that you see all the snippets after the update please reset the paging id
(this may result in a different total amount of pages for the same page size).
:type versions_updated: Sequence[str]
"""
_service = "frames"
_action = "get_snippets_for_dataview"
_version = "2.23"
_schema = {
"definitions": {
"augmentation": {
"properties": {
"arguments": {
"additionalProperties": True,
"description": "Arguments dictionary, passed to custom augmentations.",
"type": ["object", "null"],
},
"cls": {
"description": "Augmentation class (see global definitions)",
"type": ["string", "null"],
},
"params": {
"description": (
"Transform parameters, an array ot 3 randomly generated values. Fixed values are passed in"
" case of affine reflect augmentation."
),
"items": {"type": "number"},
"type": ["array", "null"],
},
"strength": {
"description": "Transform strength. Required for pixel transforms.",
"type": ["number", "null"],
},
"trans_mat": {
"description": "Transform matrix (list of lists). Required for affine transforms.",
"items": {"items": {"type": "number"}, "type": "array"},
"type": ["array", "null"],
},
"type": {
"description": "Augmentation type (see global definitions)",
"type": ["string", "null"],
},
},
"type": "object",
},
"dataset_version": {
"properties": {
"id": {"description": "Dataset id", "type": ["string", "null"]},
"version": {
"description": "Dataset version id",
"type": ["string", "null"],
},
},
"type": "object",
},
"frame": {
"properties": {
"augmentation": {
"description": "List of augmentations",
"items": {"$ref": "#/definitions/augmentation"},
"type": ["array", "null"],
},
"blob": {
"description": "Raw data (blob) for the frame",
"type": ["string", "null"],
},
"context_id": {
"description": (
"Context ID. Used for the default frames sorting. If not set then it is filled from the uri"
" of the first source."
),
"type": ["string", "null"],
},
"dataset": {
"description": "Frame's dataset version",
"oneOf": [
{"$ref": "#/definitions/dataset_version"},
{"type": "null"},
],
},
"id": {"description": "Frame id", "type": ["string", "null"]},
"is_key_frame": {
"description": "Is this a key frame (only applicable in frames who'se src is a video)",
"type": ["boolean", "null"],
},
"key_frame": {
"description": "ID of the key frame that this frame belongs to",
"type": ["string", "null"],
},
"label_rule_counts": {
"additionalProperties": True,
"description": "The number of matched roi per lable rule",
"type": ["object", "null"],
},
"labels_size": {
"description": "Number of labels returned",
"type": ["integer", "null"],
},
"meta": {
"additionalProperties": True,
"description": (
"Additional metadata dictionary for the frame. Please note that using this field"
" effectively defines a schema (dictionary structure and types used as values) - frames"
" within the same dataset cannot use conflicting schemas for this field (see documentation"
" for more details)."
),
"type": ["object", "null"],
},
"meta_blob": {
"additionalProperties": True,
"description": (
"Non searchable metadata dictionary for the frame. The fields in this object cannot be"
" searched by and are not added to the frame schema"
),
"type": ["object", "null"],
},
"new_ver": {
"description": "Newer version of this frame, if asked to merge",
"oneOf": [{"$ref": "#/definitions/frame"}, {"type": "null"}],
},
"rois": {
"description": "Frame regions of interest",
"items": {"$ref": "#/definitions/roi"},
"type": ["array", "null"],
},
"rule_name": {
"description": (
"Name of the filtering rule according to which this frame was provided (if applicable)"
),
"type": ["string", "null"],
},
"saved": {
"description": "Last time frame was saved (timestamp)",
"type": ["integer", "null"],
},
"saved_in_version": {
"description": "Last version this frame was saved in (version ID)",
"type": ["string", "null"],
},
"sources": {
"description": "Sources of this frame",
"items": {"$ref": "#/definitions/source"},
"type": ["array", "null"],
},
"timestamp": {
"description": (
"Frame's offset in milliseconds, used primarily for video content. Used for the default"
" frames sorting as the secondary key (with the primary key being 'context_id'). For"
" images, this value should typically be 0. If not set, value is filled from the timestamp"
" of the first source. We recommend using this field only in cases concerning the default"
" sorting behavior."
),
"type": ["integer", "null"],
},
"updated": {
"description": "Last time frame was saved (timestamp)",
"type": ["integer", "null"],
},
"updated_in_version": {
"description": "Last version this frame was updated in (version ID)",
"type": ["string", "null"],
},
"video_gop": {
"description": (
"Video encoding GOP value for the source of this frame. Only valid for video frames"
),
"type": ["number", "null"],
},
},
"type": "object",
},
"mask": {
"properties": {
"content_type": {
"description": "Content type (e.g. 'image/jpeg', 'image/png')",
"type": ["string", "null"],
},
"height": {
"description": "Height in pixels",
"type": ["integer", "null"],
},
"id": {
"description": "unique ID (in this frame)",
"type": ["string", "null"],
},
"timestamp": {
"default": 0,
"description": (
"Timestamp in the source data (for video content. for images, this value should be 0)"
),
"type": ["integer", "null"],
},
"uri": {"description": "Data URI", "type": ["string", "null"]},
"width": {
"description": "Width in pixels",
"type": ["integer", "null"],
},
},
"type": "object",
},
"preview": {
"properties": {
"content_type": {
"description": "Content type (e.g. 'image/jpeg', 'image/png')",
"type": ["string", "null"],
},
"height": {
"description": "Height in pixels",
"type": ["integer", "null"],
},
"timestamp": {
"default": 0,
"description": (
"Timestamp in the source data (for video content. for images, this value should be 0)"
),
"type": ["integer", "null"],
},
"uri": {"description": "Data URI", "type": ["string", "null"]},
"width": {
"description": "Width in pixels",
"type": ["integer", "null"],
},
},
"type": "object",
},
"roi": {
"properties": {
"area": {
"description": "ROI area (not used)",
"type": ["integer", "null"],
},
"confidence": {
"description": "ROI confidence",
"type": ["number", "null"],
},
"id": {"description": "ROI id", "type": ["string", "null"]},
"label": {
"description": "ROI labels",
"items": {"type": "string"},
"type": ["array", "null"],
},
"label_num": {
"description": (
"Label number according to the specified labels mapping Used only when ROI is returned as"
" part of a task's frame."
),
"type": ["integer", "null"],
},
"mask": {
"description": "Mask info for this ROI",
"oneOf": [{"$ref": "#/definitions/roi_mask"}, {"type": "null"}],
},
"meta": {
"additionalProperties": True,
"description": "Additional metadata dictionary for the roi",
"type": ["object", "null"],
},
"poly": {
"description": "ROI polygon (x0, y0, ..., xn, yn)",
"items": {"type": "number"},
"type": ["array", "null"],
},
"sources": {
"description": "Sources that this ROI belongs to",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
},
"roi_mask": {
"properties": {
"id": {"description": "Mask ID", "type": "string"},
"value": {
"description": "Mask value",
"items": {"type": "integer"},
"type": "array",
},
},
"required": ["id", "value"],
"type": "object",
},
"snippet": {
"properties": {
"augmentation": {
"description": "List of augmentations",
"items": {"$ref": "#/definitions/augmentation"},
"type": ["array", "null"],
},
"blob": {
"description": "Raw data (blob) for the frame",
"type": ["string", "null"],
},
"context_id": {
"description": (
"Context ID. Used for the default frames sorting. If not set then it is filled from the uri"
" of the first source."
),
"type": ["string", "null"],
},
"dataset": {
"description": "Frame's dataset version",
"oneOf": [
{"$ref": "#/definitions/dataset_version"},
{"type": "null"},
],
},
"id": {"description": "Frame id", "type": ["string", "null"]},
"is_key_frame": {
"description": "Is this a key frame (only applicable in frames who'se src is a video)",
"type": ["boolean", "null"],
},
"key_frame": {
"description": "ID of the key frame that this frame belongs to",
"type": ["string", "null"],
},
"label_rule_counts": {
"additionalProperties": True,
"description": "The number of matched roi per lable rule",
"type": ["object", "null"],
},
"labels_size": {
"description": "Number of labels returned",
"type": ["integer", "null"],
},
"meta": {
"additionalProperties": True,
"description": (
"Additional metadata dictionary for the frame. Please note that using this field"
" effectively defines a schema (dictionary structure and types used as values) - frames"
" within the same dataset cannot use conflicting schemas for this field (see documentation"
" for more details)."
),
"type": ["object", "null"],
},
"meta_blob": {
"additionalProperties": True,
"description": (
"Non searchable metadata dictionary for the frame. The fields in this object cannot be"
" searched by and are not added to the frame schema"
),
"type": ["object", "null"],
},
"new_ver": {
"description": "Newer version of this frame, if asked to merge",
"oneOf": [{"$ref": "#/definitions/frame"}, {"type": "null"}],
},
"num_frames": {
"description": "Number of frames represented by this snippet",
"type": ["integer", "null"],
},
"rois": {
"description": "Frame regions of interest",
"items": {"$ref": "#/definitions/roi"},
"type": ["array", "null"],
},
"rule_name": {
"description": (
"Name of the filtering rule according to which this frame was provided (if applicable)"
),
"type": ["string", "null"],
},
"saved": {
"description": "Last time frame was saved (timestamp)",
"type": ["integer", "null"],
},
"saved_in_version": {
"description": "Last version this frame was saved in (version ID)",
"type": ["string", "null"],
},
"sources": {
"description": "Sources of this frame",
"items": {"$ref": "#/definitions/source"},
"type": ["array", "null"],
},
"timestamp": {
"description": (
"Frame's offset in milliseconds, used primarily for video content. Used for the default"
" frames sorting as the secondary key (with the primary key being 'context_id'). For"
" images, this value should typically be 0. If not set, value is filled from the timestamp"
" of the first source. We recommend using this field only in cases concerning the default"
" sorting behavior."
),
"type": ["integer", "null"],
},
"updated": {
"description": "Last time frame was saved (timestamp)",
"type": ["integer", "null"],
},
"updated_in_version": {
"description": "Last version this frame was updated in (version ID)",
"type": ["string", "null"],
},
"video_gop": {
"description": (
"Video encoding GOP value for the source of this frame. Only valid for video frames"
),
"type": ["number", "null"],
},
},
"type": "object",
},
"source": {
"properties": {
"content_type": {
"description": "Content type (e.g. 'image/jpeg', 'image/png')",
"type": ["string", "null"],
},
"height": {
"description": "Height in pixels",
"type": ["integer", "null"],
},
"id": {
"description": "unique ID (in this frame)",
"type": ["string", "null"],
},
"masks": {
"items": {"$ref": "#/definitions/mask"},
"type": ["array", "null"],
},
"meta": {
"additionalProperties": True,
"description": "Additional metadata dictionary for the source",
"type": ["object", "null"],
},
"preview": {
"oneOf": [{"$ref": "#/definitions/preview"}, {"type": "null"}]
},
"timestamp": {
"default": 0,
"description": (
"Timestamp in the source data (for video content. for images, this value should be 0)"
),
"type": ["integer", "null"],
},
"uri": {"description": "Data URI", "type": ["string", "null"]},
"width": {
"description": "Width in pixels",
"type": ["integer", "null"],
},
},
"type": "object",
},
},
"properties": {
"frames": {
"description": (
"List of frames for the requested page. The amount of frames returned is not guaranteed to be equal"
" to the requested page size."
),
"items": {"$ref": "#/definitions/snippet"},
"type": ["array", "null"],
},
"frames_total": {
"description": "The total number of first frames per unique URI",
"type": ["integer", "null"],
},
"page": {
"description": "The currently requested page",
"type": ["integer", "null"],
},
"pages_total": {
"description": "The total number of pages",
"type": ["integer", "null"],
},
"paging_id": {
"description": "Paging session id to be provided in order to get the next page of frames",
"type": ["string", "null"],
},
"total_in_versions": {
"description": (
"The total number of snippets for the dataview versions (without applying the dataview filters)"
),
"type": ["integer", "null"],
},
"versions_updated": {
"description": (
"The list of versions whose frames were updated since the creation of the paging iterator. If a"
" version was updated after the iteration was started you may not receive all the updated snippets."
" To make sure that you see all the snippets after the update please reset the paging id (this may"
" result in a different total amount of pages for the same page size)."
),
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self,
frames=None,
frames_total=None,
pages_total=None,
page=None,
paging_id=None,
total_in_versions=None,
versions_updated=None,
**kwargs
):
super(GetSnippetsForDataviewResponse, self).__init__(**kwargs)
self.frames = frames
self.frames_total = frames_total
self.pages_total = pages_total
self.page = page
self.paging_id = paging_id
self.total_in_versions = total_in_versions
self.versions_updated = versions_updated
@schema_property("frames")
def frames(self):
return self._property_frames
@frames.setter
def frames(self, value):
if value is None:
self._property_frames = None
return
self.assert_isinstance(value, "frames", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [Snippet.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "frames", Snippet, is_array=True)
self._property_frames = value
@schema_property("frames_total")
def frames_total(self):
return self._property_frames_total
@frames_total.setter
def frames_total(self, value):
if value is None:
self._property_frames_total = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "frames_total", six.integer_types)
self._property_frames_total = value
@schema_property("pages_total")
def pages_total(self):
return self._property_pages_total
@pages_total.setter
def pages_total(self, value):
if value is None:
self._property_pages_total = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "pages_total", six.integer_types)
self._property_pages_total = value
@schema_property("page")
def page(self):
return self._property_page
@page.setter
def page(self, value):
if value is None:
self._property_page = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page", six.integer_types)
self._property_page = value
@schema_property("paging_id")
def paging_id(self):
return self._property_paging_id
@paging_id.setter
def paging_id(self, value):
if value is None:
self._property_paging_id = None
return
self.assert_isinstance(value, "paging_id", six.string_types)
self._property_paging_id = value
@schema_property("total_in_versions")
def total_in_versions(self):
return self._property_total_in_versions
@total_in_versions.setter
def total_in_versions(self, value):
if value is None:
self._property_total_in_versions = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "total_in_versions", six.integer_types)
self._property_total_in_versions = value
@schema_property("versions_updated")
def versions_updated(self):
return self._property_versions_updated
@versions_updated.setter
def versions_updated(self, value):
if value is None:
self._property_versions_updated = None
return
self.assert_isinstance(value, "versions_updated", (list, tuple))
self.assert_isinstance(
value, "versions_updated", six.string_types, is_array=True
)
self._property_versions_updated = value
| GetSnippetsForDataviewResponse |
python | pyqtgraph__pyqtgraph | pyqtgraph/jupyter/GraphicsView.py | {
"start": 5847,
"end": 6722
} | class ____(GraphicsView):
"""jupyter_rfb analogue of
:class:`PlotWidget <pyqtgraph.PlotWidget>`."""
def __init__(self, **kwds):
super().__init__(**kwds)
plotItem = graphicsItems.PlotItem.PlotItem(enableMenu=False)
self.gfxView.setCentralItem(plotItem)
connect_viewbox_redraw(plotItem.getViewBox(), self.request_draw)
self.plotItem = plotItem
def getPlotItem(self):
return self.plotItem
def __getattr__(self, attr):
# kernel crashes if we don't skip attributes starting with '_'
if attr.startswith('_'):
return super().__getattr__(attr)
# implicitly wrap methods from plotItem
if hasattr(self.plotItem, attr):
m = getattr(self.plotItem, attr)
if hasattr(m, '__call__'):
return m
raise AttributeError(attr)
| PlotWidget |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/tests/core/test_settings.py | {
"start": 2651,
"end": 5677
} | class ____:
"""Test PrefectDbtSettings initialization and default values."""
def test_initializes_with_defaults(
self, mock_find_profiles_dir: Path, mock_get_current_settings: Mock
):
"""Test that settings initialize with sensible defaults."""
settings = PrefectDbtSettings(profiles_dir=mock_find_profiles_dir)
assert settings.profiles_dir == mock_find_profiles_dir
assert settings.project_dir == Path.cwd()
assert settings.target_path == Path("target")
assert settings.log_level == EventLevel.INFO
def test_accepts_custom_values(self, temp_profiles_dir: Path):
"""Test that settings accept and use custom values."""
custom_project = Path("/custom/project")
custom_target = Path("custom_target")
custom_log_level = EventLevel.DEBUG
settings = PrefectDbtSettings(
profiles_dir=temp_profiles_dir,
project_dir=custom_project,
target_path=custom_target,
log_level=custom_log_level,
)
assert settings.profiles_dir == temp_profiles_dir
assert settings.project_dir == custom_project
assert settings.target_path == custom_target
assert settings.log_level == custom_log_level
def test_uses_prefect_logging_level_when_not_set(
self, mock_get_current_settings_error: Mock
):
"""Test that settings use Prefect's logging level when not explicitly set."""
settings = PrefectDbtSettings()
assert settings.log_level == EventLevel.ERROR
def test_environment_variable_loading(self, monkeypatch: pytest.MonkeyPatch):
"""Test that settings load from environment variables."""
monkeypatch.setenv("DBT_PROJECT_DIR", "/env/project")
monkeypatch.setenv("DBT_TARGET_PATH", "env_target")
monkeypatch.setenv("DBT_LOG_LEVEL", "debug")
settings = PrefectDbtSettings()
assert settings.project_dir == Path("/env/project")
assert settings.target_path == Path("env_target")
assert settings.log_level == EventLevel.DEBUG
def test_environment_variables_override_defaults(
self, monkeypatch: pytest.MonkeyPatch, temp_profiles_dir: Path
):
"""Test that environment variables override default values."""
monkeypatch.setenv("DBT_PROFILES_DIR", str(temp_profiles_dir))
monkeypatch.setenv("DBT_PROJECT_DIR", "/env/override")
settings = PrefectDbtSettings()
assert settings.profiles_dir == temp_profiles_dir
assert settings.project_dir == Path("/env/override")
def test_invalid_log_level_raises_error(self, monkeypatch: pytest.MonkeyPatch):
"""Test that invalid log level raises validation error."""
monkeypatch.setenv("DBT_LOG_LEVEL", "invalid_level")
with pytest.raises(
ValueError,
match="Input should be 'debug', 'test', 'info', 'warn' or 'error'",
):
PrefectDbtSettings()
| TestPrefectDbtSettingsInitialization |
python | scipy__scipy | scipy/sparse/linalg/_matfuncs.py | {
"start": 4642,
"end": 5607
} | class ____(LinearOperator):
def __init__(self, A, p, structure=None):
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if p < 0:
raise ValueError('expected p to be a non-negative integer')
self._A = A
self._p = p
self._structure = structure
self.dtype = A.dtype
self.ndim = A.ndim
self.shape = A.shape
def _matvec(self, x):
for i in range(self._p):
x = self._A.dot(x)
return x
def _rmatvec(self, x):
A_T = self._A.T
x = x.ravel()
for i in range(self._p):
x = A_T.dot(x)
return x
def _matmat(self, X):
for i in range(self._p):
X = _smart_matrix_product(self._A, X, structure=self._structure)
return X
@property
def T(self):
return MatrixPowerOperator(self._A.T, self._p)
| MatrixPowerOperator |
python | kamyu104__LeetCode-Solutions | Python/single-number.py | {
"start": 76,
"end": 229
} | class ____(object):
"""
:type nums: List[int]
:rtype: int
"""
def singleNumber(self, A):
return reduce(operator.xor, A)
| Solution |
python | huggingface__transformers | src/transformers/models/llava/modeling_llava.py | {
"start": 1349,
"end": 2335
} | class ____(BaseModelOutputWithPast):
r"""
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for Llava causal language model (or autoregressive) outputs.
"""
)
| LlavaModelOutputWithPast |
python | pytorch__pytorch | test/test_ops.py | {
"start": 78482,
"end": 90698
} | class ____(TestCase):
# Checks if the operator (if it is composite) is written to support most
# backends and Tensor subclasses. See "CompositeImplicitAutograd Compliance"
# in aten/src/ATen/native/README.md for more details
@unittest.skipIf(
IS_FBCODE or IS_SANDCASTLE, "__torch_dispatch__ does not work in fbcode"
)
@ops(op_db, allowed_dtypes=(torch.float,))
def test_operator(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
composite_compliance.check_with_mode(op, args, kwargs, self.assertEqual)
composite_compliance.check_all_permutations(
op, args, kwargs, self.assertEqual
)
@unittest.skipIf(
IS_FBCODE or IS_SANDCASTLE, "__torch_dispatch__ does not work in fbcode"
)
@ops([op for op in op_db if op.supports_autograd], allowed_dtypes=(torch.float,))
def test_backward(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
# We pass assertEqual so that decorators like `toleranceOverride`
# actually work (otherwise they silently do nothing!)
composite_compliance.check_backward_formula(
op.get_op(),
args,
kwargs,
sample.output_process_fn_grad,
op.gradcheck_wrapper,
self.assertEqual,
)
@unittest.skipIf(
IS_FBCODE or IS_SANDCASTLE, "__torch_dispatch__ does not work in fbcode"
)
@ops(op_db, allowed_dtypes=(torch.float,))
def test_forward_ad(self, device, dtype, op):
if torch.float not in op.supported_backward_dtypes(device):
raise unittest.SkipTest("Does not support autograd")
if not op.supports_forward_ad:
raise unittest.SkipTest("Does not support forward_ad")
samples = op.sample_inputs(device, dtype, requires_grad=True)
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
# We pass assertEqual so that decorators like `toleranceOverride`
# actually work (otherwise they silently do nothing!)
composite_compliance.check_forward_ad_formula(
op.get_op(), args, kwargs, op.gradcheck_wrapper, self.assertEqual
)
    @ops(op_db, allowed_dtypes=(torch.float,))
    def test_cow_input(self, device, dtype, op):
        """Verify the op neither silently materializes nor mutates COW inputs.

        Every strided tensor input (and, when backward runs, every output
        grad) is converted to a copy-on-write tensor via ``torch._lazy_clone``.
        After the forward call -- and the backward call when autograd is
        supported -- each COW input must either still be COW or be on the op's
        explicit materialization allow-list, and in both cases its visible
        data must be unchanged.
        """
        samples = op.sample_inputs(device, dtype, requires_grad=op.supports_autograd)
        def is_strided_tensor(arg):
            # Only strided tensors are lazily cloned below; other layouts pass through.
            return torch.is_tensor(arg) and arg.layout == torch.strided
        def check_ignore_materialize(idx_or_kw, allow_list):
            # True when this positional index / keyword arg is allowed to materialize.
            return (allow_list is not None) and (idx_or_kw in allow_list)
        def check_cow_input(
            arg,
            arg_copy,
            arg_raw,
            idx_or_kw,
            backward_or_forward="forward",
            supports_cow_input_no_materialize=op.supports_cow_input_no_materialize_forward,
            allow_list=op.allow_cow_input_materialize_forward,
        ):
            # Asserts the COW invariants for one input after an op call:
            #   * the raw (pre-clone) tensor stays COW,
            #   * the cloned input stays COW unless materialization is allowed,
            #   * the visible data matches the pristine copy either way.
            arg_name = (
                f"Argument {idx_or_kw}"
                if isinstance(idx_or_kw, int)
                else f"Keyword argument '{idx_or_kw}'"
            ) + f" during {backward_or_forward} call"
            if is_strided_tensor(arg):
                self.assertTrue(
                    torch._C._is_cow_tensor(arg_raw),
                    msg=(
                        f"{arg_name} raw input should remain COW, but it "
                        "unexpectedly materialized."
                    ),
                )
                is_cow = torch._C._is_cow_tensor(arg)
                if supports_cow_input_no_materialize and not check_ignore_materialize(
                    idx_or_kw, allow_list
                ):
                    self.assertTrue(
                        is_cow,
                        msg=(
                            f"{arg_name} unexpectedly materializes. "
                            f"Either set `supports_cow_input_no_materialize_{backward_or_forward}=False` "
                            "in this operation's OpInfo, add the arg to the OpInfo's "
                            f"`allow_cow_input_materialize_{backward_or_forward}` list, or change the "
                            "implementation to avoid materialization."
                        ),
                    )
                if is_cow:
                    self.assertTrue(
                        torch.allclose(arg, arg_copy, rtol=0, atol=0, equal_nan=True),
                        msg=(
                            f"{arg_name} avoided materialization, "
                            "but the operation mutated its data."
                        ),
                    )
                else:
                    self.assertTrue(
                        torch.allclose(
                            arg_raw, arg_copy, rtol=0, atol=0, equal_nan=True
                        ),
                        msg=(
                            f"{arg_name} materialized, which is allowed in this "
                            "case, but the COW input data was mutated, which is "
                            "not allowed."
                        ),
                    )
        for sample in samples:
            args_raw = [sample.input] + list(sample.args)
            kwargs_raw = sample.kwargs
            args_copy = []
            args = []
            kwargs_copy = {}
            kwargs = {}
            # Convert strided tensor inputs to COW tensors and make copies of
            # all inputs
            for arg in args_raw:
                if is_strided_tensor(arg):
                    args_copy.append(arg.detach().clone())
                    args.append(torch._lazy_clone(arg))
                else:
                    if torch.is_tensor(arg):
                        args_copy.append(arg.detach().clone())
                    else:
                        args_copy.append(copy.deepcopy(arg))
                    args.append(arg)
            for kw, arg in kwargs_raw.items():
                if is_strided_tensor(arg):
                    kwargs_copy[kw] = arg.detach().clone()
                    kwargs[kw] = torch._lazy_clone(arg)
                else:
                    if torch.is_tensor(arg):
                        kwargs_copy[kw] = arg.detach().clone()
                    else:
                        kwargs_copy[kw] = copy.deepcopy(arg)
                    kwargs[kw] = arg
            leaf_tensors = composite_compliance.gather_leaf_tensors(args, kwargs)
            # Call forward op
            results_raw = op.get_op()(*args, **kwargs)
            # Check that COW inputs remain COW after the forward op is executed
            for idx, arg in enumerate(args):
                check_cow_input(arg, args_copy[idx], args_raw[idx], idx)
            for kw, arg in kwargs.items():
                check_cow_input(arg, kwargs_copy[kw], kwargs_raw[kw], kw)
            # Call backward op if it is supported. This part of the test is
            # based on `composite_compliance.check_backward_formula`
            if (
                op.supports_autograd
                and len(leaf_tensors) > 0
                and not op.skip_cow_input_backward
            ):
                if sample.output_process_fn_grad is not None:
                    results_raw = sample.output_process_fn_grad(results_raw)
                leaf_results = pytree.tree_leaves(results_raw)
                results = [
                    r
                    for r in leaf_results
                    if isinstance(r, torch.Tensor) and r.requires_grad
                ]
                all_results_strided = all(
                    is_strided_tensor(result) for result in results
                )
                # Only test backward if the results are strided tensors
                if all_results_strided:
                    output_grads_raw = [
                        torch.ones(r.shape, device=r.device, dtype=r.dtype)
                        for r in results
                    ]
                    output_grads_copy = []
                    output_grads = []
                    # Convert output grads to COW tensors and make copies
                    for output_grad in output_grads_raw:
                        output_grads_copy.append(output_grad.detach().clone())
                        output_grads.append(torch._lazy_clone(output_grad))
                    torch.autograd.grad(
                        results,
                        leaf_tensors,
                        output_grads,
                        allow_unused=True,
                        retain_graph=True,
                    )
                    # Check that COW inputs remain COW after the backward op is executed
                    for idx, arg in enumerate(args):
                        check_cow_input(
                            arg,
                            args_copy[idx],
                            args_raw[idx],
                            idx,
                            backward_or_forward="backward",
                            supports_cow_input_no_materialize=op.supports_cow_input_no_materialize_backward,
                            allow_list=op.allow_cow_input_materialize_backward,
                        )
                    # Check that COW output grads remain COW after the backward op is executed
                    for idx, output_grad in enumerate(output_grads):
                        check_cow_input(
                            output_grad,
                            output_grads_copy[idx],
                            output_grads_raw[idx],
                            f"output grad {idx}",
                            backward_or_forward="backward",
                            supports_cow_input_no_materialize=op.supports_cow_input_no_materialize_backward,
                            allow_list=op.allow_cow_input_materialize_backward,
                        )
    @ops(op_db, allowed_dtypes=(torch.float,))
    def test_view_replay(self, device, dtype, op):
        """Check that view outputs can be replayed via view_func / rev_view_func.

        For each op output that is a direct view of the sample input, the
        recorded forward view function must reproduce the view (same metadata
        and values) on a fresh clone of the input, and the reverse view
        function must recover a view of the output carrying the input's
        metadata.
        """
        def _assert_match_metadata(a, b):
            # Two tensors "match" when all view-relevant metadata agrees.
            self.assertEqual(a.size(), b.size())
            self.assertEqual(a.stride(), b.stride())
            self.assertEqual(a.storage_offset(), b.storage_offset())
            self.assertEqual(a.device, b.device)
            self.assertEqual(a.dtype, b.dtype)
        # ensure view replay is enabled
        with torch.autograd._force_original_view_tracking(True):
            for sample in op.sample_inputs(device, dtype, requires_grad=False):
                inp = sample.input
                outs = op(inp, *sample.args, **sample.kwargs)
                if not isinstance(outs, (tuple, list)):
                    outs = [outs]
                # for all outputs that are views of the input, we should be able to replay the
                # forward and reverse views via a functioning view_func() / rev_view_func().
                for out in outs:
                    if not (
                        isinstance(out, torch.Tensor)
                        and out._is_view()
                        and out._base is inp
                    ):
                        continue
                    # forward view_func
                    new_inp = inp.clone()
                    _assert_match_metadata(new_inp, inp)
                    new_out = out._view_func_unsafe(new_inp)
                    _assert_match_metadata(new_out, out)
                    self.assertEqual(new_out, out)
                    # reverse view_func
                    new_out = out.detach()
                    new_inp = out._rev_view_func_unsafe(new_out)
                    _assert_match_metadata(new_inp, inp)
                    self.assertTrue(new_inp._is_view())
                    self.assertTrue(new_inp._base is new_out)
@unMarkDynamoStrictTest
| TestCompositeCompliance |
python | wandb__wandb | wandb/old/summary.py | {
"start": 11399,
"end": 12136
} | class ____(Summary):
    def __init__(self, run):
        """Summary backed by a JSON file inside *run*'s directory."""
        super().__init__(run)
        # Path of the on-disk summary JSON within the run directory.
        self._fname = os.path.join(run.dir, filenames.SUMMARY_FNAME)
        self.load()
def load(self):
try:
with open(self._fname) as f:
self._json_dict = json.load(f)
except (OSError, ValueError):
self._json_dict = {}
    def _write(self, commit=False):
        """Serialize the summary dict to disk and fsync it.

        The *commit* flag is accepted for interface compatibility only.
        """
        # TODO: we just ignore commit to ensure backward capability
        with open(self._fname, "w") as f:
            f.write(util.json_dumps_safer(self._json_dict))
            f.write("\n")
            f.flush()
            # Force the bytes through the OS cache to disk.
            os.fsync(f.fileno())
        if self._h5:
            # NOTE(review): _h5 appears to come from the Summary base class
            # (not visible here) -- confirm before relying on its semantics.
            self._h5.close()
            self._h5 = None
| FileSummary |
python | getsentry__sentry | src/sentry/models/apitoken.py | {
"start": 1630,
"end": 1782
} | class ____(Exception):
"""the secret you are trying to read is read-once and cannot be accessed directly again"""
pass
| PlaintextSecretAlreadyRead |
python | walkccc__LeetCode | solutions/3369. Design an Array Statistics Tracker/3369.py | {
"start": 42,
"end": 1203
} | class ____:
    def __init__(self):
        # FIFO of numbers in insertion order (supports "remove first added").
        self.q = collections.deque()
        # number -> current frequency inside the window.
        self.count = collections.Counter()
        # Numbers kept in sorted order for O(log n) median queries.
        self.sortedList = SortedList()
        self.modeMaxHeap = []  # (-frequency, number): max-heap via negation
        # Running sum of the numbers currently in the window.
        self.sum = 0
def addNumber(self, number: int) -> None:
self.q.append(number)
self.count[number] += 1
self.sortedList.add(number)
heapq.heappush(self.modeMaxHeap, (-self.count[number], number))
self.sum += number
    def removeFirstAddedNumber(self) -> None:
        """Evict the oldest number from the window."""
        number = self.q.popleft()
        self.count[number] -= 1
        self.sortedList.remove(number)
        # Note: No need to update the heap now; we'll clean up stale entries when
        # getting the mode.
        self.sum -= number
    def getMean(self) -> int:
        """Return the floor of the mean of the numbers in the window."""
        return self.sum // len(self.q)
    def getMedian(self) -> int:
        """Return the element at index n // 2 of the sorted window (upper median)."""
        return self.sortedList[len(self.sortedList) // 2]
def getMode(self) -> int:
# Removes stale heap entries where frequency no longer matches.
while self.modeMaxHeap:
frequency = -self.modeMaxHeap[0][0]
number = self.modeMaxHeap[0][1]
if self.count[number] == frequency:
return number
heapq.heappop(self.modeMaxHeap)
| StatisticsTracker |
python | fsspec__filesystem_spec | fsspec/implementations/http.py | {
"start": 26892,
"end": 30675
} | class ____(AbstractAsyncStreamedFile):
    def __init__(
        self, fs, url, mode="rb", loop=None, session=None, size=None, **kwargs
    ):
        """Streaming, read-only file object over an HTTP(S) URL.

        Only binary read mode ("rb") is supported; the response body is
        consumed sequentially via ``read``.
        """
        self.url = url
        self.session = session
        self.r = None  # response object from session.get(...), opened lazily
        if mode != "rb":
            raise ValueError
        self.details = {"name": url, "size": None}
        self.kwargs = kwargs
        super().__init__(fs=fs, path=url, mode=mode, cache_type="none")
        self.size = size
    async def read(self, num=-1):
        """Read up to *num* bytes from the stream (all remaining if negative)."""
        if self.r is None:
            # Open the GET request on first use and keep it for later reads.
            r = await self.session.get(
                self.fs.encode_url(self.url), **self.kwargs
            ).__aenter__()
            self.fs._raise_not_found_for_status(r, self.url)
            self.r = r
        out = await self.r.content.read(num)
        self.loc += len(out)
        return out
    async def close(self):
        """Release the underlying HTTP response, then run the base close."""
        if self.r is not None:
            self.r.close()
            self.r = None
        await super().close()
async def get_range(session, url, start, end, file=None, **kwargs):
    """Fetch bytes ``[start, end)`` of *url* with an HTTP ``Range`` request.

    If *file* is given, the chunk is written into it at offset *start* and
    nothing is returned; otherwise the raw bytes are returned.
    """
    # explicit get a range when we know it must be safe
    kwargs = kwargs.copy()
    headers = kwargs.pop("headers", {}).copy()
    # HTTP Range end offsets are inclusive, hence end - 1.
    headers["Range"] = f"bytes={start}-{end - 1}"
    r = await session.get(url, headers=headers, **kwargs)
    r.raise_for_status()
    async with r:
        out = await r.read()
    if file:
        with open(file, "r+b") as f:  # noqa: ASYNC230
            f.seek(start)
            f.write(out)
    else:
        return out
async def _file_info(url, session, size_policy="head", **kwargs):
    """Call HEAD on the server to get details about the file (size/checksum etc.)

    Default operation is to explicitly allow redirects and use encoding
    'identity' (no compression) to get the true size of the target.
    """
    logger.debug("Retrieve file size for %s", url)
    kwargs = kwargs.copy()
    ar = kwargs.pop("allow_redirects", True)
    head = kwargs.get("headers", {}).copy()
    # Ask for the uncompressed representation so Content-Length is meaningful.
    head["Accept-Encoding"] = "identity"
    kwargs["headers"] = head
    info = {}
    if size_policy == "head":
        r = await session.head(url, allow_redirects=ar, **kwargs)
    elif size_policy == "get":
        r = await session.get(url, allow_redirects=ar, **kwargs)
    else:
        raise TypeError(f'size_policy must be "head" or "get", got {size_policy}')
    async with r:
        r.raise_for_status()
        if "Content-Length" in r.headers:
            # Some servers may choose to ignore Accept-Encoding and return
            # compressed content, in which case the returned size is unreliable.
            if "Content-Encoding" not in r.headers or r.headers["Content-Encoding"] in [
                "identity",
                "",
            ]:
                info["size"] = int(r.headers["Content-Length"])
        elif "Content-Range" in r.headers:
            # Header shape is "bytes start-end/total"; take the total.
            info["size"] = int(r.headers["Content-Range"].split("/")[1])
        if "Content-Type" in r.headers:
            # Strip any "; charset=..." suffix from the media type.
            info["mimetype"] = r.headers["Content-Type"].partition(";")[0]
        if r.headers.get("Accept-Ranges") == "none":
            # Some servers may explicitly discourage partial content requests, but
            # the lack of "Accept-Ranges" does not always indicate they would fail
            info["partial"] = False
        info["url"] = str(r.url)
        # Pass validator/checksum-style headers through verbatim.
        for checksum_field in ["ETag", "Content-MD5", "Digest", "Last-Modified"]:
            if r.headers.get(checksum_field):
                info[checksum_field] = r.headers[checksum_field]
    return info
async def _file_size(url, session=None, *args, **kwargs):
    """Return the size in bytes reported for *url*, or None if unknown.

    Extra positional/keyword arguments are forwarded to ``_file_info``
    (e.g. ``size_policy``).
    """
    if session is None:
        session = await get_client()
    # Pass *session* positionally: the original `_file_info(url,
    # session=session, *args, **kwargs)` form (star-args after a keyword
    # argument, flake8-bugbear B026) makes any non-empty *args bind
    # positionally to `session` and raise a duplicate-argument TypeError.
    info = await _file_info(url, session, *args, **kwargs)
    return info.get("size")
file_size = sync_wrapper(_file_size)
| AsyncStreamFile |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_batch.py | {
"start": 10084,
"end": 12890
} | class ____(GoogleCloudBaseOperator):
"""
List Cloud Batch tasks for a given job.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param job_name: Required. The name of the job for which to list tasks.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param filter: The filter based on which to list the jobs. If left empty, all the jobs are listed.
:param group_name: The name of the group that owns the task. By default, it's `group0`.
:param limit: The number of tasks to list.
If left empty, all the tasks matching the filter will be returned.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("project_id", "region", "job_name", "gcp_conn_id", "impersonation_chain", "group_name")
def __init__(
self,
project_id: str,
region: str,
job_name: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
group_name: str = "group0",
filter: str | None = None,
limit: int | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.job_name = job_name
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.group_name = group_name
self.filter = filter
self.limit = limit
if limit is not None and limit < 0:
raise AirflowException("The limit for the list jobs request should be greater or equal to zero")
def execute(self, context: Context):
hook: CloudBatchHook = CloudBatchHook(self.gcp_conn_id, self.impersonation_chain)
tasks_list = hook.list_tasks(
region=self.region,
project_id=self.project_id,
job_name=self.job_name,
group_name=self.group_name,
filter=self.filter,
limit=self.limit,
)
return [Task.to_dict(task) for task in tasks_list]
| CloudBatchListTasksOperator |
python | sympy__sympy | sympy/logic/boolalg.py | {
"start": 12392,
"end": 14451
} | class ____(BooleanAtom, metaclass=Singleton):
"""
SymPy version of ``False``, a singleton that can be accessed via ``S.false``.
This is the SymPy version of ``False``, for use in the logic module. The
primary advantage of using ``false`` instead of ``False`` is that shorthand
Boolean operations like ``~`` and ``>>`` will work as expected on this class,
whereas with ``False`` they act bitwise on 0. Functions in the logic module
will return this class when they evaluate to false.
Notes
======
See the notes section in :py:class:`sympy.logic.boolalg.BooleanTrue`
Examples
========
>>> from sympy import sympify, true, false, Or
>>> sympify(False)
False
>>> _ is False, _ is false
(False, True)
>>> Or(true, false)
True
>>> _ is true
True
Python operators give a boolean result for false but a
bitwise result for False
>>> ~false, ~False # doctest: +SKIP
(True, -1)
>>> false >> false, False >> False
(True, 0)
See Also
========
sympy.logic.boolalg.BooleanTrue
"""
    def __bool__(self):
        # S.false is falsy, like the builtin False.
        return False
    def __hash__(self):
        # Hash like the builtin False so the two interchange as dict/set keys.
        return hash(False)
    def __eq__(self, other):
        # Compare equal to the Python builtin False (and unequal to True);
        # defer to the superclass for everything else.
        if other is True:
            return False
        if other is False:
            return True
        return super().__eq__(other)
    @property
    def negated(self):
        # Logical negation: ~false is true.
        return true
    def as_set(self):
        """
        Rewrite logic operators and relationals in terms of real sets.

        Examples
        ========

        >>> from sympy import false
        >>> false.as_set()
        EmptySet
        """
        return S.EmptySet
true = BooleanTrue()
false = BooleanFalse()
# We want S.true and S.false to work, rather than S.BooleanTrue and
# S.BooleanFalse, but making the class and instance names the same causes some
# major issues (like the inability to import the class directly from this
# file).
S.true = true
S.false = false
# sympify(True)/sympify(False) map Python bools onto these singletons.
_sympy_converter[bool] = lambda x: true if x else false
| BooleanFalse |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/scheduler/scheduler.py | {
"start": 916,
"end": 1026
} | class ____(DagsterSchedulerError):
"""Errors raised when fetching a schedule."""
| DagsterScheduleDoesNotExist |
python | pypa__pip | src/pip/_vendor/rich/style.py | {
"start": 984,
"end": 26136
} | class ____:
"""A terminal style.
A terminal style consists of a color (`color`), a background color (`bgcolor`), and a number of attributes, such
as bold, italic etc. The attributes have 3 states: they can either be on
(``True``), off (``False``), or not set (``None``).
Args:
color (Union[Color, str], optional): Color of terminal text. Defaults to None.
bgcolor (Union[Color, str], optional): Color of terminal background. Defaults to None.
bold (bool, optional): Enable bold text. Defaults to None.
dim (bool, optional): Enable dim text. Defaults to None.
italic (bool, optional): Enable italic text. Defaults to None.
underline (bool, optional): Enable underlined text. Defaults to None.
blink (bool, optional): Enabled blinking text. Defaults to None.
blink2 (bool, optional): Enable fast blinking text. Defaults to None.
reverse (bool, optional): Enabled reverse text. Defaults to None.
conceal (bool, optional): Enable concealed text. Defaults to None.
strike (bool, optional): Enable strikethrough text. Defaults to None.
underline2 (bool, optional): Enable doubly underlined text. Defaults to None.
frame (bool, optional): Enable framed text. Defaults to None.
encircle (bool, optional): Enable encircled text. Defaults to None.
overline (bool, optional): Enable overlined text. Defaults to None.
link (str, link): Link URL. Defaults to None.
"""
_color: Optional[Color]
_bgcolor: Optional[Color]
_attributes: int
_set_attributes: int
_hash: Optional[int]
_null: bool
_meta: Optional[bytes]
__slots__ = [
"_color",
"_bgcolor",
"_attributes",
"_set_attributes",
"_link",
"_link_id",
"_ansi",
"_style_definition",
"_hash",
"_null",
"_meta",
]
# maps bits on to SGR parameter
_style_map = {
0: "1",
1: "2",
2: "3",
3: "4",
4: "5",
5: "6",
6: "7",
7: "8",
8: "9",
9: "21",
10: "51",
11: "52",
12: "53",
}
STYLE_ATTRIBUTES = {
"dim": "dim",
"d": "dim",
"bold": "bold",
"b": "bold",
"italic": "italic",
"i": "italic",
"underline": "underline",
"u": "underline",
"blink": "blink",
"blink2": "blink2",
"reverse": "reverse",
"r": "reverse",
"conceal": "conceal",
"c": "conceal",
"strike": "strike",
"s": "strike",
"underline2": "underline2",
"uu": "underline2",
"frame": "frame",
"encircle": "encircle",
"overline": "overline",
"o": "overline",
}
def __init__(
self,
*,
color: Optional[Union[Color, str]] = None,
bgcolor: Optional[Union[Color, str]] = None,
bold: Optional[bool] = None,
dim: Optional[bool] = None,
italic: Optional[bool] = None,
underline: Optional[bool] = None,
blink: Optional[bool] = None,
blink2: Optional[bool] = None,
reverse: Optional[bool] = None,
conceal: Optional[bool] = None,
strike: Optional[bool] = None,
underline2: Optional[bool] = None,
frame: Optional[bool] = None,
encircle: Optional[bool] = None,
overline: Optional[bool] = None,
link: Optional[str] = None,
meta: Optional[Dict[str, Any]] = None,
):
self._ansi: Optional[str] = None
self._style_definition: Optional[str] = None
def _make_color(color: Union[Color, str]) -> Color:
return color if isinstance(color, Color) else Color.parse(color)
self._color = None if color is None else _make_color(color)
self._bgcolor = None if bgcolor is None else _make_color(bgcolor)
self._set_attributes = sum(
(
bold is not None,
dim is not None and 2,
italic is not None and 4,
underline is not None and 8,
blink is not None and 16,
blink2 is not None and 32,
reverse is not None and 64,
conceal is not None and 128,
strike is not None and 256,
underline2 is not None and 512,
frame is not None and 1024,
encircle is not None and 2048,
overline is not None and 4096,
)
)
self._attributes = (
sum(
(
bold and 1 or 0,
dim and 2 or 0,
italic and 4 or 0,
underline and 8 or 0,
blink and 16 or 0,
blink2 and 32 or 0,
reverse and 64 or 0,
conceal and 128 or 0,
strike and 256 or 0,
underline2 and 512 or 0,
frame and 1024 or 0,
encircle and 2048 or 0,
overline and 4096 or 0,
)
)
if self._set_attributes
else 0
)
self._link = link
self._meta = None if meta is None else dumps(meta)
self._link_id = (
f"{randint(0, 999999)}{hash(self._meta)}" if (link or meta) else ""
)
self._hash: Optional[int] = None
self._null = not (self._set_attributes or color or bgcolor or link or meta)
@classmethod
def null(cls) -> "Style":
"""Create an 'null' style, equivalent to Style(), but more performant."""
return NULL_STYLE
@classmethod
def from_color(
cls, color: Optional[Color] = None, bgcolor: Optional[Color] = None
) -> "Style":
"""Create a new style with colors and no attributes.
Returns:
color (Optional[Color]): A (foreground) color, or None for no color. Defaults to None.
bgcolor (Optional[Color]): A (background) color, or None for no color. Defaults to None.
"""
style: Style = cls.__new__(Style)
style._ansi = None
style._style_definition = None
style._color = color
style._bgcolor = bgcolor
style._set_attributes = 0
style._attributes = 0
style._link = None
style._link_id = ""
style._meta = None
style._null = not (color or bgcolor)
style._hash = None
return style
@classmethod
def from_meta(cls, meta: Optional[Dict[str, Any]]) -> "Style":
"""Create a new style with meta data.
Returns:
meta (Optional[Dict[str, Any]]): A dictionary of meta data. Defaults to None.
"""
style: Style = cls.__new__(Style)
style._ansi = None
style._style_definition = None
style._color = None
style._bgcolor = None
style._set_attributes = 0
style._attributes = 0
style._link = None
style._meta = dumps(meta)
style._link_id = f"{randint(0, 999999)}{hash(style._meta)}"
style._hash = None
style._null = not (meta)
return style
@classmethod
def on(cls, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Style":
"""Create a blank style with meta information.
Example:
style = Style.on(click=self.on_click)
Args:
meta (Optional[Dict[str, Any]], optional): An optional dict of meta information.
**handlers (Any): Keyword arguments are translated in to handlers.
Returns:
Style: A Style with meta information attached.
"""
meta = {} if meta is None else meta
meta.update({f"@{key}": value for key, value in handlers.items()})
return cls.from_meta(meta)
bold = _Bit(0)
dim = _Bit(1)
italic = _Bit(2)
underline = _Bit(3)
blink = _Bit(4)
blink2 = _Bit(5)
reverse = _Bit(6)
conceal = _Bit(7)
strike = _Bit(8)
underline2 = _Bit(9)
frame = _Bit(10)
encircle = _Bit(11)
overline = _Bit(12)
@property
def link_id(self) -> str:
"""Get a link id, used in ansi code for links."""
return self._link_id
def __str__(self) -> str:
"""Re-generate style definition from attributes."""
if self._style_definition is None:
attributes: List[str] = []
append = attributes.append
bits = self._set_attributes
if bits & 0b0000000001111:
if bits & 1:
append("bold" if self.bold else "not bold")
if bits & (1 << 1):
append("dim" if self.dim else "not dim")
if bits & (1 << 2):
append("italic" if self.italic else "not italic")
if bits & (1 << 3):
append("underline" if self.underline else "not underline")
if bits & 0b0000111110000:
if bits & (1 << 4):
append("blink" if self.blink else "not blink")
if bits & (1 << 5):
append("blink2" if self.blink2 else "not blink2")
if bits & (1 << 6):
append("reverse" if self.reverse else "not reverse")
if bits & (1 << 7):
append("conceal" if self.conceal else "not conceal")
if bits & (1 << 8):
append("strike" if self.strike else "not strike")
if bits & 0b1111000000000:
if bits & (1 << 9):
append("underline2" if self.underline2 else "not underline2")
if bits & (1 << 10):
append("frame" if self.frame else "not frame")
if bits & (1 << 11):
append("encircle" if self.encircle else "not encircle")
if bits & (1 << 12):
append("overline" if self.overline else "not overline")
if self._color is not None:
append(self._color.name)
if self._bgcolor is not None:
append("on")
append(self._bgcolor.name)
if self._link:
append("link")
append(self._link)
self._style_definition = " ".join(attributes) or "none"
return self._style_definition
def __bool__(self) -> bool:
"""A Style is false if it has no attributes, colors, or links."""
return not self._null
def _make_ansi_codes(self, color_system: ColorSystem) -> str:
"""Generate ANSI codes for this style.
Args:
color_system (ColorSystem): Color system.
Returns:
str: String containing codes.
"""
if self._ansi is None:
sgr: List[str] = []
append = sgr.append
_style_map = self._style_map
attributes = self._attributes & self._set_attributes
if attributes:
if attributes & 1:
append(_style_map[0])
if attributes & 2:
append(_style_map[1])
if attributes & 4:
append(_style_map[2])
if attributes & 8:
append(_style_map[3])
if attributes & 0b0000111110000:
for bit in range(4, 9):
if attributes & (1 << bit):
append(_style_map[bit])
if attributes & 0b1111000000000:
for bit in range(9, 13):
if attributes & (1 << bit):
append(_style_map[bit])
if self._color is not None:
sgr.extend(self._color.downgrade(color_system).get_ansi_codes())
if self._bgcolor is not None:
sgr.extend(
self._bgcolor.downgrade(color_system).get_ansi_codes(
foreground=False
)
)
self._ansi = ";".join(sgr)
return self._ansi
    @classmethod
    @lru_cache(maxsize=1024)
    def normalize(cls, style: str) -> str:
        """Normalize a style definition so that styles with the same effect have the same string
        representation.

        Args:
            style (str): A style definition.

        Returns:
            str: Normal form of style definition.
        """
        try:
            return str(cls.parse(style))
        except errors.StyleSyntaxError:
            # Unparseable definitions get a best-effort textual normalization.
            return style.strip().lower()
@classmethod
def pick_first(cls, *values: Optional[StyleType]) -> StyleType:
"""Pick first non-None style."""
for value in values:
if value is not None:
return value
raise ValueError("expected at least one non-None style")
def __rich_repr__(self) -> Result:
yield "color", self.color, None
yield "bgcolor", self.bgcolor, None
yield "bold", self.bold, None,
yield "dim", self.dim, None,
yield "italic", self.italic, None
yield "underline", self.underline, None,
yield "blink", self.blink, None
yield "blink2", self.blink2, None
yield "reverse", self.reverse, None
yield "conceal", self.conceal, None
yield "strike", self.strike, None
yield "underline2", self.underline2, None
yield "frame", self.frame, None
yield "encircle", self.encircle, None
yield "link", self.link, None
if self._meta:
yield "meta", self.meta
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Style):
return NotImplemented
return self.__hash__() == other.__hash__()
def __ne__(self, other: Any) -> bool:
if not isinstance(other, Style):
return NotImplemented
return self.__hash__() != other.__hash__()
def __hash__(self) -> int:
if self._hash is not None:
return self._hash
self._hash = hash(_hash_getter(self))
return self._hash
@property
def color(self) -> Optional[Color]:
"""The foreground color or None if it is not set."""
return self._color
@property
def bgcolor(self) -> Optional[Color]:
"""The background color or None if it is not set."""
return self._bgcolor
@property
def link(self) -> Optional[str]:
"""Link text, if set."""
return self._link
@property
def transparent_background(self) -> bool:
"""Check if the style specified a transparent background."""
return self.bgcolor is None or self.bgcolor.is_default
@property
def background_style(self) -> "Style":
"""A Style with background only."""
return Style(bgcolor=self.bgcolor)
@property
def meta(self) -> Dict[str, Any]:
"""Get meta information (can not be changed after construction)."""
return {} if self._meta is None else cast(Dict[str, Any], loads(self._meta))
@property
def without_color(self) -> "Style":
"""Get a copy of the style with color removed."""
if self._null:
return NULL_STYLE
style: Style = self.__new__(Style)
style._ansi = None
style._style_definition = None
style._color = None
style._bgcolor = None
style._attributes = self._attributes
style._set_attributes = self._set_attributes
style._link = self._link
style._link_id = f"{randint(0, 999999)}" if self._link else ""
style._null = False
style._meta = None
style._hash = None
return style
@classmethod
@lru_cache(maxsize=4096)
def parse(cls, style_definition: str) -> "Style":
"""Parse a style definition.
Args:
style_definition (str): A string containing a style.
Raises:
errors.StyleSyntaxError: If the style definition syntax is invalid.
Returns:
`Style`: A Style instance.
"""
if style_definition.strip() == "none" or not style_definition:
return cls.null()
STYLE_ATTRIBUTES = cls.STYLE_ATTRIBUTES
color: Optional[str] = None
bgcolor: Optional[str] = None
attributes: Dict[str, Optional[Any]] = {}
link: Optional[str] = None
words = iter(style_definition.split())
for original_word in words:
word = original_word.lower()
if word == "on":
word = next(words, "")
if not word:
raise errors.StyleSyntaxError("color expected after 'on'")
try:
Color.parse(word)
except ColorParseError as error:
raise errors.StyleSyntaxError(
f"unable to parse {word!r} as background color; {error}"
) from None
bgcolor = word
elif word == "not":
word = next(words, "")
attribute = STYLE_ATTRIBUTES.get(word)
if attribute is None:
raise errors.StyleSyntaxError(
f"expected style attribute after 'not', found {word!r}"
)
attributes[attribute] = False
elif word == "link":
word = next(words, "")
if not word:
raise errors.StyleSyntaxError("URL expected after 'link'")
link = word
elif word in STYLE_ATTRIBUTES:
attributes[STYLE_ATTRIBUTES[word]] = True
else:
try:
Color.parse(word)
except ColorParseError as error:
raise errors.StyleSyntaxError(
f"unable to parse {word!r} as color; {error}"
) from None
color = word
style = Style(color=color, bgcolor=bgcolor, link=link, **attributes)
return style
@lru_cache(maxsize=1024)
def get_html_style(self, theme: Optional[TerminalTheme] = None) -> str:
"""Get a CSS style rule."""
theme = theme or DEFAULT_TERMINAL_THEME
css: List[str] = []
append = css.append
color = self.color
bgcolor = self.bgcolor
if self.reverse:
color, bgcolor = bgcolor, color
if self.dim:
foreground_color = (
theme.foreground_color if color is None else color.get_truecolor(theme)
)
color = Color.from_triplet(
blend_rgb(foreground_color, theme.background_color, 0.5)
)
if color is not None:
theme_color = color.get_truecolor(theme)
append(f"color: {theme_color.hex}")
append(f"text-decoration-color: {theme_color.hex}")
if bgcolor is not None:
theme_color = bgcolor.get_truecolor(theme, foreground=False)
append(f"background-color: {theme_color.hex}")
if self.bold:
append("font-weight: bold")
if self.italic:
append("font-style: italic")
if self.underline:
append("text-decoration: underline")
if self.strike:
append("text-decoration: line-through")
if self.overline:
append("text-decoration: overline")
return "; ".join(css)
    @classmethod
    def combine(cls, styles: Iterable["Style"]) -> "Style":
        """Combine styles and get result.

        Args:
            styles (Iterable[Style]): Styles to combine.

        Returns:
            Style: A new style instance.
        """
        # NOTE(review): an empty iterable makes next() raise StopIteration;
        # callers are expected to supply at least one style.
        iter_styles = iter(styles)
        return sum(iter_styles, next(iter_styles))
    @classmethod
    def chain(cls, *styles: "Style") -> "Style":
        """Combine styles from positional argument in to a single style.

        Args:
            *styles (Iterable[Style]): Styles to combine.

        Returns:
            Style: A new style instance.
        """
        # Same folding strategy as combine(), seeded with the first style.
        iter_styles = iter(styles)
        return sum(iter_styles, next(iter_styles))
def copy(self) -> "Style":
"""Get a copy of this style.
Returns:
Style: A new Style instance with identical attributes.
"""
if self._null:
return NULL_STYLE
style: Style = self.__new__(Style)
style._ansi = self._ansi
style._style_definition = self._style_definition
style._color = self._color
style._bgcolor = self._bgcolor
style._attributes = self._attributes
style._set_attributes = self._set_attributes
style._link = self._link
style._link_id = f"{randint(0, 999999)}" if self._link else ""
style._hash = self._hash
style._null = False
style._meta = self._meta
return style
@lru_cache(maxsize=128)
def clear_meta_and_links(self) -> "Style":
"""Get a copy of this style with link and meta information removed.
Returns:
Style: New style object.
"""
if self._null:
return NULL_STYLE
style: Style = self.__new__(Style)
style._ansi = self._ansi
style._style_definition = self._style_definition
style._color = self._color
style._bgcolor = self._bgcolor
style._attributes = self._attributes
style._set_attributes = self._set_attributes
style._link = None
style._link_id = ""
style._hash = None
style._null = False
style._meta = None
return style
def update_link(self, link: Optional[str] = None) -> "Style":
"""Get a copy with a different value for link.
Args:
link (str, optional): New value for link. Defaults to None.
Returns:
Style: A new Style instance.
"""
style: Style = self.__new__(Style)
style._ansi = self._ansi
style._style_definition = self._style_definition
style._color = self._color
style._bgcolor = self._bgcolor
style._attributes = self._attributes
style._set_attributes = self._set_attributes
style._link = link
style._link_id = f"{randint(0, 999999)}" if link else ""
style._hash = None
style._null = False
style._meta = self._meta
return style
def render(
self,
text: str = "",
*,
color_system: Optional[ColorSystem] = ColorSystem.TRUECOLOR,
legacy_windows: bool = False,
) -> str:
"""Render the ANSI codes for the style.
Args:
text (str, optional): A string to style. Defaults to "".
color_system (Optional[ColorSystem], optional): Color system to render to. Defaults to ColorSystem.TRUECOLOR.
Returns:
str: A string containing ANSI style codes.
"""
if not text or color_system is None:
return text
attrs = self._ansi or self._make_ansi_codes(color_system)
rendered = f"\x1b[{attrs}m{text}\x1b[0m" if attrs else text
if self._link and not legacy_windows:
rendered = (
f"\x1b]8;id={self._link_id};{self._link}\x1b\\{rendered}\x1b]8;;\x1b\\"
)
return rendered
def test(self, text: Optional[str] = None) -> None:
"""Write text with style directly to terminal.
This method is for testing purposes only.
Args:
text (Optional[str], optional): Text to style or None for style name.
"""
text = text or str(self)
sys.stdout.write(f"{self.render(text)}\n")
@lru_cache(maxsize=1024)
def _add(self, style: Optional["Style"]) -> "Style":
if style is None or style._null:
return self
if self._null:
return style
new_style: Style = self.__new__(Style)
new_style._ansi = None
new_style._style_definition = None
new_style._color = style._color or self._color
new_style._bgcolor = style._bgcolor or self._bgcolor
new_style._attributes = (self._attributes & ~style._set_attributes) | (
style._attributes & style._set_attributes
)
new_style._set_attributes = self._set_attributes | style._set_attributes
new_style._link = style._link or self._link
new_style._link_id = style._link_id or self._link_id
new_style._null = style._null
if self._meta and style._meta:
new_style._meta = dumps({**self.meta, **style.meta})
else:
new_style._meta = self._meta or style._meta
new_style._hash = None
return new_style
def __add__(self, style: Optional["Style"]) -> "Style":
combined_style = self._add(style)
return combined_style.copy() if combined_style.link else combined_style
NULL_STYLE = Style()
| Style |
python | pypa__hatch | backend/src/hatchling/version/core.py | {
"start": 342,
"end": 2153
} | class ____:
def __init__(self, root: str, relative_path: str) -> None:
self.__relative_path = relative_path
self.__path = os.path.normpath(os.path.join(root, relative_path))
self.__cached_read_data: tuple | None = None
def read(self, *, pattern: str | bool) -> str:
if not os.path.isfile(self.__path):
message = f"file does not exist: {self.__relative_path}"
raise OSError(message)
with open(self.__path, encoding="utf-8") as f:
contents = f.read()
if not pattern or pattern is True:
pattern = DEFAULT_PATTERN
match = re.search(pattern, contents, flags=re.MULTILINE)
if not match:
message = f"unable to parse the version from the file: {self.__relative_path}"
raise ValueError(message)
groups = match.groupdict()
if "version" not in groups:
message = "no group named `version` was defined in the pattern"
raise ValueError(message)
self.__cached_read_data = groups["version"], contents, match.span("version")
return self.__cached_read_data[0]
def set_version(self, version: str) -> None:
_old_version, file_contents, (start, end) = self.__cached_read_data # type: ignore[misc]
with open(self.__path, "w", encoding="utf-8") as f:
f.write(f"{file_contents[:start]}{version}{file_contents[end:]}")
def write(self, version: str, template: str = DEFAULT_TEMPLATE) -> None:
template = template or DEFAULT_TEMPLATE
parent_dir = os.path.dirname(self.__path)
if not os.path.isdir(parent_dir):
os.makedirs(parent_dir)
with open(self.__path, "w", encoding="utf-8") as f:
f.write(template.format(version=version))
| VersionFile |
python | ApeWorX__ape | src/ape_ethereum/ecosystem.py | {
"start": 12781,
"end": 58520
} | class ____(EcosystemAPI):
# NOTE: `default_transaction_type` should be overridden
# if the chain doesn't support EIP-1559.
fee_token_symbol: str = "ETH"
@property
def config(self) -> EthereumConfig:
return cast(EthereumConfig, super().config)
@property
def default_transaction_type(self) -> TransactionType:
if provider := self.network_manager.active_provider:
# Check connected network first.
networks_to_check = [provider.network.name, self.default_network_name]
else:
networks_to_check = [self.default_network_name]
for name in networks_to_check:
network = self.get_network(name)
ecosystem_config = network.ecosystem_config
ecosystem_default = ecosystem_config.get(
"default_transaction_type", DEFAULT_TRANSACTION_TYPE
)
result: int = network.config.get("default_transaction_type", ecosystem_default)
return TransactionType(result)
return TransactionType(DEFAULT_TRANSACTION_TYPE)
@classmethod
def decode_address(cls, raw_address: RawAddress) -> AddressType:
return to_checksum_address(HexBytes(raw_address)[-20:].rjust(20, b"\x00"))
@classmethod
def encode_address(cls, address: AddressType) -> RawAddress:
return f"{address}"
def decode_transaction_type(self, transaction_type_id: Any) -> type["TransactionAPI"]:
if isinstance(transaction_type_id, TransactionType):
tx_type = transaction_type_id
elif isinstance(transaction_type_id, int):
tx_type = TransactionType(transaction_type_id)
else:
# Using hex or alike.
tx_type = self.conversion_manager.convert(transaction_type_id, int)
if tx_type is TransactionType.STATIC:
return StaticFeeTransaction
elif tx_type is TransactionType.ACCESS_LIST:
return AccessListTransaction
elif tx_type is TransactionType.SET_CODE:
return SetCodeTransaction
return DynamicFeeTransaction
def encode_contract_blueprint(
self, contract_type: "ContractType", *args, **kwargs
) -> "TransactionAPI":
# EIP-5202 implementation.
bytes_obj = contract_type.deployment_bytecode
contract_bytes = (bytes_obj.to_bytes() or b"") if bytes_obj else b""
header = kwargs.pop("header", BLUEPRINT_HEADER)
blueprint_bytecode = header + b"\x00" + contract_bytes
len_bytes = len(blueprint_bytecode).to_bytes(2, "big")
return_data_size = kwargs.pop("return_data_size", HexBytes("0x61"))
return_instructions = kwargs.pop("return_instructions", HexBytes("0x3d81600a3d39f3"))
deploy_bytecode = HexBytes(
return_data_size + len_bytes + return_instructions + blueprint_bytecode
)
converted_kwargs = self.conversion_manager.convert_method_kwargs(kwargs)
return self.encode_deployment(
deploy_bytecode, contract_type.constructor, **converted_kwargs
)
def get_proxy_info(self, address: AddressType) -> Optional[ProxyInfo]:
contract_code = self.chain_manager.get_code(address)
if isinstance(contract_code, bytes):
contract_code = to_hex(contract_code)
if not (code := contract_code[2:]):
return None
patterns = {
ProxyType.Minimal: r"^363d3d373d3d3d363d73(.{40})5af43d82803e903d91602b57fd5bf3",
ProxyType.ZeroAge: r"^3d3d3d3d363d3d37363d73(.{40})5af43d3d93803e602a57fd5bf3",
ProxyType.Clones: r"^36603057343d52307f830d2d700a97af574b186c80d40429385d24241565b08a7c559ba283a964d9b160203da23d3df35b3d3d3d3d363d3d37363d73(.{40})5af43d3d93803e605b57fd5bf3",
ProxyType.Vyper: r"^366000600037611000600036600073(.{40})5af4602c57600080fd5b6110006000f3",
ProxyType.VyperBeta: r"^366000600037611000600036600073(.{40})5af41558576110006000f3",
ProxyType.CWIA: r"^3d3d3d3d363d3d3761.{4}603736393661.{4}013d73(.{40})5af43d3d93803e603557fd5bf3.*",
ProxyType.OldCWIA: r"^363d3d3761.{4}603836393d3d3d3661.{4}013d73(.{40})5af43d82803e903d91603657fd5bf3.*",
ProxyType.SudoswapCWIA: r"^3d3d3d3d363d3d37605160353639366051013d73(.{40})5af43d3d93803e603357fd5bf3.*",
ProxyType.SoladyCWIA: r"36602c57343d527f9e4ac34f21c619cefc926c8bd93b54bf5a39c7ab2127a895af1cc0691d7e3dff593da1005b363d3d373d3d3d3d61.{4}806062363936013d73(.{40})5af43d3d93803e606057fd5bf3.*",
ProxyType.SplitsCWIA: r"36602f57343d527f9e4ac34f21c619cefc926c8bd93b54bf5a39c7ab2127a895af1cc0691d7e3dff60203da13d3df35b3d3d3d3d363d3d3761.{4}606736393661.{4}013d73(.{40})5af43d3d93803e606557fd5bf3.*",
ProxyType.SoladyPush0: r"^5f5f365f5f37365f73(.{40})5af43d5f5f3e6029573d5ffd5b3d5ff3",
ProxyType.SetCode: r"^ef0100(.{40})$",
}
for type_, pattern in patterns.items():
if match := re.match(pattern, code):
target = self.conversion_manager.convert(match.group(1), AddressType)
return ProxyInfo(type=type_, target=target)
sequence_pattern = r"363d3d373d3d3d363d30545af43d82803e903d91601857fd5bf3"
if re.match(sequence_pattern, code):
# the implementation is stored in the slot matching proxy address
slot = self.provider.get_storage(address, address)
target = self.conversion_manager.convert(slot[-20:], AddressType)
return ProxyInfo(type=ProxyType.Sequence, target=target)
def str_to_slot(text):
return int(to_hex(keccak(text=text)), 16)
slots = {
ProxyType.Standard: str_to_slot("eip1967.proxy.implementation") - 1,
ProxyType.Beacon: str_to_slot("eip1967.proxy.beacon") - 1,
ProxyType.OpenZeppelin: str_to_slot("org.zeppelinos.proxy.implementation"),
ProxyType.UUPS: str_to_slot("PROXIABLE"),
}
for _type, slot in slots.items():
try:
# TODO perf: use a batch call here when ape adds support
storage = self.provider.get_storage(address, slot)
except NotImplementedError:
# Break early on not-implemented error rather than attempting
# to try more proxy types.
break
if sum(storage) == 0:
continue
target = self.conversion_manager.convert(storage[-20:], AddressType)
# read `target.implementation()`
if _type == ProxyType.Beacon:
target = ContractCall(IMPLEMENTATION_ABI, target)(skip_trace=True)
return ProxyInfo(type=_type, target=target, abi=IMPLEMENTATION_ABI)
# safe >=1.1.0 provides `masterCopy()`, which is also stored in slot 0
# call it and check that target matches
try:
singleton = ContractCall(MASTER_COPY_ABI, address)(skip_trace=True)
slot_0 = self.provider.get_storage(address, 0)
target = self.conversion_manager.convert(slot_0[-20:], AddressType)
# NOTE: `target` is set in initialized proxies
if target != ZERO_ADDRESS and target == singleton:
return ProxyInfo(type=ProxyType.GnosisSafe, target=target, abi=MASTER_COPY_ABI)
except ApeException:
pass
# eip-897 delegate proxy, read `proxyType()` and `implementation()`
# perf: only make a call when a proxyType() selector is mentioned in the code
eip897_pattern = b"\x63" + keccak(text="proxyType()")[:4]
if eip897_pattern.hex() in code:
try:
proxy_type = ContractCall(PROXY_TYPE_ABI, address)(skip_trace=True)
if proxy_type not in (1, 2):
raise ValueError(f"ProxyType '{proxy_type}' not permitted by EIP-897.")
target = ContractCall(IMPLEMENTATION_ABI, address)(skip_trace=True)
# avoid recursion
if target != ZERO_ADDRESS:
return ProxyInfo(type=ProxyType.Delegate, target=target, abi=IMPLEMENTATION_ABI)
except (ApeException, ValueError):
pass
return None
def decode_receipt(self, data: dict) -> "ReceiptAPI":
status = data.get("status")
if status is not None:
status = self.conversion_manager.convert(status, int)
status = TransactionStatusEnum(status)
hash_key_choices = (
"hash",
"txHash",
"txn_hash",
"txnHash",
"transactionHash",
"transaction_hash",
)
txn_hash = next((data[choice] for choice in hash_key_choices if choice in data), None)
if txn_hash and isinstance(txn_hash, bytes):
txn_hash = to_hex(txn_hash)
data_bytes = data.get("data")
if data_bytes and isinstance(data_bytes, str):
data["data"] = HexBytes(data_bytes)
elif "input" in data and isinstance(data["input"], str):
data["input"] = HexBytes(data["input"])
block_number = data.get("block_number", data.get("blockNumber"))
if block_number is None:
raise ValueError("Missing block number.")
receipt_kwargs = {
"block_number": block_number,
"contract_address": data.get("contract_address", data.get("contractAddress")),
"gas_limit": data.get("gas", data.get("gas_limit", data.get("gasLimit"))) or 0,
"gas_price": data.get("gas_price", data.get("gasPrice")) or 0,
"gas_used": data.get("gas_used", data.get("gasUsed")) or 0,
"logs": data.get("logs", []),
"status": status,
"txn_hash": txn_hash,
"transaction": self.create_transaction(**data),
}
receipt_cls: type[Receipt]
if data.get("type") == 3:
receipt_cls = SharedBlobReceipt
blob_gas_price = data.get("blob_gas_price")
if blob_gas_price is None:
blob_gas_price = data.get("blobGasPrice")
receipt_kwargs["blobGasPrice"] = blob_gas_price
receipt_kwargs["blobGasUsed"] = data.get("blob_gas_used", data.get("blobGasUsed")) or 0
else:
receipt_cls = Receipt
error = receipt_kwargs.pop("error", None)
receipt = receipt_cls.model_validate(receipt_kwargs)
receipt.error = error
return receipt
def decode_block(self, data: dict) -> BlockAPI:
data["hash"] = HexBytes(data["hash"]) if data.get("hash") else None
if "gas_limit" in data:
data["gasLimit"] = data.pop("gas_limit")
if "gas_used" in data:
data["gasUsed"] = data.pop("gas_used")
if "parent_hash" in data:
data["parentHash"] = HexBytes(data.pop("parent_hash"))
if "transaction_ids" in data:
data["transactions"] = data.pop("transaction_ids")
if "total_difficulty" in data:
data["totalDifficulty"] = data.pop("total_difficulty") or 0
elif "totalDifficulty" in data:
data["totalDifficulty"] = data.pop("totalDifficulty") or 0
if "base_fee" in data:
data["baseFeePerGas"] = data.pop("base_fee")
elif "baseFee" in data:
data["baseFeePerGas"] = data.pop("baseFee")
if "transactions" in data:
data["num_transactions"] = len(data["transactions"])
return Block.model_validate(data)
def _python_type_for_abi_type(self, abi_type: ABIType) -> Union[type, Sequence]:
# NOTE: An array can be an array of tuples, so we start with an array check
if str(abi_type.type).endswith("]"):
# remove one layer of the potential onion of array
abi_type_str = str(abi_type.type)
last_bracket_pos = abi_type_str.rfind("[")
new_type = abi_type_str[:last_bracket_pos] if last_bracket_pos != -1 else abi_type_str
# create a new type with the inner type of array
new_abi_type = ABIType(type=new_type, **abi_type.model_dump(exclude={"type"}))
# NOTE: type for static and dynamic array is a single item list
# containing the type of the array
return [self._python_type_for_abi_type(new_abi_type)]
if abi_type.components is not None:
return tuple(self._python_type_for_abi_type(c) for c in abi_type.components)
if abi_type.type == "address":
return AddressType
elif abi_type.type == "bool":
return bool
elif abi_type.type == "string":
return str
elif "bytes" in abi_type.type:
return bytes
elif "int" in abi_type.type:
return int
elif "fixed" in abi_type.type:
return Decimal
raise ConversionError(f"Unable to convert '{abi_type}'.")
def encode_calldata(self, abi: Union[ConstructorABI, MethodABI], *args) -> HexBytes:
if not abi.inputs:
return HexBytes("")
parser = StructParser(abi)
arguments = parser.encode_input(args)
input_types = [i.canonical_type for i in abi.inputs]
python_types = tuple(self._python_type_for_abi_type(i) for i in abi.inputs)
converted_args = self.conversion_manager.convert(arguments, python_types)
encoded_calldata = encode(input_types, converted_args)
return HexBytes(encoded_calldata)
def decode_calldata(self, abi: Union[ConstructorABI, MethodABI], calldata: bytes) -> dict:
raw_input_types = [i.canonical_type for i in abi.inputs]
input_types = [parse_type(i.model_dump()) for i in abi.inputs]
try:
raw_input_values = decode(raw_input_types, calldata, strict=False)
except (InsufficientDataBytes, OverflowError, NonEmptyPaddingBytes) as err:
raise DecodingError(str(err)) from err
input_values = [
self.decode_primitive_value(v, t) for v, t in zip(raw_input_values, input_types)
]
arguments = {}
index = 0
for i, v in zip(abi.inputs, input_values):
name = i.name or f"{index}"
arguments[name] = v
index += 1
return arguments
def decode_returndata(self, abi: MethodABI, raw_data: bytes) -> tuple[Any, ...]:
output_types_str_ls = [o.canonical_type for o in abi.outputs]
if raw_data:
try:
vm_return_values = decode(output_types_str_ls, raw_data, strict=False)
except (InsufficientDataBytes, NonEmptyPaddingBytes) as err:
raise DecodingError(str(err)) from err
else:
# Use all zeroes.
vm_return_values = tuple([0 for _ in output_types_str_ls])
if not vm_return_values:
return vm_return_values
elif not isinstance(vm_return_values, (tuple, list)):
vm_return_values = (vm_return_values,)
output_types = [parse_type(o.model_dump()) for o in abi.outputs]
output_values = [
self.decode_primitive_value(v, t) for v, t in zip(vm_return_values, output_types)
]
parser = StructParser(abi)
output_values = parser.decode_output(output_values)
if issubclass(type(output_values), Struct):
return (output_values,)
elif (
returns_array(abi)
and isinstance(output_values, (list, tuple))
and len(output_values) == 1
):
# Array of structs or tuples: don't convert to list
# Array of anything else: convert to single list
if issubclass(type(output_values[0]), Struct):
return ([output_values[0]],)
else:
try:
return ([o for o in output_values[0]],) # type: ignore[union-attr]
except Exception:
# On-chains transaction data errors.
return (output_values,)
elif returns_array(abi):
# Tuple with single item as the array.
return (output_values,)
return tuple(output_values)
def _enrich_value(self, value: Any, **kwargs) -> Any:
if isinstance(value, bytes):
try:
string_value = value.strip(b"\x00").decode("utf8")
return f'"{string_value}"'
except UnicodeDecodeError:
# Truncate bytes if very long.
if len(value) > 24:
return f"{add_0x_prefix(HexStr(humanize_hash(cast(Hash32, value))))}"
hex_str = to_hex(value)
if is_hex_address(hex_str):
return self._enrich_value(hex_str, **kwargs)
return hex_str
elif isinstance(value, str) and is_hex_address(value):
address = self.decode_address(value)
return self._enrich_contract_id(address, **kwargs)
elif isinstance(value, str):
# Surround non-address strings with quotes.
return f'"{value}"'
elif isinstance(value, int):
return int(value) # Eliminate int-base classes.
elif isinstance(value, (list, tuple)):
return [self._enrich_value(v, **kwargs) for v in value]
elif isinstance(value, Struct):
return {k: self._enrich_value(v, **kwargs) for k, v in value.items()}
return value
def decode_primitive_value(
self, value: Any, output_type: Union[str, tuple, list]
) -> Union[str, HexBytes, int, tuple, list]:
if output_type == "address":
try:
return self.decode_address(value)
except InsufficientDataBytes as err:
raise DecodingError() from err
elif isinstance(value, bytes):
return HexBytes(value)
elif isinstance(value, int) and not isinstance(value, bool):
# Wrap integers in a special type that allows us to compare
# them with currency-value strings.
return CurrencyValueComparable(value)
elif isinstance(output_type, str) and is_array(output_type):
sub_type = "[".join(output_type.split("[")[:-1])
if not isinstance(value, (list, tuple)):
value = (value,)
return [self.decode_primitive_value(v, sub_type) for v in value]
elif isinstance(output_type, tuple):
return tuple([self.decode_primitive_value(v, t) for v, t in zip(value, output_type)])
elif (
isinstance(output_type, list)
and len(output_type) == 1
and isinstance(value, (list, tuple))
):
return tuple([self.decode_primitive_value(v, output_type[0]) for v in value])
return value
def encode_deployment(
self, deployment_bytecode: HexBytes, abi: ConstructorABI, *args, **kwargs
) -> BaseTransaction:
kwargs["abi"] = abi
txn = self.create_transaction(**kwargs)
data = HexBytes(deployment_bytecode)
# Encode args, if there are any
if abi and args:
data = HexBytes(data + self.encode_calldata(abi, *args))
txn.data = data
return cast(BaseTransaction, txn)
def encode_transaction(
self,
address: AddressType,
abi: MethodABI,
*args,
**kwargs,
) -> BaseTransaction:
kwargs["abi"] = abi
txn = self.create_transaction(receiver=address, **kwargs)
# Add method ID
txn.data = self.get_method_selector(abi)
txn.data = HexBytes(txn.data + self.encode_calldata(abi, *args))
return cast(BaseTransaction, txn)
def create_transaction(self, **kwargs) -> "TransactionAPI":
"""
Returns a transaction using the given constructor kwargs.
**NOTE**: This generally should not be called by the user since this API method is used as a
hook for Ecosystems to customize how transactions are created.
Returns:
:class:`~ape.api.transactions.TransactionAPI`
"""
# Handle all aliases.
tx_data = dict(kwargs)
tx_data = _correct_key(
"max_priority_fee",
tx_data,
("max_priority_fee_per_gas", "maxPriorityFeePerGas", "maxPriorityFee"),
)
tx_data = _correct_key("max_fee", tx_data, ("max_fee_per_gas", "maxFeePerGas", "maxFee"))
tx_data = _correct_key("gas", tx_data, ("gas_limit", "gasLimit"))
tx_data = _correct_key("gas_price", tx_data, ("gasPrice",))
tx_data = _correct_key(
"type",
tx_data,
("txType", "tx_type", "txnType", "txn_type", "transactionType", "transaction_type"),
)
tx_data = _correct_key("maxFeePerBlobGas", tx_data, ("max_fee_per_blob_gas",))
tx_data = _correct_key("blobVersionedHashes", tx_data, ("blob_versioned_hashes",))
# Handle unique value specifications, such as "1 ether".
if "value" in tx_data and not isinstance(tx_data["value"], int):
value = tx_data["value"] or 0 # Convert None to 0.
tx_data["value"] = self.conversion_manager.convert(value, int)
# None is not allowed, the user likely means `b""`.
if "data" in tx_data and tx_data["data"] is None:
tx_data["data"] = b""
# Deduce the transaction type.
transaction_types: dict[TransactionType, type[TransactionAPI]] = {
TransactionType.STATIC: StaticFeeTransaction,
TransactionType.ACCESS_LIST: AccessListTransaction,
TransactionType.DYNAMIC: DynamicFeeTransaction,
TransactionType.SHARED_BLOB: SharedBlobTransaction,
TransactionType.SET_CODE: SetCodeTransaction,
}
if "type" in tx_data:
# It might be `None` in the given data dict.
if tx_data["type"] is None:
# Explicit `None` means used default.
version = self.default_transaction_type
elif isinstance(tx_data["type"], TransactionType):
version = tx_data["type"]
elif isinstance(tx_data["type"], int):
version = TransactionType(tx_data["type"])
else:
# Using hex values or alike.
version = TransactionType(self.conversion_manager.convert(tx_data["type"], int))
# NOTE: Determine these in reverse order
elif "authorizationList" in tx_data:
version = TransactionType.SET_CODE
elif "maxFeePerBlobGas" in tx_data or "blobVersionedHashes" in tx_data:
version = TransactionType.SHARED_BLOB
elif "max_fee" in tx_data or "max_priority_fee" in tx_data:
version = TransactionType.DYNAMIC
elif "access_list" in tx_data or "accessList" in tx_data:
version = TransactionType.ACCESS_LIST
elif "gas_price" in tx_data:
version = TransactionType.STATIC
else:
version = self.default_transaction_type
tx_data["type"] = version.value
# This causes problems in pydantic for some reason.
# NOTE: This must happen after deducing the tx type!
if "gas_price" in tx_data and tx_data["gas_price"] is None:
del tx_data["gas_price"]
txn_class = transaction_types[version]
if "required_confirmations" not in tx_data or tx_data["required_confirmations"] is None:
# Attempt to use default required-confirmations from `ape-config.yaml`.
required_confirmations = 0
active_provider = self.network_manager.active_provider
if active_provider:
required_confirmations = active_provider.network.required_confirmations
tx_data["required_confirmations"] = required_confirmations
chain_id = tx_data.get("chainId", tx_data.get("chain_id"))
if isinstance(chain_id, str):
tx_data["chainId"] = int(chain_id, 16)
elif chain_id is None and self.network_manager.active_provider is not None:
tx_data["chainId"] = self.chain_manager.chain_id
if "input" in tx_data:
tx_data["data"] = tx_data.pop("input")
if all(field in tx_data for field in ("v", "r", "s")):
tx_data["signature"] = TransactionSignature(
v=tx_data["v"],
r=bytes(tx_data["r"]),
s=bytes(tx_data["s"]),
)
if "gas" not in tx_data:
tx_data["gas"] = None
tx = txn_class.model_validate(tx_data)
return tx
def decode_logs(self, logs: Sequence[dict], *events: EventABI) -> Iterator[ContractLog]:
if not logs:
return
abi_inputs = {
encode_hex(keccak(text=abi.selector)): LogInputABICollection(abi) for abi in events
}
def get_abi(_topic: HexStr) -> Optional[LogInputABICollection]:
return abi_inputs[_topic] if _topic in abi_inputs else None
for log in logs:
if log.get("anonymous"):
raise NotImplementedError(
"decoding anonymous logs is not supported with this method"
)
topics = log["topics"]
# web3.py converts topics to HexBytes, data is always a HexStr
if isinstance(log["topics"][0], bytes):
topics = [encode_hex(t) for t in log["topics"]]
elif not topics:
continue
if not (abi := get_abi(topics[0])):
continue
event_arguments = abi.decode(topics, log["data"], use_hex_on_fail=True)
# Since LogABICollection does not have access to the Ecosystem,
# the rest of the decoding must happen here.
converted_arguments: dict = {}
for item in abi.abi.inputs:
_type, key, value = item.canonical_type, item.name, event_arguments[item.name]
if isinstance(value, Struct):
struct_types = _type.lstrip("(").rstrip(")").split(",")
for struct_type, (struct_key, struct_val) in zip(struct_types, value.items()):
value[struct_key] = (
self.decode_address(struct_val)
if struct_type == "address"
else HexBytes(struct_val)
if "bytes" in struct_type
else struct_val
)
converted_arguments[key] = value
elif _type == "address":
converted_arguments[key] = self.decode_address(value)
elif is_array(_type):
sub_type = "[".join(_type.split("[")[:-1])
converted_arguments[key] = (
[self.decode_address(v) for v in value] if sub_type == "address" else value
)
elif isinstance(value, int):
# This allows integers to be comparable with currency-value
# strings, such as "1 ETH".
converted_arguments[key] = CurrencyValueComparable(value)
else:
# No change.
converted_arguments[key] = value
yield ContractLog(
_abi=abi,
block_hash=log.get("blockHash") or log.get("block_hash") or "",
block_number=log.get("blockNumber") or log.get("block_number") or 0,
contract_address=self.decode_address(log["address"]),
event_arguments=converted_arguments,
event_name=abi.event_name,
log_index=log.get("logIndex") or log.get("log_index") or 0,
transaction_hash=log.get("transactionHash") or log.get("transaction_hash") or "",
transaction_index=(
log.get("transactionIndex")
if "transactionIndex" in log
else log.get("transaction_index")
),
)
def enrich_trace(self, trace: "TraceAPI", **kwargs) -> "TraceAPI":
kwargs["trace"] = trace
if not isinstance(trace, Trace):
# Can only enrich `ape_ethereum.trace.Trace` (or subclass) implementations.
return trace
elif trace._enriched_calltree is not None:
# Already enriched.
return trace
if sender := trace.transaction.get("from"):
kwargs["sender"] = sender
# Get the un-enriched calltree.
# NOTE: Using JSON mode so Enums are all str types.
data = trace.get_calltree().model_dump(mode="json", by_alias=True)
if isinstance(trace, TransactionTrace):
return_value = trace.__dict__.get("return_value") if data.get("depth", 0) == 0 else None
if return_value is not None:
# Return value was discovered already.
kwargs["return_value"] = return_value
# Cache the result back on the trace.
trace._enriched_calltree = self._enrich_calltree(data, **kwargs)
return trace
def _enrich_calltree(self, call: dict, **kwargs) -> dict:
if "contract_id" in call:
# Already enriched.
return call
if self._test_runner and self._test_runner.gas_tracker.enabled:
default_symbol_for_tokens = not self._test_runner.gas_tracker.enabled
else:
default_symbol_for_tokens = True
kwargs["use_symbol_for_tokens"] = kwargs.get(
"use_symbol_for_tokens", default_symbol_for_tokens
)
# Handle if for some reason this is still an Enum.
call_type = call.get("call_type", "")
if call_type and not isinstance(call_type, str):
call["call_type"] = call_type = call_type.value
is_create = "CREATE" in call_type
# Enrich sub-calls first.
if subcalls := call.get("calls"):
call["calls"] = [self._enrich_calltree(c, **kwargs) for c in subcalls]
# Figure out the contract.
address: AddressType = call.pop("address", "")
try:
call["contract_id"] = address = kwargs["contract_address"] = self.decode_address(
address
)
except Exception:
# Tx was made with a weird address.
call["contract_id"] = address
if calldata := call.get("calldata"):
calldata_bytes = HexBytes(calldata)
call["method_id"] = to_hex(calldata_bytes[:4])
call["calldata"] = calldata if is_create else to_hex(calldata_bytes[4:])
else:
call["method_id"] = "0x"
try:
address_int = int(address, 16)
except Exception:
pass
else:
# Collapse pre-compile address calls
if 1 <= address_int <= 9:
return (
call["calls"][0]
if len(call.get("calls", [])) == 1
else {"contract_id": f"{address_int}", "calls": call["calls"]}
)
depth = call.get("depth", 0)
if depth == 0 and address in self.account_manager:
call["contract_id"] = f"__{self.fee_token_symbol}_transfer__"
else:
call["contract_id"] = self._enrich_contract_id(call["contract_id"], **kwargs)
if not (contract_type := self._get_contract_type_for_enrichment(address, **kwargs)):
# Without a contract type, we can enrich no further.
return call
kwargs["contract_type"] = contract_type
if events := call.get("events"):
call["events"] = self._enrich_trace_events(events, address=address, **kwargs)
method_abi: Optional[Union[MethodABI, ConstructorABI]] = None
if is_create:
method_abi = contract_type.constructor
name = "__new__"
elif call["method_id"] != "0x":
method_id_bytes = HexBytes(call["method_id"])
# perf: use try/except instead of __contains__ check.
try:
method_abi = contract_type.methods[method_id_bytes]
except KeyError:
name = call["method_id"]
else:
if isinstance(method_abi, MethodABI):
# Check if method name duplicated. If that is the case, use selector.
times = len([x for x in contract_type.methods if x.name == method_abi.name])
name = (method_abi.name if times == 1 else method_abi.selector) or call[
"method_id"
]
call = self._enrich_calldata(call, method_abi, **kwargs)
else:
name = call.get("method_id") or "0x"
else:
name = call.get("method_id") or "0x"
call["method_id"] = name
if method_abi:
call = self._enrich_calldata(call, method_abi, **kwargs)
if kwargs.get("return_value"):
# Return value was separately enriched.
call["returndata"] = kwargs["return_value"]
elif isinstance(method_abi, MethodABI):
call = self._enrich_returndata(call, method_abi, **kwargs)
else:
# For constructors, don't include outputs, as it is likely a large amount of bytes.
call["returndata"] = None
elif "revert_message" not in call:
# Method not found but perhaps we still know the error.
call = self._enrich_revert_message(call)
return call
def _enrich_contract_id(self, address: AddressType, **kwargs) -> str:
# Defensively pop "contract_type" key from kwargs. `_get_contract_type_for_enrichment` will
# preferentially return a `contract_type` from kwargs without checking the contract cache.
# The contract_type may not match the contract address being enriched if this method was
# previously called for a different contract.
kwargs.pop("contract_type", None)
if address and address == kwargs.get("sender"):
return "tx.origin"
elif address == ZERO_ADDRESS:
return "ZERO_ADDRESS"
elif not (contract_type := self._get_contract_type_for_enrichment(address, **kwargs)):
# Without a contract type, we can enrich no further.
return address
kwargs["contract_type"] = contract_type
if kwargs.get("use_symbol_for_tokens") and "symbol" in contract_type.view_methods:
# Use token symbol as name
contract = self.chain_manager.contracts.instance_at(address)
try:
symbol = contract.symbol(skip_trace=True)
except ApeException:
symbol = None
if isinstance(symbol, str):
return symbol.strip()
# bytes32 symbol appears in ds-token
if isinstance(symbol, bytes):
try:
return symbol.rstrip(b"\x00").decode()
except UnicodeDecodeError:
return str(symbol)
name = contract_type.name.strip() if contract_type.name else None
return name or address
    def _enrich_calldata(
        self,
        call: dict,
        method_abi: Union[MethodABI, ConstructorABI],
        **kwargs,
    ) -> dict:
        """Decode and enrich the ``calldata`` field of a call-tree node in place.

        ``call["calldata"]`` is replaced by a mapping of argument name to
        enriched value; if ABI decoding fails, each expected input becomes
        the placeholder ``"<?>"``. A non-str/bytes value means the node was
        already enriched and is returned untouched.
        """
        calldata = call["calldata"]
        if isinstance(calldata, str):
            calldata_arg = to_bytes(hexstr=calldata)
        elif isinstance(calldata, bytes):
            calldata_arg = calldata
        else:
            # Already enriched.
            return call
        contract_type = kwargs["contract_type"]
        if call.get("call_type") and "CREATE" in call.get("call_type", ""):
            # Strip off bytecode
            # (deploy calldata = deployment bytecode + constructor args; keep only the args)
            bytecode = (
                contract_type.deployment_bytecode.to_bytes()
                if contract_type.deployment_bytecode
                else b""
            )
            # TODO: Handle Solidity Metadata (delegate to Compilers again?)
            calldata_arg = HexBytes(calldata_arg.split(bytecode)[-1])
        try:
            call["calldata"] = self.decode_calldata(method_abi, calldata_arg)
        except DecodingError:
            # Decoding failed: emit one placeholder per declared input.
            call["calldata"] = ["<?>" for _ in method_abi.inputs]
        else:
            call["calldata"] = self._enrich_calldata_dict(call["calldata"], **kwargs)
        return call
def _enrich_calldata_dict(self, calldata: dict, **kwargs) -> dict:
return {k: self._enrich_value(v, **kwargs) for k, v in calldata.items()}
    def _enrich_returndata(self, call: dict, method_abi: MethodABI, **kwargs) -> dict:
        """Decode and enrich the ``returndata`` field of a call-tree node.

        Handles, in order: deploy calls (returndata blanked), nodes already
        carrying a revert message, revert strings, custom errors, and regular
        ABI-encoded return values. Undecodable outputs become ``"<?>"``.
        """
        if "CREATE" in call.get("call_type", ""):
            # Deploys "return" the new contract's runtime code; don't display it.
            call["returndata"] = ""
            return call
        elif "revert_message" in call:
            # Already enriched, in a sense..
            return call
        default_return_value = "<?>"
        returndata = call.get("returndata", "")
        is_hexstr = isinstance(returndata, str) and is_0x_prefixed(returndata)
        # Check if return is only a revert string.
        call = self._enrich_revert_message(call)
        if "revert_message" in call:
            return call
        elif is_hexstr:
            return_value_bytes = HexBytes(returndata)
            # Check if custom-error.
            if "trace" in kwargs and "contract_address" in kwargs:
                address = kwargs["contract_address"]
                try:
                    instance = self.decode_custom_error(return_value_bytes, address, **kwargs)
                except NotImplementedError:
                    # Provider can't decode custom errors; fall through to normal decoding.
                    pass
                else:
                    if instance is not None:
                        call["revert_message"] = repr(instance)
                        return call
        elif is_hexstr or isinstance(returndata, (int, bytes)):
            # NOTE: the `is_hexstr` half is unreachable here (handled by the branch
            # above); this branch effectively covers int/bytes returndata.
            return_value_bytes = HexBytes(returndata)
        else:
            return_value_bytes = None
        if return_value_bytes is None:
            values = tuple([default_return_value for _ in method_abi.outputs])
        else:
            return_values = None
            try:
                return_values = (
                    self.decode_returndata(method_abi, return_value_bytes)
                    if not call.get("failed")
                    else None
                )
            except DecodingError:
                if return_value_bytes == HexBytes("0x"):
                    # Empty result, but it failed decoding because of its length.
                    return_values = ("",)
            # Cache un-enriched return_value in trace.
            call["unenriched_return_values"] = return_values
            values = (
                tuple([default_return_value for _ in method_abi.outputs])
                if return_values is None
                else tuple([self._enrich_value(v, **kwargs) for v in return_values or ()])
            )
        output_val = values[0] if len(values) == 1 else values
        if (
            isinstance(output_val, str)
            and is_0x_prefixed(output_val)
            and "." not in output_val
            and not int(output_val, 16)
        ):
            # An all-zero hex string (and not a dotted contract id) is noise; blank it.
            output_val = ""
        call["returndata"] = output_val
        return call
def _enrich_trace_events(
self,
events: list[dict],
address: Optional[AddressType] = None,
**kwargs,
) -> list[dict]:
return [self._enrich_trace_event(e, address=address, **kwargs) for e in events]
    def _enrich_trace_event(
        self,
        event: dict,
        address: Optional[AddressType] = None,
        **kwargs,
    ) -> dict:
        """Decode one raw trace event (topics + data) into ``{"name", "calldata"}``.

        Returns ``event`` unchanged whenever it cannot be decoded further:
        missing topics or address, unknown contract type, unknown selector,
        or a log-decoding failure.
        """
        if "topics" not in event or len(event["topics"]) < 1:
            # Already enriched or wrong.
            return event
        elif not address:
            address = event.get("address")
            if not address:
                # Cannot enrich further w/o an address.
                return event
        if not (contract_type := self._get_contract_type_for_enrichment(address, **kwargs)):
            # Without a contract type, we can enrich no further.
            return event
        kwargs["contract_type"] = contract_type
        # The selector is always the first topic.
        selector = event["topics"][0]
        if not isinstance(selector, str):
            selector = to_hex(selector)
        if selector not in contract_type.identifier_lookup:
            # Unable to enrich using this contract type.
            # Selector unknown.
            return event
        abi = contract_type.identifier_lookup[selector]
        assert isinstance(abi, EventABI)  # For mypy.
        log_data = {
            "topics": event["topics"],
            "data": event["data"],
            "address": address,
        }
        try:
            contract_logs = [log for log in self.decode_logs([log_data], abi)]
        except Exception as err:
            logger.debug(f"Failed decoding logs from trace data: {err}")
            return event
        if not contract_logs:
            # Not sure if this is a likely condition.
            return event
        # Enrich the event-node data using the Ape ContractLog object.
        log: ContractLog = contract_logs[0]
        calldata = self._enrich_calldata_dict(log.event_arguments)
        return {"name": log.event_name, "calldata": calldata}
def _enrich_revert_message(self, call: dict) -> dict:
returndata = call.get("returndata", "")
is_hexstr = isinstance(returndata, str) and is_0x_prefixed(returndata)
if is_hexstr and returndata.startswith(_REVERT_PREFIX):
# The returndata is the revert-str.
decoded_result = decode(("string",), HexBytes(returndata)[4:])
call["revert_message"] = decoded_result[0] if len(decoded_result) == 1 else ""
return call
def _get_contract_type_for_enrichment(
self, address: AddressType, **kwargs
) -> Optional["ContractType"]:
if not (contract_type := kwargs.get("contract_type")):
try:
contract_type = self.chain_manager.contracts.get(address)
except Exception as err:
logger.debug(f"Error getting contract type during event enrichment: {err}")
return contract_type
    def get_python_types(self, abi_type: ABIType) -> Union[type, Sequence]:
        """Return the Python type (or sequence of types) corresponding to ``abi_type``."""
        return self._python_type_for_abi_type(abi_type)
    def decode_custom_error(
        self,
        data: HexBytes,
        address: AddressType,
        **kwargs,
    ) -> Optional[CustomError]:
        """Decode ``data`` (4-byte selector + encoded inputs) into a ``CustomError``.

        Looks the selector up on the contract at ``address``; if not found,
        falls back to the last contract touched in the transaction trace
        (recursively). Returns ``None`` whenever the error cannot be resolved.
        """
        # Use an instance (required for proper error caching).
        try:
            contract = self.chain_manager.contracts.instance_at(address)
        except Exception:
            # No usable contract at this address; nothing to decode against.
            return None
        selector = data[:4]
        input_data = data[4:]
        if selector in contract.contract_type.errors:
            abi = contract.contract_type.errors[selector]
            error_cls = contract.get_error_by_signature(abi.signature)
            inputs = self.decode_calldata(abi, input_data)
            kwargs["contract_address"] = address
            # Forward only the kwargs the error constructor understands.
            error_kwargs = {
                k: v
                for k, v in kwargs.items()
                if k in ("trace", "txn", "contract_address", "source_traceback")
            }
            return error_cls(abi, inputs, **error_kwargs)
        # ABI not found. Try looking at the "last" contract.
        if not (tx := kwargs.get("txn")) or not self.network_manager.active_provider:
            return None
        try:
            tx_hash = tx.txn_hash
        except SignatureError:
            # Unsigned transaction: no hash, so no trace to consult.
            return None
        try:
            trace = kwargs.get("trace") or self.provider.get_transaction_trace(tx_hash)
        except NotImplementedError:
            # Provider cannot produce traces.
            return None
        try:
            if not (last_addr := next(trace.get_addresses_used(reverse=True), None)):
                return None
        except Exception:
            # When unable to get trace-frames properly, such as eth-tester.
            return None
        if last_addr == address:
            # Avoid checking same address twice.
            return None
        try:
            if cerr := self.decode_custom_error(data, last_addr, **kwargs):
                return cerr
        except NotImplementedError:
            return None
        # error never found.
        return None
def get_deployment_address(self, address: AddressType, nonce: int) -> AddressType:
"""
Calculate the deployment address of a contract before it is deployed.
This is useful if the address is an argument to another contract's deployment
and you have not yet deployed the first contract yet.
"""
sender_bytes = to_bytes(hexstr=address)
encoded = rlp.encode([sender_bytes, nonce])
address_bytes = keccak(encoded)[12:]
return self.decode_address(address_bytes)
def parse_type(type_: dict[str, Any]) -> Union[str, tuple, list]:
    """Convert an ABI type node into its string / nested-tuple representation.

    Elementary types pass through as their type string; tuples recurse into
    their components, and tuple arrays are wrapped in a single-element list.
    """
    abi_type = type_["type"]
    if "tuple" not in abi_type:
        # Elementary types (uint256, address, ...) need no recursion.
        return abi_type
    components = tuple(parse_type(component) for component in type_["components"])
    return [components] if is_array(abi_type) else components
def _correct_key(key: str, data: dict, alt_keys: tuple[str, ...]) -> dict:
if key in data:
return data
for possible_key in alt_keys:
if possible_key in data:
new_data = data.copy()
new_data[key] = new_data.pop(possible_key)
return new_data
return data
| Ethereum |
python | getsentry__sentry-python | tests/integrations/aws_lambda/utils.py | {
"start": 1705,
"end": 7904
} | class ____(Stack):
"""
Uses the AWS CDK to create a local SAM stack containing Lambda functions.
"""
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
print("[LocalLambdaStack] Creating local SAM Lambda Stack")
super().__init__(scope, construct_id, **kwargs)
# Override the template synthesis
self.template_options.template_format_version = "2010-09-09"
self.template_options.transforms = ["AWS::Serverless-2016-10-31"]
print("[LocalLambdaStack] Create Sentry Lambda layer package")
filename = "sentry-sdk-lambda-layer.zip"
build_packaged_zip(
make_dist=True,
out_zip_filename=filename,
)
print(
"[LocalLambdaStack] Add Sentry Lambda layer containing the Sentry SDK to the SAM stack"
)
self.sentry_layer = CfnResource(
self,
"SentryPythonServerlessSDK",
type="AWS::Serverless::LayerVersion",
properties={
"ContentUri": os.path.join(DIST_PATH, filename),
"CompatibleRuntimes": [
PYTHON_VERSION,
],
},
)
dsn = f"http://123@{get_host_ip()}:9999/0" # noqa: E231
print("[LocalLambdaStack] Using Sentry DSN: %s" % dsn)
print(
"[LocalLambdaStack] Add all Lambda functions defined in "
"/tests/integrations/aws_lambda/lambda_functions/ to the SAM stack"
)
lambda_dirs = [
d
for d in os.listdir(LAMBDA_FUNCTION_DIR)
if os.path.isdir(os.path.join(LAMBDA_FUNCTION_DIR, d))
]
for lambda_dir in lambda_dirs:
CfnResource(
self,
lambda_dir,
type="AWS::Serverless::Function",
properties={
"CodeUri": os.path.join(LAMBDA_FUNCTION_DIR, lambda_dir),
"Handler": "sentry_sdk.integrations.init_serverless_sdk.sentry_lambda_handler",
"Runtime": PYTHON_VERSION,
"Timeout": LAMBDA_FUNCTION_TIMEOUT,
"Layers": [
{"Ref": self.sentry_layer.logical_id}
], # Add layer containing the Sentry SDK to function.
"Environment": {
"Variables": {
"SENTRY_DSN": dsn,
"SENTRY_INITIAL_HANDLER": "index.handler",
"SENTRY_TRACES_SAMPLE_RATE": "1.0",
}
},
},
)
print(
"[LocalLambdaStack] - Created Lambda function: %s (%s)"
% (
lambda_dir,
os.path.join(LAMBDA_FUNCTION_DIR, lambda_dir),
)
)
print(
"[LocalLambdaStack] Add all Lambda functions defined in "
"/tests/integrations/aws_lambda/lambda_functions_with_embedded_sdk/ to the SAM stack"
)
lambda_dirs = [
d
for d in os.listdir(LAMBDA_FUNCTION_WITH_EMBEDDED_SDK_DIR)
if os.path.isdir(os.path.join(LAMBDA_FUNCTION_WITH_EMBEDDED_SDK_DIR, d))
]
for lambda_dir in lambda_dirs:
# Copy the Sentry SDK into the function directory
sdk_path = os.path.join(
LAMBDA_FUNCTION_WITH_EMBEDDED_SDK_DIR, lambda_dir, "sentry_sdk"
)
if not os.path.exists(sdk_path):
# Find the Sentry SDK in the current environment
import sentry_sdk as sdk_module
sdk_source = os.path.dirname(sdk_module.__file__)
shutil.copytree(sdk_source, sdk_path)
# Install the requirements of Sentry SDK into the function directory
requirements_file = os.path.join(
get_project_root(), "requirements-aws-lambda-layer.txt"
)
# Install the package using pip
subprocess.check_call(
[
sys.executable,
"-m",
"pip",
"install",
"--upgrade",
"--target",
os.path.join(LAMBDA_FUNCTION_WITH_EMBEDDED_SDK_DIR, lambda_dir),
"-r",
requirements_file,
]
)
CfnResource(
self,
lambda_dir,
type="AWS::Serverless::Function",
properties={
"CodeUri": os.path.join(
LAMBDA_FUNCTION_WITH_EMBEDDED_SDK_DIR, lambda_dir
),
"Handler": "index.handler",
"Runtime": PYTHON_VERSION,
"Timeout": LAMBDA_FUNCTION_TIMEOUT,
"Environment": {
"Variables": {
"SENTRY_DSN": dsn,
}
},
},
)
print(
"[LocalLambdaStack] - Created Lambda function: %s (%s)"
% (
lambda_dir,
os.path.join(LAMBDA_FUNCTION_DIR, lambda_dir),
)
)
@classmethod
def wait_for_stack(cls, timeout=60, port=SAM_PORT):
"""
Wait for SAM to be ready, with timeout.
"""
start_time = time.time()
while True:
if time.time() - start_time > timeout:
raise TimeoutError(
"AWS SAM failed to start within %s seconds. (Maybe Docker is not running?)"
% timeout
)
try:
# Try to connect to SAM
response = requests.get(f"http://127.0.0.1:{port}/") # noqa: E231
if response.status_code == 200 or response.status_code == 404:
return
except requests.exceptions.ConnectionError:
time.sleep(1)
continue
| LocalLambdaStack |
python | openai__openai-python | src/openai/types/container_create_response.py | {
"start": 243,
"end": 495
} | class ____(BaseModel):
anchor: Optional[Literal["last_active_at"]] = None
"""The reference point for the expiration."""
minutes: Optional[int] = None
"""The number of minutes after the anchor before the container expires."""
| ExpiresAfter |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 29354,
"end": 30147
} | class ____(AbstractTemplate):
def generic(self, args, kws):
assert not kws
[arg] = args
if isinstance(arg, types.UnicodeType):
msg = 'argument must be a string literal'
raise errors.RequireLiteralValue(msg)
if isinstance(arg, types.StringLiteral):
return signature(types.float64, arg)
if arg not in types.number_domain:
raise errors.NumbaTypeError("float() only support for numbers")
if arg in types.complex_domain:
raise errors.NumbaTypeError("float() does not support complex")
if arg in types.integer_domain:
return signature(types.float64, arg)
elif arg in types.real_domain:
return signature(arg, arg)
@infer_global(complex)
| Float |
python | wandb__wandb | wandb/vendor/pygments/lexers/webmisc.py | {
"start": 33366,
"end": 36340
} | class ____(RegexLexer):
"""
For QML files. See http://doc.qt.digia.com/4.7/qdeclarativeintroduction.html.
.. versionadded:: 1.6
"""
# QML is based on javascript, so much of this is taken from the
# JavascriptLexer above.
name = 'QML'
aliases = ['qml', 'qbs']
filenames = ['*.qml', '*.qbs']
mimetypes = ['application/x-qml', 'application/x-qt.qbs+qml']
# pasted from JavascriptLexer, with some additions
flags = re.DOTALL | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
# QML insertions
(r'\bid\s*:\s*[A-Za-z][\w.]*', Keyword.Declaration,
'slashstartsregex'),
(r'\b[A-Za-z][\w.]*\s*:', Keyword, 'slashstartsregex'),
# the rest from JavascriptLexer
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(r'[$a-zA-Z_]\w*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
| QmlLexer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.