| language (string, 1 class) | repo (string, 346 values) | path (string, length 6–201) | class_span (dict) | source (string, length 21–2.38M) | target (string, length 1–96) |
|---|---|---|---|---|---|
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py | {
"start": 84877,
"end": 85981
} | class ____(GeneratedAirbyteDestination):
@public
def __init__(
self,
name: str,
username: str,
jdbc_url: str,
password: Optional[str] = None,
schema: Optional[str] = None,
):
"""Airbyte Destination for Jdbc.
Documentation can be found at https://docs.airbyte.com/integrations/destinations/postgres
Args:
name (str): The name of the destination.
username (str): The username which is used to access the database.
password (Optional[str]): The password associated with this username.
jdbc_url (str): JDBC formatted url. See the standard here.
schema (Optional[str]): If you leave the schema unspecified, JDBC defaults to a schema named "public".
"""
self.username = check.str_param(username, "username")
self.password = check.opt_str_param(password, "password")
self.jdbc_url = check.str_param(jdbc_url, "jdbc_url")
self.schema = check.opt_str_param(schema, "schema")
super().__init__("Jdbc", name)
| JdbcDestination |
python | wandb__wandb | wandb/apis/public/jobs.py | {
"start": 9517,
"end": 16946
} | class ____:
"""A single queued run associated with an entity and project.
Args:
entity: The entity associated with the queued run.
project (str): The project where runs executed by the queue are logged to.
queue_name (str): The name of the queue.
run_queue_item_id (int): The id of the run queue item.
project_queue (str): The project that manages the queue.
priority (str): The priority of the queued run.
Call `run = queued_run.wait_until_running()` or
`run = queued_run.wait_until_finished()` to access the run.
"""
def __init__(
self,
client,
entity,
project,
queue_name,
run_queue_item_id,
project_queue=LAUNCH_DEFAULT_PROJECT,
priority=None,
):
self.client = client
self._entity = entity
self._project = project
self._queue_name = queue_name
self._run_queue_item_id = run_queue_item_id
self.sweep = None
self._run = None
self.project_queue = project_queue
self.priority = priority
@property
def queue_name(self):
"""The name of the queue."""
return self._queue_name
@property
def id(self):
"""The id of the queued run."""
return self._run_queue_item_id
@property
def project(self):
"""The project associated with the queued run."""
return self._project
@property
def entity(self):
"""The entity associated with the queued run."""
return self._entity
@property
def state(self):
"""The state of the queued run."""
item = self._get_item()
if item:
return item["state"].lower()
raise ValueError(
f"Could not find QueuedRunItem associated with id: {self.id} on queue {self.queue_name} at itemId: {self.id}"
)
@normalize_exceptions
def _get_run_queue_item_legacy(self) -> dict:
query = gql(
"""
query GetRunQueueItem($projectName: String!, $entityName: String!, $runQueue: String!) {
project(name: $projectName, entityName: $entityName) {
runQueue(name:$runQueue) {
runQueueItems {
edges {
node {
id
state
associatedRunId
}
}
}
}
}
}
"""
)
variable_values = {
"projectName": self.project_queue,
"entityName": self._entity,
"runQueue": self.queue_name,
}
res = self.client.execute(query, variable_values)
for item in res["project"]["runQueue"]["runQueueItems"]["edges"]:
if str(item["node"]["id"]) == str(self.id):
return item["node"]
@normalize_exceptions
def _get_item(self):
query = gql(
"""
query GetRunQueueItem($projectName: String!, $entityName: String!, $runQueue: String!, $itemId: ID!) {
project(name: $projectName, entityName: $entityName) {
runQueue(name: $runQueue) {
runQueueItem(id: $itemId) {
id
state
associatedRunId
}
}
}
}
"""
)
variable_values = {
"projectName": self.project_queue,
"entityName": self._entity,
"runQueue": self.queue_name,
"itemId": self.id,
}
try:
res = self.client.execute(query, variable_values) # exception w/ old server
if res["project"]["runQueue"].get("runQueueItem") is not None:
return res["project"]["runQueue"]["runQueueItem"]
except Exception as e:
if "Cannot query field" not in str(e):
raise LaunchError(f"Unknown exception: {e}")
return self._get_run_queue_item_legacy()
@normalize_exceptions
def wait_until_finished(self):
"""Wait for the queued run to complete and return the finished run."""
if not self._run:
self.wait_until_running()
self._run.wait_until_finished()
# refetch run to get updated summary
self._run.load(force=True)
return self._run
@normalize_exceptions
def delete(self, delete_artifacts=False):
"""Delete the given queued run from the wandb backend."""
query = gql(
"""
query fetchRunQueuesFromProject($entityName: String!, $projectName: String!, $runQueueName: String!) {
project(name: $projectName, entityName: $entityName) {
runQueue(name: $runQueueName) {
id
}
}
}
"""
)
res = self.client.execute(
query,
variable_values={
"entityName": self.entity,
"projectName": self.project_queue,
"runQueueName": self.queue_name,
},
)
if res["project"].get("runQueue") is not None:
queue_id = res["project"]["runQueue"]["id"]
mutation = gql(
"""
mutation DeleteFromRunQueue(
$queueID: ID!,
$runQueueItemId: ID!
) {
deleteFromRunQueue(input: {
queueID: $queueID
runQueueItemId: $runQueueItemId
}) {
success
clientMutationId
}
}
"""
)
self.client.execute(
mutation,
variable_values={
"queueID": queue_id,
"runQueueItemId": self._run_queue_item_id,
},
)
@normalize_exceptions
def wait_until_running(self):
"""Wait until the queued run is running and return the run."""
if self._run is not None:
return self._run
while True:
# sleep here to hide an ugly warning
time.sleep(2)
item = self._get_item()
if item and item["associatedRunId"] is not None:
try:
self._run = public.Run(
self.client,
self._entity,
self.project,
item["associatedRunId"],
None,
)
self._run_id = item["associatedRunId"]
except ValueError as e:
wandb.termwarn(str(e))
else:
return self._run
elif item:
wandb.termlog("Waiting for run to start")
time.sleep(3)
def __repr__(self):
return f"<QueuedRun {self.queue_name} ({self.id})"
RunQueueResourceType = Literal[
"local-container", "local-process", "kubernetes", "sagemaker", "gcp-vertex"
]
RunQueueAccessType = Literal["project", "user"]
RunQueuePrioritizationMode = Literal["DISABLED", "V0"]
| QueuedRun |
python | django__django | django/contrib/gis/db/models/lookups.py | {
"start": 4140,
"end": 4376
} | class ____(GISLookup):
"""
The 'overlaps_below' operator returns true if A's bounding box overlaps or
is below B's bounding box.
"""
lookup_name = "overlaps_below"
@BaseSpatialField.register_lookup
| OverlapsBelowLookup |
python | apache__airflow | providers/microsoft/psrp/src/airflow/providers/microsoft/psrp/hooks/psrp.py | {
"start": 1555,
"end": 10988
} | class ____(BaseHook):
"""
Hook for PowerShell Remoting Protocol execution.
When used as a context manager, the runspace pool is reused between shell
sessions.
:param psrp_conn_id: Required. The name of the PSRP connection.
:param logging_level:
Logging level for message streams which are received during remote execution.
The default is to include all messages in the task log.
:param operation_timeout: Override the default WSMan timeout when polling the pipeline.
:param runspace_options:
Optional dictionary which is passed when creating the runspace pool. See
:py:class:`~pypsrp.powershell.RunspacePool` for a description of the
available options.
:param wsman_options:
Optional dictionary which is passed when creating the `WSMan` client. See
:py:class:`~pypsrp.wsman.WSMan` for a description of the available options.
:param on_output_callback:
Optional callback function to be called whenever an output response item is
received during job status polling.
:param exchange_keys:
If true (default), automatically initiate a session key exchange when the
hook is used as a context manager.
:param host:
Optional PowerShell host instance. If this is not set, the default
implementation will be used.
You can provide an alternative `configuration_name` using either `runspace_options`
or by setting this key as the extra fields of your connection.
"""
conn_name_attr = "psrp_conn_id"
default_conn_name = "psrp_default"
conn_type = "psrp"
hook_name = "PowerShell Remoting Protocol"
_conn: RunspacePool | None = None
_wsman_ref: WeakKeyDictionary[RunspacePool, WSMan] = WeakKeyDictionary()
def __init__(
self,
psrp_conn_id: str,
logging_level: int = DEBUG,
operation_timeout: int | None = None,
runspace_options: dict[str, Any] | None = None,
wsman_options: dict[str, Any] | None = None,
on_output_callback: OutputCallback | None = None,
exchange_keys: bool = True,
host: PSHost | None = None,
):
self.conn_id = psrp_conn_id
self._logging_level = logging_level
self._operation_timeout = operation_timeout
self._runspace_options = runspace_options or {}
self._wsman_options = wsman_options or {}
self._on_output_callback = on_output_callback
self._exchange_keys = exchange_keys
self._host = host or PSHost(None, None, False, type(self).__name__, None, None, "1.0")
def __enter__(self):
conn = self.get_conn()
self._wsman_ref[conn].__enter__()
conn.__enter__()
if self._exchange_keys:
conn.exchange_keys()
self._conn = conn
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self._conn.__exit__(exc_type, exc_value, traceback)
self._wsman_ref[self._conn].__exit__(exc_type, exc_value, traceback)
finally:
del self._conn
def get_conn(self) -> RunspacePool:
"""
Return a runspace pool.
The returned object must be used as a context manager.
"""
conn = self.get_connection(self.conn_id)
self.log.info("Establishing WinRM connection %s to host: %s", self.conn_id, conn.host)
extra = conn.extra_dejson.copy()
def apply_extra(d, keys):
d = d.copy()
for key in keys:
value = extra.pop(key, None)
if value is not None:
d[key] = value
return d
wsman_options = apply_extra(
self._wsman_options,
(
"auth",
"cert_validation",
"connection_timeout",
"locale",
"read_timeout",
"reconnection_retries",
"reconnection_backoff",
"ssl",
),
)
conn.host = cast("str", conn.host)
wsman = WSMan(conn.host, username=conn.login, password=conn.password, **wsman_options)
runspace_options = apply_extra(self._runspace_options, ("configuration_name",))
if extra:
raise AirflowException(f"Unexpected extra configuration keys: {', '.join(sorted(extra))}")
pool = RunspacePool(wsman, host=self._host, **runspace_options)
self._wsman_ref[pool] = wsman
return pool
@contextmanager
def invoke(self) -> Generator[PowerShell, None, None]:
"""
Yield a PowerShell object to which commands can be added.
Upon exit, the commands will be invoked.
"""
logger = self.log
# Compat: Airflow 3.1 use structlog, and doesn't have individual per-logger level
if hasattr(logger, "setLevel"):
logger.setLevel(self._logging_level)
elif not logger.is_enabled_for(self._logging_level):
from airflow.sdk.log import logger_at_level
logger = logger_at_level(logger.name, self._logging_level)
local_context = self._conn is None
if local_context:
self.__enter__()
try:
if TYPE_CHECKING:
assert self._conn is not None
ps = PowerShell(self._conn)
yield ps
ps.begin_invoke()
streams = [
ps.output,
ps.streams.debug,
ps.streams.error,
ps.streams.information,
ps.streams.progress,
ps.streams.verbose,
ps.streams.warning,
]
offsets = [0 for _ in streams]
# We're using polling to make sure output and streams are
# handled while the process is running.
while ps.state == PSInvocationState.RUNNING:
ps.poll_invoke(timeout=self._operation_timeout)
for i, stream in enumerate(streams):
offset = offsets[i]
while len(stream) > offset:
record = stream[offset]
# Records received on the output stream during job
# status polling are handled via an optional callback,
# while the other streams are simply logged.
if stream is ps.output:
if self._on_output_callback is not None:
self._on_output_callback(record)
else:
self._log_record(logger.log, record)
offset += 1
offsets[i] = offset
# For good measure, we'll make sure the process has
# stopped running in any case.
ps.end_invoke()
self.log.info("Invocation state: %s", str(PSInvocationState(ps.state)))
if ps.streams.error:
raise AirflowException("Process had one or more errors")
finally:
if local_context:
self.__exit__(None, None, None)
def invoke_cmdlet(
self,
name: str,
use_local_scope: bool | None = None,
arguments: list[str] | None = None,
parameters: dict[str, str] | None = None,
) -> PowerShell:
"""Invoke a PowerShell cmdlet and return session."""
with self.invoke() as ps:
ps.add_cmdlet(name, use_local_scope=use_local_scope)
for argument in arguments or ():
ps.add_argument(argument)
if parameters:
ps.add_parameters(parameters)
return ps
def invoke_powershell(self, script: str) -> PowerShell:
"""Invoke a PowerShell script and return session."""
with self.invoke() as ps:
ps.add_script(script)
return ps
def _log_record(self, log, record):
message_type = record.MESSAGE_TYPE
if message_type == MessageType.ERROR_RECORD:
log(INFO, "%s: %s", record.reason, record)
if record.script_stacktrace:
for trace in record.script_stacktrace.splitlines():
log(INFO, trace)
level = INFORMATIONAL_RECORD_LEVEL_MAP.get(message_type)
if level is not None:
try:
message = str(record.message)
except BaseException as exc:
# See https://github.com/jborean93/pypsrp/pull/130
message = str(exc)
# Sometimes a message will have a trailing \r\n sequence such as
# the tracing output of the Set-PSDebug cmdlet.
message = message.rstrip()
if record.command_name is None:
log(level, "%s", message)
else:
log(level, "%s: %s", record.command_name, message)
elif message_type == MessageType.INFORMATION_RECORD:
log(INFO, "%s (%s): %s", record.computer, record.user, record.message_data)
elif message_type == MessageType.PROGRESS_RECORD:
log(INFO, "Progress: %s (%s)", record.activity, record.description)
else:
log(WARNING, "Unsupported message type: %s", message_type)
def test_connection(self):
"""Test PSRP Connection."""
with PsrpHook(psrp_conn_id=self.conn_id):
pass
| PsrpHook |
python | apache__airflow | airflow-core/src/airflow/cli/commands/config_command.py | {
"start": 2922,
"end": 3052
} | class ____(NamedTuple):
"""Represents a configuration parameter."""
section: str
option: str
@dataclass
| ConfigParameter |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_C.py | {
"start": 10366,
"end": 11521
} | class ____(Benchmark):
r"""
Cosine Mixture objective function.
This class defines the Cosine Mixture global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{CosineMixture}}(x) = -0.1 \sum_{i=1}^n \cos(5 \pi x_i)
+ \sum_{i=1}^n x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[-1, 1]` for :math:`i = 1, ..., N`.
*Global optimum*: :math:`f(x) = -0.1N` for :math:`x_i = 0` for
:math:`i = 1, ..., N`
.. [1] Ali, M.M, Khompatraporn, C. , Zabinski, B. A Numerical Evaluation
of Several Stochastic Algorithms on Selected Continuous Global
Optimization Test Problems, Journal of Global Optimization, 2005, 31, 635
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = [(-1.0, 1.0)] * self.N
self.global_optimum = [[0. for _ in range(self.N)]]
self.fglob = -0.1 * self.N
def fun(self, x, *args):
self.nfev += 1
return -0.1 * sum(cos(5.0 * pi * x)) + sum(x ** 2.0)
| CosineMixture |
python | tensorflow__tensorflow | tensorflow/python/ops/io_ops.py | {
"start": 20054,
"end": 20907
} | class ____(ReaderBase):
"""A Reader that outputs the queued work as both the key and value.
To use, enqueue strings in a Queue. Read will take the front
work string and output (work, work).
See ReaderBase for supported methods.
@compatibility(eager)
Readers are not compatible with eager execution. Instead, please
use `tf.data` to get data into your model.
@end_compatibility
"""
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.map(...)`.")
def __init__(self, name=None):
"""Create a IdentityReader.
Args:
name: A name for the operation (optional).
"""
rr = gen_io_ops.identity_reader_v2(name=name)
super(IdentityReader, self).__init__(rr, supports_serialize=True)
ops.NotDifferentiable("IdentityReader")
| IdentityReader |
python | wandb__wandb | wandb/sdk/wandb_alerts.py | {
"start": 113,
"end": 193
} | class ____(Enum):
INFO = "INFO"
WARN = "WARN"
ERROR = "ERROR"
| AlertLevel |
python | pymupdf__PyMuPDF | src/table.py | {
"start": 19529,
"end": 49160
} | class ____:
def __init__(
self,
x_tolerance=DEFAULT_X_TOLERANCE,
y_tolerance=DEFAULT_Y_TOLERANCE,
keep_blank_chars: bool = False,
use_text_flow=False,
horizontal_ltr=True, # Should words be read left-to-right?
vertical_ttb=False, # Should vertical words be read top-to-bottom?
extra_attrs=None,
split_at_punctuation=False,
expand_ligatures=True,
):
self.x_tolerance = x_tolerance
self.y_tolerance = y_tolerance
self.keep_blank_chars = keep_blank_chars
self.use_text_flow = use_text_flow
self.horizontal_ltr = horizontal_ltr
self.vertical_ttb = vertical_ttb
self.extra_attrs = [] if extra_attrs is None else extra_attrs
# Note: string.punctuation = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
self.split_at_punctuation = (
string.punctuation
if split_at_punctuation is True
else (split_at_punctuation or "")
)
self.expansions = LIGATURES if expand_ligatures else {}
def merge_chars(self, ordered_chars: list):
x0, top, x1, bottom = objects_to_bbox(ordered_chars)
doctop_adj = ordered_chars[0]["doctop"] - ordered_chars[0]["top"]
upright = ordered_chars[0]["upright"]
direction = 1 if (self.horizontal_ltr if upright else self.vertical_ttb) else -1
matrix = ordered_chars[0]["matrix"]
rotation = 0
if not upright and matrix[1] < 0:
ordered_chars = reversed(ordered_chars)
rotation = 270
if matrix[0] < 0 and matrix[3] < 0:
rotation = 180
elif matrix[1] > 0:
rotation = 90
word = {
"text": "".join(
self.expansions.get(c["text"], c["text"]) for c in ordered_chars
),
"x0": x0,
"x1": x1,
"top": top,
"doctop": top + doctop_adj,
"bottom": bottom,
"upright": upright,
"direction": direction,
"rotation": rotation,
}
for key in self.extra_attrs:
word[key] = ordered_chars[0][key]
return word
def char_begins_new_word(
self,
prev_char,
curr_char,
) -> bool:
"""This method takes several factors into account to determine if
`curr_char` represents the beginning of a new word:
- Whether the text is "upright" (i.e., non-rotated)
- Whether the user has specified that horizontal text runs
left-to-right (default) or right-to-left, as represented by
self.horizontal_ltr
- Whether the user has specified that vertical text the text runs
top-to-bottom (default) or bottom-to-top, as represented by
self.vertical_ttb
- The x0, top, x1, and bottom attributes of prev_char and
curr_char
- The self.x_tolerance and self.y_tolerance settings. Note: In
this case, x/y refer to those directions for non-rotated text.
For vertical text, they are flipped. A more accurate terminology
might be "*intra*line character distance tolerance" and
"*inter*line character distance tolerance"
An important note: The *intra*line distance is measured from the
*end* of the previous character to the *beginning* of the current
character, while the *inter*line distance is measured from the
*top* of the previous character to the *top* of the next
character. The reasons for this are partly repository-historical,
and partly logical, as successive text lines' bounding boxes often
overlap slightly (and we don't want that overlap to be interpreted
as the two lines being the same line).
The upright-ness of the character determines the attributes to
compare, while horizontal_ltr/vertical_ttb determine the direction
of the comparison.
"""
# Note: Due to the grouping step earlier in the process,
# curr_char["upright"] will always equal prev_char["upright"].
if curr_char["upright"]:
x = self.x_tolerance
y = self.y_tolerance
ay = prev_char["top"]
cy = curr_char["top"]
if self.horizontal_ltr:
ax = prev_char["x0"]
bx = prev_char["x1"]
cx = curr_char["x0"]
else:
ax = -prev_char["x1"]
bx = -prev_char["x0"]
cx = -curr_char["x1"]
else:
x = self.y_tolerance
y = self.x_tolerance
ay = prev_char["x0"]
cy = curr_char["x0"]
if self.vertical_ttb:
ax = prev_char["top"]
bx = prev_char["bottom"]
cx = curr_char["top"]
else:
ax = -prev_char["bottom"]
bx = -prev_char["top"]
cx = -curr_char["bottom"]
return bool(
# Intraline test
(cx < ax)
or (cx > bx + x)
# Interline test
or (cy > ay + y)
)
def iter_chars_to_words(self, ordered_chars):
current_word: list = []
def start_next_word(new_char=None):
nonlocal current_word
if current_word:
yield current_word
current_word = [] if new_char is None else [new_char]
for char in ordered_chars:
text = char["text"]
if not self.keep_blank_chars and text.isspace():
yield from start_next_word(None)
elif text in self.split_at_punctuation:
yield from start_next_word(char)
yield from start_next_word(None)
elif current_word and self.char_begins_new_word(current_word[-1], char):
yield from start_next_word(char)
else:
current_word.append(char)
# Finally, after all chars processed
if current_word:
yield current_word
def iter_sort_chars(self, chars):
def upright_key(x) -> int:
return -int(x["upright"])
for upright_cluster in cluster_objects(list(chars), upright_key, 0):
upright = upright_cluster[0]["upright"]
cluster_key = "doctop" if upright else "x0"
# Cluster by line
subclusters = cluster_objects(
upright_cluster, itemgetter(cluster_key), self.y_tolerance
)
for sc in subclusters:
# Sort within line
sort_key = "x0" if upright else "doctop"
to_yield = sorted(sc, key=itemgetter(sort_key))
# Reverse order if necessary
if not (self.horizontal_ltr if upright else self.vertical_ttb):
yield from reversed(to_yield)
else:
yield from to_yield
def iter_extract_tuples(self, chars):
ordered_chars = chars if self.use_text_flow else self.iter_sort_chars(chars)
grouping_key = itemgetter("upright", *self.extra_attrs)
grouped_chars = itertools.groupby(ordered_chars, grouping_key)
for keyvals, char_group in grouped_chars:
for word_chars in self.iter_chars_to_words(char_group):
yield (self.merge_chars(word_chars), word_chars)
def extract_wordmap(self, chars) -> WordMap:
return WordMap(list(self.iter_extract_tuples(chars)))
def extract_words(self, chars: list) -> list:
words = list(word for word, word_chars in self.iter_extract_tuples(chars))
return words
def extract_words(chars: list, **kwargs) -> list:
return WordExtractor(**kwargs).extract_words(chars)
TEXTMAP_KWARGS = inspect.signature(WordMap.to_textmap).parameters.keys()
WORD_EXTRACTOR_KWARGS = inspect.signature(WordExtractor).parameters.keys()
def chars_to_textmap(chars: list, **kwargs) -> TextMap:
kwargs.update({"presorted": True})
extractor = WordExtractor(
**{k: kwargs[k] for k in WORD_EXTRACTOR_KWARGS if k in kwargs}
)
wordmap = extractor.extract_wordmap(chars)
textmap = wordmap.to_textmap(
**{k: kwargs[k] for k in TEXTMAP_KWARGS if k in kwargs}
)
return textmap
def extract_text(chars: list, **kwargs) -> str:
chars = to_list(chars)
if len(chars) == 0:
return ""
if kwargs.get("layout"):
return chars_to_textmap(chars, **kwargs).as_string
else:
y_tolerance = kwargs.get("y_tolerance", DEFAULT_Y_TOLERANCE)
extractor = WordExtractor(
**{k: kwargs[k] for k in WORD_EXTRACTOR_KWARGS if k in kwargs}
)
words = extractor.extract_words(chars)
if words:
rotation = words[0]["rotation"] # rotation cannot change within a cell
else:
rotation = 0
if rotation == 90:
words.sort(key=lambda w: (w["x1"], -w["top"]))
lines = " ".join([w["text"] for w in words])
elif rotation == 270:
words.sort(key=lambda w: (-w["x1"], w["top"]))
lines = " ".join([w["text"] for w in words])
else:
lines = cluster_objects(words, itemgetter("doctop"), y_tolerance)
lines = "\n".join(" ".join(word["text"] for word in line) for line in lines)
if rotation == 180: # needs extra treatment
lines = "".join([(c if c != "\n" else " ") for c in reversed(lines)])
return lines
def collate_line(
line_chars: list,
tolerance=DEFAULT_X_TOLERANCE,
) -> str:
coll = ""
last_x1 = None
for char in sorted(line_chars, key=itemgetter("x0")):
if (last_x1 is not None) and (char["x0"] > (last_x1 + tolerance)):
coll += " "
last_x1 = char["x1"]
coll += char["text"]
return coll
def dedupe_chars(chars: list, tolerance=1) -> list:
"""
Removes duplicate chars — those sharing the same text, fontname, size,
and positioning (within `tolerance`) as other characters in the set.
"""
key = itemgetter("fontname", "size", "upright", "text")
pos_key = itemgetter("doctop", "x0")
def yield_unique_chars(chars: list):
sorted_chars = sorted(chars, key=key)
for grp, grp_chars in itertools.groupby(sorted_chars, key=key):
for y_cluster in cluster_objects(
list(grp_chars), itemgetter("doctop"), tolerance
):
for x_cluster in cluster_objects(
y_cluster, itemgetter("x0"), tolerance
):
yield sorted(x_cluster, key=pos_key)[0]
deduped = yield_unique_chars(chars)
return sorted(deduped, key=chars.index)
def line_to_edge(line):
edge = dict(line)
edge["orientation"] = "h" if (line["top"] == line["bottom"]) else "v"
return edge
def rect_to_edges(rect) -> list:
top, bottom, left, right = [dict(rect) for x in range(4)]
top.update(
{
"object_type": "rect_edge",
"height": 0,
"y0": rect["y1"],
"bottom": rect["top"],
"orientation": "h",
}
)
bottom.update(
{
"object_type": "rect_edge",
"height": 0,
"y1": rect["y0"],
"top": rect["top"] + rect["height"],
"doctop": rect["doctop"] + rect["height"],
"orientation": "h",
}
)
left.update(
{
"object_type": "rect_edge",
"width": 0,
"x1": rect["x0"],
"orientation": "v",
}
)
right.update(
{
"object_type": "rect_edge",
"width": 0,
"x0": rect["x1"],
"orientation": "v",
}
)
return [top, bottom, left, right]
def curve_to_edges(curve) -> list:
point_pairs = zip(curve["pts"], curve["pts"][1:])
return [
{
"object_type": "curve_edge",
"x0": min(p0[0], p1[0]),
"x1": max(p0[0], p1[0]),
"top": min(p0[1], p1[1]),
"doctop": min(p0[1], p1[1]) + (curve["doctop"] - curve["top"]),
"bottom": max(p0[1], p1[1]),
"width": abs(p0[0] - p1[0]),
"height": abs(p0[1] - p1[1]),
"orientation": "v" if p0[0] == p1[0] else ("h" if p0[1] == p1[1] else None),
}
for p0, p1 in point_pairs
]
def obj_to_edges(obj) -> list:
t = obj["object_type"]
if "_edge" in t:
return [obj]
elif t == "line":
return [line_to_edge(obj)]
else:
return {"rect": rect_to_edges, "curve": curve_to_edges}[t](obj)
def filter_edges(
edges,
orientation=None,
edge_type=None,
min_length=1,
) -> list:
if orientation not in ("v", "h", None):
raise ValueError("Orientation must be 'v' or 'h'")
def test(e) -> bool:
dim = "height" if e["orientation"] == "v" else "width"
et_correct = e["object_type"] == edge_type if edge_type is not None else True
orient_correct = orientation is None or e["orientation"] == orientation
return bool(et_correct and orient_correct and (e[dim] >= min_length))
return list(filter(test, edges))
def cluster_list(xs, tolerance=0) -> list:
if tolerance == 0:
return [[x] for x in sorted(xs)]
if len(xs) < 2:
return [[x] for x in sorted(xs)]
groups = []
xs = list(sorted(xs))
current_group = [xs[0]]
last = xs[0]
for x in xs[1:]:
if x <= (last + tolerance):
current_group.append(x)
else:
groups.append(current_group)
current_group = [x]
last = x
groups.append(current_group)
return groups
def make_cluster_dict(values, tolerance) -> dict:
clusters = cluster_list(list(set(values)), tolerance)
nested_tuples = [
[(val, i) for val in value_cluster] for i, value_cluster in enumerate(clusters)
]
return dict(itertools.chain(*nested_tuples))
def cluster_objects(xs, key_fn, tolerance) -> list:
if not callable(key_fn):
key_fn = itemgetter(key_fn)
values = map(key_fn, xs)
cluster_dict = make_cluster_dict(values, tolerance)
get_0, get_1 = itemgetter(0), itemgetter(1)
cluster_tuples = sorted(((x, cluster_dict.get(key_fn(x))) for x in xs), key=get_1)
grouped = itertools.groupby(cluster_tuples, key=get_1)
return [list(map(get_0, v)) for k, v in grouped]
def move_object(obj, axis: str, value):
assert axis in ("h", "v")
if axis == "h":
new_items = [
("x0", obj["x0"] + value),
("x1", obj["x1"] + value),
]
if axis == "v":
new_items = [
("top", obj["top"] + value),
("bottom", obj["bottom"] + value),
]
if "doctop" in obj:
new_items += [("doctop", obj["doctop"] + value)]
if "y0" in obj:
new_items += [
("y0", obj["y0"] - value),
("y1", obj["y1"] - value),
]
return obj.__class__(tuple(obj.items()) + tuple(new_items))
def snap_objects(objs, attr: str, tolerance) -> list:
axis = {"x0": "h", "x1": "h", "top": "v", "bottom": "v"}[attr]
list_objs = list(objs)
clusters = cluster_objects(list_objs, itemgetter(attr), tolerance)
avgs = [sum(map(itemgetter(attr), cluster)) / len(cluster) for cluster in clusters]
snapped_clusters = [
[move_object(obj, axis, avg - obj[attr]) for obj in cluster]
for cluster, avg in zip(clusters, avgs)
]
return list(itertools.chain(*snapped_clusters))
def snap_edges(
edges,
x_tolerance=DEFAULT_SNAP_TOLERANCE,
y_tolerance=DEFAULT_SNAP_TOLERANCE,
):
"""
Given a list of edges, snap any within `tolerance` pixels of one another
to their positional average.
"""
by_orientation = {"v": [], "h": []}
for e in edges:
by_orientation[e["orientation"]].append(e)
snapped_v = snap_objects(by_orientation["v"], "x0", x_tolerance)
snapped_h = snap_objects(by_orientation["h"], "top", y_tolerance)
return snapped_v + snapped_h
def resize_object(obj, key: str, value):
assert key in ("x0", "x1", "top", "bottom")
old_value = obj[key]
diff = value - old_value
new_items = [
(key, value),
]
if key == "x0":
assert value <= obj["x1"]
new_items.append(("width", obj["x1"] - value))
elif key == "x1":
assert value >= obj["x0"]
new_items.append(("width", value - obj["x0"]))
elif key == "top":
assert value <= obj["bottom"]
new_items.append(("doctop", obj["doctop"] + diff))
new_items.append(("height", obj["height"] - diff))
if "y1" in obj:
new_items.append(("y1", obj["y1"] - diff))
elif key == "bottom":
assert value >= obj["top"]
new_items.append(("height", obj["height"] + diff))
if "y0" in obj:
new_items.append(("y0", obj["y0"] - diff))
return obj.__class__(tuple(obj.items()) + tuple(new_items))
def join_edge_group(edges, orientation: str, tolerance=DEFAULT_JOIN_TOLERANCE):
"""
Given a list of edges along the same infinite line, join those that
are within `tolerance` pixels of one another.
"""
if orientation == "h":
min_prop, max_prop = "x0", "x1"
elif orientation == "v":
min_prop, max_prop = "top", "bottom"
else:
raise ValueError("Orientation must be 'v' or 'h'")
sorted_edges = list(sorted(edges, key=itemgetter(min_prop)))
joined = [sorted_edges[0]]
for e in sorted_edges[1:]:
last = joined[-1]
if e[min_prop] <= (last[max_prop] + tolerance):
if e[max_prop] > last[max_prop]:
# Extend current edge to new extremity
joined[-1] = resize_object(last, max_prop, e[max_prop])
else:
# Edge is separate from previous edges
joined.append(e)
return joined
def merge_edges(
edges,
snap_x_tolerance,
snap_y_tolerance,
join_x_tolerance,
join_y_tolerance,
):
"""
Using the `snap_edges` and `join_edge_group` methods above,
merge a list of edges into a more "seamless" list.
"""
def get_group(edge):
if edge["orientation"] == "h":
return ("h", edge["top"])
else:
return ("v", edge["x0"])
if snap_x_tolerance > 0 or snap_y_tolerance > 0:
edges = snap_edges(edges, snap_x_tolerance, snap_y_tolerance)
_sorted = sorted(edges, key=get_group)
edge_groups = itertools.groupby(_sorted, key=get_group)
edge_gen = (
join_edge_group(
items, k[0], (join_x_tolerance if k[0] == "h" else join_y_tolerance)
)
for k, items in edge_groups
)
edges = list(itertools.chain(*edge_gen))
return edges
def bbox_to_rect(bbox) -> dict:
"""
Return the rectangle (i.e a dict with keys "x0", "top", "x1",
"bottom") for an object.
"""
return {"x0": bbox[0], "top": bbox[1], "x1": bbox[2], "bottom": bbox[3]}
def objects_to_rect(objects) -> dict:
"""
Given an iterable of objects, return the smallest rectangle (i.e. a
dict with "x0", "top", "x1", and "bottom" keys) that contains them
all.
"""
return bbox_to_rect(objects_to_bbox(objects))
def merge_bboxes(bboxes):
"""
Given an iterable of bounding boxes, return the smallest bounding box
that contains them all.
"""
x0, top, x1, bottom = zip(*bboxes)
return (min(x0), min(top), max(x1), max(bottom))
def objects_to_bbox(objects):
"""
Given an iterable of objects, return the smallest bounding box that
contains them all.
"""
return merge_bboxes(map(bbox_getter, objects))
def words_to_edges_h(words, word_threshold: int = DEFAULT_MIN_WORDS_HORIZONTAL):
"""
Find (imaginary) horizontal lines that connect the tops
of at least `word_threshold` words.
"""
by_top = cluster_objects(words, itemgetter("top"), 1)
large_clusters = filter(lambda x: len(x) >= word_threshold, by_top)
rects = list(map(objects_to_rect, large_clusters))
if len(rects) == 0:
return []
min_x0 = min(map(itemgetter("x0"), rects))
max_x1 = max(map(itemgetter("x1"), rects))
edges = []
for r in rects:
edges += [
# Top of text
{
"x0": min_x0,
"x1": max_x1,
"top": r["top"],
"bottom": r["top"],
"width": max_x1 - min_x0,
"orientation": "h",
},
# For each detected row, we also add the 'bottom' line. This will
# generate extra edges, (some will be redundant with the next row
# 'top' line), but this catches the last row of every table.
{
"x0": min_x0,
"x1": max_x1,
"top": r["bottom"],
"bottom": r["bottom"],
"width": max_x1 - min_x0,
"orientation": "h",
},
]
return edges
def get_bbox_overlap(a, b):
a_left, a_top, a_right, a_bottom = a
b_left, b_top, b_right, b_bottom = b
o_left = max(a_left, b_left)
o_right = min(a_right, b_right)
o_bottom = min(a_bottom, b_bottom)
o_top = max(a_top, b_top)
o_width = o_right - o_left
o_height = o_bottom - o_top
if o_height >= 0 and o_width >= 0 and o_height + o_width > 0:
return (o_left, o_top, o_right, o_bottom)
else:
return None
def words_to_edges_v(words, word_threshold: int = DEFAULT_MIN_WORDS_VERTICAL):
"""
Find (imaginary) vertical lines that connect the left, right, or
center of at least `word_threshold` words.
"""
# Find words that share the same left, right, or centerpoints
by_x0 = cluster_objects(words, itemgetter("x0"), 1)
by_x1 = cluster_objects(words, itemgetter("x1"), 1)
def get_center(word):
return float(word["x0"] + word["x1"]) / 2
by_center = cluster_objects(words, get_center, 1)
clusters = by_x0 + by_x1 + by_center
# Find the points that align with the most words
sorted_clusters = sorted(clusters, key=lambda x: -len(x))
large_clusters = filter(lambda x: len(x) >= word_threshold, sorted_clusters)
# For each of those points, find the bboxes fitting all matching words
bboxes = list(map(objects_to_bbox, large_clusters))
# Iterate through those bboxes, condensing overlapping bboxes
condensed_bboxes = []
for bbox in bboxes:
overlap = any(get_bbox_overlap(bbox, c) for c in condensed_bboxes)
if not overlap:
condensed_bboxes.append(bbox)
if not condensed_bboxes:
return []
condensed_rects = map(bbox_to_rect, condensed_bboxes)
sorted_rects = list(sorted(condensed_rects, key=itemgetter("x0")))
max_x1 = max(map(itemgetter("x1"), sorted_rects))
min_top = min(map(itemgetter("top"), sorted_rects))
max_bottom = max(map(itemgetter("bottom"), sorted_rects))
return [
{
"x0": b["x0"],
"x1": b["x0"],
"top": min_top,
"bottom": max_bottom,
"height": max_bottom - min_top,
"orientation": "v",
}
for b in sorted_rects
] + [
{
"x0": max_x1,
"x1": max_x1,
"top": min_top,
"bottom": max_bottom,
"height": max_bottom - min_top,
"orientation": "v",
}
]
def edges_to_intersections(edges, x_tolerance=1, y_tolerance=1) -> dict:
"""
Given a list of edges, return the points at which they intersect
within `tolerance` pixels.
"""
intersections = {}
v_edges, h_edges = [
list(filter(lambda x: x["orientation"] == o, edges)) for o in ("v", "h")
]
for v in sorted(v_edges, key=itemgetter("x0", "top")):
for h in sorted(h_edges, key=itemgetter("top", "x0")):
if (
(v["top"] <= (h["top"] + y_tolerance))
and (v["bottom"] >= (h["top"] - y_tolerance))
and (v["x0"] >= (h["x0"] - x_tolerance))
and (v["x0"] <= (h["x1"] + x_tolerance))
):
vertex = (v["x0"], h["top"])
if vertex not in intersections:
intersections[vertex] = {"v": [], "h": []}
intersections[vertex]["v"].append(v)
intersections[vertex]["h"].append(h)
return intersections
def obj_to_bbox(obj):
"""
Return the bounding box for an object.
"""
return bbox_getter(obj)
def intersections_to_cells(intersections):
"""
Given a list of points (`intersections`), return all rectangular "cells"
that those points describe.
`intersections` should be a dictionary with (x0, top) tuples as keys,
and a list of edge objects as values. The edge objects should correspond
to the edges that touch the intersection.
"""
def edge_connects(p1, p2) -> bool:
def edges_to_set(edges):
return set(map(obj_to_bbox, edges))
if p1[0] == p2[0]:
common = edges_to_set(intersections[p1]["v"]).intersection(
edges_to_set(intersections[p2]["v"])
)
if len(common):
return True
if p1[1] == p2[1]:
common = edges_to_set(intersections[p1]["h"]).intersection(
edges_to_set(intersections[p2]["h"])
)
if len(common):
return True
return False
points = list(sorted(intersections.keys()))
n_points = len(points)
def find_smallest_cell(points, i: int):
if i == n_points - 1:
return None
pt = points[i]
rest = points[i + 1 :]
# Get all the points directly below and directly right
below = [x for x in rest if x[0] == pt[0]]
right = [x for x in rest if x[1] == pt[1]]
for below_pt in below:
if not edge_connects(pt, below_pt):
continue
for right_pt in right:
if not edge_connects(pt, right_pt):
continue
bottom_right = (right_pt[0], below_pt[1])
if (
(bottom_right in intersections)
and edge_connects(bottom_right, right_pt)
and edge_connects(bottom_right, below_pt)
):
return (pt[0], pt[1], bottom_right[0], bottom_right[1])
return None
cell_gen = (find_smallest_cell(points, i) for i in range(len(points)))
return list(filter(None, cell_gen))
def cells_to_tables(page, cells) -> list:
"""
Given a list of bounding boxes (`cells`), return a list of tables that
hold those cells most simply (and contiguously).
"""
def bbox_to_corners(bbox) -> tuple:
x0, top, x1, bottom = bbox
return ((x0, top), (x0, bottom), (x1, top), (x1, bottom))
remaining_cells = list(cells)
# Iterate through the cells found above, and assign them
# to contiguous tables
current_corners = set()
current_cells = []
tables = []
while len(remaining_cells):
initial_cell_count = len(current_cells)
for cell in list(remaining_cells):
cell_corners = bbox_to_corners(cell)
# If we're just starting a table ...
if len(current_cells) == 0:
# ... immediately assign it to the empty group
current_corners |= set(cell_corners)
current_cells.append(cell)
remaining_cells.remove(cell)
else:
# How many corners does this table share with the current group?
corner_count = sum(c in current_corners for c in cell_corners)
# If touching on at least one corner...
if corner_count > 0:
# ... assign it to the current group
current_corners |= set(cell_corners)
current_cells.append(cell)
remaining_cells.remove(cell)
# If this iteration did not find any more cells to append...
if len(current_cells) == initial_cell_count:
# ... start a new cell group
tables.append(list(current_cells))
current_corners.clear()
current_cells.clear()
# Once we have exhausting the list of cells ...
# ... and we have a cell group that has not been stored
if len(current_cells):
# ... store it.
tables.append(list(current_cells))
# PyMuPDF modification:
# Remove tables without text or having only 1 column
for i in range(len(tables) - 1, -1, -1):
r = pymupdf.EMPTY_RECT()
x1_vals = set()
x0_vals = set()
for c in tables[i]:
r |= c
x1_vals.add(c[2])
x0_vals.add(c[0])
if (
len(x1_vals) < 2
or len(x0_vals) < 2
or white_spaces.issuperset(
page.get_textbox(
r,
textpage=TEXTPAGE,
)
)
):
del tables[i]
# Sort the tables top-to-bottom-left-to-right based on the value of the
# topmost-and-then-leftmost coordinate of a table.
_sorted = sorted(tables, key=lambda t: min((c[1], c[0]) for c in t))
return _sorted
| WordExtractor |
python | Textualize__textual | tests/text_area/test_selection_bindings.py | {
"start": 316,
"end": 11197
} | class ____(App):
def __init__(self, read_only: bool = False):
super().__init__()
self.read_only = read_only
def compose(self) -> ComposeResult:
yield TextArea(TEXT, show_line_numbers=True, read_only=self.read_only)
@pytest.fixture(params=[True, False])
async def app(request):
"""Each test that receives an `app` will execute twice.
Once with read_only=True, and once with read_only=False.
"""
return TextAreaApp(read_only=request.param)
async def test_mouse_click(app: TextAreaApp):
"""When you click the TextArea, the cursor moves to the expected location."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
await pilot.click(TextArea, Offset(x=5, y=2))
assert text_area.selection == Selection.cursor((1, 0))
async def test_mouse_click_clamp_from_right(app: TextAreaApp):
"""When you click to the right of the document bounds, the cursor is clamped
to within the document bounds."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
await pilot.click(TextArea, Offset(x=8, y=20))
assert text_area.selection == Selection.cursor((4, 0))
async def test_mouse_click_gutter_clamp(app: TextAreaApp):
"""When you click the gutter, it selects the start of the line."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
await pilot.click(TextArea, Offset(x=0, y=3))
assert text_area.selection == Selection.cursor((2, 0))
async def test_cursor_movement_basic():
app = TextAreaApp()
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.load_text("01234567\n012345\n0123456789")
await pilot.press("right")
assert text_area.selection == Selection.cursor((0, 1))
await pilot.press("down")
assert text_area.selection == Selection.cursor((1, 1))
await pilot.press("left")
assert text_area.selection == Selection.cursor((1, 0))
await pilot.press("up")
assert text_area.selection == Selection.cursor((0, 0))
async def test_cursor_selection_right(app: TextAreaApp):
"""When you press shift+right the selection is updated correctly."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
await pilot.press(*["shift+right"] * 3)
assert text_area.selection == Selection((0, 0), (0, 3))
async def test_cursor_selection_right_to_previous_line(app: TextAreaApp):
"""When you press shift+right resulting in the cursor moving to the next line,
the selection is updated correctly."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.selection = Selection.cursor((0, 15))
await pilot.press(*["shift+right"] * 4)
assert text_area.selection == Selection((0, 15), (1, 2))
async def test_cursor_selection_left(app: TextAreaApp):
"""When you press shift+left the selection is updated correctly."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.selection = Selection.cursor((2, 5))
await pilot.press(*["shift+left"] * 3)
assert text_area.selection == Selection((2, 5), (2, 2))
async def test_cursor_selection_left_to_previous_line(app: TextAreaApp):
"""When you press shift+left resulting in the cursor moving back to the previous line,
the selection is updated correctly."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.selection = Selection.cursor((2, 2))
await pilot.press(*["shift+left"] * 3)
# The cursor jumps up to the end of the line above.
end_of_previous_line = len(TEXT.splitlines()[1])
assert text_area.selection == Selection((2, 2), (1, end_of_previous_line))
async def test_cursor_selection_up(app: TextAreaApp):
"""When you press shift+up the selection is updated correctly."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.move_cursor((2, 3))
await pilot.press("shift+up")
assert text_area.selection == Selection((2, 3), (1, 3))
async def test_cursor_selection_up_when_cursor_on_first_line(app: TextAreaApp):
"""When you press shift+up the on the first line, it selects to the start."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.move_cursor((0, 4))
await pilot.press("shift+up")
assert text_area.selection == Selection((0, 4), (0, 0))
await pilot.press("shift+up")
assert text_area.selection == Selection((0, 4), (0, 0))
async def test_cursor_selection_down(app: TextAreaApp):
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.move_cursor((2, 5))
await pilot.press("shift+down")
assert text_area.selection == Selection((2, 5), (3, 5))
async def test_cursor_selection_down_when_cursor_on_last_line(app: TextAreaApp):
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.load_text("ABCDEF\nGHIJK")
text_area.move_cursor((1, 2))
await pilot.press("shift+down")
assert text_area.selection == Selection((1, 2), (1, 5))
await pilot.press("shift+down")
assert text_area.selection == Selection((1, 2), (1, 5))
async def test_cursor_word_right(app: TextAreaApp):
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.load_text("ABC DEF\nGHIJK")
await pilot.press("ctrl+right")
assert text_area.selection == Selection.cursor((0, 3))
async def test_cursor_word_right_select(app: TextAreaApp):
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.load_text("ABC DEF\nGHIJK")
await pilot.press("ctrl+shift+right")
assert text_area.selection == Selection((0, 0), (0, 3))
async def test_cursor_word_left(app: TextAreaApp):
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.load_text("ABC DEF\nGHIJK")
text_area.move_cursor((0, 7))
await pilot.press("ctrl+left")
assert text_area.selection == Selection.cursor((0, 4))
async def test_cursor_word_left_select(app: TextAreaApp):
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.load_text("ABC DEF\nGHIJK")
text_area.move_cursor((0, 7))
await pilot.press("ctrl+shift+left")
assert text_area.selection == Selection((0, 7), (0, 4))
@pytest.mark.parametrize("key", ["end", "ctrl+e"])
async def test_cursor_to_line_end(key, app: TextAreaApp):
"""You can use the keyboard to jump the cursor to the end of the current line."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.selection = Selection.cursor((2, 2))
await pilot.press(key)
eol_index = len(TEXT.splitlines()[2])
assert text_area.cursor_location == (2, eol_index)
assert text_area.selection.is_empty
@pytest.mark.parametrize("key", ["home", "ctrl+a"])
async def test_cursor_to_line_home_basic_behaviour(key, app: TextAreaApp):
"""You can use the keyboard to jump the cursor to the start of the current line."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.selection = Selection.cursor((2, 2))
await pilot.press(key)
assert text_area.cursor_location == (2, 0)
assert text_area.selection.is_empty
@pytest.mark.parametrize(
"cursor_start,cursor_destination",
[
((0, 0), (0, 4)),
((0, 2), (0, 0)),
((0, 4), (0, 0)),
((0, 5), (0, 4)),
((0, 9), (0, 4)),
((0, 15), (0, 4)),
],
)
async def test_cursor_line_home_smart_home(
cursor_start, cursor_destination, app: TextAreaApp
):
"""If the line begins with whitespace, pressing home firstly goes
to the start of the (non-whitespace) content. Pressing it again takes you to column
0. If you press it again, it goes back to the first non-whitespace column."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.load_text(" hello world")
text_area.move_cursor(cursor_start)
await pilot.press("home")
assert text_area.selection == Selection.cursor(cursor_destination)
async def test_cursor_page_down(app: TextAreaApp):
"""Pagedown moves the cursor down 1 page, retaining column index."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.load_text("XXX\n" * 200)
text_area.selection = Selection.cursor((0, 1))
await pilot.press("pagedown")
margin = 2
assert text_area.selection == Selection.cursor(
(app.size.height - margin, 1)
)
async def test_cursor_page_up(app: TextAreaApp):
"""Pageup moves the cursor up 1 page, retaining column index."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.load_text("XXX\n" * 200)
text_area.selection = Selection.cursor((100, 1))
await pilot.press("pageup")
margin = 2
assert text_area.selection == Selection.cursor(
(100 - app.size.height + margin, 1)
)
async def test_cursor_vertical_movement_visual_alignment_snapping(app: TextAreaApp):
"""When you move the cursor vertically, it should stay vertically
aligned even when double-width characters are used."""
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.text = "こんにちは\n012345"
text_area.move_cursor((1, 3), record_width=True)
# The '3' is aligned with ん at (0, 1)
# こんにちは
# 012345
# Pressing `up` takes us from (1, 3) to (0, 1) because record_width=True.
await pilot.press("up")
assert text_area.selection == Selection.cursor((0, 1))
# Pressing `down` takes us from (0, 1) to (1, 3)
await pilot.press("down")
assert text_area.selection == Selection.cursor((1, 3))
async def test_select_line_binding(app: TextAreaApp):
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
text_area.move_cursor((2, 2))
await pilot.press("f6")
assert text_area.selection == Selection((2, 0), (2, 56))
async def test_select_all_binding(app: TextAreaApp):
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
await pilot.press("f7")
assert text_area.selection == Selection((0, 0), (4, 0))
| TextAreaApp |
python | django-haystack__django-haystack | test_haystack/whoosh_tests/test_whoosh_query.py | {
"start": 266,
"end": 7513
} | class ____(WhooshTestCase):
def setUp(self):
super().setUp()
self.sq = connections["whoosh"].get_query()
def test_build_query_all(self):
self.assertEqual(self.sq.build_query(), "*")
def test_build_query_single_word(self):
self.sq.add_filter(SQ(content="hello"))
self.assertEqual(self.sq.build_query(), "(hello)")
def test_build_query_multiple_words_and(self):
self.sq.add_filter(SQ(content="hello"))
self.sq.add_filter(SQ(content="world"))
self.assertEqual(self.sq.build_query(), "((hello) AND (world))")
def test_build_query_multiple_words_not(self):
self.sq.add_filter(~SQ(content="hello"))
self.sq.add_filter(~SQ(content="world"))
self.assertEqual(self.sq.build_query(), "(NOT ((hello)) AND NOT ((world)))")
def test_build_query_multiple_words_or(self):
self.sq.add_filter(SQ(content="hello") | SQ(content="world"))
self.assertEqual(self.sq.build_query(), "((hello) OR (world))")
def test_build_query_multiple_words_mixed(self):
self.sq.add_filter(SQ(content="why") | SQ(content="hello"))
self.sq.add_filter(~SQ(content="world"))
self.assertEqual(
self.sq.build_query(), "(((why) OR (hello)) AND NOT ((world)))"
)
def test_build_query_phrase(self):
self.sq.add_filter(SQ(content="hello world"))
self.assertEqual(self.sq.build_query(), "(hello AND world)")
self.sq.add_filter(SQ(content__exact="hello world"))
self.assertEqual(
self.sq.build_query(), '((hello AND world) AND ("hello world"))'
)
def test_build_query_boost(self):
self.sq.add_filter(SQ(content="hello"))
self.sq.add_boost("world", 5)
self.assertEqual(self.sq.build_query(), "(hello) world^5")
def test_correct_exact(self):
self.sq.add_filter(SQ(content=Exact("hello world")))
self.assertEqual(self.sq.build_query(), '("hello world")')
def test_build_query_multiple_filter_types(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(pub_date__lte=datetime.datetime(2009, 2, 10, 1, 59)))
self.sq.add_filter(SQ(author__gt="daniel"))
self.sq.add_filter(SQ(created__lt=datetime.datetime(2009, 2, 12, 12, 13)))
self.sq.add_filter(SQ(title__gte="B"))
self.sq.add_filter(SQ(id__in=[1, 2, 3]))
self.sq.add_filter(SQ(rating__range=[3, 5]))
self.assertEqual(
self.sq.build_query(),
"((why) AND pub_date:([to 20090210015900]) AND author:({daniel to}) AND created:({to 20090212121300}) AND title:([B to]) AND id:(1 OR 2 OR 3) AND rating:([3 to 5]))",
)
def test_build_query_in_filter_multiple_words(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__in=["A Famous Paper", "An Infamous Article"]))
self.assertEqual(
self.sq.build_query(),
'((why) AND title:("A Famous Paper" OR "An Infamous Article"))',
)
def test_build_query_in_filter_datetime(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(pub_date__in=[datetime.datetime(2009, 7, 6, 1, 56, 21)]))
self.assertEqual(self.sq.build_query(), "((why) AND pub_date:(20090706015621))")
def test_build_query_in_with_set(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__in=set(["A Famous Paper", "An Infamous Article"])))
query = self.sq.build_query()
self.assertTrue("(why)" in query)
# Because ordering in Py3 is now random.
if 'title:("A ' in query:
self.assertTrue(
'title:("A Famous Paper" OR "An Infamous Article")' in query
)
else:
self.assertTrue(
'title:("An Infamous Article" OR "A Famous Paper")' in query
)
def test_build_query_wildcard_filter_types(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__startswith="haystack"))
self.assertEqual(self.sq.build_query(), "((why) AND title:(haystack*))")
def test_build_query_fuzzy_filter_types(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__fuzzy="haystack"))
self.assertEqual(self.sq.build_query(), "((why) AND title:(haystack~2/3))")
def test_build_query_with_contains(self):
self.sq.add_filter(SQ(content="circular"))
self.sq.add_filter(SQ(title__contains="haystack"))
self.assertEqual(self.sq.build_query(), "((circular) AND title:(*haystack*))")
def test_build_query_with_endswith(self):
self.sq.add_filter(SQ(content="circular"))
self.sq.add_filter(SQ(title__endswith="haystack"))
self.assertEqual(self.sq.build_query(), "((circular) AND title:(*haystack))")
def test_clean(self):
self.assertEqual(self.sq.clean("hello world"), "hello world")
self.assertEqual(self.sq.clean("hello AND world"), "hello and world")
self.assertEqual(
self.sq.clean(
'hello AND OR NOT TO + - && || ! ( ) { } [ ] ^ " ~ * ? : \ world'
),
"hello and or not to '+' '-' '&&' '||' '!' '(' ')' '{' '}' '[' ']' '^' '\"' '~' '*' '?' ':' '\\' world",
)
self.assertEqual(
self.sq.clean("so please NOTe i am in a bAND and bORed"),
"so please NOTe i am in a bAND and bORed",
)
def test_build_query_with_models(self):
self.sq.add_filter(SQ(content="hello"))
self.sq.add_model(MockModel)
self.assertEqual(self.sq.build_query(), "(hello)")
self.sq.add_model(AnotherMockModel)
self.assertEqual(self.sq.build_query(), "(hello)")
def test_build_query_with_datetime(self):
self.sq.add_filter(SQ(pub_date=datetime.datetime(2009, 5, 9, 16, 20)))
self.assertEqual(self.sq.build_query(), "pub_date:(20090509162000)")
def test_build_query_with_sequence_and_filter_not_in(self):
self.sq.add_filter(SQ(id=[1, 2, 3]))
self.assertEqual(self.sq.build_query(), "id:(1,2,3)")
def test_set_result_class(self):
# Assert that we're defaulting to ``SearchResult``.
self.assertTrue(issubclass(self.sq.result_class, SearchResult))
# Custom class.
class IttyBittyResult:
pass
self.sq.set_result_class(IttyBittyResult)
self.assertTrue(issubclass(self.sq.result_class, IttyBittyResult))
# Reset to default.
self.sq.set_result_class(None)
self.assertTrue(issubclass(self.sq.result_class, SearchResult))
def test_in_filter_values_list(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__in=MockModel.objects.values_list("id", flat=True)))
self.assertEqual(self.sq.build_query(), "((why) AND title:(1 OR 2 OR 3))")
def test_narrow_sq(self):
sqs = SearchQuerySet(using="whoosh").narrow(SQ(foo="moof"))
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.narrow_queries), 1)
self.assertEqual(sqs.query.narrow_queries.pop(), "foo:(moof)")
| WhooshSearchQueryTestCase |
python | eventlet__eventlet | eventlet/event.py | {
"start": 100,
"end": 192
} | class ____:
def __repr__(self):
return 'NOT_USED'
NOT_USED = NOT_USED()
| NOT_USED |
python | modin-project__modin | modin/core/io/column_stores/hdf_dispatcher.py | {
"start": 965,
"end": 3478
} | class ____(ColumnStoreDispatcher): # pragma: no cover
"""
Class handles utils for reading hdf data.
Inherits some common for columnar store files util functions from
`ColumnStoreDispatcher` class.
"""
@classmethod
def _validate_hdf_format(cls, path_or_buf):
"""
Validate `path_or_buf` and then return `table_type` parameter of store group attribute.
Parameters
----------
path_or_buf : str, buffer or path object
Path to the file to open, or an open :class:`pandas.HDFStore` object.
Returns
-------
str
`table_type` parameter of store group attribute.
"""
s = pandas.HDFStore(path_or_buf)
groups = s.groups()
if len(groups) == 0:
raise ValueError("No dataset in HDF5 file.")
candidate_only_group = groups[0]
format = getattr(candidate_only_group._v_attrs, "table_type", None)
s.close()
return format
@classmethod
def _read(cls, path_or_buf, **kwargs):
"""
Load an h5 file from the file path or buffer, returning a query compiler.
Parameters
----------
path_or_buf : str, buffer or path object
Path to the file to open, or an open :class:`pandas.HDFStore` object.
**kwargs : dict
Pass into pandas.read_hdf function.
Returns
-------
BaseQueryCompiler
Query compiler with imported data for further processing.
"""
if cls._validate_hdf_format(path_or_buf=path_or_buf) is None:
return cls.single_worker_read(
path_or_buf,
reason="File format seems to be `fixed`. For better distribution consider "
+ "saving the file in `table` format. df.to_hdf(format=`table`).",
**kwargs
)
columns = kwargs.pop("columns", None)
# Have to do this because of Dask's keyword arguments
kwargs["_key"] = kwargs.pop("key", None)
if not columns:
start = kwargs.pop("start", None)
stop = kwargs.pop("stop", None)
empty_pd_df = pandas.read_hdf(path_or_buf, start=0, stop=0, **kwargs)
if start is not None:
kwargs["start"] = start
if stop is not None:
kwargs["stop"] = stop
columns = empty_pd_df.columns
return cls.build_query_compiler(path_or_buf, columns, **kwargs)
| HDFDispatcher |
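The dispatcher above fast-paths on whether the HDF5 file was written in table format; a small pandas-only sketch of that distinction (an assumption for illustration: PyTables is installed and the file names are invented) could look like this:
import pandas as pd
df = pd.DataFrame({"a": [1, 2, 3]})
df.to_hdf("fixed.h5", key="df")                  # default "fixed" format
df.to_hdf("table.h5", key="df", format="table")  # "table" format distributes better
# Mirrors _validate_hdf_format above: a table-format group carries a
# table_type attribute, while a fixed-format group yields None here.
with pd.HDFStore("table.h5") as store:
    group = store.groups()[0]
    print(getattr(group._v_attrs, "table_type", None))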
python | tornadoweb__tornado | tornado/test/netutil_test.py | {
"start": 563,
"end": 1377
} | class ____(AsyncTestCase):
resolver = None # type: typing.Any
@gen_test
def test_localhost(self):
addrinfo = yield self.resolver.resolve("localhost", 80, socket.AF_UNSPEC)
# Most of the time localhost resolves to either the ipv4 loopback
# address alone, or ipv4+ipv6. But some versions of pycares will only
# return the ipv6 version, so we have to check for either one alone.
self.assertTrue(
((socket.AF_INET, ("127.0.0.1", 80)) in addrinfo)
or ((socket.AF_INET6, ("::1", 80)) in addrinfo),
f"loopback address not found in {addrinfo}",
)
# It is impossible to quickly and consistently generate an error in name
# resolution, so test this case separately, using mocks as needed.
@abstract_base_test
| _ResolverTestMixin |
python | walkccc__LeetCode | solutions/1106. Parsing A Boolean Expression/1106.py | {
"start": 0,
"end": 868
} | class ____:
def parseBoolExpr(self, expression: str) -> bool:
def dfs(s: int, e: int) -> bool:
if s == e:
return True if expression[s] == 't' else False
exps = []
layer = 0
for i in range(s, e + 1):
c = expression[i]
if layer == 0 and c in '!&|':
op = c
elif c == '(':
layer += 1
if layer == 1:
left = i + 1
elif c == ')':
layer -= 1
if layer == 0:
exps.append(dfs(left, i - 1))
elif c == ',' and layer == 1:
exps.append(dfs(left, i - 1))
left = i + 1
if op == '|':
return functools.reduce(operator.or_, exps)
if op == '&':
return functools.reduce(operator.and_, exps)
if op == '!':
return not exps[0]
return dfs(0, len(expression) - 1)
| Solution |
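A hypothetical usage sketch for the parser above (assuming the Solution class is defined in a module that imports functools and operator, which its reduce calls rely on):
import functools, operator  # required by the reduce calls in parseBoolExpr
s = Solution()
print(s.parseBoolExpr("!(f)"))              # True
print(s.parseBoolExpr("|(f,t)"))            # True
print(s.parseBoolExpr("&(t,f)"))            # False
print(s.parseBoolExpr("|(&(t,t,f),!(t))"))  # False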
python | spack__spack | lib/spack/spack/repo.py | {
"start": 16008,
"end": 16442
} | class ____(Indexer):
"""Lifecycle methods for a TagIndex on a Repo."""
def _create(self) -> spack.tag.TagIndex:
return spack.tag.TagIndex()
def read(self, stream):
self.index = spack.tag.TagIndex.from_json(stream)
def update(self, pkg_fullname):
self.index.update_package(pkg_fullname.split(".")[-1], self.repository)
def write(self, stream):
self.index.to_json(stream)
| TagIndexer |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 56037,
"end": 56337
} | class ____(_TestOnlySetsInBinaryOps, __TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = operator.add
self.otherIsIterable = False
super().setUp()
#------------------------------------------------------------------------------
| TestOnlySetsOperator |
python | ethereum__web3.py | web3/_utils/events.py | {
"start": 14479,
"end": 15557
} | class ____(ABC):
_match_values: tuple[Any, ...] = None
_immutable = False
def __init__(self, arg_type: TypeStr) -> None:
self.arg_type = arg_type
def match_single(self, value: Any) -> None:
if self._immutable:
raise Web3ValueError(
"Setting values is forbidden after filter is deployed."
)
if self._match_values is None:
self._match_values = _normalize_match_values((value,))
else:
raise Web3ValueError("An argument match value/s has already been set.")
def match_any(self, *values: Collection[Any]) -> None:
if self._immutable:
raise Web3ValueError(
"Setting values is forbidden after filter is deployed."
)
if self._match_values is None:
self._match_values = _normalize_match_values(values)
else:
raise Web3ValueError("An argument match value/s has already been set.")
@property
@abstractmethod
def match_values(self) -> None:
pass
| BaseArgumentFilter |
python | huggingface__transformers | src/transformers/models/clip/modeling_clip.py | {
"start": 14905,
"end": 16063
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Union[CLIPVisionConfig, CLIPTextConfig]):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = CLIPAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = CLIPMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
**kwargs: Unpack[TransformersKwargs],
) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| CLIPEncoderLayer |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/endpoints/authorization.py | {
"start": 357,
"end": 4584
} | class ____(BaseEndpoint):
"""Authorization endpoint - used by the client to obtain authorization
from the resource owner via user-agent redirection.
The authorization endpoint is used to interact with the resource
owner and obtain an authorization grant. The authorization server
MUST first verify the identity of the resource owner. The way in
which the authorization server authenticates the resource owner (e.g.
username and password login, session cookies) is beyond the scope of
this specification.
The endpoint URI MAY include an "application/x-www-form-urlencoded"
formatted (per `Appendix B`_) query component,
which MUST be retained when adding additional query parameters. The
endpoint URI MUST NOT include a fragment component::
https://example.com/path?query=component # OK
https://example.com/path?query=component#fragment # Not OK
Since requests to the authorization endpoint result in user
authentication and the transmission of clear-text credentials (in the
HTTP response), the authorization server MUST require the use of TLS
as described in Section 1.6 when sending requests to the
authorization endpoint::
# We will deny any request which URI schema is not with https
The authorization server MUST support the use of the HTTP "GET"
method [RFC2616] for the authorization endpoint, and MAY support the
use of the "POST" method as well::
# HTTP method is currently not enforced
Parameters sent without a value MUST be treated as if they were
omitted from the request. The authorization server MUST ignore
unrecognized request parameters. Request and response parameters
MUST NOT be included more than once::
# Enforced through the design of oauthlib.common.Request
.. _`Appendix B`: https://tools.ietf.org/html/rfc6749#appendix-B
"""
def __init__(self, default_response_type, default_token_type,
response_types):
BaseEndpoint.__init__(self)
self._response_types = response_types
self._default_response_type = default_response_type
self._default_token_type = default_token_type
@property
def response_types(self):
return self._response_types
@property
def default_response_type(self):
return self._default_response_type
@property
def default_response_type_handler(self):
return self.response_types.get(self.default_response_type)
@property
def default_token_type(self):
return self._default_token_type
@catch_errors_and_unavailability
def create_authorization_response(self, uri, http_method='GET', body=None,
headers=None, scopes=None, credentials=None):
"""Extract response_type and route to the designated handler."""
request = Request(
uri, http_method=http_method, body=body, headers=headers)
request.scopes = scopes
# TODO: decide whether this should be a required argument
request.user = None # TODO: explain this in docs
for k, v in (credentials or {}).items():
setattr(request, k, v)
response_type_handler = self.response_types.get(
request.response_type, self.default_response_type_handler)
log.debug('Dispatching response_type %s request to %r.',
request.response_type, response_type_handler)
return response_type_handler.create_authorization_response(
request, self.default_token_type)
@catch_errors_and_unavailability
def validate_authorization_request(self, uri, http_method='GET', body=None,
headers=None):
"""Extract response_type and route to the designated handler."""
request = Request(
uri, http_method=http_method, body=body, headers=headers)
request.scopes = utils.scope_to_list(request.scope)
response_type_handler = self.response_types.get(
request.response_type, self.default_response_type_handler)
return response_type_handler.validate_authorization_request(request)
| AuthorizationEndpoint |
python | numpy__numpy | numpy/lib/tests/test_shape_base.py | {
"start": 22595,
"end": 25306
} | class ____:
def test_basic(self):
# Using 0-dimensional ndarray
a = np.array(1)
b = np.array([[1, 2], [3, 4]])
k = np.array([[1, 2], [3, 4]])
assert_array_equal(np.kron(a, b), k)
a = np.array([[1, 2], [3, 4]])
b = np.array(1)
assert_array_equal(np.kron(a, b), k)
# Using 1-dimensional ndarray
a = np.array([3])
b = np.array([[1, 2], [3, 4]])
k = np.array([[3, 6], [9, 12]])
assert_array_equal(np.kron(a, b), k)
a = np.array([[1, 2], [3, 4]])
b = np.array([3])
assert_array_equal(np.kron(a, b), k)
# Using 3-dimensional ndarray
a = np.array([[[1]], [[2]]])
b = np.array([[1, 2], [3, 4]])
k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]])
assert_array_equal(np.kron(a, b), k)
a = np.array([[1, 2], [3, 4]])
b = np.array([[[1]], [[2]]])
k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]])
assert_array_equal(np.kron(a, b), k)
def test_return_type(self):
class myarray(np.ndarray):
__array_priority__ = 1.0
a = np.ones([2, 2])
ma = myarray(a.shape, a.dtype, a.data)
assert_equal(type(kron(a, a)), np.ndarray)
assert_equal(type(kron(ma, ma)), myarray)
assert_equal(type(kron(a, ma)), myarray)
assert_equal(type(kron(ma, a)), myarray)
@pytest.mark.parametrize(
"array_class", [np.asarray, np.asmatrix]
)
def test_kron_smoke(self, array_class):
a = array_class(np.ones([3, 3]))
b = array_class(np.ones([3, 3]))
k = array_class(np.ones([9, 9]))
assert_array_equal(np.kron(a, b), k)
def test_kron_ma(self):
x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]])
k = np.ma.array(np.diag([1, 4, 4, 16]),
mask=~np.array(np.identity(4), dtype=bool))
assert_array_equal(k, np.kron(x, x))
@pytest.mark.parametrize(
"shape_a,shape_b", [
((1, 1), (1, 1)),
((1, 2, 3), (4, 5, 6)),
((2, 2), (2, 2, 2)),
((1, 0), (1, 1)),
((2, 0, 2), (2, 2)),
((2, 0, 0, 2), (2, 0, 2)),
])
def test_kron_shape(self, shape_a, shape_b):
a = np.ones(shape_a)
b = np.ones(shape_b)
normalised_shape_a = (1,) * max(0, len(shape_b) - len(shape_a)) + shape_a
normalised_shape_b = (1,) * max(0, len(shape_a) - len(shape_b)) + shape_b
expected_shape = np.multiply(normalised_shape_a, normalised_shape_b)
k = np.kron(a, b)
assert np.array_equal(
k.shape, expected_shape), "Unexpected shape from kron"
| TestKron |
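For reference, a standalone illustration of the Kronecker-product behaviour the tests above assert (plain NumPy; the matrices are arbitrary):
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.array([[0, 1], [1, 0]])
# np.kron replaces each element a[i, j] with the block a[i, j] * b, so the
# result shape is the elementwise product of the two input shapes.
k = np.kron(a, b)
print(k)
# [[0 1 0 2]
#  [1 0 2 0]
#  [0 3 0 4]
#  [3 0 4 0]]
print(k.shape)  # (4, 4)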
python | wandb__wandb | wandb/sdk/internal/sample.py | {
"start": 29,
"end": 2470
} | class ____:
def __init__(self, min_samples=None):
self._samples = min_samples or 64
# force power of 2 samples
self._samples = 2 ** int(math.ceil(math.log(self._samples, 2)))
# target oversample by factor of 2
self._samples2 = self._samples * 2
# max size of each buffer
self._max = self._samples2 // 2
self._shift = 0
self._mask = (1 << self._shift) - 1
self._buckets = int(math.log(self._samples2, 2))
self._buckets_bits = int(math.log(self._buckets, 2))
self._buckets_mask = (1 << self._buckets_bits + 1) - 1
self._buckets_index = 0
self._bucket = []
self._index = [0] * self._buckets
self._count = 0
self._log2 = [0]
# pre-allocate buckets
for _ in range(self._buckets):
self._bucket.append([0] * self._max)
# compute integer log2
self._log2 += [int(math.log(i, 2)) for i in range(1, 2**self._buckets + 1)]
def _show(self):
print("=" * 20) # noqa: T201
for b in range(self._buckets):
b = (b + self._buckets_index) % self._buckets
vals = [self._bucket[b][i] for i in range(self._index[b])]
print(f"{b}: {vals}") # noqa: T201
def add(self, val):
self._count += 1
cnt = self._count
if cnt & self._mask:
return
b = cnt >> self._shift
b = self._log2[b] # b = int(math.log(b, 2))
if b >= self._buckets:
self._index[self._buckets_index] = 0
self._buckets_index = (self._buckets_index + 1) % self._buckets
self._shift += 1
self._mask = (self._mask << 1) | 1
b += self._buckets - 1
b = (b + self._buckets_index) % self._buckets
self._bucket[b][self._index[b]] = val
self._index[b] += 1
def get(self):
full = []
sampled = []
# self._show()
for b in range(self._buckets):
max_num = 2**b
b = (b + self._buckets_index) % self._buckets
modb = self._index[b] // max_num
for i in range(self._index[b]):
if not modb or i % modb == 0:
sampled.append(self._bucket[b][i])
full.append(self._bucket[b][i])
if len(sampled) < self._samples:
return tuple(full)
return tuple(sampled)
| UniformSampleAccumulator |
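A hypothetical usage sketch for the accumulator above (assuming the class is importable; the input stream is invented):
acc = UniformSampleAccumulator()  # defaults to roughly 64 retained samples
for value in range(10_000):
    acc.add(value)
samples = acc.get()
# get() returns a bounded tuple of values spread roughly uniformly across
# everything that was added, rather than only the most recent measurements.
print(len(samples), samples[:5], samples[-5:])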
python | getsentry__sentry | src/sentry/replays/lib/new_query/conditions.py | {
"start": 9951,
"end": 10896
} | class ____(GenericArray):
"""String array condition class."""
@staticmethod
def visit_match(expression: Expression, value: str) -> Condition:
v = f"(?i){value[1:-1]}"
return Condition(
Function(
"arrayExists",
parameters=[
Lambda(["item"], Function("match", parameters=[Identifier("item"), v])),
expression,
],
),
Op.EQ,
1,
)
@staticmethod
def visit_not_match(expression: Expression, value: str) -> Condition:
v = f"(?i){value[1:-1]}"
return Condition(
Function(
"arrayExists",
parameters=[
Lambda(["item"], Function("match", parameters=[Identifier("item"), v])),
expression,
],
),
Op.EQ,
0,
)
| StringArray |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 134071,
"end": 136014
} | class ____(TypedDict, total=False):
type: Required[Literal['custom-error']]
schema: Required[CoreSchema]
custom_error_type: Required[str]
custom_error_message: str
custom_error_context: dict[str, Union[str, int, float]]
ref: str
metadata: dict[str, Any]
serialization: SerSchema
def custom_error_schema(
schema: CoreSchema,
custom_error_type: str,
*,
custom_error_message: str | None = None,
custom_error_context: dict[str, Any] | None = None,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> CustomErrorSchema:
"""
Returns a schema that matches a custom error value, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
schema = core_schema.custom_error_schema(
schema=core_schema.int_schema(),
custom_error_type='MyError',
custom_error_message='Error msg',
)
v = SchemaValidator(schema)
v.validate_python(1)
```
Args:
schema: The schema to use for the custom error schema
custom_error_type: The custom error type to use for the custom error schema
custom_error_message: The custom error message to use for the custom error schema
custom_error_context: The custom error context to use for the custom error schema
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
return _dict_not_none(
type='custom-error',
schema=schema,
custom_error_type=custom_error_type,
custom_error_message=custom_error_message,
custom_error_context=custom_error_context,
ref=ref,
metadata=metadata,
serialization=serialization,
)
| CustomErrorSchema |
python | google__pytype | pytype/pytd/parse/parser_test_base.py | {
"start": 280,
"end": 3698
} | class ____(test_base.UnitTest):
"""Test utility class. Knows how to parse PYTD and compare source code."""
loader: load_pytd.Loader
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.loader = load_pytd.Loader(
config.Options.create(python_version=cls.python_version))
def setUp(self):
super().setUp()
self.options = parser.PyiOptions(python_version=self.python_version)
def Parse(self, src, name=None, version=None, platform=None):
if version:
self.options.python_version = version
if platform:
self.options.platform = platform
tree = parser.parse_string(
textwrap.dedent(src), name=name, options=self.options)
tree = tree.Visit(visitors.NamedTypeToClassType())
tree = tree.Visit(visitors.AdjustTypeParameters())
# Convert back to named types for easier testing
tree = tree.Visit(visitors.ClassTypeToNamedType())
tree.Visit(visitors.VerifyVisitor())
return tree
def ParseWithBuiltins(self, src):
ast = parser.parse_string(textwrap.dedent(src), options=self.options)
ast = ast.Visit(visitors.LookupExternalTypes(
{"builtins": self.loader.builtins, "typing": self.loader.typing}))
ast = ast.Visit(visitors.NamedTypeToClassType())
ast = ast.Visit(visitors.AdjustTypeParameters())
ast.Visit(visitors.FillInLocalPointers({
"": ast, "builtins": self.loader.builtins}))
ast.Visit(visitors.VerifyVisitor())
return ast
def ToAST(self, src_or_tree):
if isinstance(src_or_tree, str):
# Put into a canonical form (removes comments, standard indents):
return self.Parse(src_or_tree + "\n")
else: # isinstance(src_or_tree, tuple):
src_or_tree.Visit(visitors.VerifyVisitor())
return src_or_tree
def AssertSourceEquals(self, src_or_tree_1, src_or_tree_2):
# Strip leading "\n"s for convenience
ast1 = self.ToAST(src_or_tree_1)
ast2 = self.ToAST(src_or_tree_2)
src1 = pytd_utils.Print(ast1).strip() + "\n"
src2 = pytd_utils.Print(ast2).strip() + "\n"
# Verify printed versions are the same and ASTs are the same.
ast1 = ast1.Visit(visitors.ClassTypeToNamedType())
ast2 = ast2.Visit(visitors.ClassTypeToNamedType())
if src1 != src2 or not pytd_utils.ASTeq(ast1, ast2):
# Due to differing opinions on the form of debug output, allow an
# environment variable to control what output you want. Set
# PY_UNITTEST_DIFF to get diff output.
if os.getenv("PY_UNITTEST_DIFF"):
self.maxDiff = None # for better diff output (assertMultiLineEqual) # pylint: disable=invalid-name
self.assertMultiLineEqual(src1, src2)
else:
sys.stdout.flush()
sys.stderr.flush()
print("Source files or ASTs differ:", file=sys.stderr)
print("-" * 36, " Actual ", "-" * 36, file=sys.stderr)
print(textwrap.dedent(src1).strip(), file=sys.stderr)
print("-" * 36, "Expected", "-" * 36, file=sys.stderr)
print(textwrap.dedent(src2).strip(), file=sys.stderr)
print("-" * 80, file=sys.stderr)
if not pytd_utils.ASTeq(ast1, ast2):
print("Actual AST:", ast1, file=sys.stderr)
print("Expect AST:", ast2, file=sys.stderr)
self.fail("source files differ")
def ApplyVisitorToString(self, data, visitor):
tree = self.Parse(data)
new_tree = tree.Visit(visitor)
return pytd_utils.Print(new_tree)
| ParserTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 18945,
"end": 19187
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("BILLING_MANAGER", "OUTSIDE_COLLABORATOR", "UNAFFILIATED")
| OrgRemoveOutsideCollaboratorAuditEntryMembershipType |
python | google__pytype | pytype/tools/xref/testdata/class_def.py | {
"start": 924,
"end": 968
} | class ____:
pass
def f():
global Quux
| Quux |
python | dagster-io__dagster | python_modules/dagster/dagster/_utils/schedules.py | {
"start": 997,
"end": 37778
} | class ____(_croniter):
"""Lightweight shim to enable caching certain values that may be calculated many times."""
@classmethod
@functools.lru_cache(maxsize=128)
def expand(cls, *args, **kwargs): # pyright: ignore[reportIncompatibleMethodOverride]
return super().expand(*args, **kwargs)
def _is_simple_cron(
cron_expression: str,
dt: datetime.datetime,
) -> bool:
"""This function is purely an optimization to see if the provided datetime is already on an obvious boundary
of the common and easy-to-detect cron strings (daily at midnight and hourly on the hour). The optimization is to avoid calling
_find_schedule_time to find the next cron boundary.
"""
if cron_expression == "0 0 * * *":
return dt.hour == 0 and dt.minute == 0 and dt.second == 0 and dt.microsecond == 0
if cron_expression == "0 * * * *":
return dt.minute == 0 and dt.second == 0 and dt.microsecond == 0
return False
def is_valid_cron_string(cron_string: str) -> bool:
if not CroniterShim.is_valid(cron_string):
return False
# Croniter < 1.4 returns 2 items
# Croniter >= 1.4 returns 3 items
expanded, *_ = CroniterShim.expand(cron_string)
# dagster only recognizes cron strings that resolve to 5 parts (e.g. not seconds resolution)
if len(expanded) != 5:
return False
if len(expanded[3]) == 1 and expanded[3][0] == 2: # February
if len(expanded[2]) == 1 and expanded[2][0] in {30, 31}: # 30th or 31st of February
return False
return True
def is_valid_cron_schedule(cron_schedule: Union[str, Sequence[str]]) -> bool:
return (
is_valid_cron_string(cron_schedule)
if isinstance(cron_schedule, str)
else len(cron_schedule) > 0
and all(is_valid_cron_string(cron_string) for cron_string in cron_schedule)
)
def cron_string_repeats_every_hour(cron_string: str) -> bool:
"""Returns if the given cron schedule repeats every hour."""
cron_parts, nth_weekday_of_month, *_ = CroniterShim.expand(cron_string)
return len(cron_parts[1]) == 1 and cron_parts[1][0] == "*"
def apply_fold_and_post_transition(date: datetime.datetime) -> datetime.datetime:
date = apply_post_transition(date)
return _apply_fold(date)
def _apply_fold(date: datetime.datetime) -> datetime.datetime:
"""For consistency, always choose the latter of the two possible times during a fall DST
transition when there are two possibilities - match behavior described in the docs:
https://docs.dagster.io/guides/automate/schedules/customizing-execution-timezone#execution-times-and-daylight-savings-time)
Never call this with datetimes that could be non-existent. datetime_ambiguous will return true
but folding them will leave them non-existent.
""" # noqa: D415
if date.fold == 0 and date.hour in DAYLIGHT_SAVINGS_HOURS and datetime_ambiguous(date):
return date.replace(fold=1)
return date
def apply_post_transition(
date: datetime.datetime,
) -> datetime.datetime:
if date.hour in DAYLIGHT_SAVINGS_HOURS and not datetime_exists(date):
# If we fall on a non-existant time (e.g. between 2 and 3AM during a DST transition)
# advance to the end of the window, which does exist - match behavior described in the docs:
# https://docs.dagster.io/guides/automate/schedules/customizing-execution-timezone#execution-times-and-daylight-savings-time)
# This assumes that all dst offsets are <= to an hour, which is true at time of writing.
# The date passed to dst needs to be in DST to get the offset for the timezone.
dst_offset = check.not_none(date.tzinfo).dst(date + datetime.timedelta(hours=1))
# This assumes that all dst transitions happen on the hour, which is true at time of writing.
# Rewind time to the start of the transition and then add the offset to get the first time out of the transition.
start_dst = date.replace(minute=0, second=0, microsecond=0)
return start_dst + check.not_none(dst_offset)
return date
def _replace_date_fields(
date: datetime.datetime,
hour: int,
minute: int,
day: int,
):
new_date = date.replace(
day=day,
hour=hour,
minute=minute,
second=0,
microsecond=0,
)
return apply_fold_and_post_transition(new_date)
SECONDS_PER_MINUTE = 60
MINUTES_PER_HOUR = 60
def _find_hourly_schedule_time(
minutes: Sequence[int],
date: datetime.datetime,
ascending: bool,
already_on_boundary: bool,
) -> datetime.datetime:
if ascending:
# short-circuit if minutes and seconds are already correct
if len(minutes) == 1 and (
already_on_boundary
or (date.minute == minutes[0] and date.second == 0 and date.microsecond == 0)
):
# switch to utc so that timedelta behaves as expected instead of doing walltime math
new_date = date.astimezone(datetime.timezone.utc) + datetime.timedelta(hours=1)
new_date = new_date.astimezone(date.tzinfo)
return new_date
# clear microseconds
new_timestamp = math.ceil(date.timestamp())
# clear seconds
new_timestamp = (
new_timestamp
+ (SECONDS_PER_MINUTE - new_timestamp % SECONDS_PER_MINUTE) % SECONDS_PER_MINUTE
)
current_minute = (new_timestamp // SECONDS_PER_MINUTE) % SECONDS_PER_MINUTE
final_timestamp = None
for minute in minutes:
new_timestamp_cand = new_timestamp + SECONDS_PER_MINUTE * (
(minute - current_minute) % MINUTES_PER_HOUR
)
# move forward an hour if we haven't moved forwards yet
if new_timestamp_cand <= date.timestamp():
new_timestamp_cand = new_timestamp_cand + SECONDS_PER_MINUTE * MINUTES_PER_HOUR
final_timestamp = (
new_timestamp_cand
if not final_timestamp
else min(final_timestamp, new_timestamp_cand)
)
else:
if len(minutes) == 1 and (
already_on_boundary
or (date.minute == minutes[0] and date.second == 0 and date.microsecond == 0)
):
# switch to utc so that timedelta behaves as expected instead of doing walltime math
new_date = date.astimezone(datetime.timezone.utc) - datetime.timedelta(hours=1)
new_date = new_date.astimezone(date.tzinfo)
return new_date
# clear microseconds
new_timestamp = math.floor(date.timestamp())
# clear seconds
new_timestamp = new_timestamp - new_timestamp % SECONDS_PER_MINUTE
# move minutes back to correct place
current_minute = (new_timestamp // SECONDS_PER_MINUTE) % SECONDS_PER_MINUTE
final_timestamp = None
for minute in minutes:
new_timestamp_cand = new_timestamp - SECONDS_PER_MINUTE * (
(current_minute - minute) % MINUTES_PER_HOUR
)
# move back an hour if we haven't moved backwards yet
if new_timestamp_cand >= date.timestamp():
new_timestamp_cand = new_timestamp_cand - SECONDS_PER_MINUTE * MINUTES_PER_HOUR
final_timestamp = (
new_timestamp_cand
if not final_timestamp
else max(final_timestamp, new_timestamp_cand)
)
return datetime.datetime.fromtimestamp(check.not_none(final_timestamp), tz=date.tzinfo)
def _find_daily_schedule_time(
minute: int,
hour: int,
date: datetime.datetime,
ascending: bool,
already_on_boundary: bool,
) -> datetime.datetime:
# First move to the correct time of day today (ignoring whether it is the correct day)
if not already_on_boundary or (
date.hour != hour or date.minute != minute or date.second != 0 or date.microsecond != 0
):
new_time = _replace_date_fields(
date,
hour,
minute,
date.day,
)
else:
new_time = date
if ascending:
if already_on_boundary or new_time.timestamp() <= date.timestamp():
new_time = new_time + datetime.timedelta(days=1)
else:
if already_on_boundary or new_time.timestamp() >= date.timestamp():
new_time = new_time - datetime.timedelta(days=1)
# If the hour or minute has changed from the schedule in the cron string,
# double-check that it's still correct in case we crossed a DST boundary
if new_time.hour != hour or new_time.minute != minute:
new_time = _replace_date_fields(
new_time,
hour,
minute,
new_time.day,
)
return apply_fold_and_post_transition(new_time)
def _get_crontab_day_of_week(dt: datetime.datetime) -> int:
weekday = dt.isoweekday()
# crontab has 0-6, sunday - saturday
# isoweekday is 1-7 monday - sunday
return weekday if weekday <= 6 else 0
def _find_weekly_schedule_time(
minute: int,
hour: int,
day_of_week: int,
date: datetime.datetime,
ascending: bool,
already_on_boundary: bool,
) -> datetime.datetime:
# first move to the correct time of day
if not already_on_boundary:
new_time = _replace_date_fields(
date,
hour,
minute,
date.day,
)
# Move to the correct day of the week
current_day_of_week = _get_crontab_day_of_week(new_time)
if day_of_week != current_day_of_week:
if ascending:
new_time = new_time + relativedelta(days=(day_of_week - current_day_of_week) % 7)
else:
new_time = new_time - relativedelta(days=(current_day_of_week - day_of_week) % 7)
else:
new_time = date
# Make sure that we've actually moved in the correct direction, advance if we haven't
if ascending:
if already_on_boundary or new_time.timestamp() <= date.timestamp():
new_time = new_time + relativedelta(weeks=1)
else:
if already_on_boundary or new_time.timestamp() >= date.timestamp():
new_time = new_time - relativedelta(weeks=1)
# If the hour or minute has changed from the schedule in the cron string,
# double-check that it's still correct in case we crossed a DST boundary
if new_time.hour != hour or new_time.minute != minute:
new_time = _replace_date_fields(
new_time,
hour,
minute,
new_time.day,
)
return apply_fold_and_post_transition(new_time)
def _find_monthly_schedule_time(
minute: int,
hour: int,
day: int,
date: datetime.datetime,
ascending: bool,
already_on_boundary: bool,
) -> datetime.datetime:
# First move to the correct day and time of day
if not already_on_boundary:
new_time = _replace_date_fields(
date,
check.not_none(hour),
check.not_none(minute),
check.not_none(day),
)
else:
new_time = date
if ascending:
if already_on_boundary or new_time.timestamp() <= date.timestamp():
new_time = new_time + relativedelta(months=1)
else:
if already_on_boundary or new_time.timestamp() >= date.timestamp():
# Move back a month if needed
new_time = new_time - relativedelta(months=1)
# If the hour or minute has changed from the schedule in the cron string,
# double-check that it's still correct in case we crossed a DST boundary
if new_time.hour != hour or new_time.minute != minute:
new_time = _replace_date_fields(
new_time,
check.not_none(hour),
check.not_none(minute),
check.not_none(day),
)
return apply_fold_and_post_transition(new_time)
def _find_schedule_time(
minutes: Optional[Sequence[int]],
hour: Optional[int],
day_of_month: Optional[int],
day_of_week: Optional[int],
schedule_type: "ScheduleType",
date: datetime.datetime,
ascending: bool,
# lets us skip slow work to find the starting point if we know that
# we are already on the boundary of the cron interval
already_on_boundary: bool,
) -> datetime.datetime:
from dagster._core.definitions.partitions.schedule_type import ScheduleType
if schedule_type == ScheduleType.HOURLY:
return _find_hourly_schedule_time(
check.not_none(minutes), date, ascending, already_on_boundary
)
elif schedule_type == ScheduleType.DAILY:
minutes = check.not_none(minutes)
check.invariant(len(minutes) == 1)
return _find_daily_schedule_time(
minutes[0],
check.not_none(hour),
date,
ascending,
already_on_boundary,
)
elif schedule_type == ScheduleType.WEEKLY:
minutes = check.not_none(minutes)
check.invariant(len(minutes) == 1)
return _find_weekly_schedule_time(
minutes[0],
check.not_none(hour),
check.not_none(day_of_week),
date,
ascending,
already_on_boundary,
)
elif schedule_type == ScheduleType.MONTHLY:
minutes = check.not_none(minutes)
check.invariant(len(minutes) == 1)
return _find_monthly_schedule_time(
minutes[0],
check.not_none(hour),
check.not_none(day_of_month),
date,
ascending,
already_on_boundary,
)
else:
raise Exception(f"Unexpected schedule type {schedule_type}")
def _get_dates_to_consider_after_ambigious_time(
cron_iter: CroniterShim,
next_date: datetime.datetime,
repeats_every_hour: bool,
ascending: bool,
):
# Return a list of all times that need to be considered when the next date returned by
# croniter is ambiguous (e.g. 2:30 AM during a fall DST transition). This is tricky because
# we need to make sure that we are emitting times in the correct order, so we return a sorted
# contiguous sequence of times that include all potentially ambiguous times.
post_transition_time = next_date.replace(fold=1)
# Most schedules only need to consider the POST_TRANSITION time and can return here.
if not repeats_every_hour:
return [post_transition_time]
# hourly schedules are more complicated - they'll continue firing once an hour no
# matter what, including both PRE_TRANSITION and POST_TRANSITION times. So we need
# to make sure that every time in the ambiguous time range has both its PRE_TRANSITION
# and POST_TRANSITION times considered and returned.
pre_transition_time = next_date.replace(fold=0)
dates_to_consider = [post_transition_time, pre_transition_time]
curr_pre_transition_time = pre_transition_time
curr_post_transition_time = post_transition_time
while True:
if ascending:
# Time always advances because get_next() is called, so we will eventually break
# Stop once the current PRE_TRANSITION time exceeds the original POST_TRANSITION time
# (so we know we have moved forward across the whole range)
if curr_pre_transition_time.timestamp() >= post_transition_time.timestamp():
break
next_date = cron_iter.get_next(datetime.datetime)
else:
# Time always decreases because get_prev() is called, so we will eventually break
# Stop once the current POST_TRANSITION time has gone past the original PRE_TRANSITION
# time (so we know we have moved backward across the whole range)
if curr_post_transition_time.timestamp() <= pre_transition_time.timestamp():
break
next_date = cron_iter.get_prev(datetime.datetime)
# Make sure we add both the PRE_TRANSITION and POST_TRANSITION times to the
# list of dates to consider so every time emitted by the
# croniter instance is considered and returned from the calling iterator
curr_pre_transition_time = datetime.datetime(
year=next_date.year,
month=next_date.month,
day=next_date.day,
hour=next_date.hour,
minute=next_date.minute,
second=next_date.second,
microsecond=next_date.microsecond,
fold=0,
tzinfo=post_transition_time.tzinfo,
)
dates_to_consider.append(curr_pre_transition_time)
curr_post_transition_time = datetime.datetime(
year=next_date.year,
month=next_date.month,
day=next_date.day,
hour=next_date.hour,
minute=next_date.minute,
second=next_date.second,
microsecond=next_date.microsecond,
fold=1,
tzinfo=post_transition_time.tzinfo,
)
dates_to_consider.append(curr_post_transition_time)
return sorted(dates_to_consider, key=lambda d: d.timestamp())
def _timezone_aware_cron_iter(
cron_string, start_datetime: datetime.datetime, ascending: bool
) -> Iterator[datetime.datetime]:
"""Use croniter to determine the next timestamp matching the passed in cron string
that is past the passed in UTC timestamp. croniter can only be trusted to compute
non-timezone aware cron intervals, so we first figure out the time corresponding to the
passed in timestamp without taking any timezones into account, use croniter to
determine the next time that matches the cron string, translate that back into the passed in
timezone, and repeat, returning the first time that is later than the passed in timestamp.
"""
# Create a naive (timezone-free) datetime to pass into croniter
naive_time = datetime.datetime(
year=start_datetime.year,
month=start_datetime.month,
day=start_datetime.day,
hour=start_datetime.hour,
minute=start_datetime.minute,
second=start_datetime.second,
microsecond=start_datetime.microsecond,
)
# Go back an hour to ensure that we consider the full set of possible candidates (otherwise
# we might fail to properly consider a time that happens twice during a fall DST transition).
# 1 hour is sufficient because that's the maximum amount of time that can be offset during a
# DST transition.
if ascending:
naive_time = naive_time - datetime.timedelta(hours=1)
else:
naive_time = naive_time + datetime.timedelta(hours=1)
cron_iter = CroniterShim(cron_string, naive_time)
# hourly schedules handle DST transitions differently: they skip times that don't exist
# entirely and just move on to the next matching time (instead of returning
# the end time of the non-existent interval), and when there are two times that match the cron
# string, they return both instead of picking the latter time.
repeats_every_hour = cron_string_repeats_every_hour(cron_string)
# Chronological order of dates to return
dates_to_consider = []
start_timestamp = start_datetime.timestamp()
while True:
# Work through everything currently in dates_to_consider
if ascending:
for next_date_with_tz in dates_to_consider:
next_timestamp = next_date_with_tz.timestamp()
if next_timestamp > start_timestamp:
start_timestamp = next_timestamp
yield next_date_with_tz
else:
for next_date_with_tz in reversed(dates_to_consider):
next_timestamp = next_date_with_tz.timestamp()
if next_timestamp < start_timestamp:
start_timestamp = next_timestamp
yield next_date_with_tz
# Clear the list and generate new candidates using croniter
dates_to_consider = []
if ascending:
next_date = cron_iter.get_next(datetime.datetime)
else:
next_date = cron_iter.get_prev(datetime.datetime)
next_date_with_tz = datetime.datetime(
year=next_date.year,
month=next_date.month,
day=next_date.day,
hour=next_date.hour,
minute=next_date.minute,
second=next_date.second,
microsecond=next_date.microsecond,
tzinfo=start_datetime.tzinfo,
)
dates_to_consider = [next_date_with_tz]
if not datetime_exists(next_date_with_tz):
if repeats_every_hour:
# hourly schedules just move on to the next time
dates_to_consider = []
else:
# other schedules advance to the time at the end of the interval (so that e.g.
# a daily schedule doesn't miss an entire day)
dates_to_consider = [apply_post_transition(next_date_with_tz)]
elif datetime_ambiguous(next_date_with_tz):
dates_to_consider = _get_dates_to_consider_after_ambigious_time(
cron_iter=cron_iter,
next_date=next_date_with_tz,
repeats_every_hour=repeats_every_hour,
ascending=ascending,
)
def _has_out_of_range_cron_interval_str(cron_string: str):
assert CroniterShim.is_valid(cron_string)
try:
for i, cron_part in enumerate(cron_string.lower().split()):
expr_parts = cron_part.split(",")
while len(expr_parts) > 0:
expr = expr_parts.pop()
t = re.sub(
r"^\*(\/.+)$",
r"%d-%d\1" % (CRON_RANGES[i][0], CRON_RANGES[i][1]), # noqa: UP031
str(expr),
)
m = CRON_STEP_SEARCH_REGEX.search(t)
if not m:
# try normalizing "{start}/{step}" to "{start}-{max}/{step}".
t = re.sub(r"^(.+)\/(.+)$", r"\1-%d/\2" % (CRON_RANGES[i][1]), str(expr)) # noqa: UP031
m = CRON_STEP_SEARCH_REGEX.search(t)
if m:
(low, high, step) = m.group(1), m.group(2), m.group(4) or 1
if i == 2 and high == "l":
high = "31"
if not INT_REGEX.search(low) or not INT_REGEX.search(high):
continue
low, high, step = map(int, [low, high, step])
if step > high:
return True
except:
pass
return False
def has_out_of_range_cron_interval(cron_schedule: Union[str, Sequence[str]]):
"""Utility function to detect cron schedules like '*/90 * * * *', which are valid cron schedules
but which evaluate to once every hour, not once every 90 minutes as might be expected. This is
useful to detect so that we can issue warnings or some other kind of feedback to the user. This
function does not detect cases where the step does not divide cleanly in the range, which is
another case that might cause some surprising behavior (e.g. '*/7 * * * *').
"""
return (
_has_out_of_range_cron_interval_str(cron_schedule)
if isinstance(cron_schedule, str)
else any(_has_out_of_range_cron_interval_str(s) for s in cron_schedule)
)
def cron_string_iterator(
start_timestamp: float,
cron_string: str,
execution_timezone: Optional[str],
ascending: bool = True,
start_offset: int = 0,
) -> Iterator[datetime.datetime]:
"""Generator of datetimes >= start_timestamp for the given cron string."""
from dagster._core.definitions.partitions.schedule_type import ScheduleType
# leap day special casing
if cron_string.endswith(" 29 2 *"):
min_hour, _ = cron_string.split(" 29 2 *")
day_before = f"{min_hour} 28 2 *"
# run the iterator for Feb 28th
for dt in cron_string_iterator(
start_timestamp=start_timestamp,
cron_string=day_before,
execution_timezone=execution_timezone,
ascending=ascending,
start_offset=start_offset,
):
# only return on leap years
if calendar.isleap(dt.year):
# shift 28th back to 29th
shifted_dt = dt + datetime.timedelta(days=1)
yield shifted_dt
return
execution_timezone = execution_timezone or "UTC"
# Croniter < 1.4 returns 2 items
# Croniter >= 1.4 returns 3 items
cron_parts, nth_weekday_of_month, *_ = CroniterShim.expand(cron_string)
is_numeric = [len(part) == 1 and isinstance(part[0], int) for part in cron_parts]
is_wildcard = [len(part) == 1 and part[0] == "*" for part in cron_parts]
all_numeric_minutes = len(cron_parts[0]) > 0 and all(
cron_part != "*" for cron_part in cron_parts[0]
)
known_schedule_type: Optional[ScheduleType] = None
expected_hour = None
expected_minutes = None
expected_day = None
expected_day_of_week = None
# Special-case common intervals (hourly/daily/weekly/monthly) since croniter iteration can be
# much slower and has correctness issues on DST boundaries
if not nth_weekday_of_month:
if (
all(is_numeric[0:3])
and all(is_wildcard[3:])
and cron_parts[2][0] <= MAX_DAY_OF_MONTH_WITH_GUARANTEED_MONTHLY_INTERVAL # pyright: ignore[reportOperatorIssue]
): # monthly
known_schedule_type = ScheduleType.MONTHLY
elif all(is_numeric[0:2]) and is_numeric[4] and all(is_wildcard[2:4]): # weekly
known_schedule_type = ScheduleType.WEEKLY
elif all(is_numeric[0:2]) and all(is_wildcard[2:]): # daily
known_schedule_type = ScheduleType.DAILY
elif all_numeric_minutes and all(is_wildcard[1:]): # hourly
known_schedule_type = ScheduleType.HOURLY
if is_numeric[1]:
expected_hour = cron_parts[1][0]
if all_numeric_minutes:
expected_minutes = [cron_part for cron_part in cron_parts[0]]
if is_numeric[2]:
expected_day = cron_parts[2][0]
if is_numeric[4]:
expected_day_of_week = cron_parts[4][0]
if known_schedule_type:
start_datetime = datetime.datetime.fromtimestamp(
start_timestamp, tz=get_timezone(execution_timezone)
)
if start_offset == 0 and _is_simple_cron(cron_string, start_datetime):
# In simple cases, where you're already on a cron boundary, the below logic is unnecessary
# and slow
next_date = start_datetime
# This is already on a cron boundary, so yield it
yield start_datetime
else:
next_date = _find_schedule_time(
expected_minutes, # pyright: ignore[reportArgumentType]
expected_hour, # pyright: ignore[reportArgumentType]
expected_day, # pyright: ignore[reportArgumentType]
expected_day_of_week, # pyright: ignore[reportArgumentType]
known_schedule_type,
start_datetime,
ascending=not ascending, # Going in the reverse direction
already_on_boundary=False,
)
check.invariant(start_offset <= 0)
for _ in range(-start_offset):
next_date = _find_schedule_time(
expected_minutes, # pyright: ignore[reportArgumentType]
expected_hour, # pyright: ignore[reportArgumentType]
expected_day, # pyright: ignore[reportArgumentType]
expected_day_of_week, # pyright: ignore[reportArgumentType]
known_schedule_type,
next_date,
ascending=not ascending, # Going in the reverse direction
already_on_boundary=True,
)
while True:
next_date = _find_schedule_time(
expected_minutes, # pyright: ignore[reportArgumentType]
expected_hour, # pyright: ignore[reportArgumentType]
expected_day, # pyright: ignore[reportArgumentType]
expected_day_of_week, # pyright: ignore[reportArgumentType]
known_schedule_type,
next_date,
ascending=ascending,
already_on_boundary=True,
)
if start_offset == 0:
if ascending:
# Guard against _find_schedule_time returning unexpected results
check.invariant(next_date.timestamp() >= start_timestamp)
else:
check.invariant(next_date.timestamp() <= start_timestamp)
yield next_date
else:
yield from _croniter_string_iterator(
start_timestamp, cron_string, execution_timezone, ascending, start_offset
)
def _croniter_string_iterator(
start_timestamp: float,
cron_string: str,
timezone_str: str,
ascending: bool = True,
start_offset: int = 0,
):
start_datetime = datetime.datetime.fromtimestamp(start_timestamp, get_timezone(timezone_str))
reverse_cron = _timezone_aware_cron_iter(cron_string, start_datetime, ascending=not ascending)
next_date = None
check.invariant(start_offset <= 0)
for _ in range(-start_offset + 1):
next_date = next(reverse_cron)
next_date = check.not_none(next_date).astimezone(start_datetime.tzinfo)
forward_cron = _timezone_aware_cron_iter(cron_string, next_date, ascending=ascending)
while True:
next_date = next(forward_cron)
if start_offset == 0:
if ascending:
# Guard against _find_schedule_time returning unexpected results
check.invariant(next_date.timestamp() >= start_timestamp)
else:
check.invariant(next_date.timestamp() <= start_timestamp)
yield next_date
def reverse_cron_string_iterator(
end_timestamp: float,
cron_string: str,
execution_timezone: Optional[str],
) -> Iterator[datetime.datetime]:
yield from cron_string_iterator(end_timestamp, cron_string, execution_timezone, ascending=False)
def schedule_execution_time_iterator(
start_timestamp: float,
cron_schedule: Union[str, Sequence[str]],
execution_timezone: Optional[str],
ascending: bool = True,
) -> Iterator[datetime.datetime]:
"""Generator of execution datetimes >= start_timestamp for the given schedule.
Here cron_schedule is either a cron string or a sequence of cron strings. In the latter case,
the next execution datetime is obtained by computing the next cron datetime
after the current execution datetime for each cron string in the sequence, and then choosing
the earliest among them.
"""
check.invariant(
is_valid_cron_schedule(cron_schedule), desc=f"{cron_schedule} must be a valid cron schedule"
)
if isinstance(cron_schedule, str):
yield from (
cron_string_iterator(start_timestamp, cron_schedule, execution_timezone)
if ascending
else reverse_cron_string_iterator(start_timestamp, cron_schedule, execution_timezone)
)
else:
iterators = [
(
cron_string_iterator(start_timestamp, cron_string, execution_timezone)
if ascending
else reverse_cron_string_iterator(start_timestamp, cron_string, execution_timezone)
)
for cron_string in cron_schedule
]
next_dates = [next(it) for it in iterators]
while True:
# Choose earliest out of all subsequent datetimes.
earliest_next_date = min(next_dates)
yield earliest_next_date
# Increment all iterators that generated the earliest subsequent datetime.
for i, next_date in enumerate(next_dates):
if next_date == earliest_next_date:
next_dates[i] = next(iterators[i])
def get_latest_completed_cron_tick(
cron_string: str,
current_time: datetime.datetime,
timezone: Optional[str],
) -> datetime.datetime:
cron_iter = reverse_cron_string_iterator(
end_timestamp=current_time.timestamp(),
cron_string=cron_string,
execution_timezone=timezone,
)
return next(cron_iter)
def get_next_cron_tick(
cron_string: str,
current_time: datetime.datetime,
timezone: Optional[str],
) -> datetime.datetime:
cron_iter = cron_string_iterator(
start_timestamp=current_time.timestamp(),
cron_string=cron_string,
execution_timezone=timezone,
)
return next(cron_iter)
def get_smallest_cron_interval(
cron_string: str,
execution_timezone: Optional[str] = None,
) -> datetime.timedelta:
"""Find the smallest interval between cron ticks for a given cron schedule.
Uses a sampling-based approach to find the minimum interval by generating
consecutive cron ticks and measuring the gaps between them. Sampling stops
early if either of these limits is reached:
- A maximum of 1000 generated ticks
- A time horizon of 20 years past the sampling start
Args:
cron_string: A cron string
execution_timezone: Timezone to use for cron evaluation (defaults to UTC)
Returns:
The smallest timedelta between any two consecutive cron ticks
Raises:
CheckError: If the cron string is invalid or not recognized by Dagster
"""
check.invariant(
is_valid_cron_string(cron_string), desc=f"{cron_string} must be a valid cron string"
)
execution_timezone = execution_timezone or "UTC"
# Always start at current time in the specified timezone
start_time = get_current_datetime(tz=execution_timezone)
# Start sampling from a year ago to capture seasonal variations (DST, leap years)
sampling_start = start_time - datetime.timedelta(days=365)
# Cap the lookahead horizon to avoid extremely distant datetimes (e.g., year ~3000 on Windows)
horizon_deadline = sampling_start + relativedelta(years=20)
# Generate consecutive cron ticks
cron_iter = schedule_execution_time_iterator(
start_timestamp=sampling_start.timestamp(),
cron_schedule=cron_string,
execution_timezone=execution_timezone,
ascending=True,
)
# Collect the first tick
prev_tick = next(cron_iter)
min_interval = None
# Sample up to 1000 ticks, but also stop at the 20-year horizon
for _ in range(999):
try:
current_tick = next(cron_iter)
# Stop if we've gone beyond our lookahead horizon
if current_tick > horizon_deadline:
break
interval = current_tick - prev_tick
# Handle DST transitions where two ticks can have the same wall clock time
# but different fold values (indicating they're actually different points in time)
if interval == datetime.timedelta(seconds=0):
# Check if this is a DST ambiguous time scenario where both ticks
# represent the same local time but different actual moments
if (
current_tick.hour == prev_tick.hour
and current_tick.minute == prev_tick.minute
and current_tick.second == prev_tick.second
and current_tick.fold != prev_tick.fold
):
# This is a DST fall-back transition - skip this zero interval
# as it's not representative of the true minimum cron interval
prev_tick = current_tick
continue
# We've encountered a genuine zero interval (which shouldn't happen)
raise Exception("Encountered a genuine zero interval")
if interval < datetime.timedelta(seconds=0):
# This happens when the sampling encounters a daylight savings transition where the clocks roll back
# Just skip this interval and continue sampling
prev_tick = current_tick
continue
# Update minimum interval
if min_interval is None or interval < min_interval:
min_interval = interval
prev_tick = current_tick
except StopIteration:
# This shouldn't happen with cron iterators, but handle gracefully
break
if min_interval is None:
# Fallback - should not happen with valid cron schedules
raise ValueError("Could not determine minimum interval from cron schedule")
return min_interval
| CroniterShim |
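A hypothetical usage sketch for two helpers defined above (assuming a Dagster installation where they are importable from dagster._utils.schedules, the module this record comes from; availability may vary by version):
import datetime
from dagster._utils.schedules import cron_string_iterator, is_valid_cron_string
print(is_valid_cron_string("0 0 * * *"))   # True: daily at midnight
print(is_valid_cron_string("0 0 31 2 *"))  # False: February 31st can never fire
start = datetime.datetime(2024, 3, 1, tzinfo=datetime.timezone.utc).timestamp()
ticks = cron_string_iterator(start, "0 0 * * *", "UTC")
# Yields timezone-aware datetimes on the cron boundary, starting at or after
# the given timestamp (here the start itself, since it lies on the boundary).
print(next(ticks))  # 2024-03-01 00:00:00+00:00
print(next(ticks))  # 2024-03-02 00:00:00+00:00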
python | scrapy__scrapy | tests/test_scheduler.py | {
"start": 8506,
"end": 9662
} | class ____:
reopen = False
@property
def priority_queue_cls(self) -> str:
return "scrapy.pqueues.DownloaderAwarePriorityQueue"
def test_logic(self):
for url, slot in _URLS_WITH_SLOTS:
request = Request(url)
request.meta[Downloader.DOWNLOAD_SLOT] = slot
self.scheduler.enqueue_request(request)
if self.reopen:
self.close_scheduler()
self.create_scheduler()
dequeued_slots = []
requests = []
downloader = self.mock_crawler.engine.downloader
while self.scheduler.has_pending_requests():
request = self.scheduler.next_request()
slot = downloader.get_slot_key(request)
dequeued_slots.append(slot)
downloader.increment(slot)
requests.append(request)
for request in requests:
slot = downloader.get_slot_key(request)
downloader.decrement(slot)
assert _is_scheduling_fair([s for u, s in _URLS_WITH_SLOTS], dequeued_slots)
assert sum(len(s.active) for s in downloader.slots.values()) == 0
| DownloaderAwareSchedulerTestMixin |
python | pymupdf__PyMuPDF | src/table.py | {
"start": 11532,
"end": 12729
} | class ____(float):
pass
NON_NEGATIVE_SETTINGS = [
"snap_tolerance",
"snap_x_tolerance",
"snap_y_tolerance",
"join_tolerance",
"join_x_tolerance",
"join_y_tolerance",
"edge_min_length",
"min_words_vertical",
"min_words_horizontal",
"intersection_tolerance",
"intersection_x_tolerance",
"intersection_y_tolerance",
]
TABLE_STRATEGIES = ["lines", "lines_strict", "text", "explicit"]
UNSET = UnsetFloat(0)
DEFAULT_SNAP_TOLERANCE = 3
DEFAULT_JOIN_TOLERANCE = 3
DEFAULT_MIN_WORDS_VERTICAL = 3
DEFAULT_MIN_WORDS_HORIZONTAL = 1
DEFAULT_X_TOLERANCE = 3
DEFAULT_Y_TOLERANCE = 3
DEFAULT_X_DENSITY = 7.25
DEFAULT_Y_DENSITY = 13
bbox_getter = itemgetter("x0", "top", "x1", "bottom")
LIGATURES = {
"ff": "ff",
"ffi": "ffi",
"ffl": "ffl",
"fi": "fi",
"fl": "fl",
"st": "st",
"ſt": "st",
}
def to_list(collection) -> list:
if isinstance(collection, list):
return collection
elif isinstance(collection, Sequence):
return list(collection)
elif hasattr(collection, "to_dict"):
res = collection.to_dict("records") # pragma: nocover
return res
else:
return list(collection)
| UnsetFloat |
python | davidhalter__jedi | jedi/inference/base_value.py | {
"start": 5515,
"end": 10860
} | class ____(HelperValueMixin):
"""
To be implemented by subclasses.
"""
tree_node = None
# Possible values: None, tuple, list, dict and set. Here to deal with these
# very important containers.
array_type = None
api_type = 'not_defined_please_report_bug'
def __init__(self, inference_state, parent_context=None):
self.inference_state = inference_state
self.parent_context = parent_context
def py__getitem__(self, index_value_set, contextualized_node):
from jedi.inference import analysis
# TODO this value is probably not right.
analysis.add(
contextualized_node.context,
'type-error-not-subscriptable',
contextualized_node.node,
message="TypeError: '%s' object is not subscriptable" % self
)
return NO_VALUES
def py__simple_getitem__(self, index):
raise SimpleGetItemNotFound
def py__iter__(self, contextualized_node=None):
if contextualized_node is not None:
from jedi.inference import analysis
analysis.add(
contextualized_node.context,
'type-error-not-iterable',
contextualized_node.node,
message="TypeError: '%s' object is not iterable" % self)
return iter([])
def py__next__(self, contextualized_node=None):
return self.py__iter__(contextualized_node)
def get_signatures(self):
return []
def is_class(self):
return False
def is_class_mixin(self):
return False
def is_instance(self):
return False
def is_function(self):
return False
def is_module(self):
return False
def is_namespace(self):
return False
def is_compiled(self):
return False
def is_bound_method(self):
return False
def is_builtins_module(self):
return False
def py__bool__(self):
"""
Since Wrapper is a super class for classes, functions and modules,
the return value will always be true.
"""
return True
def py__doc__(self):
try:
self.tree_node.get_doc_node
except AttributeError:
return ''
else:
return clean_scope_docstring(self.tree_node)
def get_safe_value(self, default=sentinel):
if default is sentinel:
raise ValueError("There exists no safe value for value %s" % self)
return default
def execute_operation(self, other, operator):
debug.warning("%s not possible between %s and %s", operator, self, other)
return NO_VALUES
def py__call__(self, arguments):
debug.warning("no execution possible %s", self)
return NO_VALUES
def py__stop_iteration_returns(self):
debug.warning("Not possible to return the stop iterations of %s", self)
return NO_VALUES
def py__getattribute__alternatives(self, name_or_str):
"""
For now a way to add values in cases like __getattr__.
"""
return NO_VALUES
def py__get__(self, instance, class_value):
debug.warning("No __get__ defined on %s", self)
return ValueSet([self])
def py__get__on_class(self, calling_instance, instance, class_value):
return NotImplemented
def get_qualified_names(self):
# Returns Optional[Tuple[str, ...]]
return None
def is_stub(self):
# The root value knows if it's a stub or not.
return self.parent_context.is_stub()
def _as_context(self):
raise HasNoContext
@property
def name(self):
raise NotImplementedError
def get_type_hint(self, add_class_info=True):
return None
def infer_type_vars(self, value_set):
"""
When the current instance represents a type annotation, this method
tries to find information about undefined type vars and returns a dict
from type var name to value set.
This is for example important to understand what `iter([1])` returns.
According to typeshed, `iter` returns an `Iterator[_T]`:
def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...
This function would generate `int` for `_T` in this case, because it
unpacks the `Iterable`.
Parameters
----------
`self`: represents the annotation of the current parameter to infer the
value for. In the above example, this would initially be the
`Iterable[_T]` of the `iterable` parameter and then, when recursing,
just the `_T` generic parameter.
`value_set`: represents the actual argument passed to the parameter
we're inferred for, or (for recursive calls) their types. In the
above example this would first be the representation of the list
`[1]` and then, when recursing, just of `1`.
"""
return {}
def iterate_values(values, contextualized_node=None, is_async=False):
"""
Calls `iterate`, on all values but ignores the ordering and just returns
all values that the iterate functions yield.
"""
return ValueSet.from_sets(
lazy_value.infer()
for lazy_value in values.iterate(contextualized_node, is_async=is_async)
)
| Value |
python | encode__httpx | httpx/_status_codes.py | {
"start": 84,
"end": 5639
} | class ____(IntEnum):
"""HTTP status codes and reason phrases
Status codes from the following RFCs are all observed:
* RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616
* RFC 6585: Additional HTTP Status Codes
* RFC 3229: Delta encoding in HTTP
* RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518
* RFC 5842: Binding Extensions to WebDAV
* RFC 7238: Permanent Redirect
* RFC 2295: Transparent Content Negotiation in HTTP
* RFC 2774: An HTTP Extension Framework
* RFC 7540: Hypertext Transfer Protocol Version 2 (HTTP/2)
* RFC 2324: Hyper Text Coffee Pot Control Protocol (HTCPCP/1.0)
* RFC 7725: An HTTP Status Code to Report Legal Obstacles
* RFC 8297: An HTTP Status Code for Indicating Hints
* RFC 8470: Using Early Data in HTTP
"""
def __new__(cls, value: int, phrase: str = "") -> codes:
obj = int.__new__(cls, value)
obj._value_ = value
obj.phrase = phrase # type: ignore[attr-defined]
return obj
def __str__(self) -> str:
return str(self.value)
@classmethod
def get_reason_phrase(cls, value: int) -> str:
try:
return codes(value).phrase # type: ignore
except ValueError:
return ""
@classmethod
def is_informational(cls, value: int) -> bool:
"""
Returns `True` for 1xx status codes, `False` otherwise.
"""
return 100 <= value <= 199
@classmethod
def is_success(cls, value: int) -> bool:
"""
Returns `True` for 2xx status codes, `False` otherwise.
"""
return 200 <= value <= 299
@classmethod
def is_redirect(cls, value: int) -> bool:
"""
Returns `True` for 3xx status codes, `False` otherwise.
"""
return 300 <= value <= 399
@classmethod
def is_client_error(cls, value: int) -> bool:
"""
Returns `True` for 4xx status codes, `False` otherwise.
"""
return 400 <= value <= 499
@classmethod
def is_server_error(cls, value: int) -> bool:
"""
Returns `True` for 5xx status codes, `False` otherwise.
"""
return 500 <= value <= 599
@classmethod
def is_error(cls, value: int) -> bool:
"""
Returns `True` for 4xx or 5xx status codes, `False` otherwise.
"""
return 400 <= value <= 599
# informational
CONTINUE = 100, "Continue"
SWITCHING_PROTOCOLS = 101, "Switching Protocols"
PROCESSING = 102, "Processing"
EARLY_HINTS = 103, "Early Hints"
# success
OK = 200, "OK"
CREATED = 201, "Created"
ACCEPTED = 202, "Accepted"
NON_AUTHORITATIVE_INFORMATION = 203, "Non-Authoritative Information"
NO_CONTENT = 204, "No Content"
RESET_CONTENT = 205, "Reset Content"
PARTIAL_CONTENT = 206, "Partial Content"
MULTI_STATUS = 207, "Multi-Status"
ALREADY_REPORTED = 208, "Already Reported"
IM_USED = 226, "IM Used"
# redirection
MULTIPLE_CHOICES = 300, "Multiple Choices"
MOVED_PERMANENTLY = 301, "Moved Permanently"
FOUND = 302, "Found"
SEE_OTHER = 303, "See Other"
NOT_MODIFIED = 304, "Not Modified"
USE_PROXY = 305, "Use Proxy"
TEMPORARY_REDIRECT = 307, "Temporary Redirect"
PERMANENT_REDIRECT = 308, "Permanent Redirect"
# client error
BAD_REQUEST = 400, "Bad Request"
UNAUTHORIZED = 401, "Unauthorized"
PAYMENT_REQUIRED = 402, "Payment Required"
FORBIDDEN = 403, "Forbidden"
NOT_FOUND = 404, "Not Found"
METHOD_NOT_ALLOWED = 405, "Method Not Allowed"
NOT_ACCEPTABLE = 406, "Not Acceptable"
PROXY_AUTHENTICATION_REQUIRED = 407, "Proxy Authentication Required"
REQUEST_TIMEOUT = 408, "Request Timeout"
CONFLICT = 409, "Conflict"
GONE = 410, "Gone"
LENGTH_REQUIRED = 411, "Length Required"
PRECONDITION_FAILED = 412, "Precondition Failed"
REQUEST_ENTITY_TOO_LARGE = 413, "Request Entity Too Large"
REQUEST_URI_TOO_LONG = 414, "Request-URI Too Long"
UNSUPPORTED_MEDIA_TYPE = 415, "Unsupported Media Type"
REQUESTED_RANGE_NOT_SATISFIABLE = 416, "Requested Range Not Satisfiable"
EXPECTATION_FAILED = 417, "Expectation Failed"
IM_A_TEAPOT = 418, "I'm a teapot"
MISDIRECTED_REQUEST = 421, "Misdirected Request"
UNPROCESSABLE_ENTITY = 422, "Unprocessable Entity"
LOCKED = 423, "Locked"
FAILED_DEPENDENCY = 424, "Failed Dependency"
TOO_EARLY = 425, "Too Early"
UPGRADE_REQUIRED = 426, "Upgrade Required"
PRECONDITION_REQUIRED = 428, "Precondition Required"
TOO_MANY_REQUESTS = 429, "Too Many Requests"
REQUEST_HEADER_FIELDS_TOO_LARGE = 431, "Request Header Fields Too Large"
UNAVAILABLE_FOR_LEGAL_REASONS = 451, "Unavailable For Legal Reasons"
# server errors
INTERNAL_SERVER_ERROR = 500, "Internal Server Error"
NOT_IMPLEMENTED = 501, "Not Implemented"
BAD_GATEWAY = 502, "Bad Gateway"
SERVICE_UNAVAILABLE = 503, "Service Unavailable"
GATEWAY_TIMEOUT = 504, "Gateway Timeout"
HTTP_VERSION_NOT_SUPPORTED = 505, "HTTP Version Not Supported"
VARIANT_ALSO_NEGOTIATES = 506, "Variant Also Negotiates"
INSUFFICIENT_STORAGE = 507, "Insufficient Storage"
LOOP_DETECTED = 508, "Loop Detected"
NOT_EXTENDED = 510, "Not Extended"
NETWORK_AUTHENTICATION_REQUIRED = 511, "Network Authentication Required"
# Include lower-case styles for `requests` compatibility.
for code in codes:
setattr(codes, code._name_.lower(), int(code))
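# Illustrative usage (editor's sketch, not part of the original httpx source):
#   int(codes.NOT_FOUND) == 404
#   codes.get_reason_phrase(404) == "Not Found"
#   codes.is_client_error(404) is True
#   codes.not_found == 404   # lower-case alias added by the loop above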
| codes |
python | encode__httpx | httpx/_transports/wsgi.py | {
"start": 617,
"end": 1032
} | class ____(SyncByteStream):
def __init__(self, result: typing.Iterable[bytes]) -> None:
self._close = getattr(result, "close", None)
self._result = _skip_leading_empty_chunks(result)
def __iter__(self) -> typing.Iterator[bytes]:
for part in self._result:
yield part
def close(self) -> None:
if self._close is not None:
self._close()
| WSGIByteStream |
python | sympy__sympy | sympy/physics/quantum/tests/test_operator.py | {
"start": 1147,
"end": 8417
} | class ____(HermitianOperator):
@classmethod
def default_args(self):
return ("T",)
t_ket = CustomKet()
t_op = CustomOp()
def test_operator():
A = Operator('A')
B = Operator('B')
C = Operator('C')
assert isinstance(A, Operator)
assert isinstance(A, QExpr)
assert A.label == (Symbol('A'),)
assert A.is_commutative is False
assert A.hilbert_space == HilbertSpace()
assert A*B != B*A
assert (A*(B + C)).expand() == A*B + A*C
assert ((A + B)**2).expand() == A**2 + A*B + B*A + B**2
assert t_op.label[0] == Symbol(t_op.default_args()[0])
assert Operator() == Operator("O")
with warns_deprecated_sympy():
assert A*IdentityOperator() == A
def test_operator_inv():
A = Operator('A')
assert A*A.inv() == 1
assert A.inv()*A == 1
def test_hermitian():
H = HermitianOperator('H')
assert isinstance(H, HermitianOperator)
assert isinstance(H, Operator)
assert Dagger(H) == H
assert H.inv() != H
assert H.is_commutative is False
assert Dagger(H).is_commutative is False
def test_unitary():
U = UnitaryOperator('U')
assert isinstance(U, UnitaryOperator)
assert isinstance(U, Operator)
assert U.inv() == Dagger(U)
assert U*Dagger(U) == 1
assert Dagger(U)*U == 1
assert U.is_commutative is False
assert Dagger(U).is_commutative is False
def test_identity():
with warns_deprecated_sympy():
I = IdentityOperator()
O = Operator('O')
x = Symbol("x")
three = sympify(3)
assert isinstance(I, IdentityOperator)
assert isinstance(I, Operator)
assert I * O == O
assert O * I == O
assert I * Dagger(O) == Dagger(O)
assert Dagger(O) * I == Dagger(O)
assert isinstance(I * I, IdentityOperator)
assert three * I == three
assert I * x == x
assert I.inv() == I
assert Dagger(I) == I
assert qapply(I * O) == O
assert qapply(O * I) == O
for n in [2, 3, 5]:
assert represent(IdentityOperator(n)) == eye(n)
def test_outer_product():
k = Ket('k')
b = Bra('b')
op = OuterProduct(k, b)
assert isinstance(op, OuterProduct)
assert isinstance(op, Operator)
assert op.ket == k
assert op.bra == b
assert op.label == (k, b)
assert op.is_commutative is False
op = k*b
assert isinstance(op, OuterProduct)
assert isinstance(op, Operator)
assert op.ket == k
assert op.bra == b
assert op.label == (k, b)
assert op.is_commutative is False
op = 2*k*b
assert op == Mul(Integer(2), k, b)
op = 2*(k*b)
assert op == Mul(Integer(2), OuterProduct(k, b))
assert Dagger(k*b) == OuterProduct(Dagger(b), Dagger(k))
assert Dagger(k*b).is_commutative is False
#test the _eval_trace
assert Tr(OuterProduct(JzKet(1, 1), JzBra(1, 1))).doit() == 1
# test scaled kets and bras
assert OuterProduct(2 * k, b) == 2 * OuterProduct(k, b)
assert OuterProduct(k, 2 * b) == 2 * OuterProduct(k, b)
# test sums of kets and bras
k1, k2 = Ket('k1'), Ket('k2')
b1, b2 = Bra('b1'), Bra('b2')
assert (OuterProduct(k1 + k2, b1) ==
OuterProduct(k1, b1) + OuterProduct(k2, b1))
assert (OuterProduct(k1, b1 + b2) ==
OuterProduct(k1, b1) + OuterProduct(k1, b2))
assert (OuterProduct(1 * k1 + 2 * k2, 3 * b1 + 4 * b2) ==
3 * OuterProduct(k1, b1) +
4 * OuterProduct(k1, b2) +
6 * OuterProduct(k2, b1) +
8 * OuterProduct(k2, b2))
def test_operator_dagger():
A = Operator('A')
B = Operator('B')
assert Dagger(A*B) == Dagger(B)*Dagger(A)
assert Dagger(A + B) == Dagger(A) + Dagger(B)
assert Dagger(A**2) == Dagger(A)**2
def test_differential_operator():
x = Symbol('x')
f = Function('f')
d = DifferentialOperator(Derivative(f(x), x), f(x))
g = Wavefunction(x**2, x)
assert qapply(d*g) == Wavefunction(2*x, x)
assert d.expr == Derivative(f(x), x)
assert d.function == f(x)
assert d.variables == (x,)
assert diff(d, x) == DifferentialOperator(Derivative(f(x), x, 2), f(x))
d = DifferentialOperator(Derivative(f(x), x, 2), f(x))
g = Wavefunction(x**3, x)
assert qapply(d*g) == Wavefunction(6*x, x)
assert d.expr == Derivative(f(x), x, 2)
assert d.function == f(x)
assert d.variables == (x,)
assert diff(d, x) == DifferentialOperator(Derivative(f(x), x, 3), f(x))
d = DifferentialOperator(1/x*Derivative(f(x), x), f(x))
assert d.expr == 1/x*Derivative(f(x), x)
assert d.function == f(x)
assert d.variables == (x,)
assert diff(d, x) == \
DifferentialOperator(Derivative(1/x*Derivative(f(x), x), x), f(x))
assert qapply(d*g) == Wavefunction(3*x, x)
# 2D cartesian Laplacian
y = Symbol('y')
d = DifferentialOperator(Derivative(f(x, y), x, 2) +
Derivative(f(x, y), y, 2), f(x, y))
w = Wavefunction(x**3*y**2 + y**3*x**2, x, y)
assert d.expr == Derivative(f(x, y), x, 2) + Derivative(f(x, y), y, 2)
assert d.function == f(x, y)
assert d.variables == (x, y)
assert diff(d, x) == \
DifferentialOperator(Derivative(d.expr, x), f(x, y))
assert diff(d, y) == \
DifferentialOperator(Derivative(d.expr, y), f(x, y))
assert qapply(d*w) == Wavefunction(2*x**3 + 6*x*y**2 + 6*x**2*y + 2*y**3,
x, y)
# 2D polar Laplacian (th = theta)
r, th = symbols('r th')
d = DifferentialOperator(1/r*Derivative(r*Derivative(f(r, th), r), r) +
1/(r**2)*Derivative(f(r, th), th, 2), f(r, th))
w = Wavefunction(r**2*sin(th), r, (th, 0, pi))
assert d.expr == \
1/r*Derivative(r*Derivative(f(r, th), r), r) + \
1/(r**2)*Derivative(f(r, th), th, 2)
assert d.function == f(r, th)
assert d.variables == (r, th)
assert diff(d, r) == \
DifferentialOperator(Derivative(d.expr, r), f(r, th))
assert diff(d, th) == \
DifferentialOperator(Derivative(d.expr, th), f(r, th))
assert qapply(d*w) == Wavefunction(3*sin(th), r, (th, 0, pi))
def test_eval_power():
from sympy.core import Pow
from sympy.core.expr import unchanged
O = Operator('O')
U = UnitaryOperator('U')
H = HermitianOperator('H')
assert O**-1 == O.inv() # same as doc test
assert U**-1 == U.inv()
assert H**-1 == H.inv()
x = symbols("x", commutative = True)
    assert unchanged(Pow, H, x) # verify Pow(H,x)=="H^x"
assert H**x == Pow(H, x)
assert Pow(H,x) == Pow(H, x, evaluate=False) # Just check
from sympy.physics.quantum.gate import XGate
X = XGate(0) # is hermitian and unitary
assert unchanged(Pow, X, x) # verify Pow(X,x)=="X^x"
assert X**x == Pow(X, x)
assert Pow(X, x, evaluate=False) == Pow(X, x) # Just check
n = symbols("n", integer=True, even=True)
assert X**n == 1
n = symbols("n", integer=True, odd=True)
assert X**n == X
n = symbols("n", integer=True)
assert unchanged(Pow, X, n) # verify Pow(X,n)=="X^n"
assert X**n == Pow(X, n)
assert Pow(X, n, evaluate=False)==Pow(X, n) # Just check
assert X**4 == 1
assert X**7 == X
| CustomOp |
python | jazzband__django-model-utils | model_utils/tracker.py | {
"start": 4403,
"end": 4653
} | class ____(DescriptorWrapper[T]):
"""
Wrapper for descriptors with all three descriptor methods.
"""
def __delete__(self, obj: models.Model) -> None:
cast(FullDescriptor[T], self.descriptor).__delete__(obj)
| FullDescriptorWrapper |
python | sympy__sympy | sympy/core/basic.py | {
"start": 73251,
"end": 76719
} | class ____(Basic):
"""
A parent class for atomic things. An atom is an expression with no subexpressions.
Examples
========
Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_Atom = True
__slots__ = ()
def matches(self, expr, repl_dict=None, old=False):
if self == expr:
if repl_dict is None:
return {}
return repl_dict.copy()
def xreplace(self, rule, hack2=False):
return rule.get(self, self)
def doit(self, **hints):
return self
@classmethod
def class_key(cls):
return 2, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
def _eval_simplify(self, **kwargs):
return self
@property
def _sorted_args(self):
# this is here as a safeguard against accidentally using _sorted_args
# on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args)
# since there are no args. So the calling routine should be checking
# to see that this property is not called for Atoms.
raise AttributeError('Atoms have no args. It might be necessary'
' to make a check for Atoms in the calling code.')
def _atomic(e, recursive=False):
"""Return atom-like quantities as far as substitution is
concerned: Derivatives, Functions and Symbols. Do not
return any 'atoms' that are inside such quantities unless
    they also appear outside, or unless `recursive` is True.
Examples
========
>>> from sympy import Derivative, Function, cos
>>> from sympy.abc import x, y
>>> from sympy.core.basic import _atomic
>>> f = Function('f')
>>> _atomic(x + y)
{x, y}
>>> _atomic(x + f(y))
{x, f(y)}
>>> _atomic(Derivative(f(x), x) + cos(x) + y)
{y, cos(x), Derivative(f(x), x)}
"""
pot = _preorder_traversal(e)
seen = set()
if isinstance(e, Basic):
free = getattr(e, "free_symbols", None)
if free is None:
return {e}
else:
return set()
from .symbol import Symbol
from .function import Derivative, Function
atoms = set()
for p in pot:
if p in seen:
pot.skip()
continue
seen.add(p)
if isinstance(p, Symbol) and p in free:
atoms.add(p)
elif isinstance(p, (Derivative, Function)):
if not recursive:
pot.skip()
atoms.add(p)
return atoms
def _make_find_query(query):
"""Convert the argument of Basic.find() into a callable"""
try:
query = _sympify(query)
except SympifyError:
pass
if isinstance(query, type):
return lambda expr: isinstance(expr, query)
elif isinstance(query, Basic):
return lambda expr: expr.match(query) is not None
return query
# Delayed to avoid cyclic import
from .singleton import S
from .traversal import (preorder_traversal as _preorder_traversal,
iterargs, iterfreeargs)
preorder_traversal = deprecated(
"""
Using preorder_traversal from the sympy.core.basic submodule is
deprecated.
Instead, use preorder_traversal from the top-level sympy namespace, like
sympy.preorder_traversal
""",
deprecated_since_version="1.10",
active_deprecations_target="deprecated-traversal-functions-moved",
)(_preorder_traversal)
| Atom |
python | FactoryBoy__factory_boy | tests/test_utils.py | {
"start": 616,
"end": 1846
} | class ____(unittest.TestCase):
def test_nothing(self):
txt = str(utils.log_pprint())
self.assertEqual('', txt)
def test_only_args(self):
txt = str(utils.log_pprint((1, 2, 3)))
self.assertEqual('1, 2, 3', txt)
def test_only_kwargs(self):
txt = str(utils.log_pprint(kwargs={'a': 1, 'b': 2}))
self.assertIn(txt, ['a=1, b=2', 'b=2, a=1'])
def test_bytes_args(self):
txt = str(utils.log_pprint((b'\xe1\xe2',)))
expected = "b'\\xe1\\xe2'"
self.assertEqual(expected, txt)
def test_text_args(self):
txt = str(utils.log_pprint(('ŧêßŧ',)))
expected = "'ŧêßŧ'"
self.assertEqual(expected, txt)
def test_bytes_kwargs(self):
txt = str(utils.log_pprint(kwargs={'x': b'\xe1\xe2', 'y': b'\xe2\xe1'}))
expected1 = "x=b'\\xe1\\xe2', y=b'\\xe2\\xe1'"
expected2 = "y=b'\\xe2\\xe1', x=b'\\xe1\\xe2'"
self.assertIn(txt, (expected1, expected2))
def test_text_kwargs(self):
txt = str(utils.log_pprint(kwargs={'x': 'ŧêßŧ', 'y': 'ŧßêŧ'}))
expected1 = "x='ŧêßŧ', y='ŧßêŧ'"
expected2 = "y='ŧßêŧ', x='ŧêßŧ'"
self.assertIn(txt, (expected1, expected2))
| LogPPrintTestCase |
python | python-attrs__attrs | tests/test_validators.py | {
"start": 29124,
"end": 30178
} | class ____:
"""
Tests for `_subclass_of`.
"""
def test_success(self):
"""
Nothing happens if classes match.
"""
v = _subclass_of(int)
v(None, simple_attr("test"), int)
def test_subclass(self):
"""
Subclasses are accepted too.
"""
v = _subclass_of(int)
# yep, bools are a subclass of int :(
v(None, simple_attr("test"), bool)
def test_fail(self):
"""
Raises `TypeError` on wrong types.
"""
v = _subclass_of(int)
a = simple_attr("test")
with pytest.raises(TypeError) as e:
v(None, a, str)
assert (
"'test' must be a subclass of <class 'int'> (got <class 'str'>).",
a,
int,
str,
) == e.value.args
def test_repr(self):
"""
Returned validator has a useful `__repr__`.
"""
v = _subclass_of(int)
assert ("<subclass_of validator for type <class 'int'>>") == repr(v)
| TestSubclassOf |
python | pypa__pip | src/pip/_vendor/urllib3/connectionpool.py | {
"start": 1689,
"end": 2909
} | class ____(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
.. note::
        ConnectionPool.urlopen() does not normalize or percent-encode target URIs,
which is useful if your target server doesn't support percent-encoded
target URIs.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
if not host:
raise LocationValueError("No host specified.")
self.host = _normalize_host(host, scheme=self.scheme)
self._proxy_host = host.lower()
self.port = port
def __str__(self):
return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# Return False to re-raise any potential exceptions
return False
def close(self):
"""
Close all pooled connections and disable the pool.
"""
pass
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
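# Editor's note (illustrative, not part of the original urllib3 source): concrete
# subclasses such as HTTPConnectionPool set ``scheme`` and are typically used as
# context managers, relying on the __enter__/__exit__ methods above:
#   with HTTPConnectionPool("example.com", port=80) as pool:
#       ...  # __exit__ closes the pool and does not suppress exceptions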
| ConnectionPool |
python | google__pytype | pytype/overlays/classgen.py | {
"start": 6353,
"end": 11217
} | class ____(abstract.PyTDFunction):
"""Implements constructors for fields."""
def get_kwarg(self, args, name, default):
if name not in args.namedargs:
return default
try:
return abstract_utils.get_atomic_python_constant(args.namedargs[name])
except abstract_utils.ConversionError:
self.ctx.errorlog.not_supported_yet(
self.ctx.vm.frames, f"Non-constant argument {name!r}"
)
def get_positional_names(self):
# TODO(mdemello): We currently assume all field constructors are called with
# namedargs, which has worked in practice but is not required by the attrs
# or dataclasses apis.
return []
def is_method(var):
if var is None:
return False
return isinstance(
var.data[0],
(
abstract.INTERPRETER_FUNCTION_TYPES,
special_builtins.ClassMethodInstance,
special_builtins.PropertyInstance,
special_builtins.StaticMethodInstance,
),
)
def is_dunder(name):
return name.startswith("__") and name.endswith("__")
def add_member(node, cls, name, typ):
if typ.formal:
# If typ contains a type parameter, we mark it as empty so that instances
# will use __annotations__ to fill in concrete type parameter values.
instance = typ.ctx.convert.empty.to_variable(node)
else:
# See test_attr.TestAttrib.test_repeated_default - keying on the name
# prevents attributes from sharing the same default object.
instance = typ.ctx.vm.init_class(node, typ, extra_key=name)
cls.members[name] = instance
def is_relevant_class_local(
class_local: abstract_utils.Local,
class_local_name: str,
allow_methods: bool,
):
"""Tests whether the current class local could be relevant for type checking.
For example, this doesn't match __dunder__ class locals.
  To get an abstract_utils.Local from a vm.LocalOps, you can use
'vm_instance.annotated_locals[cls_name][op.name]'.
Args:
class_local: the local to query
class_local_name: the name of the class local (because abstract_utils.Local
does not hold this information).
allow_methods: whether to allow methods class locals to match
Returns:
Whether this class local could possibly be relevant for type checking.
Callers will usually want to filter even further.
"""
if is_dunder(class_local_name):
return False
if not allow_methods and not class_local.typ and is_method(class_local.orig):
return False
return True
def get_class_locals(cls_name: str, allow_methods: bool, ordering, ctx):
"""Gets a dictionary of the class's local variables.
Args:
cls_name: The name of an abstract.InterpreterClass.
allow_methods: A bool, whether to allow methods as variables.
ordering: A classgen.Ordering describing the order in which the variables
should appear.
ctx: The abstract context.
Returns:
A collections.OrderedDict of the locals.
"""
out = collections.OrderedDict()
if cls_name not in ctx.vm.local_ops:
# See TestAttribPy3.test_cannot_decorate in tests/test_attr2.py. The
# class will not be in local_ops if a previous decorator hides it.
return out
for op in ctx.vm.local_ops[cls_name]:
local = ctx.vm.annotated_locals[cls_name][op.name]
if not is_relevant_class_local(local, op.name, allow_methods):
continue
if ordering is Ordering.FIRST_ANNOTATE:
if not op.is_annotate() or op.name in out:
continue
else:
assert ordering is Ordering.LAST_ASSIGN
if not op.is_assign():
continue
elif op.name in out:
out.move_to_end(op.name)
out[op.name] = local
return out
def make_replace_method(ctx, node, cls, *, kwargs_name="kwargs"):
"""Create a replace() method for a dataclass."""
# This is used by several packages that extend dataclass.
# The signature is
# def replace(self: T, **kwargs) -> T
typevar = abstract.TypeParameter(abstract_utils.T + cls.name, ctx, bound=cls)
return overlay_utils.make_method(
ctx=ctx,
node=node,
name="replace",
return_type=typevar,
self_param=overlay_utils.Param("self", typevar),
kwargs=overlay_utils.Param(kwargs_name),
)
def get_or_create_annotations_dict(members, ctx):
"""Get __annotations__ from members map, create and attach it if not present.
The returned dict is also referenced by members, so it is safe to mutate.
Args:
members: A dict of member name to variable.
ctx: context.Context instance.
Returns:
members['__annotations__'] unpacked as a python dict
"""
annotations_dict = abstract_utils.get_annotations_dict(members)
if annotations_dict is None:
annotations_dict = abstract.AnnotationsDict({}, ctx)
members["__annotations__"] = annotations_dict.to_variable(ctx.root_node)
return annotations_dict
@dataclasses.dataclass
| FieldConstructor |
python | getsentry__sentry | src/sentry/api/endpoints/artifact_lookup.py | {
"start": 11046,
"end": 11863
} | class ____:
def __init__(self, request: Request, project: Project):
if is_system_auth(request.auth):
self.base_url = get_internal_artifact_lookup_source_url(project)
else:
self.base_url = request.build_absolute_uri(request.path)
def url_for_file_id(self, download_id: str) -> str:
# NOTE: Returning a self-route that requires authentication (via Bearer token)
# is not really forward compatible with a pre-signed URL that does not
# require any authentication or headers whatsoever.
# This also requires a workaround in Symbolicator, as its generic http
# downloader blocks "internal" IPs, whereas the internal Sentry downloader
# is explicitly exempt.
return f"{self.base_url}?download={download_id}"
| UrlConstructor |
python | openai__openai-python | src/openai/resources/conversations/conversations.py | {
"start": 16992,
"end": 17747
} | class ____:
def __init__(self, conversations: AsyncConversations) -> None:
self._conversations = conversations
self.create = _legacy_response.async_to_raw_response_wrapper(
conversations.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
conversations.retrieve,
)
self.update = _legacy_response.async_to_raw_response_wrapper(
conversations.update,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
conversations.delete,
)
@cached_property
def items(self) -> AsyncItemsWithRawResponse:
return AsyncItemsWithRawResponse(self._conversations.items)
| AsyncConversationsWithRawResponse |
python | kamyu104__LeetCode-Solutions | Python/reformat-the-string.py | {
"start": 50,
"end": 925
} | class ____(object):
def reformat(self, s):
"""
:type s: str
:rtype: str
"""
def char_gen(start, end, count):
for c in xrange(ord(start), ord(end)+1):
c = chr(c)
for i in xrange(count[c]):
yield c
yield ''
count = collections.defaultdict(int)
alpha_cnt = 0
for c in s:
count[c] += 1
if c.isalpha():
alpha_cnt += 1
if abs(len(s)-2*alpha_cnt) > 1:
return ""
result = []
it1, it2 = char_gen('a', 'z', count), char_gen('0', '9', count)
if alpha_cnt < len(s)-alpha_cnt:
it1, it2 = it2, it1
while len(result) < len(s):
result.append(next(it1))
result.append(next(it2))
return "".join(result)
| Solution |
python | django__django | tests/admin_inlines/models.py | {
"start": 5991,
"end": 6118
} | class ____(models.Model):
name = models.CharField(max_length=40)
novel = models.ForeignKey(Novel, models.CASCADE)
| Chapter |
python | walkccc__LeetCode | solutions/2798. Number of Employees Who Met the Target/2798.py | {
"start": 0,
"end": 146
} | class ____:
def numberOfEmployeesWhoMetTarget(self, hours: list[int], target: int) -> int:
return sum(hour >= target for hour in hours)
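# Worked example (editor's illustration): hours=[0, 1, 2, 3, 4], target=2 -> 3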
| Solution |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1595079,
"end": 1596904
} | class ____(VegaLiteSchema):
"""
WindowFieldDef schema wrapper.
Parameters
----------
op : :class:`AggregateOp`, :class:`WindowOnlyOp`, Literal['argmax', 'argmin', 'average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb', 'row_number', 'rank', 'dense_rank', 'percent_rank', 'cume_dist', 'ntile', 'lag', 'lead', 'first_value', 'last_value', 'nth_value']
The window or aggregation operation to apply within a window (e.g., ``"rank"``,
``"lead"``, ``"sum"``, ``"average"`` or ``"count"``). See the list of all supported
operations `here <https://vega.github.io/vega-lite/docs/window.html#ops>`__.
field : str, :class:`FieldName`
The data field for which to compute the aggregate or window function. This can be
omitted for window functions that do not operate over a field such as ``"count"``,
``"rank"``, ``"dense_rank"``.
param : float
Parameter values for the window functions. Parameter values can be omitted for
operations that do not accept a parameter.
See the list of all supported operations and their parameters `here
<https://vega.github.io/vega-lite/docs/transforms/window.html>`__.
as : str, :class:`FieldName`
The output name for the window operation.
"""
_schema = {"$ref": "#/definitions/WindowFieldDef"}
def __init__(
self,
op: Optional[SchemaBase | AggregateOp_T | WindowOnlyOp_T] = Undefined,
field: Optional[str | SchemaBase] = Undefined,
param: Optional[float] = Undefined,
**kwds,
):
super().__init__(op=op, field=field, param=param, **kwds)
| WindowFieldDef |
python | EpistasisLab__tpot | tpot/builtin_modules/arithmetictransformer.py | {
"start": 9435,
"end": 10121
} | class ____(TransformerMixin, BaseEstimator):
def __init__(self):
"""
        A transformer that checks, element-wise, whether each value is greater than or equal to 0.
"""
pass
def fit(self, X, y=None):
return self
def transform(self, X):
transformed_X = np.array(self.transform_helper(np.array(X)))
if transformed_X.dtype != float:
transformed_X = transformed_X.astype(float)
return transformed_X
def transform_helper(self, X):
X = np.array(X)
if len(X.shape) == 1:
X = np.expand_dims(X,0)
result = X >= 0
return result.astype(float)
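    # Worked example (editor's illustration, not part of the original TPOT source):
    #   GETransformer().transform([[1.0, -2.0]]) -> array([[1., 0.]])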
| GETransformer |
python | getsentry__sentry | tests/sentry/api/endpoints/test_chunk_upload.py | {
"start": 885,
"end": 17808
} | class ____(APITestCase):
@pytest.fixture(autouse=True)
def _restore_upload_url_options(self):
options.delete("system.upload-url-prefix")
def setUp(self) -> None:
self.organization = self.create_organization(owner=self.user)
with assume_test_silo_mode(SiloMode.CONTROL):
self.token = ApiToken.objects.create(user=self.user, scope_list=["project:write"])
self.url = reverse("sentry-api-0-chunk-upload", args=[self.organization.slug])
def _get_launchpad_auth_headers(self, method="GET", data=b""):
"""Generate Launchpad RPC signature authentication headers."""
signature = generate_service_request_signature(
self.url, data, ["test-secret-key"], "Launchpad"
)
return {"HTTP_AUTHORIZATION": f"rpcsignature {signature}"}
def test_chunk_parameters(self) -> None:
response = self.client.get(
self.url, HTTP_AUTHORIZATION=f"Bearer {self.token.token}", format="json"
)
assert response.status_code == 200, response.content
assert response.data["chunkSize"] == settings.SENTRY_CHUNK_UPLOAD_BLOB_SIZE
assert response.data["chunksPerRequest"] == MAX_CHUNKS_PER_REQUEST
assert response.data["maxRequestSize"] == MAX_REQUEST_SIZE
assert response.data["maxFileSize"] == MAX_FILE_SIZE
assert response.data["concurrency"] == MAX_CONCURRENCY
assert response.data["hashAlgorithm"] == HASH_ALGORITHM
assert response.data["url"] == generate_region_url() + self.url
assert response.data["accept"] == CHUNK_UPLOAD_ACCEPT
with override_options({"system.upload-url-prefix": "test"}):
response = self.client.get(
self.url, HTTP_AUTHORIZATION=f"Bearer {self.token.token}", format="json"
)
assert response.data["url"] == options.get("system.upload-url-prefix") + self.url
assert math.log2(response.data["chunkSize"]) % 1 == 0, (
"chunkSize is not a power of two. This change will break Sentry CLI versions ≤2.39.1, "
"since these versions only support chunk sizes which are a power of two. Chunk uploads "
"will error in these versions when the CLI receives a chunk size which is not a power "
"of two from the server, and there is no way for users to work around the error."
)
@override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"])
def test_chunk_parameters_launchpad_auth(self) -> None:
"""Test that Launchpad authentication works for GET requests."""
headers = self._get_launchpad_auth_headers("GET")
response = self.client.get(self.url, **headers, format="json")
assert response.status_code == 200, response.content
assert response.data["chunkSize"] == settings.SENTRY_CHUNK_UPLOAD_BLOB_SIZE
assert response.data["url"] == generate_region_url() + self.url
@override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"])
def test_chunk_parameters_launchpad_auth_different_org(self) -> None:
"""Test that Launchpad auth bypasses organization permission checks."""
# Create a different organization that the user doesn't have access to
other_org = self.create_organization(name="Other Org")
other_url = reverse("sentry-api-0-chunk-upload", args=[other_org.slug])
# Standard auth should fail
response = self.client.get(
other_url, HTTP_AUTHORIZATION=f"Bearer {self.token.token}", format="json"
)
assert response.status_code == 403
# Launchpad auth should succeed
signature = generate_service_request_signature(
other_url, b"", ["test-secret-key"], "Launchpad"
)
response = self.client.get(
other_url, HTTP_AUTHORIZATION=f"rpcsignature {signature}", format="json"
)
assert response.status_code == 200
def test_launchpad_auth_missing_secret(self) -> None:
"""Test that missing shared secret setting causes authentication to fail."""
headers = self._get_launchpad_auth_headers("GET")
response = self.client.get(self.url, **headers, format="json")
assert response.status_code == 500 # RpcAuthenticationSetupException
@override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"])
def test_launchpad_auth_invalid_signature(self) -> None:
"""Test that invalid signature causes authentication to fail."""
response = self.client.get(
self.url, HTTP_AUTHORIZATION="rpcsignature rpc0:invalid_signature", format="json"
)
assert response.status_code == 401
def test_relative_url_support(self) -> None:
# Starting `sentry-cli@1.70.1` we added a support for relative chunk-uploads urls
# >= 1.70.1
response = self.client.get(
self.url,
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
HTTP_USER_AGENT="sentry-cli/1.70.1",
format="json",
)
assert response.data["url"] == self.url.lstrip(API_PREFIX)
response = self.client.get(
self.url,
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
HTTP_USER_AGENT="sentry-cli/2.20.5",
format="json",
)
assert response.data["url"] == self.url.lstrip(API_PREFIX)
# < 1.70.1
response = self.client.get(
self.url,
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
HTTP_USER_AGENT="sentry-cli/1.70.0",
format="json",
)
assert response.data["url"] == generate_region_url() + self.url
response = self.client.get(
self.url,
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
HTTP_USER_AGENT="sentry-cli/0.69.3",
format="json",
)
assert response.data["url"] == generate_region_url() + self.url
# user overridden upload url prefix has priority, even when calling from sentry-cli that supports relative urls
with override_options({"system.upload-url-prefix": "test"}):
response = self.client.get(
self.url,
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
HTTP_USER_AGENT="sentry-cli/1.70.1",
format="json",
)
assert response.data["url"] == options.get("system.upload-url-prefix") + self.url
with override_options({"hybrid_cloud.disable_relative_upload_urls": True}):
response = self.client.get(
self.url,
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
HTTP_USER_AGENT="sentry-cli/1.70.1",
format="json",
)
assert response.data["url"] == generate_region_url() + self.url
def test_region_upload_urls(self) -> None:
response = self.client.get(
self.url,
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
HTTP_USER_AGENT="sentry-cli/1.70.1",
format="json",
)
assert response.data["url"] == self.url.lstrip(API_PREFIX)
response = self.client.get(
self.url,
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
HTTP_USER_AGENT="sentry-cli/0.69.3",
format="json",
)
assert response.data["url"] == generate_region_url() + self.url
response = self.client.get(
self.url,
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
HTTP_USER_AGENT="sentry-cli/2.29.999",
format="json",
)
assert response.data["url"] == self.url.lstrip(API_PREFIX)
response = self.client.get(
self.url,
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
HTTP_USER_AGENT="sentry-cli/2.30.0",
format="json",
)
assert response.data["url"] == generate_region_url() + self.url
with override_options({"hybrid_cloud.disable_relative_upload_urls": True}):
response = self.client.get(
self.url,
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
HTTP_USER_AGENT="sentry-cli/2.29.99",
format="json",
)
assert response.data["url"] == generate_region_url() + self.url
def test_wrong_api_token(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
token = ApiToken.objects.create(user=self.user, scope_list=["org:org"])
response = self.client.get(self.url, HTTP_AUTHORIZATION=f"Bearer {token.token}")
assert response.status_code == 403, response.content
def test_upload(self) -> None:
data1 = b"1 this is my testString"
data2 = b"2 this is my testString"
checksum1 = sha1(data1).hexdigest()
checksum2 = sha1(data2).hexdigest()
blob1 = SimpleUploadedFile(checksum1, data1, content_type="multipart/form-data")
blob2 = SimpleUploadedFile(checksum2, data2, content_type="multipart/form-data")
response = self.client.post(
self.url,
data={"file": [blob1, blob2]},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
# this tells drf to select the MultiPartParser. We use that instead of
# FileUploadParser because we have our own specific file chunking mechanism
# in the chunk endpoint that has requirements like blob/chunk's filename = checksum.
format="multipart",
)
assert response.status_code == 200, response.content
file_blobs = FileBlob.objects.all()
assert len(file_blobs) == 2
assert file_blobs[0].checksum == checksum1
assert file_blobs[1].checksum == checksum2
@override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"])
def test_upload_launchpad_auth(self) -> None:
"""Test that chunk upload works with Launchpad authentication."""
# For this test, we'll mock the authentication to bypass the signature validation
# and focus on testing the permission logic
from unittest.mock import patch
from sentry.preprod.authentication import LaunchpadRpcSignatureAuthentication
data1 = b"1 this is my testString"
data2 = b"2 this is my testString"
checksum1 = sha1(data1).hexdigest()
checksum2 = sha1(data2).hexdigest()
blob1 = SimpleUploadedFile(checksum1, data1, content_type="multipart/form-data")
blob2 = SimpleUploadedFile(checksum2, data2, content_type="multipart/form-data")
# Mock the authentication to return a successful result
with (
patch.object(
LaunchpadRpcSignatureAuthentication,
"authenticate",
return_value=(None, "rpc0:test_signature"),
),
patch("sentry.middleware.access_log._get_token_name", return_value="rpcsignature"),
):
response = self.client.post(
self.url,
data={"file": [blob1, blob2]},
HTTP_AUTHORIZATION="rpcsignature test_signature",
format="multipart",
)
assert response.status_code == 200, response.content
file_blobs = FileBlob.objects.all()
assert len(file_blobs) == 2
assert file_blobs[0].checksum == checksum1
assert file_blobs[1].checksum == checksum2
@override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"])
def test_upload_launchpad_auth_different_org(self) -> None:
"""Test that Launchpad auth bypasses organization permission checks for uploads."""
# Create a different organization that the user doesn't have access to
other_org = self.create_organization(name="Other Org")
other_url = reverse("sentry-api-0-chunk-upload", args=[other_org.slug])
data1 = b"1 this is my testString"
checksum1 = sha1(data1).hexdigest()
blob1 = SimpleUploadedFile(checksum1, data1, content_type="multipart/form-data")
# Standard auth should fail
response = self.client.post(
other_url,
data={"file": [blob1]},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
format="multipart",
)
assert response.status_code == 403
# For now, let's just test that the Launchpad auth class exists and permission logic works
# The actual signature validation is complex to test due to multipart encoding
from unittest.mock import Mock
from sentry.api.endpoints.chunk import ChunkUploadPermission
from sentry.preprod.authentication import LaunchpadRpcSignatureAuthentication
# Test the permission logic directly
permission = ChunkUploadPermission()
mock_request = Mock()
mock_request.successful_authenticator = LaunchpadRpcSignatureAuthentication()
# This should return True for Launchpad auth
assert permission.has_permission(mock_request, None)
assert permission.has_object_permission(mock_request, None, other_org)
def test_empty_upload(self) -> None:
response = self.client.post(
self.url, HTTP_AUTHORIZATION=f"Bearer {self.token.token}", format="multipart"
)
assert response.status_code == 200
file_blobs = FileBlob.objects.all()
assert len(file_blobs) == 0
@override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"])
def test_empty_upload_launchpad_auth(self) -> None:
"""Test that empty uploads work with Launchpad authentication."""
from unittest.mock import patch
from sentry.preprod.authentication import LaunchpadRpcSignatureAuthentication
# Mock the authentication to return a successful result
with (
patch.object(
LaunchpadRpcSignatureAuthentication,
"authenticate",
return_value=(None, "rpc0:test_signature"),
),
patch("sentry.middleware.access_log._get_token_name", return_value="rpcsignature"),
):
response = self.client.post(
self.url,
HTTP_AUTHORIZATION="rpcsignature test_signature",
format="multipart",
)
assert response.status_code == 200
file_blobs = FileBlob.objects.all()
assert len(file_blobs) == 0
def test_too_many_chunks(self) -> None:
files = []
# Exactly the limit
for x in range(0, MAX_CHUNKS_PER_REQUEST + 1):
content = b"x"
files.append(SimpleUploadedFile(sha1(content).hexdigest(), content))
response = self.client.post(
self.url,
data={"file": files},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
format="multipart",
)
assert response.status_code == 400, response.content
def test_too_large_request(self) -> None:
files = []
# Exactly the limit
for x in range(0, MAX_CHUNKS_PER_REQUEST):
content = b"x" * (MAX_REQUEST_SIZE // MAX_CHUNKS_PER_REQUEST)
files.append(SimpleUploadedFile(sha1(content).hexdigest(), content))
response = self.client.post(
self.url,
data={"file": files},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
format="multipart",
)
assert response.status_code == 200, response.content
# We overflow the request here
files.append(SimpleUploadedFile(sha1(b"content").hexdigest(), b"content"))
response = self.client.post(
self.url,
data={"file": files},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
format="multipart",
)
assert response.status_code == 400, response.content
def test_too_large_chunk(self) -> None:
files = []
content = b"x" * (settings.SENTRY_CHUNK_UPLOAD_BLOB_SIZE + 1)
files.append(SimpleUploadedFile(sha1(content).hexdigest(), content))
response = self.client.post(
self.url,
data={"file": files},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
format="multipart",
)
assert response.status_code == 400, response.content
def test_checksum_missmatch(self) -> None:
files = []
content = b"x" * (settings.SENTRY_CHUNK_UPLOAD_BLOB_SIZE + 1)
files.append(SimpleUploadedFile("wrong checksum", content))
response = self.client.post(
self.url,
data={"file": files},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
format="multipart",
)
assert response.status_code == 400, response.content
| ChunkUploadTest |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 147712,
"end": 149158
} | class ____(Operation):
def __init__(self, axis=None, keepdims=False, *, name=None):
super().__init__(name=name)
if isinstance(axis, int):
axis = [axis]
self.axis = axis
self.keepdims = keepdims
def call(self, x):
return backend.numpy.median(x, axis=self.axis, keepdims=self.keepdims)
def compute_output_spec(self, x):
output_shape = reduce_shape(
x.shape, axis=self.axis, keepdims=self.keepdims
)
if backend.standardize_dtype(x.dtype) == "int64":
dtype = backend.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
return KerasTensor(output_shape, dtype=dtype)
@keras_export(["keras.ops.median", "keras.ops.numpy.median"])
def median(x, axis=None, keepdims=False):
"""Compute the median along the specified axis.
Args:
x: Input tensor.
axis: Axis or axes along which the medians are computed. Defaults to
`axis=None` which is to compute the median(s) along a flattened
version of the array.
        keepdims: If this is set to `True`, the axes which are reduced
are left in the result as dimensions with size one.
Returns:
The output tensor.
"""
if any_symbolic_tensors((x,)):
return Median(axis=axis, keepdims=keepdims).symbolic_call(x)
return backend.numpy.median(x, axis=axis, keepdims=keepdims)
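# Worked example (editor's illustration, not part of the original Keras source):
#   keras.ops.median([[1., 2.], [3., 4.]], axis=1) -> [1.5, 3.5]
#   keras.ops.median([[1., 2.], [3., 4.]])         -> 2.5 (flattened)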
| Median |
python | getsentry__sentry | src/sentry/api/serializers/models/discoversavedquery.py | {
"start": 1018,
"end": 1330
} | class ____(DiscoverSavedQueryResponseOptional):
id: str
name: str
projects: list[int]
version: int
queryDataset: str
datasetSource: str
expired: bool
dateCreated: str
dateUpdated: str
createdBy: UserSerializerResponse
@register(DiscoverSavedQuery)
| DiscoverSavedQueryResponse |
python | python__mypy | mypy/test/testformatter.py | {
"start": 127,
"end": 2639
} | class ____(TestCase):
def test_trim_source(self) -> None:
assert trim_source_line("0123456789abcdef", max_len=16, col=5, min_width=2) == (
"0123456789abcdef",
0,
)
# Locations near start.
assert trim_source_line("0123456789abcdef", max_len=7, col=0, min_width=2) == (
"0123456...",
0,
)
assert trim_source_line("0123456789abcdef", max_len=7, col=4, min_width=2) == (
"0123456...",
0,
)
# Middle locations.
assert trim_source_line("0123456789abcdef", max_len=7, col=5, min_width=2) == (
"...1234567...",
-2,
)
assert trim_source_line("0123456789abcdef", max_len=7, col=6, min_width=2) == (
"...2345678...",
-1,
)
assert trim_source_line("0123456789abcdef", max_len=7, col=8, min_width=2) == (
"...456789a...",
1,
)
# Locations near the end.
assert trim_source_line("0123456789abcdef", max_len=7, col=11, min_width=2) == (
"...789abcd...",
4,
)
assert trim_source_line("0123456789abcdef", max_len=7, col=13, min_width=2) == (
"...9abcdef",
6,
)
assert trim_source_line("0123456789abcdef", max_len=7, col=15, min_width=2) == (
"...9abcdef",
6,
)
def test_split_words(self) -> None:
assert split_words("Simple message") == ["Simple", "message"]
assert split_words('Message with "Some[Long, Types]" in it') == [
"Message",
"with",
'"Some[Long, Types]"',
"in",
"it",
]
assert split_words('Message with "Some[Long, Types]" and [error-code]') == [
"Message",
"with",
'"Some[Long, Types]"',
"and",
"[error-code]",
]
assert split_words('"Type[Stands, First]" then words') == [
'"Type[Stands, First]"',
"then",
"words",
]
assert split_words('First words "Then[Stands, Type]"') == [
"First",
"words",
'"Then[Stands, Type]"',
]
assert split_words('"Type[Only, Here]"') == ['"Type[Only, Here]"']
assert split_words("OneWord") == ["OneWord"]
assert split_words(" ") == ["", ""]
if __name__ == "__main__":
main()
| FancyErrorFormattingTestCases |
python | getsentry__sentry | src/sentry/replays/usecases/ingest/event_logger.py | {
"start": 1406,
"end": 1582
} | class ____(TypedDict):
environment: str
clicks: list[ReplayActionsEventPayloadClick]
replay_id: str
type: Literal["replay_actions"]
| ReplayActionsEventClickPayload |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 19076,
"end": 28886
} | class ____(rv_continuous):
r"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is:
.. math::
f(x, a, b) = \frac{\Gamma(a+b) x^{a-1} (1-x)^{b-1}}
{\Gamma(a) \Gamma(b)}
for :math:`0 <= x <= 1`, :math:`a > 0`, :math:`b > 0`, where
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`beta` takes :math:`a` and :math:`b` as shape parameters.
This distribution uses routines from the Boost Math C++ library for
the computation of the ``pdf``, ``cdf``, ``ppf``, ``sf`` and ``isf``
methods. [1]_
Maximum likelihood estimates of parameters are only available when the location and
scale are fixed. When either of these parameters is free, ``beta.fit`` resorts to
numerical optimization, but this problem is unbounded: the location and scale may be
chosen to make the minimum and maximum elements of the data coincide with the
endpoints of the support, and the shape parameters may be chosen to make the PDF at
these points infinite. For best results, pass ``floc`` and ``fscale`` keyword
arguments to fix the location and scale, or use `scipy.stats.fit` with
``method='mse'``.
%(after_notes)s
References
----------
.. [1] The Boost Developers. "Boost C++ Libraries". https://www.boost.org/.
%(example)s
"""
def _shape_info(self):
ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
return [ia, ib]
def _rvs(self, a, b, size=None, random_state=None):
return random_state.beta(a, b, size)
def _pdf(self, x, a, b):
# gamma(a+b) * x**(a-1) * (1-x)**(b-1)
# beta.pdf(x, a, b) = ------------------------------------
# gamma(a)*gamma(b)
with np.errstate(over='ignore'):
return scu._beta_pdf(x, a, b)
def _logpdf(self, x, a, b):
lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
lPx -= sc.betaln(a, b)
return lPx
def _cdf(self, x, a, b):
return sc.betainc(a, b, x)
def _sf(self, x, a, b):
return sc.betaincc(a, b, x)
def _isf(self, x, a, b):
return sc.betainccinv(a, b, x)
def _ppf(self, q, a, b):
return scu._beta_ppf(q, a, b)
def _stats(self, a, b):
a_plus_b = a + b
_beta_mean = a/a_plus_b
_beta_variance = a*b / (a_plus_b**2 * (a_plus_b + 1))
_beta_skewness = ((2 * (b - a) * np.sqrt(a_plus_b + 1)) /
((a_plus_b + 2) * np.sqrt(a * b)))
_beta_kurtosis_excess_n = 6 * ((a - b)**2 * (a_plus_b + 1) -
a * b * (a_plus_b + 2))
_beta_kurtosis_excess_d = a * b * (a_plus_b + 2) * (a_plus_b + 3)
_beta_kurtosis_excess = _beta_kurtosis_excess_n / _beta_kurtosis_excess_d
return (
_beta_mean,
_beta_variance,
_beta_skewness,
_beta_kurtosis_excess)
def _fitstart(self, data):
if isinstance(data, CensoredData):
data = data._uncensor()
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super()._fitstart(data, args=(a, b))
@_call_super_mom
@extend_notes_in_docstring(rv_continuous, notes="""\
In the special case where `method="MLE"` and
both `floc` and `fscale` are given, a
`ValueError` is raised if any value `x` in `data` does not satisfy
`floc < x < floc + fscale`.\n\n""")
def fit(self, data, *args, **kwds):
# Override rv_continuous.fit, so we can more efficiently handle the
# case where floc and fscale are given.
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None or fscale is None:
# do general fit
return super().fit(data, *args, **kwds)
# We already got these from kwds, so just pop them.
kwds.pop('floc', None)
kwds.pop('fscale', None)
f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
f1 = _get_fixed_fit_value(kwds, ['f1', 'fb', 'fix_b'])
_remove_optimizer_parameters(kwds)
if f0 is not None and f1 is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Special case: loc and scale are constrained, so we are fitting
# just the shape parameters. This can be done much more efficiently
# than the method used in `rv_continuous.fit`. (See the subsection
# "Two unknown parameters" in the section "Maximum likelihood" of
# the Wikipedia article on the Beta distribution for the formulas.)
if not np.isfinite(data).all():
raise ValueError("The data contains non-finite values.")
# Normalize the data to the interval [0, 1].
data = (np.ravel(data) - floc) / fscale
if np.any(data <= 0) or np.any(data >= 1):
raise FitDataError("beta", lower=floc, upper=floc + fscale)
xbar = data.mean()
if f0 is not None or f1 is not None:
# One of the shape parameters is fixed.
if f0 is not None:
# The shape parameter a is fixed, so swap the parameters
# and flip the data. We always solve for `a`. The result
# will be swapped back before returning.
b = f0
data = 1 - data
xbar = 1 - xbar
else:
b = f1
# Initial guess for a. Use the formula for the mean of the beta
# distribution, E[x] = a / (a + b), to generate a reasonable
# starting point based on the mean of the data and the given
# value of b.
a = b * xbar / (1 - xbar)
# Compute the MLE for `a` by solving _beta_mle_a.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_a, a,
args=(b, len(data), np.log(data).sum()),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a = theta[0]
if f0 is not None:
# The shape parameter a was fixed, so swap back the
# parameters.
a, b = b, a
else:
# Neither of the shape parameters is fixed.
# s1 and s2 are used in the extra arguments passed to _beta_mle_ab
# by optimize.fsolve.
s1 = np.log(data).sum()
s2 = sc.log1p(-data).sum()
# Use the "method of moments" to estimate the initial
# guess for a and b.
fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
a = xbar * fac
b = (1 - xbar) * fac
# Compute the MLE for a and b by solving _beta_mle_ab.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_ab, [a, b],
args=(len(data), s1, s2),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a, b = theta
return a, b, floc, fscale
def _entropy(self, a, b):
def regular(a, b):
return (sc.betaln(a, b) - (a - 1) * sc.psi(a) -
(b - 1) * sc.psi(b) + (a + b - 2) * sc.psi(a + b))
def asymptotic_ab_large(a, b):
sum_ab = a + b
log_term = 0.5 * (
np.log(2*np.pi) + np.log(a) + np.log(b) - 3*np.log(sum_ab) + 1
)
t1 = 110/sum_ab + 20*sum_ab**-2.0 + sum_ab**-3.0 - 2*sum_ab**-4.0
t2 = -50/a - 10*a**-2.0 - a**-3.0 + a**-4.0
t3 = -50/b - 10*b**-2.0 - b**-3.0 + b**-4.0
return log_term + (t1 + t2 + t3) / 120
def asymptotic_b_large(a, b):
sum_ab = a + b
t1 = sc.gammaln(a) - (a - 1) * sc.psi(a)
t2 = (
- 1/(2*b) + 1/(12*b) - b**-2.0/12 - b**-3.0/120 + b**-4.0/120
+ b**-5.0/252 - b**-6.0/252 + 1/sum_ab - 1/(12*sum_ab)
+ sum_ab**-2.0/6 + sum_ab**-3.0/120 - sum_ab**-4.0/60
- sum_ab**-5.0/252 + sum_ab**-6.0/126
)
log_term = sum_ab*np.log1p(a/b) + np.log(b) - 2*np.log(sum_ab)
return t1 + t2 + log_term
def asymptotic_a_large(a, b):
return asymptotic_b_large(b, a)
def threshold_large(v):
j = np.floor(np.log10(v))
d = np.floor(v / 10 ** j) + 2
return xpx.apply_where(v != 1.0, (d, j), lambda d_, j_: d_ * 10**(7 + j_),
fill_value=1000)
threshold_a = threshold_large(a)
threshold_b = threshold_large(b)
return _lazyselect([(a >= 4.96e6) & (b >= 4.96e6),
(a <= 4.9e6) & (b - a >= 1e6) & (b >= threshold_a),
(b <= 4.9e6) & (a - b >= 1e6) & (a >= threshold_b),
(a < 4.9e6) & (b < 4.9e6)
],
[asymptotic_ab_large, asymptotic_b_large,
asymptotic_a_large, regular],
[a, b]
)
beta = beta_gen(a=0.0, b=1.0, name='beta')
| beta_gen |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/query_metrics/query_column.py | {
"start": 641,
"end": 2995
} | class ____(QueryMetricProvider):
metric_name = "query.column"
value_keys = (
"column",
"query",
)
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
) -> list[dict]:
batch_selectable, _, _ = execution_engine.get_compute_domain(
metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
)
query = cls._get_query_from_metric_value_kwargs(metric_value_kwargs)
column: Optional[str] = metric_value_kwargs.get("column")
if column:
query_parameters = QueryParameters(column=column)
else:
raise ValueError("`column` must be provided.") # noqa: TRY003 # FIXME CoP
substituted_batch_subquery = (
cls._get_substituted_batch_subquery_from_query_and_batch_selectable(
query=query,
batch_selectable=batch_selectable,
execution_engine=execution_engine,
query_parameters=query_parameters,
)
)
return cls._get_sqlalchemy_records_from_substituted_batch_subquery(
substituted_batch_subquery=substituted_batch_subquery,
execution_engine=execution_engine,
)
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
cls,
execution_engine: SparkDFExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
) -> list[dict]:
query = cls._get_query_from_metric_value_kwargs(metric_value_kwargs)
df: pyspark.DataFrame
df, _, _ = execution_engine.get_compute_domain(
metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
)
df.createOrReplaceTempView("tmp_view")
column: Optional[str] = metric_value_kwargs.get("column")
query = query.format(col=column, batch="tmp_view")
engine: pyspark.SparkSession = execution_engine.spark
result: List[pyspark.Row] = engine.sql(query).limit(MAX_RESULT_RECORDS).collect()
return [element.asDict() for element in result]
| QueryColumn |
python | huggingface__transformers | src/transformers/utils/dummy_detectron2_objects.py | {
"start": 116,
"end": 340
} | class ____:
def __init__(self, *args, **kwargs):
requires_backends(self, ["detectron2"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["detectron2"])
| LayoutLMv2Model |
python | ethereum__web3.py | tests/integration/go_ethereum/test_goethereum_http.py | {
"start": 5011,
"end": 5096
} | class ____(GoEthereumAsyncDebugModuleTest):
pass
| TestGoEthereumAsyncDebugModuleTest |
python | xlwings__xlwings | xlwings/pro/_xlcalamine.py | {
"start": 2199,
"end": 2578
} | class ____(base_classes.Apps):
def __init__(self):
self._apps = [App(self)]
def __iter__(self):
return iter(self._apps)
def __len__(self):
return len(self._apps)
def __getitem__(self, index):
return self._apps[index]
def add(self, **kwargs):
self._apps.insert(0, App(self, **kwargs))
return self._apps[0]
| Apps |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 31123,
"end": 31659
} | class ____(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`) but
only if autoescaping is active.
.. versionadded:: 2.5
"""
fields = ("expr",)
expr: Expr
def as_const(self, eval_ctx: EvalContext | None = None) -> Markup | t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
expr = self.expr.as_const(eval_ctx)
if eval_ctx.autoescape:
return Markup(expr)
return expr
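# Editor's note (illustrative, not part of the original Jinja source): for a
# constant expression, e.g. MarkSafeIfAutoescape(Const("<b>")).as_const(eval_ctx):
#   - autoescape on  -> Markup("<b>")
#   - autoescape off -> "<b>"
#   - volatile ctx   -> raises Impossible (decision deferred to runtime)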
| MarkSafeIfAutoescape |
python | gevent__gevent | src/gevent/tests/test__util.py | {
"start": 397,
"end": 580
} | class ____(local.local):
# pylint:disable=disallowed-name
def __init__(self, foo):
self.foo = foo
@greentest.skipOnPyPy("5.10.x is *very* slow formatting stacks")
| MyLocal |
python | coleifer__peewee | tests/schema.py | {
"start": 31759,
"end": 31910
} | class ____(TestModel):
key = CharField()
val = IntegerField()
class Meta:
primary_key = False
table_name = 'tmkv_new'
| TMKVNew |
python | streamlit__streamlit | lib/streamlit/components/v2/component_registry.py | {
"start": 10657,
"end": 17059
} | class ____:
"""Registry for bidirectional components V2.
The registry stores and updates :class:`BidiComponentDefinition` instances in
a thread-safe mapping guarded by a lock.
"""
def __init__(self) -> None:
"""Initialize the component registry with an empty, thread-safe store."""
self._components: MutableMapping[str, BidiComponentDefinition] = {}
self._lock = threading.Lock()
def register_components_from_definitions(
self, component_definitions: dict[str, dict[str, Any]]
) -> None:
"""Register components from processed definition data.
Parameters
----------
component_definitions : dict[str, dict[str, Any]]
Mapping from component identifier to definition data.
"""
with self._lock:
# Register all component definitions
for comp_name, comp_def_data in component_definitions.items():
# Validate required keys and gracefully handle optional ones.
name = comp_def_data.get("name")
if not name:
raise ValueError(
f"Component definition for key '{comp_name}' is missing required 'name' field"
)
definition = BidiComponentDefinition(
name=name,
js=comp_def_data.get("js"),
css=comp_def_data.get("css"),
html=comp_def_data.get("html"),
css_asset_relative_path=comp_def_data.get(
"css_asset_relative_path"
),
js_asset_relative_path=comp_def_data.get("js_asset_relative_path"),
)
self._components[comp_name] = definition
_LOGGER.debug(
"Registered component %s from processed definitions", comp_name
)
def register(self, definition: BidiComponentDefinition) -> None:
"""Register or overwrite a component definition by name.
This method is the primary entry point for adding a component to the
registry. It is used when a component is first declared via the public
API (e.g., ``st.components.v2.component``).
If a component with the same name already exists (e.g., a placeholder
from a manifest scan), it is overwritten. A warning is logged if the
new definition differs from the old one to alert developers of
potential conflicts.
Parameters
----------
definition : BidiComponentDefinition
The component definition to store.
"""
# Register the definition
with self._lock:
name = definition.name
if name in self._components:
existing_definition = self._components[name]
# Check if the existing definition is different and NOT a placeholder.
# We expect placeholders (from manifest scanning) to be overwritten
# by the actual definition from the script execution, so we silence
# the warning in that specific case.
if (
existing_definition != definition
and not existing_definition.is_placeholder
):
_LOGGER.warning(
"Component %s is already registered. Overwriting "
"previous definition. This may lead to unexpected behavior "
"if different modules register the same component name with "
"different definitions.",
name,
)
self._components[name] = definition
_LOGGER.debug("Registered component %s", name)
def get(self, name: str) -> BidiComponentDefinition | None:
"""Return a component definition by name, or ``None`` if not found.
Parameters
----------
name : str
Component name to retrieve.
Returns
-------
BidiComponentDefinition or None
The component definition if present, otherwise ``None``.
"""
with self._lock:
return self._components.get(name)
def unregister(self, name: str) -> None:
"""Remove a component definition from the registry.
Primarily useful for tests and dynamic scenarios.
Parameters
----------
name : str
Component name to unregister.
"""
with self._lock:
if name in self._components:
del self._components[name]
_LOGGER.debug("Unregistered component %s", name)
def clear(self) -> None:
"""Clear all component definitions from the registry."""
with self._lock:
self._components.clear()
_LOGGER.debug("Cleared all components from registry")
def update_component(self, definition: BidiComponentDefinition) -> None:
"""Update (replace) a stored component definition by name.
This method provides a stricter way to update a component definition
and is used for internal processes like file-watcher updates. Unlike
``register``, it will raise an error if the component is not already
present in the registry.
This ensures that background processes can only modify components that
have been explicitly defined in the current session, preventing race
conditions or unexpected behavior where a file-watcher event might try
to update a component that has since been unregistered.
Callers must supply a fully validated :class:`BidiComponentDefinition`.
The registry replaces the stored definition under ``definition.name`` in
a thread-safe manner.
Parameters
----------
definition : BidiComponentDefinition
The fully-resolved component definition to store.
"""
with self._lock:
name = definition.name
if name not in self._components:
raise StreamlitComponentRegistryError(
f"Cannot update unregistered component: {name}"
)
self._components[name] = definition
_LOGGER.debug("Updated component definition for %s", name)
| BidiComponentRegistry |
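A minimal usage sketch for the registry above. It assumes BidiComponentDefinition accepts the keyword fields used in register_components_from_definitions (name, js, css, html, ...); the component name and html body here are made up for illustration.
# Hypothetical usage of BidiComponentRegistry as defined above.
registry = BidiComponentRegistry()
definition = BidiComponentDefinition(name="demo_widget", html="<div>hello</div>")  # fields assumed from the constructor call above
registry.register(definition)
assert registry.get("demo_widget") is definition  # get() returns the stored definition
registry.unregister("demo_widget")
assert registry.get("demo_widget") is None        # unknown names resolve to None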
python | pytransitions__transitions | transitions/extensions/asyncio.py | {
"start": 36223,
"end": 36544
} | class ____(dict):
def __init__(self, item):
super().__init__()
self._value = item
def __setitem__(self, key, item):
self._value = item
def __getitem__(self, key):
return self._value
def __repr__(self):
return repr("{{'*': {0}}}".format(self._value))
| _DictionaryMock |
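A small behaviour sketch for the mock above: every key resolves to the one stored value, and any assignment replaces it, while the dict base class itself stays empty.
# Behaviour sketch for _DictionaryMock defined above.
mock = _DictionaryMock("initial")
print(mock["any key"])        # "initial" - every lookup returns the single stored value
mock["something else"] = 42   # any assignment replaces that value
print(mock["other key"])      # 42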
python | django__django | tests/auth_tests/test_models.py | {
"start": 24105,
"end": 24351
} | class ____(TestCase):
def test_str(self):
p = Permission.objects.get(codename="view_customemailfield")
self.assertEqual(
str(p), "Auth_Tests | custom email field | Can view custom email field"
)
| PermissionTests |
python | kamyu104__LeetCode-Solutions | Python/number-of-people-that-can-be-seen-in-a-grid.py | {
"start": 1025,
"end": 2065
} | class ____(object):
def seePeople(self, heights):
"""
:type heights: List[List[int]]
:rtype: List[List[int]]
"""
def count(heights, i, stk):
cnt = 0
while stk and heights(stk[-1]) < heights(i):
stk.pop()
cnt += 1
if stk:
cnt += 1
if stk and heights(stk[-1]) == heights(i):
stk.pop()
stk.append(i)
return cnt
result = [[0]*len(heights[0]) for _ in xrange(len(heights))]
for i in xrange(len(heights)):
stk = []
for j in reversed(xrange(len(heights[0]))):
result[i][j] += count(lambda x: heights[i][x], j, stk)
for j in xrange(len(heights[0])):
stk = []
for i in reversed(xrange(len(heights))):
result[i][j] += count(lambda x: heights[x][j], i, stk)
return result
# Time: O(m * n)
# Space: O(m + n)
# mono stack
| Solution2 |
python | django__django | tests/admin_views/models.py | {
"start": 10919,
"end": 11114
} | class ____(models.Model):
code = models.CharField(max_length=10, primary_key=True)
owner = models.ForeignKey(Collector, models.CASCADE)
name = models.CharField(max_length=100)
| DooHickey |
python | PyCQA__pylint | pylint/config/exceptions.py | {
"start": 438,
"end": 713
} | class ____(Exception):
"""Raised if an ArgumentManager instance tries to parse an option that is
unknown.
"""
def __init__(self, options: list[str], *args: object) -> None:
self.options = options
super().__init__(*args)
| _UnrecognizedOptionError |
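A short sketch of how the exception above carries the offending option names to the caller.
# Raising and inspecting _UnrecognizedOptionError from above.
try:
    raise _UnrecognizedOptionError(["--does-not-exist"])
except _UnrecognizedOptionError as exc:
    print(exc.options)  # ['--does-not-exist']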
python | gevent__gevent | src/greentest/3.10/test_asyncore.py | {
"start": 14973,
"end": 25347
} | class ____:
def tearDown(self):
asyncore.close_all(ignore_all=True)
def loop_waiting_for_flag(self, instance, timeout=5):
timeout = float(timeout) / 100
count = 100
while asyncore.socket_map and count > 0:
asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
if instance.flag:
return
count -= 1
time.sleep(timeout)
self.fail("flag not set")
def test_handle_connect(self):
# make sure handle_connect is called on connect()
class TestClient(BaseClient):
def handle_connect(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_accept(self):
# make sure handle_accept() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_accepted(self):
# make sure handle_accepted() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
asyncore.dispatcher.handle_accept(self)
def handle_accepted(self, sock, addr):
sock.close()
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_read(self):
# make sure handle_read is called on data received
class TestClient(BaseClient):
def handle_read(self):
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.send(b'x' * 1024)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_write(self):
# make sure handle_write is called
class TestClient(BaseClient):
def handle_write(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close(self):
# make sure handle_close is called when the other end closes
# the connection
class TestClient(BaseClient):
def handle_read(self):
# in order to make handle_close be called we are supposed
# to make at least one recv() call
self.recv(1024)
def handle_close(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.close()
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close_after_conn_broken(self):
# Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and
# #11265).
data = b'\0' * 128
class TestClient(BaseClient):
def handle_write(self):
self.send(data)
def handle_close(self):
self.flag = True
self.close()
def handle_expt(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def handle_read(self):
self.recv(len(data))
self.close()
def writable(self):
return False
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
@unittest.skipIf(sys.platform.startswith("sunos"),
"OOB support is broken on Solaris")
def test_handle_expt(self):
# Make sure handle_expt is called on OOB data received.
# Note: this might fail on some platforms as OOB data is
# tenuously supported and rarely used.
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
if sys.platform == "darwin" and self.use_poll:
self.skipTest("poll may fail on macOS; see issue #28087")
class TestClient(BaseClient):
def handle_expt(self):
self.socket.recv(1024, socket.MSG_OOB)
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.socket.send(bytes(chr(244), 'latin-1'), socket.MSG_OOB)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_error(self):
class TestClient(BaseClient):
def handle_write(self):
1.0 / 0
def handle_error(self):
self.flag = True
try:
raise
except ZeroDivisionError:
pass
else:
raise Exception("exception not raised")
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_connection_attributes(self):
server = BaseServer(self.family, self.addr)
client = BaseClient(self.family, server.address)
# we start disconnected
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
# this can't be taken for granted across all platforms
#self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# execute some loops so that client connects to server
asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertTrue(client.connected)
self.assertFalse(client.accepting)
# disconnect the client
client.close()
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# stop serving
server.close()
self.assertFalse(server.connected)
self.assertFalse(server.accepting)
def test_create_socket(self):
s = asyncore.dispatcher()
s.create_socket(self.family)
self.assertEqual(s.socket.type, socket.SOCK_STREAM)
self.assertEqual(s.socket.family, self.family)
self.assertEqual(s.socket.gettimeout(), 0)
self.assertFalse(s.socket.get_inheritable())
def test_bind(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
s1 = asyncore.dispatcher()
s1.create_socket(self.family)
s1.bind(self.addr)
s1.listen(5)
port = s1.socket.getsockname()[1]
s2 = asyncore.dispatcher()
s2.create_socket(self.family)
# EADDRINUSE indicates the socket was correctly bound
self.assertRaises(OSError, s2.bind, (self.addr[0], port))
def test_set_reuse_addr(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
with socket.socket(self.family) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except OSError:
unittest.skip("SO_REUSEADDR not supported on this platform")
else:
# if SO_REUSEADDR succeeded for sock we expect asyncore
# to do the same
s = asyncore.dispatcher(socket.socket(self.family))
self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
s.socket.close()
s.create_socket(self.family)
s.set_reuse_addr()
self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
@threading_helper.reap_threads
def test_quick_connect(self):
# see: http://bugs.python.org/issue10340
if self.family not in (socket.AF_INET, getattr(socket, "AF_INET6", object())):
self.skipTest("test specific to AF_INET and AF_INET6")
server = BaseServer(self.family, self.addr)
# run the thread 500 ms: the socket should be connected in 200 ms
t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1,
count=5))
t.start()
try:
with socket.socket(self.family, socket.SOCK_STREAM) as s:
s.settimeout(.2)
s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack('ii', 1, 0))
try:
s.connect(server.address)
except OSError:
pass
finally:
threading_helper.join_thread(t)
| BaseTestAPI |
python | getsentry__sentry | src/sentry/issue_detection/detectors/mn_plus_one_db_span_detector.py | {
"start": 9861,
"end": 11375
} | class ____(PerformanceDetector):
"""
Detects N+1 DB query issues where the repeated query is interspersed with
other spans (which may or may not be other queries) that all repeat together
(hence, MN+1).
Currently does not consider parent or source spans, and only looks for a
repeating pattern of spans (A B C A B C etc).
Uses a small state machine internally.
"""
__slots__ = ("state",)
type = DetectorType.M_N_PLUS_ONE_DB
settings_key = DetectorType.M_N_PLUS_ONE_DB
def __init__(self, settings: dict[DetectorType, Any], event: dict[str, Any]) -> None:
super().__init__(settings, event)
self.state: MNPlusOneState = SearchingForMNPlusOne(self.settings, self.event())
def is_creation_allowed_for_organization(self, organization: Organization | None) -> bool:
return not features.has(
"organizations:experimental-mn-plus-one-detector-rollout", organization
)
def is_creation_allowed_for_project(self, project: Project) -> bool:
return self.settings["detection_enabled"]
def visit_span(self, span: Span) -> None:
self.state, performance_problem = self.state.next(span)
if performance_problem:
self.stored_problems[performance_problem.fingerprint] = performance_problem
def on_complete(self) -> None:
if performance_problem := self.state.finish():
self.stored_problems[performance_problem.fingerprint] = performance_problem
| MNPlusOneDBSpanDetector |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 12301,
"end": 12474
} | class ____(PrefectBaseModel):
any_: Optional[List[str]] = Field(
default=None, description="A list of task run state names to include"
)
| TaskRunFilterStateName |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/coverage/__init__.py | {
"start": 12988,
"end": 14440
} | class ____:
"""Checks code coverage paths to verify they are valid and reports on the findings."""
def __init__(self, args: CoverageConfig, collection_search_re: t.Optional[t.Pattern] = None) -> None:
self.args = args
self.collection_search_re = collection_search_re
self.invalid_paths: list[str] = []
self.invalid_path_chars = 0
def check_path(self, path: str) -> bool:
"""Return True if the given coverage path is valid, otherwise display a warning and return False."""
if os.path.isfile(to_bytes(path)):
return True
if self.collection_search_re and self.collection_search_re.search(path) and os.path.basename(path) == '__init__.py':
# the collection loader uses implicit namespace packages, so __init__.py does not need to exist on disk
# coverage is still reported for these non-existent files, but warnings are not needed
return False
self.invalid_paths.append(path)
self.invalid_path_chars += len(path)
if self.args.verbosity > 1:
display.warning('Invalid coverage path: %s' % path)
return False
def report(self) -> None:
"""Display a warning regarding invalid paths if any were found."""
if self.invalid_paths:
display.warning('Ignored %d characters from %d invalid coverage path(s).' % (self.invalid_path_chars, len(self.invalid_paths)))
| PathChecker |
python | kamyu104__LeetCode-Solutions | Python/xor-after-range-multiplication-queries-ii.py | {
"start": 144,
"end": 1131
} | class ____(object):
def xorAfterQueries(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
def inv(x):
return pow(x, MOD-2, MOD)
block_size = int(len(nums)**0.5)+1
diffs = collections.defaultdict(lambda: [1]*len(nums))
for l, r, k, v in queries:
if k <= block_size:
diffs[k][l] = (diffs[k][l]*v)%MOD
r += k-(r-l)%k
if r < len(nums):
diffs[k][r] = (diffs[k][r]*inv(v))%MOD
else:
for i in xrange(l, r+1, k):
nums[i] = (nums[i]*v)%MOD
for k, diff in diffs.iteritems():
for i in xrange(len(diff)):
if i-k >= 0:
diff[i] = (diff[i]*diff[i-k])%MOD
nums[i] = (nums[i]*diff[i])%MOD
return reduce(lambda accu, x: accu^x, nums, 0)
| Solution |
python | astropy__astropy | astropy/units/tests/test_quantity_non_ufuncs.py | {
"start": 7068,
"end": 8444
} | class ____(BasicTestSetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
out = np.take_along_axis(self.q, indices, axis=0)
expected = np.take_along_axis(self.q.value, indices, axis=0) * self.q.unit
assert np.all(out == expected)
def test_put_along_axis(self):
q = self.q.copy()
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
expected = q.value.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
expected = expected * q.unit
assert np.all(q == expected)
@pytest.mark.parametrize("axis", (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.q)
expected = np.apply_along_axis(np.square, axis, self.q.value) * self.q.unit**2
assert_array_equal(out, expected)
@pytest.mark.parametrize("axes", ((1,), (0,), (0, 1)))
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.sum(np.square(x), axis)
out = np.apply_over_axes(function, self.q, axes)
expected = np.apply_over_axes(function, self.q.value, axes)
expected = expected * self.q.unit ** (2 * len(axes))
assert_array_equal(out, expected)
| TestAlongAxis |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup.py | {
"start": 17226,
"end": 17933
} | class ____(typing.NamedTuple):
a: str
@given(st.builds(AnnotatedNamedTuple))
def test_infers_args_for_namedtuple_builds(thing):
assert isinstance(thing.a, str)
@given(st.from_type(AnnotatedNamedTuple))
def test_infers_args_for_namedtuple_from_type(thing):
assert isinstance(thing.a, str)
@given(st.builds(AnnotatedNamedTuple, a=st.none()))
def test_override_args_for_namedtuple(thing):
assert thing.a is None
@pytest.mark.parametrize("thing", [typing.Optional, list, type, _List, _Type])
def test_cannot_resolve_bare_forward_reference(thing):
t = thing["ConcreteFoo"]
with pytest.raises(InvalidArgument):
check_can_generate_examples(st.from_type(t))
| AnnotatedNamedTuple |
python | huggingface__transformers | src/transformers/models/mra/modeling_mra.py | {
"start": 50438,
"end": 53606
} | class ____(MraPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 2
self.num_labels = config.num_labels
self.mra = MraModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, QuestionAnsweringModelOutput]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mra(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
| MraForQuestionAnswering |
python | pandas-dev__pandas | pandas/tests/series/methods/test_astype.py | {
"start": 17550,
"end": 18963
} | class ____:
@pytest.mark.parametrize(
"data, dtype",
[
([True, NA], "boolean"),
(["A", NA], "category"),
(["2020-10-10", "2020-10-10"], "datetime64[ns]"),
(["2020-10-10", "2020-10-10", NaT], "datetime64[ns]"),
(
["2012-01-01 00:00:00-05:00", NaT],
"datetime64[ns, US/Eastern]",
),
([1, None], "UInt16"),
(["1/1/2021", "2/1/2021"], "period[M]"),
(["1/1/2021", "2/1/2021", NaT], "period[M]"),
(["1 Day", "59 Days", NaT], "timedelta64[ns]"),
# currently no way to parse IntervalArray from a list of strings
],
)
def test_astype_string_to_extension_dtype_roundtrip(
self, data, dtype, request, nullable_string_dtype
):
if dtype == "boolean":
mark = pytest.mark.xfail(
reason="TODO StringArray.astype() with missing values #GH40566"
)
request.applymarker(mark)
# GH-40351
ser = Series(data, dtype=dtype)
# Note: just passing .astype(dtype) fails for dtype="category"
# with bc ser.dtype.categories will be object dtype whereas
# result.dtype.categories will have string dtype
result = ser.astype(nullable_string_dtype).astype(ser.dtype)
tm.assert_series_equal(result, ser)
| TestAstypeString |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/experiment_service.py | {
"start": 15654,
"end": 18311
} | class ____(GoogleCloudBaseOperator):
"""
Use the Vertex AI SDK to delete an experiment run.

:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param experiment_name: Required. The name of the evaluation experiment.
:param experiment_run_name: Required. The specific run name or ID for this experiment.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"location",
"project_id",
"impersonation_chain",
"experiment_name",
"experiment_run_name",
)
def __init__(
self,
*,
project_id: str,
location: str,
experiment_name: str,
experiment_run_name: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.experiment_name = experiment_name
self.experiment_run_name = experiment_run_name
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
self.hook = ExperimentRunHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.hook.delete_experiment_run(
project_id=self.project_id,
location=self.location,
experiment_name=self.experiment_name,
experiment_run_name=self.experiment_run_name,
)
except exceptions.NotFound:
raise AirflowException(f"Experiment Run with name {self.experiment_run_name} not found")
self.log.info("Deleted experiment run: %s", self.experiment_run_name)
| DeleteExperimentRunOperator |
python | kamyu104__LeetCode-Solutions | Python/water-bottles-ii.py | {
"start": 48,
"end": 443
} | class ____(object):
def maxBottlesDrunk(self, numBottles, numExchange):
"""
:type numBottles: int
:type numExchange: int
:rtype: int
"""
result = numBottles
while numBottles >= numExchange:
numBottles -= numExchange
numExchange += 1
result += 1
numBottles += 1
return result
| Solution |
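A quick check of the greedy loop above; the expected outputs follow from tracing it by hand (each exchange spends numExchange empties, raises the price by one, and yields one fresh bottle that is drunk immediately).
# Usage sketch for the Solution class defined above.
print(Solution().maxBottlesDrunk(13, 6))  # 15
print(Solution().maxBottlesDrunk(10, 3))  # 13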
python | huggingface__transformers | tests/models/smollm3/test_modeling_smollm3.py | {
"start": 1634,
"end": 2048
} | class ____(CausalLMModelTester):
config_class = SmolLM3Config
if is_torch_available():
base_model_class = SmolLM3Model
causal_lm_class = SmolLM3ForCausalLM
question_answering_class = SmolLM3ForQuestionAnswering
sequence_classification_class = SmolLM3ForSequenceClassification
token_classification_class = SmolLM3ForTokenClassification
@require_torch
| SmolLM3ModelTester |
python | sqlalchemy__sqlalchemy | test/ext/test_associationproxy.py | {
"start": 93090,
"end": 93288
} | class ____(
ScalarRemoveTest, fixtures.DeclarativeMappedTest
):
run_create_tables = None
useobject = True
cascade_scalar_deletes = True
uselist = True
| ScalarRemoveListObjectCascade |
python | pypa__warehouse | warehouse/legacy/api/xmlrpc/cache/services.py | {
"start": 636,
"end": 1877
} | class ____:
def __init__(
self,
redis_url,
purger,
redis_db=0,
name="lru",
expires=None,
metric_reporter=None,
):
self.redis_conn = redis.StrictRedis.from_url(redis_url, db=redis_db)
self.redis_lru = cache.RedisLru(
self.redis_conn, name=name, expires=expires, metric_reporter=metric_reporter
)
self._purger = purger
@classmethod
def create_service(cls, context, request):
return cls(
request.registry.settings.get("warehouse.xmlrpc.cache.url"),
request.task(purge_tag).delay,
name=request.registry.settings.get("warehouse.xmlrpc.cache.name", "xmlrpc"),
expires=int(
request.registry.settings.get(
"warehouse.xmlrpc.cache.expires", 25 * 60 * 60
)
),
)
def fetch(self, func, args, kwargs, key, tag, expires):
return self.redis_lru.fetch(func, args, kwargs, key, tag, expires)
def purge(self, tag):
return self.redis_lru.purge(tag)
def purge_tags(self, tags):
for tag in tags:
self._purger(tag)
@implementer(interfaces.IXMLRPCCache)
| RedisXMLRPCCache |
python | numpy__numpy | numpy/_core/_exceptions.py | {
"start": 786,
"end": 945
} | class ____(TypeError):
""" Base class for all ufunc exceptions """
def __init__(self, ufunc):
self.ufunc = ufunc
@_display_as_base
| UFuncTypeError |
python | matplotlib__matplotlib | lib/matplotlib/_mathtext.py | {
"start": 62570,
"end": 64579
} | class ____:
"""
Parser state.
States are pushed and popped from a stack as necessary, and the "current"
state is always at the top of the stack.
Upon entering and leaving a group { } or math/non-math, the stack is pushed
and popped accordingly.
"""
def __init__(self, fontset: Fonts, font: str, font_class: str, fontsize: float,
dpi: float):
self.fontset = fontset
self._font = font
self.font_class = font_class
self.fontsize = fontsize
self.dpi = dpi
def copy(self) -> ParserState:
return copy.copy(self)
@property
def font(self) -> str:
return self._font
@font.setter
def font(self, name: str) -> None:
if name in ('rm', 'it', 'bf', 'bfit'):
self.font_class = name
self._font = name
def get_current_underline_thickness(self) -> float:
"""Return the underline thickness for this state."""
return self.fontset.get_underline_thickness(
self.font, self.fontsize, self.dpi)
def cmd(expr: str, args: ParserElement) -> ParserElement:
r"""
Helper to define TeX commands.
``cmd("\cmd", args)`` is equivalent to
``"\cmd" - (args | Error("Expected \cmd{arg}{...}"))`` where the names in
the error message are taken from element names in *args*. If *expr*
already includes arguments (e.g. "\cmd{arg}{...}"), then they are stripped
when constructing the parse element, but kept (and *expr* is used as is) in
the error message.
"""
def names(elt: ParserElement) -> T.Generator[str, None, None]:
if isinstance(elt, ParseExpression):
for expr in elt.exprs:
yield from names(expr)
elif elt.resultsName:
yield elt.resultsName
csname = expr.split("{", 1)[0]
err = (csname + "".join("{%s}" % name for name in names(args))
if expr == csname else expr)
return csname - (args | Error(f"Expected {err}"))
| ParserState |
python | apache__airflow | airflow-core/src/airflow/utils/state.py | {
"start": 847,
"end": 1256
} | class ____(str, Enum):
"""States that a Task Instance can be in that indicate it has reached a terminal state."""
SUCCESS = "success"
FAILED = "failed"
SKIPPED = "skipped" # A user can raise a AirflowSkipException from a task & it will be marked as skipped
UPSTREAM_FAILED = "upstream_failed"
REMOVED = "removed"
def __str__(self) -> str:
return self.value
| TerminalTIState |
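Since the enum above mixes in str, members can be looked up by their raw value and __str__ returns that value directly.
# Usage sketch for TerminalTIState defined above.
state = TerminalTIState("failed")            # lookup by value
print(state is TerminalTIState.FAILED)       # True
print(str(TerminalTIState.UPSTREAM_FAILED))  # "upstream_failed"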
python | kamyu104__LeetCode-Solutions | Python/optimal-account-balancing.py | {
"start": 86,
"end": 997
} | class ____(object):
def minTransfers(self, transactions):
"""
:type transactions: List[List[int]]
:rtype: int
"""
accounts = collections.defaultdict(int)
for src, dst, amount in transactions:
accounts[src] += amount
accounts[dst] -= amount
debts = [account for account in accounts.itervalues() if account]
dp = [0]*(2**len(debts))
sums = [0]*(2**len(debts))
for i in xrange(len(dp)):
bit = 1
for j in xrange(len(debts)):
if (i & bit) == 0:
nxt = i | bit
sums[nxt] = sums[i]+debts[j]
if sums[nxt] == 0:
dp[nxt] = max(dp[nxt], dp[i]+1)
else:
dp[nxt] = max(dp[nxt], dp[i])
bit <<= 1
return len(debts)-dp[-1]
| Solution |
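A worked call against the subset-DP solution above (Python 2 style, per the xrange/itervalues usage): three people with net balances +5, -10, +5 settle in two transfers, and the second transaction set settles in one.
# Usage sketch for the Solution class defined above (Python 2, given xrange/itervalues).
print(Solution().minTransfers([[0, 1, 10], [2, 0, 5]]))                        # 2
print(Solution().minTransfers([[0, 1, 10], [1, 0, 1], [1, 2, 5], [2, 0, 5]]))  # 1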
python | django__django | tests/staticfiles_tests/test_templatetags.py | {
"start": 134,
"end": 1076
} | class ____(StaticFilesTestCase):
def test_template_tag(self):
self.assertStaticRenders("does/not/exist.png", "/static/does/not/exist.png")
self.assertStaticRenders("testfile.txt", "/static/testfile.txt")
self.assertStaticRenders(
"special?chars"ed.html", "/static/special%3Fchars%26quoted.html"
)
@override_settings(
STORAGES={
STATICFILES_STORAGE_ALIAS: {
"BACKEND": "staticfiles_tests.storage.QueryStringStorage"
},
}
)
def test_template_tag_escapes(self):
"""
Storage.url() should return an encoded path and might be overridden
to also include a querystring. {% static %} escapes the URL to avoid
raw '&', for example.
"""
self.assertStaticRenders("a.html", "a.html?a=b&c=d")
self.assertStaticRenders("a.html", "a.html?a=b&c=d", autoescape=False)
| TestTemplateTag |
python | apache__airflow | task-sdk/tests/task_sdk/api/test_client.py | {
"start": 40723,
"end": 42827
} | class ____:
@pytest.mark.parametrize(
"request_params",
[
({"name": "this_asset"}),
({"uri": "s3://bucket/key"}),
],
)
def test_by_name_get_success(self, request_params):
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path in ("/assets/by-name", "/assets/by-uri"):
return httpx.Response(
status_code=200,
json={
"name": "this_asset",
"uri": "s3://bucket/key",
"group": "asset",
"extra": {"foo": "bar"},
},
)
return httpx.Response(status_code=400, json={"detail": "Bad Request"})
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.assets.get(**request_params)
assert isinstance(result, AssetResponse)
assert result.name == "this_asset"
assert result.uri == "s3://bucket/key"
@pytest.mark.parametrize(
"request_params",
[
({"name": "this_asset"}),
({"uri": "s3://bucket/key"}),
],
)
def test_by_name_get_404_not_found(self, request_params):
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path in ("/assets/by-name", "/assets/by-uri"):
return httpx.Response(
status_code=404,
json={
"detail": {
"message": "Asset with name non_existent not found",
"reason": "not_found",
}
},
)
return httpx.Response(status_code=400, json={"detail": "Bad Request"})
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.assets.get(**request_params)
assert isinstance(result, ErrorResponse)
assert result.error == ErrorType.ASSET_NOT_FOUND
| TestAssetOperations |
python | django-crispy-forms__django-crispy-forms | crispy_forms/bootstrap.py | {
"start": 5948,
"end": 8110
} | class ____(PrependedAppendedText):
"""
Layout object for rendering a field with prepended text.
Attributes
----------
template : str
The default template which this Layout Object will be rendered
with.
attrs : dict
Attributes to be applied to the field. These are converted into html
attributes. e.g. ``data_id: 'test'`` in the attrs dict will become
``data-id='test'`` on the field's ``<input>``.
Parameters
----------
field : str
The name of the field to be rendered.
text : str
The prepended text, can be HTML like.
input_size : str, optional
For Bootstrap4+ additional classes to customise the input-group size
e.g. ``input-group-sm``. By default None
active : bool
For Bootstrap3, a boolean to render the text active. By default
``False``.
css_class : str, optional
CSS classes to be applied to the field. These are added to any classes
included in the ``attrs`` dict. By default ``None``.
wrapper_class: str, optional
CSS classes to be used when rendering the Field. This class is usually
applied to the ``<div>`` which wraps the Field's ``<label>`` and
``<input>`` tags. By default ``None``.
template : str, optional
Overrides the default template, if provided. By default ``None``.
**kwargs : dict, optional
Additional attributes are converted into key="value", pairs. These
attributes are added to the ``<div>``.
Examples
--------
Example::
PrependedText('amount', '$')
"""
def __init__(
self,
field,
text,
*,
input_size=None,
active=False,
css_class=None,
wrapper_class=None,
template=None,
**kwargs,
):
self.text = text
super().__init__(
field,
prepended_text=text,
input_size=input_size,
active=active,
css_class=css_class,
wrapper_class=wrapper_class,
template=template,
**kwargs,
)
| PrependedText |
python | huggingface__transformers | src/transformers/models/sam2_video/modeling_sam2_video.py | {
"start": 41628,
"end": 43400
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Sam2VideoConfig):
super().__init__()
self.depthwise_conv = nn.Conv2d(
config.memory_fuser_embed_dim,
config.memory_fuser_embed_dim,
kernel_size=config.memory_fuser_kernel_size,
padding=config.memory_fuser_padding,
groups=config.memory_fuser_embed_dim,
) # depthwise conv
self.layer_norm = Sam2VideoLayerNorm(config.memory_fuser_embed_dim, eps=1e-6, data_format="channels_first")
self.activation = ACT2FN[config.memory_fuser_hidden_act]
self.pointwise_conv1 = nn.Linear(
config.memory_fuser_embed_dim, config.memory_fuser_intermediate_dim
) # pointwise/1x1 convs, implemented with linear layers
self.pointwise_conv2 = nn.Linear(config.memory_fuser_intermediate_dim, config.memory_fuser_embed_dim)
self.scale = nn.Parameter(
config.memory_fuser_layer_scale_init_value * torch.ones(config.memory_fuser_embed_dim),
requires_grad=True,
)
def forward(self, hidden_states):
input = hidden_states
hidden_states = self.depthwise_conv(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
hidden_states = self.pointwise_conv1(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.pointwise_conv2(hidden_states)
hidden_states = self.scale * hidden_states
hidden_states = hidden_states.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
hidden_states = input + hidden_states
return hidden_states
| Sam2VideoMemoryFuserCXBlock |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/auto_ml.py | {
"start": 15407,
"end": 21606
} | class ____(AutoMLTrainingJobBaseOperator):
"""Create Auto ML Tabular Training job."""
template_fields = (
"parent_model",
"dataset_id",
"region",
"impersonation_chain",
)
operator_extra_links = (VertexAIModelLink(), VertexAITrainingLink())
def __init__(
self,
*,
dataset_id: str,
target_column: str,
optimization_prediction_type: str,
optimization_objective: str | None = None,
column_specs: dict[str, str] | None = None,
column_transformations: list[dict[str, dict[str, str]]] | None = None,
optimization_objective_recall_value: float | None = None,
optimization_objective_precision_value: float | None = None,
validation_fraction_split: float | None = None,
predefined_split_column_name: str | None = None,
timestamp_split_column_name: str | None = None,
weight_column: str | None = None,
budget_milli_node_hours: int = 1000,
disable_early_stopping: bool = False,
export_evaluated_data_items: bool = False,
export_evaluated_data_items_bigquery_destination_uri: str | None = None,
export_evaluated_data_items_override_destination: bool = False,
region: str,
impersonation_chain: str | Sequence[str] | None = None,
parent_model: str | None = None,
**kwargs,
) -> None:
super().__init__(
region=region, impersonation_chain=impersonation_chain, parent_model=parent_model, **kwargs
)
self.dataset_id = dataset_id
self.target_column = target_column
self.optimization_prediction_type = optimization_prediction_type
self.optimization_objective = optimization_objective
self.column_specs = column_specs
self.column_transformations = column_transformations
self.optimization_objective_recall_value = optimization_objective_recall_value
self.optimization_objective_precision_value = optimization_objective_precision_value
self.validation_fraction_split = validation_fraction_split
self.predefined_split_column_name = predefined_split_column_name
self.timestamp_split_column_name = timestamp_split_column_name
self.weight_column = weight_column
self.budget_milli_node_hours = budget_milli_node_hours
self.disable_early_stopping = disable_early_stopping
self.export_evaluated_data_items = export_evaluated_data_items
self.export_evaluated_data_items_bigquery_destination_uri = (
export_evaluated_data_items_bigquery_destination_uri
)
self.export_evaluated_data_items_override_destination = (
export_evaluated_data_items_override_destination
)
def execute(self, context: Context):
self.hook = AutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
credentials, _ = self.hook.get_credentials_and_project_id()
self.parent_model = self.parent_model.split("@")[0] if self.parent_model else None
model, training_id = self.hook.create_auto_ml_tabular_training_job(
project_id=self.project_id,
region=self.region,
display_name=self.display_name,
dataset=datasets.TabularDataset(
dataset_name=self.dataset_id,
project=self.project_id,
credentials=credentials,
),
parent_model=self.parent_model,
is_default_version=self.is_default_version,
model_version_aliases=self.model_version_aliases,
model_version_description=self.model_version_description,
target_column=self.target_column,
optimization_prediction_type=self.optimization_prediction_type,
optimization_objective=self.optimization_objective,
column_specs=self.column_specs,
column_transformations=self.column_transformations,
optimization_objective_recall_value=self.optimization_objective_recall_value,
optimization_objective_precision_value=self.optimization_objective_precision_value,
labels=self.labels,
training_encryption_spec_key_name=self.training_encryption_spec_key_name,
model_encryption_spec_key_name=self.model_encryption_spec_key_name,
training_fraction_split=self.training_fraction_split,
validation_fraction_split=self.validation_fraction_split,
test_fraction_split=self.test_fraction_split,
predefined_split_column_name=self.predefined_split_column_name,
timestamp_split_column_name=self.timestamp_split_column_name,
weight_column=self.weight_column,
budget_milli_node_hours=self.budget_milli_node_hours,
model_display_name=self.model_display_name,
model_labels=self.model_labels,
disable_early_stopping=self.disable_early_stopping,
export_evaluated_data_items=self.export_evaluated_data_items,
export_evaluated_data_items_bigquery_destination_uri=(
self.export_evaluated_data_items_bigquery_destination_uri
),
export_evaluated_data_items_override_destination=(
self.export_evaluated_data_items_override_destination
),
sync=self.sync,
)
if model:
result = Model.to_dict(model)
model_id = self.hook.extract_model_id(result)
context["ti"].xcom_push(key="model_id", value=model_id)
VertexAIModelLink.persist(context=context, model_id=model_id)
else:
result = model # type: ignore
context["ti"].xcom_push(key="training_id", value=training_id)
VertexAITrainingLink.persist(context=context, training_id=training_id)
return result
@deprecated(
planned_removal_date="March 24, 2026",
use_instead="airflow.providers.google.cloud.operators.vertex_ai.generative_model.SupervisedFineTuningTrainOperator",
category=AirflowProviderDeprecationWarning,
)
| CreateAutoMLTabularTrainingJobOperator |
python | astropy__astropy | astropy/io/fits/tests/test_checksum.py | {
"start": 1021,
"end": 22120
} | class ____(BaseChecksumTests):
# All checksums have been verified against CFITSIO
def test_sample_file(self):
hdul = fits.open(self.data("checksum.fits"), checksum=True)
assert hdul._read_all
hdul.close()
def test_image_create(self):
n = np.arange(100, dtype=np.int64)
hdu = fits.PrimaryHDU(n)
hdu.writeto(self.temp("tmp.fits"), overwrite=True, checksum=True)
with fits.open(self.temp("tmp.fits"), checksum=True) as hdul:
assert (hdu.data == hdul[0].data).all()
assert "CHECKSUM" in hdul[0].header
assert "DATASUM" in hdul[0].header
assert hdul[0].header["CHECKSUM"] == "ZHMkeGKjZGKjbGKj"
assert hdul[0].header["DATASUM"] == "4950"
def test_scaled_data(self):
with fits.open(self.data("scale.fits")) as hdul:
orig_data = hdul[0].data.copy()
hdul[0].scale("int16", "old")
hdul.writeto(self.temp("tmp.fits"), overwrite=True, checksum=True)
with fits.open(self.temp("tmp.fits"), checksum=True) as hdul1:
assert (hdul1[0].data == orig_data).all()
assert "CHECKSUM" in hdul1[0].header
assert hdul1[0].header["CHECKSUM"] == "cUmaeUjZcUjacUjW"
assert "DATASUM" in hdul1[0].header
assert hdul1[0].header["DATASUM"] == "1891563534"
def test_scaled_data_auto_rescale(self):
"""
Regression test for
https://github.com/astropy/astropy/issues/3883#issuecomment-115122647
Ensure that when scaled data is automatically rescaled on
opening/writing a file that the checksum and datasum are computed for
the rescaled array.
"""
with fits.open(self.data("scale.fits")) as hdul:
# Write out a copy of the data with the rescaling applied
hdul.writeto(self.temp("rescaled.fits"))
# Reopen the new file and save it back again with a checksum
with fits.open(self.temp("rescaled.fits")) as hdul:
hdul.writeto(self.temp("rescaled2.fits"), overwrite=True, checksum=True)
# Now do like in the first writeto but use checksum immediately
with fits.open(self.data("scale.fits")) as hdul:
hdul.writeto(self.temp("rescaled3.fits"), checksum=True)
# Also don't rescale the data but add a checksum
with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul:
hdul.writeto(self.temp("scaled.fits"), checksum=True)
# Must used nested with statements to support older Python versions
# (but contextlib.nested is not available in newer Pythons :(
with fits.open(self.temp("rescaled2.fits")) as hdul1:
with fits.open(self.temp("rescaled3.fits")) as hdul2:
with fits.open(self.temp("scaled.fits")) as hdul3:
hdr1 = hdul1[0].header
hdr2 = hdul2[0].header
hdr3 = hdul3[0].header
assert hdr1["DATASUM"] == hdr2["DATASUM"]
assert hdr1["CHECKSUM"] == hdr2["CHECKSUM"]
assert hdr1["DATASUM"] != hdr3["DATASUM"]
assert hdr1["CHECKSUM"] != hdr3["CHECKSUM"]
def test_uint16_data(self):
checksums = [
("aDcXaCcXaCcXaCcX", "0"),
("oYiGqXi9oXiEoXi9", "1746888714"),
("VhqQWZoQVfoQVZoQ", "0"),
("4cPp5aOn4aOn4aOn", "0"),
("8aCN8X9N8aAN8W9N", "1756785133"),
("UhqdUZnbUfnbUZnb", "0"),
("4cQJ5aN94aNG4aN9", "0"),
]
with fits.open(self.data("o4sp040b0_raw.fits"), uint=True) as hdul:
hdul.writeto(self.temp("tmp.fits"), overwrite=True, checksum=True)
with fits.open(self.temp("tmp.fits"), uint=True, checksum=True) as hdul1:
for idx, (hdu_a, hdu_b) in enumerate(zip(hdul, hdul1)):
if hdu_a.data is None or hdu_b.data is None:
assert hdu_a.data is hdu_b.data
else:
assert (hdu_a.data == hdu_b.data).all()
assert "CHECKSUM" in hdul[idx].header
assert hdul[idx].header["CHECKSUM"] == checksums[idx][0]
assert "DATASUM" in hdul[idx].header
assert hdul[idx].header["DATASUM"] == checksums[idx][1]
def test_groups_hdu_data(self):
imdata = np.arange(100.0).reshape((10, 1, 1, 2, 5))
pdata1 = np.arange(10) + 0.1
pdata2 = 42
x = fits.hdu.groups.GroupData(
imdata, parnames=["abc", "xyz"], pardata=[pdata1, pdata2], bitpix=-32
)
hdu = fits.GroupsHDU(x)
hdu.writeto(self.temp("tmp.fits"), overwrite=True, checksum=True)
with fits.open(self.temp("tmp.fits"), checksum=True) as hdul:
assert comparerecords(hdul[0].data, hdu.data)
assert "CHECKSUM" in hdul[0].header
assert hdul[0].header["CHECKSUM"] == "3eDQAZDO4dDOAZDO"
assert "DATASUM" in hdul[0].header
assert hdul[0].header["DATASUM"] == "2797758084"
def test_binary_table_data(self):
a1 = np.array(["NGC1001", "NGC1002", "NGC1003"])
a2 = np.array([11.1, 12.3, 15.2])
col1 = fits.Column(name="target", format="20A", array=a1)
col2 = fits.Column(name="V_mag", format="E", array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.writeto(self.temp("tmp.fits"), overwrite=True, checksum=True)
with fits.open(self.temp("tmp.fits"), checksum=True) as hdul:
assert comparerecords(tbhdu.data, hdul[1].data)
assert "CHECKSUM" in hdul[0].header
assert hdul[0].header["CHECKSUM"] == "D8iBD6ZAD6fAD6ZA"
assert "DATASUM" in hdul[0].header
assert hdul[0].header["DATASUM"] == "0"
assert "CHECKSUM" in hdul[1].header
assert hdul[1].header["CHECKSUM"] == "aD1Oa90MaC0Ma90M"
assert "DATASUM" in hdul[1].header
assert hdul[1].header["DATASUM"] == "1062205743"
def test_variable_length_table_data(self):
c1 = fits.Column(
name="var",
format="PJ()",
array=np.array([[45.0, 56], np.array([11, 12, 13])], "O"),
)
c2 = fits.Column(name="xyz", format="2I", array=[[11, 3], [12, 4]])
tbhdu = fits.BinTableHDU.from_columns([c1, c2])
tbhdu.writeto(self.temp("tmp.fits"), overwrite=True, checksum=True)
with fits.open(self.temp("tmp.fits"), checksum=True) as hdul:
assert comparerecords(tbhdu.data, hdul[1].data)
assert "CHECKSUM" in hdul[0].header
assert hdul[0].header["CHECKSUM"] == "D8iBD6ZAD6fAD6ZA"
assert "DATASUM" in hdul[0].header
assert hdul[0].header["DATASUM"] == "0"
assert "CHECKSUM" in hdul[1].header
assert hdul[1].header["CHECKSUM"] == "YIGoaIEmZIEmaIEm"
assert "DATASUM" in hdul[1].header
assert hdul[1].header["DATASUM"] == "1507485"
def test_variable_length_table_data2(self):
"""regression test for #12119"""
time_data = [
np.array([2021, 1, 5, 10, 5, 30], dtype=np.uint16),
np.array([2021, 2, 19, 11, 19, 56], dtype=np.uint16),
np.array([2021, 4, 21, 16, 10, 24], dtype=np.uint16),
np.array([2021, 7, 22, 14, 42, 20], dtype=np.uint16),
]
time_col = fits.Column(name="time", format="6I", array=time_data)
version_data = ["5.45.70", "5.45.71", "5.45.102", "5.50.109"]
version_col = fits.Column(name="Version", format="PA(8)", array=version_data)
columns = [time_col, version_col]
testfile = self.temp("tmp.fits")
tbl = fits.BinTableHDU.from_columns(columns, name="DemoBinTable")
hdul = fits.HDUList([fits.PrimaryHDU(), tbl])
# here checksum is computed from in-memory data, which was producing
# a wrong checksum and warnings when reading back the file
hdul.writeto(testfile, checksum=True)
testfile2 = self.temp("tmp2.fits")
with fits.open(testfile, checksum=True) as hdul:
datasum = hdul[1]._datasum
assert datasum == "2998821219"
checksum = hdul[1]._checksum
assert checksum == "7aC39YA37aA37YA3"
# so write again the file but here data was not loaded so checksum
# is computed directly from the file bytes, which was producing
# a correct checksum. Below we compare both to make sure they are
# consistent.
hdul.writeto(testfile2, checksum=True)
with fits.open(testfile2, checksum=True) as hdul:
assert datasum == hdul[1]._datasum
assert checksum == hdul[1]._checksum
def test_variable_length_table_data3(self):
"""regression test for #14396"""
# This is testing specifically a scenario where the start of the heap
# is not aligned with 4-byte blocks (32 bit integers)
# By default, the heap starts immediately after the table, which is at
# NAXIS1 x NAXIS2, or byte 17 in this case. This is not aligned with
# the 4-byte blocks
testfile = self.temp("tmp.fits")
col1 = fits.Column(name="a", format="1A", array=["a"])
col2 = fits.Column(name="b", format="QD", array=[[1]])
tab = fits.BinTableHDU.from_columns(name="test", columns=[col1, col2])
tab.writeto(testfile, checksum=True)
with fits.open(testfile, checksum=True) as hdul:
assert hdul[1].header["DATASUM"] == "1648357376"
assert hdul[1].header["CHECKSUM"] == "2CoL4BnL2BnL2BnL"
# Here we force the heap to be aligned with the 4-byte blocks by using
# the THEAP keyword. This shows that we cannot always calculate DATASUM
# by simple concatenating the table data with the heap data.
testfile = self.temp("tmp2.fits")
col1 = fits.Column(name="a", format="1A", array=["a"])
col2 = fits.Column(name="b", format="QD", array=[[1]])
tab = fits.BinTableHDU.from_columns(name="test", columns=[col1, col2])
tab.header["THEAP"] = 20
tab.writeto(testfile, checksum=True)
with fits.open(testfile, checksum=True) as hdul:
assert hdul[1].header["DATASUM"] == "2716860416"
assert hdul[1].header["CHECKSUM"] == "jIAFjI19jI8CjI89"
# Here we take the previous table and just update the THEAP value to 17.
# This should put the heap in the same position as the first case and
# thus the DATASUM should be the same. However, the CHECKSUM should be
# different, as the header is different (it now has the THEAP keyword).
testfile = self.temp("tmp3.fits")
tab.header["THEAP"] = 17
tab.writeto(testfile, checksum=True)
with fits.open(testfile, checksum=True) as hdul:
assert hdul[1].header["DATASUM"] == "1648357376"
assert hdul[1].header["CHECKSUM"] == "jcdDjZZBjabBjYZB"
def test_small_heap(self):
"""regression test for #18735"""
# Tests situations where the start of the heap is not aligned with
# 4-byte blocks (32 bit integers), and the size of our heap is smaller
# than 4 bytes
testfile = self.temp("tmp.fits")
col1 = fits.Column(name="a", format="1A", array=["a"])
tab = fits.BinTableHDU.from_columns(name="test", columns=[col1])
tab.writeto(testfile, checksum=True)
with fits.open(testfile, checksum=True) as hdul:
assert hdul[1].header["DATASUM"] == "1627389952"
assert hdul[1].header["CHECKSUM"] == "nNejoMchnMchnMch"
testfile = self.temp("tmp2.fits")
col1 = fits.Column(name="a", format="1A", array=["a"])
col2 = fits.Column(name="b", format="PB", array=[[1]])
tab = fits.BinTableHDU.from_columns(name="test", columns=[col1, col2])
tab.writeto(testfile, checksum=True)
with fits.open(testfile, checksum=True) as hdul:
assert hdul[1].header["DATASUM"] == "1644232704"
assert hdul[1].header["CHECKSUM"] == "4IIH7H9H4HGH4H9H"
testfile = self.temp("tmp3.fits")
col1 = fits.Column(name="a", format="1A", array=["a"])
col2 = fits.Column(name="b", format="PB", array=[[1, 2]])
tab = fits.BinTableHDU.from_columns(name="test", columns=[col1, col2])
tab.writeto(testfile, checksum=True)
with fits.open(testfile, checksum=True) as hdul:
assert hdul[1].header["DATASUM"] == "1661010432"
assert hdul[1].header["CHECKSUM"] == "4IHK6I9H4IGH4I9H"
testfile = self.temp("tmp4.fits")
col1 = fits.Column(name="a", format="1A", array=["a"])
col2 = fits.Column(name="b", format="PB", array=[[1, 2, 3]])
tab = fits.BinTableHDU.from_columns(name="test", columns=[col1, col2])
tab.writeto(testfile, checksum=True)
with fits.open(testfile, checksum=True) as hdul:
assert hdul[1].header["DATASUM"] == "1677787651"
assert hdul[1].header["CHECKSUM"] == "1HCH3G9F1GCF1G9F"
def test_ascii_table_data(self):
a1 = np.array(["abc", "def"])
r1 = np.array([11.0, 12.0])
c1 = fits.Column(name="abc", format="A3", array=a1)
# This column used to be E format, but the single-precision float lost
# too much precision when scaling so it was changed to a D
c2 = fits.Column(name="def", format="D", array=r1, bscale=2.3, bzero=0.6)
c3 = fits.Column(name="t1", format="I", array=[91, 92, 93])
x = fits.ColDefs([c1, c2, c3])
hdu = fits.TableHDU.from_columns(x)
hdu.writeto(self.temp("tmp.fits"), overwrite=True, checksum=True)
with fits.open(self.temp("tmp.fits"), checksum=True) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
assert "CHECKSUM" in hdul[0].header
assert hdul[0].header["CHECKSUM"] == "D8iBD6ZAD6fAD6ZA"
assert "DATASUM" in hdul[0].header
assert hdul[0].header["DATASUM"] == "0"
assert "CHECKSUM" in hdul[1].header
assert hdul[1].header["CHECKSUM"] == "3rKFAoI94oICAoI9"
assert "DATASUM" in hdul[1].header
assert hdul[1].header["DATASUM"] == "1914653725"
def test_open_with_no_keywords(self):
hdul = fits.open(self.data("arange.fits"), checksum=True)
hdul.close()
def test_append(self):
hdul = fits.open(self.data("tb.fits"))
hdul.writeto(self.temp("tmp.fits"), overwrite=True)
n = np.arange(100)
fits.append(self.temp("tmp.fits"), n, checksum=True)
hdul.close()
hdul = fits.open(self.temp("tmp.fits"), checksum=True)
assert hdul[0]._checksum is None
hdul.close()
def test_writeto_convenience(self):
n = np.arange(100)
fits.writeto(self.temp("tmp.fits"), n, overwrite=True, checksum=True)
hdul = fits.open(self.temp("tmp.fits"), checksum=True)
self._check_checksums(hdul[0])
hdul.close()
def test_hdu_writeto(self):
n = np.arange(100, dtype="int16")
hdu = fits.ImageHDU(n)
hdu.writeto(self.temp("tmp.fits"), checksum=True)
hdul = fits.open(self.temp("tmp.fits"), checksum=True)
self._check_checksums(hdul[0])
hdul.close()
def test_hdu_writeto_existing(self):
"""
Tests that when using writeto with checksum=True, a checksum and
datasum are added to HDUs that did not previously have one.
Regression test for https://github.com/spacetelescope/PyFITS/issues/8
"""
with fits.open(self.data("tb.fits")) as hdul:
hdul.writeto(self.temp("test.fits"), checksum=True)
with fits.open(self.temp("test.fits")) as hdul:
assert "CHECKSUM" in hdul[0].header
# These checksums were verified against CFITSIO
assert hdul[0].header["CHECKSUM"] == "7UgqATfo7TfoATfo"
assert "DATASUM" in hdul[0].header
assert hdul[0].header["DATASUM"] == "0"
assert "CHECKSUM" in hdul[1].header
assert hdul[1].header["CHECKSUM"] == "99daD8bX98baA8bU"
assert "DATASUM" in hdul[1].header
assert hdul[1].header["DATASUM"] == "1829680925"
def test_datasum_only(self):
n = np.arange(100, dtype="int16")
hdu = fits.ImageHDU(n)
hdu.writeto(self.temp("tmp.fits"), overwrite=True, checksum="datasum")
with fits.open(self.temp("tmp.fits"), checksum=True) as hdul:
if not (hasattr(hdul[0], "_datasum") and hdul[0]._datasum):
pytest.fail("Missing DATASUM keyword")
if not (hasattr(hdul[0], "_checksum") and not hdul[0]._checksum):
pytest.fail("Non-empty CHECKSUM keyword")
def test_open_update_mode_preserve_checksum(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148 where
checksums are being removed from headers when a file is opened in
update mode, even though no changes were made to the file.
"""
testfile = self.copy_file("checksum.fits")
with fits.open(testfile) as hdul:
data = hdul[1].data.copy()
hdul = fits.open(testfile, mode="update")
hdul.close()
with fits.open(testfile) as hdul:
assert "CHECKSUM" in hdul[1].header
assert "DATASUM" in hdul[1].header
assert comparerecords(data, hdul[1].data)
def test_open_update_mode_update_checksum(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148, part
2. This ensures that if a file contains a checksum, the checksum is
updated when changes are saved to the file, even if the file was opened
with the default of checksum=False.
An existing checksum and/or datasum are only stripped if the file is
opened with checksum='remove'.
"""
testfile = self.copy_file("checksum.fits")
with fits.open(testfile) as hdul:
header = hdul[1].header.copy()
data = hdul[1].data.copy()
with fits.open(testfile, mode="update") as hdul:
hdul[1].header["FOO"] = "BAR"
hdul[1].data[0]["TIME"] = 42
with fits.open(testfile) as hdul:
header2 = hdul[1].header
data2 = hdul[1].data
assert header2[:-3] == header[:-2]
assert "CHECKSUM" in header2
assert "DATASUM" in header2
assert header2["FOO"] == "BAR"
assert (data2["TIME"][1:] == data["TIME"][1:]).all()
assert data2["TIME"][0] == 42
with fits.open(testfile, mode="update", checksum="remove") as hdul:
pass
with fits.open(testfile) as hdul:
header2 = hdul[1].header
data2 = hdul[1].data
assert header2[:-1] == header[:-2]
assert "CHECKSUM" not in header2
assert "DATASUM" not in header2
assert header2["FOO"] == "BAR"
assert (data2["TIME"][1:] == data["TIME"][1:]).all()
assert data2["TIME"][0] == 42
def test_overwrite_invalid(self):
"""
Tests that invalid checksum or datasum are overwritten when the file is
saved.
"""
reffile = self.temp("ref.fits")
with fits.open(self.data("tb.fits")) as hdul:
hdul.writeto(reffile, checksum=True)
testfile = self.temp("test.fits")
with fits.open(self.data("tb.fits")) as hdul:
hdul[0].header["DATASUM"] = "1 "
hdul[0].header["CHECKSUM"] = "8UgqATfo7TfoATfo"
hdul[1].header["DATASUM"] = "2349680925"
hdul[1].header["CHECKSUM"] = "11daD8bX98baA8bU"
hdul.writeto(testfile)
with fits.open(testfile) as hdul:
hdul.writeto(self.temp("test2.fits"), checksum=True)
with fits.open(self.temp("test2.fits")) as hdul:
with fits.open(reffile) as ref:
assert "CHECKSUM" in hdul[0].header
# These checksums were verified against CFITSIO
assert hdul[0].header["CHECKSUM"] == ref[0].header["CHECKSUM"]
assert "DATASUM" in hdul[0].header
assert hdul[0].header["DATASUM"] == "0"
assert "CHECKSUM" in hdul[1].header
assert hdul[1].header["CHECKSUM"] == ref[1].header["CHECKSUM"]
assert "DATASUM" in hdul[1].header
assert hdul[1].header["DATASUM"] == ref[1].header["DATASUM"]
def _check_checksums(self, hdu):
if not (hasattr(hdu, "_datasum") and hdu._datasum):
pytest.fail("Missing DATASUM keyword")
if not (hasattr(hdu, "_checksum") and hdu._checksum):
pytest.fail("Missing CHECKSUM keyword")
| TestChecksumFunctions |
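A minimal standalone sketch of the behaviour these checksum tests exercise (assuming an astropy install and a hypothetical scratch path rather than the test class's temp helpers): writing with checksum=True adds CHECKSUM and DATASUM keywords that a later open with checksum=True verifies.

```python
import numpy as np
from astropy.io import fits

# fits.writeto wraps the array in a PrimaryHDU; checksum=True adds the keywords.
fits.writeto("/tmp/checksum_demo.fits", np.arange(100, dtype="int16"),
             overwrite=True, checksum=True)
with fits.open("/tmp/checksum_demo.fits", checksum=True) as hdul:
    # Opening with checksum=True re-verifies the stored sums against the data.
    print("CHECKSUM" in hdul[0].header, "DATASUM" in hdul[0].header)  # True True
```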
python | walkccc__LeetCode | solutions/1550. Three Consecutive Odds/1550.py | {
"start": 0,
"end": 209
} | class ____:
def threeConsecutiveOdds(self, arr: list[int]) -> bool:
count = 0
for a in arr:
count = 0 if a % 2 == 0 else count + 1
if count == 3:
return True
return False
| Solution |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_inline_schemas/pipeline.py | {
"start": 3092,
"end": 4378
} | class ____(Step):
context: ConnectorContext
title = "Restore original state"
def __init__(self, context: ConnectorContext) -> None:
super().__init__(context)
self.manifest_path = context.connector.manifest_path
self.original_manifest = None
if self.manifest_path.is_file():
self.original_manifest = self.manifest_path.read_text()
self.schemas_path = context.connector.python_source_dir_path / SCHEMAS_DIR_NAME
self.backup_schema_path = None
if self.schemas_path.is_dir():
self.backup_schema_path = Path(tempfile.mkdtemp())
copy_directory(self.schemas_path, self.backup_schema_path)
async def _run(self) -> StepResult:
if self.original_manifest:
self.manifest_path.write_text(self.original_manifest)
if self.backup_schema_path:
copy_directory(self.backup_schema_path, self.schemas_path)
return StepResult(
step=self,
status=StepStatus.SUCCESS,
)
async def _cleanup(self) -> StepResult:
if self.backup_schema_path:
shutil.rmtree(self.backup_schema_path)
return StepResult(
step=self,
status=StepStatus.SUCCESS,
)
| RestoreInlineState |
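A minimal standalone sketch (hypothetical paths, standard library only, not the pipeline's Step API) of the backup/restore pattern the step above uses: snapshot a file and a directory up front, then copy them back to undo whatever the migration changed.

```python
import shutil
import tempfile
from pathlib import Path

manifest = Path("manifest.yaml")   # illustrative paths
schemas_dir = Path("schemas")

# Snapshot the current state before running the migration.
original_manifest = manifest.read_text() if manifest.is_file() else None
backup_dir = None
if schemas_dir.is_dir():
    backup_dir = Path(tempfile.mkdtemp())
    shutil.copytree(schemas_dir, backup_dir, dirs_exist_ok=True)

# ... run a migration that may rewrite manifest.yaml and schemas/ ...

# Restore the snapshot and clean up the temporary backup.
if original_manifest is not None:
    manifest.write_text(original_manifest)
if backup_dir is not None:
    shutil.copytree(backup_dir, schemas_dir, dirs_exist_ok=True)
    shutil.rmtree(backup_dir)
```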
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_ses.py | {
"start": 3038,
"end": 4703
} | class ____:
"""The mock_aws decorator uses `moto` which does not currently support async SES so we mock it manually."""
@pytest.fixture
def mock_async_client(self):
return mock.AsyncMock()
@pytest.fixture
def mock_get_async_conn(self, mock_async_client):
with mock.patch.object(SesHook, "get_async_conn") as mocked_conn:
mocked_conn.return_value.__aenter__.return_value = mock_async_client
yield mocked_conn
async def test_get_async_conn(self, mock_get_async_conn, mock_async_client):
hook = SesHook()
async with await hook.get_async_conn() as async_conn:
assert async_conn is mock_async_client
@pytest.mark.parametrize("to", TEST_TO_ADDRESSES)
@pytest.mark.parametrize("cc", TEST_CC_ADDRESSES)
@pytest.mark.parametrize("bcc", TEST_BCC_ADDRESSES)
async def test_asend_email(self, mock_get_async_conn, mock_async_client, to, cc, bcc):
_verify_address(TEST_FROM_ADDRESS)
hook = SesHook()
mock_async_client.send_raw_email.return_value = {"MessageId": "test_message_id"}
response = await hook.asend_email(
mail_from=TEST_FROM_ADDRESS,
to=to,
subject=TEST_SUBJECT,
html_content=TEST_HTML_CONTENT,
cc=cc,
bcc=bcc,
reply_to=TEST_REPLY_TO,
return_path=TEST_RETURN_PATH,
)
assert response is not None
assert isinstance(response, dict)
assert "MessageId" in response
assert response["MessageId"] == "test_message_id"
mock_async_client.send_raw_email.assert_called_once()
| TestAsyncSesHook |
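A minimal standalone sketch (standard library only, illustrative class and argument names) of the AsyncMock pattern used above: patching an async method yields an AsyncMock, and wiring `__aenter__` makes `async with await hook.get_async_conn() as conn` hand back the fake client.

```python
import asyncio
from unittest import mock

class Hook:
    async def get_async_conn(self):
        raise RuntimeError("would normally build a real async client")

async def main():
    client = mock.AsyncMock()
    client.send_raw_email.return_value = {"MessageId": "test_message_id"}
    # patch.object on an async def returns an AsyncMock (Python 3.8+).
    with mock.patch.object(Hook, "get_async_conn") as get_conn:
        get_conn.return_value.__aenter__.return_value = client
        async with await Hook().get_async_conn() as conn:
            response = await conn.send_raw_email(RawMessage={"Data": b""})
    print(response["MessageId"])  # test_message_id

asyncio.run(main())
```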
python | huggingface__transformers | src/transformers/models/moshi/modeling_moshi.py | {
"start": 77469,
"end": 122770
} | class ____(MoshiPreTrainedModel, GenerationMixin):
config: MoshiConfig
output_modalities = ("audio", "text")
main_input_name = "input_ids"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
def __init__(self, config: MoshiConfig):
super().__init__(config)
# We have 2 * num_codebooks audio embedding layers because we have the user input channel and the model output channel.
self.embed_tokens = nn.ModuleList(
[nn.Embedding(config.audio_vocab_size + 1, config.hidden_size) for _ in range(2 * config.num_codebooks)]
)
self.audio_encoder = AutoModel.from_config(config.audio_encoder_config)
self.decoder = MoshiForCausalLM(config)
self.depth_decoder = MoshiDepthDecoder._from_config(config.depth_decoder_config)
self.num_codebooks = config.num_codebooks
self.post_init()
def get_depth_decoder(self):
return self.depth_decoder
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.BoolTensor] = None,
user_input_values: Optional[torch.FloatTensor] = None,
user_audio_codes: Optional[torch.Tensor] = None,
moshi_input_values: Optional[torch.FloatTensor] = None,
moshi_audio_codes: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
text_labels: Optional[torch.LongTensor] = None,
audio_labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple, Seq2SeqLMOutput]:
r"""
user_input_values (`torch.Tensor `of shape `(batch_size, 1, audio_sequence_length), *optional*):
The audio waveforms used as audio user prompt for the generation.
user_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*):
The audio codes used as audio user prompt for the generation. Has priority over `user_input_values` and represents the audio "tokens" of `user_input_values` once passed through the audio encoder.
moshi_input_values (`torch.Tensor `of shape `(batch_size, 1, audio_sequence_length), *optional*):
The audio waveforms used as audio Moshi prompt for the generation.
moshi_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*):
The audio codes used as audio Moshi prompt for the generation. Has priority over `moshi_input_values` and represents the audio "tokens" of `moshi_input_values` once passed through the audio encoder.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `inputs_embeds` have to be
input (see `past_key_values`). This is useful if you want more control over how to convert
`input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `input_ids` and `inputs_embeds` are both unset, the input embeddings are built from the audio codes instead.
text_labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for text language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
audio_labels (`torch.LongTensor` of shape `(batch_size, num_codebooks, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.audio_vocab_size]`
Examples:
```python
>>> from transformers import MoshiForConditionalGeneration
>>> import torch
>>> model = MoshiForConditionalGeneration.from_pretrained("kmhf/hf-moshiko")
>>> inputs = model.get_unconditional_inputs()
>>> logits = model(**inputs, ).logits
>>> logits.shape # (bsz, seq_len, text_vocab_size)
torch.Size([1, 1, 32000])
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
kwargs_audio_encoder = {
argument[len("audio_encoder_")]: value
for argument, value in kwargs.items()
if argument.startswith("audio_encoder_")
}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
kwargs_depth_decoder = {
argument[len("depth_decoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("depth_decoder_")
}
# If inputs_embeds is provided, it has the priority over input_ids and audio_codes, which won't be used
if inputs_embeds is None:
if user_input_values is not None and user_audio_codes is None:
user_audio_codes = self.audio_encoder.encode(
user_input_values, num_quantizers=self.num_codebooks, **kwargs_audio_encoder
)[0]
if moshi_input_values is not None and moshi_audio_codes is None:
moshi_audio_codes = self.audio_encoder.encode(
moshi_input_values, num_quantizers=self.num_codebooks, **kwargs_audio_encoder
)[0]
audio_codes = torch.cat([moshi_audio_codes, user_audio_codes], dim=1)
if input_ids is None and audio_codes is None:
raise ValueError(
"You must provide at least one of `input_ids`, `inputs_embeds`, `input_values` and `audio_codes`."
)
if input_ids is not None:
inputs_embeds = self.decoder.model.embed_tokens(input_ids)
if audio_codes is not None:
audio_inputs_embeds = sum(
self.embed_tokens[codebook](audio_codes[:, codebook]) for codebook in range(audio_codes.shape[1])
)
inputs_embeds = (
audio_inputs_embeds
if inputs_embeds is None
else audio_inputs_embeds + inputs_embeds.to(audio_inputs_embeds.device)
)
# Decode
decoder_outputs = self.decoder(
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
use_cache=use_cache,
past_key_values=past_key_values,
return_dict=True,
labels=text_labels,
**kwargs_decoder,
)
decoder_last_hidden_state = decoder_outputs.last_hidden_state
depth_decoder_outputs = None
final_loss = decoder_outputs.loss
if text_labels is not None and audio_labels is not None:
# To use depth decoder forward here, we actually need oracle input ids since we're supposed to pass the true input ids
audio_labels = self.build_delay_pattern_mask(
audio_labels,
bos_token_id=self.config.audio_vocab_size,
pad_token_id=self.config.audio_vocab_size,
max_length=audio_labels.shape[-1] + 1,
)[0]
# (batch_size, sequence_length) -> (batch_size * sequence_length, 1)
text_labels = text_labels.view(-1, 1)
# (batch_size, num_codebooks, sequence_length) -> (batch_size * sequence_length, num_codebooks)
audio_labels = audio_labels.transpose(1, 2).reshape(-1, audio_labels.shape[1])
depth_input_ids = torch.cat([text_labels, audio_labels], dim=1)
# keep the last codebook out of input_ids
depth_input_ids = depth_input_ids[:, :-1]
# (batch_size, sequence_length, dim) -> (batch_size * sequence_length, 1, dim)
decoder_last_hidden_state = decoder_last_hidden_state.view(-1, 1, decoder_last_hidden_state.shape[-1])
depth_decoder_outputs = self.depth_decoder(
last_hidden_state=decoder_last_hidden_state,
input_ids=depth_input_ids,
attention_mask=attention_mask,
labels=audio_labels,
**kwargs_depth_decoder,
)
final_loss += depth_decoder_outputs.loss
if not return_dict:
outputs = decoder_outputs.to_tuple()
if depth_decoder_outputs is not None:
outputs += depth_decoder_outputs.to_tuple()
return outputs
return MoshiConditionalGenerationOutputWithPast(
loss=decoder_outputs.loss,
logits=decoder_outputs.logits,
last_hidden_state=decoder_last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
hidden_states=decoder_outputs.hidden_states,
attentions=decoder_outputs.attentions,
depth_loss=None if depth_decoder_outputs is None else depth_decoder_outputs.loss,
audio_logits=None if depth_decoder_outputs is None else depth_decoder_outputs.logits,
depth_past_key_values=None if decoder_outputs is None else decoder_outputs.past_key_values,
depth_hidden_states=None if decoder_outputs is None else decoder_outputs.hidden_states,
depth_attentions=None if decoder_outputs is None else decoder_outputs.attentions,
)
def _prepare_attention_mask_for_generation(
self,
input_ids: torch.LongTensor,
generation_config: GenerationConfig,
kwargs: dict[str, Any],
) -> torch.LongTensor:
pad_token_id = generation_config.pad_token_id
eos_token_id = generation_config.eos_token_id
default_attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device)
if pad_token_id is None:
return default_attention_mask
is_pad_token_in_inputs = (pad_token_id is not None) and torch.isin(input_ids, pad_token_id).any()
is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or ~torch.isin(
eos_token_id, pad_token_id
).any()
can_infer_attention_mask = is_pad_token_in_inputs * is_pad_token_not_equal_to_eos_token_id
attention_mask_from_padding = input_ids.ne(pad_token_id).long()
attention_mask = (
attention_mask_from_padding * can_infer_attention_mask + default_attention_mask * ~can_infer_attention_mask
)
return attention_mask
def _prepare_inputs_embeds_for_generation(
self,
input_ids: Optional[torch.LongTensor] = None,
user_input_values: Optional[torch.FloatTensor] = None,
user_audio_codes: Optional[torch.Tensor] = None,
moshi_input_values: Optional[torch.FloatTensor] = None,
moshi_audio_codes: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
apply_delay_pattern_mask: bool = False,
concat_unconditional_inputs: bool = False,
):
user_delay_pattern_mask = None
moshi_delay_pattern_mask = None
if (
inputs_embeds is None
and input_ids is None
and user_input_values is None
and user_audio_codes is None
and moshi_input_values is None
and moshi_audio_codes is None
):
raise ValueError(
"You must provide at least one of `input_ids`, `user_input_values`, `moshi_input_values`, `user_audio_codes`, `moshi_audio_codes` or `inputs_embeds`."
)
# in case inputs_embeds is passed, we might still need to create delay pattern masks
if inputs_embeds is None or apply_delay_pattern_mask:
if user_input_values is not None and user_audio_codes is None:
user_audio_codes = self.audio_encoder.encode(user_input_values, num_quantizers=self.num_codebooks)[0]
if moshi_input_values is not None and moshi_audio_codes is None:
moshi_audio_codes = self.audio_encoder.encode(moshi_input_values, num_quantizers=self.num_codebooks)[0]
if inputs_embeds is None and concat_unconditional_inputs:
unconditional_inputs = self.get_unconditional_inputs(num_samples=user_audio_codes.shape[0])
moshi_audio_codes = torch.cat([unconditional_inputs.moshi_audio_codes, moshi_audio_codes], dim=2)
user_audio_codes = torch.cat([unconditional_inputs.user_audio_codes, user_audio_codes], dim=2)
input_ids = torch.cat([unconditional_inputs.input_ids, input_ids], dim=1)
if attention_mask is not None:
attention_mask = torch.cat([unconditional_inputs.attention_mask, attention_mask], dim=1)
if inputs_embeds is None or apply_delay_pattern_mask:
if apply_delay_pattern_mask and user_audio_codes is not None:
user_audio_codes, user_delay_pattern_mask = self.build_delay_pattern_mask(
user_audio_codes,
bos_token_id=self.config.audio_vocab_size,
pad_token_id=self.config.audio_vocab_size,
max_length=generation_config.max_length,
)
if apply_delay_pattern_mask and moshi_audio_codes is not None:
moshi_audio_codes, moshi_delay_pattern_mask = self.build_delay_pattern_mask(
moshi_audio_codes,
bos_token_id=self.config.audio_vocab_size,
pad_token_id=self.config.audio_vocab_size,
max_length=generation_config.max_length,
)
# If inputs_embeds is provided, it has the priority over input_ids and audio_codes, which won't be used
if inputs_embeds is None:
audio_inputs_embeds = None
if user_audio_codes is not None and moshi_audio_codes is not None:
audio_codes = torch.cat([moshi_audio_codes, user_audio_codes], dim=1)
audio_inputs_embeds = sum(
self.embed_tokens[codebook](audio_codes[:, codebook]) for codebook in range(audio_codes.shape[1])
)
elif moshi_audio_codes is not None:
audio_codes = moshi_audio_codes
audio_inputs_embeds = sum(
self.embed_tokens[codebook](audio_codes[:, codebook]) for codebook in range(audio_codes.shape[1])
)
elif user_audio_codes is not None:
audio_codes = user_audio_codes
audio_inputs_embeds = sum(
self.embed_tokens[codebook](audio_codes[:, codebook + self.num_codebooks])
for codebook in range(audio_codes.shape[1])
)
if input_ids is not None:
inputs_embeds = self.decoder.model.embed_tokens(input_ids)
if audio_inputs_embeds is not None:
inputs_embeds = (
audio_inputs_embeds
if inputs_embeds is None
else audio_inputs_embeds + inputs_embeds.to(audio_inputs_embeds.device)
)
return (
inputs_embeds,
input_ids,
user_audio_codes,
moshi_audio_codes,
user_delay_pattern_mask,
moshi_delay_pattern_mask,
attention_mask,
)
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.LongTensor] = None,
user_input_values: Optional[torch.FloatTensor] = None,
user_audio_codes: Optional[torch.Tensor] = None,
moshi_input_values: Optional[torch.FloatTensor] = None,
moshi_audio_codes: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
return_audio_waveforms: Optional[bool] = True,
return_audio_codes: Optional[bool] = None,
concat_unconditional_inputs: Optional[bool] = True,
**kwargs,
) -> torch.LongTensor:
"""
Generates sequences of text token ids and audio token ids.
Parameters:
input_ids (`torch.Tensor `of shape `(batch_size, sequence_length), *optional*):
The sequence used as a text prompt for the generation.
user_input_values (`torch.Tensor `of shape `(batch_size, 1, audio_sequence_length), *optional*):
The audio waveforms used as audio user prompt for the generation.
user_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*):
The audio codes used as audio user prompt for the generation. Has priority over `user_input_values` and represents the audio "tokens" of `user_input_values` once passed through the audio encoder.
moshi_input_values (`torch.Tensor `of shape `(batch_size, 1, audio_sequence_length), *optional*):
The audio waveforms used as audio Moshi prompt for the generation.
moshi_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*):
The audio codes used as audio Moshi prompt for the generation. Has priority over `moshi_input_values` and represents the audio "tokens" of `moshi_input_values` once passed through the audio encoder.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` and the audio inputs you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert the inputs into associated vectors than the
model's internal embedding lookup matrix.
return_audio_waveforms (`bool`, *optional*, defaults to `True`):
If `False`, won't generate the audio waveforms.
return_audio_codes (`bool`, *optional*):
If `True`, will also return the generated audio codes, i.e. the intermediate audio "tokens" which transform into `audio_sequences` once passed through the audio decoder.
concat_unconditional_inputs (`bool`, *optional*, defaults to `True`):
If `False`, won't concatenate initial audio and text tokens.
kwargs (`dict[str, Any]`, *optional*):
Remaining dictionary of keyword arguments that are passed to the `generate` method. Refers to the
original [`generate` docstrings](https://huggingface.co/docs/transformers/main/en/main_classes/text_generation#transformers.GenerationMixin.generate)
for more information on how to use them.
Note that keywords with a *depth_* prefix will be input for the `generate` method of the
depth decoder. Otherwise, the latter will use its default generation config.
Return:
[`MoshiConditionalGenerationGenerateOutput`]
"""
# multiple generate -> need to create/update device map
if hasattr(self, "hf_device_map") and not hasattr(self.depth_decoder, "hf_device_map"):
self.depth_decoder.hf_device_map = {}
if "" in self.hf_device_map:
self.depth_decoder.hf_device_map = self.hf_device_map
else:
main_device = [d for d in self.hf_device_map.values() if d not in ["cpu", "disk"]][0]
self.depth_decoder.hf_device_map = {
key[len("depth_decoder") :]: main_device if value in ["cpu", "disk"] else value
for key, value in self.hf_device_map.items()
if key.startswith("depth_decoder")
}
# need to remove depth_decoder from the top device_map so that we assign correctly the device for each layer idx in the cache
self.hf_device_map = {
key: value for key, value in self.hf_device_map.items() if not key.startswith("depth_decoder")
}
# retrieve depth decoder kwargs
depth_decoder_kwargs_keys = {argument for argument in kwargs if argument.startswith("depth_decoder_")}
kwargs_depth_decoder = {
argument[len("depth_decoder_") :]: kwargs.pop(argument) for argument in depth_decoder_kwargs_keys
}
# needs to prepare generation config, even though it'll be done again in `generate`
generation_config, kwargs = self._prepare_generation_config(kwargs.pop("generation_config", None), **kwargs)
input_ids, user_audio_codes, moshi_audio_codes, concat_unconditional_inputs = (
self._check_and_maybe_initialize_inputs(
input_ids=input_ids,
user_input_values=user_input_values,
user_audio_codes=user_audio_codes,
moshi_input_values=moshi_input_values,
moshi_audio_codes=moshi_audio_codes,
inputs_embeds=inputs_embeds,
concat_unconditional_inputs=concat_unconditional_inputs,
)
)
inputs = inputs_embeds if input_ids is None else input_ids
input_ids_length = inputs.shape[-1] + 1 if concat_unconditional_inputs else inputs.shape[-1]
has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
generation_config = self._prepare_generated_length(
generation_config=generation_config,
has_default_max_length=has_default_max_length,
has_default_min_length=has_default_min_length,
model_input_name="inputs_embeds" if input_ids is None else "input_ids",
inputs_tensor=inputs,
input_ids_length=input_ids_length,
)
# retrieve depth decoder generation config if it exists
if hasattr(generation_config, "depth_decoder_config"):
depth_decoder_generation_config = generation_config.depth_decoder_config
else:
# we need to control the number of tokens generated by the depth decoder
depth_decoder_generation_config = {
"min_length": self.num_codebooks + 1,
"max_length": self.num_codebooks + 1,
"cache_implementation": "static",
}
# update kwargs_depth_decoder: kwargs_depth_decoder have priority over depth_decoder_generation_config
depth_decoder_generation_config.update(kwargs_depth_decoder)
kwargs_depth_decoder = depth_decoder_generation_config
attention_mask = kwargs.pop("attention_mask", None)
if attention_mask is None:
attention_mask = self._prepare_attention_mask_for_generation(
input_ids=input_ids,
generation_config=generation_config,
kwargs=kwargs,
)
(
inputs_embeds,
input_ids,
user_audio_codes,
moshi_audio_codes,
user_delay_pattern_mask,
moshi_delay_pattern_mask,
attention_mask,
) = self._prepare_inputs_embeds_for_generation(
input_ids=input_ids,
user_input_values=user_input_values,
user_audio_codes=user_audio_codes,
moshi_input_values=moshi_input_values,
moshi_audio_codes=moshi_audio_codes,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
generation_config=generation_config,
apply_delay_pattern_mask=True,
concat_unconditional_inputs=concat_unconditional_inputs,
)
# create blank user inputs - moshi needs a constant stream of user inputs
blank_input_values = torch.zeros(
(inputs_embeds.shape[0], 1, int(self.config.sampling_rate / self.config.audio_encoder_config.frame_rate)),
dtype=self.dtype,
device=self.device,
)
blank_user_audio_codes = self.audio_encoder.encode(blank_input_values, num_quantizers=self.num_codebooks)[0]
# set delay pattern mask for the rest of the generation
kwargs["user_delay_pattern_mask"] = (
user_delay_pattern_mask if user_delay_pattern_mask is not None else kwargs.get("user_delay_pattern_mask")
)
kwargs["moshi_delay_pattern_mask"] = (
moshi_delay_pattern_mask
if moshi_delay_pattern_mask is not None
else kwargs.get("moshi_delay_pattern_mask")
)
self.generated_audio_codes = torch.repeat_interleave(
moshi_audio_codes, max(generation_config.num_beams, generation_config.num_return_sequences), dim=0
)
return_dict_in_generate = generation_config.num_beams > 1 or generation_config.return_dict_in_generate
output_scores = generation_config.num_beams > 1 or generation_config.output_scores
outputs = super().generate(
inputs_embeds=inputs_embeds,
input_ids=input_ids,
generation_config=generation_config,
blank_user_audio_codes=blank_user_audio_codes,
kwargs_depth_decoder=kwargs_depth_decoder,
return_dict_in_generate=return_dict_in_generate,
output_scores=output_scores,
attention_mask=attention_mask,
**kwargs,
)
if not return_audio_waveforms and not return_audio_codes:
if return_dict_in_generate and not generation_config.return_dict_in_generate:
return outputs.sequences
return outputs
# check if outputs is a dict or tokens
if not return_dict_in_generate:
output_text_ids = outputs
else:
output_text_ids = outputs.sequences
if generation_config.num_return_sequences > 1:
moshi_delay_pattern_mask = torch.repeat_interleave(
moshi_delay_pattern_mask, generation_config.num_return_sequences, dim=0
)
if generation_config.num_beams > 1:
# we need to reorganize self.last_hidden_states and generated audio codes according to the beam_indices
# Beam indices are of shape `input_length + number_generated_tokens` but actually starts
# indexing indices at index 0 instead of index `input_length-1`.
# We thus discard the last `input_length` indices that are never used.
beam_indices = outputs.beam_indices[:, : -moshi_audio_codes.shape[-1]]
generated_audio_codes = self.generated_audio_codes[:, :, moshi_audio_codes.shape[-1] :]
# we've generated audio tokens `number_generated_tokens-1` times, so we use the corresponding beam indices to
# retrieve the right audio tokens
expanded_beam_indices = beam_indices[:, :-1].unsqueeze(1).expand(-1, self.num_codebooks, -1)
generated_audio_codes = torch.gather(generated_audio_codes, dim=0, index=expanded_beam_indices)
# now, rebuild generated audio codes, this time with the right beam tracking
moshi_audio_codes = torch.repeat_interleave(
moshi_audio_codes, generation_config.num_return_sequences, dim=0
)
self.generated_audio_codes = torch.cat((moshi_audio_codes, generated_audio_codes), dim=2)
# use the last beam indice to retrieve the right self.last_hidden_state
self.last_hidden_state = torch.index_select(self.last_hidden_state, dim=0, index=beam_indices[:, -1])
# we need to make a last generation with the latest generated tokens
last_hidden_state = self.last_hidden_state.view(-1, 1, self.last_hidden_state.shape[-1])
last_generated_audio_codes = self.depth_decoder.generate(
last_hidden_state=last_hidden_state,
input_ids=output_text_ids[:, -1:].view(-1, 1),
**kwargs_depth_decoder,
)
last_generated_audio_codes = last_generated_audio_codes[:, 1:].unsqueeze(2)
self.generated_audio_codes = torch.cat([self.generated_audio_codes, last_generated_audio_codes], dim=2)
# apply the pattern mask to the final audio ids
output_audio_codes = self.apply_delay_pattern_mask(self.generated_audio_codes, moshi_delay_pattern_mask)
# revert the pattern delay mask by filtering the pad token id and bos token ids
mask = moshi_delay_pattern_mask != self.config.audio_vocab_size
output_audio_codes = output_audio_codes[mask].reshape(mask.shape[0], self.num_codebooks, -1)
output_values = None
if return_audio_waveforms:
output_values = self.audio_encoder.decode(
output_audio_codes,
).audio_values
output_audio_codes = output_audio_codes if return_audio_codes else None
if generation_config.return_dict_in_generate:
return MoshiConditionalGenerationGenerateOutput(
audio_sequences=output_values, audio_codes=output_audio_codes, **outputs
)
return MoshiConditionalGenerationGenerateOutput(
audio_sequences=output_values, sequences=output_text_ids, audio_codes=output_audio_codes
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
cache_position=None,
position_ids=None,
use_cache=True,
logits_to_keep=None,
user_delay_pattern_mask=None,
moshi_delay_pattern_mask=None,
kwargs_depth_decoder=None,
blank_user_audio_codes: Optional[torch.FloatTensor] = None,
**kwargs,
):
# Overwritten -- Moshi has custom post-processing on the prepared inputs.
# If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
# Exception 1: when passing input_embeds, input_ids may be missing entries
# Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
# Exception 3: with synced GPUs cache_position may go out of bounds, but we only want dummy token in that case.
# (we can't check exception 3 while compiling)
if past_key_values is not None:
if (
inputs_embeds is not None # Exception 1
or cache_position[-1] >= input_ids.shape[1] # Exception 3
):
input_ids = input_ids[:, -cache_position.shape[0] :]
elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
input_ids = input_ids[:, cache_position]
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and cache_position[0] == 0:
model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": None}
else:
model_inputs = {"input_ids": input_ids, "inputs_embeds": None}
if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2:
if model_inputs["inputs_embeds"] is not None:
batch_size, sequence_length, _ = inputs_embeds.shape
device = inputs_embeds.device
else:
batch_size, sequence_length = input_ids.shape
device = input_ids.device
attention_mask = self.decoder.model._prepare_4d_causal_attention_mask_with_cache_position(
attention_mask,
sequence_length=sequence_length,
target_length=past_key_values.get_max_cache_shape(),
dtype=self.decoder.lm_head.weight.dtype,
device=device,
cache_position=cache_position,
batch_size=batch_size,
config=self.config,
past_key_values=past_key_values,
)
model_inputs.update(
{
"position_ids": position_ids,
"past_key_values": past_key_values,
"use_cache": use_cache,
"attention_mask": attention_mask,
"cache_position": cache_position,
}
)
# 2. Now that everything is prepared, generate audio_codes using the depth decoder
# we want to do it after a first token has been generated
if model_inputs["input_ids"] is not None:
last_hidden_state = kwargs.pop("last_hidden_state")
# (batch_size, sequence_length, dim) -> (batch_size * sequence_length, 1, dim)
last_hidden_state = last_hidden_state.view(-1, 1, last_hidden_state.shape[-1])
input_ids = model_inputs.pop("input_ids")
generated_audio_codes = self.depth_decoder.generate(
last_hidden_state=last_hidden_state,
input_ids=input_ids.view(-1, 1),
**kwargs_depth_decoder,
)
# the first tokens are text tokens
generated_audio_codes = generated_audio_codes[:, 1:].unsqueeze(2)
user_audio_codes = self.apply_delay_pattern_mask(
torch.cat(
[self.generated_audio_codes, blank_user_audio_codes.to(self.generated_audio_codes.device)], dim=2
),
user_delay_pattern_mask,
)[:, :, -1:]
self.generated_audio_codes = self.apply_delay_pattern_mask(
torch.cat([self.generated_audio_codes, generated_audio_codes], dim=2), moshi_delay_pattern_mask
)
inputs_embeds, _, _, _, _, _, _ = self._prepare_inputs_embeds_for_generation(
input_ids, moshi_audio_codes=self.generated_audio_codes[:, :, -1:], user_audio_codes=user_audio_codes
)
model_inputs["input_ids"] = None
model_inputs["inputs_embeds"] = inputs_embeds
# Forward ALL kwargs that are uninitialized (e.g. `use_cache`).
for key, value in kwargs.items():
if key not in model_inputs:
model_inputs[key] = value
return model_inputs
def _update_model_kwargs_for_generation(
self,
outputs: ModelOutput,
model_kwargs: dict[str, Any],
is_encoder_decoder: bool = False,
num_new_tokens: int = 1,
) -> dict[str, Any]:
model_kwargs = super()._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder, num_new_tokens
)
# update last_hidden_state that'll be used in the depth decoder
model_kwargs["last_hidden_state"] = outputs.get("last_hidden_state")[:, -1:]
# dirty, but we need to make a last depth_decoder.generate
self.last_hidden_state = outputs.get("last_hidden_state")[:, -1:]
return model_kwargs
def get_input_embeddings(self):
return self.decoder.get_input_embeddings()
def set_input_embeddings(self, value):
self.decoder.set_input_embeddings(value)
def get_output_embeddings(self):
return self.decoder.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
self.decoder.set_output_embeddings(new_embeddings)
def freeze_audio_encoder(self):
"""
Freeze the audio encoder weights.
"""
for param in self.audio_encoder.parameters():
param.requires_grad = False
self.audio_encoder._requires_grad = False
def freeze_depth_decoder(self):
"""
Freeze the depth decoder weights.
"""
for param in self.depth_decoder.parameters():
param.requires_grad = False
self.depth_decoder._requires_grad = False
@staticmethod
# Copied from transformers.models.musicgen.modeling_musicgen.MusicgenForCausalLM.apply_delay_pattern_mask
def apply_delay_pattern_mask(input_ids, decoder_pad_token_mask):
"""Apply a delay pattern mask to the decoder input ids, only preserving predictions where
the mask is set to -1, and otherwise setting to the value detailed in the mask."""
seq_len = input_ids.shape[-1]
decoder_pad_token_mask = decoder_pad_token_mask[..., :seq_len]
input_ids = torch.where(decoder_pad_token_mask == -1, input_ids, decoder_pad_token_mask)
return input_ids
def build_delay_pattern_mask(
self, input_ids: torch.LongTensor, bos_token_id: int, pad_token_id: int, max_length: Optional[int] = None
):
"""Build a delayed pattern mask to the input_ids. Each codebook, except the first one, is offset by
one, giving a delayed pattern mask at the start of sequence and end of sequence. Take the example where there
are 4 codebooks and a max sequence length of 6, we have the delayed pattern mask of shape `(codebooks,
seq_len)`:
- [-1, -1, -1, -1, -1, P]
- [ B, -1, -1, -1, -1, -1]
- [ B, -1, -1, -1, -1, -1]
- [ B, -1, -1, -1, -1, -1]
where B is the beginning-of-sentence token, P is the special padding token id and -1 indicates that the token is valid for prediction. If we include
a prompt (input ids), the -1 positions indicate where new tokens should be predicted. Otherwise, the
mask is set to the value in the prompt:
- [ a0, a1, -1, -1, -1, P]
- [ B, b0, b1, -1, -1, -1]
- [ B, c0, c1, -1, -1, -1]
- [ B, d0, d1, -1, -1, -1]
where a-d indicate the codebook channel and 0/1 the time step. Only the -1
positions are overridden during prediction.
"""
bsz, num_codebooks, seq_len = input_ids.shape
max_length = max_length if max_length is not None else self.generation_config.max_length
input_ids_shifted = (
torch.ones((bsz, num_codebooks, max_length), dtype=torch.long, device=input_ids.device) * -1
)
# the first codebook channel is not shifted
seq_len_to_keep = min(seq_len, max_length - 1)
input_ids_shifted[:, 0, :seq_len_to_keep] = input_ids[:, 0, :seq_len_to_keep]
# fill the shifted ids with the prompt entries
input_ids_shifted[:, 1:, 1 : seq_len_to_keep + 1] = input_ids[:, 1:, :seq_len_to_keep]
# fill with BOS and PAD
input_ids_shifted[:, 1:, 0] = bos_token_id
input_ids_shifted[:, 0, -1] = pad_token_id
# construct a pattern mask that indicates the positions of BOS and PAD tokens for each codebook
pattern_mask = input_ids_shifted
input_ids = input_ids_shifted[..., :seq_len_to_keep]
return input_ids, pattern_mask
def get_unconditional_inputs(self, num_samples=1):
"""
Helper function to get null inputs for unconditional generation, enabling the model to be used without the
feature extractor or tokenizer.
Args:
num_samples (int, *optional*):
Number of audio samples to unconditionally generate.
max_new_tokens (int, *optional*):
Number of tokens to generate for each sample. More tokens means longer audio samples, at the expense of
longer inference (since more audio tokens need to be generated per sample).
Example:
```python
>>> from transformers import MoshiForConditionalGeneration
>>> model = MoshiForConditionalGeneration.from_pretrained("kmhf/hf-moshiko-pytorch-bf16")
>>> # get the unconditional (or 'null') inputs for the model
>>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1)
>>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256)
```"""
input_ids = torch.ones((num_samples, 1), device=self.device, dtype=torch.int64) * self.config.vocab_size
user_audio_codes = (
torch.ones((num_samples, self.num_codebooks, 1), device=self.device, dtype=torch.int64)
* self.config.audio_vocab_size
)
moshi_audio_codes = (
torch.ones((num_samples, self.num_codebooks, 1), device=self.device, dtype=torch.int64)
* self.config.audio_vocab_size
)
attention_mask = torch.ones((num_samples, 1), device=self.device, dtype=torch.long)
return MoshiUnconditionalInput(
input_ids=input_ids,
user_audio_codes=user_audio_codes,
moshi_audio_codes=moshi_audio_codes,
attention_mask=attention_mask,
)
def _check_and_maybe_initialize_inputs(
self,
input_ids=None,
user_input_values=None,
user_audio_codes=None,
moshi_input_values=None,
moshi_audio_codes=None,
inputs_embeds=None,
concat_unconditional_inputs=None,
):
inputs = input_ids if inputs_embeds is None else inputs_embeds
user_input = user_audio_codes if user_input_values is None else user_input_values
moshi_input = moshi_audio_codes if moshi_input_values is None else moshi_input_values
one_input_has_been_passed = (user_input is not None) or (moshi_input is not None) or (inputs is not None)
# concat_unconditional_inputs will be False if inputs_embeds is used
concat_unconditional_inputs = concat_unconditional_inputs and not (
inputs_embeds is not None and input_ids is None
)
# if one or two of the three required inputs have been passed, throws an error
if one_input_has_been_passed and (user_input is None):
raise ValueError(
"No user audio inputs have been passed alongside the other inputs. Make sure either `user_input_values` or `user_audio_codes` is passed or use `MoshiForConditionalGeneration.get_unconditional_inputs`. Check the `MoshiForConditionalGeneration` docstrings for more information."
)
elif one_input_has_been_passed and (moshi_input is None):
raise ValueError(
"No Moshi audio inputs have been passed alongside the other inputs. Make sure either `moshi_input_values` or `moshi_audio_codes` is passed or use `MoshiForConditionalGeneration.get_unconditional_inputs`. Check the `MoshiForConditionalGeneration` docstrings for more information."
)
elif one_input_has_been_passed and (inputs is None):
raise ValueError(
"No `input_ids` or `inputs_embeds` have been passed alongside the other inputs. Make sure `input_ids` is passed or use `MoshiForConditionalGeneration.get_unconditional_inputs`. Check the `MoshiForConditionalGeneration` docstrings for more information."
)
elif not one_input_has_been_passed:
# if no inputs have been passed, use default values
unconditional_inputs = self.get_unconditional_inputs()
input_ids = unconditional_inputs.input_ids
user_audio_codes = unconditional_inputs.user_audio_codes
moshi_audio_codes = unconditional_inputs.moshi_audio_codes
# in that case, no need to concat unconditional inputs
concat_unconditional_inputs = False
else:
# check if same sequence length
user_seq_length = user_input.shape[-1]
moshi_seq_length = moshi_input.shape[-1]
tokens_seq_length = inputs.shape[1]
ratio = self.config.audio_encoder_config.frame_rate / self.config.sampling_rate
moshi_seq_length = math.ceil(moshi_seq_length * ratio) if moshi_audio_codes is None else moshi_seq_length
user_seq_length = math.ceil(user_seq_length * ratio) if user_audio_codes is None else user_seq_length
if tokens_seq_length != moshi_seq_length or tokens_seq_length != user_seq_length:
raise ValueError(
"At least one of the 3 inputs of `MoshiForConditionalGeneration` doesn't have the same sequence length as the others."
"Make sure that they all have the same sequence length. Check the `MoshiForConditionalGeneration` docstrings for more information."
)
return input_ids, user_audio_codes, moshi_audio_codes, concat_unconditional_inputs
__all__ = ["MoshiForCausalLM", "MoshiForConditionalGeneration", "MoshiModel", "MoshiPreTrainedModel"]
| MoshiForConditionalGeneration |
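The build_delay_pattern_mask docstring above describes the shifted layout in prose; below is a minimal standalone sketch of that layout (a hypothetical helper assuming torch, not the model's own method) that reproduces the a/b/c/d example from the docstring.

```python
import torch

def toy_delay_pattern(codes, bos, pad, max_length):
    bsz, num_codebooks, seq_len = codes.shape
    shifted = torch.full((bsz, num_codebooks, max_length), -1, dtype=torch.long)
    keep = min(seq_len, max_length - 1)
    shifted[:, 0, :keep] = codes[:, 0, :keep]         # first codebook is not delayed
    shifted[:, 1:, 1:keep + 1] = codes[:, 1:, :keep]  # remaining codebooks shift right by one
    shifted[:, 1:, 0] = bos                           # BOS opens every delayed row
    shifted[:, 0, -1] = pad                           # PAD closes the undelayed row
    return shifted[..., :keep], shifted               # (trimmed prompt ids, full pattern mask)

codes = torch.arange(8).reshape(1, 4, 2)  # 4 codebooks, 2 prompt steps
_, mask = toy_delay_pattern(codes, bos=9, pad=10, max_length=6)
print(mask[0])
# tensor([[ 0,  1, -1, -1, -1, 10],
#         [ 9,  2,  3, -1, -1, -1],
#         [ 9,  4,  5, -1, -1, -1],
#         [ 9,  6,  7, -1, -1, -1]])
```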
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 472966,
"end": 473434
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of ArchiveProjectV2Item"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "item")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
item = sgqlc.types.Field("ProjectV2Item", graphql_name="item")
"""The item archived from the project."""
| ArchiveProjectV2ItemPayload |
python | getsentry__sentry | src/sentry/api/serializers/models/team.py | {
"start": 5686,
"end": 11086
} | class ____(Serializer):
expand: Sequence[str] | None
collapse: Sequence[str] | None
access: Access | None
def __init__(
self,
collapse: Sequence[str] | None = None,
expand: Sequence[str] | None = None,
access: Access | None = None,
):
self.collapse = collapse
self.expand = expand
self.access = access
def _expand(self, key: str) -> bool:
if self.expand is None:
return False
return key in self.expand
def _collapse(self, key: str) -> bool:
if self.collapse is None:
return False
return key in self.collapse
def get_attrs(
self, item_list: Sequence[Team], user: User | RpcUser | AnonymousUser, **kwargs: Any
) -> dict[Team, dict[str, Any]]:
from sentry.api.serializers.models.project import ProjectSerializer
request = env.request
org_ids = {t.organization_id for t in item_list}
assert len(org_ids) == 1, "Cross organization query for teams"
optimization = (
maybe_singular_rpc_access_org_context(self.access, org_ids) if self.access else None
)
roles_by_org = get_org_roles(org_ids, user, optimization=optimization)
member_totals = get_member_totals(item_list, user)
team_memberships = _get_team_memberships(item_list, user, optimization=optimization)
access_requests = get_access_requests(item_list, user)
is_superuser = request and is_active_superuser(request) and request.user == user
result: dict[Team, dict[str, Any]] = {}
organization = Organization.objects.get_from_cache(id=list(org_ids)[0])
for team in item_list:
is_member = team.id in team_memberships
org_role = roles_by_org.get(team.organization_id)
team_role_id = team_memberships.get(team.id)
team_role_scopes: frozenset[str] = frozenset()
has_access = bool(
is_member
or is_superuser
or organization.flags.allow_joinleave
or (org_role and roles.get(org_role).is_global)
)
if has_access:
effective_team_role = (
team_roles.get(team_role_id) if team_role_id else team_roles.get_default()
)
if is_superuser:
org_role = organization_roles.get_top_dog().id
if org_role:
minimum_team_role = roles.get_minimum_team_role(org_role)
if minimum_team_role.priority > effective_team_role.priority:
effective_team_role = minimum_team_role
team_role_scopes = effective_team_role.scopes
team_role_id = effective_team_role.id
result[team] = {
"pending_request": team.id in access_requests,
"is_member": is_member,
"team_role": team_role_id if is_member else None,
"access": team_role_scopes,
"has_access": has_access,
"member_count": member_totals.get(team.id, 0),
}
if self._expand("projects"):
project_teams = ProjectTeam.objects.get_for_teams_with_org_cache(item_list)
projects = [pt.project for pt in project_teams]
projects_by_id = {
project.id: data
for project, data in zip(
projects,
serialize(projects, user, ProjectSerializer(collapse=["unusedFeatures"])),
)
}
project_map = defaultdict(list)
for project_team in project_teams:
project_map[project_team.team_id].append(projects_by_id[project_team.project_id])
for team in item_list:
result[team]["projects"] = project_map[team.id]
if self._expand("externalTeams"):
external_actors = list(
ExternalActor.objects.filter(team_id__in={team.id for team in item_list})
)
external_teams_map = defaultdict(list)
serialized_list = serialize(external_actors, user, key="team")
for serialized in serialized_list:
external_teams_map[serialized["teamId"]].append(serialized)
for team in item_list:
result[team]["externalTeams"] = external_teams_map[str(team.id)]
return result
def serialize(
self,
obj: Team,
attrs: Mapping[str, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> BaseTeamSerializerResponse:
return {
"id": str(obj.id),
"slug": obj.slug,
"name": obj.name,
"dateCreated": obj.date_added,
"isMember": attrs["is_member"],
"teamRole": attrs["team_role"],
"flags": {"idp:provisioned": bool(obj.idp_provisioned)},
"access": attrs["access"],
"hasAccess": attrs["has_access"],
"isPending": attrs["pending_request"],
"memberCount": attrs["member_count"],
# Teams only have letter avatars.
"avatar": {"avatarType": "letter_avatar", "avatarUuid": None},
}
# See TeamSerializerResponse for explanation as to why this is needed
| BaseTeamSerializer |
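A generic standalone sketch (not Sentry's actual Serializer base class; names are illustrative) of the two-phase pattern used above: get_attrs computes per-item context for the whole list in one bulk pass, then serialize renders each item from that context.

```python
from collections import namedtuple

Team = namedtuple("Team", ["id", "name"])

class TwoPhaseSerializer:
    def get_attrs(self, item_list, user):
        member_team_ids = {1, 3}  # stand-in for a single bulk membership query
        return {team: {"is_member": team.id in member_team_ids} for team in item_list}

    def serialize(self, obj, attrs, user):
        return {"id": str(obj.id), "name": obj.name, "isMember": attrs["is_member"]}

def serialize_all(items, user, serializer):
    attrs = serializer.get_attrs(items, user)          # one pass over the list
    return [serializer.serialize(item, attrs[item], user) for item in items]

print(serialize_all([Team(1, "ops"), Team(2, "web")], user=None,
                    serializer=TwoPhaseSerializer()))
```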
python | PrefectHQ__prefect | src/prefect/events/clients.py | {
"start": 5372,
"end": 5542
} | class ____(EventsClient):
"""A Prefect Events client implementation that does nothing"""
async def _emit(self, event: Event) -> None:
pass
| NullEventsClient |
python | crytic__slither | slither/slithir/operations/high_level_call.py | {
"start": 768,
"end": 7178
} | class ____(Call, OperationWithLValue):
"""
High level message call
"""
# pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(
self,
destination: SourceMapping,
function_name: Constant,
nbr_arguments: int,
result: Optional[Union[TemporaryVariable, TupleVariable, TemporaryVariableSSA]],
type_call: str,
names: Optional[List[str]] = None,
) -> None:
"""
#### Parameters
names -
For calls of the form f({argName1 : arg1, ...}), the names of parameters listed in call order.
Otherwise, None.
"""
assert isinstance(function_name, Constant)
assert is_valid_lvalue(result) or result is None
self._check_destination(destination)
super().__init__(names=names)
# Contract is only possible for library call, which inherits from highlevelcall
self._destination: Union[Variable, SolidityVariable, Contract] = destination # type: ignore
self._function_name = function_name
self._nbr_arguments = nbr_arguments
self._type_call = type_call
self._lvalue = result
self._callid = None # only used if gas/value != 0
self._function_instance = None
self._call_value = None
self._call_gas = None
# Development function, to be removed once the code is stable
# It is overridden by LibraryCall
def _check_destination(self, destination: Union[Variable, SolidityVariable, Contract]) -> None:
assert isinstance(destination, (Variable, SolidityVariable))
@property
def call_id(self):
return self._callid
@call_id.setter
def call_id(self, c):
self._callid = c
@property
def call_value(self):
return self._call_value
@call_value.setter
def call_value(self, v):
self._call_value = v
@property
def call_gas(self):
return self._call_gas
@call_gas.setter
def call_gas(self, v):
self._call_gas = v
@property
def read(self) -> List[SourceMapping]:
all_read = [self.destination, self.call_gas, self.call_value] + self._unroll(self.arguments)
# remove None
return [x for x in all_read if x]
@property
def destination(self) -> Union[Variable, SolidityVariable, Contract]:
"""
Return a variable or a solidityVariable
Contract is only possible for LibraryCall
Returns:
"""
return self._destination
@property
def function_name(self) -> Constant:
return self._function_name
@property
def function(self) -> Union[Function, Variable]:
return self._function_instance
@function.setter
def function(self, function):
self._function_instance = function
@property
def nbr_arguments(self) -> int:
return self._nbr_arguments
@property
def type_call(self) -> str:
return self._type_call
###################################################################################
###################################################################################
# region Analyses
###################################################################################
###################################################################################
def is_static_call(self) -> bool:
# If solidity >0.5, STATICCALL is used
if self.compilation_unit.solc_version and self.compilation_unit.solc_version >= "0.5.0":
if isinstance(self.function, Function) and (self.function.view or self.function.pure):
return True
if isinstance(self.function, Variable):
return True
return False
def can_reenter(self, callstack: Optional[List[Union[Function, Variable]]] = None) -> bool:
"""
Must be called after slithIR analysis pass
For Solidity > 0.5, filter access to public variables and constant/pure/view
For call to this. check if the destination can re-enter
:param callstack: check for recursion
:return: bool
"""
if self.is_static_call():
return False
# If there is a call to itself
# We can check that the function called is
# reentrancy-safe
if self.destination == SolidityVariable("this"):
if isinstance(self.function, Variable):
return False
# In case of recursion, return False
callstack = [] if callstack is None else callstack
if self.function in callstack:
return False
callstack = callstack + [self.function]
if self.function.can_reenter(callstack):
return True
if isinstance(self.destination, Variable):
if not self.destination.is_reentrant:
return False
return True
def can_send_eth(self) -> bool:
"""
Must be called after slithIR analysis pass
:return: bool
"""
return self._call_value is not None
# endregion
###################################################################################
###################################################################################
# region Built in
###################################################################################
###################################################################################
def __str__(self):
value = ""
gas = ""
if self.call_value:
value = f"value:{self.call_value}"
if self.call_gas:
gas = f"gas:{self.call_gas}"
arguments = []
if self.arguments:
arguments = self.arguments
txt = "{}HIGH_LEVEL_CALL, dest:{}({}), function:{}, arguments:{} {} {}"
if not self.lvalue:
lvalue = ""
elif isinstance(self.lvalue.type, (list,)):
lvalue = f"{self.lvalue}({','.join(str(x) for x in self.lvalue.type)}) = "
else:
lvalue = f"{self.lvalue}({self.lvalue.type}) = "
return txt.format(
lvalue,
self.destination,
self.destination.type,
self.function_name,
[str(x) for x in arguments],
value,
gas,
)
| HighLevelCall |
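A generic standalone sketch (illustrative names only, not slither's API) of the callstack guard can_reenter uses above: functions already on the stack are treated as non-reentrant on that path, so recursive call graphs terminate instead of looping forever.

```python
def can_reenter(fn, call_graph, callstack=None):
    callstack = [] if callstack is None else callstack
    if fn in callstack:  # recursion: assume this path is safe rather than recursing forever
        return False
    if call_graph.get(fn, {}).get("external_call", False):
        return True
    return any(can_reenter(callee, call_graph, callstack + [fn])
               for callee in call_graph.get(fn, {}).get("calls", []))

graph = {
    "withdraw": {"external_call": True, "calls": []},
    "helper": {"external_call": False, "calls": ["withdraw", "helper"]},
}
print(can_reenter("helper", graph))  # True, via the call to withdraw
```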
python | Netflix__metaflow | metaflow/plugins/pypi/micromamba.py | {
"start": 327,
"end": 773
} | class ____(MetaflowException):
headline = "Micromamba ran into an error while setting up environment"
def __init__(self, error):
if isinstance(error, (list,)):
error = "\n".join(error)
msg = "{error}".format(error=error)
super(MicromambaException, self).__init__(msg)
GLIBC_VERSION = os.environ.get("CONDA_OVERRIDE_GLIBC", "2.38")
_double_equal_match = re.compile("==(?=[<=>!~])")
| MicromambaException |
python | pytest-dev__pytest | testing/code/test_excinfo.py | {
"start": 18060,
"end": 68277
} | class ____:
@pytest.fixture
def importasmod(self, tmp_path: Path, _sys_snapshot):
def importasmod(source):
source = textwrap.dedent(source)
modpath = tmp_path.joinpath("mod.py")
tmp_path.joinpath("__init__.py").touch()
modpath.write_text(source, encoding="utf-8")
importlib.invalidate_caches()
return import_path(
modpath, root=tmp_path, consider_namespace_packages=False
)
return importasmod
def test_repr_source(self):
pr = FormattedExcinfo()
source = _pytest._code.Source(
"""\
def f(x):
pass
"""
).strip()
pr.flow_marker = "|" # type: ignore[misc]
lines = pr.get_source(source, 0)
assert len(lines) == 2
assert lines[0] == "| def f(x):"
assert lines[1] == " pass"
def test_repr_source_out_of_bounds(self):
pr = FormattedExcinfo()
source = _pytest._code.Source(
"""\
def f(x):
pass
"""
).strip()
pr.flow_marker = "|" # type: ignore[misc]
lines = pr.get_source(source, 100)
assert len(lines) == 1
assert lines[0] == "| ???"
lines = pr.get_source(source, -100)
assert len(lines) == 1
assert lines[0] == "| ???"
def test_repr_source_excinfo(self) -> None:
"""Check if indentation is right."""
try:
def f():
_ = 1 / 0
f()
except BaseException:
excinfo = _pytest._code.ExceptionInfo.from_current()
else:
assert False, "did not raise"
pr = FormattedExcinfo()
source = pr._getentrysource(excinfo.traceback[-1])
assert source is not None
lines = pr.get_source(source, 1, excinfo)
for line in lines:
print(line)
assert lines == [
" def f():",
"> _ = 1 / 0",
"E ZeroDivisionError: division by zero",
]
def test_repr_source_not_existing(self):
pr = FormattedExcinfo()
co = compile("raise ValueError()", "", "exec")
try:
exec(co)
except ValueError:
excinfo = _pytest._code.ExceptionInfo.from_current()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
assert repr.chain[0][0].reprentries[1].lines[0] == "> ???"
def test_repr_many_line_source_not_existing(self):
pr = FormattedExcinfo()
co = compile(
"""
a = 1
raise ValueError()
""",
"",
"exec",
)
try:
exec(co)
except ValueError:
excinfo = _pytest._code.ExceptionInfo.from_current()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
assert repr.chain[0][0].reprentries[1].lines[0] == "> ???"
def test_repr_source_failing_fullsource(self, monkeypatch) -> None:
pr = FormattedExcinfo()
try:
_ = 1 / 0
except ZeroDivisionError:
excinfo = ExceptionInfo.from_current()
with monkeypatch.context() as m:
m.setattr(_pytest._code.Code, "fullsource", property(lambda self: None))
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
assert repr.chain[0][0].reprentries[0].lines[0] == "> ???"
def test_repr_local(self) -> None:
p = FormattedExcinfo(showlocals=True)
loc = {"y": 5, "z": 7, "x": 3, "@x": 2, "__builtins__": {}}
reprlocals = p.repr_locals(loc)
assert reprlocals is not None
assert reprlocals.lines
assert reprlocals.lines[0] == "__builtins__ = <builtins>"
assert reprlocals.lines[1] == "x = 3"
assert reprlocals.lines[2] == "y = 5"
assert reprlocals.lines[3] == "z = 7"
def test_repr_local_with_error(self) -> None:
class ObjWithErrorInRepr:
def __repr__(self):
raise NotImplementedError
p = FormattedExcinfo(showlocals=True, truncate_locals=False)
loc = {"x": ObjWithErrorInRepr(), "__builtins__": {}}
reprlocals = p.repr_locals(loc)
assert reprlocals is not None
assert reprlocals.lines
assert reprlocals.lines[0] == "__builtins__ = <builtins>"
assert "[NotImplementedError() raised in repr()]" in reprlocals.lines[1]
def test_repr_local_with_exception_in_class_property(self) -> None:
class ExceptionWithBrokenClass(Exception):
# Type ignored because it's bypassed intentionally.
@property # type: ignore
def __class__(self):
raise TypeError("boom!")
class ObjWithErrorInRepr:
def __repr__(self):
raise ExceptionWithBrokenClass()
p = FormattedExcinfo(showlocals=True, truncate_locals=False)
loc = {"x": ObjWithErrorInRepr(), "__builtins__": {}}
reprlocals = p.repr_locals(loc)
assert reprlocals is not None
assert reprlocals.lines
assert reprlocals.lines[0] == "__builtins__ = <builtins>"
assert "[ExceptionWithBrokenClass() raised in repr()]" in reprlocals.lines[1]
def test_repr_local_truncated(self) -> None:
loc = {"l": [i for i in range(10)]}
p = FormattedExcinfo(showlocals=True)
truncated_reprlocals = p.repr_locals(loc)
assert truncated_reprlocals is not None
assert truncated_reprlocals.lines
assert truncated_reprlocals.lines[0] == "l = [0, 1, 2, 3, 4, 5, ...]"
q = FormattedExcinfo(showlocals=True, truncate_locals=False)
full_reprlocals = q.repr_locals(loc)
assert full_reprlocals is not None
assert full_reprlocals.lines
assert full_reprlocals.lines[0] == "l = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]"
def test_repr_args_not_truncated(self, importasmod) -> None:
mod = importasmod(
"""
def func1(m):
raise ValueError("hello\\nworld")
"""
)
excinfo = pytest.raises(ValueError, mod.func1, "m" * 500)
excinfo.traceback = excinfo.traceback.filter(excinfo)
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True, truncate_args=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs is not None
arg1 = cast(str, reprfuncargs.args[0][1])
assert len(arg1) < 500
assert "..." in arg1
# again without truncate
p = FormattedExcinfo(funcargs=True, truncate_args=False)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs is not None
assert reprfuncargs.args[0] == ("m", repr("m" * 500))
assert "..." not in cast(str, reprfuncargs.args[0][1])
def test_repr_tracebackentry_lines(self, importasmod) -> None:
mod = importasmod(
"""
def func1():
raise ValueError("hello\\nworld")
"""
)
excinfo = pytest.raises(ValueError, mod.func1)
excinfo.traceback = excinfo.traceback.filter(excinfo)
p = FormattedExcinfo()
reprtb = p.repr_traceback_entry(excinfo.traceback[-1])
# test as intermittent entry
lines = reprtb.lines
assert lines[0] == " def func1():"
assert lines[1] == '> raise ValueError("hello\\nworld")'
# test as last entry
p = FormattedExcinfo(showlocals=True)
repr_entry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = repr_entry.lines
assert lines[0] == " def func1():"
assert lines[1] == '> raise ValueError("hello\\nworld")'
assert lines[2] == "E ValueError: hello"
assert lines[3] == "E world"
assert not lines[4:]
loc = repr_entry.reprfileloc
assert loc is not None
assert loc.path == mod.__file__
assert loc.lineno == 3
# assert loc.message == "ValueError: hello"
def test_repr_tracebackentry_lines2(self, importasmod, tw_mock) -> None:
mod = importasmod(
"""
def func1(m, x, y, z):
raise ValueError("hello\\nworld")
"""
)
excinfo = pytest.raises(ValueError, mod.func1, "m" * 90, 5, 13, "z" * 120)
excinfo.traceback = excinfo.traceback.filter(excinfo)
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs is not None
assert reprfuncargs.args[0] == ("m", repr("m" * 90))
assert reprfuncargs.args[1] == ("x", "5")
assert reprfuncargs.args[2] == ("y", "13")
assert reprfuncargs.args[3] == ("z", repr("z" * 120))
p = FormattedExcinfo(funcargs=True)
repr_entry = p.repr_traceback_entry(entry)
assert repr_entry.reprfuncargs is not None
assert repr_entry.reprfuncargs.args == reprfuncargs.args
repr_entry.toterminal(tw_mock)
assert tw_mock.lines[0] == "m = " + repr("m" * 90)
assert tw_mock.lines[1] == "x = 5, y = 13"
assert tw_mock.lines[2] == "z = " + repr("z" * 120)
def test_repr_tracebackentry_lines_var_kw_args(self, importasmod, tw_mock) -> None:
mod = importasmod(
"""
def func1(x, *y, **z):
raise ValueError("hello\\nworld")
"""
)
excinfo = pytest.raises(ValueError, mod.func1, "a", "b", c="d")
excinfo.traceback = excinfo.traceback.filter(excinfo)
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs is not None
assert reprfuncargs.args[0] == ("x", repr("a"))
assert reprfuncargs.args[1] == ("y", repr(("b",)))
assert reprfuncargs.args[2] == ("z", repr({"c": "d"}))
p = FormattedExcinfo(funcargs=True)
repr_entry = p.repr_traceback_entry(entry)
assert repr_entry.reprfuncargs
assert repr_entry.reprfuncargs.args == reprfuncargs.args
repr_entry.toterminal(tw_mock)
assert tw_mock.lines[0] == "x = 'a', y = ('b',), z = {'c': 'd'}"
def test_repr_tracebackentry_short(self, importasmod) -> None:
mod = importasmod(
"""
def func1():
raise ValueError("hello")
def entry():
func1()
"""
)
excinfo = pytest.raises(ValueError, mod.entry)
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
basename = Path(mod.__file__).name
assert lines[0] == " func1()"
assert reprtb.reprfileloc is not None
assert basename in str(reprtb.reprfileloc.path)
assert reprtb.reprfileloc.lineno == 5
# test last entry
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprtb.lines
assert lines[0] == ' raise ValueError("hello")'
assert lines[1] == "E ValueError: hello"
assert reprtb.reprfileloc is not None
assert basename in str(reprtb.reprfileloc.path)
assert reprtb.reprfileloc.lineno == 3
@pytest.mark.skipif(
"sys.version_info < (3,11)",
reason="Column level traceback info added in python 3.11",
)
def test_repr_traceback_entry_short_carets(self, importasmod) -> None:
mod = importasmod(
"""
def div_by_zero():
return 1 / 0
def func1():
return 42 + div_by_zero()
def entry():
func1()
"""
)
excinfo = pytest.raises(ZeroDivisionError, mod.entry)
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-3])
assert len(reprtb.lines) == 1
assert reprtb.lines[0] == " func1()"
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
assert len(reprtb.lines) == 2
assert reprtb.lines[0] == " return 42 + div_by_zero()"
assert reprtb.lines[1] == " ^^^^^^^^^^^^^"
reprtb = p.repr_traceback_entry(excinfo.traceback[-1])
assert len(reprtb.lines) == 2
assert reprtb.lines[0] == " return 1 / 0"
assert reprtb.lines[1] == " ^^^^^"
def test_repr_tracebackentry_no(self, importasmod):
mod = importasmod(
"""
def func1():
raise ValueError("hello")
def entry():
func1()
"""
)
excinfo = pytest.raises(ValueError, mod.entry)
p = FormattedExcinfo(style="no")
p.repr_traceback_entry(excinfo.traceback[-2])
p = FormattedExcinfo(style="no")
reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprentry.lines
assert lines[0] == "E ValueError: hello"
assert not lines[1:]
def test_repr_traceback_tbfilter(self, importasmod):
mod = importasmod(
"""
def f(x):
raise ValueError(x)
def entry():
f(0)
"""
)
excinfo = pytest.raises(ValueError, mod.entry)
p = FormattedExcinfo(tbfilter=True)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 2
p = FormattedExcinfo(tbfilter=False)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 3
def test_traceback_short_no_source(
self,
importasmod,
monkeypatch: pytest.MonkeyPatch,
) -> None:
mod = importasmod(
"""
def func1():
raise ValueError("hello")
def entry():
func1()
"""
)
excinfo = pytest.raises(ValueError, mod.entry)
from _pytest._code.code import Code
with monkeypatch.context() as mp:
mp.setattr(Code, "path", "bogus")
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
last_p = FormattedExcinfo(style="short")
last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
last_lines = last_reprtb.lines
assert lines[0] == " func1()"
assert last_lines[0] == ' raise ValueError("hello")'
assert last_lines[1] == "E ValueError: hello"
def test_repr_traceback_and_excinfo(self, importasmod) -> None:
mod = importasmod(
"""
def f(x):
raise ValueError(x)
def entry():
f(0)
"""
)
excinfo = pytest.raises(ValueError, mod.entry)
styles: tuple[TracebackStyle, ...] = ("long", "short")
for style in styles:
p = FormattedExcinfo(style=style)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 2
assert reprtb.style == style
assert not reprtb.extraline
repr = p.repr_excinfo(excinfo)
assert repr.reprtraceback
assert len(repr.reprtraceback.reprentries) == len(reprtb.reprentries)
assert repr.chain[0][0]
assert len(repr.chain[0][0].reprentries) == len(reprtb.reprentries)
assert repr.reprcrash is not None
assert repr.reprcrash.path.endswith("mod.py")
assert repr.reprcrash.message == "ValueError: 0"
def test_repr_traceback_with_invalid_cwd(self, importasmod, monkeypatch) -> None:
mod = importasmod(
"""
def f(x):
raise ValueError(x)
def entry():
f(0)
"""
)
excinfo = pytest.raises(ValueError, mod.entry)
p = FormattedExcinfo(abspath=False)
raised = 0
orig_path_cwd = Path.cwd
def raiseos():
nonlocal raised
upframe = sys._getframe().f_back
assert upframe is not None
if upframe.f_code.co_name == "_makepath":
# Only raise with expected calls, and not accidentally via 'inspect'
# See 79ae86cc3f76d69460e1c7beca4ce95e68ab80a6
raised += 1
raise OSError(2, "custom_oserror")
return orig_path_cwd()
monkeypatch.setattr(Path, "cwd", raiseos)
assert p._makepath(Path(__file__)) == __file__
assert raised == 1
repr_tb = p.repr_traceback(excinfo)
matcher = LineMatcher(str(repr_tb).splitlines())
matcher.fnmatch_lines(
[
"def entry():",
"> f(0)",
"",
f"{mod.__file__}:5: ",
"_ _ *",
"",
" def f(x):",
"> raise ValueError(x)",
"E ValueError: 0",
"",
f"{mod.__file__}:3: ValueError",
]
)
assert raised == 3
def test_repr_excinfo_addouterr(self, importasmod, tw_mock):
mod = importasmod(
"""
def entry():
raise ValueError()
"""
)
excinfo = pytest.raises(ValueError, mod.entry)
repr = excinfo.getrepr()
repr.addsection("title", "content")
repr.toterminal(tw_mock)
assert tw_mock.lines[-1] == "content"
assert tw_mock.lines[-2] == ("-", "title")
def test_repr_excinfo_reprcrash(self, importasmod) -> None:
mod = importasmod(
"""
def entry():
raise ValueError()
"""
)
excinfo = pytest.raises(ValueError, mod.entry)
repr = excinfo.getrepr()
assert repr.reprcrash is not None
assert repr.reprcrash.path.endswith("mod.py")
assert repr.reprcrash.lineno == 3
assert repr.reprcrash.message == "ValueError"
assert str(repr.reprcrash).endswith("mod.py:3: ValueError")
def test_repr_traceback_recursion(self, importasmod):
mod = importasmod(
"""
def rec2(x):
return rec1(x+1)
def rec1(x):
return rec2(x-1)
def entry():
rec1(42)
"""
)
excinfo = pytest.raises(RuntimeError, mod.entry)
for style in ("short", "long", "no"):
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback(excinfo)
assert reprtb.extraline == "!!! Recursion detected (same locals & position)"
assert str(reprtb)
def test_reprexcinfo_getrepr(self, importasmod) -> None:
mod = importasmod(
"""
def f(x):
raise ValueError(x)
def entry():
f(0)
"""
)
excinfo = pytest.raises(ValueError, mod.entry)
styles: tuple[TracebackStyle, ...] = ("short", "long", "no")
for style in styles:
for showlocals in (True, False):
repr = excinfo.getrepr(style=style, showlocals=showlocals)
assert repr.reprtraceback.style == style
assert isinstance(repr, ExceptionChainRepr)
for r in repr.chain:
assert r[0].style == style
def test_reprexcinfo_unicode(self):
from _pytest._code.code import TerminalRepr
class MyRepr(TerminalRepr):
def toterminal(self, tw: TerminalWriter) -> None:
tw.line("я")
x = str(MyRepr())
assert x == "я"
def test_toterminal_long(self, importasmod, tw_mock):
mod = importasmod(
"""
def g(x):
raise ValueError(x)
def f():
g(3)
"""
)
excinfo = pytest.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter(excinfo)
repr = excinfo.getrepr()
repr.toterminal(tw_mock)
assert tw_mock.lines[0] == ""
tw_mock.lines.pop(0)
assert tw_mock.lines[0] == " def f():"
assert tw_mock.lines[1] == "> g(3)"
assert tw_mock.lines[2] == ""
line = tw_mock.get_write_msg(3)
assert line.endswith("mod.py")
assert tw_mock.lines[4] == (":5: ")
assert tw_mock.lines[5] == ("_ ", None)
assert tw_mock.lines[6] == ""
assert tw_mock.lines[7] == " def g(x):"
assert tw_mock.lines[8] == "> raise ValueError(x)"
assert tw_mock.lines[9] == "E ValueError: 3"
assert tw_mock.lines[10] == ""
line = tw_mock.get_write_msg(11)
assert line.endswith("mod.py")
assert tw_mock.lines[12] == ":3: ValueError"
def test_toterminal_long_missing_source(
self, importasmod, tmp_path: Path, tw_mock
) -> None:
mod = importasmod(
"""
def g(x):
raise ValueError(x)
def f():
g(3)
"""
)
excinfo = pytest.raises(ValueError, mod.f)
tmp_path.joinpath("mod.py").unlink()
excinfo.traceback = excinfo.traceback.filter(excinfo)
repr = excinfo.getrepr()
repr.toterminal(tw_mock)
assert tw_mock.lines[0] == ""
tw_mock.lines.pop(0)
assert tw_mock.lines[0] == "> ???"
assert tw_mock.lines[1] == ""
line = tw_mock.get_write_msg(2)
assert line.endswith("mod.py")
assert tw_mock.lines[3] == ":5: "
assert tw_mock.lines[4] == ("_ ", None)
assert tw_mock.lines[5] == ""
assert tw_mock.lines[6] == "> ???"
assert tw_mock.lines[7] == "E ValueError: 3"
assert tw_mock.lines[8] == ""
line = tw_mock.get_write_msg(9)
assert line.endswith("mod.py")
assert tw_mock.lines[10] == ":3: ValueError"
def test_toterminal_long_incomplete_source(
self, importasmod, tmp_path: Path, tw_mock
) -> None:
mod = importasmod(
"""
def g(x):
raise ValueError(x)
def f():
g(3)
"""
)
excinfo = pytest.raises(ValueError, mod.f)
tmp_path.joinpath("mod.py").write_text("asdf", encoding="utf-8")
excinfo.traceback = excinfo.traceback.filter(excinfo)
repr = excinfo.getrepr()
repr.toterminal(tw_mock)
assert tw_mock.lines[0] == ""
tw_mock.lines.pop(0)
assert tw_mock.lines[0] == "> ???"
assert tw_mock.lines[1] == ""
line = tw_mock.get_write_msg(2)
assert line.endswith("mod.py")
assert tw_mock.lines[3] == ":5: "
assert tw_mock.lines[4] == ("_ ", None)
assert tw_mock.lines[5] == ""
assert tw_mock.lines[6] == "> ???"
assert tw_mock.lines[7] == "E ValueError: 3"
assert tw_mock.lines[8] == ""
line = tw_mock.get_write_msg(9)
assert line.endswith("mod.py")
assert tw_mock.lines[10] == ":3: ValueError"
def test_toterminal_long_filenames(
self, importasmod, tw_mock, monkeypatch: MonkeyPatch
) -> None:
mod = importasmod(
"""
def f():
raise ValueError()
"""
)
excinfo = pytest.raises(ValueError, mod.f)
path = Path(mod.__file__)
monkeypatch.chdir(path.parent)
repr = excinfo.getrepr(abspath=False)
repr.toterminal(tw_mock)
x = bestrelpath(Path.cwd(), path)
if len(x) < len(str(path)):
msg = tw_mock.get_write_msg(-2)
assert msg == "mod.py"
assert tw_mock.lines[-1] == ":3: ValueError"
repr = excinfo.getrepr(abspath=True)
repr.toterminal(tw_mock)
msg = tw_mock.get_write_msg(-2)
assert msg == str(path)
line = tw_mock.lines[-1]
assert line == ":3: ValueError"
def test_toterminal_value(self, importasmod, tw_mock):
mod = importasmod(
"""
def g(x):
raise ValueError(x)
def f():
g('some_value')
"""
)
excinfo = pytest.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter(excinfo)
repr = excinfo.getrepr(style="value")
repr.toterminal(tw_mock)
assert tw_mock.get_write_msg(0) == "some_value"
assert tw_mock.get_write_msg(1) == "\n"
@pytest.mark.parametrize(
"reproptions",
[
pytest.param(
{
"style": style,
"showlocals": showlocals,
"funcargs": funcargs,
"tbfilter": tbfilter,
},
id=f"style={style},showlocals={showlocals},funcargs={funcargs},tbfilter={tbfilter}",
)
for style in ["long", "short", "line", "no", "native", "value", "auto"]
for showlocals in (True, False)
for tbfilter in (True, False)
for funcargs in (True, False)
],
)
def test_format_excinfo(self, reproptions: dict[str, Any]) -> None:
def bar():
assert False, "some error"
def foo():
bar()
# using inline functions as opposed to importasmod so we get source code lines
# in the tracebacks (otherwise getinspect doesn't find the source code).
with pytest.raises(AssertionError) as excinfo:
foo()
file = io.StringIO()
tw = TerminalWriter(file=file)
repr = excinfo.getrepr(**reproptions)
repr.toterminal(tw)
assert file.getvalue()
def test_traceback_repr_style(self, importasmod, tw_mock):
mod = importasmod(
"""
def f():
g()
def g():
h()
def h():
i()
def i():
raise ValueError()
"""
)
excinfo = pytest.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter(excinfo)
excinfo.traceback = _pytest._code.Traceback(
entry if i not in (1, 2) else entry.with_repr_style("short")
for i, entry in enumerate(excinfo.traceback)
)
r = excinfo.getrepr(style="long")
r.toterminal(tw_mock)
for line in tw_mock.lines:
print(line)
assert tw_mock.lines[0] == ""
assert tw_mock.lines[1] == " def f():"
assert tw_mock.lines[2] == "> g()"
assert tw_mock.lines[3] == ""
msg = tw_mock.get_write_msg(4)
assert msg.endswith("mod.py")
assert tw_mock.lines[5] == ":3: "
assert tw_mock.lines[6] == ("_ ", None)
tw_mock.get_write_msg(7)
assert tw_mock.lines[8].endswith("in g")
assert tw_mock.lines[9] == " h()"
tw_mock.get_write_msg(10)
assert tw_mock.lines[11].endswith("in h")
assert tw_mock.lines[12] == " i()"
assert tw_mock.lines[13] == ("_ ", None)
assert tw_mock.lines[14] == ""
assert tw_mock.lines[15] == " def i():"
assert tw_mock.lines[16] == "> raise ValueError()"
assert tw_mock.lines[17] == "E ValueError"
assert tw_mock.lines[18] == ""
msg = tw_mock.get_write_msg(19)
msg.endswith("mod.py")
assert tw_mock.lines[20] == ":9: ValueError"
def test_exc_chain_repr(self, importasmod, tw_mock):
mod = importasmod(
"""
class Err(Exception):
pass
def f():
try:
g()
except Exception as e:
raise Err() from e
finally:
h()
def g():
raise ValueError()
def h():
if True: raise AttributeError()
"""
)
excinfo = pytest.raises(AttributeError, mod.f)
r = excinfo.getrepr(style="long")
r.toterminal(tw_mock)
for line in tw_mock.lines:
print(line)
assert tw_mock.lines[0] == ""
assert tw_mock.lines[1] == " def f():"
assert tw_mock.lines[2] == " try:"
assert tw_mock.lines[3] == "> g()"
assert tw_mock.lines[4] == ""
line = tw_mock.get_write_msg(5)
assert line.endswith("mod.py")
assert tw_mock.lines[6] == ":6: "
assert tw_mock.lines[7] == ("_ ", None)
assert tw_mock.lines[8] == ""
assert tw_mock.lines[9] == " def g():"
assert tw_mock.lines[10] == "> raise ValueError()"
assert tw_mock.lines[11] == "E ValueError"
assert tw_mock.lines[12] == ""
line = tw_mock.get_write_msg(13)
assert line.endswith("mod.py")
assert tw_mock.lines[14] == ":12: ValueError"
assert tw_mock.lines[15] == ""
assert (
tw_mock.lines[16]
== "The above exception was the direct cause of the following exception:"
)
assert tw_mock.lines[17] == ""
assert tw_mock.lines[18] == " def f():"
assert tw_mock.lines[19] == " try:"
assert tw_mock.lines[20] == " g()"
assert tw_mock.lines[21] == " except Exception as e:"
assert tw_mock.lines[22] == "> raise Err() from e"
assert tw_mock.lines[23] == "E test_exc_chain_repr0.mod.Err"
assert tw_mock.lines[24] == ""
line = tw_mock.get_write_msg(25)
assert line.endswith("mod.py")
assert tw_mock.lines[26] == ":8: Err"
assert tw_mock.lines[27] == ""
assert (
tw_mock.lines[28]
== "During handling of the above exception, another exception occurred:"
)
assert tw_mock.lines[29] == ""
assert tw_mock.lines[30] == " def f():"
assert tw_mock.lines[31] == " try:"
assert tw_mock.lines[32] == " g()"
assert tw_mock.lines[33] == " except Exception as e:"
assert tw_mock.lines[34] == " raise Err() from e"
assert tw_mock.lines[35] == " finally:"
assert tw_mock.lines[36] == "> h()"
assert tw_mock.lines[37] == ""
line = tw_mock.get_write_msg(38)
assert line.endswith("mod.py")
assert tw_mock.lines[39] == ":10: "
assert tw_mock.lines[40] == ("_ ", None)
assert tw_mock.lines[41] == ""
assert tw_mock.lines[42] == " def h():"
# On python 3.11 and greater, check for carets in the traceback.
if sys.version_info >= (3, 11):
assert tw_mock.lines[43] == "> if True: raise AttributeError()"
assert tw_mock.lines[44] == " ^^^^^^^^^^^^^^^^^^^^^^"
assert tw_mock.lines[45] == "E AttributeError"
assert tw_mock.lines[46] == ""
line = tw_mock.get_write_msg(47)
assert line.endswith("mod.py")
assert tw_mock.lines[48] == ":15: AttributeError"
else:
assert tw_mock.lines[43] == "> if True: raise AttributeError()"
assert tw_mock.lines[44] == "E AttributeError"
assert tw_mock.lines[45] == ""
line = tw_mock.get_write_msg(46)
assert line.endswith("mod.py")
assert tw_mock.lines[47] == ":15: AttributeError"
@pytest.mark.parametrize("mode", ["from_none", "explicit_suppress"])
def test_exc_repr_chain_suppression(self, importasmod, mode, tw_mock):
"""Check that exc repr does not show chained exceptions in Python 3.
- When the exception is raised with "from None"
- Explicitly suppressed with "chain=False" to ExceptionInfo.getrepr().
"""
raise_suffix = " from None" if mode == "from_none" else ""
mod = importasmod(
f"""
def f():
try:
g()
except Exception:
raise AttributeError(){raise_suffix}
def g():
raise ValueError()
"""
)
excinfo = pytest.raises(AttributeError, mod.f)
r = excinfo.getrepr(style="long", chain=mode != "explicit_suppress")
r.toterminal(tw_mock)
for line in tw_mock.lines:
print(line)
assert tw_mock.lines[0] == ""
assert tw_mock.lines[1] == " def f():"
assert tw_mock.lines[2] == " try:"
assert tw_mock.lines[3] == " g()"
assert tw_mock.lines[4] == " except Exception:"
assert tw_mock.lines[5] == f"> raise AttributeError(){raise_suffix}"
assert tw_mock.lines[6] == "E AttributeError"
assert tw_mock.lines[7] == ""
line = tw_mock.get_write_msg(8)
assert line.endswith("mod.py")
assert tw_mock.lines[9] == ":6: AttributeError"
assert len(tw_mock.lines) == 10
@pytest.mark.parametrize(
"reason, description",
[
pytest.param(
"cause",
"The above exception was the direct cause of the following exception:",
id="cause",
),
pytest.param(
"context",
"During handling of the above exception, another exception occurred:",
id="context",
),
],
)
def test_exc_chain_repr_without_traceback(self, importasmod, reason, description):
"""
Handle representation of exception chains where one of the exceptions doesn't have a
real traceback, such as those raised in a subprocess submitted by the multiprocessing
module (#1984).
"""
exc_handling_code = " from e" if reason == "cause" else ""
mod = importasmod(
f"""
def f():
try:
g()
except Exception as e:
raise RuntimeError('runtime problem'){exc_handling_code}
def g():
raise ValueError('invalid value')
"""
)
with pytest.raises(RuntimeError) as excinfo:
mod.f()
# emulate the issue described in #1984
attr = f"__{reason}__"
getattr(excinfo.value, attr).__traceback__ = None
r = excinfo.getrepr()
file = io.StringIO()
tw = TerminalWriter(file=file)
tw.hasmarkup = False
r.toterminal(tw)
matcher = LineMatcher(file.getvalue().splitlines())
matcher.fnmatch_lines(
[
"ValueError: invalid value",
description,
"* except Exception as e:",
"> * raise RuntimeError('runtime problem')" + exc_handling_code,
"E *RuntimeError: runtime problem",
]
)
def test_exc_chain_repr_cycle(self, importasmod, tw_mock):
mod = importasmod(
"""
class Err(Exception):
pass
def fail():
return 0 / 0
def reraise():
try:
fail()
except ZeroDivisionError as e:
raise Err() from e
def unreraise():
try:
reraise()
except Err as e:
raise e.__cause__
"""
)
excinfo = pytest.raises(ZeroDivisionError, mod.unreraise)
r = excinfo.getrepr(style="short")
r.toterminal(tw_mock)
out = "\n".join(line for line in tw_mock.lines if isinstance(line, str))
# Assert highlighting carets in python3.11+
if sys.version_info >= (3, 11):
expected_out = textwrap.dedent(
"""\
:13: in unreraise
reraise()
:10: in reraise
raise Err() from e
E test_exc_chain_repr_cycle0.mod.Err
During handling of the above exception, another exception occurred:
:15: in unreraise
raise e.__cause__
:8: in reraise
fail()
:5: in fail
return 0 / 0
^^^^^
E ZeroDivisionError: division by zero"""
)
else:
expected_out = textwrap.dedent(
"""\
:13: in unreraise
reraise()
:10: in reraise
raise Err() from e
E test_exc_chain_repr_cycle0.mod.Err
During handling of the above exception, another exception occurred:
:15: in unreraise
raise e.__cause__
:8: in reraise
fail()
:5: in fail
return 0 / 0
E ZeroDivisionError: division by zero"""
)
assert out == expected_out
def test_exec_type_error_filter(self, importasmod):
"""See #7742"""
mod = importasmod(
"""\
def f():
exec("a = 1", {}, [])
"""
)
with pytest.raises(TypeError) as excinfo:
mod.f()
# previously crashed with `AttributeError: list has no attribute get`
excinfo.traceback.filter(excinfo)
@pytest.mark.parametrize("style", ["short", "long"])
@pytest.mark.parametrize("encoding", [None, "utf8", "utf16"])
def test_repr_traceback_with_unicode(style, encoding):
if encoding is None:
msg: str | bytes = "☹"
else:
msg = "☹".encode(encoding)
try:
raise RuntimeError(msg)
except RuntimeError:
e_info = ExceptionInfo.from_current()
formatter = FormattedExcinfo(style=style)
repr_traceback = formatter.repr_traceback(e_info)
assert repr_traceback is not None
def test_cwd_deleted(pytester: Pytester) -> None:
pytester.makepyfile(
"""
import os
def test(tmp_path):
os.chdir(tmp_path)
tmp_path.unlink()
assert False
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 1 failed in *"])
result.stdout.no_fnmatch_line("*INTERNALERROR*")
result.stderr.no_fnmatch_line("*INTERNALERROR*")
def test_regression_negative_line_index(pytester: Pytester) -> None:
"""
With Python 3.10 alphas, there was an INTERNALERROR reported in
https://github.com/pytest-dev/pytest/pull/8227
This test ensures it does not regress.
"""
pytester.makepyfile(
"""
import ast
import pytest
def test_literal_eval():
with pytest.raises(ValueError, match="^$"):
ast.literal_eval("pytest")
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 1 failed in *"])
result.stdout.no_fnmatch_line("*INTERNALERROR*")
result.stderr.no_fnmatch_line("*INTERNALERROR*")
@pytest.mark.usefixtures("limited_recursion_depth")
def test_exception_repr_extraction_error_on_recursion():
"""
Ensure we can properly detect a recursion error even
if some locals raise error on comparison (#2459).
"""
class numpy_like:
def __eq__(self, other):
if type(other) is numpy_like:
raise ValueError(
"The truth value of an array "
"with more than one element is ambiguous."
)
def a(x):
return b(numpy_like())
def b(x):
return a(numpy_like())
with pytest.raises(RuntimeError) as excinfo:
a(numpy_like())
matcher = LineMatcher(str(excinfo.getrepr()).splitlines())
matcher.fnmatch_lines(
[
"!!! Recursion error detected, but an error occurred locating the origin of recursion.",
"*The following exception happened*",
"*ValueError: The truth value of an array*",
]
)
@pytest.mark.usefixtures("limited_recursion_depth")
def test_no_recursion_index_on_recursion_error():
"""
Ensure that we don't break in case we can't find the recursion index
during a recursion error (#2486).
"""
class RecursionDepthError:
def __getattr__(self, attr):
return getattr(self, "_" + attr)
with pytest.raises(RuntimeError) as excinfo:
_ = RecursionDepthError().trigger
assert "maximum recursion" in str(excinfo.getrepr())
def _exceptiongroup_common(
pytester: Pytester,
outer_chain: str,
inner_chain: str,
native: bool,
) -> None:
pre_raise = "exceptiongroup." if not native else ""
pre_catch = pre_raise if sys.version_info < (3, 11) else ""
filestr = f"""
{"import exceptiongroup" if not native else ""}
import pytest
def f(): raise ValueError("From f()")
def g(): raise BaseException("From g()")
def inner(inner_chain):
excs = []
for callback in [f, g]:
try:
callback()
except BaseException as err:
excs.append(err)
if excs:
if inner_chain == "none":
raise {pre_raise}BaseExceptionGroup("Oops", excs)
try:
raise SyntaxError()
except SyntaxError as e:
if inner_chain == "from":
raise {pre_raise}BaseExceptionGroup("Oops", excs) from e
else:
raise {pre_raise}BaseExceptionGroup("Oops", excs)
def outer(outer_chain, inner_chain):
try:
inner(inner_chain)
except {pre_catch}BaseExceptionGroup as e:
if outer_chain == "none":
raise
if outer_chain == "from":
raise IndexError() from e
else:
raise IndexError()
def test():
outer("{outer_chain}", "{inner_chain}")
"""
pytester.makepyfile(test_excgroup=filestr)
result = pytester.runpytest()
match_lines = []
if inner_chain in ("another", "from"):
match_lines.append(r"SyntaxError: <no detail available>")
match_lines += [
r" + Exception Group Traceback (most recent call last):",
rf" \| {pre_catch}BaseExceptionGroup: Oops \(2 sub-exceptions\)",
r" \| ValueError: From f\(\)",
r" \| BaseException: From g\(\)",
r"=* short test summary info =*",
]
if outer_chain in ("another", "from"):
match_lines.append(r"FAILED test_excgroup.py::test - IndexError")
else:
match_lines.append(
rf"FAILED test_excgroup.py::test - {pre_catch}BaseExceptionGroup: Oops \(2.*"
)
result.stdout.re_match_lines(match_lines)
# Check for traceback filtering of pytest internals.
result.stdout.no_fnmatch_line("*, line *, in pytest_pyfunc_call")
result.stdout.no_fnmatch_line("*, line *, in pytest_runtest_call")
@pytest.mark.skipif(
sys.version_info < (3, 11), reason="Native ExceptionGroup not implemented"
)
@pytest.mark.parametrize("outer_chain", ["none", "from", "another"])
@pytest.mark.parametrize("inner_chain", ["none", "from", "another"])
def test_native_exceptiongroup(pytester: Pytester, outer_chain, inner_chain) -> None:
_exceptiongroup_common(pytester, outer_chain, inner_chain, native=True)
@pytest.mark.parametrize("outer_chain", ["none", "from", "another"])
@pytest.mark.parametrize("inner_chain", ["none", "from", "another"])
def test_exceptiongroup(pytester: Pytester, outer_chain, inner_chain) -> None:
# with py>=3.11 does not depend on exceptiongroup, though there is a toxenv for it
pytest.importorskip("exceptiongroup")
_exceptiongroup_common(pytester, outer_chain, inner_chain, native=False)
def test_exceptiongroup_short_summary_info(pytester: Pytester):
pytester.makepyfile(
"""
import sys
if sys.version_info < (3, 11):
from exceptiongroup import BaseExceptionGroup, ExceptionGroup
def test_base() -> None:
raise BaseExceptionGroup("NOT IN SUMMARY", [SystemExit("a" * 10)])
def test_nonbase() -> None:
raise ExceptionGroup("NOT IN SUMMARY", [ValueError("a" * 10)])
def test_nested() -> None:
raise ExceptionGroup(
"NOT DISPLAYED", [
ExceptionGroup("NOT IN SUMMARY", [ValueError("a" * 10)])
]
)
def test_multiple() -> None:
raise ExceptionGroup(
"b" * 10,
[
ValueError("NOT IN SUMMARY"),
TypeError("NOT IN SUMMARY"),
]
)
def test_nested_multiple() -> None:
raise ExceptionGroup(
"b" * 10,
[
ExceptionGroup(
"c" * 10,
[
ValueError("NOT IN SUMMARY"),
TypeError("NOT IN SUMMARY"),
]
)
]
)
"""
)
# run with -vv to not truncate summary info, default width in tests is very low
result = pytester.runpytest("-vv")
assert result.ret == 1
backport_str = "exceptiongroup." if sys.version_info < (3, 11) else ""
result.stdout.fnmatch_lines(
[
"*= short test summary info =*",
(
"FAILED test_exceptiongroup_short_summary_info.py::test_base - "
"SystemExit('aaaaaaaaaa') [single exception in BaseExceptionGroup]"
),
(
"FAILED test_exceptiongroup_short_summary_info.py::test_nonbase - "
"ValueError('aaaaaaaaaa') [single exception in ExceptionGroup]"
),
(
"FAILED test_exceptiongroup_short_summary_info.py::test_nested - "
"ValueError('aaaaaaaaaa') [single exception in ExceptionGroup]"
),
(
"FAILED test_exceptiongroup_short_summary_info.py::test_multiple - "
f"{backport_str}ExceptionGroup: bbbbbbbbbb (2 sub-exceptions)"
),
(
"FAILED test_exceptiongroup_short_summary_info.py::test_nested_multiple - "
f"{backport_str}ExceptionGroup: bbbbbbbbbb (1 sub-exception)"
),
"*= 5 failed in *",
]
)
@pytest.mark.parametrize("tbstyle", ("long", "short", "auto", "line", "native"))
def test_all_entries_hidden(pytester: Pytester, tbstyle: str) -> None:
"""Regression test for #10903."""
pytester.makepyfile(
"""
def test():
__tracebackhide__ = True
1 / 0
"""
)
result = pytester.runpytest("--tb", tbstyle)
assert result.ret == 1
if tbstyle != "line":
result.stdout.fnmatch_lines(["*ZeroDivisionError: division by zero"])
if tbstyle not in ("line", "native"):
result.stdout.fnmatch_lines(["All traceback entries are hidden.*"])
def test_hidden_entries_of_chained_exceptions_are_not_shown(pytester: Pytester) -> None:
"""Hidden entries of chained exceptions are not shown (#1904)."""
p = pytester.makepyfile(
"""
def g1():
__tracebackhide__ = True
str.does_not_exist
def f3():
__tracebackhide__ = True
1 / 0
def f2():
try:
f3()
except Exception:
g1()
def f1():
__tracebackhide__ = True
f2()
def test():
f1()
"""
)
result = pytester.runpytest(str(p), "--tb=short")
assert result.ret == 1
result.stdout.fnmatch_lines(
[
"*.py:11: in f2",
" f3()",
"E ZeroDivisionError: division by zero",
"",
"During handling of the above exception, another exception occurred:",
"*.py:20: in test",
" f1()",
"*.py:13: in f2",
" g1()",
"E AttributeError:*'does_not_exist'",
],
consecutive=True,
)
def add_note(err: BaseException, msg: str) -> None:
"""Adds a note to an exception inplace."""
if sys.version_info < (3, 11):
err.__notes__ = [*getattr(err, "__notes__", []), msg] # type: ignore[attr-defined]
else:
err.add_note(msg)
@pytest.mark.parametrize(
"error,notes,match",
[
(Exception("test"), [], "test"),
(AssertionError("foo"), ["bar"], "bar"),
(AssertionError("foo"), ["bar", "baz"], "bar"),
(AssertionError("foo"), ["bar", "baz"], "baz"),
(ValueError("foo"), ["bar", "baz"], re.compile(r"bar\nbaz", re.MULTILINE)),
(ValueError("foo"), ["bar", "baz"], re.compile(r"BAZ", re.IGNORECASE)),
],
)
def test_check_error_notes_success(
error: Exception, notes: list[str], match: str
) -> None:
for note in notes:
add_note(error, note)
with pytest.raises(Exception, match=match):
raise error
@pytest.mark.parametrize(
"error, notes, match",
[
(Exception("test"), [], "foo"),
(AssertionError("foo"), ["bar"], "baz"),
(AssertionError("foo"), ["bar"], "foo\nbaz"),
],
)
def test_check_error_notes_failure(
error: Exception, notes: list[str], match: str
) -> None:
for note in notes:
add_note(error, note)
with pytest.raises(AssertionError):
with pytest.raises(type(error), match=match):
raise error
| TestFormattedExcinfo |