language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_override.py | {
"start": 1242,
"end": 1957
} | class ____:
def test_create_default(self) -> None:
o = bcpo.Override(default=10)
assert o.default_overridden
assert o.default == 10
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcpo, ALL)
| Test_Override |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/ecs/test_utils.py | {
"start": 395,
"end": 2041
} | class ____(EcsRunLauncher):
def __init__(
self,
inst_data: Optional[ConfigurableClassData] = None,
task_definition=None,
container_name="run",
secrets=None,
secrets_tag="dagster",
env_vars=None,
include_sidecars=False,
):
super().__init__(
inst_data=inst_data,
task_definition=task_definition,
container_name=container_name,
secrets=secrets,
secrets_tag=secrets_tag,
env_vars=env_vars,
include_sidecars=include_sidecars,
)
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {}
@classmethod
def from_config_value(
cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]
) -> Self:
return cls(inst_data=inst_data, **config_value)
def get_cpu_and_memory_overrides(
self, container_context: EcsContainerContext, run: DagsterRun
) -> Mapping[str, str]:
return {"cpu": "4096", "memory": "16384"}
def _get_task_overrides(
self, container_context: EcsContainerContext, run: DagsterRun
) -> Mapping[str, Any]:
return {"ephemeralStorage": {"sizeInGiB": 128}}
def report_launch_events(
self, run: DagsterRun, arn: Optional[str] = None, cluster: Optional[str] = None
):
self._instance.report_engine_event(
message="Launching run in custom ECS task",
dagster_run=run,
engine_event_data=EngineEventData({"Run ID": run.run_id}),
)
| CustomECSRunLauncher |
python | astropy__astropy | astropy/units/tests/test_quantity_ufuncs.py | {
"start": 51549,
"end": 51616
} | class ____:
data: u.Quantity
@dataclasses.dataclass
| DuckQuantity1 |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/duplicate_bases.py | {
"start": 177,
"end": 250
} | class ____(
A,
A,
):
...
# Duplicate base class is not last.
| F4 |
python | python__mypy | mypy/nodes.py | {
"start": 52873,
"end": 55190
} | class ____(Statement):
"""Assignment statement.
The same node class is used for single assignment, multiple assignment
(e.g. x, y = z) and chained assignment (e.g. x = y = z), assignments
that define new names, and assignments with explicit types ("# type: t"
or "x: t [= ...]").
An lvalue can be NameExpr, TupleExpr, ListExpr, MemberExpr, or IndexExpr.
"""
__slots__ = (
"lvalues",
"rvalue",
"type",
"unanalyzed_type",
"new_syntax",
"is_alias_def",
"is_final_def",
"invalid_recursive_alias",
)
__match_args__ = ("lvalues", "rvalues", "type")
lvalues: list[Lvalue]
# This is a TempNode if and only if no rvalue (x: t).
rvalue: Expression
# Declared type in a comment, may be None.
type: mypy.types.Type | None
# Original, not semantically analyzed type in annotation (used for reprocessing)
unanalyzed_type: mypy.types.Type | None
# This indicates usage of PEP 526 type annotation syntax in assignment.
new_syntax: bool
# Does this assignment define a type alias?
is_alias_def: bool
# Is this a final definition?
# Final attributes can't be re-assigned once set, and can't be overridden
# in a subclass. This flag is not set if an attempted declaration was found to
# be invalid during semantic analysis. It is still set to `True` if
# a final declaration overrides another final declaration (this is checked
# during type checking when MROs are known).
is_final_def: bool
# Stop further processing of this assignment, to prevent flipping back and forth
# during semantic analysis passes.
invalid_recursive_alias: bool
def __init__(
self,
lvalues: list[Lvalue],
rvalue: Expression,
type: mypy.types.Type | None = None,
new_syntax: bool = False,
) -> None:
super().__init__()
self.lvalues = lvalues
self.rvalue = rvalue
self.type = type
self.unanalyzed_type = type
self.new_syntax = new_syntax
self.is_alias_def = False
self.is_final_def = False
self.invalid_recursive_alias = False
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_assignment_stmt(self)
| AssignmentStmt |
python | huggingface__transformers | src/transformers/models/blip/modeling_blip.py | {
"start": 13939,
"end": 16483
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = nn.Dropout(config.attention_dropout)
self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim)
self.projection = nn.Linear(self.embed_dim, self.embed_dim)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, embed_dim = hidden_states.size()
mixed_qkv = (
self.qkv(hidden_states)
.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
attention_scores = attention_scores * self.scale
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
context_layer = context_layer.reshape(new_context_layer_shape)
output = self.projection(context_layer)
return output, attention_probs
# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Blip
| BlipAttention |
python | doocs__leetcode | lcci/08.09.Bracket/Solution.py | {
"start": 0,
"end": 377
} | class ____:
def generateParenthesis(self, n: int) -> List[str]:
def dfs(l, r, t):
if l > n or r > n or l < r:
return
if l == n and r == n:
ans.append(t)
return
dfs(l + 1, r, t + '(')
dfs(l, r + 1, t + ')')
ans = []
dfs(0, 0, '')
return ans
| Solution |
python | django__django | tests/m2m_intermediary/models.py | {
"start": 663,
"end": 776
} | class ____(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
| Article |
python | dask__dask | dask/blockwise.py | {
"start": 630,
"end": 2650
} | class ____:
"""Blockwise-IO argument
This is the base class for indexable Blockwise-IO arguments.
When constructing a ``Blockwise`` Layer, one or more of the
collection tuples passed in with ``indices`` may contain a
``BlockwiseDep`` instance (in place of a "real" collection name).
This allows a new collection to be created (via IO) within a
``Blockwise`` layer.
Parameters
----------
numblocks: tuple[int, ...]
The number of blocks/partitions the object can support
along each dimension.
produces_tasks: bool
Whether any nested tasks will be passed to the Blockwise
function.
See Also
--------
dask.blockwise.Blockwise
dask.blockwise.BlockwiseDepDict
"""
numblocks: tuple[int, ...]
produces_tasks: bool
def __getitem__(self, idx: tuple[int, ...]) -> Any:
"""Return Blockwise-function arguments for a specific index"""
raise NotImplementedError(
"Must define `__getitem__` for `BlockwiseDep` subclass."
)
def get(self, idx: tuple[int, ...], default) -> Any:
"""BlockwiseDep ``__getitem__`` Wrapper"""
try:
return self.__getitem__(idx)
except KeyError:
return default
@property
def produces_keys(self) -> bool:
"""Whether this object will produce external key dependencies.
An external key corresponds to a task key or ``Delayed``-object
key that does not originate from within the ``Blockwise`` layer
that is including this ``BlockwiseDep`` object in its ``indices``.
A ``BlockwiseDep`` object should only return external-key
dependencies when those dependencies do not correspond to a
blockwise-compatible Dask collection (otherwise the collection
name should just be included in ``indices`` list instead).
"""
return False
def __repr__(self) -> str:
return f"<{type(self).__name__} {self.numblocks}>"
| BlockwiseDep |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/pg_catalog.py | {
"start": 1055,
"end": 1148
} | class ____(TypeDecorator[str]):
impl = Text(collation="C")
cache_ok = True
| PG_NODE_TREE |
python | PyCQA__pylint | tests/functional/c/class_members_py30.py | {
"start": 739,
"end": 879
} | class ____:
"""use object.__setattr__"""
def __init__(self):
self.__setattr__('toto', 'tutu')
from abc import ABCMeta
| NewClass |
python | davidhalter__jedi | test/completion/ordering.py | {
"start": 1646,
"end": 2080
} | class ____():
def __init__(self, a):
self.a = a
#? float()
a(1.0).a
#?
a().a
# -----------------
# imports
# -----------------
math = 3
import math
#? ['cosh']
math.cosh
#? []
math.real
math = 3
#? int()
math
#? []
math.cos
# do the same for star imports
cosh = 3
from math import *
# cosh doesn't work, but that's not a problem, star imports should be at the
# start of EVERY script!
cosh.real
cosh = 3
#? int()
cosh
| a |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 235082,
"end": 235634
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("first_day", "name", "total_weeks", "year")
first_day = sgqlc.types.Field(sgqlc.types.non_null(Date), graphql_name="firstDay")
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
total_weeks = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalWeeks"
)
year = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="year")
| ContributionCalendarMonth |
python | ray-project__ray | python/ray/serve/tests/test_list_outbound_deployments.py | {
"start": 2528,
"end": 7037
} | class ____:
"""Test suite for list_outbound_deployments() method."""
async def test_stored_handles_in_init(self, serve_instance):
"""Test listing handles that are passed to __init__ and stored as attributes."""
app_name = "test_stored_handles"
# Build and deploy the app
handle_a = DownstreamA.bind()
handle_b = DownstreamB.bind()
app = UpstreamWithStoredHandles.bind(handle_a, handle_b)
serve.run(app, name=app_name)
# Get the replica actor for the upstream deployment
replica_actor = get_replica_actor_handle("UpstreamWithStoredHandles", app_name)
# Call list_outbound_deployments
outbound_deployments: List[DeploymentID] = ray.get(
replica_actor.list_outbound_deployments.remote()
)
# Verify results
deployment_names = {dep_id.name for dep_id in outbound_deployments}
assert "DownstreamA" in deployment_names
assert "DownstreamB" in deployment_names
assert len(outbound_deployments) == 2
# Verify app names match
for dep_id in outbound_deployments:
assert dep_id.app_name == app_name
async def test_nested_handles_in_dict_and_list(self, serve_instance):
"""Test listing handles stored in nested data structures (dict, list)."""
app_name = "test_nested_handles"
# Build and deploy the app
handle_a = DownstreamA.bind()
handle_b = DownstreamB.bind()
handles_dict = {"a": handle_a, "b": handle_b}
handles_list = [handle_a, handle_b]
app = UpstreamWithNestedHandles.bind(handles_dict, handles_list)
serve.run(app, name=app_name)
# Get the replica actor
replica_actor = get_replica_actor_handle("UpstreamWithNestedHandles", app_name)
# Call list_outbound_deployments
outbound_deployments: List[DeploymentID] = ray.get(
replica_actor.list_outbound_deployments.remote()
)
# Verify results (should find handles despite being in nested structures)
deployment_names = {dep_id.name for dep_id in outbound_deployments}
assert "DownstreamA" in deployment_names
assert "DownstreamB" in deployment_names
# Verify no duplicates (handle_a and handle_b appear in both dict and list)
assert len(outbound_deployments) == 2
async def test_no_handles(self, serve_instance):
"""Test deployment with no outbound handles."""
app_name = "test_no_handles"
# Deploy a simple deployment with no handles
app = DownstreamA.bind()
serve.run(app, name=app_name)
# Get the replica actor
replica_actor = get_replica_actor_handle("DownstreamA", app_name)
# Call list_outbound_deployments
outbound_deployments: List[DeploymentID] = ray.get(
replica_actor.list_outbound_deployments.remote()
)
# Should be empty
assert len(outbound_deployments) == 0
async def test_dynamic_handles(self, serve_instance):
app1 = DownstreamA.bind()
app2 = DownstreamB.bind()
app3 = DynamicDeployment.bind()
serve.run(app1, name="app1", route_prefix="/app1")
serve.run(app2, name="app2", route_prefix="/app2")
handle = serve.run(app3, name="app3", route_prefix="/app3")
# Make requests to trigger dynamic handle creation
# x=1: DownstreamA returns 1*2=2, DownstreamB returns 1+10=11, total=2+11=13
results = [await handle.remote(1, "app1", "app2") for _ in range(10)]
for result in results:
assert result == 13
# Get the replica actor
replica_actor = get_replica_actor_handle("DynamicDeployment", "app3")
# Call list_outbound_deployments
outbound_deployments: List[DeploymentID] = ray.get(
replica_actor.list_outbound_deployments.remote()
)
# Verify results - should include dynamically created handles
deployment_names = {dep_id.name for dep_id in outbound_deployments}
assert "DownstreamA" in deployment_names
assert "DownstreamB" in deployment_names
assert len(outbound_deployments) == 2
# Verify the app names are correct
app_names = {dep_id.app_name for dep_id in outbound_deployments}
assert "app1" in app_names
assert "app2" in app_names
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| TestListOutboundDeployments |
python | getsentry__sentry | src/sentry/identity/services/identity/service.py | {
"start": 545,
"end": 2768
} | class ____(RpcService):
key = "identity"
local_mode = SiloMode.CONTROL
@classmethod
def get_local_implementation(cls) -> RpcService:
from sentry.identity.services.identity.impl import DatabaseBackedIdentityService
return DatabaseBackedIdentityService()
@rpc_method
@abstractmethod
def get_provider(
self,
*,
provider_id: int | None = None,
provider_type: str | None = None,
provider_ext_id: str | None = None,
) -> RpcIdentityProvider | None:
"""
Returns an RpcIdentityProvider either by using the idp.id (provider_id), or a combination
of idp.type (provider_type) and idp.external_id (provider_ext_id)
"""
@rpc_method
@abstractmethod
def get_identities(self, *, filter: IdentityFilterArgs) -> list[RpcIdentity]:
"""
Returns a list of RpcIdentity based on the given filters.
"""
@rpc_method
@abstractmethod
def get_identity(self, *, filter: IdentityFilterArgs) -> RpcIdentity | None:
"""
Returns the first RpcIdentity based on the given filters.
"""
@rpc_method
@abstractmethod
def get_user_identities_by_provider_type(
self,
*,
user_id: int,
provider_type: str,
exclude_matching_external_ids: bool = False,
) -> list[RpcIdentity]:
"""
Returns a list of APIIdentities for a given user based on idp.type (provider_type).
If exclude_matching_external_ids is True, excludes entries with
identity.external_id == idp.external_id
"""
@rpc_method
@abstractmethod
def delete_identities(self, user_id: int, organization_id: int) -> None:
"""
Deletes the set of identities associated with a user and organization context.
:param user_id:
:param organization_id:
:return:
"""
@rpc_method
@abstractmethod
def update_data(self, *, identity_id: int, data: Any) -> RpcIdentity | None:
"""
Updates an Identity's data.
:param identity_id:
:return: RpcIdentity
"""
identity_service = IdentityService.create_delegation()
| IdentityService |
python | streamlit__streamlit | lib/streamlit/runtime/scriptrunner/script_runner.py | {
"start": 5926,
"end": 32127
} | class ____:
def __init__(
self,
session_id: str,
main_script_path: str,
session_state: SessionState,
uploaded_file_mgr: UploadedFileManager,
script_cache: ScriptCache,
initial_rerun_data: RerunData,
user_info: dict[str, str | bool | None],
fragment_storage: FragmentStorage,
pages_manager: PagesManager,
) -> None:
"""Initialize the ScriptRunner.
(The ScriptRunner won't start executing until start() is called.)
Parameters
----------
session_id
The AppSession's id.
main_script_path
Path to our main app script.
session_state
The AppSession's SessionState instance.
uploaded_file_mgr
The File manager to store the data uploaded by the file_uploader widget.
script_cache
A ScriptCache instance.
initial_rerun_data
RerunData to initialize this ScriptRunner with.
user_info
A dict that contains information about the current user. For now,
it only contains the user's email address.
{
"email": "example@example.com"
}
Information about the current user is optionally provided when a
websocket connection is initialized via the "X-Streamlit-User" header.
fragment_storage
The AppSession's FragmentStorage instance.
"""
self._session_id = session_id
self._main_script_path = main_script_path
self._session_state = SafeSessionState(
session_state, yield_callback=self._maybe_handle_execution_control_request
)
self._uploaded_file_mgr = uploaded_file_mgr
self._script_cache = script_cache
self._user_info = user_info
self._fragment_storage = fragment_storage
self._pages_manager = pages_manager
self._requests = ScriptRequests()
self._requests.request_rerun(initial_rerun_data)
self.on_event = Signal(
doc="""Emitted when a ScriptRunnerEvent occurs.
This signal is generally emitted on the ScriptRunner's script
thread (which is *not* the same thread that the ScriptRunner was
created on).
Parameters
----------
sender: ScriptRunner
The sender of the event (this ScriptRunner).
event : ScriptRunnerEvent
forward_msg : ForwardMsg | None
The ForwardMsg to send to the frontend. Set only for the
ENQUEUE_FORWARD_MSG event.
exception : BaseException | None
Our compile error. Set only for the
SCRIPT_STOPPED_WITH_COMPILE_ERROR event.
widget_states : streamlit.proto.WidgetStates_pb2.WidgetStates | None
The ScriptRunner's final WidgetStates. Set only for the
SHUTDOWN event.
"""
)
# Set to true while we're executing. Used by
# _maybe_handle_execution_control_request.
self._execing = False
# This is initialized in the start() method
self._script_thread: threading.Thread | None = None
def __repr__(self) -> str:
return util.repr_(self)
def request_stop(self) -> None:
"""Request that the ScriptRunner stop running its script and
shut down. The ScriptRunner will handle this request when it reaches
an interrupt point.
Safe to call from any thread.
"""
self._requests.request_stop()
def request_rerun(self, rerun_data: RerunData) -> bool:
"""Request that the ScriptRunner interrupt its currently-running
script and restart it.
If the ScriptRunner has been stopped, this request can't be honored:
return False.
Otherwise, record the request and return True. The ScriptRunner will
handle the rerun request as soon as it reaches an interrupt point.
Safe to call from any thread.
"""
return self._requests.request_rerun(rerun_data)
def start(self) -> None:
"""Start a new thread to process the ScriptEventQueue.
This must be called only once.
"""
if self._script_thread is not None:
raise RuntimeError("ScriptRunner was already started")
self._script_thread = threading.Thread(
target=self._run_script_thread,
name="ScriptRunner.scriptThread",
)
self._script_thread.start()
def _get_script_run_ctx(self) -> ScriptRunContext:
"""Get the ScriptRunContext for the current thread.
Returns
-------
ScriptRunContext
The ScriptRunContext for the current thread.
Raises
------
AssertionError
If called outside of a ScriptRunner thread.
RuntimeError
If there is no ScriptRunContext for the current thread.
"""
if not self._is_in_script_thread():
raise RuntimeError(
"ScriptRunner._get_script_run_ctx must be called from the script thread."
)
ctx = get_script_run_ctx()
if ctx is None:
# This should never be possible on the script_runner thread.
raise RuntimeError(
"ScriptRunner thread has a null ScriptRunContext. "
"Something has gone very wrong!"
)
return ctx
def _run_script_thread(self) -> None:
"""The entry point for the script thread.
Processes the ScriptRequestQueue, which will at least contain the RERUN
request that will trigger the first script-run.
When the ScriptRequestQueue is empty, or when a SHUTDOWN request is
dequeued, this function will exit and its thread will terminate.
"""
if not self._is_in_script_thread():
raise RuntimeError(
"ScriptRunner._run_script_thread must be called from the script thread."
)
_LOGGER.debug("Beginning script thread")
# Create and attach the thread's ScriptRunContext
ctx = ScriptRunContext(
session_id=self._session_id,
_enqueue=self._enqueue_forward_msg,
script_requests=self._requests,
query_string="",
session_state=self._session_state,
uploaded_file_mgr=self._uploaded_file_mgr,
main_script_path=self._main_script_path,
user_info=self._user_info,
gather_usage_stats=bool(config.get_option("browser.gatherUsageStats")),
fragment_storage=self._fragment_storage,
pages_manager=self._pages_manager,
context_info=None,
)
add_script_run_ctx(threading.current_thread(), ctx)
request = self._requests.on_scriptrunner_ready()
while request.type == ScriptRequestType.RERUN:
# When the script thread starts, we'll have a pending rerun
# request that we'll handle immediately. When the script finishes,
# it's possible that another request has come in that we need to
# handle, which is why we call _run_script in a loop.
self._run_script(request.rerun_data)
request = self._requests.on_scriptrunner_ready()
if request.type != ScriptRequestType.STOP:
raise RuntimeError(
f"Unrecognized ScriptRequestType: {request.type}. This should never happen."
)
# Send a SHUTDOWN event before exiting, so some state can be saved
# for use in a future script run when not triggered by the client.
client_state = ClientState()
client_state.query_string = ctx.query_string
client_state.page_script_hash = ctx.page_script_hash
if ctx.context_info:
client_state.context_info.CopyFrom(ctx.context_info)
self.on_event.send(
self, event=ScriptRunnerEvent.SHUTDOWN, client_state=client_state
)
def _is_in_script_thread(self) -> bool:
"""True if the calling function is running in the script thread."""
return self._script_thread == threading.current_thread()
def _enqueue_forward_msg(self, msg: ForwardMsg) -> None:
"""Enqueue a ForwardMsg to our browser queue.
This private function is called by ScriptRunContext only.
It may be called from the script thread OR the main thread.
"""
# Whenever we enqueue a ForwardMsg, we also handle any pending
# execution control request. This means that a script can be
# cleanly interrupted and stopped inside most `st.foo` calls.
self._maybe_handle_execution_control_request()
# Pass the message to our associated AppSession.
self.on_event.send(
self, event=ScriptRunnerEvent.ENQUEUE_FORWARD_MSG, forward_msg=msg
)
def _maybe_handle_execution_control_request(self) -> None:
"""Check our current ScriptRequestState to see if we have a
pending STOP or RERUN request.
This function is called every time the app script enqueues a
ForwardMsg, which means that most `st.foo` commands - which generally
involve sending a ForwardMsg to the frontend - act as implicit
yield points in the script's execution.
"""
if not self._is_in_script_thread():
# We can only handle execution_control_request if we're on the
# script execution thread. However, it's possible for deltas to
# be enqueued (and, therefore, for this function to be called)
# in separate threads, so we check for that here.
return
if not self._execing:
# If the _execing flag is not set, we're not actually inside
# an exec() call. This happens when our script exec() completes,
# we change our state to STOPPED, and a statechange-listener
# enqueues a new ForwardEvent
return
request = self._requests.on_scriptrunner_yield()
if request is None:
# No RERUN or STOP request.
return
if request.type == ScriptRequestType.RERUN:
raise RerunException(request.rerun_data)
if request.type != ScriptRequestType.STOP:
raise RuntimeError(
f"Unrecognized ScriptRequestType: {request.type}. This should never happen."
)
raise StopException()
@contextmanager
def _set_execing_flag(self) -> Generator[None, None, None]:
"""A context for setting the ScriptRunner._execing flag.
Used by _maybe_handle_execution_control_request to ensure that
we only handle requests while we're inside an exec() call
"""
if self._execing:
raise RuntimeError("Nested set_execing_flag call")
self._execing = True
try:
yield
finally:
self._execing = False
def _run_script(self, rerun_data: RerunData) -> None:
"""Run our script.
Parameters
----------
rerun_data: RerunData
The RerunData to use.
"""
if not self._is_in_script_thread():
raise RuntimeError(
"ScriptRunner._run_script must be called from the script thread."
)
# An explicit loop instead of recursion to avoid stack overflows
while True:
_LOGGER.debug("Running script %s", rerun_data)
start_time: float = timer()
prep_time: float = 0 # This will be overwritten once preparations are done.
if not rerun_data.fragment_id_queue:
# Don't clear session refs for media files if we're running a fragment.
# Otherwise, we're likely to remove files that still have corresponding
# download buttons/links to them present in the app, which will result
# in a 404 should the user click on them.
runtime.get_instance().media_file_mgr.clear_session_refs()
self._pages_manager.set_script_intent(
rerun_data.page_script_hash, rerun_data.page_name
)
active_script = self._pages_manager.get_initial_active_script(
rerun_data.page_script_hash
)
main_page_info = self._pages_manager.get_main_page()
page_script_hash = (
active_script["page_script_hash"]
if active_script is not None
else main_page_info["page_script_hash"]
)
ctx = self._get_script_run_ctx()
# Clear widget state on page change. This normally happens implicitly
# in the script run cleanup steps, but doing it explicitly ensures
# it happens even if a script run was interrupted.
previous_page_script_hash = ctx.page_script_hash
if previous_page_script_hash != page_script_hash:
# Page changed, enforce reset widget state where possible.
# This enforcement matters when a new script thread is started
# before the previous script run is completed (from user
# interaction). Use the widget ids from the rerun data to
# maintain some widget state, as the rerun data should
# contain the latest widget ids from the frontend.
widget_ids: set[str] = set()
if (
rerun_data.widget_states is not None
and rerun_data.widget_states.widgets is not None
):
widget_ids = {w.id for w in rerun_data.widget_states.widgets}
self._session_state.on_script_finished(widget_ids)
fragment_ids_this_run: list[str] | None = (
rerun_data.fragment_id_queue or None
)
ctx.reset(
query_string=rerun_data.query_string,
page_script_hash=page_script_hash,
fragment_ids_this_run=fragment_ids_this_run,
cached_message_hashes=rerun_data.cached_message_hashes,
context_info=rerun_data.context_info,
)
self.on_event.send(
self,
event=ScriptRunnerEvent.SCRIPT_STARTED,
page_script_hash=page_script_hash,
fragment_ids_this_run=fragment_ids_this_run,
pages=self._pages_manager.get_pages(),
)
# Compile the script. Any errors thrown here will be surfaced
# to the user via a modal dialog in the frontend, and won't result
# in their previous script elements disappearing.
try:
if active_script is not None:
script_path = active_script["script_path"]
else:
# page must not be found
script_path = main_page_info["script_path"]
# At this point, we know that either
# * the script corresponding to the hash requested no longer
# exists, or
# * we were not able to find a script with the requested page
# name.
# In both of these cases, we want to send a page_not_found
# message to the frontend.
msg = ForwardMsg()
msg.page_not_found.page_name = rerun_data.page_name
ctx.enqueue(msg)
code = self._script_cache.get_bytecode(script_path)
except Exception as ex:
# We got a compile error. Send an error event and bail immediately.
_LOGGER.exception("Script compilation error", exc_info=ex)
self._session_state[SCRIPT_RUN_WITHOUT_ERRORS_KEY] = False
self.on_event.send(
self,
event=ScriptRunnerEvent.SCRIPT_STOPPED_WITH_COMPILE_ERROR,
exception=ex,
)
return
# If we get here, we've successfully compiled our script. The next step
# is to run it. Errors thrown during execution will be shown to the
# user as ExceptionElements.
# Create fake module. This gives us a name global namespace to
# execute the code in.
module = self._new_module("__main__")
# Install the fake module as the __main__ module. This allows
# the pickle module to work inside the user's code, since it now
# can know the module where the pickled objects stem from.
# IMPORTANT: This means we can't use "if __name__ == '__main__'" in
# our code, as it will point to the wrong module!!!
sys.modules["__main__"] = module
# Add special variables to the module's globals dict.
# Note: The following is a requirement for the CodeHasher to
# work correctly. The CodeHasher is scoped to
# files contained in the directory of __main__.__file__, which we
# assume is the main script directory.
module.__dict__["__file__"] = script_path
def code_to_exec(
code: str = code,
module: types.ModuleType = module,
ctx: ScriptRunContext = ctx,
rerun_data: RerunData = rerun_data,
) -> None:
with (
modified_sys_path(self._main_script_path),
self._set_execing_flag(),
):
# Run callbacks for widgets whose values have changed.
if rerun_data.widget_states is not None:
self._session_state.on_script_will_rerun(
rerun_data.widget_states
)
ctx.on_script_start()
if rerun_data.fragment_id_queue:
for fragment_id in rerun_data.fragment_id_queue:
try:
wrapped_fragment = self._fragment_storage.get(
fragment_id
)
wrapped_fragment()
except FragmentStorageKeyError: # noqa: PERF203
# This can happen if the fragment_id is removed from the
# storage before the script runner gets to it. In this
# case, the fragment is simply skipped.
# Also, only log an error if the fragment is not an
# auto_rerun to avoid noise. If it is an auto_rerun, we
# might have a race condition where the fragment_id is
# removed but the webapp sends a rerun request before
# the removal information has reached the web app
# (see https://github.com/streamlit/streamlit/issues/9080).
if not rerun_data.is_auto_rerun:
_LOGGER.warning(
"Couldn't find fragment with id %s."
" This can happen if the fragment does not"
" exist anymore when this request is processed,"
" for example because a full app rerun happened"
" that did not register the fragment."
" Usually this doesn't happen or no action is"
" required, so its mainly for debugging.",
fragment_id,
)
except (RerunException, StopException):
# The wrapped_fragment function is executed
# inside of a exec_func_with_error_handling call, so
# there is a correct handler for these exceptions.
raise
except Exception: # noqa: S110
# Ignore exceptions raised by fragments here as we don't
# want to stop the execution of other fragments. The
# error itself is already rendered within the wrapped
# fragment.
pass
else:
if PagesManager.uses_pages_directory:
_mpa_v1(self._main_script_path)
else:
exec(code, module.__dict__) # noqa: S102
self._fragment_storage.clear(
new_fragment_ids=ctx.new_fragment_ids
)
self._session_state.maybe_check_serializable()
# check for control requests, e.g. rerun requests have arrived
self._maybe_handle_execution_control_request()
prep_time = timer() - start_time
(
_,
run_without_errors,
rerun_exception_data,
premature_stop,
uncaught_exception,
) = exec_func_with_error_handling(code_to_exec, ctx)
# setting the session state here triggers a yield-callback call
# which reads self._requests and checks for rerun data
self._session_state[SCRIPT_RUN_WITHOUT_ERRORS_KEY] = run_without_errors
if rerun_exception_data:
# The handling for when a full script run or a fragment is stopped early
# is the same, so we only have one ScriptRunnerEvent for this scenario.
finished_event = ScriptRunnerEvent.SCRIPT_STOPPED_FOR_RERUN
elif rerun_data.fragment_id_queue:
finished_event = ScriptRunnerEvent.FRAGMENT_STOPPED_WITH_SUCCESS
else:
finished_event = ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS
if ctx.gather_usage_stats:
try:
# Create and send page profile information
ctx.enqueue(
create_page_profile_message(
commands=ctx.tracked_commands,
exec_time=to_microseconds(timer() - start_time),
prep_time=to_microseconds(prep_time),
uncaught_exception=(
type(uncaught_exception).__name__
if uncaught_exception
else None
),
)
)
except Exception as ex:
# Always capture all exceptions since we want to make sure that
# the telemetry never causes any issues.
_LOGGER.debug("Failed to create page profile", exc_info=ex)
self._on_script_finished(ctx, finished_event, premature_stop)
# # Use _log_if_error() to make sure we never ever ever stop running the
# # script without meaning to.
_log_if_error(_clean_problem_modules)
if rerun_exception_data is not None:
rerun_data = rerun_exception_data
else:
break
def _on_script_finished(
self, ctx: ScriptRunContext, event: ScriptRunnerEvent, premature_stop: bool
) -> None:
"""Called when our script finishes executing, even if it finished
early with an exception. We perform post-run cleanup here.
"""
# Tell session_state to update itself in response
if not premature_stop:
self._session_state.on_script_finished(ctx.widget_ids_this_run)
# Signal that the script has finished. (We use SCRIPT_STOPPED_WITH_SUCCESS
# even if we were stopped with an exception.)
self.on_event.send(self, event=event)
# Remove orphaned files now that the script has run and files in use
# are marked as active.
runtime.get_instance().media_file_mgr.remove_orphaned_files()
# Force garbage collection to run, to help avoid memory use building up
# This is usually not an issue, but sometimes GC takes time to kick in and
# causes apps to go over resource limits, and forcing it to run between
# script runs is low cost, since we aren't doing much work anyway.
if config.get_option("runner.postScriptGC"):
gc.collect(2)
def _new_module(self, name: str) -> types.ModuleType:
"""Create a new module with the given name."""
return types.ModuleType(name)
def _clean_problem_modules() -> None:
"""Some modules are stateful, so we have to clear their state."""
if "keras" in sys.modules:
try:
keras = sys.modules["keras"]
cast("Any", keras).backend.clear_session()
except Exception: # noqa: S110
# We don't want to crash the app if we can't clear the Keras session.
pass
if "matplotlib.pyplot" in sys.modules:
try:
plt = sys.modules["matplotlib.pyplot"]
cast("Any", plt).close("all")
except Exception: # noqa: S110
# We don't want to crash the app if we can't close matplotlib
pass
# The reason this is not a decorator is because we want to make it clear at the
# calling location that this function is being used.
def _log_if_error(fn: Callable[[], None]) -> None:
try:
fn()
except Exception as e:
_LOGGER.warning(e)
| ScriptRunner |
python | tensorflow__tensorflow | tensorflow/python/saved_model/nested_structure_coder.py | {
"start": 8719,
"end": 9164
} | class ____:
"""Codec for None."""
def can_encode(self, pyobj):
return pyobj is None
def do_encode(self, none_value, encode_fn):
del encode_fn, none_value
value = struct_pb2.StructuredValue()
value.none_value.CopyFrom(struct_pb2.NoneValue())
return value
def can_decode(self, value):
return value.HasField("none_value")
def do_decode(self, value, decode_fn):
del decode_fn, value
return None
| _NoneCodec |
python | langchain-ai__langchain | libs/core/tests/unit_tests/messages/test_utils.py | {
"start": 20531,
"end": 52866
} | class ____(FakeChatModel):
@override
def get_num_tokens_from_messages(
self,
messages: list[BaseMessage],
tools: Sequence[dict[str, Any] | type | Callable | BaseTool] | None = None,
) -> int:
return dummy_token_counter(messages)
def test_convert_to_messages() -> None:
message_like: list = [
# BaseMessage
SystemMessage("1"),
SystemMessage("1.1", additional_kwargs={"__openai_role__": "developer"}),
HumanMessage([{"type": "image_url", "image_url": {"url": "2.1"}}], name="2.2"),
AIMessage(
[
{"type": "text", "text": "3.1"},
{
"type": "tool_use",
"id": "3.2",
"name": "3.3",
"input": {"3.4": "3.5"},
},
]
),
AIMessage(
[
{"type": "text", "text": "4.1"},
{
"type": "tool_use",
"id": "4.2",
"name": "4.3",
"input": {"4.4": "4.5"},
},
],
tool_calls=[
{
"name": "4.3",
"args": {"4.4": "4.5"},
"id": "4.2",
"type": "tool_call",
}
],
),
ToolMessage("5.1", tool_call_id="5.2", name="5.3"),
# OpenAI dict
{"role": "system", "content": "6"},
{"role": "developer", "content": "6.1"},
{
"role": "user",
"content": [{"type": "image_url", "image_url": {"url": "7.1"}}],
"name": "7.2",
},
{
"role": "assistant",
"content": [{"type": "text", "text": "8.1"}],
"tool_calls": [
{
"type": "function",
"function": {
"arguments": json.dumps({"8.2": "8.3"}),
"name": "8.4",
},
"id": "8.5",
}
],
"name": "8.6",
},
{"role": "tool", "content": "10.1", "tool_call_id": "10.2"},
# Tuple/List
("system", "11.1"),
("developer", "11.2"),
("human", [{"type": "image_url", "image_url": {"url": "12.1"}}]),
(
"ai",
[
{"type": "text", "text": "13.1"},
{
"type": "tool_use",
"id": "13.2",
"name": "13.3",
"input": {"13.4": "13.5"},
},
],
),
# String
"14.1",
# LangChain dict
{
"role": "ai",
"content": [{"type": "text", "text": "15.1"}],
"tool_calls": [{"args": {"15.2": "15.3"}, "name": "15.4", "id": "15.5"}],
"name": "15.6",
},
]
expected = [
SystemMessage(content="1"),
SystemMessage(
content="1.1", additional_kwargs={"__openai_role__": "developer"}
),
HumanMessage(
content=[{"type": "image_url", "image_url": {"url": "2.1"}}], name="2.2"
),
AIMessage(
content=[
{"type": "text", "text": "3.1"},
{
"type": "tool_use",
"id": "3.2",
"name": "3.3",
"input": {"3.4": "3.5"},
},
]
),
AIMessage(
content=[
{"type": "text", "text": "4.1"},
{
"type": "tool_use",
"id": "4.2",
"name": "4.3",
"input": {"4.4": "4.5"},
},
],
tool_calls=[
{
"name": "4.3",
"args": {"4.4": "4.5"},
"id": "4.2",
"type": "tool_call",
}
],
),
ToolMessage(content="5.1", name="5.3", tool_call_id="5.2"),
SystemMessage(content="6"),
SystemMessage(
content="6.1", additional_kwargs={"__openai_role__": "developer"}
),
HumanMessage(
content=[{"type": "image_url", "image_url": {"url": "7.1"}}], name="7.2"
),
AIMessage(
content=[{"type": "text", "text": "8.1"}],
name="8.6",
tool_calls=[
{
"name": "8.4",
"args": {"8.2": "8.3"},
"id": "8.5",
"type": "tool_call",
}
],
),
ToolMessage(content="10.1", tool_call_id="10.2"),
SystemMessage(content="11.1"),
SystemMessage(
content="11.2", additional_kwargs={"__openai_role__": "developer"}
),
HumanMessage(content=[{"type": "image_url", "image_url": {"url": "12.1"}}]),
AIMessage(
content=[
{"type": "text", "text": "13.1"},
{
"type": "tool_use",
"id": "13.2",
"name": "13.3",
"input": {"13.4": "13.5"},
},
]
),
HumanMessage(content="14.1"),
AIMessage(
content=[{"type": "text", "text": "15.1"}],
name="15.6",
tool_calls=[
{
"name": "15.4",
"args": {"15.2": "15.3"},
"id": "15.5",
"type": "tool_call",
}
],
),
]
actual = convert_to_messages(message_like)
assert expected == actual
def test_convert_to_messages_openai_refusal() -> None:
actual = convert_to_messages(
[{"role": "assistant", "content": "", "refusal": "9.1"}]
)
expected = [AIMessage("", additional_kwargs={"refusal": "9.1"})]
assert actual == expected
# Raises error if content is missing.
with pytest.raises(
ValueError, match="Message dict must contain 'role' and 'content' keys"
):
convert_to_messages([{"role": "assistant", "refusal": "9.1"}])
def create_image_data() -> str:
return "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAABAAEDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigD//2Q==" # noqa: E501
def create_base64_image(image_format: str = "jpeg") -> str:
data = create_image_data()
return f"data:image/{image_format};base64,{data}"
def test_convert_to_openai_messages_string() -> None:
message = "Hello"
result = convert_to_openai_messages(message)
assert result == {"role": "user", "content": "Hello"}
def test_convert_to_openai_messages_single_message() -> None:
message: BaseMessage = HumanMessage(content="Hello")
result = convert_to_openai_messages(message)
assert result == {"role": "user", "content": "Hello"}
# Test IDs
result = convert_to_openai_messages(message, include_id=True)
assert result == {"role": "user", "content": "Hello"} # no ID
message = AIMessage(content="Hello", id="resp_123")
result = convert_to_openai_messages(message)
assert result == {"role": "assistant", "content": "Hello"}
result = convert_to_openai_messages(message, include_id=True)
assert result == {"role": "assistant", "content": "Hello", "id": "resp_123"}
def test_convert_to_openai_messages_multiple_messages() -> None:
messages = [
SystemMessage(content="System message"),
HumanMessage(content="Human message"),
AIMessage(content="AI message"),
]
result = convert_to_openai_messages(messages)
expected = [
{"role": "system", "content": "System message"},
{"role": "user", "content": "Human message"},
{"role": "assistant", "content": "AI message"},
]
assert result == expected
def test_convert_to_openai_messages_openai_string() -> None:
messages = [
HumanMessage(
content=[
{"type": "text", "text": "Hello"},
{"type": "text", "text": "World"},
]
),
AIMessage(
content=[{"type": "text", "text": "Hi"}, {"type": "text", "text": "there"}]
),
]
result = convert_to_openai_messages(messages)
expected = [
{"role": "user", "content": "Hello\nWorld"},
{"role": "assistant", "content": "Hi\nthere"},
]
assert result == expected
def test_convert_to_openai_messages_openai_block() -> None:
messages = [HumanMessage(content="Hello"), AIMessage(content="Hi there")]
result = convert_to_openai_messages(messages, text_format="block")
expected = [
{"role": "user", "content": [{"type": "text", "text": "Hello"}]},
{"role": "assistant", "content": [{"type": "text", "text": "Hi there"}]},
]
assert result == expected
def test_convert_to_openai_messages_invalid_format() -> None:
with pytest.raises(ValueError, match="Unrecognized text_format="):
convert_to_openai_messages(
[HumanMessage(content="Hello")],
text_format="invalid", # type: ignore[arg-type]
)
def test_convert_to_openai_messages_openai_image() -> None:
base64_image = create_base64_image()
messages = [
HumanMessage(
content=[
{"type": "text", "text": "Here's an image:"},
{"type": "image_url", "image_url": {"url": base64_image}},
]
)
]
result = convert_to_openai_messages(messages, text_format="block")
expected = [
{
"role": "user",
"content": [
{"type": "text", "text": "Here's an image:"},
{"type": "image_url", "image_url": {"url": base64_image}},
],
}
]
assert result == expected
def test_convert_to_openai_messages_anthropic() -> None:
image_data = create_image_data()
messages = [
HumanMessage(
content=[
{
"type": "text",
"text": "Here's an image:",
"cache_control": {"type": "ephemeral"},
},
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/jpeg",
"data": image_data,
},
},
]
),
AIMessage(
content=[
{"type": "tool_use", "name": "foo", "input": {"bar": "baz"}, "id": "1"}
]
),
HumanMessage(
content=[
{
"type": "tool_result",
"tool_use_id": "1",
"is_error": False,
"content": [
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/jpeg",
"data": image_data,
},
},
],
}
]
),
]
result = convert_to_openai_messages(messages)
expected = [
{
"role": "user",
"content": [
{"type": "text", "text": "Here's an image:"},
{"type": "image_url", "image_url": {"url": create_base64_image()}},
],
},
{
"role": "assistant",
"content": "",
"tool_calls": [
{
"type": "function",
"function": {
"name": "foo",
"arguments": json.dumps({"bar": "baz"}),
},
"id": "1",
}
],
},
{
"role": "tool",
"content": [
{"type": "image_url", "image_url": {"url": create_base64_image()}}
],
"tool_call_id": "1",
},
]
assert result == expected
# Test thinking blocks (pass through)
thinking_block = {
"signature": "abc123",
"thinking": "Thinking text.",
"type": "thinking",
}
text_block = {"text": "Response text.", "type": "text"}
messages = [AIMessage([thinking_block, text_block])]
result = convert_to_openai_messages(messages)
expected = [{"role": "assistant", "content": [thinking_block, text_block]}]
assert result == expected
def test_convert_to_openai_messages_bedrock_converse_image() -> None:
image_data = create_image_data()
messages = [
HumanMessage(
content=[
{"type": "text", "text": "Here's an image:"},
{
"image": {
"format": "jpeg",
"source": {"bytes": base64.b64decode(image_data)},
}
},
]
)
]
result = convert_to_openai_messages(messages)
assert result[0]["content"][1]["type"] == "image_url"
assert result[0]["content"][1]["image_url"]["url"] == create_base64_image()
def test_convert_to_openai_messages_vertexai_image() -> None:
image_data = create_image_data()
messages = [
HumanMessage(
content=[
{"type": "text", "text": "Here's an image:"},
{
"type": "media",
"mime_type": "image/jpeg",
"data": base64.b64decode(image_data),
},
]
)
]
result = convert_to_openai_messages(messages)
assert result[0]["content"][1]["type"] == "image_url"
assert result[0]["content"][1]["image_url"]["url"] == create_base64_image()
def test_convert_to_openai_messages_tool_message() -> None:
tool_message = ToolMessage(content="Tool result", tool_call_id="123")
result = convert_to_openai_messages([tool_message], text_format="block")
assert len(result) == 1
assert result[0]["content"] == [{"type": "text", "text": "Tool result"}]
assert result[0]["tool_call_id"] == "123"
def test_convert_to_openai_messages_tool_use() -> None:
messages = [
AIMessage(
content=[
{
"type": "tool_use",
"id": "123",
"name": "calculator",
"input": {"a": "b"},
}
]
)
]
result = convert_to_openai_messages(messages, text_format="block")
assert result[0]["tool_calls"][0]["type"] == "function"
assert result[0]["tool_calls"][0]["id"] == "123"
assert result[0]["tool_calls"][0]["function"]["name"] == "calculator"
assert result[0]["tool_calls"][0]["function"]["arguments"] == json.dumps({"a": "b"})
def test_convert_to_openai_messages_tool_use_unicode() -> None:
"""Test that Unicode characters in tool call args are preserved correctly."""
messages = [
AIMessage(
content=[
{
"type": "tool_use",
"id": "123",
"name": "create_customer",
"input": {"customer_name": "你好啊集团"},
}
]
)
]
result = convert_to_openai_messages(messages, text_format="block")
assert result[0]["tool_calls"][0]["type"] == "function"
assert result[0]["tool_calls"][0]["id"] == "123"
assert result[0]["tool_calls"][0]["function"]["name"] == "create_customer"
# Ensure Unicode characters are preserved, not escaped as \\uXXXX
arguments_str = result[0]["tool_calls"][0]["function"]["arguments"]
parsed_args = json.loads(arguments_str)
assert parsed_args["customer_name"] == "你好啊集团"
# Also ensure the raw JSON string contains Unicode, not escaped sequences
assert "你好啊集团" in arguments_str
assert "\\u4f60" not in arguments_str # Should not contain escaped Unicode
def test_convert_to_openai_messages_json() -> None:
json_data = {"key": "value"}
messages = [HumanMessage(content=[{"type": "json", "json": json_data}])]
result = convert_to_openai_messages(messages, text_format="block")
assert result[0]["content"][0]["type"] == "text"
assert json.loads(result[0]["content"][0]["text"]) == json_data
def test_convert_to_openai_messages_guard_content() -> None:
messages = [
HumanMessage(
content=[
{
"type": "guard_content",
"guard_content": {"text": "Protected content"},
}
]
)
]
result = convert_to_openai_messages(messages, text_format="block")
assert result[0]["content"][0]["type"] == "text"
assert result[0]["content"][0]["text"] == "Protected content"
def test_convert_to_openai_messages_invalid_block() -> None:
messages = [HumanMessage(content=[{"type": "invalid", "foo": "bar"}])]
with pytest.raises(ValueError, match="Unrecognized content block"):
convert_to_openai_messages(messages, text_format="block")
def test_convert_to_openai_messages_empty_message() -> None:
result = convert_to_openai_messages(HumanMessage(content=""))
assert result == {"role": "user", "content": ""}
def test_convert_to_openai_messages_empty_list() -> None:
result = convert_to_openai_messages([])
assert result == []
def test_convert_to_openai_messages_mixed_content_types() -> None:
messages = [
HumanMessage(
content=[
"Text message",
{"type": "text", "text": "Structured text"},
{"type": "image_url", "image_url": {"url": create_base64_image()}},
]
)
]
result = convert_to_openai_messages(messages, text_format="block")
assert len(result[0]["content"]) == 3
assert isinstance(result[0]["content"][0], dict)
assert isinstance(result[0]["content"][1], dict)
assert isinstance(result[0]["content"][2], dict)
def test_convert_to_openai_messages_developer() -> None:
messages: list = [
SystemMessage("a", additional_kwargs={"__openai_role__": "developer"}),
{"role": "developer", "content": "a"},
]
result = convert_to_openai_messages(messages)
assert result == [{"role": "developer", "content": "a"}] * 2
def test_convert_to_openai_messages_multimodal() -> None:
"""v0 and v1 content to OpenAI messages conversion."""
messages = [
HumanMessage(
content=[
# Prior v0 blocks
{"type": "text", "text": "Text message"},
{
"type": "image",
"url": "https://example.com/test.png",
},
{
"type": "image",
"source_type": "base64",
"data": "<base64 string>",
"mime_type": "image/png",
},
{
"type": "file",
"source_type": "base64",
"data": "<base64 string>",
"mime_type": "application/pdf",
"filename": "test.pdf",
},
{
# OpenAI Chat Completions file format
"type": "file",
"file": {
"filename": "draconomicon.pdf",
"file_data": "data:application/pdf;base64,<base64 string>",
},
},
{
"type": "file",
"source_type": "id",
"id": "file-abc123",
},
{
"type": "audio",
"source_type": "base64",
"data": "<base64 string>",
"mime_type": "audio/wav",
},
{
"type": "input_audio",
"input_audio": {
"data": "<base64 string>",
"format": "wav",
},
},
# v1 Additions
{
"type": "image",
"source_type": "url", # backward compatibility v0 block field
"url": "https://example.com/test.png",
},
{
"type": "image",
"base64": "<base64 string>",
"mime_type": "image/png",
},
{
"type": "file",
"base64": "<base64 string>",
"mime_type": "application/pdf",
"filename": "test.pdf", # backward compatibility v0 block field
},
{
"type": "file",
"file_id": "file-abc123",
},
{
"type": "audio",
"base64": "<base64 string>",
"mime_type": "audio/wav",
},
]
)
]
result = convert_to_openai_messages(messages, text_format="block")
assert len(result) == 1
message = result[0]
assert len(message["content"]) == 13
# Test auto-adding filename
messages = [
HumanMessage(
content=[
{
"type": "file",
"base64": "<base64 string>",
"mime_type": "application/pdf",
},
]
)
]
with pytest.warns(match="filename"):
result = convert_to_openai_messages(messages, text_format="block")
assert len(result) == 1
message = result[0]
assert len(message["content"]) == 1
block = message["content"][0]
assert block == {
# OpenAI Chat Completions file format
"type": "file",
"file": {
"file_data": "data:application/pdf;base64,<base64 string>",
"filename": "LC_AUTOGENERATED",
},
}
def test_count_tokens_approximately_empty_messages() -> None:
# Test with empty message list
assert count_tokens_approximately([]) == 0
# Test with empty content
messages = [HumanMessage(content="")]
# 4 role chars -> 1 + 3 = 4 tokens
assert count_tokens_approximately(messages) == 4
def test_count_tokens_approximately_with_names() -> None:
messages = [
# 5 chars + 4 role chars -> 3 + 3 = 6 tokens
# (with name: extra 4 name chars, so total = 4 + 3 = 7 tokens)
HumanMessage(content="Hello", name="user"),
# 8 chars + 9 role chars -> 5 + 3 = 8 tokens
# (with name: extra 9 name chars, so total = 7 + 3 = 10 tokens)
AIMessage(content="Hi there", name="assistant"),
]
# With names included (default)
assert count_tokens_approximately(messages) == 17
# Without names
without_names = count_tokens_approximately(messages, count_name=False)
assert without_names == 14
def test_count_tokens_approximately_openai_format() -> None:
# same as test_count_tokens_approximately_with_names, but in OpenAI format
messages = [
{"role": "user", "content": "Hello", "name": "user"},
{"role": "assistant", "content": "Hi there", "name": "assistant"},
]
# With names included (default)
assert count_tokens_approximately(messages) == 17
# Without names
without_names = count_tokens_approximately(messages, count_name=False)
assert without_names == 14
def test_count_tokens_approximately_string_content() -> None:
messages = [
# 5 chars + 4 role chars -> 3 + 3 = 6 tokens
HumanMessage(content="Hello"),
# 8 chars + 9 role chars -> 5 + 3 = 8 tokens
AIMessage(content="Hi there"),
# 12 chars + 4 role chars -> 4 + 3 = 7 tokens
HumanMessage(content="How are you?"),
]
assert count_tokens_approximately(messages) == 21
def test_count_tokens_approximately_list_content() -> None:
messages = [
# '[{"foo": "bar"}]' -> 16 chars + 4 role chars -> 5 + 3 = 8 tokens
HumanMessage(content=[{"foo": "bar"}]),
# '[{"test": 123}]' -> 15 chars + 9 role chars -> 6 + 3 = 9 tokens
AIMessage(content=[{"test": 123}]),
]
assert count_tokens_approximately(messages) == 17
def test_count_tokens_approximately_tool_calls() -> None:
tool_calls = [{"name": "test_tool", "args": {"foo": "bar"}, "id": "1"}]
messages = [
# tool calls json -> 79 chars + 9 role chars -> 22 + 3 = 25 tokens
AIMessage(content="", tool_calls=tool_calls),
# 15 chars + 4 role chars -> 5 + 3 = 8 tokens
HumanMessage(content="Regular message"),
]
assert count_tokens_approximately(messages) == 33
# AI message w/ both content and tool calls
# 94 chars + 9 role chars -> 26 + 3 = 29 tokens
messages = [
AIMessage(content="Regular message", tool_calls=tool_calls),
]
assert count_tokens_approximately(messages) == 29
def test_count_tokens_approximately_custom_token_length() -> None:
messages = [
# 11 chars + 4 role chars -> (4 tokens of length 4 / 8 tokens of length 2) + 3
HumanMessage(content="Hello world"),
# 7 chars + 9 role chars -> (4 tokens of length 4 / 8 tokens of length 2) + 3
AIMessage(content="Testing"),
]
assert count_tokens_approximately(messages, chars_per_token=4) == 14
assert count_tokens_approximately(messages, chars_per_token=2) == 22
def test_count_tokens_approximately_large_message_content() -> None:
# Test with large content to ensure no issues
large_text = "x" * 10000
messages = [HumanMessage(content=large_text)]
# 10,000 chars + 4 role chars -> 2501 + 3 = 2504 tokens
assert count_tokens_approximately(messages) == 2504
def test_count_tokens_approximately_large_number_of_messages() -> None:
# Test with large content to ensure no issues
messages = [HumanMessage(content="x")] * 1_000
# 1 chars + 4 role chars -> 2 + 3 = 5 tokens
assert count_tokens_approximately(messages) == 5_000
def test_count_tokens_approximately_mixed_content_types() -> None:
# Test with a variety of content types in the same message list
tool_calls = [{"name": "test_tool", "args": {"foo": "bar"}, "id": "1"}]
messages = [
# 13 chars + 6 role chars -> 5 + 3 = 8 tokens
SystemMessage(content="System prompt"),
# '[{"foo": "bar"}]' -> 16 chars + 4 role chars -> 5 + 3 = 8 tokens
HumanMessage(content=[{"foo": "bar"}]),
# tool calls json -> 79 chars + 9 role chars -> 22 + 3 = 25 tokens
AIMessage(content="", tool_calls=tool_calls),
# 13 chars + 4 role chars + 9 name chars + 1 tool call ID char ->
# 7 + 3 = 10 tokens
ToolMessage(content="Tool response", name="test_tool", tool_call_id="1"),
]
token_count = count_tokens_approximately(messages)
assert token_count == 51
# Ensure that count is consistent if we do one message at a time
assert sum(count_tokens_approximately([m]) for m in messages) == token_count
def test_get_buffer_string_with_structured_content() -> None:
"""Test get_buffer_string with structured content in messages."""
messages = [
HumanMessage(content=[{"type": "text", "text": "Hello, world!"}]),
AIMessage(content=[{"type": "text", "text": "Hi there!"}]),
SystemMessage(content=[{"type": "text", "text": "System message"}]),
]
expected = "Human: Hello, world!\nAI: Hi there!\nSystem: System message"
actual = get_buffer_string(messages)
assert actual == expected
def test_get_buffer_string_with_mixed_content() -> None:
"""Test get_buffer_string with mixed content types in messages."""
messages = [
HumanMessage(content="Simple text"),
AIMessage(content=[{"type": "text", "text": "Structured text"}]),
SystemMessage(content=[{"type": "text", "text": "Another structured text"}]),
]
expected = (
"Human: Simple text\nAI: Structured text\nSystem: Another structured text"
)
actual = get_buffer_string(messages)
assert actual == expected
def test_get_buffer_string_with_function_call() -> None:
"""Test get_buffer_string with function call in additional_kwargs."""
messages = [
HumanMessage(content="Hello"),
AIMessage(
content="Hi",
additional_kwargs={
"function_call": {
"name": "test_function",
"arguments": '{"arg": "value"}',
}
},
),
]
# TODO: consider changing this
expected = (
"Human: Hello\n"
"AI: Hi{'name': 'test_function', 'arguments': '{\"arg\": \"value\"}'}"
)
actual = get_buffer_string(messages)
assert actual == expected
def test_get_buffer_string_with_empty_content() -> None:
"""Test get_buffer_string with empty content in messages."""
messages = [
HumanMessage(content=[]),
AIMessage(content=""),
SystemMessage(content=[]),
]
expected = "Human: \nAI: \nSystem: "
actual = get_buffer_string(messages)
assert actual == expected
def test_convert_to_openai_messages_reasoning_content() -> None:
"""Test convert_to_openai_messages with reasoning content blocks."""
# Test reasoning block with empty summary
msg = AIMessage(content=[{"type": "reasoning", "summary": []}])
result = convert_to_openai_messages(msg, text_format="block")
expected = {"role": "assistant", "content": [{"type": "reasoning", "summary": []}]}
assert result == expected
# Test reasoning block with summary content
msg_with_summary = AIMessage(
content=[
{
"type": "reasoning",
"summary": [
{"type": "text", "text": "First thought"},
{"type": "text", "text": "Second thought"},
],
}
]
)
result_with_summary = convert_to_openai_messages(
msg_with_summary, text_format="block"
)
expected_with_summary = {
"role": "assistant",
"content": [
{
"type": "reasoning",
"summary": [
{"type": "text", "text": "First thought"},
{"type": "text", "text": "Second thought"},
],
}
],
}
assert result_with_summary == expected_with_summary
# Test mixed content with reasoning and text
mixed_msg = AIMessage(
content=[
{"type": "text", "text": "Regular response"},
{
"type": "reasoning",
"summary": [{"type": "text", "text": "My reasoning process"}],
},
]
)
mixed_result = convert_to_openai_messages(mixed_msg, text_format="block")
expected_mixed = {
"role": "assistant",
"content": [
{"type": "text", "text": "Regular response"},
{
"type": "reasoning",
"summary": [{"type": "text", "text": "My reasoning process"}],
},
],
}
assert mixed_result == expected_mixed
| FakeTokenCountingModel |
python | PrefectHQ__prefect | src/prefect/cache_policies.py | {
"start": 9900,
"end": 10357
} | class ____(CachePolicy):
"""
Policy that computes the cache key based on a hash of the flow parameters.
"""
def compute_key(
self,
task_ctx: TaskRunContext,
inputs: dict[str, Any],
flow_parameters: dict[str, Any],
**kwargs: Any,
) -> Optional[str]:
if not flow_parameters:
return None
return hash_objects(flow_parameters, raise_on_failure=True)
@dataclass
| FlowParameters |
python | sphinx-doc__sphinx | sphinx/domains/python/__init__.py | {
"start": 8816,
"end": 9097
} | class ____(PyMethod):
"""Description of a classmethod."""
option_spec: ClassVar[OptionSpec] = PyObject.option_spec.copy()
def run(self) -> list[Node]:
self.name = 'py:method'
self.options['classmethod'] = True
return super().run()
| PyClassMethod |
python | huggingface__transformers | tests/models/speecht5/test_modeling_speecht5.py | {
"start": 35045,
"end": 40239
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (SpeechT5ForTextToSpeech,) if is_torch_available() else ()
all_generative_model_classes = ()
is_encoder_decoder = True
def setUp(self):
self.model_tester = SpeechT5ForTextToSpeechTester(self)
self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model_can_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertTrue(model.can_generate())
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], set())
def test_model_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_forward(*config_and_inputs)
def test_model_forward_with_labels(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
model = SpeechT5ForTextToSpeech(config=config).to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
decoder_attention_mask = inputs_dict["decoder_attention_mask"]
labels = inputs_dict["decoder_input_values"]
result = model(
input_ids, attention_mask=attention_mask, labels=labels, decoder_attention_mask=decoder_attention_mask
)
self.assertEqual(
result.spectrogram.shape,
(self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.num_mel_bins),
)
@unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
def test_decoder_model_past_with_large_inputs(self):
pass
@unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
def test_determinism(self):
pass
@unittest.skip(reason="skipped because there is always dropout in SpeechT5SpeechDecoderPrenet")
def test_batching_equivalence(self):
pass
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_values",
"decoder_attention_mask",
]
expected_arg_names.extend(["encoder_outputs"])
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@unittest.skip(reason="Model has no inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
def test_model_outputs_equivalence(self):
pass
@unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
def test_save_load(self):
pass
@unittest.skip(reason="Decoder cannot keep gradients")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="training is not supported yet")
def test_training(self):
pass
@unittest.skip(reason="training is not supported yet")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.fill_(3)
if hasattr(module, "weight_g") and module.weight_g is not None:
module.weight_g.data.fill_(3)
if hasattr(module, "weight_v") and module.weight_v is not None:
module.weight_v.data.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.fill_(3)
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
| SpeechT5ForTextToSpeechTest |
python | pytorch__pytorch | torch/_dynamo/exc.py | {
"start": 2294,
"end": 2535
} | class ____(TorchDynamoException):
restart_reason: Optional[str]
def __init__(self, *args: Any, restart_reason: Optional[str] = None) -> None:
self.restart_reason = restart_reason
super().__init__(*args)
| RestartAnalysis |
python | uqfoundation__dill | dill/tests/test_selected.py | {
"start": 872,
"end": 3069
} | class ____(object):
def _method(self):
pass
from dill import objects
from dill import load_types
load_types(pickleable=True,unpickleable=False)
_newclass = objects['ClassObjectType']
# some clean-up #FIXME: should happen internal to dill
objects['TemporaryFileType'].close()
objects['FileType'].close()
del objects
# getset_descriptor for new-style classes (fails on '_method', if not __main__)
def test_class_descriptors():
d = _d.__dict__
for i in d.values():
ok = dill.pickles(i)
if verbose: print ("%s: %s, %s" % (ok, type(i), i))
assert ok
if verbose: print ("")
od = _newclass.__dict__
for i in od.values():
ok = dill.pickles(i)
if verbose: print ("%s: %s, %s" % (ok, type(i), i))
assert ok
if verbose: print ("")
# (__main__) class instance for new-style classes
def test_class():
o = _d()
oo = _newclass()
ok = dill.pickles(o)
if verbose: print ("%s: %s, %s" % (ok, type(o), o))
assert ok
ok = dill.pickles(oo)
if verbose: print ("%s: %s, %s" % (ok, type(oo), oo))
assert ok
if verbose: print ("")
# frames, generators, and tracebacks (all depend on frame)
def test_frame_related():
g = _g(1)
f = g.gi_frame
e,t = _f()
_is = lambda ok: ok
ok = dill.pickles(f)
if verbose: print ("%s: %s, %s" % (ok, type(f), f))
assert not ok
ok = dill.pickles(g)
if verbose: print ("%s: %s, %s" % (ok, type(g), g))
assert _is(not ok) #XXX: dill fails
ok = dill.pickles(t)
if verbose: print ("%s: %s, %s" % (ok, type(t), t))
assert not ok #XXX: dill fails
ok = dill.pickles(e)
if verbose: print ("%s: %s, %s" % (ok, type(e), e))
assert ok
if verbose: print ("")
def test_typing():
import typing
x = typing.Any
assert x == dill.copy(x)
x = typing.Dict[int, str]
assert x == dill.copy(x)
x = typing.List[int]
assert x == dill.copy(x)
x = typing.Tuple[int, str]
assert x == dill.copy(x)
x = typing.Tuple[int]
assert x == dill.copy(x)
x = typing.Tuple[()]
assert x == dill.copy(x)
x = typing.Tuple[()].copy_with(())
assert x == dill.copy(x)
return
if __name__ == '__main__':
test_frame_related()
test_dict_contents()
test_class()
test_class_descriptors()
test_typing()
| _d |
python | fastai__fastai | fastai/tabular/core.py | {
"start": 13480,
"end": 13750
} | class ____:
"Namespace containing the various filling strategies."
def median (c,fill): return c.median()
def constant(c,fill): return fill
def mode (c,fill): return c.dropna().value_counts().idxmax()
# %% ../../nbs/40_tabular.core.ipynb 81
| FillStrategy |
python | ray-project__ray | release/train_tests/xgboost_lightgbm/train_batch_inference_benchmark.py | {
"start": 1325,
"end": 1531
} | class ____(BasePredictor):
def __call__(self, data: pd.DataFrame) -> Dict[str, np.ndarray]:
dmatrix = xgb.DMatrix(data)
return {"predictions": self.model.predict(dmatrix)}
| XGBoostPredictor |
python | getsentry__sentry | src/sentry/interfaces/message.py | {
"start": 308,
"end": 1282
} | class ____(Interface):
"""
A message consisting of either a ``formatted`` arg, or an optional
``message`` with a list of ``params``.
- ``message`` and ``formatted`` are limited to 1000 characters.
>>> {
>>> "message": "My raw message with interpreted strings like %s",
>>> "formatted": "My raw message with interpreted strings like this",
>>> "params": ["this"]
>>> }
"""
score = 0
display_score = 2050
path = "logentry"
external_type = "message"
@classmethod
def to_python(cls, data, **kwargs):
for key in ("message", "formatted", "params"):
data.setdefault(key, None)
return super().to_python(data, **kwargs)
def to_json(self):
return prune_empty_keys(
{"message": self.message, "formatted": self.formatted, "params": self.params or None}
)
def to_string(self, event) -> str:
return self.formatted or self.message
| Message |
python | doocs__leetcode | solution/1800-1899/1889.Minimum Space Wasted From Packaging/Solution.py | {
"start": 0,
"end": 550
} | class ____:
def minWastedSpace(self, packages: List[int], boxes: List[List[int]]) -> int:
mod = 10**9 + 7
ans = inf
packages.sort()
for box in boxes:
box.sort()
if packages[-1] > box[-1]:
continue
s = i = 0
for b in box:
j = bisect_right(packages, b, lo=i)
s += (j - i) * b
i = j
ans = min(ans, s)
if ans == inf:
return -1
return (ans - sum(packages)) % mod
| Solution |
python | vyperlang__vyper | vyper/venom/memory_location.py | {
"start": 438,
"end": 3388
} | class ____:
# Initialize after class definition
EMPTY: ClassVar[MemoryLocation]
UNDEFINED: ClassVar[MemoryLocation]
@classmethod
def from_operands(
cls, offset: IROperand | int, size: IROperand | int, var_base_pointers: dict
) -> MemoryLocation:
if isinstance(size, IRLiteral):
_size = size.value
elif isinstance(size, IRVariable):
_size = None
elif isinstance(size, int):
_size = size
else: # pragma: nocover
raise CompilerPanic(f"invalid size: {size} ({type(size)})")
if isinstance(offset, IRLiteral):
return MemoryLocationSegment(offset.value, size=_size)
elif isinstance(offset, IRVariable):
op = var_base_pointers.get(offset, None)
if op is None:
return MemoryLocationSegment(offset=None, size=_size)
else:
segment = MemoryLocationSegment(offset=None, size=_size)
return MemoryLocationAbstract(op=op, segment=segment)
elif isinstance(offset, IRAbstractMemLoc):
op = offset
segment = MemoryLocationSegment(offset=op.offset, size=_size)
return MemoryLocationAbstract(op=op, segment=segment)
else: # pragma: nocover
raise CompilerPanic(f"invalid offset: {offset} ({type(offset)})")
def is_empty(self) -> bool: # pragma: nocover
raise NotImplementedError
@property
def is_offset_fixed(self) -> bool: # pragma: nocover
raise NotImplementedError
@property
def is_size_fixed(self) -> bool: # pragma: nocover
raise NotImplementedError
@property
def is_fixed(self) -> bool: # pragma: nocover
raise NotImplementedError
@property
def is_volatile(self) -> bool: # pragma: nocover
raise NotImplementedError
@staticmethod
def may_overlap(loc1: MemoryLocation, loc2: MemoryLocation) -> bool:
if loc1.is_empty() or loc2.is_empty():
return False
if not loc1.is_offset_fixed or not loc2.is_offset_fixed:
return True
if loc1 is MemoryLocation.UNDEFINED or loc2 is MemoryLocation.UNDEFINED:
return True
if type(loc1) is not type(loc2):
return False
if isinstance(loc1, MemoryLocationSegment):
assert isinstance(loc2, MemoryLocationSegment)
return MemoryLocationSegment.may_overlap_concrete(loc1, loc2)
if isinstance(loc1, MemoryLocationAbstract):
assert isinstance(loc2, MemoryLocationAbstract)
return MemoryLocationAbstract.may_overlap_abstract(loc1, loc2)
return False
def completely_contains(self, other: MemoryLocation) -> bool: # pragma: nocover
raise NotImplementedError
def mk_volatile(self) -> MemoryLocation: # pragma: nocover
raise NotImplementedError
@dataclass(frozen=True)
| MemoryLocation |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_inlinehilite.py | {
"start": 756,
"end": 3137
} | class ____(util.MdCase):
"""Test general cases for inline highlight."""
extension = [
'markdown.extensions.attr_list',
'pymdownx.highlight',
'pymdownx.inlinehilite',
]
extension_configs = {
'pymdownx.inlinehilite': {
'style_plain_text': True,
'css_class': 'inlinehilite'
}
}
def test_language(self):
"""Test language handling."""
# Test #! original syntax
self.check_markdown(
r'`#!python import module`.',
R'<p><code class="inlinehilite">'
R'<span class="kn">import</span><span class="w"> </span><span class="nn">module</span>'
R'</code>.</p>'
)
# Test ::: syntax
self.check_markdown(
r'`:::python import module`.',
r'<p><code class="inlinehilite"><span class="kn">import</span><span class="w"> </span><span class="nn">module</span></code>.</p>' # noqa: E501
)
# Test escaping language with space
self.check_markdown(
r'` #!python import module`.',
r'<p><code class="inlinehilite">#!python import module</code>.</p>'
)
# Test bad language
self.check_markdown(
r'`#!bad import module`.',
r'<p><code class="inlinehilite">import module</code>.</p>'
)
def test_escape(self):
"""Test backtick escape logic."""
self.check_markdown(
r'`Code`',
r'<p><code class="inlinehilite">Code</code></p>'
)
self.check_markdown(
r'\`Not code`',
r'<p>`Not code`</p>'
)
self.check_markdown(
r'\\`Code`',
r'<p>\<code class="inlinehilite">Code</code></p>'
)
self.check_markdown(
r'\\\`Not code`',
r'<p>\`Not code`</p>'
)
self.check_markdown(
r'\\\\`Code`',
r'<p>\\<code class="inlinehilite">Code</code></p>'
)
def test_attributes(self):
"""Test with attribute extension."""
self.check_markdown(
r'`#!python import module`{: .test}',
r'<p><code class="inlinehilite test">'
'<span class="kn">import</span><span class="w"> </span><span class="nn">module</span>'
'</code></p>'
)
| TestInlineHilite |
python | great-expectations__great_expectations | tests/data_context/test_workspace_aware_context.py | {
"start": 10019,
"end": 13268
} | class ____:
"""Test context behavior when GX_CLOUD_WORKSPACE_ID environment variable is set."""
@pytest.mark.unit
def test_get_context_uses_env_workspace_id(
self,
unset_gx_env_variables: None,
monkeypatch: pytest.MonkeyPatch,
mock_cloud_config_params: dict[str, Any],
sample_user_with_multiple_workspaces: CloudUserInfo,
mock_project_config: dict[str, Any],
):
"""Test that get_context() uses workspace_id from environment variable."""
expected_workspace_id = "env-workspace-123"
monkeypatch.setenv(GXCloudEnvironmentVariable.WORKSPACE_ID, expected_workspace_id)
with (
patch(
"great_expectations.data_context.data_context.cloud_data_context.CloudDataContext._get_cloud_user_info",
return_value=sample_user_with_multiple_workspaces,
),
patch(
"great_expectations.data_context.data_context.cloud_data_context.CloudDataContext.retrieve_data_context_config_from_cloud",
return_value=mock_project_config,
),
patch(
"great_expectations.data_context.data_context.cloud_data_context.CloudDataContext._save_project_config"
),
patch(
"great_expectations.data_context.data_context.cloud_data_context.CloudDataContext._check_if_latest_version"
),
):
context = gx.get_context(mode="cloud", **mock_cloud_config_params)
assert isinstance(context, CloudDataContext)
assert context._cloud_config.workspace_id == expected_workspace_id
@pytest.mark.unit
def test_cloud_data_context_uses_env_workspace_id(
self,
unset_gx_env_variables: None,
monkeypatch: pytest.MonkeyPatch,
mock_cloud_config_params: dict[str, Any],
sample_user_with_multiple_workspaces: CloudUserInfo,
mock_project_config: dict[str, Any],
):
"""Test that CloudDataContext uses workspace_id from environment variable."""
expected_workspace_id = "env-workspace-456"
monkeypatch.setenv(GXCloudEnvironmentVariable.WORKSPACE_ID, expected_workspace_id)
with (
patch(
"great_expectations.data_context.data_context.cloud_data_context.CloudDataContext._get_cloud_user_info",
return_value=sample_user_with_multiple_workspaces,
),
patch(
"great_expectations.data_context.data_context.cloud_data_context.CloudDataContext.retrieve_data_context_config_from_cloud",
return_value=mock_project_config,
),
patch(
"great_expectations.data_context.data_context.cloud_data_context.CloudDataContext._save_project_config"
),
patch(
"great_expectations.data_context.data_context.cloud_data_context.CloudDataContext._check_if_latest_version"
),
):
context = CloudDataContext(**mock_cloud_config_params)
assert isinstance(context, CloudDataContext)
assert context._cloud_config.workspace_id == expected_workspace_id
| TestContextWithWorkspaceIdEnvironmentVariable |
python | numpy__numpy | numpy/ma/tests/test_old_ma.py | {
"start": 29518,
"end": 33075
} | class ____:
def _create_data(self):
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
return x, X, XX, m, mx, mX, mXX
def test_trace(self):
_, X, _, _, _, mX, _ = self._create_data()
mXdiag = mX.diagonal()
assert_equal(mX.trace(), mX.diagonal().compressed().sum())
assert_(eq(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0)))
def test_clip(self):
x, _, _, _, mx, _, _ = self._create_data()
clipped = mx.clip(2, 8)
assert_(eq(clipped.mask, mx.mask))
assert_(eq(clipped._data, x.clip(2, 8)))
assert_(eq(clipped._data, mx._data.clip(2, 8)))
def test_ptp(self):
_, X, _, m, mx, mX, _ = self._create_data()
n, m = X.shape
# print(type(mx), mx.compressed())
# raise Exception()
assert_equal(mx.ptp(), np.ptp(mx.compressed()))
rows = np.zeros(n, np.float64)
cols = np.zeros(m, np.float64)
for k in range(m):
cols[k] = np.ptp(mX[:, k].compressed())
for k in range(n):
rows[k] = np.ptp(mX[k].compressed())
assert_(eq(mX.ptp(0), cols))
assert_(eq(mX.ptp(1), rows))
def test_swapaxes(self):
_, _, _, _, _, mX, mXX = self._create_data()
mXswapped = mX.swapaxes(0, 1)
assert_(eq(mXswapped[-1], mX[:, -1]))
mXXswapped = mXX.swapaxes(0, 2)
assert_equal(mXXswapped.shape, (2, 2, 3, 3))
def test_cumprod(self):
mX = self._create_data()[5]
mXcp = mX.cumprod(0)
assert_(eq(mXcp._data, mX.filled(1).cumprod(0)))
mXcp = mX.cumprod(1)
assert_(eq(mXcp._data, mX.filled(1).cumprod(1)))
def test_cumsum(self):
mX = self._create_data()[5]
mXcp = mX.cumsum(0)
assert_(eq(mXcp._data, mX.filled(0).cumsum(0)))
mXcp = mX.cumsum(1)
assert_(eq(mXcp._data, mX.filled(0).cumsum(1)))
def test_varstd(self):
_, X, XX, _, _, mX, mXX = self._create_data()
assert_(eq(mX.var(axis=None), mX.compressed().var()))
assert_(eq(mX.std(axis=None), mX.compressed().std()))
assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape))
assert_(eq(mX.var().shape, X.var().shape))
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
for k in range(6):
assert_(eq(mXvar1[k], mX[k].compressed().var()))
assert_(eq(mXvar0[k], mX[:, k].compressed().var()))
assert_(eq(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std()))
def eqmask(m1, m2):
if m1 is nomask:
return m2 is nomask
if m2 is nomask:
return m1 is nomask
return (m1 == m2).all()
| TestArrayMethods |
python | kamyu104__LeetCode-Solutions | Python/print-binary-tree.py | {
"start": 41,
"end": 999
} | class ____(object):
def printTree(self, root):
"""
:type root: TreeNode
:rtype: List[List[str]]
"""
def getWidth(root):
if not root:
return 0
return 2 * max(getWidth(root.left), getWidth(root.right)) + 1
def getHeight(root):
if not root:
return 0
return max(getHeight(root.left), getHeight(root.right)) + 1
def preorderTraversal(root, level, left, right, result):
if not root:
return
mid = left + (right-left)/2
result[level][mid] = str(root.val)
preorderTraversal(root.left, level+1, left, mid-1, result)
preorderTraversal(root.right, level+1, mid+1, right, result)
h, w = getHeight(root), getWidth(root)
result = [[""] * w for _ in xrange(h)]
preorderTraversal(root, 0, 0, w-1, result)
return result
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/source_google_ads/components.py | {
"start": 22161,
"end": 22689
} | class ____(StateMigration):
"""
Migrates global state to include use_global_cursor key. Previously legacy GlobalSubstreamCursor was used.
"""
def should_migrate(self, stream_state: Mapping[str, Any]) -> bool:
return stream_state and not stream_state.get("use_global_cursor")
def migrate(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
stream_state["use_global_cursor"] = True
return stream_state
@dataclass(repr=False, eq=False, frozen=True)
| GoogleAdsGlobalStateMigration |
python | huggingface__transformers | src/transformers/pipelines/audio_classification.py | {
"start": 2039,
"end": 11074
} | class ____(Pipeline):
"""
Audio classification pipeline using any `AutoModelForAudioClassification`. This pipeline predicts the class of a
raw waveform or an audio file. In case of an audio file, ffmpeg should be installed to support multiple audio
formats.
Example:
```python
>>> from transformers import pipeline
>>> classifier = pipeline(model="superb/wav2vec2-base-superb-ks")
>>> classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")
[{'score': 0.997, 'label': '_unknown_'}, {'score': 0.002, 'label': 'left'}, {'score': 0.0, 'label': 'yes'}, {'score': 0.0, 'label': 'down'}, {'score': 0.0, 'label': 'stop'}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"audio-classification"`.
See the list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=audio-classification).
"""
_load_processor = False
_load_image_processor = False
_load_feature_extractor = True
_load_tokenizer = False
def __init__(self, *args, **kwargs):
# Only set default top_k if explicitly provided
if "top_k" in kwargs and kwargs["top_k"] is None:
kwargs["top_k"] = None
elif "top_k" not in kwargs:
kwargs["top_k"] = 5
super().__init__(*args, **kwargs)
self.check_model_type(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES)
def __call__(self, inputs: np.ndarray | bytes | str | dict, **kwargs: Any) -> list[dict[str, Any]]:
"""
Classify the sequence(s) given as inputs. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more
information.
Args:
inputs (`np.ndarray` or `bytes` or `str` or `dict`):
The inputs is either :
- `str` that is the filename of the audio file, the file will be read at the correct sampling rate
to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system.
- `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the
same way.
- (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`)
Raw audio at the correct sampling rate (no further check will be done)
- `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this
pipeline do the resampling. The dict must be either be in the format `{"sampling_rate": int,
"raw": np.array}`, or `{"sampling_rate": int, "array": np.array}`, where the key `"raw"` or
`"array"` is used to denote the raw audio waveform.
top_k (`int`, *optional*, defaults to None):
The number of top labels that will be returned by the pipeline. If the provided number is `None` or
higher than the number of labels available in the model configuration, it will default to the number of
labels.
function_to_apply(`str`, *optional*, defaults to "softmax"):
The function to apply to the model output. By default, the pipeline will apply the softmax function to
the output of the model. Valid options: ["softmax", "sigmoid", "none"]. Note that passing Python's
built-in `None` will default to "softmax", so you need to pass the string "none" to disable any
post-processing.
Return:
A list of `dict` with the following keys:
- **label** (`str`) -- The label predicted.
- **score** (`float`) -- The corresponding probability.
"""
return super().__call__(inputs, **kwargs)
def _sanitize_parameters(self, top_k=None, function_to_apply=None, **kwargs):
postprocess_params = {}
# If top_k is None, use all labels
if top_k is None:
postprocess_params["top_k"] = self.model.config.num_labels
else:
if top_k > self.model.config.num_labels:
top_k = self.model.config.num_labels
postprocess_params["top_k"] = top_k
if function_to_apply is not None:
if function_to_apply not in ["softmax", "sigmoid", "none"]:
raise ValueError(
f"Invalid value for `function_to_apply`: {function_to_apply}. "
"Valid options are ['softmax', 'sigmoid', 'none']"
)
postprocess_params["function_to_apply"] = function_to_apply
else:
postprocess_params["function_to_apply"] = "softmax"
return {}, {}, postprocess_params
def preprocess(self, inputs):
if isinstance(inputs, str):
if inputs.startswith("http://") or inputs.startswith("https://"):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
inputs = httpx.get(inputs, follow_redirects=True).content
else:
with open(inputs, "rb") as f:
inputs = f.read()
if isinstance(inputs, bytes):
inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)
if is_torch_available():
import torch
if isinstance(inputs, torch.Tensor):
inputs = inputs.cpu().numpy()
if is_torchcodec_available():
import torch
import torchcodec
if isinstance(inputs, torchcodec.decoders.AudioDecoder):
_audio_samples = inputs.get_all_samples()
_array = _audio_samples.data
inputs = {"array": _array, "sampling_rate": _audio_samples.sample_rate}
if isinstance(inputs, dict):
inputs = inputs.copy() # So we don't mutate the original dictionary outside the pipeline
# Accepting `"array"` which is the key defined in `datasets` for
# better integration
if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)):
raise ValueError(
"When passing a dictionary to AudioClassificationPipeline, the dict needs to contain a "
'"raw" key containing the numpy array or torch tensor representing the audio and a "sampling_rate" key, '
"containing the sampling_rate associated with that array"
)
_inputs = inputs.pop("raw", None)
if _inputs is None:
# Remove path which will not be used from `datasets`.
inputs.pop("path", None)
_inputs = inputs.pop("array", None)
in_sampling_rate = inputs.pop("sampling_rate")
inputs = _inputs
if in_sampling_rate != self.feature_extractor.sampling_rate:
import torch
if is_torchaudio_available():
from torchaudio import functional as F
else:
raise ImportError(
"torchaudio is required to resample audio samples in AudioClassificationPipeline. "
"The torchaudio package can be installed through: `pip install torchaudio`."
)
inputs = F.resample(
torch.from_numpy(inputs) if isinstance(inputs, np.ndarray) else inputs,
in_sampling_rate,
self.feature_extractor.sampling_rate,
).numpy()
if not isinstance(inputs, np.ndarray):
raise TypeError("We expect a numpy ndarray or torch tensor as input")
if len(inputs.shape) != 1:
raise ValueError("We expect a single channel audio input for AudioClassificationPipeline")
processed = self.feature_extractor(
inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
)
if self.dtype is not None:
processed = processed.to(dtype=self.dtype)
return processed
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, top_k=5, function_to_apply="softmax"):
if function_to_apply == "softmax":
probs = model_outputs.logits[0].softmax(-1)
elif function_to_apply == "sigmoid":
probs = model_outputs.logits[0].sigmoid()
else:
probs = model_outputs.logits[0]
scores, ids = probs.topk(top_k)
scores = scores.tolist()
ids = ids.tolist()
labels = [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
return labels
| AudioClassificationPipeline |
python | django-haystack__django-haystack | test_haystack/whoosh_tests/test_forms.py | {
"start": 297,
"end": 1302
} | class ____(LiveWhooshRoundTripTestCase):
fixtures = ["base_data"]
def setUp(self):
self.old_spelling_setting = settings.HAYSTACK_CONNECTIONS["whoosh"].get(
"INCLUDE_SPELLING", False
)
settings.HAYSTACK_CONNECTIONS["whoosh"]["INCLUDE_SPELLING"] = True
super().setUp()
def tearDown(self):
settings.HAYSTACK_CONNECTIONS["whoosh"][
"INCLUDE_SPELLING"
] = self.old_spelling_setting
super().tearDown()
def test_form_suggestion(self):
form = SearchForm({"q": "exampl"}, searchqueryset=SearchQuerySet("whoosh"))
self.assertEqual(form.get_suggestion(), "example")
def test_view_suggestion(self):
view = SearchView(
template="test_suggestion.html", searchqueryset=SearchQuerySet("whoosh")
)
mock = HttpRequest()
mock.GET["q"] = "exampl"
resp = view(mock)
self.assertEqual(resp.content, b"Suggestion: example\n")
| SpellingSuggestionTestCase |
python | pypa__pipenv | pipenv/patched/pip/_vendor/typing_extensions.py | {
"start": 59103,
"end": 75938
} | class ____(type):
def __instancecheck__(cls, __instance: Any) -> bool:
return isinstance(__instance, cls._backported_typevarlike)
if _PEP_696_IMPLEMENTED:
from typing import TypeVar
else:
# Add default and infer_variance parameters from PEP 696 and 695
class TypeVar(metaclass=_TypeVarLikeMeta):
"""Type variable."""
_backported_typevarlike = typing.TypeVar
def __new__(cls, name, *constraints, bound=None,
covariant=False, contravariant=False,
default=NoDefault, infer_variance=False):
if hasattr(typing, "TypeAliasType"):
# PEP 695 implemented (3.12+), can pass infer_variance to typing.TypeVar
typevar = typing.TypeVar(name, *constraints, bound=bound,
covariant=covariant, contravariant=contravariant,
infer_variance=infer_variance)
else:
typevar = typing.TypeVar(name, *constraints, bound=bound,
covariant=covariant, contravariant=contravariant)
if infer_variance and (covariant or contravariant):
raise ValueError("Variance cannot be specified with infer_variance.")
typevar.__infer_variance__ = infer_variance
_set_default(typevar, default)
_set_module(typevar)
def _tvar_prepare_subst(alias, args):
if (
typevar.has_default()
and alias.__parameters__.index(typevar) == len(args)
):
args += (typevar.__default__,)
return args
typevar.__typing_prepare_subst__ = _tvar_prepare_subst
return typevar
def __init_subclass__(cls) -> None:
raise TypeError(f"type '{__name__}.TypeVar' is not an acceptable base type")
# Python 3.10+ has PEP 612
if hasattr(typing, 'ParamSpecArgs'):
ParamSpecArgs = typing.ParamSpecArgs
ParamSpecKwargs = typing.ParamSpecKwargs
# 3.8-3.9
else:
class _Immutable:
"""Mixin to indicate that object should not be copied."""
__slots__ = ()
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
class ParamSpecArgs(_Immutable):
"""The args for a ParamSpec object.
Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
ParamSpecArgs objects have a reference back to their ParamSpec:
P.args.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.args"
def __eq__(self, other):
if not isinstance(other, ParamSpecArgs):
return NotImplemented
return self.__origin__ == other.__origin__
class ParamSpecKwargs(_Immutable):
"""The kwargs for a ParamSpec object.
Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
ParamSpecKwargs objects have a reference back to their ParamSpec:
P.kwargs.__origin__ is P
This type is meant for runtime introspection and has no special meaning to
static type checkers.
"""
def __init__(self, origin):
self.__origin__ = origin
def __repr__(self):
return f"{self.__origin__.__name__}.kwargs"
def __eq__(self, other):
if not isinstance(other, ParamSpecKwargs):
return NotImplemented
return self.__origin__ == other.__origin__
# Select a ParamSpec implementation, newest runtime first:
#   1. the runtime already implements PEP 696 defaults -> re-export typing's.
#   2. typing.ParamSpec exists (3.10+) -> wrap it to add the PEP 696
#      ``default`` parameter (and ``__infer_variance__`` where missing).
#   3. otherwise (3.8-3.9) -> full pure-Python reimplementation.
if _PEP_696_IMPLEMENTED:
    from typing import ParamSpec

# 3.10+
elif hasattr(typing, 'ParamSpec'):

    # Add default parameter - PEP 696
    class ParamSpec(metaclass=_TypeVarLikeMeta):
        """Parameter specification."""

        _backported_typevarlike = typing.ParamSpec

        def __new__(cls, name, *, bound=None,
                    covariant=False, contravariant=False,
                    infer_variance=False, default=NoDefault):
            if hasattr(typing, "TypeAliasType"):
                # PEP 695 implemented, can pass infer_variance to typing.TypeVar
                paramspec = typing.ParamSpec(name, bound=bound,
                                             covariant=covariant,
                                             contravariant=contravariant,
                                             infer_variance=infer_variance)
            else:
                paramspec = typing.ParamSpec(name, bound=bound,
                                             covariant=covariant,
                                             contravariant=contravariant)
                paramspec.__infer_variance__ = infer_variance

            _set_default(paramspec, default)
            _set_module(paramspec)

            def _paramspec_prepare_subst(alias, args):
                # Substitution hook (``__typing_prepare_subst__``): fills in a
                # missing trailing argument from the PEP 696 default and
                # normalizes the argument list before typing substitutes it.
                params = alias.__parameters__
                i = params.index(paramspec)
                if i == len(args) and paramspec.has_default():
                    args = [*args, paramspec.__default__]
                if i >= len(args):
                    raise TypeError(f"Too few arguments for {alias}")
                # Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612.
                if len(params) == 1 and not typing._is_param_expr(args[0]):
                    assert i == 0
                    args = (args,)
                # Convert lists to tuples to help other libraries cache the results.
                elif isinstance(args[i], list):
                    args = (*args[:i], tuple(args[i]), *args[i + 1:])
                return args

            paramspec.__typing_prepare_subst__ = _paramspec_prepare_subst
            return paramspec

        def __init_subclass__(cls) -> None:
            raise TypeError(f"type '{__name__}.ParamSpec' is not an acceptable base type")

# 3.8-3.9
else:

    # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
    class ParamSpec(list, _DefaultMixin):
        """Parameter specification variable.

        Usage::

           P = ParamSpec('P')

        Parameter specification variables exist primarily for the benefit of static
        type checkers.  They are used to forward the parameter types of one
        callable to another callable, a pattern commonly found in higher order
        functions and decorators.  They are only valid when used in ``Concatenate``,
        or as the first argument to ``Callable``. In Python 3.10 and higher,
        they are also supported in user-defined Generics at runtime.
        See class Generic for more information on generic types.  An
        example for annotating a decorator::

           T = TypeVar('T')
           P = ParamSpec('P')

           def add_logging(f: Callable[P, T]) -> Callable[P, T]:
               '''A type-safe decorator to add logging to a function.'''
               def inner(*args: P.args, **kwargs: P.kwargs) -> T:
                   logging.info(f'{f.__name__} was called')
                   return f(*args, **kwargs)
               return inner

           @add_logging
           def add_two(x: float, y: float) -> float:
               '''Add two numbers together.'''
               return x + y

        Parameter specification variables defined with covariant=True or
        contravariant=True can be used to declare covariant or contravariant
        generic types.  These keyword arguments are valid, but their actual semantics
        are yet to be decided.  See PEP 612 for details.

        Parameter specification variables can be introspected. e.g.:

           P.__name__ == 'P'
           P.__bound__ == None
           P.__covariant__ == False
           P.__contravariant__ == False

        Note that only parameter specification variables defined in global scope can
        be pickled.
        """

        # Trick Generic __parameters__.
        __class__ = typing.TypeVar

        @property
        def args(self):
            return ParamSpecArgs(self)

        @property
        def kwargs(self):
            return ParamSpecKwargs(self)

        def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
                     infer_variance=False, default=NoDefault):
            # Self-containing list: old typing internals discover parameters by
            # iterating, so an instance that "contains" itself is picked up.
            list.__init__(self, [self])
            self.__name__ = name
            self.__covariant__ = bool(covariant)
            self.__contravariant__ = bool(contravariant)
            self.__infer_variance__ = bool(infer_variance)
            if bound:
                self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
            else:
                self.__bound__ = None
            _DefaultMixin.__init__(self, default)

            # for pickling:
            def_mod = _caller()
            if def_mod != 'typing_extensions':
                self.__module__ = def_mod

        def __repr__(self):
            if self.__infer_variance__:
                prefix = ''
            elif self.__covariant__:
                prefix = '+'
            elif self.__contravariant__:
                prefix = '-'
            else:
                prefix = '~'
            return prefix + self.__name__

        def __hash__(self):
            return object.__hash__(self)

        def __eq__(self, other):
            # Identity semantics: each ParamSpec is unique.
            return self is other

        def __reduce__(self):
            return self.__name__

        # Hack to get typing._type_check to pass.
        def __call__(self, *args, **kwargs):
            pass
# Backport of ``typing.Concatenate`` machinery.  On 3.8-3.9 a full alias class
# is reimplemented; on 3.10+ typing's own class is reused (with a small
# subclass on 3.10 to fix ``copy_with``/``__getitem__`` behavior).
if not hasattr(typing, 'Concatenate'):
    # Inherits from list as a workaround for Callable checks in Python < 3.9.2.

    # 3.9.0-1
    if not hasattr(typing, '_type_convert'):
        def _type_convert(arg, module=None, *, allow_special_forms=False):
            """For converting None to type(None), and strings to ForwardRef."""
            if arg is None:
                return type(None)
            if isinstance(arg, str):
                # ForwardRef grew extra keyword arguments across 3.9 micro
                # releases; call it with whatever this runtime supports.
                if sys.version_info <= (3, 9, 6):
                    return ForwardRef(arg)
                if sys.version_info <= (3, 9, 7):
                    return ForwardRef(arg, module=module)
                return ForwardRef(arg, module=module, is_class=allow_special_forms)
            return arg
    else:
        _type_convert = typing._type_convert

    class _ConcatenateGenericAlias(list):
        # Trick Generic into looking into this for __parameters__.
        __class__ = typing._GenericAlias

        # Flag in 3.8.
        _special = False

        def __init__(self, origin, args):
            super().__init__(args)
            self.__origin__ = origin
            self.__args__ = args

        def __repr__(self):
            _type_repr = typing._type_repr
            return (f'{_type_repr(self.__origin__)}'
                    f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')

        def __hash__(self):
            return hash((self.__origin__, self.__args__))

        # Hack to get typing._type_check to pass in Generic.
        def __call__(self, *args, **kwargs):
            pass

        @property
        def __parameters__(self):
            return tuple(
                tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
            )

        # 3.8; needed for typing._subst_tvars
        # 3.9 used by __getitem__ below
        def copy_with(self, params):
            if isinstance(params[-1], _ConcatenateGenericAlias):
                params = (*params[:-1], *params[-1].__args__)
            elif isinstance(params[-1], (list, tuple)):
                return (*params[:-1], *params[-1])
            elif (not (params[-1] is ... or isinstance(params[-1], ParamSpec))):
                raise TypeError("The last parameter to Concatenate should be a "
                                "ParamSpec variable or ellipsis.")
            return self.__class__(self.__origin__, params)

        # 3.9; accessed during GenericAlias.__getitem__ when substituting
        def __getitem__(self, args):
            if self.__origin__ in (Generic, Protocol):
                # Can't subscript Generic[...] or Protocol[...].
                raise TypeError(f"Cannot subscript already-subscripted {self}")
            if not self.__parameters__:
                raise TypeError(f"{self} is not a generic class")

            if not isinstance(args, tuple):
                args = (args,)
            args = _unpack_args(*(_type_convert(p) for p in args))
            params = self.__parameters__
            for param in params:
                # Normalize the argument list per parameter, preferring the
                # parameter's own substitution hook when it defines one.
                prepare = getattr(param, "__typing_prepare_subst__", None)
                if prepare is not None:
                    args = prepare(self, args)
                # 3.8 - 3.9 & typing.ParamSpec
                elif isinstance(param, ParamSpec):
                    i = params.index(param)
                    if (
                        i == len(args)
                        and getattr(param, '__default__', NoDefault) is not NoDefault
                    ):
                        args = [*args, param.__default__]
                    if i >= len(args):
                        raise TypeError(f"Too few arguments for {self}")
                    # Special case for Z[[int, str, bool]] == Z[int, str, bool]
                    if len(params) == 1 and not _is_param_expr(args[0]):
                        assert i == 0
                        args = (args,)
                    elif (
                        isinstance(args[i], list)
                        # 3.8 - 3.9
                        # This class inherits from list do not convert
                        and not isinstance(args[i], _ConcatenateGenericAlias)
                    ):
                        args = (*args[:i], tuple(args[i]), *args[i + 1:])

            alen = len(args)
            plen = len(params)
            if alen != plen:
                raise TypeError(
                    f"Too {'many' if alen > plen else 'few'} arguments for {self};"
                    f" actual {alen}, expected {plen}"
                )

            subst = dict(zip(self.__parameters__, args))
            # determine new args
            new_args = []
            for arg in self.__args__:
                if isinstance(arg, type):
                    new_args.append(arg)
                    continue
                if isinstance(arg, TypeVar):
                    arg = subst[arg]
                    if (
                        (isinstance(arg, typing._GenericAlias) and _is_unpack(arg))
                        or (
                            hasattr(_types, "GenericAlias")
                            and isinstance(arg, _types.GenericAlias)
                            and getattr(arg, "__unpacked__", False)
                        )
                    ):
                        raise TypeError(f"{arg} is not valid as type argument")
                elif isinstance(arg,
                                typing._GenericAlias
                                if not hasattr(_types, "GenericAlias") else
                                (typing._GenericAlias, _types.GenericAlias)
                                ):
                    # Recurse: substitute inside nested generic aliases.
                    subparams = arg.__parameters__
                    if subparams:
                        subargs = tuple(subst[x] for x in subparams)
                        arg = arg[subargs]
                new_args.append(arg)
            return self.copy_with(tuple(new_args))

# 3.10+
else:
    _ConcatenateGenericAlias = typing._ConcatenateGenericAlias

    # 3.10
    if sys.version_info < (3, 11):
        class _ConcatenateGenericAlias(typing._ConcatenateGenericAlias, _root=True):
            # needed for checks in collections.abc.Callable to accept this class
            __module__ = "typing"

            def copy_with(self, params):
                if isinstance(params[-1], (list, tuple)):
                    return (*params[:-1], *params[-1])
                if isinstance(params[-1], typing._ConcatenateGenericAlias):
                    params = (*params[:-1], *params[-1].__args__)
                elif not (params[-1] is ... or isinstance(params[-1], ParamSpec)):
                    raise TypeError("The last parameter to Concatenate should be a "
                                    "ParamSpec variable or ellipsis.")
                return super(typing._ConcatenateGenericAlias, self).copy_with(params)

            def __getitem__(self, args):
                value = super().__getitem__(args)
                if isinstance(value, tuple) and any(_is_unpack(t) for t in value):
                    return tuple(_unpack_args(*(n for n in value)))
                return value

# 3.8-3.9.2
| _TypeVarLikeMeta |
python | weaviate__weaviate-python-client | weaviate/backup/backup.py | {
"start": 853,
"end": 1083
} | class ____(str, Enum):
"""The status of a backup."""
STARTED = "STARTED"
TRANSFERRING = "TRANSFERRING"
TRANSFERRED = "TRANSFERRED"
SUCCESS = "SUCCESS"
FAILED = "FAILED"
CANCELED = "CANCELED"
| BackupStatus |
python | huggingface__transformers | src/transformers/models/perceiver/modeling_perceiver.py | {
"start": 85065,
"end": 86778
} | class ____(PerceiverAbstractDecoder):
"""
Cross-attention based classification decoder. Light-weight wrapper of [`PerceiverBasicDecoder`] for logit output.
Will turn the output of the Perceiver encoder which is of shape (batch_size, num_latents, d_latents) to a tensor of
shape (batch_size, num_labels). The queries are of shape (batch_size, 1, num_labels).
Args:
config ([`PerceiverConfig`]):
Model configuration.
"""
def __init__(self, config, **decoder_kwargs):
super().__init__()
self.num_labels = config.num_labels
self.decoder = PerceiverBasicDecoder(
config,
output_num_channels=self.num_labels,
output_index_dims=1, # Predict a single logit array.
**decoder_kwargs,
)
@property
def num_query_channels(self) -> int:
return self.decoder.num_query_channels
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
return self.decoder.decoder_query(
inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_points
)
def forward(
self,
query: torch.Tensor,
z: torch.FloatTensor,
query_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> PerceiverDecoderOutput:
decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)
# B x 1 x num_classes -> B x num_classes
logits = decoder_outputs.logits[:, 0, :]
return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions)
| PerceiverClassificationDecoder |
python | doocs__leetcode | solution/2200-2299/2210.Count Hills and Valleys in an Array/Solution.py | {
"start": 0,
"end": 404
} | class ____:
def countHillValley(self, nums: List[int]) -> int:
ans = j = 0
for i in range(1, len(nums) - 1):
if nums[i] == nums[i + 1]:
continue
if nums[i] > nums[j] and nums[i] > nums[i + 1]:
ans += 1
if nums[i] < nums[j] and nums[i] < nums[i + 1]:
ans += 1
j = i
return ans
| Solution |
python | facebook__pyre-check | client/commands/servers.py | {
"start": 3878,
"end": 8761
} | class ____:
running: List[RunningServerStatus] = dataclasses.field(default_factory=list)
defunct: List[DefunctServerStatus] = dataclasses.field(default_factory=list)
def to_json(self) -> List[Dict[str, Any]]:
return [status.to_json() for status in self.running] + [
status.to_json() for status in self.defunct
]
def _get_server_status(
socket_path: Path,
flavor: identifiers.PyreFlavor,
) -> Union[RunningServerStatus, DefunctServerStatus]:
try:
with connections.connect(socket_path) as (
input_channel,
output_channel,
):
output_channel.write('["GetInfo"]\n')
return RunningServerStatus.from_server_response(
input_channel.readline(), flavor
)
except connections.ConnectionFailure:
return DefunctServerStatus(str(socket_path))
def _print_running_server_status(running_status: Sequence[RunningServerStatus]) -> None:
if len(running_status) == 0:
log.stdout.write("No server is currently running.\n")
else:
log.stdout.write("Running Servers:\n\n")
log.stdout.write(
tabulate.tabulate(
[
[
status.pid,
status.global_root,
status.relative_local_root or "",
status.version,
status.flavor,
]
for status in running_status
],
headers=[
"PID",
"Global Root",
"Relative Local Root",
"Version",
"Flavor",
],
),
)
log.stdout.write("\n")
log.stdout.write("\n")
def _print_defunct_server_status(defunct_status: Sequence[DefunctServerStatus]) -> None:
defunct_count = len(defunct_status)
if defunct_count > 0:
plurality = "" if defunct_count == 1 else "s"
log.stdout.write(f"Found {defunct_count} defunct server{plurality} at:\n")
for status in defunct_status:
log.stdout.write(f" {status.socket_path}\n")
log.stdout.write("\n")
def _print_server_status_json(server_status: AllServerStatus) -> None:
log.stdout.write(json.dumps(server_status.to_json()))
log.stdout.write("\n")
def _print_server_status(server_status: AllServerStatus, output_format: str) -> None:
if output_format == command_arguments.TEXT:
_print_running_server_status(server_status.running)
_print_defunct_server_status(server_status.defunct)
elif output_format == command_arguments.JSON:
_print_server_status_json(server_status)
def _stop_server(socket_path: Path, flavor: identifiers.PyreFlavor) -> None:
try:
LOG.info(f"Stopping server at `{socket_path}...`")
stop.stop_server(socket_path, flavor)
LOG.info(f"Successfully stopped `{socket_path}.`")
except connections.ConnectionFailure:
LOG.info(f"Failed to connect to `{socket_path}`. Removing it...")
stop.remove_socket_if_exists(socket_path)
except Exception as error:
LOG.warning(
f"Exception occurred when trying to stop server at `{socket_path}`: {error}"
)
def _find_server_flavor(socket_path: Path) -> identifiers.PyreFlavor:
# Socket paths are of the form `/tmp/pyre_{md5}[__{flavor}].sock`.
serialized_path = str(socket_path)
for flavor in identifiers.PyreFlavor:
if flavor.value in serialized_path:
return flavor
# No suffix indicates a classic server.
return identifiers.PyreFlavor.CLASSIC
def find_all_servers(socket_paths: Iterable[Path]) -> AllServerStatus:
running_servers = []
defunct_servers = []
for socket_path in socket_paths:
flavor = _find_server_flavor(socket_path)
status = _get_server_status(socket_path, flavor)
if isinstance(status, RunningServerStatus):
running_servers.append(status)
else:
defunct_servers.append(status)
return AllServerStatus(running_servers, defunct_servers)
def find_all_servers_under(socket_root: Path) -> AllServerStatus:
return find_all_servers(daemon_socket.find_socket_files(socket_root))
def run_list(output_format: str) -> commands.ExitCode:
server_status = find_all_servers_under(daemon_socket.get_default_socket_root())
_print_server_status(server_status, output_format)
return commands.ExitCode.SUCCESS
def run_stop() -> commands.ExitCode:
for socket_path in daemon_socket.find_socket_files(
daemon_socket.get_default_socket_root()
):
flavor = _find_server_flavor(socket_path)
_stop_server(socket_path, flavor)
LOG.info("Done\n")
return commands.ExitCode.SUCCESS
| AllServerStatus |
python | PrefectHQ__prefect | tests/server/models/test_variables.py | {
"start": 3085,
"end": 3421
} | class ____:
async def test_read_variable(
self,
session,
variable,
):
model = await read_variable(session, variable.id) # type: ignore
assert model
assert model.id == variable.id
assert model.name == variable.name
assert model.tags == variable.tags
| TestReadVariable |
python | google__pytype | pytype/pyi/parser_test.py | {
"start": 63737,
"end": 64290
} | class ____(parser_test_base.ParserTestBase):
def test_canonical_version(self):
src = textwrap.dedent("""
from typing import Any
def foo(x: int = 0) -> Any: ...
def foo(x: str) -> Any: ...
""")
expected = textwrap.dedent("""
from typing import Any, overload
@overload
def foo(x: int = ...) -> Any: ...
@overload
def foo(x: str) -> Any: ...
""").strip()
self.assertMultiLineEqual(
parser.canonical_pyi(src, options=self.options), expected
)
| CanonicalPyiTest |
python | neetcode-gh__leetcode | python/0567-permutation-in-string.py | {
"start": 0,
"end": 1049
} | class ____:
def checkInclusion(self, s1: str, s2: str) -> bool:
if len(s1) > len(s2):
return False
s1Count, s2Count = [0] * 26, [0] * 26
for i in range(len(s1)):
s1Count[ord(s1[i]) - ord("a")] += 1
s2Count[ord(s2[i]) - ord("a")] += 1
matches = 0
for i in range(26):
matches += 1 if s1Count[i] == s2Count[i] else 0
l = 0
for r in range(len(s1), len(s2)):
if matches == 26:
return True
index = ord(s2[r]) - ord("a")
s2Count[index] += 1
if s1Count[index] == s2Count[index]:
matches += 1
elif s1Count[index] + 1 == s2Count[index]:
matches -= 1
index = ord(s2[l]) - ord("a")
s2Count[index] -= 1
if s1Count[index] == s2Count[index]:
matches += 1
elif s1Count[index] - 1 == s2Count[index]:
matches -= 1
l += 1
return matches == 26
| Solution |
python | keras-team__keras | keras/src/losses/losses_test.py | {
"start": 46788,
"end": 61687
} | class ____(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.SparseCategoricalCrossentropy(name="scce")
)
def test_all_correct_unweighted(self):
y_true = np.array([[0], [1], [2]], dtype="int64")
y_pred = np.array(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
dtype="float32",
)
cce_obj = losses.SparseCategoricalCrossentropy()
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 0.0, 3)
# Test with logits.
logits = np.array(
[[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]
)
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(loss, 0.0, 3)
def test_unweighted(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = np.array([0, 1, 2])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 0.3239, 3)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(loss, 0.0573, 3)
def test_scalar_weighted(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = np.array([[0], [1], [2]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 0.7449, 3)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(loss, 0.1317, 3)
def test_sample_weighted(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = np.array([[0], [1], [2]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
sample_weight = np.array([[1.2], [3.4], [5.6]]).reshape((3, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 1.0696, 3)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.31829, 3)
def test_no_reduction(self):
y_true = np.array([[0], [1], [2]])
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=None
)
loss = cce_obj(y_true, logits)
self.assertAllClose((0.001822, 0.000459, 0.169846), loss)
def test_ignore_class(self):
y_true = np.array([[-1, 2]])
logits = np.array([[[0.854, 0.698, 0.598], [0.088, 0.86, 0.018]]])
cce_obj = losses.SparseCategoricalCrossentropy(
from_logits=True, ignore_class=-1, reduction=None
)
loss = cce_obj(y_true, logits)
self.assertAllClose([[0.0, 1.480129]], loss)
y_true = np.array([[[-1], [2]]])
logits = np.array([[[0.854, 0.698, 0.598], [0.088, 0.86, 0.018]]])
cce_obj = losses.SparseCategoricalCrossentropy(
from_logits=True, ignore_class=-1, reduction=None
)
loss = cce_obj(y_true, logits)
self.assertAllClose([[0.0, 1.480129]], loss)
def test_binary_segmentation(self):
y_true = np.array(
[[0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 1]]
)
y_pred = np.array(
[
[[1.0, 0.0], [0.0, 1.0], [0.0, 1.0], [1.0, 0.0]],
[[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [1.0, 0.0]],
[[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]],
[[0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]],
]
)
output = losses.SparseCategoricalCrossentropy()(y_true, y_pred)
self.assertAllClose(output, 0.0)
y_true = np.array(
[[0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 1]]
)
y_pred = np.array(
[
[[1.0, 0.0], [0.0, 1.0], [0.0, 1.0], [0.2, 0.8]],
[[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [1.0, 0.0]],
[[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]],
[[0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [0.6, 0.4]],
]
)
expected = np.array([-np.log(0.2), -np.log(0.4)])
output = losses.SparseCategoricalCrossentropy()(y_true, y_pred)
self.assertAllClose(output, expected.sum() / 16.0) # 16 pixels
def test_binary_segmentation_different_axis(self):
y_true = np.array(
[[0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 1]]
)
y_pred = np.array(
[
[[1.0, 0.0], [0.0, 1.0], [0.0, 1.0], [1.0, 0.0]],
[[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [1.0, 0.0]],
[[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]],
[[0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]],
]
)
y_pred_reshaped = np.moveaxis(y_pred, source=2, destination=0)
if backend.backend() == "tensorflow":
expected_message = (
"Only axis=-1 is currently supported. Received: axis=0"
)
escaped_message = re.escape(expected_message)
with pytest.raises(ValueError, match=escaped_message):
losses.SparseCategoricalCrossentropy(axis=0)(
y_true, y_pred_reshaped
)
elif backend.backend() == "jax":
expected_message = (
"Arguments `target` and `output` "
"must have the same shape up until"
" the last dimension: target.shape=(4, 4),"
" output.shape=(2, 4, 4)"
)
escaped_message = re.escape(expected_message)
with pytest.raises(ValueError, match=escaped_message):
losses.SparseCategoricalCrossentropy(axis=0)(
y_true, y_pred_reshaped
)
elif backend.backend() == "torch":
output = losses.SparseCategoricalCrossentropy(axis=0)(
y_true, y_pred_reshaped
)
self.assertAllClose(output, 0.0)
if backend.backend() == "torch":
y_true = np.array(
[[0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 1]]
)
y_pred = np.array(
[
[[1.0, 0.0], [0.0, 1.0], [0.0, 1.0], [0.2, 0.8]],
[[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [1.0, 0.0]],
[[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]],
[[0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [0.6, 0.4]],
]
)
y_pred_reshaped = np.moveaxis(y_pred, source=2, destination=0)
expected = np.array([-np.log(0.2), -np.log(0.4)])
output = losses.SparseCategoricalCrossentropy(axis=0)(
y_true, y_pred_reshaped
)
self.assertAllClose(output, expected.sum() / 16.0)
y_true = np.array([y_true, y_true, y_true])
y_pred_reshaped = np.array(
[y_pred_reshaped, y_pred_reshaped, y_pred_reshaped]
)
output = losses.SparseCategoricalCrossentropy(axis=1)(
y_true, y_pred_reshaped
)
self.assertAllClose(output, expected.sum() / 16.0)
def test_multi_class_segmentation(self):
y_true = np.array(
[[0, 1, 2, 0], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 1]]
)
y_pred = np.array(
[
[
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
],
[
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
],
[
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
],
[
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
],
]
)
output = losses.SparseCategoricalCrossentropy()(y_true, y_pred)
self.assertAllClose(output, 0.0)
y_true = np.array(
[[0, 1, 2, 0], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 1]]
)
y_pred = np.array(
[
[
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
[0.2, 0.0, 0.8],
],
[
[0.7, 0.3, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
],
[
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
],
[
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 1.0, 0.0],
],
]
)
expected = np.array(
[
-np.log(0.2),
-np.log(0.3),
-np.log(0.5),
]
)
output = losses.SparseCategoricalCrossentropy()(y_true, y_pred)
self.assertAllClose(output, expected.sum() / 16.0) # 16 pixels
def test_multi_class_segmentation_different_axis(self):
y_true = np.array(
[[0, 1, 2, 0], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 1]]
)
y_pred = np.array(
[
[
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
],
[
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
],
[
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
],
[
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
],
]
)
y_pred_reshaped = np.moveaxis(y_pred, source=2, destination=0)
if backend.backend() == "tensorflow":
expected_message = (
"Only axis=-1 is currently supported. Received: axis=0"
)
escaped_message = re.escape(expected_message)
with pytest.raises(ValueError, match=escaped_message):
losses.SparseCategoricalCrossentropy(axis=0)(
y_true, y_pred_reshaped
)
elif backend.backend() == "jax":
expected_message = (
"Arguments `target` and `output` "
"must have the same shape up until"
" the last dimension: target.shape=(4, 4),"
" output.shape=(3, 4, 4)"
)
escaped_message = re.escape(expected_message)
with pytest.raises(ValueError, match=escaped_message):
losses.SparseCategoricalCrossentropy(axis=0)(
y_true, y_pred_reshaped
)
elif backend.backend() == "torch":
output = losses.SparseCategoricalCrossentropy(axis=0)(
y_true, y_pred_reshaped
)
self.assertAllClose(output, 0.0)
if backend.backend() == "torch":
y_true = np.array(
[[0, 1, 2, 0], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 1]]
)
y_pred = np.array(
[
[
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
[0.2, 0.0, 0.8],
],
[
[0.7, 0.3, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
],
[
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
],
[
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 1.0, 0.0],
],
]
)
expected = np.array(
[
-np.log(0.2),
-np.log(0.3),
-np.log(0.5),
]
)
y_pred_reshaped = np.moveaxis(y_pred, source=2, destination=0)
output = losses.SparseCategoricalCrossentropy(axis=0)(
y_true, y_pred_reshaped
)
self.assertAllClose(output, expected.sum() / 16.0)
y_true = np.array([y_true, y_true, y_true])
y_pred_reshaped = np.array(
[y_pred_reshaped, y_pred_reshaped, y_pred_reshaped]
)
output = losses.SparseCategoricalCrossentropy(axis=1)(
y_true, y_pred_reshaped
)
self.assertAllClose(output, expected.sum() / 16.0)
def test_dtype_arg(self):
y_true = np.array([[0], [1], [2]], dtype="int64")
y_pred = np.array(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
dtype="float32",
)
cce_obj = losses.SparseCategoricalCrossentropy(dtype="bfloat16")
loss = cce_obj(y_true, y_pred)
self.assertDType(loss, "bfloat16")
| SparseCategoricalCrossentropyTest |
python | run-llama__llama_index | llama-index-core/llama_index/core/schema.py | {
"start": 7600,
"end": 14687
} | class ____(BaseComponent):
"""
Base node Object.
Generic abstract interface for retrievable nodes
"""
# hash is computed on local field, during the validation process
model_config = ConfigDict(populate_by_name=True, validate_assignment=True)
id_: str = Field(
default_factory=lambda: str(uuid.uuid4()), description="Unique ID of the node."
)
embedding: Optional[List[float]] = Field(
default=None, description="Embedding of the node."
)
""""
metadata fields
- injected as part of the text shown to LLMs as context
- injected as part of the text for generating embeddings
- used by vector DBs for metadata filtering
"""
metadata: Dict[str, Any] = Field(
default_factory=dict,
description="A flat dictionary of metadata fields",
alias="extra_info",
)
excluded_embed_metadata_keys: List[str] = Field(
default_factory=list,
description="Metadata keys that are excluded from text for the embed model.",
)
excluded_llm_metadata_keys: List[str] = Field(
default_factory=list,
description="Metadata keys that are excluded from text for the LLM.",
)
relationships: Dict[
Annotated[NodeRelationship, EnumNameSerializer],
RelatedNodeType,
] = Field(
default_factory=dict,
description="A mapping of relationships to other node information.",
)
metadata_template: str = Field(
default=DEFAULT_METADATA_TMPL,
description=(
"Template for how metadata is formatted, with {key} and "
"{value} placeholders."
),
)
metadata_separator: str = Field(
default="\n",
description="Separator between metadata fields when converting to string.",
alias="metadata_seperator",
)
@classmethod
@abstractmethod
def get_type(cls) -> str:
"""Get Object type."""
@abstractmethod
def get_content(self, metadata_mode: MetadataMode = MetadataMode.ALL) -> str:
"""Get object content."""
def get_metadata_str(self, mode: MetadataMode = MetadataMode.ALL) -> str:
"""Metadata info string."""
if mode == MetadataMode.NONE:
return ""
usable_metadata_keys = set(self.metadata.keys())
if mode == MetadataMode.LLM:
for key in self.excluded_llm_metadata_keys:
if key in usable_metadata_keys:
usable_metadata_keys.remove(key)
elif mode == MetadataMode.EMBED:
for key in self.excluded_embed_metadata_keys:
if key in usable_metadata_keys:
usable_metadata_keys.remove(key)
return self.metadata_separator.join(
[
self.metadata_template.format(key=key, value=str(value))
for key, value in self.metadata.items()
if key in usable_metadata_keys
]
)
@abstractmethod
def set_content(self, value: Any) -> None:
"""Set the content of the node."""
@property
@abstractmethod
def hash(self) -> str:
"""Get hash of node."""
@property
def node_id(self) -> str:
return self.id_
@node_id.setter
def node_id(self, value: str) -> None:
self.id_ = value
@property
def source_node(self) -> Optional[RelatedNodeInfo]:
"""
Source object node.
Extracted from the relationships field.
"""
if NodeRelationship.SOURCE not in self.relationships:
return None
relation = self.relationships[NodeRelationship.SOURCE]
if isinstance(relation, list):
raise ValueError("Source object must be a single RelatedNodeInfo object")
return relation
@property
def prev_node(self) -> Optional[RelatedNodeInfo]:
"""Prev node."""
if NodeRelationship.PREVIOUS not in self.relationships:
return None
relation = self.relationships[NodeRelationship.PREVIOUS]
if not isinstance(relation, RelatedNodeInfo):
raise ValueError("Previous object must be a single RelatedNodeInfo object")
return relation
@property
def next_node(self) -> Optional[RelatedNodeInfo]:
"""Next node."""
if NodeRelationship.NEXT not in self.relationships:
return None
relation = self.relationships[NodeRelationship.NEXT]
if not isinstance(relation, RelatedNodeInfo):
raise ValueError("Next object must be a single RelatedNodeInfo object")
return relation
@property
def parent_node(self) -> Optional[RelatedNodeInfo]:
"""Parent node."""
if NodeRelationship.PARENT not in self.relationships:
return None
relation = self.relationships[NodeRelationship.PARENT]
if not isinstance(relation, RelatedNodeInfo):
raise ValueError("Parent object must be a single RelatedNodeInfo object")
return relation
@property
def child_nodes(self) -> Optional[List[RelatedNodeInfo]]:
"""Child nodes."""
if NodeRelationship.CHILD not in self.relationships:
return None
relation = self.relationships[NodeRelationship.CHILD]
if not isinstance(relation, list):
raise ValueError("Child objects must be a list of RelatedNodeInfo objects.")
return relation
@property
def ref_doc_id(self) -> Optional[str]: # pragma: no cover
"""Deprecated: Get ref doc id."""
source_node = self.source_node
if source_node is None:
return None
return source_node.node_id
@property
@deprecated(
version="0.12.2",
reason="'extra_info' is deprecated, use 'metadata' instead.",
)
def extra_info(self) -> dict[str, Any]: # pragma: no coverde
return self.metadata
@extra_info.setter
@deprecated(
version="0.12.2",
reason="'extra_info' is deprecated, use 'metadata' instead.",
)
def extra_info(self, extra_info: dict[str, Any]) -> None: # pragma: no coverde
self.metadata = extra_info
def __str__(self) -> str:
source_text_truncated = truncate_text(
self.get_content().strip(), TRUNCATE_LENGTH
)
source_text_wrapped = textwrap.fill(
f"Text: {source_text_truncated}\n", width=WRAP_WIDTH
)
return f"Node ID: {self.node_id}\n{source_text_wrapped}"
def get_embedding(self) -> List[float]:
"""
Get embedding.
Errors if embedding is None.
"""
if self.embedding is None:
raise ValueError("embedding not set.")
return self.embedding
def as_related_node_info(self) -> RelatedNodeInfo:
"""Get node as RelatedNodeInfo."""
return RelatedNodeInfo(
node_id=self.node_id,
node_type=self.get_type(),
metadata=self.metadata,
hash=self.hash,
)
# Discriminator for the two supported embedding representations.
EmbeddingKind = Literal["sparse", "dense"]
| BaseNode |
python | ansible__ansible | lib/ansible/module_utils/_internal/_datatag/_tags.py | {
"start": 197,
"end": 468
class ____(_datatag.AnsibleDatatagBase):
    """Datatag recording a deprecation attached to a tagged value."""

    # Deprecation message text.
    msg: str
    # Optional additional guidance accompanying the message.
    help_text: t.Optional[str] = None
    # NOTE(review): presumably the date the deprecated behavior is removed — confirm.
    date: t.Optional[str] = None
    # NOTE(review): presumably the version the deprecated behavior is removed — confirm.
    version: t.Optional[str] = None
    # Plugin responsible for the deprecation, when known.
    deprecator: t.Optional[_messages.PluginInfo] = None
    # Pre-formatted traceback captured where the deprecation was recorded, if any.
    formatted_traceback: t.Optional[str] = None
| Deprecated |
python | Pylons__pyramid | tests/test_httpexceptions.py | {
"start": 17885,
"end": 17928
class ____:
    """Minimal test double exposing only an ``exception`` attribute."""

    # Default: no exception attached; tests assign one when needed.
    exception = None
| DummyRequest |
python | tensorflow__tensorflow | tensorflow/python/training/supervisor_test.py | {
"start": 2387,
"end": 36181
} | class ____(test.TestCase):
def _test_dir(self, test_name):
test_dir = os.path.join(self.get_temp_dir(), test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
return test_dir
def _wait_for_glob(self, pattern, timeout_secs, for_checkpoint=True):
"""Wait for a checkpoint file to appear.
Args:
pattern: A string.
timeout_secs: How long to wait for in seconds.
for_checkpoint: whether we're globbing for checkpoints.
"""
end_time = time.time() + timeout_secs
while time.time() < end_time:
if for_checkpoint:
if checkpoint_management.checkpoint_exists(pattern):
return
else:
if len(gfile.Glob(pattern)) >= 1:
return
time.sleep(0.05)
self.assertFalse(True, "Glob never matched any file: %s" % pattern)
# This test does not test much.
def testBasics(self):
logdir = self._test_dir("basics")
with ops.Graph().as_default():
my_op = constant_op.constant(1.0)
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
for _ in range(10):
self.evaluate(my_op)
sess.close()
sv.stop()
def testManagedSession(self):
logdir = self._test_dir("managed_session")
with ops.Graph().as_default():
my_op = constant_op.constant(1.0)
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session(""):
for _ in range(10):
self.evaluate(my_op)
# Supervisor has been stopped.
self.assertTrue(sv.should_stop())
def testManagedSessionUserError(self):
logdir = self._test_dir("managed_user_error")
with ops.Graph().as_default():
my_op = constant_op.constant(1.0)
sv = supervisor.Supervisor(logdir=logdir)
last_step = None
with self.assertRaisesRegex(RuntimeError, "failing here"):
with sv.managed_session("") as sess:
for step in range(10):
last_step = step
if step == 1:
raise RuntimeError("failing here")
else:
self.evaluate(my_op)
# Supervisor has been stopped.
self.assertTrue(sv.should_stop())
self.assertEqual(1, last_step)
def testManagedSessionIgnoreOutOfRangeError(self):
logdir = self._test_dir("managed_out_of_range")
with ops.Graph().as_default():
my_op = constant_op.constant(1.0)
sv = supervisor.Supervisor(logdir=logdir)
last_step = None
with sv.managed_session("") as sess:
for step in range(10):
last_step = step
if step == 3:
raise errors_impl.OutOfRangeError(my_op.op.node_def, my_op.op,
"all done")
else:
self.evaluate(my_op)
# Supervisor has been stopped. OutOfRangeError was not thrown.
self.assertTrue(sv.should_stop())
self.assertEqual(3, last_step)
def testManagedSessionDoNotKeepSummaryWriter(self):
logdir = self._test_dir("managed_not_keep_summary_writer")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir=logdir, summary_op=None)
with sv.managed_session(
"", close_summary_writer=True, start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
# Sleep 1.2s to make sure that the next event file has a different name
# than the current one.
time.sleep(1.2)
with sv.managed_session(
"", close_summary_writer=True, start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
event_paths = sorted(glob.glob(os.path.join(logdir, "event*")))
self.assertEqual(2, len(event_paths))
# The two event files should have the same contents.
for path in event_paths:
# The summary iterator should report the summary once as we closed the
# summary writer across the 2 sessions.
rr = summary_iterator.summary_iterator(path)
# The first event should list the file_version.
ev = next(rr)
self.assertEqual("brain.Event:2", ev.file_version)
# The next one has the graph and metagraph.
ev = next(rr)
self.assertTrue(ev.graph_def)
ev = next(rr)
self.assertTrue(ev.meta_graph_def)
# The next one should have the values from the summary.
# But only once.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should be a stop message if we closed cleanly.
ev = next(rr)
self.assertEqual(event_pb2.SessionLog.STOP, ev.session_log.status)
# We should be done.
with self.assertRaises(StopIteration):
next(rr)
def testManagedSessionKeepSummaryWriter(self):
logdir = self._test_dir("managed_keep_summary_writer")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session(
"", close_summary_writer=False,
start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
with sv.managed_session(
"", close_summary_writer=False,
start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
# Now close the summary writer to flush the events.
sv.summary_writer.close()
# The summary iterator should report the summary twice as we reused
# the same summary writer across the 2 sessions.
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEqual("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
self.assertTrue(ev.graph_def)
ev = next(rr)
self.assertTrue(ev.meta_graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should also have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def _csv_data(self, logdir):
# Create a small data file with 3 CSV records.
data_path = os.path.join(logdir, "data.csv")
with open(data_path, "w") as f:
f.write("1,2,3\n")
f.write("4,5,6\n")
f.write("7,8,9\n")
return data_path
def testManagedEndOfInputOneQueue(self):
# Tests that the supervisor finishes without an error when using
# a fixed number of epochs, reading from a single queue.
logdir = self._test_dir("managed_end_of_input_one_queue")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with ops.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = input_lib.string_input_producer(
[data_path], num_epochs=3)
reader = io_ops.TextLineReader()
_, csv = reader.read(filename_queue)
rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
while not sv.should_stop():
sess.run(rec)
def testManagedEndOfInputTwoQueues(self):
# Tests that the supervisor finishes without an error when using
# a fixed number of epochs, reading from two queues, the second
# one producing a batch from the first one.
logdir = self._test_dir("managed_end_of_input_two_queues")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with ops.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = input_lib.string_input_producer(
[data_path], num_epochs=3)
reader = io_ops.TextLineReader()
_, csv = reader.read(filename_queue)
rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
shuff_rec = input_lib.shuffle_batch(rec, 1, 6, 4)
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
while not sv.should_stop():
sess.run(shuff_rec)
def testManagedMainErrorTwoQueues(self):
# Tests that the supervisor correctly raises a main loop
# error even when using multiple queues for input.
logdir = self._test_dir("managed_main_error_two_queues")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with self.assertRaisesRegex(RuntimeError, "fail at step 3"):
with ops.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = input_lib.string_input_producer(
[data_path], num_epochs=3)
reader = io_ops.TextLineReader()
_, csv = reader.read(filename_queue)
rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
shuff_rec = input_lib.shuffle_batch(rec, 1, 6, 4)
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
for step in range(9):
if sv.should_stop():
break
elif step == 3:
raise RuntimeError("fail at step 3")
else:
sess.run(shuff_rec)
def testSessionConfig(self):
logdir = self._test_dir("session_config")
with ops.Graph().as_default():
with ops.device("/cpu:1"):
my_op = constant_op.constant([1.0])
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session(
"", config=config_pb2.ConfigProto(device_count={"CPU": 2}))
for _ in range(10):
self.evaluate(my_op)
sess.close()
sv.stop()
def testChiefCanWriteEvents(self):
logdir = self._test_dir("can_write")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(is_chief=True, logdir=logdir, summary_op=None)
meta_graph_def = meta_graph.create_meta_graph_def()
sess = sv.prepare_or_wait_for_session("")
sv.summary_computed(sess, sess.run(summ))
sess.close()
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEqual("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should be a stop message if we closed cleanly.
ev = next(rr)
self.assertEqual(event_pb2.SessionLog.STOP, ev.session_log.status)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def testNonChiefCannotWriteEvents(self):
def _summary_computed():
with ops.Graph().as_default():
sv = supervisor.Supervisor(is_chief=False)
sess = sv.prepare_or_wait_for_session("")
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summ = summary.merge_all()
sv.summary_computed(sess, sess.run(summ))
def _start_standard_services():
with ops.Graph().as_default():
sv = supervisor.Supervisor(is_chief=False)
sess = sv.prepare_or_wait_for_session("")
sv.start_standard_services(sess)
self.assertRaises(RuntimeError, _summary_computed)
self.assertRaises(RuntimeError, _start_standard_services)
def testNoLogdirButWantSummary(self):
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir="", summary_op=None)
sess = sv.prepare_or_wait_for_session("")
with self.assertRaisesRegex(RuntimeError, "requires a summary writer"):
sv.summary_computed(sess, sess.run(summ))
@test_util.run_v1_only("train.Supervisor is for v1 only")
def testLogdirButExplicitlyNoSummaryWriter(self):
logdir = self._test_dir("explicit_no_summary_writer")
with ops.Graph().as_default():
variable_v1.VariableV1([1.0], name="foo")
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir=logdir, summary_writer=None)
sess = sv.prepare_or_wait_for_session("")
# Check that a checkpoint is still be generated.
self._wait_for_glob(sv.save_path, 3.0)
# Check that we cannot write a summary
with self.assertRaisesRegex(RuntimeError, "requires a summary writer"):
sv.summary_computed(sess, sess.run(summ))
def testNoLogdirButExplicitSummaryWriter(self):
logdir = self._test_dir("explicit_summary_writer")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sw = writer.FileWriter(logdir)
sv = supervisor.Supervisor(logdir="", summary_op=None, summary_writer=sw)
meta_graph_def = meta_graph.create_meta_graph_def()
sess = sv.prepare_or_wait_for_session("")
sv.summary_computed(sess, sess.run(summ))
sess.close()
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# Check the summary was written to 'logdir'
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEqual("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should be a stop message if we closed cleanly.
ev = next(rr)
self.assertEqual(event_pb2.SessionLog.STOP, ev.session_log.status)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def testNoLogdirSucceeds(self):
with ops.Graph().as_default():
variable_v1.VariableV1([1.0, 2.0, 3.0])
sv = supervisor.Supervisor(logdir="", summary_op=None)
sess = sv.prepare_or_wait_for_session("")
sess.close()
sv.stop()
def testUseSessionManager(self):
with ops.Graph().as_default():
variable_v1.VariableV1([1.0, 2.0, 3.0])
sm = session_manager_lib.SessionManager()
# Pass in session_manager. The additional init_op is ignored.
sv = supervisor.Supervisor(logdir="", session_manager=sm)
sv.prepare_or_wait_for_session("")
@test_util.run_v1_only("train.Supervisor is for v1 only")
def testInitOp(self):
logdir = self._test_dir("default_init_op")
with ops.Graph().as_default():
v = variable_v1.VariableV1([1.0, 2.0, 3.0])
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
@test_util.run_v1_only("train.Supervisor is for v1 only")
def testInitFn(self):
logdir = self._test_dir("default_init_op")
with ops.Graph().as_default():
v = variable_v1.VariableV1([1.0, 2.0, 3.0])
def _init_fn(sess):
sess.run(v.initializer)
sv = supervisor.Supervisor(logdir=logdir, init_op=None, init_fn=_init_fn)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
@test_util.run_v1_only("train.Supervisor is for v1 only")
def testInitOpWithFeedDict(self):
logdir = self._test_dir("feed_dict_init_op")
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variable_v1.VariableV1(p, name="v")
sv = supervisor.Supervisor(
logdir=logdir,
init_op=variables.global_variables_initializer(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
@test_util.run_v1_only("train.Supervisor is for v1 only")
def testReadyForLocalInitOp(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("default_ready_for_local_init_op")
uid = uuid.uuid4().hex
def get_session(is_chief):
g = ops.Graph()
with g.as_default():
with ops.device("/job:localhost"):
v = variable_v1.VariableV1(
1, name="default_ready_for_local_init_op_v_" + str(uid))
vadd = v.assign_add(1)
w = variable_v1.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="default_ready_for_local_init_op_w_" + str(uid))
ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
sv = supervisor.Supervisor(
logdir=logdir,
is_chief=is_chief,
graph=g,
recovery_wait_secs=1,
init_op=v.initializer,
ready_for_local_init_op=ready_for_local_init_op)
sess = sv.prepare_or_wait_for_session(server.target)
return sv, sess, v, vadd, w
sv0, sess0, v0, _, w0 = get_session(True)
sv1, sess1, _, vadd1, w1 = get_session(False)
self.assertEqual(1, sess0.run(w0))
self.assertEqual(2, sess1.run(vadd1))
self.assertEqual(1, sess1.run(w1))
self.assertEqual(2, sess0.run(v0))
sv0.stop()
sv1.stop()
@test_util.run_v1_only("train.Supervisor is for v1 only")
def testReadyForLocalInitOpRestoreFromCheckpoint(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("ready_for_local_init_op_restore")
uid = uuid.uuid4().hex
# Create a checkpoint.
with ops.Graph().as_default():
v = variable_v1.VariableV1(
10.0, name="ready_for_local_init_op_restore_v_" + str(uid))
summary.scalar("ready_for_local_init_op_restore_v_" + str(uid), v)
sv = supervisor.Supervisor(logdir=logdir)
sv.prepare_or_wait_for_session(server.target)
save_path = sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
def get_session(is_chief):
g = ops.Graph()
with g.as_default():
with ops.device("/job:localhost"):
v = variable_v1.VariableV1(
1.0, name="ready_for_local_init_op_restore_v_" + str(uid))
vadd = v.assign_add(1)
w = variable_v1.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="ready_for_local_init_op_restore_w_" + str(uid))
ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
sv = supervisor.Supervisor(
logdir=logdir,
is_chief=is_chief,
graph=g,
recovery_wait_secs=1,
ready_for_local_init_op=ready_for_local_init_op)
sess = sv.prepare_or_wait_for_session(server.target)
return sv, sess, v, vadd, w
sv0, sess0, v0, _, w0 = get_session(True)
sv1, sess1, _, vadd1, w1 = get_session(False)
self.assertEqual(10, sess0.run(w0))
self.assertEqual(11, sess1.run(vadd1))
self.assertEqual(10, sess1.run(w1))
self.assertEqual(11, sess0.run(v0))
sv0.stop()
sv1.stop()
def testLocalInitOp(self):
logdir = self._test_dir("default_local_init_op")
with ops.Graph().as_default():
# A local variable.
v = variable_v1.VariableV1([1.0, 2.0, 3.0],
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# An entity which is initialized through a TABLE_INITIALIZER.
w = variable_v1.VariableV1([4, 5, 6], trainable=False, collections=[])
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, w.initializer)
# This shouldn't add a variable to the VARIABLES collection responsible
# for variables that are saved/restored from checkpoints.
self.assertEqual(len(variables.global_variables()), 0)
# Suppress normal variable inits to make sure the local one is
# initialized via local_init_op.
sv = supervisor.Supervisor(logdir=logdir, init_op=None)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
self.assertAllClose([4, 5, 6], sess.run(w))
sv.stop()
def testLocalInitOpForNonChief(self):
logdir = self._test_dir("default_local_init_op_non_chief")
with ops.Graph().as_default():
with ops.device("/job:localhost"):
# A local variable.
v = variable_v1.VariableV1([1.0, 2.0, 3.0],
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# This shouldn't add a variable to the VARIABLES collection responsible
# for variables that are saved/restored from checkpoints.
self.assertEqual(len(variables.global_variables()), 0)
# Suppress normal variable inits to make sure the local one is
# initialized via local_init_op.
sv = supervisor.Supervisor(logdir=logdir, init_op=None, is_chief=False)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testInitOpFails(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("default_init_op_fails")
with ops.Graph().as_default():
v = variable_v1.VariableV1([1.0, 2.0, 3.0], name="v")
variable_v1.VariableV1([4.0, 5.0, 6.0], name="w")
# w will not be initialized.
sv = supervisor.Supervisor(logdir=logdir, init_op=v.initializer)
with self.assertRaisesRegex(RuntimeError, "Variables not initialized: w"):
sv.prepare_or_wait_for_session(server.target)
def testInitOpFailsForTransientVariable(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("default_init_op_fails_for_local_variable")
with ops.Graph().as_default():
v = variable_v1.VariableV1([1.0, 2.0, 3.0],
name="v",
collections=[ops.GraphKeys.LOCAL_VARIABLES])
variable_v1.VariableV1([1.0, 2.0, 3.0],
name="w",
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# w will not be initialized.
sv = supervisor.Supervisor(logdir=logdir, local_init_op=v.initializer)
with self.assertRaisesRegex(RuntimeError, "Variables not initialized: w"):
sv.prepare_or_wait_for_session(server.target)
@test_util.run_v1_only("train.Supervisor is for v1 only")
def testSetupFail(self):
logdir = self._test_dir("setup_fail")
with ops.Graph().as_default():
variable_v1.VariableV1([1.0, 2.0, 3.0], name="v")
with self.assertRaisesRegex(ValueError, "must have their device set"):
supervisor.Supervisor(logdir=logdir, is_chief=False)
with ops.Graph().as_default(), ops.device("/job:ps"):
variable_v1.VariableV1([1.0, 2.0, 3.0], name="v")
supervisor.Supervisor(logdir=logdir, is_chief=False)
@test_util.run_v1_only("train.Supervisor is for v1 only")
def testDefaultGlobalStep(self):
logdir = self._test_dir("default_global_step")
with ops.Graph().as_default():
variable_v1.VariableV1(287, name="global_step")
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
self.assertEqual(287, sess.run(sv.global_step))
sv.stop()
@test_util.run_v1_only("train.Supervisor is for v1 only")
def testRestoreFromMetaGraph(self):
logdir = self._test_dir("restore_from_meta_graph")
with ops.Graph().as_default():
variable_v1.VariableV1(1, name="v0")
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
filename = sv.saver.save(sess, sv.save_path)
sv.stop()
# Create a new Graph and Supervisor and recover.
with ops.Graph().as_default():
new_saver = saver_lib.import_meta_graph(".".join([filename, "meta"]))
self.assertIsNotNone(new_saver)
sv2 = supervisor.Supervisor(logdir=logdir, saver=new_saver)
sess = sv2.prepare_or_wait_for_session("")
self.assertEqual(1, sess.run("v0:0"))
sv2.saver.save(sess, sv2.save_path)
sv2.stop()
# This test is based on the fact that the standard services start
# right away and get to run once before sv.stop() returns.
# We still sleep a bit to make the test robust.
@test_util.run_v1_only("train.Supervisor is for v1 only")
def testStandardServicesWithoutGlobalStep(self):
logdir = self._test_dir("standard_services_without_global_step")
# Create a checkpoint.
with ops.Graph().as_default():
v = variable_v1.VariableV1([1.0], name="foo")
summary.scalar("v", v[0])
sv = supervisor.Supervisor(logdir=logdir)
meta_graph_def = meta_graph.create_meta_graph_def(
saver_def=sv.saver.saver_def)
sess = sv.prepare_or_wait_for_session("")
save_path = sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# There should be an event file with a version number.
rr = _summary_iterator(logdir)
ev = next(rr)
self.assertEqual("brain.Event:2", ev.file_version)
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
ev = next(rr)
self.assertProtoEquals("value { tag: 'v' simple_value: 1.0 }", ev.summary)
ev = next(rr)
self.assertEqual(event_pb2.SessionLog.STOP, ev.session_log.status)
self.assertRaises(StopIteration, lambda: next(rr))
# There should be a checkpoint file with the variable "foo"
with ops.Graph().as_default(), self.cached_session() as sess:
v = variable_v1.VariableV1([10.10], name="foo")
sav = saver_lib.Saver([v])
sav.restore(sess, save_path)
self.assertEqual(1.0, self.evaluate(v)[0])
# Same as testStandardServicesNoGlobalStep but with a global step.
# We should get a summary about the step time.
@test_util.run_v1_only("train.Supervisor is for v1 only")
def testStandardServicesWithGlobalStep(self):
logdir = self._test_dir("standard_services_with_global_step")
# Create a checkpoint.
with ops.Graph().as_default():
v = variable_v1.VariableV1([123], name="global_step")
sv = supervisor.Supervisor(logdir=logdir)
meta_graph_def = meta_graph.create_meta_graph_def(
saver_def=sv.saver.saver_def)
sess = sv.prepare_or_wait_for_session("")
# This is where the checkpoint will appear, with step number 123.
save_path = "%s-123" % sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# There should be an event file with a version number.
rr = _summary_iterator(logdir)
ev = next(rr)
self.assertEqual("brain.Event:2", ev.file_version)
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
ev = next(rr)
# It is actually undeterministic whether SessionLog.START gets written
# before the summary or the checkpoint, but this works when run 10000 times.
self.assertEqual(123, ev.step)
self.assertEqual(event_pb2.SessionLog.START, ev.session_log.status)
first = next(rr)
second = next(rr)
# It is undeterministic whether the value gets written before the checkpoint
# since they are on separate threads, so we check for both conditions.
if first.HasField("summary"):
self.assertProtoEquals("""value { tag: 'global_step/sec'
simple_value: 0.0 }""", first.summary)
self.assertEqual(123, second.step)
self.assertEqual(event_pb2.SessionLog.CHECKPOINT,
second.session_log.status)
else:
self.assertEqual(123, first.step)
self.assertEqual(event_pb2.SessionLog.CHECKPOINT,
first.session_log.status)
self.assertProtoEquals("""value { tag: 'global_step/sec'
simple_value: 0.0 }""", second.summary)
ev = next(rr)
self.assertEqual(event_pb2.SessionLog.STOP, ev.session_log.status)
self.assertRaises(StopIteration, lambda: next(rr))
# There should be a checkpoint file with the variable "foo"
with ops.Graph().as_default(), self.cached_session() as sess:
v = variable_v1.VariableV1([-12], name="global_step")
sav = saver_lib.Saver([v])
sav.restore(sess, save_path)
self.assertEqual(123, self.evaluate(v)[0])
def testNoQueueRunners(self):
with ops.Graph().as_default(), self.cached_session() as sess:
sv = supervisor.Supervisor(logdir=self._test_dir("no_queue_runners"))
self.assertEqual(0, len(sv.start_queue_runners(sess)))
sv.stop()
def testPrepareSessionAfterStopForChief(self):
logdir = self._test_dir("prepare_after_stop_chief")
with ops.Graph().as_default():
sv = supervisor.Supervisor(logdir=logdir, is_chief=True)
# Create a first session and then stop.
sess = sv.prepare_or_wait_for_session("")
sv.stop()
sess.close()
self.assertTrue(sv.should_stop())
# Now create a second session and test that we don't stay stopped, until
# we ask to stop again.
sess2 = sv.prepare_or_wait_for_session("")
self.assertFalse(sv.should_stop())
sv.stop()
sess2.close()
self.assertTrue(sv.should_stop())
def testPrepareSessionAfterStopForNonChief(self):
logdir = self._test_dir("prepare_after_stop_nonchief")
with ops.Graph().as_default():
sv = supervisor.Supervisor(logdir=logdir, is_chief=False)
# Create a first session and then stop.
sess = sv.prepare_or_wait_for_session("")
sv.stop()
sess.close()
self.assertTrue(sv.should_stop())
# Now create a second session and test that we don't stay stopped, until
# we ask to stop again.
sess2 = sv.prepare_or_wait_for_session("")
self.assertFalse(sv.should_stop())
sv.stop()
sess2.close()
self.assertTrue(sv.should_stop())
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| SupervisorTest |
python | viewflow__viewflow | tests/workflow/test_flow_viewset__workflow.py | {
"start": 2072,
"end": 2226
class ____(Flow):
    """Minimal two-node workflow (start handle -> end) used by the viewset tests."""

    process_class = TestWorkflowViewestProcess
    # Start handle transitions directly to the terminating node.
    start = flow.StartHandle().Next(this.end)
    end = flow.End()
| TestWorkflowViewestFlow |
python | django-compressor__django-compressor | compressor/tests/test_mtime_cache.py | {
"start": 147,
"end": 1382
class ____(TestCase):
    """Tests for the ``mtime_cache`` management command (--add / --clean)."""

    # FIXME: add actual tests, improve the existing ones.

    # Glob patterns excluded from mtime caching in every invocation below.
    exclusion_patterns = [
        "*CACHE*",
        "*custom*",
        "*066cd253eada.js",
        "*d728fc7f9301.js",
        "*8a0fed36c317.js",
        "test.txt*",
    ]

    def default_ignore(self):
        """Render the exclusion patterns as ``--ignore=...`` CLI arguments."""
        return ["--ignore=%s" % pattern for pattern in self.exclusion_patterns]

    def test_handle_no_args(self):
        # The command requires --add or --clean; with neither it must error out.
        with self.assertRaises(CommandError):
            call_command("mtime_cache")

    def test_handle_add(self):
        # --add first purges then repopulates the mtime cache entries.
        out = io.StringIO()
        with self.settings(CACHES={}):
            call_command("mtime_cache", "--add", *self.default_ignore(), stdout=out)
        output = out.getvalue()
        self.assertIn("Deleted mtimes of 20 files from the cache.", output)
        self.assertIn("Added mtimes of 20 files to cache.", output)

    def test_handle_clean(self):
        # --clean only purges; nothing must be re-added.
        out = io.StringIO()
        with self.settings(CACHES={}):
            call_command("mtime_cache", "--clean", *self.default_ignore(), stdout=out)
        output = out.getvalue()
        self.assertIn("Deleted mtimes of 20 files from the cache.", output)
        self.assertNotIn("Added mtimes of 20 files to cache.", output)
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/attributes.py | {
"start": 4155,
"end": 4548
} | class ____:
attribute: str = ""
def test_no_issue_sanitize():
    """Taint fixture: attribute is overwritten before the sink, so no issue."""
    x = A()
    x.attribute = _test_source()
    # Re-assignment clears the taint on x.attribute.
    x.attribute = ""
    _test_sink(x.attribute)
def sanitize_attribute(x: A) -> None:
    """Overwrite ``x.attribute``, clearing any taint carried on it."""
    x.attribute = ""
def test_no_issue_sanitize_via_call():
x = A()
x.attribute = _test_source()
sanitize_attribute(x)
_test_sink(x.attribute) # TODO(T218153519): False positive
| A |
python | spack__spack | lib/spack/spack/vendor/macholib/MachO.py | {
"start": 5018,
"end": 16685
} | class ____(object):
"""
Provides reading/writing the Mach-O header of a specific existing file.
If allow_unknown_load_commands is True, allows unknown load commands.
Otherwise, raises ValueError if the file contains an unknown load command.
"""
# filename - the original filename of this mach-o
# sizediff - the current deviation from the initial mach-o size
# header - the mach-o header
# commands - a list of (load_command, somecommand, data)
# data is either a str, or a list of segment structures
# total_size - the current mach-o header size (including header)
# low_offset - essentially, the maximum mach-o header size
# id_cmd - the index of my id command, or None
def __init__(
self,
parent,
fh,
offset,
size,
magic,
hdr,
endian,
allow_unknown_load_commands=False,
):
self.MH_MAGIC = magic
self.mach_header = hdr
# These are all initialized by self.load()
self.parent = parent
self.offset = offset
self.size = size
self.endian = endian
self.header = None
self.commands = None
self.id_cmd = None
self.sizediff = None
self.total_size = None
self.low_offset = None
self.filetype = None
self.headers = []
self.allow_unknown_load_commands = allow_unknown_load_commands
self.load(fh)
def __repr__(self):
return "<%s filename=%r offset=%d size=%d endian=%r>" % (
type(self).__name__,
self.parent.filename,
self.offset,
self.size,
self.endian,
)
def load(self, fh):
fh = fileview(fh, self.offset, self.size)
fh.seek(0)
self.sizediff = 0
kw = {"_endian_": self.endian}
header = self.mach_header.from_fileobj(fh, **kw)
self.header = header
# if header.magic != self.MH_MAGIC:
# raise ValueError("header has magic %08x, expecting %08x" % (
# header.magic, self.MH_MAGIC))
cmd = self.commands = []
self.filetype = self.get_filetype_shortname(header.filetype)
read_bytes = 0
low_offset = sys.maxsize
for i in range(header.ncmds):
# read the load command
cmd_load = load_command.from_fileobj(fh, **kw)
# read the specific command
klass = LC_REGISTRY.get(cmd_load.cmd, None)
if klass is None:
if not self.allow_unknown_load_commands:
raise ValueError("Unknown load command: %d" % (cmd_load.cmd,))
# No load command in the registry, so append the load command itself
# instead of trying to deserialize the data after the header.
data_size = cmd_load.cmdsize - sizeof(load_command)
cmd_data = fh.read(data_size)
cmd.append((cmd_load, cmd_load, cmd_data))
read_bytes += cmd_load.cmdsize
continue
cmd_cmd = klass.from_fileobj(fh, **kw)
if cmd_load.cmd == LC_ID_DYLIB:
# remember where this command was
if self.id_cmd is not None:
raise ValueError("This dylib already has an id")
self.id_cmd = i
if cmd_load.cmd in (LC_SEGMENT, LC_SEGMENT_64):
# for segment commands, read the list of segments
segs = []
# assert that the size makes sense
if cmd_load.cmd == LC_SEGMENT:
section_cls = section
else: # LC_SEGMENT_64
section_cls = section_64
expected_size = (
sizeof(klass)
+ sizeof(load_command)
+ (sizeof(section_cls) * cmd_cmd.nsects)
)
if cmd_load.cmdsize != expected_size:
raise ValueError("Segment size mismatch")
# this is a zero block or something
# so the beginning is wherever the fileoff of this command is
if cmd_cmd.nsects == 0:
if cmd_cmd.filesize != 0:
low_offset = min(low_offset, cmd_cmd.fileoff)
else:
# this one has multiple segments
for _j in range(cmd_cmd.nsects):
# read the segment
seg = section_cls.from_fileobj(fh, **kw)
# if the segment has a size and is not zero filled
# then its beginning is the offset of this segment
not_zerofill = (seg.flags & S_ZEROFILL) != S_ZEROFILL
if seg.offset > 0 and seg.size > 0 and not_zerofill:
low_offset = min(low_offset, seg.offset)
if not_zerofill:
c = fh.tell()
fh.seek(seg.offset)
sd = fh.read(seg.size)
seg.add_section_data(sd)
fh.seek(c)
segs.append(seg)
# data is a list of segments
cmd_data = segs
# These are disabled for now because writing back doesn't work
# elif cmd_load.cmd == LC_CODE_SIGNATURE:
# c = fh.tell()
# fh.seek(cmd_cmd.dataoff)
# cmd_data = fh.read(cmd_cmd.datasize)
# fh.seek(c)
# elif cmd_load.cmd == LC_SYMTAB:
# c = fh.tell()
# fh.seek(cmd_cmd.stroff)
# cmd_data = fh.read(cmd_cmd.strsize)
# fh.seek(c)
else:
# data is a raw str
data_size = cmd_load.cmdsize - sizeof(klass) - sizeof(load_command)
cmd_data = fh.read(data_size)
cmd.append((cmd_load, cmd_cmd, cmd_data))
read_bytes += cmd_load.cmdsize
# make sure the header made sense
if read_bytes != header.sizeofcmds:
raise ValueError(
"Read %d bytes, header reports %d bytes"
% (read_bytes, header.sizeofcmds)
)
self.total_size = sizeof(self.mach_header) + read_bytes
self.low_offset = low_offset
def walkRelocatables(self, shouldRelocateCommand=_shouldRelocateCommand):
"""
for all relocatable commands
yield (command_index, command_name, filename)
"""
for (idx, (lc, cmd, data)) in enumerate(self.commands):
if shouldRelocateCommand(lc.cmd):
name = _RELOCATABLE_NAMES[lc.cmd]
ofs = cmd.name - sizeof(lc.__class__) - sizeof(cmd.__class__)
yield idx, name, data[
ofs : data.find(b"\x00", ofs) # noqa: E203
].decode(sys.getfilesystemencoding())
def rewriteInstallNameCommand(self, loadcmd):
"""Rewrite the load command of this dylib"""
if self.id_cmd is not None:
self.rewriteDataForCommand(self.id_cmd, loadcmd)
return True
return False
def changedHeaderSizeBy(self, bytes):
self.sizediff += bytes
if (self.total_size + self.sizediff) > self.low_offset:
print(
"WARNING: Mach-O header in %r may be too large to relocate"
% (self.parent.filename,)
)
def rewriteLoadCommands(self, changefunc):
"""
Rewrite the load commands based upon a change dictionary
"""
data = changefunc(self.parent.filename)
changed = False
if data is not None:
if self.rewriteInstallNameCommand(data.encode(sys.getfilesystemencoding())):
changed = True
for idx, _name, filename in self.walkRelocatables():
data = changefunc(filename)
if data is not None:
if self.rewriteDataForCommand(
idx, data.encode(sys.getfilesystemencoding())
):
changed = True
return changed
def rewriteDataForCommand(self, idx, data):
lc, cmd, old_data = self.commands[idx]
hdrsize = sizeof(lc.__class__) + sizeof(cmd.__class__)
align = struct.calcsize("Q")
data = data + (b"\x00" * (align - (len(data) % align)))
newsize = hdrsize + len(data)
self.commands[idx] = (lc, cmd, data)
self.changedHeaderSizeBy(newsize - lc.cmdsize)
lc.cmdsize, cmd.name = newsize, hdrsize
return True
def synchronize_size(self):
if (self.total_size + self.sizediff) > self.low_offset:
raise ValueError(
(
"New Mach-O header is too large to relocate in %r "
"(new size=%r, max size=%r, delta=%r)"
)
% (
self.parent.filename,
self.total_size + self.sizediff,
self.low_offset,
self.sizediff,
)
)
self.header.sizeofcmds += self.sizediff
self.total_size = sizeof(self.mach_header) + self.header.sizeofcmds
self.sizediff = 0
def write(self, fileobj):
fileobj = fileview(fileobj, self.offset, self.size)
fileobj.seek(0)
# serialize all the mach-o commands
self.synchronize_size()
self.header.to_fileobj(fileobj)
for lc, cmd, data in self.commands:
lc.to_fileobj(fileobj)
cmd.to_fileobj(fileobj)
if sys.version_info[0] == 2:
if isinstance(data, unicode):
fileobj.write(data.encode(sys.getfilesystemencoding()))
elif isinstance(data, (bytes, str)):
fileobj.write(data)
else:
# segments..
for obj in data:
obj.to_fileobj(fileobj)
else:
if isinstance(data, str):
fileobj.write(data.encode(sys.getfilesystemencoding()))
elif isinstance(data, bytes):
fileobj.write(data)
else:
# segments..
for obj in data:
obj.to_fileobj(fileobj)
# zero out the unused space, doubt this is strictly necessary
# and is generally probably already the case
fileobj.write(b"\x00" * (self.low_offset - fileobj.tell()))
def getSymbolTableCommand(self):
for lc, cmd, _data in self.commands:
if lc.cmd == LC_SYMTAB:
return cmd
return None
def getDynamicSymbolTableCommand(self):
for lc, cmd, _data in self.commands:
if lc.cmd == LC_DYSYMTAB:
return cmd
return None
def get_filetype_shortname(self, filetype):
if filetype in MH_FILETYPE_SHORTNAMES:
return MH_FILETYPE_SHORTNAMES[filetype]
else:
return "unknown"
def main(fn):
m = MachO(fn)
seen = set()
for header in m.headers:
for _idx, name, other in header.walkRelocatables():
if other not in seen:
seen.add(other)
print("\t" + name + ": " + other)
if __name__ == "__main__":
import sys
files = sys.argv[1:] or ["/bin/ls"]
for fn in files:
print(fn)
main(fn)
| MachOHeader |
python | openai__openai-python | src/openai/types/responses/response_code_interpreter_call_interpreting_event.py | {
"start": 221,
"end": 774
} | class ____(BaseModel):
item_id: str
"""The unique identifier of the code interpreter tool call item."""
output_index: int
"""
The index of the output item in the response for which the code interpreter is
interpreting code.
"""
sequence_number: int
"""The sequence number of this event, used to order streaming events."""
type: Literal["response.code_interpreter_call.interpreting"]
"""The type of the event. Always `response.code_interpreter_call.interpreting`."""
| ResponseCodeInterpreterCallInterpretingEvent |
python | has2k1__plotnine | plotnine/scales/limits.py | {
"start": 3830,
"end": 3913
} | class ____(_lim):
"""
Shapee limits
"""
aesthetic = "shape"
| shapelim |
python | ionelmc__pytest-benchmark | src/pytest_benchmark/utils.py | {
"start": 7083,
"end": 14018
} | class ____(RegressionCheck):
def compute(self, current, compared):
return current[self.field] - compared[self.field]
def parse_compare_fail(
string,
rex=re.compile(
r'^(?P<field>min|max|mean|median|stddev|iqr):' r'((?P<percentage>[0-9]+)%|(?P<difference>[0-9]*\.?[0-9]+([eE][-+]?[' r'0-9]+)?))$'
),
):
m = rex.match(string)
if m:
g = m.groupdict()
if g['percentage']:
return PercentageRegressionCheck(g['field'], int(g['percentage']))
elif g['difference']:
return DifferenceRegressionCheck(g['field'], float(g['difference']))
raise argparse.ArgumentTypeError(f'Could not parse value: {string!r}.')
def parse_cprofile_loops(string):
if string == 'auto':
return None
else:
try:
value = int(string)
except ValueError:
raise argparse.ArgumentTypeError(f'Could not parse value: {string!r}. Expected an integer or `auto`.') from None
if value < 1:
raise argparse.ArgumentTypeError(f'Invalid value: {string!r}. Must be greater than 0.') from None
return value
def parse_warmup(string):
string = string.lower().strip()
if string == 'auto':
return platform.python_implementation() == 'PyPy'
elif string in ['off', 'false', 'no']:
return False
elif string in ['on', 'true', 'yes', '']:
return True
else:
raise argparse.ArgumentTypeError(f'Could not parse value: {string!r}.')
def name_formatter_short(bench):
name = bench['name']
if bench['source']:
name = '{} ({:.4})'.format(name, split(bench['source'])[-1])
if name.startswith('test_'):
name = name[5:]
return name
def name_formatter_normal(bench):
name = bench['name']
if bench['source']:
parts = bench['source'].split('/')
parts[-1] = parts[-1][:12]
name = '{} ({})'.format(name, '/'.join(parts))
return name
def name_formatter_long(bench):
if bench['source']:
return '{fullname} ({source})'.format(**bench)
else:
return bench['fullname']
def name_formatter_trial(bench):
if bench['source']:
return '{:.4}'.format(split(bench['source'])[-1])
else:
return '????'
NAME_FORMATTERS = {
'short': name_formatter_short,
'normal': name_formatter_normal,
'long': name_formatter_long,
'trial': name_formatter_trial,
}
def parse_name_format(string):
string = string.lower().strip()
if string in NAME_FORMATTERS:
return string
else:
raise argparse.ArgumentTypeError(f'Could not parse value: {string!r}.')
def parse_timer(string):
return str(load_timer(string))
def parse_sort(string):
string = string.lower().strip()
if string not in ('min', 'max', 'mean', 'stddev', 'name', 'fullname'):
raise argparse.ArgumentTypeError(
f'Unacceptable value: {string!r}. '
"Value for --benchmark-sort must be one of: 'min', 'max', 'mean', "
"'stddev', 'name', 'fullname'."
)
return string
def parse_columns(string):
columns = [str.strip(s) for s in string.lower().split(',')]
invalid = set(columns) - set(ALLOWED_COLUMNS)
if invalid:
# there are extra items in columns!
msg = 'Invalid column name(s): {}. '.format(', '.join(invalid))
msg += 'The only valid column names are: {}'.format(', '.join(ALLOWED_COLUMNS))
raise argparse.ArgumentTypeError(msg)
return columns
def parse_rounds(string):
try:
value = int(string)
except ValueError as exc:
raise argparse.ArgumentTypeError(exc) from None
else:
if value < 1:
raise argparse.ArgumentTypeError('Value for --benchmark-rounds must be at least 1.')
return value
def parse_seconds(string):
try:
return SecondsDecimal(string).as_string
except Exception as exc:
raise argparse.ArgumentTypeError(f'Invalid decimal value {string!r}: {exc!r}') from None
def parse_save(string):
if not string:
raise argparse.ArgumentTypeError("Can't be empty.")
illegal = ''.join(c for c in r'\/:*?<>|' if c in string)
if illegal:
raise argparse.ArgumentTypeError(f'Must not contain any of these characters: /:*?<>|\\ (it has {illegal!r})')
return string
def _parse_hosts(storage_url, netrc_file):
# load creds from netrc file
path = Path(netrc_file).expanduser()
creds = None
if netrc_file and path.is_file():
creds = netrc.netrc(path)
# add creds to urls
urls = []
for netloc in storage_url.netloc.split(','):
auth = ''
if creds and '@' not in netloc:
host = netloc.split(':').pop(0)
res = creds.authenticators(host)
if res:
user, _, secret = res
auth = f'{user}:{secret}@'
url = f'{storage_url.scheme}://{auth}{netloc}'
urls.append(url)
return urls
def parse_elasticsearch_storage(string, default_index='benchmark', default_doctype='benchmark', netrc_file=''):
storage_url = urlparse(string)
hosts = _parse_hosts(storage_url, netrc_file)
index = default_index
doctype = default_doctype
if storage_url.path and storage_url.path != '/':
splitted = storage_url.path.strip('/').split('/')
index = splitted[0]
if len(splitted) >= 2:
doctype = splitted[1]
query = parse_qs(storage_url.query)
try:
project_name = query['project_name'][0]
except KeyError:
project_name = get_project_name()
return hosts, index, doctype, project_name
def load_storage(storage, **kwargs):
if '://' not in storage:
storage = 'file://' + storage
netrc_file = kwargs.pop('netrc') # only used by elasticsearch storage
if storage.startswith('file://'):
from .storage.file import FileStorage # noqa: PLC0415
return FileStorage(storage[len('file://') :], **kwargs)
elif storage.startswith('elasticsearch+'):
from .storage.elasticsearch import ElasticsearchStorage # noqa: PLC0415
# TODO update benchmark_autosave
args = parse_elasticsearch_storage(storage[len('elasticsearch+') :], netrc_file=netrc_file)
return ElasticsearchStorage(*args, **kwargs)
else:
raise argparse.ArgumentTypeError('Storage must be in form of file://path or elasticsearch+http[s]://host1,host2/index/doctype')
def time_unit(value):
if value < 1e-6:
return 'n', 1e9
elif value < 1e-3:
return 'u', 1e6
elif value < 1:
return 'm', 1e3
else:
return '', 1.0
def operations_unit(value):
if value > 1e6:
return 'M', 1e-6
if value > 1e3:
return 'K', 1e-3
return '', 1.0
def format_time(value):
unit, adjustment = time_unit(value)
return f'{value * adjustment:.2f}{unit:s}'
| DifferenceRegressionCheck |
python | facebook__pyre-check | tools/typeshed_patcher/typeshed.py | {
"start": 4744,
"end": 6285
} | class ____(Typeshed):
"""
A typeshed backed up by another `Typeshed` object and a set of patch results
that overwrite file contents in the base `Typeshed` object.
Patches are specified as a dictionary from paths to either a `str` or `None`.
When the value is a string, it serves as the new content for the corresponding
file (or the content of a new file if the file did not exist before). When the
value is `None`, it indicates that the corresponding file will be removed.
"""
base: Typeshed
updated_files: Dict[pathlib.Path, str]
removed_files: Set[pathlib.Path]
def __init__(
self,
base: Typeshed,
patch_results: Mapping[pathlib.Path, Optional[str]],
) -> None:
self.base = base
self.updated_files = {
path: content
for path, content in patch_results.items()
if content is not None
}
self.removed_files = {
path for path, content in patch_results.items() if content is None
}
def all_files(self) -> Iterable[pathlib.Path]:
return (
set(self.base.all_files()) | self.updated_files.keys()
) - self.removed_files
def get_file_content(self, path: pathlib.Path) -> Optional[str]:
if path in self.removed_files:
return None
updated_content = self.updated_files.get(path, None)
if updated_content is not None:
return updated_content
return self.base.get_file_content(path)
| PatchedTypeshed |
python | pypa__pipenv | pipenv/cli/options.py | {
"start": 1960,
"end": 2330
} | class ____:
def __init__(self):
self.dev = False
self.pre = False
self.ignore_pipfile = False
self.code = False
self.requirementstxt = None
self.deploy = False
self.packages = []
self.editables = []
self.extra_pip_args = []
self.categories = []
self.skip_lock = False
| InstallState |
python | FactoryBoy__factory_boy | factory/declarations.py | {
"start": 9186,
"end": 9820
} | class ____(Sequence):
"""Composite of a LazyAttribute and a Sequence.
Attributes:
function (function): A function, expecting the current LazyStub and the
current sequence counter.
type (function): A function converting an integer into the expected kind
of counter for the 'function' attribute.
"""
def evaluate(self, instance, step, extra):
logger.debug(
"LazyAttributeSequence: Computing next value of %r for seq=%s, obj=%r",
self.function, step.sequence, instance)
return self.function(instance, int(step.sequence))
| LazyAttributeSequence |
python | django__django | tests/middleware/tests.py | {
"start": 30016,
"end": 35024
} | class ____(SimpleTestCase):
"""
Tests for the X-Frame-Options clickjacking prevention middleware.
"""
def test_same_origin(self):
"""
The X_FRAME_OPTIONS setting can be set to SAMEORIGIN to have the
middleware use that value for the HTTP header.
"""
with override_settings(X_FRAME_OPTIONS="SAMEORIGIN"):
r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN")
with override_settings(X_FRAME_OPTIONS="sameorigin"):
r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN")
def test_deny(self):
"""
The X_FRAME_OPTIONS setting can be set to DENY to have the middleware
use that value for the HTTP header.
"""
with override_settings(X_FRAME_OPTIONS="DENY"):
r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "DENY")
with override_settings(X_FRAME_OPTIONS="deny"):
r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "DENY")
def test_defaults_sameorigin(self):
"""
If the X_FRAME_OPTIONS setting is not set then it defaults to
DENY.
"""
with override_settings(X_FRAME_OPTIONS=None):
del settings.X_FRAME_OPTIONS # restored by override_settings
r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "DENY")
def test_dont_set_if_set(self):
"""
If the X-Frame-Options header is already set then the middleware does
not attempt to override it.
"""
def same_origin_response(request):
response = HttpResponse()
response.headers["X-Frame-Options"] = "SAMEORIGIN"
return response
def deny_response(request):
response = HttpResponse()
response.headers["X-Frame-Options"] = "DENY"
return response
with override_settings(X_FRAME_OPTIONS="DENY"):
r = XFrameOptionsMiddleware(same_origin_response)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN")
with override_settings(X_FRAME_OPTIONS="SAMEORIGIN"):
r = XFrameOptionsMiddleware(deny_response)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "DENY")
def test_response_exempt(self):
"""
If the response has an xframe_options_exempt attribute set to False
then it still sets the header, but if it's set to True then it doesn't.
"""
def xframe_exempt_response(request):
response = HttpResponse()
response.xframe_options_exempt = True
return response
def xframe_not_exempt_response(request):
response = HttpResponse()
response.xframe_options_exempt = False
return response
with override_settings(X_FRAME_OPTIONS="SAMEORIGIN"):
r = XFrameOptionsMiddleware(xframe_not_exempt_response)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN")
r = XFrameOptionsMiddleware(xframe_exempt_response)(HttpRequest())
self.assertIsNone(r.headers.get("X-Frame-Options"))
def test_is_extendable(self):
"""
The XFrameOptionsMiddleware method that determines the X-Frame-Options
header value can be overridden based on something in the request or
response.
"""
class OtherXFrameOptionsMiddleware(XFrameOptionsMiddleware):
# This is just an example for testing purposes...
def get_xframe_options_value(self, request, response):
if getattr(request, "sameorigin", False):
return "SAMEORIGIN"
if getattr(response, "sameorigin", False):
return "SAMEORIGIN"
return "DENY"
def same_origin_response(request):
response = HttpResponse()
response.sameorigin = True
return response
with override_settings(X_FRAME_OPTIONS="DENY"):
r = OtherXFrameOptionsMiddleware(same_origin_response)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN")
request = HttpRequest()
request.sameorigin = True
r = OtherXFrameOptionsMiddleware(get_response_empty)(request)
self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN")
with override_settings(X_FRAME_OPTIONS="SAMEORIGIN"):
r = OtherXFrameOptionsMiddleware(get_response_empty)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "DENY")
| XFrameOptionsMiddlewareTest |
python | ansible__ansible | test/integration/targets/callback-dispatch/callback_plugins/missing_base_class.py | {
"start": 37,
"end": 169
} | class ____:
"""This callback should fail to load since it doesn't extend the required builtin base class."""
pass
| CallbackModule |
python | kamyu104__LeetCode-Solutions | Python/sort-vowels-in-a-string.py | {
"start": 45,
"end": 1193
} | class ____(object):
def sortVowels(self, s):
"""
:type s: str
:rtype: str
"""
def inplace_counting_sort(nums, reverse=False): # Time: O(n)
if not nums:
return
count = [0]*(max(nums)+1)
for num in nums:
count[num] += 1
for i in xrange(1, len(count)):
count[i] += count[i-1]
for i in reversed(xrange(len(nums))): # inplace but unstable sort
while nums[i] >= 0:
count[nums[i]] -= 1
j = count[nums[i]]
nums[i], nums[j] = nums[j], ~nums[i]
for i in xrange(len(nums)):
nums[i] = ~nums[i] # restore values
if reverse: # unstable sort
nums.reverse()
VOWELS = "AEIOUaeiou"
LOOKUP = {x:i for i, x in enumerate(VOWELS)}
vowels = [LOOKUP[x] for x in s if x in LOOKUP]
inplace_counting_sort(vowels, reverse=True)
return "".join(VOWELS[vowels.pop()] if x in LOOKUP else x for x in s)
# Time: O(nlogn)
# Space: O(1)
# sort
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/nn_ops.py | {
"start": 28012,
"end": 51812
} | class ____:
"""Helper class for with_space_to_batch.
Note that this class assumes that shapes of input and filter passed to
`__call__` are compatible with `input_shape`, `filter_shape`, and
`spatial_dims` passed to the constructor.
Arguments
input_shape: static shape of input. i.e. input.shape.
dilation_rate: see `with_space_to_batch`.
padding: see `with_space_to_batch`.
build_op: Function that maps (num_spatial_dims, paddings) -> (function that
maps (input, filter) -> output).
filter_shape: see `with_space_to_batch`.
spatial_dims: `see with_space_to_batch`.
data_format: see `with_space_to_batch`.
num_batch_dims: (Optional). Number of batch dims in `input_shape`.
"""
def __init__(self,
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=None,
spatial_dims=None,
data_format=None,
num_batch_dims=1):
"""Helper class for _with_space_to_batch."""
dilation_rate = ops.convert_to_tensor(
dilation_rate, dtypes.int32, name="dilation_rate")
if dilation_rate.shape.ndims not in (None, 1):
raise ValueError(
"`dilation_rate.shape.rank` must be 1. Received: "
f"dilation_rate={dilation_rate} of rank {dilation_rate.shape.rank}")
if not dilation_rate.shape.is_fully_defined():
raise ValueError(
"`dilation_rate.shape` must be fully defined. Received: "
f"dilation_rate={dilation_rate} with shape "
f"{dilation_rate.shape}")
num_spatial_dims = dilation_rate.shape.dims[0].value
if data_format is not None and data_format.startswith("NC"):
starting_spatial_dim = num_batch_dims + 1
else:
starting_spatial_dim = num_batch_dims
if spatial_dims is None:
spatial_dims = range(starting_spatial_dim,
num_spatial_dims + starting_spatial_dim)
orig_spatial_dims = list(spatial_dims)
spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
raise ValueError(
"`spatial_dims` must be a monotonically increasing sequence of "
f"positive integers. Received: spatial_dims={orig_spatial_dims}")
if data_format is not None and data_format.startswith("NC"):
expected_input_rank = spatial_dims[-1]
else:
expected_input_rank = spatial_dims[-1] + 1
try:
input_shape.with_rank_at_least(expected_input_rank)
except ValueError:
raise ValueError(
f"`input.shape.rank` must be at least {expected_input_rank}. "
f"Received: input.shape={input_shape} with rank {input_shape.rank}")
const_rate = tensor_util.constant_value(dilation_rate)
rate_or_const_rate = dilation_rate
if const_rate is not None:
rate_or_const_rate = const_rate
if np.any(const_rate < 1):
raise ValueError(
"`dilation_rate` must be positive. "
f"Received: dilation_rate={const_rate}")
if np.all(const_rate == 1):
self.call = build_op(num_spatial_dims, padding)
return
padding, explicit_paddings = convert_padding(padding)
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
if filter_shape is None:
raise ValueError(
"`filter_shape` must be specified for `padding='SAME'`. "
f"Received: filter_shape={filter_shape} and padding={padding}")
filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
const_filter_shape = tensor_util.constant_value(filter_shape)
if const_filter_shape is not None:
filter_shape = const_filter_shape
self.base_paddings = _with_space_to_batch_base_paddings(
const_filter_shape, num_spatial_dims, rate_or_const_rate)
else:
self.num_spatial_dims = num_spatial_dims
self.rate_or_const_rate = rate_or_const_rate
self.base_paddings = None
elif padding == "VALID":
self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
elif padding == "EXPLICIT":
base_paddings = (np.array(explicit_paddings)
.reshape([num_spatial_dims + 2, 2]))
# Remove batch and channel dimensions
if data_format is not None and data_format.startswith("NC"):
self.base_paddings = base_paddings[2:]
else:
self.base_paddings = base_paddings[1:-1]
else:
raise ValueError("`padding` must be one of 'SAME' or 'VALID'. "
f"Received: padding={padding}")
self.input_shape = input_shape
self.spatial_dims = spatial_dims
self.dilation_rate = dilation_rate
self.data_format = data_format
self.op = build_op(num_spatial_dims, "VALID")
self.call = self._with_space_to_batch_call
def _with_space_to_batch_call(self, inp, filter): # pylint: disable=redefined-builtin
"""Call functionality for with_space_to_batch."""
# Handle input whose shape is unknown during graph creation.
input_spatial_shape = None
input_shape = self.input_shape
spatial_dims = self.spatial_dims
if input_shape.ndims is not None:
input_shape_list = input_shape.as_list()
input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
if input_spatial_shape is None or None in input_spatial_shape:
input_shape_tensor = array_ops.shape(inp)
input_spatial_shape = array_ops_stack.stack(
[input_shape_tensor[i] for i in spatial_dims])
base_paddings = self.base_paddings
if base_paddings is None:
# base_paddings could not be computed at build time since static filter
# shape was not fully defined.
filter_shape = array_ops.shape(filter)
base_paddings = _with_space_to_batch_base_paddings(
filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
paddings, crops = array_ops.required_space_to_batch_paddings(
input_shape=input_spatial_shape,
base_paddings=base_paddings,
block_shape=self.dilation_rate)
dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
spatial_dims)
paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
input_converted = array_ops.space_to_batch_nd(
input=inp, block_shape=dilation_rate, paddings=paddings)
result = self.op(input_converted, filter)
result_converted = array_ops.batch_to_space_nd(
input=result, block_shape=dilation_rate, crops=crops)
# Recover channel information for output shape if channels are not last.
if self.data_format is not None and self.data_format.startswith("NC"):
if not result_converted.shape.dims[1].value and filter is not None:
output_shape = result_converted.shape.as_list()
output_shape[1] = filter.shape[-1]
result_converted.set_shape(output_shape)
return result_converted
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
rate_or_const_rate):
"""Helper function to compute base_paddings."""
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_spatial_shape = filter_shape[:num_spatial_dims]
pad_extra_shape = (filter_spatial_shape - 1) * rate_or_const_rate
# When full_padding_shape is odd, we pad more at end, following the same
# convention as conv2d.
pad_extra_start = pad_extra_shape // 2
pad_extra_end = pad_extra_shape - pad_extra_start
base_paddings = array_ops_stack.stack(
[[pad_extra_start[i], pad_extra_end[i]] for i in range(num_spatial_dims)])
return base_paddings
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
"""Returns an `adjusted` version of `orig` based on `spatial_dims`.
Tensor of the same type as `orig` and with shape
`[max(spatial_dims), ...]` where:
adjusted[spatial_dims[i] - 1, ...] = orig[i, ...]
for 0 <= i < len(spatial_dims), and
adjusted[j, ...] = fill_value
for j != spatial_dims[i] - 1 for some i.
If `orig` is a constant value, then the result will be a constant value.
Args:
orig: Tensor of rank > max(spatial_dims).
fill_value: Numpy scalar (of same data type as `orig) specifying the fill
value for non-spatial dimensions.
spatial_dims: See with_space_to_batch.
Returns:
`adjusted` tensor.
"""
fill_dims = orig.get_shape().as_list()[1:]
dtype = orig.dtype.as_numpy_dtype
parts = []
const_orig = tensor_util.constant_value(orig)
const_or_orig = const_orig if const_orig is not None else orig
prev_spatial_dim = 0
i = 0
while i < len(spatial_dims):
start_i = i
start_spatial_dim = spatial_dims[i]
if start_spatial_dim > 1:
# Fill in any gap from the previous spatial dimension (or dimension 1 if
# this is the first spatial dimension) with `fill_value`.
parts.append(
np.full(
[start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,
fill_value,
dtype=dtype))
# Find the largest value of i such that:
# [spatial_dims[start_i], ..., spatial_dims[i]]
# == [start_spatial_dim, ..., start_spatial_dim + i - start_i],
# i.e. the end of a contiguous group of spatial dimensions.
while (i + 1 < len(spatial_dims) and
spatial_dims[i + 1] == spatial_dims[i] + 1):
i += 1
parts.append(const_or_orig[start_i:i + 1])
prev_spatial_dim = spatial_dims[i]
i += 1
if const_orig is not None:
return np.concatenate(parts)
else:
return array_ops.concat(parts, 0)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
"""Helper function for verifying strides and dilation_rate arguments.
This is used by `convolution` and `pool`.
Args:
num_spatial_dims: int
strides: Optional. List of N ints >= 1. Defaults to `[1]*N`. If any value
of strides is > 1, then all values of dilation_rate must be 1.
dilation_rate: Optional. List of N ints >= 1. Defaults to `[1]*N`. If any
value of dilation_rate is > 1, then all values of strides must be 1.
Returns:
Normalized (strides, dilation_rate) as int32 numpy arrays of shape
[num_spatial_dims].
Raises:
ValueError: if the parameters are invalid.
"""
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError(f"`len(dilation_rate)` should be {num_spatial_dims}. "
f"Received: dilation_rate={dilation_rate} of length "
f"{len(dilation_rate)}")
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of `dilation_rate` must be positive. "
f"Received: dilation_rate={dilation_rate}")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError(f"`len(strides)` should be {num_spatial_dims}. "
f"Received: strides={strides} of length {len(strides)}")
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of `strides` must be positive. "
f"Received: strides={strides}")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"`strides > 1` not supported in conjunction with `dilation_rate > 1`. "
f"Received: strides={strides} and dilation_rate={dilation_rate}")
return strides, dilation_rate
@tf_export(v1=["nn.convolution"])
@dispatch.add_dispatch_support
def convolution(
    input,  # pylint: disable=redefined-builtin
    filter,  # pylint: disable=redefined-builtin
    padding,
    strides=None,
    dilation_rate=None,
    name=None,
    data_format=None,
    filters=None,
    dilations=None):  # pylint: disable=g-doc-args
  """Computes sums of N-D convolutions (actually cross-correlation).
  This also supports either output striding via the optional `strides` parameter
  or atrous convolution (also known as convolution with holes or dilated
  convolution, based on the French word "trous" meaning holes in English) via
  the optional `dilation_rate` parameter.  Currently, however, output striding
  is not supported for atrous convolutions.
  Specifically, in the case that `data_format` does not start with "NC", given
  a rank (N+2) `input` Tensor of shape
    [num_batches,
     input_spatial_shape[0],
     ...,
     input_spatial_shape[N-1],
     num_input_channels],
  a rank (N+2) `filter` Tensor of shape
    [spatial_filter_shape[0],
     ...,
     spatial_filter_shape[N-1],
     num_input_channels,
     num_output_channels],
  an optional `dilation_rate` tensor of shape N (defaults to `[1]*N`) specifying
  the filter upsampling/input downsampling rate, and an optional list of N
  `strides` (defaults to `[1]*N`), this computes for each N-D spatial output
  position `(x[0], ..., x[N-1])`:
  ```
  output[b, x[0], ..., x[N-1], k] =
      sum_{z[0], ..., z[N-1], q}
          filter[z[0], ..., z[N-1], q, k] *
          padded_input[b,
                       x[0]*strides[0] + dilation_rate[0]*z[0],
                       ...,
                       x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],
                       q]
  ```
  where b is the index into the batch, k is the output channel number, q is the
  input channel number, and z is the N-D spatial offset within the filter. Here,
  `padded_input` is obtained by zero padding the input using an effective
  spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and
  output striding `strides`.
  In the case that `data_format` does start with `"NC"`, the `input` and output
  (but not the `filter`) are simply transposed as follows:
  ```python
  convolution(input, data_format, **kwargs) =
    tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]),
                             **kwargs),
                 [0, N+1] + range(1, N+1))
  ```
  It is required that 1 <= N <= 3.
  Args:
    input: An (N+2)-D `Tensor` of type `T`, of shape
      `[batch_size] + input_spatial_shape + [in_channels]` if data_format does
      not start with "NC" (default), or
      `[batch_size, in_channels] + input_spatial_shape` if data_format starts
      with "NC".
    filter: An (N+2)-D `Tensor` with the same type as `input` and shape
      `spatial_filter_shape + [in_channels, out_channels]`.
    padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm.
      `"valid"` means no padding. `"same"` results in padding evenly to
      the left/right or up/down of the input such that output has the same
      height/width dimension as the input when the strides are 1. See
      [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2)
      for more information.
    strides: Optional.  Sequence of N ints >= 1.  Specifies the output stride.
      Defaults to `[1]*N`.  If any value of strides is > 1, then all values of
      dilation_rate must be 1.
    dilation_rate: Optional.  Sequence of N ints >= 1.  Specifies the filter
      upsampling/input downsampling rate.  In the literature, the same parameter
      is sometimes called `input stride` or `dilation`.  The effective filter
      size used for the convolution will be `spatial_filter_shape +
      (spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting
      (dilation_rate[i]-1) zeros between consecutive elements of the original
      filter in each spatial dimension i.  If any value of dilation_rate is > 1,
      then all values of strides must be 1.
    name: Optional name for the returned tensor.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC").  For N=1, the valid values are "NWC" (default) and
      "NCW".  For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".
  Returns:
    A `Tensor` with the same type as `input` of shape
        `[batch_size] + output_spatial_shape + [out_channels]`
    if data_format is None or does not start with "NC", or
        `[batch_size, out_channels] + output_spatial_shape`
    if data_format starts with "NC",
    where `output_spatial_shape` depends on the value of `padding`.
    If padding == "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
    If padding == "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] -
              (spatial_filter_shape[i]-1) * dilation_rate[i])
             / strides[i]).
  Raises:
    ValueError: If input/output depth does not match `filter` shape, if padding
      is other than `"VALID"` or `"SAME"`, or if data_format is invalid.
  """
  # Resolve the deprecated v1 argument aliases (`filter`/`filters`,
  # `dilation_rate`/`dilations`) into single canonical values.
  filter = deprecated_argument_lookup("filters", filters, "filter", filter)
  dilation_rate = deprecated_argument_lookup(
      "dilations", dilations, "dilation_rate", dilation_rate)
  # All the real work happens in the rank-agnostic implementation shared
  # with the v2 endpoint.
  return convolution_internal(
      input,
      filter,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilations=dilation_rate,
      name=name)
# TF2 endpoint for `tf.nn.convolution`: a thin wrapper over
# `convolution_internal` without the deprecated `filter`/`dilation_rate`
# aliases.  Its docstring is assigned separately from `convolution.__doc__`
# (with argument names rewritten), hence the missing-docstring suppression.
@tf_export("nn.convolution", v1=[])
@dispatch.add_dispatch_support
def convolution_v2(  # pylint: disable=missing-docstring
    input,  # pylint: disable=redefined-builtin
    filters,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None):
  return convolution_internal(
      input,  # pylint: disable=redefined-builtin
      filters,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilations=dilations,
      name=name)
# Derive `convolution_v2`'s docstring from the v1 `convolution` docstring,
# rewriting the deprecated argument names ("dilation_rate" -> "dilations",
# "filter" -> "filters") to match the v2 signature.
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        convolution.__doc__, "dilation_rate", "dilations"),
    "filter", "filters")
def convolution_internal(
    input,  # pylint: disable=redefined-builtin
    filters,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None,
    call_from_convolution=True,
    num_spatial_dims=None):
  """Internal function which performs rank agnostic convolution.

  Args:
    input: See `convolution`.
    filters: See `convolution`.
    strides: See `convolution`.
    padding: See `convolution`.
    data_format: See `convolution`.
    dilations: See `convolution`.
    name: See `convolution`.
    call_from_convolution: See `convolution`.
    num_spatial_dims: (Optional.).  It is a integer describing the
      rank of the spatial dimensions.  For `1-D`, `2-D` and `3-D` convolutions,
      the value of `num_spatial_dims` is `1`, `2`, and `3`, respectively.
      This argument is only required to disambiguate the rank of `batch_shape`
      when `filter_shape.ndims is None` and `len(batch_shape) > 1`.  For
      backwards compatibility, if `num_spatial_dims is None` and
      `filter_shape.ndims is None`, then `len(batch_shape)` is assumed to be
      `1` (i.e., the input is expected to be
      `[batch_size, num_channels] + input_spatial_shape`
      or `[batch_size] + input_spatial_shape + [num_channels]`.

  Returns:
    A tensor of shape and dtype matching that of `input`.

  Raises:
    ValueError: If input and filter both have unknown shapes, or if
      `num_spatial_dims` is provided and incompatible with the value
      estimated from `filters.shape`.
  """
  # Convert raw Python values to tensors up front so the static shape/dtype
  # queries below behave uniformly; variables and existing tensors are kept.
  if (not isinstance(filters, variables_lib.Variable) and
      not tensor_util.is_tf_type(filters)):
    with ops.name_scope("convolution_internal", None, [filters, input]):
      filters = ops.convert_to_tensor(filters, name='filters')
  if (not isinstance(input, tensor_lib.Tensor) and not tensor_util.is_tf_type(
      input)):
    with ops.name_scope("convolution_internal", None, [filters, input]):
      input = ops.convert_to_tensor(input, name="input")

  filters_rank = filters.shape.rank
  inputs_rank = input.shape.rank
  # Infer `num_spatial_dims` from whichever static rank is available, or
  # validate the caller-supplied value against the filter rank.
  if num_spatial_dims is None:
    if filters_rank:
      num_spatial_dims = filters_rank - 2
    elif inputs_rank:
      num_spatial_dims = inputs_rank - 2
    else:
      raise ValueError(
          "When `num_spatial_dims` is not set, one of `input.shape.rank` or "
          "`filters.shape.rank` must be known. "
          f"Received: input.shape={input.shape} of rank {inputs_rank} and "
          f"filters.shape={filters.shape} of rank {filters_rank}")
  elif filters_rank and filters_rank - 2 != num_spatial_dims:
    raise ValueError(
        "`filters.shape.rank - 2` should equal `num_spatial_dims`. Received: "
        f"filters.shape={filters.shape} of rank {filters_rank} and "
        f"num_spatial_dims={num_spatial_dims}")

  if inputs_rank:
    num_batch_dims = inputs_rank - num_spatial_dims - 1  # Channel dimension.
  else:
    num_batch_dims = 1  # By default, assume single batch dimension.

  if num_spatial_dims not in {1, 2, 3}:
    raise ValueError(
        "`num_spatial_dims` must be 1, 2, or 3. "
        f"Received: num_spatial_dims={num_spatial_dims}.")

  # Channels-last vs channels-first decides where the channel axis sits,
  # which controls how the strides/dilations sequences are expanded.
  if data_format is None or data_format in _CHANNELS_LAST_FORMATS:
    channel_index = num_batch_dims + num_spatial_dims
  else:
    channel_index = num_batch_dims

  # Normalize strides/dilations to full-rank sequences; remember whether any
  # dilation is actually > 1 so the fast path below can be taken otherwise.
  if dilations is None:
    dilations = _get_sequence(dilations, num_spatial_dims, channel_index,
                              "dilations")
    is_dilated_conv = False
  else:
    dilations = _get_sequence(dilations, num_spatial_dims, channel_index,
                              "dilations")
    is_dilated_conv = any(i != 1 for i in dilations)

  strides = _get_sequence(strides, num_spatial_dims, channel_index, "strides")
  has_tpu_context = device_context.enclosing_tpu_context() is not None

  # Choose the default name scope: an explicit `name` always wins; the
  # rank-specific op names are only used on TPU when not called via the
  # public `convolution` wrapper.
  if name:
    default_name = None
  elif not has_tpu_context or call_from_convolution:
    default_name = "convolution"
  elif num_spatial_dims == 2:  # Most common case.
    default_name = "Conv2D"
  elif num_spatial_dims == 3:
    default_name = "Conv3D"
  else:
    default_name = "conv1d"

  with ops.name_scope(name, default_name, [input, filters]) as name:
    # Fast path for TPU or if no dilation, as gradient only supported on TPU
    # for dilations.
    if not is_dilated_conv or has_tpu_context:
      if num_spatial_dims == 2:  # Most common case.
        op = _conv2d_expanded_batch
      elif num_spatial_dims == 3:
        op = _conv3d_expanded_batch
      else:
        op = conv1d

      return op(
          input,
          filters,
          strides,
          padding=padding,
          data_format=data_format,
          dilations=dilations,
          name=name)
    else:
      # Dilated convolution off-TPU goes through the `Convolution` helper;
      # pass it only the spatial entries of strides/dilations (drop the
      # batch and channel positions, whose location depends on data_format).
      if channel_index == 1:
        strides = strides[2:]
        dilations = dilations[2:]
      else:
        strides = strides[1:-1]
        dilations = dilations[1:-1]

      op = Convolution(
          tensor_shape.as_shape(input.shape),
          tensor_shape.as_shape(filters.shape),
          padding,
          strides=strides,
          dilation_rate=dilations,
          name=name,
          data_format=data_format,
          num_spatial_dims=num_spatial_dims)
      return op(input, filters)
| _WithSpaceToBatch |
python | tensorflow__tensorflow | tensorflow/python/ops/data_flow_ops.py | {
"start": 38378,
"end": 41156
} | class ____(QueueBase):
"""A queue implementation that dequeues elements in prioritized order.
See `tf.queue.QueueBase` for a description of the methods on
this class.
"""
def __init__(self,
capacity,
types,
shapes=None,
names=None,
shared_name=None,
name="priority_queue"):
"""Creates a queue that dequeues elements in a first-in first-out order.
A `PriorityQueue` has bounded capacity; supports multiple concurrent
producers and consumers; and provides exactly-once delivery.
A `PriorityQueue` holds a list of up to `capacity` elements. Each
element is a fixed-length tuple of tensors whose dtypes are
described by `types`, and whose shapes are optionally described
by the `shapes` argument.
If the `shapes` argument is specified, each component of a queue
element must have the respective fixed shape. If it is
unspecified, different queue elements may have different shapes,
but the use of `dequeue_many` is disallowed.
Enqueues and Dequeues to the `PriorityQueue` must include an additional
tuple entry at the beginning: the `priority`. The priority must be
an int64 scalar (for `enqueue`) or an int64 vector (for `enqueue_many`).
Args:
capacity: An integer. The upper bound on the number of elements
that may be stored in this queue.
types: A list of `DType` objects. The length of `types` must equal
the number of tensors in each queue element, except the first priority
element. The first tensor in each element is the priority,
which must be type int64.
shapes: (Optional.) A list of fully-defined `TensorShape` objects,
with the same length as `types`, or `None`.
names: (Optional.) A list of strings naming the components in the queue
with the same length as `dtypes`, or `None`. If specified, the dequeue
methods return a dictionary with the names as keys.
shared_name: (Optional.) If non-empty, this queue will be shared under
the given name across multiple sessions.
name: Optional name for the queue operation.
"""
types = _as_type_list(types)
shapes = _as_shape_list(shapes, types)
queue_ref = gen_data_flow_ops.priority_queue_v2(
component_types=types,
shapes=shapes,
capacity=capacity,
shared_name=_shared_name(shared_name),
name=name)
priority_dtypes = [_dtypes.int64] + types
priority_shapes = [()] + shapes if shapes else shapes
super(PriorityQueue, self).__init__(priority_dtypes, priority_shapes, names,
queue_ref)
# TODO(josh11b): class BatchQueue(QueueBase):
| PriorityQueue |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 28870,
"end": 29594
} | class ____(NamedColumn[_T]):
__visit_name__ = "scalar_function_column"
_traverse_internals = [
("name", InternalTraversal.dp_anon_name),
("type", InternalTraversal.dp_type),
("fn", InternalTraversal.dp_clauseelement),
]
is_literal = False
table = None
def __init__(
self,
fn: FunctionElement[_T],
name: str,
type_: Optional[_TypeEngineArgument[_T]] = None,
) -> None:
self.fn = fn
self.name = name
# if type is None, we get NULLTYPE, which is our _T. But I don't
# know how to get the overloads to express that correctly
self.type = type_api.to_instance(type_) # type: ignore
| ScalarFunctionColumn |
python | apache__airflow | providers/apache/livy/tests/unit/apache/livy/sensors/test_livy.py | {
"start": 1165,
"end": 2492
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
create_connection_without_db(
Connection(conn_id="livyunittest", conn_type="livy", host="http://localhost:8998")
)
@pytest.mark.parametrize(
"batch_state", [pytest.param(bs, id=bs.name) for bs in BatchState if bs in LivyHook.TERMINAL_STATES]
)
def test_poke_on_terminal_state(self, batch_state):
sensor = LivySensor(
livy_conn_id="livyunittest", task_id="livy_sensor_test", dag=self.dag, batch_id=100
)
with patch.object(LivyHook, "get_batch_state", return_value=batch_state):
assert sensor.poke({})
@pytest.mark.parametrize(
"batch_state",
[pytest.param(bs, id=bs.name) for bs in BatchState if bs not in LivyHook.TERMINAL_STATES],
)
def test_poke_on_non_terminal_state(self, batch_state):
sensor = LivySensor(
livy_conn_id="livyunittest", task_id="livy_sensor_test", dag=self.dag, batch_id=100
)
with patch.object(LivyHook, "get_batch_state", return_value=batch_state):
assert not sensor.poke({})
| TestLivySensor |
python | spyder-ide__spyder | spyder/api/fonts.py | {
"start": 384,
"end": 1150
} | class ____:
"""
Font types used in Spyder plugins and the entire application.
Notes
-----
* This enum is meant to be used to get the QFont object corresponding to
each type.
* The names associated to the values in this enum depend on historical
reasons that go back to Spyder 2 and are not easy to change now.
* Monospace is the font used used in the Editor, IPython console and
History; Interface is used by the entire Spyder app; and
MonospaceInterface is used, for instance, by the Variable Explorer and
corresponds to Monospace font resized to look good against the
Interface one.
"""
Monospace = 'font'
Interface = 'app_font'
MonospaceInterface = 'monospace_app_font'
| SpyderFontType |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-telegram/llama_index/readers/telegram/base.py | {
"start": 274,
"end": 5897
} | class ____(BaseReader):
"""
Telegram posts/chat messages/comments reader.
Read posts/chat messages/comments from Telegram channels or chats.
Before working with Telegram’s API, you need to get your own API ID and hash:
1. Login to your Telegram account with the phone number of the developer account to use.
2. Click under API Development tools.
3. A Create new application window will appear. Fill in your application details.\
There is no need to enter any URL,\
and only the first two fields (App title and Short name) can currently be changed later.
4. Click on Create application at the end.\
Remember that your API hash is secret and Telegram won’t let you revoke it.\
Don’t post it anywhere!
This API ID and hash is the one used by your application, not your phone number.\
You can use this API ID and hash with any phone number.
Args:
session_name (str): The file name of the session file to be used\
if a string is given (it may be a full path),\
or the Session instance to be used otherwise.
api_id (int): The API ID you obtained from https://my.telegram.org.
api_hash (str): The API hash you obtained from https://my.telegram.org.
phone_number (str): The phone to which the code will be sent.
"""
def __init__(
self,
session_name: str,
api_id: int,
api_hash: str,
phone_number: str,
) -> None:
"""Initialize with parameters."""
super().__init__()
self.session_name = session_name
self.api_id = api_id
self.api_hash = api_hash
self.phone_number = phone_number
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def load_data(
self,
entity_name: str,
post_id: Optional[int] = None,
limit: Optional[int] = None,
start_date: Optional[datetime.datetime] = None,
end_date: Optional[datetime.datetime] = None,
) -> List[Document]:
"""
Load posts/chat messages/comments from Telegram channels or chats.
Since Telethon is an asynchronous library,\
you need to await coroutine functions to have them run\
(or otherwise, run the loop until they are complete)
Args:
entity_name (str): The entity from whom to retrieve the message history.
post_id (int): If set to a post ID, \
the comments that reply to this ID will be returned.\
Else will get posts/chat messages.
limit (int): Number of messages to be retrieved.
start_date (datetime.datetime): Start date of the time period.
end_date (datetime.datetime): End date of the time period.
"""
return self.loop.run_until_complete(
self._load_data(
entity_name=entity_name,
post_id=post_id,
limit=limit,
start_date=start_date,
end_date=end_date,
)
)
async def _load_data(
self,
entity_name: str,
post_id: Optional[int] = None,
limit: Optional[int] = None,
start_date: Optional[datetime.datetime] = None,
end_date: Optional[datetime.datetime] = None,
) -> List[Document]:
"""
Load posts/chat messages/comments from Telegram channels or chats.
Args:
entity_name (str): The entity from whom to retrieve the message history.
post_id (int): If set to a post ID, \
the comments that reply to this ID will be returned.\
Else will get posts/chat messages.
limit (int): Number of messages to be retrieved.
start_date (datetime.datetime): Start date of the time period.
end_date (datetime.datetime): End date of the time period.
"""
import telethon
client = telethon.TelegramClient(self.session_name, self.api_id, self.api_hash)
await client.start(phone=self.phone_number)
results = []
async with client:
if end_date and start_date:
# Asynchronously iterate over messages in between start_date and end_date
async for message in client.iter_messages(
entity_name,
reply_to=post_id,
limit=limit,
offset_date=end_date,
):
if message.date < start_date:
break
if isinstance(message.text, str) and message.text != "":
results.append(Document(text=self._remove_links(message.text)))
else:
# Asynchronously iterate over messages
async for message in client.iter_messages(
entity_name,
reply_to=post_id,
limit=limit,
):
if isinstance(message.text, str) and message.text != "":
results.append(Document(text=self._remove_links(message.text)))
return results
def _remove_links(self, string) -> str:
"""Removes all URLs from a given string, leaving only the base domain name."""
def replace_match(match):
text = match.group(1)
return text if text else ""
url_pattern = r"https?://(?:www\.)?((?!www\.).)+?"
return re.sub(url_pattern, replace_match, string)
| TelegramReader |
python | scikit-learn__scikit-learn | asv_benchmarks/benchmarks/linear_model.py | {
"start": 1705,
"end": 2815
} | class ____(Predictor, Estimator, Benchmark):
"""
Benchmarks for Ridge.
"""
param_names = ["representation", "solver"]
params = (
["dense", "sparse"],
["auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga"],
)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, solver = params
if representation == "dense":
data = _synth_regression_dataset(n_samples=500000, n_features=100)
else:
data = _synth_regression_sparse_dataset(
n_samples=100000, n_features=10000, density=0.005
)
return data
def make_estimator(self, params):
representation, solver = params
estimator = Ridge(solver=solver, fit_intercept=False, random_state=0)
return estimator
def make_scorers(self):
make_gen_reg_scorers(self)
def skip(self, params):
representation, solver = params
if representation == "sparse" and solver == "svd":
return True
return False
| RidgeBenchmark |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 36993,
"end": 41586
} | class ____(Structure):
_fields_ = (
("vmaddr", p_uint64),
("fileoff", p_uint64),
("entry_id", lc_str),
("reserved", p_uint32),
)
LC_REGISTRY = {
LC_SEGMENT: segment_command,
LC_IDFVMLIB: fvmlib_command,
LC_LOADFVMLIB: fvmlib_command,
LC_ID_DYLIB: dylib_command,
LC_LOAD_DYLIB: dylib_command,
LC_LOAD_WEAK_DYLIB: dylib_command,
LC_SUB_FRAMEWORK: sub_framework_command,
LC_SUB_CLIENT: sub_client_command,
LC_SUB_UMBRELLA: sub_umbrella_command,
LC_SUB_LIBRARY: sub_library_command,
LC_PREBOUND_DYLIB: prebound_dylib_command,
LC_ID_DYLINKER: dylinker_command,
LC_LOAD_DYLINKER: dylinker_command,
LC_THREAD: thread_command,
LC_UNIXTHREAD: thread_command,
LC_ROUTINES: routines_command,
LC_SYMTAB: symtab_command,
LC_DYSYMTAB: dysymtab_command,
LC_TWOLEVEL_HINTS: twolevel_hints_command,
LC_PREBIND_CKSUM: prebind_cksum_command,
LC_SYMSEG: symseg_command,
LC_IDENT: ident_command,
LC_FVMFILE: fvmfile_command,
LC_SEGMENT_64: segment_command_64,
LC_ROUTINES_64: routines_command_64,
LC_UUID: uuid_command,
LC_RPATH: rpath_command,
LC_CODE_SIGNATURE: linkedit_data_command,
LC_CODE_SEGMENT_SPLIT_INFO: linkedit_data_command,
LC_REEXPORT_DYLIB: dylib_command,
LC_LAZY_LOAD_DYLIB: dylib_command,
LC_ENCRYPTION_INFO: encryption_info_command,
LC_DYLD_INFO: dyld_info_command,
LC_DYLD_INFO_ONLY: dyld_info_command,
LC_LOAD_UPWARD_DYLIB: dylib_command,
LC_VERSION_MIN_MACOSX: version_min_command,
LC_VERSION_MIN_IPHONEOS: version_min_command,
LC_FUNCTION_STARTS: linkedit_data_command,
LC_DYLD_ENVIRONMENT: dylinker_command,
LC_MAIN: entry_point_command,
LC_DATA_IN_CODE: linkedit_data_command,
LC_SOURCE_VERSION: source_version_command,
LC_DYLIB_CODE_SIGN_DRS: linkedit_data_command,
LC_ENCRYPTION_INFO_64: encryption_info_command_64,
LC_LINKER_OPTION: linker_option_command,
LC_LINKER_OPTIMIZATION_HINT: linkedit_data_command,
LC_VERSION_MIN_TVOS: version_min_command,
LC_VERSION_MIN_WATCHOS: version_min_command,
LC_NOTE: note_command,
LC_BUILD_VERSION: build_version_command,
LC_DYLD_EXPORTS_TRIE: linkedit_data_command,
LC_DYLD_CHAINED_FIXUPS: linkedit_data_command,
LC_FILESET_ENTRY: fileset_entry_command,
}
LC_NAMES = {
LC_SEGMENT: "LC_SEGMENT",
LC_IDFVMLIB: "LC_IDFVMLIB",
LC_LOADFVMLIB: "LC_LOADFVMLIB",
LC_ID_DYLIB: "LC_ID_DYLIB",
LC_LOAD_DYLIB: "LC_LOAD_DYLIB",
LC_LOAD_WEAK_DYLIB: "LC_LOAD_WEAK_DYLIB",
LC_SUB_FRAMEWORK: "LC_SUB_FRAMEWORK",
LC_SUB_CLIENT: "LC_SUB_CLIENT",
LC_SUB_UMBRELLA: "LC_SUB_UMBRELLA",
LC_SUB_LIBRARY: "LC_SUB_LIBRARY",
LC_PREBOUND_DYLIB: "LC_PREBOUND_DYLIB",
LC_ID_DYLINKER: "LC_ID_DYLINKER",
LC_LOAD_DYLINKER: "LC_LOAD_DYLINKER",
LC_THREAD: "LC_THREAD",
LC_UNIXTHREAD: "LC_UNIXTHREAD",
LC_ROUTINES: "LC_ROUTINES",
LC_SYMTAB: "LC_SYMTAB",
LC_DYSYMTAB: "LC_DYSYMTAB",
LC_TWOLEVEL_HINTS: "LC_TWOLEVEL_HINTS",
LC_PREBIND_CKSUM: "LC_PREBIND_CKSUM",
LC_SYMSEG: "LC_SYMSEG",
LC_IDENT: "LC_IDENT",
LC_FVMFILE: "LC_FVMFILE",
LC_SEGMENT_64: "LC_SEGMENT_64",
LC_ROUTINES_64: "LC_ROUTINES_64",
LC_UUID: "LC_UUID",
LC_RPATH: "LC_RPATH",
LC_CODE_SIGNATURE: "LC_CODE_SIGNATURE",
LC_CODE_SEGMENT_SPLIT_INFO: "LC_CODE_SEGMENT_SPLIT_INFO",
LC_REEXPORT_DYLIB: "LC_REEXPORT_DYLIB",
LC_LAZY_LOAD_DYLIB: "LC_LAZY_LOAD_DYLIB",
LC_ENCRYPTION_INFO: "LC_ENCRYPTION_INFO",
LC_DYLD_INFO: "LC_DYLD_INFO",
LC_DYLD_INFO_ONLY: "LC_DYLD_INFO_ONLY",
LC_LOAD_UPWARD_DYLIB: "LC_LOAD_UPWARD_DYLIB",
LC_VERSION_MIN_MACOSX: "LC_VERSION_MIN_MACOSX",
LC_VERSION_MIN_IPHONEOS: "LC_VERSION_MIN_IPHONEOS",
LC_FUNCTION_STARTS: "LC_FUNCTION_STARTS",
LC_DYLD_ENVIRONMENT: "LC_DYLD_ENVIRONMENT",
LC_MAIN: "LC_MAIN",
LC_DATA_IN_CODE: "LC_DATA_IN_CODE",
LC_SOURCE_VERSION: "LC_SOURCE_VERSION",
LC_DYLIB_CODE_SIGN_DRS: "LC_DYLIB_CODE_SIGN_DRS",
LC_LINKER_OPTIMIZATION_HINT: "LC_LINKER_OPTIMIZATION_HINT",
LC_VERSION_MIN_TVOS: "LC_VERSION_MIN_TVOS",
LC_VERSION_MIN_WATCHOS: "LC_VERSION_MIN_WATCHOS",
LC_NOTE: "LC_NOTE",
LC_BUILD_VERSION: "LC_BUILD_VERSION",
LC_DYLD_EXPORTS_TRIE: "LC_DYLD_EXPORTS_TRIE",
LC_DYLD_CHAINED_FIXUPS: "LC_DYLD_CHAINED_FIXUPS",
LC_ENCRYPTION_INFO_64: "LC_ENCRYPTION_INFO_64",
LC_LINKER_OPTION: "LC_LINKER_OPTION",
LC_PREPAGE: "LC_PREPAGE",
LC_FILESET_ENTRY: "LC_FILESET_ENTRY",
}
# this is another union.
| fileset_entry_command |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 339754,
"end": 350924
} | class ____:
class RaiseOnBool:
def __bool__(self):
raise ValueError
true_vals = [True, np._CopyMode.ALWAYS, np.True_]
if_needed_vals = [None, np._CopyMode.IF_NEEDED]
false_vals = [False, np._CopyMode.NEVER, np.False_]
def test_scalars(self):
# Test both numpy and python scalars
for dtype in np.typecodes["All"]:
arr = np.zeros((), dtype=dtype)
scalar = arr[()]
pyscalar = arr.item(0)
# Test never-copy raises error:
assert_raises(ValueError, np.array, pyscalar,
copy=self.RaiseOnBool())
assert_raises(ValueError, _multiarray_tests.npy_ensurenocopy,
[1])
for copy in self.false_vals:
assert_raises(ValueError, np.array, scalar, copy=copy)
assert_raises(ValueError, np.array, pyscalar, copy=copy)
# Casting with a dtype (to unsigned integers) can be special:
with pytest.raises(ValueError):
np.array(pyscalar, dtype=np.int64, copy=copy)
def test_compatible_cast(self):
# Some types are compatible even though they are different, no
# copy is necessary for them. This is mostly true for some integers
def int_types(byteswap=False):
int_types = (np.typecodes["Integer"] +
np.typecodes["UnsignedInteger"])
for int_type in int_types:
yield np.dtype(int_type)
if byteswap:
yield np.dtype(int_type).newbyteorder()
for int1 in int_types():
for int2 in int_types(True):
arr = np.arange(10, dtype=int1)
for copy in self.true_vals:
res = np.array(arr, copy=copy, dtype=int2)
assert res is not arr and res.flags.owndata
assert_array_equal(res, arr)
if int1 == int2:
# Casting is not necessary, base check is sufficient here
for copy in self.if_needed_vals:
res = np.array(arr, copy=copy, dtype=int2)
assert res is arr or res.base is arr
for copy in self.false_vals:
res = np.array(arr, copy=copy, dtype=int2)
assert res is arr or res.base is arr
else:
# Casting is necessary, assert copy works:
for copy in self.if_needed_vals:
res = np.array(arr, copy=copy, dtype=int2)
assert res is not arr and res.flags.owndata
assert_array_equal(res, arr)
assert_raises(ValueError, np.array,
arr, copy=False,
dtype=int2)
def test_buffer_interface(self):
# Buffer interface gives direct memory access (no copy)
arr = np.arange(10)
view = memoryview(arr)
# Checking bases is a bit tricky since numpy creates another
# memoryview, so use may_share_memory.
for copy in self.true_vals:
res = np.array(view, copy=copy)
assert not np.may_share_memory(arr, res)
for copy in self.false_vals:
res = np.array(view, copy=copy)
assert np.may_share_memory(arr, res)
res = np.array(view, copy=np._CopyMode.NEVER)
assert np.may_share_memory(arr, res)
def test_array_interfaces(self):
base_arr = np.arange(10)
# Array interface gives direct memory access (much like a memoryview)
class ArrayLike:
__array_interface__ = base_arr.__array_interface__
arr = ArrayLike()
for copy, val in [(True, None), (np._CopyMode.ALWAYS, None),
(False, arr), (np._CopyMode.IF_NEEDED, arr),
(np._CopyMode.NEVER, arr)]:
res = np.array(arr, copy=copy)
assert res.base is val
def test___array__(self):
base_arr = np.arange(10)
class ArrayLike:
def __array__(self, dtype=None, copy=None):
return base_arr
arr = ArrayLike()
for copy in self.true_vals:
res = np.array(arr, copy=copy)
assert_array_equal(res, base_arr)
# An additional copy is no longer forced by NumPy in this case.
# NumPy trusts the ArrayLike made a copy:
assert res is base_arr
for copy in self.if_needed_vals + self.false_vals:
res = np.array(arr, copy=copy)
assert_array_equal(res, base_arr)
assert res is base_arr # numpy trusts the ArrayLike
def test___array__copy_arg(self):
a = np.ones((10, 10), dtype=int)
assert np.shares_memory(a, a.__array__())
assert not np.shares_memory(a, a.__array__(float))
assert not np.shares_memory(a, a.__array__(float, copy=None))
assert not np.shares_memory(a, a.__array__(copy=True))
assert np.shares_memory(a, a.__array__(copy=None))
assert np.shares_memory(a, a.__array__(copy=False))
assert np.shares_memory(a, a.__array__(int, copy=False))
with pytest.raises(ValueError):
np.shares_memory(a, a.__array__(float, copy=False))
base_arr = np.arange(10)
class ArrayLikeNoCopy:
def __array__(self, dtype=None):
return base_arr
a = ArrayLikeNoCopy()
# explicitly passing copy=None shouldn't raise a warning
arr = np.array(a, copy=None)
assert_array_equal(arr, base_arr)
assert arr is base_arr
# As of NumPy 2.1, explicitly passing copy=True does trigger passing
# it to __array__ (deprecation warning is triggered).
with pytest.warns(DeprecationWarning,
match="__array__.*must implement.*'copy'"):
arr = np.array(a, copy=True)
assert_array_equal(arr, base_arr)
assert arr is not base_arr
# And passing copy=False gives a deprecation warning, but also raises
# an error:
with pytest.warns(DeprecationWarning, match="__array__.*'copy'"):
with pytest.raises(ValueError,
match=r"Unable to avoid copy(.|\n)*numpy_2_0_migration_guide.html"):
np.array(a, copy=False)
def test___array__copy_once(self):
size = 100
base_arr = np.zeros((size, size))
copy_arr = np.zeros((size, size))
class ArrayRandom:
def __init__(self):
self.true_passed = False
def __array__(self, dtype=None, copy=None):
if copy:
self.true_passed = True
return copy_arr
else:
return base_arr
arr_random = ArrayRandom()
first_copy = np.array(arr_random, copy=True)
assert arr_random.true_passed
assert first_copy is copy_arr
arr_random = ArrayRandom()
no_copy = np.array(arr_random, copy=False)
assert not arr_random.true_passed
assert no_copy is base_arr
arr_random = ArrayRandom()
_ = np.array([arr_random], copy=True)
assert not arr_random.true_passed
arr_random = ArrayRandom()
second_copy = np.array(arr_random, copy=True, order="F")
assert arr_random.true_passed
assert second_copy is not copy_arr
arr_random = ArrayRandom()
arr = np.ones((size, size))
arr[...] = arr_random
assert not arr_random.true_passed
assert not np.shares_memory(arr, base_arr)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test__array__reference_leak(self):
class NotAnArray:
def __array__(self, dtype=None, copy=None):
raise NotImplementedError
x = NotAnArray()
refcount = sys.getrefcount(x)
try:
np.array(x)
except NotImplementedError:
pass
gc.collect()
assert refcount == sys.getrefcount(x)
@pytest.mark.parametrize(
"arr", [np.ones(()), np.arange(81).reshape((9, 9))])
@pytest.mark.parametrize("order1", ["C", "F", None])
@pytest.mark.parametrize("order2", ["C", "F", "A", "K"])
def test_order_mismatch(self, arr, order1, order2):
# The order is the main (python side) reason that can cause
# a never-copy to fail.
# Prepare C-order, F-order and non-contiguous arrays:
arr = arr.copy(order1)
if order1 == "C":
assert arr.flags.c_contiguous
elif order1 == "F":
assert arr.flags.f_contiguous
elif arr.ndim != 0:
# Make array non-contiguous
arr = arr[::2, ::2]
assert not arr.flags.forc
# Whether a copy is necessary depends on the order of arr:
if order2 == "C":
no_copy_necessary = arr.flags.c_contiguous
elif order2 == "F":
no_copy_necessary = arr.flags.f_contiguous
else:
# Keeporder and Anyorder are OK with non-contiguous output.
# This is not consistent with the `astype` behaviour which
# enforces contiguity for "A". It is probably historic from when
# "K" did not exist.
no_copy_necessary = True
# Test it for both the array and a memoryview
for view in [arr, memoryview(arr)]:
for copy in self.true_vals:
res = np.array(view, copy=copy, order=order2)
assert res is not arr and res.flags.owndata
assert_array_equal(arr, res)
if no_copy_necessary:
for copy in self.if_needed_vals + self.false_vals:
res = np.array(view, copy=copy, order=order2)
# res.base.obj refers to the memoryview
if not IS_PYPY:
assert res is arr or res.base.obj is arr
else:
for copy in self.if_needed_vals:
res = np.array(arr, copy=copy, order=order2)
assert_array_equal(arr, res)
for copy in self.false_vals:
assert_raises(ValueError, np.array,
view, copy=copy, order=order2)
def test_striding_not_ok(self):
arr = np.array([[1, 2, 4], [3, 4, 5]])
assert_raises(ValueError, np.array,
arr.T, copy=np._CopyMode.NEVER,
order='C')
assert_raises(ValueError, np.array,
arr.T, copy=np._CopyMode.NEVER,
order='C', dtype=np.int64)
assert_raises(ValueError, np.array,
arr, copy=np._CopyMode.NEVER,
order='F')
assert_raises(ValueError, np.array,
arr, copy=np._CopyMode.NEVER,
order='F', dtype=np.int64)
| TestArrayCreationCopyArgument |
python | jina-ai__jina | tests/integration/docarray_v2/wrong_schema_executor.py | {
"start": 106,
"end": 230
} | class ____(Executor):
@requests
def foo(self, docs: TextDoc, **kwargs) -> DocList[TextDoc]:
pass
| WrongSchemaExec |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_xcom.py | {
"start": 20694,
"end": 24679
} | class ____(TestXComEndpoint):
@pytest.mark.parametrize(
("dag_id", "task_id", "dag_run_id", "request_body", "expected_status", "expected_detail"),
[
# Test case: Valid input, should succeed with 201 CREATED
pytest.param(
TEST_DAG_ID,
TEST_TASK_ID,
run_id,
XComCreateBody(key=TEST_XCOM_KEY, value=TEST_XCOM_VALUE),
201,
None,
id="valid-xcom-entry",
),
# Test case: DAG not found
pytest.param(
"invalid-dag-id",
TEST_TASK_ID,
run_id,
XComCreateBody(key=TEST_XCOM_KEY, value=TEST_XCOM_VALUE),
404,
"The Dag with ID: `invalid-dag-id` was not found",
id="dag-not-found",
),
# Test case: Task not found in DAG
pytest.param(
TEST_DAG_ID,
"invalid-task-id",
run_id,
XComCreateBody(key=TEST_XCOM_KEY, value=TEST_XCOM_VALUE),
404,
f"Task with ID: `invalid-task-id` not found in dag: `{TEST_DAG_ID}`",
id="task-not-found",
),
# Test case: DAG Run not found
pytest.param(
TEST_DAG_ID,
TEST_TASK_ID,
"invalid-dag-run-id",
XComCreateBody(key=TEST_XCOM_KEY, value=TEST_XCOM_VALUE),
404,
f"Dag Run with ID: `invalid-dag-run-id` not found for dag: `{TEST_DAG_ID}`",
id="dag-run-not-found",
),
# Test case: XCom entry already exists
pytest.param(
TEST_DAG_ID,
TEST_TASK_ID,
run_id,
XComCreateBody(key=TEST_XCOM_KEY, value=TEST_XCOM_VALUE),
409,
f"The XCom with key: `{TEST_XCOM_KEY}` with mentioned task instance already exists.",
id="xcom-already-exists",
),
],
)
def test_create_xcom_entry(
self,
dag_id,
task_id,
dag_run_id,
request_body,
expected_status,
expected_detail,
test_client,
session,
):
# Pre-create an XCom entry to test conflict case
if expected_status == 409:
self._create_xcom(TEST_XCOM_KEY, TEST_XCOM_VALUE)
response = test_client.post(
f"/dags/{dag_id}/dagRuns/{dag_run_id}/taskInstances/{task_id}/xcomEntries",
json=request_body.dict(),
)
assert response.status_code == expected_status
if expected_detail:
assert response.json()["detail"] == expected_detail
elif expected_status == 201:
# Validate the created XCom response
current_data = response.json()
assert current_data["key"] == request_body.key
assert current_data["value"] == XComModel.serialize_value(request_body.value)
assert current_data["dag_id"] == dag_id
assert current_data["task_id"] == task_id
assert current_data["run_id"] == dag_run_id
assert current_data["map_index"] == request_body.map_index
check_last_log(session, dag_id=TEST_DAG_ID, event="create_xcom_entry", logical_date=None)
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.post(
"/dags/dag_id/dagRuns/dag_run_id/taskInstances/task_id/xcomEntries",
json={},
)
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.post(
"/dags/dag_id/dagRuns/dag_run_id/taskInstances/task_id/xcomEntries",
json={},
)
assert response.status_code == 403
| TestCreateXComEntry |
python | h5py__h5py | h5py/tests/test_group.py | {
"start": 10838,
"end": 11257
} | class ____(BaseGroup):
"""
Base class for mapping tests
"""
def setUp(self):
self.f = File(self.mktemp(), 'w')
self.groups = ('a', 'b', 'c', 'd')
for x in self.groups:
self.f.create_group(x)
self.f['x'] = h5py.SoftLink('/mongoose')
self.groups = self.groups + ('x',)
def tearDown(self):
if self.f:
self.f.close()
| BaseMapping |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_checkpoint.py | {
"start": 10415,
"end": 10931
} | class ____(nn.Module):
def __init__(self, checkpoint: bool = False, use_reentrant: bool = True):
super().__init__()
self.l1 = nn.Linear(100, 100)
self.relu = nn.ReLU()
self.checkpoint1 = ModelWithCheckpointSubmodule(checkpoint, use_reentrant)
self.checkpoint2 = ModelWithCheckpointSubmodule(checkpoint, use_reentrant)
self.l2 = nn.Linear(100, 100)
def forward(self, x):
return self.l2(self.relu(self.checkpoint2(self.checkpoint1(self.l1(x)))))
| TestModel |
python | scipy__scipy | scipy/linalg/tests/test_decomp_lu.py | {
"start": 8038,
"end": 11321
} | class ____:
def setup_method(self):
self.rng = np.random.default_rng(1682281250228846)
self.a = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6]])
self.ca = np.array([[1, 2, 3], [1, 2, 3], [2, 5j, 6]])
# Those matrices are more robust to detect problems in permutation
# matrices than the ones above
self.b = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
self.cb = np.array([[1j, 2j, 3j], [4j, 5j, 6j], [7j, 8j, 9j]])
# Rectangular matrices
self.hrect = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
self.chrect = np.array([[1, 2, 3, 4], [5, 6, 7, 8],
[9, 10, 12, 12]]) * 1.j
self.vrect = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
self.cvrect = 1.j * np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 12, 12]])
# Medium sizes matrices
self.med = self.rng.random((30, 40))
self.cmed = self.rng.random((30, 40)) + 1.j*self.rng.random((30, 40))
def _test_common_lu_factor(self, data):
l_and_u1, piv1 = lu_factor(data)
(getrf,) = get_lapack_funcs(("getrf",), (data,))
l_and_u2, piv2, _ = getrf(data, overwrite_a=False)
assert_allclose(l_and_u1, l_and_u2)
assert_allclose(piv1, piv2)
# Simple tests.
# For lu_factor gives a LinAlgWarning because these matrices are singular
def test_hrectangular(self):
self._test_common_lu_factor(self.hrect)
def test_vrectangular(self):
self._test_common_lu_factor(self.vrect)
def test_hrectangular_complex(self):
self._test_common_lu_factor(self.chrect)
def test_vrectangular_complex(self):
self._test_common_lu_factor(self.cvrect)
# Bigger matrices
def test_medium1(self):
"""Check lu decomposition on medium size, rectangular matrix."""
self._test_common_lu_factor(self.med)
def test_medium1_complex(self):
"""Check lu decomposition on medium size, rectangular matrix."""
self._test_common_lu_factor(self.cmed)
def test_check_finite(self):
p, l, u = lu(self.a, check_finite=False)
assert_allclose(p @ l @ u, self.a)
def test_simple_known(self):
# Ticket #1458
for order in ['C', 'F']:
A = np.array([[2, 1], [0, 1.]], order=order)
LU, P = lu_factor(A)
assert_allclose(LU, np.array([[2, 1], [0, 1]]))
assert_array_equal(P, np.array([0, 1]))
@pytest.mark.parametrize("m", [0, 1, 2])
@pytest.mark.parametrize("n", [0, 1, 2])
@pytest.mark.parametrize('dtype', DTYPES)
def test_shape_dtype(self, m, n, dtype):
k = min(m, n)
a = np.eye(m, n, dtype=dtype)
lu, p = lu_factor(a)
assert_equal(lu.shape, (m, n))
assert_equal(lu.dtype, dtype)
assert_equal(p.shape, (k,))
assert_equal(p.dtype, np.int32)
@pytest.mark.parametrize(("m", "n"), [(0, 0), (0, 2), (2, 0)])
def test_empty(self, m, n):
a = np.zeros((m, n))
lu, p = lu_factor(a)
assert_allclose(lu, np.empty((m, n)))
assert_allclose(p, np.arange(0))
| TestLUFactor |
python | apache__airflow | providers/openlineage/src/airflow/providers/openlineage/utils/utils.py | {
"start": 25036,
"end": 27712
} | class ____(InfoJsonEncodable):
"""Defines encoding DAG object to JSON."""
includes = [
"dag_id",
"description",
"fileloc",
"owner",
"owner_links",
"schedule_interval", # For Airflow 2 only -> AF3 has timetable_summary
"start_date",
"tags",
]
casts = {
"timetable": lambda dag: DagInfo.serialize_timetable(dag),
"timetable_summary": lambda dag: DagInfo.timetable_summary(dag),
}
renames = {"_dag_id": "dag_id"}
@classmethod
def timetable_summary(cls, dag: DAG) -> str | None:
"""Extract summary from timetable if missing a ``timetable_summary`` property."""
if getattr(dag, "timetable_summary", None):
return dag.timetable_summary
if getattr(dag, "timetable", None):
return dag.timetable.summary
return None
@classmethod
def serialize_timetable(cls, dag: DAG) -> dict[str, Any]:
# This is enough for Airflow 2.10+ and has all the information needed
serialized = dag.timetable.serialize() or {}
# In Airflow 2.9 when using Dataset scheduling we do not receive datasets in serialized timetable
# Also for DatasetOrTimeSchedule, we only receive timetable without dataset_condition
if hasattr(dag, "dataset_triggers") and "dataset_condition" not in serialized:
try:
# Make sure we are in Airflow version where these are importable
from airflow.datasets import BaseDatasetEventInput, DatasetAll, DatasetAny
except ImportError:
log.warning("OpenLineage could not serialize full dag's timetable for dag `%s`.", dag.dag_id)
return serialized
def _serialize_ds(ds: BaseDatasetEventInput) -> dict[str, Any]:
if isinstance(ds, (DatasetAny, DatasetAll)):
return {
"__type": "dataset_all" if isinstance(ds, DatasetAll) else "dataset_any",
"objects": [_serialize_ds(child) for child in ds.objects],
}
return {"__type": "dataset", "uri": ds.uri, "extra": ds.extra}
if isinstance(dag.dataset_triggers, BaseDatasetEventInput):
serialized["dataset_condition"] = _serialize_ds(dag.dataset_triggers)
elif isinstance(dag.dataset_triggers, list) and len(dag.dataset_triggers):
serialized["dataset_condition"] = {
"__type": "dataset_all",
"objects": [_serialize_ds(trigger) for trigger in dag.dataset_triggers],
}
return serialized
| DagInfo |
python | getsentry__sentry | src/sentry/rules/conditions/event_frequency.py | {
"start": 21915,
"end": 31769
} | class ____(EventUniqueUserFrequencyCondition):
id = "sentry.rules.conditions.event_frequency.EventUniqueUserFrequencyConditionWithConditions"
label = "The issue is seen by more than {value} users in {interval} with conditions"
def query_hook(
self,
event: GroupEvent,
start: datetime,
end: datetime,
environment_id: int,
) -> int:
assert self.rule
if not features.has(
"organizations:event-unique-user-frequency-condition-with-conditions",
Project.objects.get(id=self.rule.project_id).organization,
):
raise NotImplementedError(
"EventUniqueUserFrequencyConditionWithConditions is not enabled for this organization"
)
if self.rule.data["filter_match"] == "any":
raise NotImplementedError(
"EventUniqueUserFrequencyConditionWithConditions does not support filter_match == any"
)
conditions = []
for condition in self.rule.data["conditions"]:
if condition["id"] == self.id:
continue
snuba_condition = self.convert_rule_condition_to_snuba_condition(condition)
if snuba_condition:
conditions.append(snuba_condition)
total = self.get_chunked_result(
tsdb_function=self.tsdb.get_distinct_counts_totals,
model=get_issue_tsdb_user_group_model(GroupCategory.ERROR),
organization_id=event.group.project.organization_id,
group_ids=[event.group.id],
start=start,
end=end,
environment_id=environment_id,
referrer_suffix="batch_alert_event_uniq_user_frequency",
conditions=conditions,
group_on_time=False,
)
return total[event.group.id]
def batch_query_hook(
self,
group_ids: set[int],
start: datetime,
end: datetime,
environment_id: int,
group_on_time: bool = False,
) -> dict[int, int | float]:
logger = logging.getLogger(
"sentry.rules.event_frequency.EventUniqueUserFrequencyConditionWithConditions"
)
logger.info(
"batch_query_hook_start",
extra={
"group_ids": group_ids,
"start": start,
"end": end,
"environment_id": environment_id,
},
)
assert self.rule
if not features.has(
"organizations:event-unique-user-frequency-condition-with-conditions",
self.rule.project.organization,
):
raise NotImplementedError(
"EventUniqueUserFrequencyConditionWithConditions is not enabled for this organization"
)
if self.rule.data["filter_match"] == "any":
raise NotImplementedError(
"EventUniqueUserFrequencyConditionWithConditions does not support filter_match == any"
)
batch_totals: dict[int, int | float] = defaultdict(int)
groups = Group.objects.filter(id__in=group_ids).values(
"id", "type", "project_id", "project__organization_id"
)
error_issue_ids, generic_issue_ids = self.get_error_and_generic_group_ids(groups)
organization_id = self.get_value_from_groups(groups, "project__organization_id")
conditions = []
for condition in self.rule.data["conditions"]:
if condition["id"] == self.id:
continue
snuba_condition = self.convert_rule_condition_to_snuba_condition(condition)
if snuba_condition:
conditions.append(snuba_condition)
logger.info(
"batch_query_hook_conditions",
extra={"conditions": conditions},
)
if error_issue_ids and organization_id:
error_totals = self.get_chunked_result(
tsdb_function=self.tsdb.get_distinct_counts_totals,
model=get_issue_tsdb_user_group_model(GroupCategory.ERROR),
group_ids=error_issue_ids,
organization_id=organization_id,
start=start,
end=end,
environment_id=environment_id,
referrer_suffix="batch_alert_event_uniq_user_frequency",
conditions=conditions,
group_on_time=group_on_time,
)
batch_totals.update(error_totals)
if generic_issue_ids and organization_id:
error_totals = self.get_chunked_result(
tsdb_function=self.tsdb.get_distinct_counts_totals,
model=get_issue_tsdb_user_group_model(GroupCategory.PERFORMANCE),
group_ids=generic_issue_ids,
organization_id=organization_id,
start=start,
end=end,
environment_id=environment_id,
referrer_suffix="batch_alert_event_uniq_user_frequency",
conditions=conditions,
group_on_time=group_on_time,
)
batch_totals.update(error_totals)
logger.info(
"batch_query_hook_end",
extra={"batch_totals": batch_totals},
)
return batch_totals
def get_snuba_query_result(
self,
tsdb_function: Callable[..., Any],
keys: list[int],
group_id: int,
organization_id: int,
model: TSDBModel,
start: datetime,
end: datetime,
environment_id: int,
referrer_suffix: str,
group_on_time: bool = False,
project_ids: list[int] | None = None,
conditions: list[tuple[str, str, str | list[str]]] | None = None,
) -> Mapping[int, int]:
result: Mapping[int, int] = tsdb_function(
model=model,
keys=keys,
start=start,
end=end,
environment_id=environment_id,
use_cache=True,
jitter_value=group_id,
tenant_ids={"organization_id": organization_id},
referrer_suffix=referrer_suffix,
conditions=conditions,
group_on_time=group_on_time,
)
return result
def get_chunked_result(
self,
tsdb_function: Callable[..., Any],
model: TSDBModel,
group_ids: list[int],
organization_id: int,
start: datetime,
end: datetime,
environment_id: int,
referrer_suffix: str,
group_on_time: bool = False,
project_ids: list[int] | None = None,
conditions: list[tuple[str, str, str | list[str]]] | None = None,
) -> dict[int, int]:
batch_totals: dict[int, int] = defaultdict(int)
group_id = group_ids[0]
for group_chunk in chunked(group_ids, SNUBA_LIMIT):
result = self.get_snuba_query_result(
tsdb_function=tsdb_function,
model=model,
keys=[group_id for group_id in group_chunk],
group_id=group_id,
organization_id=organization_id,
start=start,
end=end,
environment_id=environment_id,
referrer_suffix=referrer_suffix,
project_ids=project_ids,
conditions=conditions,
group_on_time=group_on_time,
)
batch_totals.update(result)
return batch_totals
@staticmethod
def convert_rule_condition_to_snuba_condition(
condition: dict[str, Any],
) -> tuple[str, str, str | list[str]] | None:
if condition["id"] != "sentry.rules.filters.tagged_event.TaggedEventFilter":
return None
lhs = f"tags[{condition['key']}]"
rhs = condition["value"]
match condition["match"]:
case MatchType.EQUAL:
operator = Op.EQ
case MatchType.NOT_EQUAL:
operator = Op.NEQ
case MatchType.STARTS_WITH:
operator = Op.LIKE
rhs = f"{rhs}%"
case MatchType.NOT_STARTS_WITH:
operator = Op.NOT_LIKE
rhs = f"{rhs}%"
case MatchType.ENDS_WITH:
operator = Op.LIKE
rhs = f"%{rhs}"
case MatchType.NOT_ENDS_WITH:
operator = Op.NOT_LIKE
rhs = f"%{rhs}"
case MatchType.CONTAINS:
operator = Op.LIKE
rhs = f"%{rhs}%"
case MatchType.NOT_CONTAINS:
operator = Op.NOT_LIKE
rhs = f"%{rhs}%"
case MatchType.IS_SET:
operator = Op.IS_NOT_NULL
rhs = None
case MatchType.NOT_SET:
operator = Op.IS_NULL
rhs = None
case MatchType.IS_IN:
operator = Op.IN
rhs = rhs.split(",")
case MatchType.NOT_IN:
operator = Op.NOT_IN
rhs = rhs.split(",")
case _:
raise ValueError(f"Unsupported match type: {condition['match']}")
return (lhs, operator.value, rhs)
PERCENT_INTERVALS: dict[str, tuple[str, timedelta]] = {
"1m": ("1 minute", timedelta(minutes=1)),
"5m": ("5 minutes", timedelta(minutes=5)),
"10m": ("10 minutes", timedelta(minutes=10)),
"30m": ("30 minutes", timedelta(minutes=30)),
"1h": ("1 hour", timedelta(minutes=60)),
}
PERCENT_INTERVALS_TO_DISPLAY: dict[str, tuple[str, timedelta]] = {
"5m": ("5 minutes", timedelta(minutes=5)),
"10m": ("10 minutes", timedelta(minutes=10)),
"30m": ("30 minutes", timedelta(minutes=30)),
"1h": ("1 hour", timedelta(minutes=60)),
}
MIN_SESSIONS_TO_FIRE = 50
| EventUniqueUserFrequencyConditionWithConditions |
python | getsentry__sentry | src/sentry/integrations/api/bases/external_actor.py | {
"start": 6641,
"end": 7264
} | class ____(ExternalActorSerializerBase):
_actor_key = "team_id"
team_id = serializers.IntegerField(required=True)
def validate_team_id(self, team_id: int) -> Team:
"""Ensure that this team exists and that they belong to the organization."""
try:
return Team.objects.get(id=team_id, organization=self.organization)
except Team.DoesNotExist:
raise serializers.ValidationError("This team does not exist.")
class Meta:
model = ExternalActor
fields = ["team_id", "external_id", "external_name", "provider", "integration_id"]
| ExternalTeamSerializer |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-confluence/llama_index/readers/confluence/base.py | {
"start": 2139,
"end": 47204
} | class ____(BaseReader, DispatcherSpanMixin):
"""
Confluence reader.
Reads a set of confluence pages given a space key and optionally a list of page ids
For more on OAuth login, checkout:
- https://atlassian-python-api.readthedocs.io/index.html
- https://developer.atlassian.com/cloud/confluence/oauth-2-3lo-apps/
Args:
oauth2 (dict): Atlassian OAuth 2.0, minimum fields are `client_id` and `token`, where `token` is a dict and must at least contain "access_token" and "token_type".
base_url (str): 'base_url' for confluence cloud instance, this is suffixed with '/wiki', eg 'https://yoursite.atlassian.com/wiki'
cloud (bool): connecting to Confluence Cloud or self-hosted instance
api_token (str): Confluence API token, see https://confluence.atlassian.com/cloud/api-tokens-938839638.html
cookies (dict): Confluence cookies, see https://atlassian-python-api.readthedocs.io/index.html
user_name (str): Confluence username, used for basic auth. Must be used with `password`.
password (str): Confluence password, used for basic auth. Must be used with `user_name`.
client_args (dict): Additional keyword arguments to pass directly to the Atlassian Confluence client constructor, for example `{'backoff_and_retry': True}`.
custom_parsers (dict): Optional custom parsers for different file types. Maps FileType enum values to BaseReader instances.
process_attachment_callback (callable): Optional callback function to determine whether to process an attachment. Should return tuple[bool, str] where bool indicates whether to process and str provides reason if not.
process_document_callback (callable): Optional callback function to determine whether to process a document. Should return bool indicating whether to process.
custom_folder (str): Optional custom folder path for storing temporary files. Can only be used when custom_parsers are provided. Defaults to current working directory if custom_parsers are used.
logger (Logger): Optional custom logger instance. If not provided, uses internal logger.
fail_on_error (bool): Whether to raise exceptions on processing errors or continue with warnings. Default is True.
Instrumentation Events:
The ConfluenceReader uses LlamaIndex's instrumentation system to emit events during document and attachment processing.
These events can be captured by adding event handlers to the dispatcher.
Available events:
- TotalPagesToProcessEvent: Emitted when the total number of pages to process is determined
- PageDataFetchStartedEvent: Emitted when processing of a page begins
- PageDataFetchCompletedEvent: Emitted when a page is successfully processed
- PageFailedEvent: Emitted when page processing fails
- PageSkippedEvent: Emitted when a page is skipped due to callback decision
- AttachmentProcessingStartedEvent: Emitted when attachment processing begins
- AttachmentProcessedEvent: Emitted when an attachment is successfully processed
- AttachmentSkippedEvent: Emitted when an attachment is skipped
- AttachmentFailedEvent: Emitted when attachment processing fails
To listen to events, add an event handler to the dispatcher:
```python
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.event_handlers import BaseEventHandler
class MyEventHandler(BaseEventHandler):
def handle(self, event):
print(f"Event: {event.class_name()}")
dispatcher = get_dispatcher(__name__)
dispatcher.add_event_handler(MyEventHandler())
```
"""
def __init__(
self,
base_url: str = None,
oauth2: Optional[Dict] = None,
cloud: bool = True,
api_token: Optional[str] = None,
cookies: Optional[dict] = None,
user_name: Optional[str] = None,
password: Optional[str] = None,
client_args: Optional[dict] = None,
custom_parsers: Optional[Dict[FileType, BaseReader]] = None,
process_attachment_callback: Optional[
Callable[[str, int], tuple[bool, str]]
] = None,
process_document_callback: Optional[Callable[[str], bool]] = None,
custom_folder: Optional[str] = None,
logger: Optional[logging.Logger] = None,
fail_on_error: bool = True,
) -> None:
if base_url is None:
raise ValueError("Must provide `base_url`")
self.base_url = base_url
self.custom_parsers = custom_parsers or {}
# Only set custom_folder if custom_parsers are provided
if custom_parsers and custom_folder:
self.custom_folder = custom_folder
elif custom_parsers:
self.custom_folder = os.getcwd()
elif custom_folder:
raise ValueError(
"custom_folder can only be used when custom_parsers are provided"
)
else:
self.custom_folder = None
self.logger = logger or internal_logger
self.process_attachment_callback = process_attachment_callback
self.process_document_callback = process_document_callback
self.fail_on_error = fail_on_error
try:
from atlassian import Confluence
except ImportError:
raise ImportError(
"`atlassian` package not found, please run `pip install atlassian-python-api`"
)
self.confluence: Confluence = None
if client_args is None:
client_args = {}
if oauth2:
self.confluence = Confluence(
url=base_url, oauth2=oauth2, cloud=cloud, **client_args
)
else:
if api_token is not None:
self.confluence = Confluence(
url=base_url, token=api_token, cloud=cloud, **client_args
)
elif cookies is not None:
self.confluence = Confluence(
url=base_url, cookies=cookies, cloud=cloud, **client_args
)
elif user_name is not None and password is not None:
self.confluence = Confluence(
url=base_url,
username=user_name,
password=password,
cloud=cloud,
**client_args,
)
else:
api_token = os.getenv(CONFLUENCE_API_TOKEN)
if api_token is not None:
self.confluence = Confluence(
url=base_url, token=api_token, cloud=cloud, **client_args
)
else:
user_name = os.getenv(CONFLUENCE_USERNAME)
password = os.getenv(CONFLUENCE_PASSWORD)
if user_name is not None and password is not None:
self.confluence = Confluence(
url=base_url,
username=user_name,
password=password,
cloud=cloud,
**client_args,
)
else:
raise ValueError(
"Must set one of environment variables `CONFLUENCE_API_KEY`, or"
" `CONFLUENCE_USERNAME` and `CONFLUENCE_PASSWORD`, if oauth2, or"
" api_token, or user_name and password parameters are not provided"
)
self._next_cursor = None
if custom_parsers:
self.custom_parser_manager = CustomParserManager(
custom_parsers, self.custom_folder
)
else:
self.custom_parser_manager = None
def _format_attachment_header(self, attachment: dict) -> str:
"""Formats the attachment title as a markdown header."""
return f"# {attachment['title']}\n"
@dispatcher.span
def load_data(
self,
space_key: Optional[str] = None,
page_ids: Optional[List[str]] = None,
folder_id: Optional[str] = None,
page_status: Optional[str] = None,
label: Optional[str] = None,
cql: Optional[str] = None,
include_attachments=False,
include_children=False,
start: Optional[int] = None,
cursor: Optional[str] = None,
limit: Optional[int] = None,
max_num_results: Optional[int] = None,
) -> List[Document]:
"""
Load Confluence pages from Confluence, specifying by one of four mutually exclusive methods:
`space_key`, `page_ids`, `label`, `folder_id` or `cql`
(Confluence Query Language https://developer.atlassian.com/cloud/confluence/advanced-searching-using-cql/ ).
Args:
space_key (str): Confluence space key, eg 'DS'
page_ids (list): List of page ids, eg ['123456', '123457']
folder_id (str): Confluence folder id, eg '1234567890'
page_status (str): Page status, one of None (all statuses), 'current', 'draft', 'archived'. Only compatible with space_key.
label (str): Confluence label, eg 'my-label'
cql (str): Confluence Query Language query, eg 'label="my-label"'
include_attachments (bool): If True, include attachments.
include_children (bool): If True, do a DFS of the descendants of each page_id in `page_ids`. Only compatible with `page_ids`.
start (int): Skips over the first n elements. Used only with space_key
cursor (str): Skips to the cursor. Used with cql and label, set when the max limit has been hit for cql based search
limit (int): Deprecated, use `max_num_results` instead.
max_num_results (int): Maximum number of results to return. If None, return all results. Requests are made in batches to achieve the desired number of results.
"""
num_space_key_parameter = 1 if space_key else 0
num_page_ids_parameter = 1 if page_ids is not None else 0
num_label_parameter = 1 if label else 0
num_cql_parameter = 1 if cql else 0
num_folder_id_parameter = 1 if folder_id else 0
if (
num_space_key_parameter
+ num_page_ids_parameter
+ num_label_parameter
+ num_cql_parameter
+ num_folder_id_parameter
!= 1
):
raise ValueError(
"Must specify exactly one among `space_key`, `page_ids`, `label`, `cql` parameters."
)
if cursor and start:
raise ValueError("Must not specify `start` when `cursor` is specified")
if space_key and cursor:
raise ValueError("Must not specify `cursor` when `space_key` is specified")
if page_status and not space_key:
raise ValueError(
"Must specify `space_key` when `page_status` is specified."
)
if include_children and not page_ids:
raise ValueError(
"Must specify `page_ids` when `include_children` is specified."
)
if limit is not None:
max_num_results = limit
self.logger.warning(
"`limit` is deprecated and no longer relates to the Confluence server's"
" API limits. If you wish to limit the number of returned results"
" please use `max_num_results` instead."
)
from .html_parser import HtmlTextParser
text_maker = HtmlTextParser()
if not start:
start = 0
pages: List = []
if space_key:
pages.extend(
self._get_data_with_paging(
self.confluence.get_all_pages_from_space,
start=start,
max_num_results=max_num_results,
space=space_key,
status=page_status,
expand="body.export_view.value",
content_type="page",
)
)
elif label:
pages.extend(
self._get_cql_data_with_paging(
start=start,
cursor=cursor,
cql=f'type="page" AND label="{label}"',
max_num_results=max_num_results,
expand="body.export_view.value",
)
)
elif cql:
pages.extend(
self._get_cql_data_with_paging(
start=start,
cursor=cursor,
cql=cql,
max_num_results=max_num_results,
expand="body.export_view.value",
)
)
elif page_ids:
if include_children:
dfs_page_ids = []
max_num_remaining = max_num_results
for page_id in page_ids:
current_dfs_page_ids = self._dfs_page_ids(
page_id,
type="page",
max_num_results=max_num_remaining,
)
dfs_page_ids.extend(current_dfs_page_ids)
if max_num_results is not None:
max_num_remaining -= len(current_dfs_page_ids)
if max_num_remaining <= 0:
break
page_ids = dfs_page_ids
for page_id in (
page_ids[:max_num_results] if max_num_results is not None else page_ids
):
pages.append(
self._get_data_with_retry(
self.confluence.get_page_by_id,
page_id=page_id,
expand="body.export_view.value",
)
)
elif folder_id:
# Fetch all folders in the folder
max_num_remaining = max_num_results
page_ids = self._dfs_page_ids(
folder_id,
type="folder",
max_num_results=max_num_remaining,
)
for page_id in (
page_ids[:max_num_results] if max_num_results is not None else page_ids
):
pages.append(
self._get_data_with_retry(
self.confluence.get_page_by_id,
page_id=page_id,
expand="body.export_view.value",
)
)
docs = []
if pages:
dispatcher.event(TotalPagesToProcessEvent(total_pages=len(pages)))
for page in pages:
try:
doc = self.process_page(page, include_attachments, text_maker)
if doc:
docs.append(doc)
except Exception as e:
self.logger.error(f"Error processing page: {e}")
dispatcher.event(PageFailedEvent(page_id=page["id"], error=str(e)))
if self.fail_on_error:
raise
else:
self.logger.warning(
f"Failed to process page {page['id']}: {e}. Skipping this page."
)
continue
return docs
    def _dfs_page_ids(self, id, type="page", max_num_results=None):
        """Depth-first traversal collecting page ids starting at ``id``.

        Pages contribute their own id plus their descendants'; folders are
        containers only and contribute just their children. Traversal stops
        early once ``max_num_results`` ids have been collected (``None``
        means unbounded).
        """
        ret = []
        max_num_remaining = max_num_results
        if type == "page":
            # Only pages are returned; a folder id is never added to ret.
            ret.append(id)
            if max_num_remaining is not None:
                max_num_remaining -= 1
                if max_num_remaining < 0:
                    return ret
        # Fetch both page and folder children with their types
        # NOTE(review): both child queries below share the same remaining
        # budget; the loop afterwards enforces the overall cap.
        child_items = [
            (child_id, "page")
            for child_id in self._get_data_with_paging(
                self.confluence.get_child_id_list,
                page_id=id,
                type="page",
                max_num_results=max_num_remaining,
            )
        ]
        if self.confluence.cloud:
            # Folder children only exist on Confluence Cloud.
            child_items.extend(
                [
                    (child_id, "folder")
                    for child_id in self._get_data_with_paging(
                        self.confluence.get_child_id_list,
                        page_id=id,
                        type="folder",
                        max_num_results=max_num_remaining,
                    )
                ]
            )
        for child_id, child_type in child_items:
            if max_num_remaining is not None and max_num_remaining <= 0:
                break
            dfs_ids = self._dfs_page_ids(
                child_id, type=child_type, max_num_results=max_num_remaining
            )
            ret.extend(dfs_ids)
            if max_num_remaining is not None:
                # Charge the whole subtree against the remaining budget.
                max_num_remaining -= len(dfs_ids)
                if max_num_remaining <= 0:
                    break
        return ret
    def _get_data_with_paging(
        self, paged_function, start=0, max_num_results=50, **kwargs
    ):
        """Repeatedly call ``paged_function`` until exhausted or capped.

        ``max_num_results=None`` fetches everything (stopping on the first
        empty batch); otherwise fetching stops as soon as one batch meets
        the remaining budget.
        """
        max_num_remaining = max_num_results
        ret = []
        while True:
            # ``limit`` may be None, in which case the underlying client
            # falls back to its own default page size.
            results = self._get_data_with_retry(
                paged_function, start=start, limit=max_num_remaining, **kwargs
            )
            ret.extend(results)
            # Stop on an empty page, or once this batch met the remaining
            # budget (``and`` binds tighter than ``or`` here).
            if (
                len(results) == 0
                or max_num_results is not None
                and len(results) >= max_num_remaining
            ):
                break
            start += len(results)
            if max_num_remaining is not None:
                max_num_remaining -= len(results)
        return ret
    def _get_cql_data_with_paging(
        self,
        cql,
        start=0,
        cursor=None,
        max_num_results=50,
        expand="body.export_view.value",
    ):
        """Page through a CQL content search.

        Confluence Cloud paginates with opaque cursors while Server uses
        ``start`` offsets; both are driven from the ``_links.next`` URL of
        each response. Side effect: the last cursor is stored in
        ``self._next_cursor`` so callers can resume via
        :meth:`get_next_cursor`.
        """
        max_num_remaining = max_num_results
        ret = []
        params = {"cql": cql, "start": start, "expand": expand}
        if cursor:
            # Cursors arrive URL-encoded; the client re-encodes them.
            params["cursor"] = unquote(cursor)
        if max_num_results is not None:
            params["limit"] = max_num_remaining
        while True:
            results = self._get_data_with_retry(
                self.confluence.get, path="rest/api/content/search", params=params
            )
            ret.extend(results["results"])
            params["start"] += len(results["results"])
            next_url = (
                results["_links"]["next"] if "next" in results["_links"] else None
            )
            if not next_url:
                # Exhausted all results; nothing to resume from.
                self._next_cursor = None
                break
            if "cursor=" in next_url:  # On confluence Server this is not set
                cursor = next_url.split("cursor=")[1].split("&")[0]
                params["cursor"] = unquote(cursor)
            if max_num_results is not None:
                params["limit"] -= len(results["results"])
                if params["limit"] <= 0:
                    # Budget exhausted; remember where to resume.
                    self._next_cursor = cursor
                    break
        return ret
def get_next_cursor(self):
"""
Returns: The last set cursor from a cql based search.
"""
return self._next_cursor
    @retry(stop_max_attempt_number=1, wait_fixed=4)
    def _get_data_with_retry(self, function, **kwargs):
        # Thin wrapper so every Confluence API call shares one retry policy.
        # NOTE(review): stop_max_attempt_number=1 means a single attempt,
        # i.e. no actual retry — confirm whether this is intentional.
        return function(**kwargs)
    @dispatcher.span
    def process_page(self, page, include_attachments, text_maker):
        """Convert one Confluence page payload into a Document.

        ``page`` is the raw REST payload (must include
        ``body.export_view.value``). Attachment texts are appended when
        ``include_attachments`` is True. Returns None when
        ``process_document_callback`` vetoes the page.
        """
        self.logger.info("Processing " + self.base_url + page["_links"]["webui"])
        if self.process_document_callback:
            should_process = self.process_document_callback(page["id"])
            if not should_process:
                self.logger.info(
                    f"Skipping page {page['id']} based on callback decision."
                )
                dispatcher.event(PageSkippedEvent(page_id=page["id"]))
                return None
        dispatcher.event(PageDataFetchStartedEvent(page_id=page["id"]))
        if include_attachments:
            attachment_texts = self.process_attachment(page["id"])
        else:
            attachment_texts = []
        if FileType.HTML in self.custom_parsers and self.custom_folder:
            html_text = page["body"]["export_view"]["value"]
            # save in temporary file
            # The custom HTML parser expects a file path, so the page body is
            # written to a temp file inside custom_folder and removed after.
            file_location = None
            with tempfile.NamedTemporaryFile(
                mode="w",
                suffix=".html",
                encoding="utf-8",
                dir=self.custom_folder,
                delete=False,
            ) as f:
                f.write(html_text)
                file_location = f.name
            try:
                text = (
                    page["title"]
                    + "\n"
                    + "\n".join(
                        doc.text
                        for doc in self.custom_parsers[FileType.HTML].load_data(
                            file_path=file_location
                        )
                    )
                    + "\n"
                    + "\n".join(attachment_texts)
                )
            finally:
                # Best-effort cleanup of the temp file.
                try:
                    os.unlink(file_location)
                except OSError:
                    pass
        else:
            text = text_maker.convert(page["body"]["export_view"]["value"]) + "".join(
                attachment_texts
            )
        doc = Document(
            text=text,
            doc_id=page["id"],
            extra_info={
                "title": page["title"],
                "page_id": page["id"],
                "status": page["status"],
                "url": self.base_url + page["_links"]["webui"],
            },
        )
        dispatcher.event(PageDataFetchCompletedEvent(page_id=page["id"], document=doc))
        return doc
@dispatcher.span
def process_attachment(self, page_id):
try:
pass
except ImportError:
raise ImportError(
"`pytesseract` or `pdf2image` or `Pillow` package not found, please run `pip install pytesseract pdf2image Pillow`"
)
# depending on setup you may also need to set the correct path for poppler and tesseract
attachments = self.confluence.get_attachments_from_content(page_id)["results"]
texts = []
if not attachments:
return texts
for attachment in attachments:
self.logger.info("Processing attachment " + attachment["title"])
dispatcher.event(
AttachmentProcessingStartedEvent(
page_id=page_id,
attachment_id=attachment["id"],
attachment_name=attachment["title"],
attachment_type=attachment["metadata"]["mediaType"],
attachment_size=attachment["extensions"]["fileSize"],
attachment_link=attachment["_links"]["webui"],
)
)
if self.process_attachment_callback:
should_process, reason = self.process_attachment_callback(
attachment["metadata"]["mediaType"],
attachment["extensions"]["fileSize"],
attachment["title"],
)
if not should_process:
self.logger.info(
f"Skipping attachment {attachment['title']} based on callback decision."
)
dispatcher.event(
AttachmentSkippedEvent(
page_id=page_id,
attachment_id=attachment["id"],
attachment_name=attachment["title"],
attachment_type=attachment["metadata"]["mediaType"],
attachment_size=attachment["extensions"]["fileSize"],
attachment_link=attachment["_links"]["webui"],
reason=reason,
)
)
continue
try:
media_type = attachment["metadata"]["mediaType"]
absolute_url = self.base_url + attachment["_links"]["download"]
title = self._format_attachment_header(attachment)
if media_type == "application/pdf":
self.logger.info("Processing PDF attachment " + absolute_url)
text = title + self.process_pdf(absolute_url)
elif (
media_type == "image/png"
or media_type == "image/jpg"
or media_type == "image/jpeg"
or media_type == "image/webp"
):
self.logger.info("Processing image attachment " + absolute_url)
text = title + self.process_image(absolute_url)
elif (
media_type
== "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
):
self.logger.info(
"Processing Word document attachment " + absolute_url
)
text = title + self.process_doc(absolute_url)
elif (
media_type == "application/vnd.ms-excel"
or media_type
== "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
or media_type == "application/vnd.ms-excel.sheet.macroenabled.12"
):
if attachment["title"].endswith(".csv") or absolute_url.endswith(
".csv"
):
self.logger.info("Processing CSV attachment " + absolute_url)
text = title + self.process_csv(absolute_url)
else:
self.logger.info("Processing XLS attachment " + absolute_url)
text = title + self.process_xls(absolute_url)
elif (
media_type
== "application/vnd.ms-excel.sheet.binary.macroenabled.12"
):
self.logger.info("Processing XLSB attachment " + absolute_url)
text = title + self.process_xlsb(absolute_url)
elif media_type == "text/csv":
self.logger.info("Processing CSV attachment " + absolute_url)
text = title + self.process_csv(absolute_url)
elif media_type == "application/vnd.ms-outlook":
self.logger.info(
"Processing Outlook message attachment " + absolute_url
)
text = title + self.process_msg(absolute_url)
elif media_type == "text/html":
self.logger.info(" Processing HTML attachment " + absolute_url)
text = title + self.process_html(absolute_url)
elif media_type == "text/plain":
if attachment["title"].endswith(".csv") or absolute_url.endswith(
".csv"
):
self.logger.info("Processing CSV attachment " + absolute_url)
text = title + self.process_csv(absolute_url)
else:
self.logger.info("Processing Text attachment " + absolute_url)
text = title + self.process_txt(absolute_url)
elif media_type == "text/markdown" or absolute_url.endswith(
(".md", ".mdx")
):
self.logger.info("Processing Markdown attachment " + absolute_url)
text = title + self.process_txt(absolute_url)
elif media_type == "image/svg+xml":
self.logger.info("Processing SVG attachment " + absolute_url)
text = title + self.process_svg(absolute_url)
elif (
media_type
== "application/vnd.openxmlformats-officedocument.presentationml.presentation"
or media_type
== "application/vnd.ms-powerpoint.presentation.macroenabled.12"
):
self.logger.info(
"Processing PowerPoint attachment "
+ absolute_url
+ " ("
+ media_type
+ ")"
)
text = title + self.process_ppt(absolute_url)
elif media_type == "binary/octet-stream" and (
attachment["title"].strip().endswith(".md")
or attachment["title"].strip().endswith(".mdx")
):
self.logger.info("Processing Markdown attachment " + absolute_url)
text = title + self.process_txt(absolute_url)
else:
self.logger.info(
f"Skipping unsupported attachment {absolute_url} of media_type {media_type}"
)
dispatcher.event(
AttachmentSkippedEvent(
page_id=page_id,
attachment_id=attachment["id"],
attachment_name=attachment["title"],
attachment_type=attachment["metadata"]["mediaType"],
attachment_size=attachment["extensions"]["fileSize"],
attachment_link=attachment["_links"]["webui"],
reason="Unsupported media type",
)
)
continue
texts.append(text)
dispatcher.event(
AttachmentProcessedEvent(
page_id=page_id,
attachment_id=attachment["id"],
attachment_name=attachment["title"],
attachment_type=attachment["metadata"]["mediaType"],
attachment_size=attachment["extensions"]["fileSize"],
attachment_link=attachment["_links"]["webui"],
)
)
except Exception as e:
self.logger.error(
f"Failed to process attachment {attachment['title']}: {e}"
)
dispatcher.event(
AttachmentFailedEvent(
page_id=page_id,
attachment_id=attachment["id"],
attachment_name=attachment["title"],
attachment_type=attachment["metadata"]["mediaType"],
attachment_size=attachment["extensions"]["fileSize"],
attachment_link=attachment["_links"]["webui"],
error=str(e),
)
)
return texts
def process_pdf(self, link):
if FileType.PDF not in self.custom_parsers:
try:
import pytesseract # type: ignore
from pdf2image import convert_from_bytes # type: ignore
except ImportError:
raise ImportError(
"`pytesseract` or `pdf2image` package not found, please run `pip install pytesseract pdf2image`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
if FileType.PDF in self.custom_parsers and self.custom_parser_manager:
return self.custom_parser_manager.process_with_custom_parser(
FileType.PDF, response.content, "pdf"
)
try:
images = convert_from_bytes(response.content)
except ValueError:
return text
for i, image in enumerate(images):
image_text = pytesseract.image_to_string(image)
text += f"Page {i + 1}:\n{image_text}\n\n"
return text
def process_html(self, link):
try:
from bs4 import BeautifulSoup # type: ignore
except ImportError:
raise ImportError(
"`beautifulsoup4` or `requests` package not found, please run `pip install beautifulsoup4 requests`"
)
try:
response = self.confluence.request(path=link, absolute=True)
if response.status_code != 200:
return "Error fetching HTML content: HTTP Status Code {}".format(
response.status_code
)
if FileType.HTML in self.custom_parsers and self.custom_parser_manager:
return self.custom_parser_manager.process_with_custom_parser(
FileType.HTML, response.content, "html"
)
# Parse the HTML content and extract text
soup = BeautifulSoup(response.content, "html.parser")
return soup.get_text(separator=" ", strip=True)
except Exception as e:
self.logger.error(f"Error processing HTML file at {link}: {e}")
return f"Error processing HTML file: {link}. An error occurred while fetching or parsing the content."
def process_txt(self, link):
try:
response = self.confluence.request(path=link, absolute=True)
if response.status_code != 200:
return "Error fetching text content: HTTP Status Code {}".format(
response.status_code
)
return response.text
except Exception as e:
self.logger.error(f"Error processing text file at {link}: {e}")
return f"Error processing text file: {link}. An error occurred while fetching the content."
def process_msg(self, link):
try:
import extract_msg # type: ignore
except ImportError:
raise ImportError(
"`extract-msg` package not found, please run `pip install extract-msg`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if response.status_code != 200 or response.content in [b"", None]:
self.logger.error(f"Failed to download .msg file from {link}")
return text
file_data = BytesIO(response.content)
try:
# Load the .msg file content
with extract_msg.Message(file_data) as msg:
subject = msg.subject
sender = msg.sender
to = msg.to
cc = msg.cc
body = msg.body
# Compile the extracted information into a text string
text = (
f"Subject: {subject}\nFrom: {sender}\nTo: {to}\nCC: {cc}\n\n{body}"
)
except Exception as e:
self.logger.error(f"Error processing .msg file at {link}: {e}")
return "Error processing .msg file."
return text
    def process_image(self, link):
        """OCR an image attachment (png/jpg/jpeg/webp) into text.

        Returns "" for download failures, unreadable images, or request
        errors; all failures are logged rather than raised.
        """
        try:
            import pytesseract  # type: ignore
            from PIL import Image  # type: ignore
        except ImportError:
            raise ImportError(
                "`pytesseract` or `Pillow` package not found, please run `pip install pytesseract Pillow`"
            )
        text = ""
        try:
            response = self.confluence.request(path=link, absolute=True)
            # Check if the response status code indicates success (200 OK)
            if response.status_code == 200 and response.content:
                try:
                    image = Image.open(BytesIO(response.content))
                    text = pytesseract.image_to_string(image)
                except OSError:
                    # Handle errors that occur while opening or processing the image
                    self.logger.error(
                        f"Error processing image at {link}: Unable to open or read the image content."
                    )
                    return text
            else:
                # Log non-200 responses here if needed
                self.logger.error(
                    f"Error fetching image at {link}: HTTP status code {response.status_code}."
                )
                return text
        except requests.exceptions.RequestException as e:
            # This catches any Requests-related exceptions, including HTTPError, ConnectionError, etc.
            self.logger.error(f"Request error while fetching image at {link}: {e}")
            return text
        # Success path: OCR result (possibly empty) is returned here.
        return text
def process_doc(self, link):
try:
import zipfile # Import zipfile to catch BadZipFile exceptions
except ImportError:
raise ImportError("Failed to import BytesIO from io")
if not self.custom_parsers.get(FileType.DOCUMENT):
try:
import docx2txt
except ImportError:
raise ImportError(
"`docx2txt` package not found, please run `pip install docx2txt`"
)
text = ""
try:
response = self.confluence.request(path=link, absolute=True)
if response.status_code != 200 or response.content in [b"", None]:
self.logger.error(
f"Error fetching document at {link}: HTTP status code {response.status_code}."
)
return text
file_data = BytesIO(response.content)
# save in file
# Use custom parser if available
if FileType.DOCUMENT in self.custom_parsers and self.custom_parser_manager:
return self.custom_parser_manager.process_with_custom_parser(
FileType.DOCUMENT, file_data.getbuffer(), "docx"
)
try:
text = docx2txt.process(file_data)
except zipfile.BadZipFile:
self.logger.error(
f"Error processing Word document at {link}: File is not a zip file."
)
return text
except Exception as e:
self.logger.error(f"Unexpected error processing document at {link}: {e}")
return text
return text
    def process_ppt(self, link):
        """Extract slide text from a PowerPoint attachment.

        Uses a registered custom PRESENTATION parser when available,
        otherwise python-pptx. Returns "" on download failure and an error
        string on parse failure.
        """
        if not self.custom_parsers.get(FileType.PRESENTATION):
            # python-pptx is only needed for the fallback path.
            try:
                from pptx import Presentation  # type: ignore
            except ImportError:
                raise ImportError(
                    "`python-pptx` package not found, please run `pip install python-pptx`"
                )
        response = self.confluence.request(path=link, absolute=True)
        text = ""
        if (
            response.status_code != 200
            or response.content == b""
            or response.content is None
        ):
            return text
        file_data = BytesIO(response.content)
        if not file_data:
            # NOTE(review): BytesIO instances are always truthy, so this
            # branch appears unreachable — confirm intent.
            self.logger.error(
                f"Error processing PowerPoint file at {link}: Empty content."
            )
            return text
        if FileType.PRESENTATION in self.custom_parsers and self.custom_parser_manager:
            return self.custom_parser_manager.process_with_custom_parser(
                FileType.PRESENTATION, file_data.getbuffer(), "pptx"
            )
        # Check if the response content is empty
        try:
            # Concatenate text from every text-bearing shape on every slide.
            presentation = Presentation(file_data)
            for slide in presentation.slides:
                for shape in slide.shapes:
                    if hasattr(shape, "text"):
                        text += shape.text + " "
        except (
            Exception
        ) as e:  # Catching a general exception to handle any unexpected errors
            self.logger.error(f"Error processing PowerPoint file at {link}: {e}")
            text = f"Error processing PowerPoint file: {link}. The file might be corrupt or not a valid PowerPoint file."
        return text.strip()  # Remove any leading/trailing whitespace
def process_xls(self, link):
try:
import pandas as pd # type: ignore
except ImportError:
raise ImportError(
"`pandas` package not found, please run `pip install pandas`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
file_data = BytesIO(response.content)
if FileType.SPREADSHEET in self.custom_parsers and self.custom_parser_manager:
return self.custom_parser_manager.process_with_custom_parser(
FileType.SPREADSHEET, file_data.getbuffer(), "xlsx"
)
# Try to read the Excel file
try:
# Use pandas to read all sheets; returns a dict of DataFrame
sheets = pd.read_excel(file_data, sheet_name=None, engine="openpyxl")
except Exception as e:
return f"Failed to read Excel file: {e!s}"
for sheet_name, sheet_data in sheets.items():
text += f"{sheet_name}:\n"
for row_index, row in sheet_data.iterrows():
text += "\t".join(str(value) for value in row) + "\n"
text += "\n"
return text.strip()
def process_xlsb(self, link):
try:
import pandas as pd
except ImportError:
raise ImportError(
"`pandas` package not found, please run `pip install pandas`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
file_data = BytesIO(response.content)
try:
# Use pandas to read the .xlsb file, specifying pyxlsb as the engine
df = pd.read_excel(file_data, engine="pyxlsb")
# Convert the DataFrame to a text string
text_rows = []
for index, row in df.iterrows():
text_rows.append(", ".join(row.astype(str)))
text = "\n".join(text_rows)
except Exception as e:
self.logger.error(f"Error processing XLSB file at {link}: {e}")
text = "Error processing XLSB file."
return text
def process_csv(self, link):
try:
import pandas as pd
except ImportError:
raise ImportError(
"`pandas` package not found, please run `pip install pandas`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
file_data = BytesIO(response.content)
try:
# Assuming CSV uses default comma delimiter. If delimiter varies, consider detecting it.
df = pd.read_csv(file_data, low_memory=False)
# Convert the DataFrame to a text string, including headers
text_rows = []
for index, row in df.iterrows():
text_rows.append(", ".join(row.astype(str)))
text = "\n".join(text_rows)
except Exception as e:
self.logger.error(f"Error processing CSV file: {e}")
text = "Error processing CSV file."
return text
    def process_svg(self, link):
        """OCR an SVG attachment by rasterizing it to PNG first.

        svglib converts the SVG to a reportlab drawing, renderPM rasterizes
        it to PNG in memory, and pytesseract extracts the text. Returns ""
        on download failure.
        """
        try:
            import pytesseract  # type: ignore
            from PIL import Image  # type: ignore
            from reportlab.graphics import renderPM  # type: ignore
            from svglib.svglib import svg2rlg  # type: ignore
        except ImportError:
            raise ImportError(
                "`pytesseract`, `Pillow`, or `svglib` package not found, please run `pip install pytesseract Pillow svglib`"
            )
        response = self.confluence.request(path=link, absolute=True)
        text = ""
        if (
            response.status_code != 200
            or response.content == b""
            or response.content is None
        ):
            return text
        # SVG -> reportlab drawing -> in-memory PNG -> OCR.
        drawing = svg2rlg(BytesIO(response.content))
        img_data = BytesIO()
        renderPM.drawToFile(drawing, img_data, fmt="PNG")
        img_data.seek(0)
        image = Image.open(img_data)
        return pytesseract.image_to_string(image)
if __name__ == "__main__":
reader = ConfluenceReader()
| ConfluenceReader |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 145369,
"end": 148601
} | class ____(Layout):
def __init__(self, target: IRNode) -> None:
super().__init__(
target.get_device_or_error(),
target.get_dtype(),
target.get_size(),
None,
)
self.target = target
name = self.get_buffer().get_name()
V.graph.mark_buffer_mutated(name)
@property
def stride(self) -> Sequence[Expr]: # type: ignore[override]
return self.real_layout().stride
@stride.setter # type: ignore[override]
def stride(self, value: Never) -> None:
pass # ignore setting of stride
def storage_size(self) -> Expr:
return self.real_layout().storage_size()
def get_buffer(self) -> Buffer:
def unwrap_views(target: Any) -> Any:
if isinstance(target, MutationLayoutSHOULDREMOVE):
return unwrap_views(target.target)
if isinstance(target, BaseView):
return unwrap_views(target.unwrap_view())
if isinstance(target, MutableBox):
return unwrap_views(target.data)
return target
result = unwrap_views(self.target)
assert isinstance(result, Buffer), type(result)
return result
def real_layout(self) -> Layout:
layout = self.get_buffer().layout
assert isinstance(layout, Layout)
return layout
@classmethod
def realize_into(
cls, src: IRNode, dst: IRNode, unsafe_alias: bool = False
) -> IRNode:
dst.realize()
# NOTE: We must realize users of `dst` before we realize `src`, since
# realization order determines scheduling order. Otherwise, src's
# mutation would be scheduled before the existing users of dst!
V.graph.mark_buffer_mutated(dst.get_name())
if isinstance(src, TensorBox):
src = src.data
# We copy the contents of src into dst. In most cases this should
# be fused into a single kernel by the scheduler.
# NOTE: We cannot change src's layout to mutate dst directly as this
# would alias src to dst, which is not correct as further mutations to
# dst would effect users of src. However if there are no more users of
# dst, we can alias src to dst.
src.realize_hint()
if not unsafe_alias:
node = Pointwise.create(
device=src.get_device(),
dtype=src.get_dtype(),
inner_fn=src.make_loader(),
ranges=[
V.graph.sizevars.check_equals_and_simplify(a, b)
for a, b in zip(src.get_size(), dst.get_size())
],
)
assert isinstance(node, (BaseView, MutableBox))
src = node.data
src.realize()
assert hasattr(src, "data"), src
assert isinstance(src.data.layout, FlexibleLayout), type(src.data.layout)
src.data.layout = MutationLayoutSHOULDREMOVE(dst)
return src.data
def as_fixed(self) -> Self: # type: ignore[override]
return self
def make_indexer(self) -> Callable[[Sequence[Expr]], Expr]:
return self.target.make_indexer()
@ir_dataclass(frozen=False)
| MutationLayoutSHOULDREMOVE |
python | crytic__slither | slither/vyper_parsing/ast/types.py | {
"start": 1319,
"end": 1367
} | class ____(ASTNode):
value: str
@dataclass
| Hex |
python | kamyu104__LeetCode-Solutions | Python/top-k-frequent-elements.py | {
"start": 50,
"end": 750
} | class ____(object):
def topKFrequent(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
counts = collections.Counter(nums)
buckets = [[] for _ in xrange(len(nums)+1)]
for i, count in counts.iteritems():
buckets[count].append(i)
result = []
for i in reversed(xrange(len(buckets))):
for j in xrange(len(buckets[i])):
result.append(buckets[i][j])
if len(result) == k:
return result
return result
# Time: O(n) ~ O(n^2), O(n) on average.
# Space: O(n)
# Quick Select Solution
from random import randint
| Solution |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 8643,
"end": 9773
} | class ____(Protocol[ContextT]):
"""Extra data used during validation."""
@property
def context(self) -> ContextT:
"""The current validation context."""
...
@property
def config(self) -> CoreConfig | None:
"""The CoreConfig that applies to this validation."""
...
@property
def mode(self) -> Literal['python', 'json']:
"""The type of input data we are currently validating."""
...
@property
def data(self) -> dict[str, Any]:
"""The data being validated for this model."""
...
@property
def field_name(self) -> str | None:
"""
The name of the current field being validated if this validator is
attached to a model field.
"""
...
ExpectedSerializationTypes = Literal[
'none',
'int',
'bool',
'float',
'str',
'bytes',
'bytearray',
'list',
'tuple',
'set',
'frozenset',
'generator',
'dict',
'datetime',
'date',
'time',
'timedelta',
'url',
'multi-host-url',
'json',
'uuid',
'any',
]
| ValidationInfo |
python | ray-project__ray | python/ray/util/client/server/logservicer.py | {
"start": 2724,
"end": 4278
} | class ____(ray_client_pb2_grpc.RayletLogStreamerServicer):
def __init__(self):
super().__init__()
self.num_clients = 0
self.client_lock = threading.Lock()
def Logstream(self, request_iterator, context):
initialized = False
with self.client_lock:
threshold = CLIENT_SERVER_MAX_THREADS / 2
if self.num_clients + 1 >= threshold:
context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
logger.warning(
f"Logstream: Num clients {self.num_clients} has reached "
f"the threshold {threshold}. Rejecting new connection."
)
return
self.num_clients += 1
initialized = True
logger.info(
"New logs connection established. " f"Total clients: {self.num_clients}"
)
log_queue = queue.Queue()
thread = threading.Thread(
target=log_status_change_thread,
args=(log_queue, request_iterator),
daemon=True,
)
thread.start()
try:
queue_iter = iter(log_queue.get, None)
for record in queue_iter:
if record is None:
break
yield record
except grpc.RpcError as e:
logger.debug(f"Closing log channel: {e}")
finally:
thread.join()
with self.client_lock:
if initialized:
self.num_clients -= 1
| LogstreamServicer |
python | tiangolo__fastapi | scripts/contributors.py | {
"start": 1778,
"end": 1843
} | class ____(BaseModel):
pullRequests: PullRequests
| PRsRepository |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-tree-from-leaf-values.py | {
"start": 29,
"end": 438
} | class ____(object):
def mctFromLeafValues(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
result = 0
stk = [float("inf")]
for x in arr:
while stk[-1] <= x:
result += stk.pop() * min(stk[-1], x)
stk.append(x)
while len(stk) > 2:
result += stk.pop() * stk[-1]
return result
| Solution |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/asset/__init__.py | {
"start": 15811,
"end": 16901
} | class ____(BaseAsset, AttrsInstance):
"""
Reference to an asset.
This class is not intended to be instantiated directly. Call ``Asset.ref``
instead to create one of the subclasses.
:meta private:
"""
_dependency_type: Literal["asset-name-ref", "asset-uri-ref"]
def as_expression(self) -> Any:
return {"asset_ref": attrs.asdict(self)}
def iter_assets(self) -> Iterator[tuple[AssetUniqueKey, Asset]]:
return iter(())
def iter_asset_aliases(self) -> Iterator[tuple[str, AssetAlias]]:
return iter(())
def iter_asset_refs(self) -> Iterator[AssetRef]:
yield self
def iter_dag_dependencies(self, *, source: str = "", target: str = "") -> Iterator[DagDependency]:
(dependency_id,) = attrs.astuple(self)
yield DagDependency(
source=source or self._dependency_type,
target=target or self._dependency_type,
label=dependency_id,
dependency_type=self._dependency_type,
dependency_id=dependency_id,
)
@attrs.define(hash=True)
| AssetRef |
python | doocs__leetcode | solution/0700-0799/0714.Best Time to Buy and Sell Stock with Transaction Fee/Solution.py | {
"start": 0,
"end": 430
} | class ____:
def maxProfit(self, prices: List[int], fee: int) -> int:
@cache
def dfs(i: int, j: int) -> int:
if i >= len(prices):
return 0
ans = dfs(i + 1, j)
if j:
ans = max(ans, prices[i] + dfs(i + 1, 0) - fee)
else:
ans = max(ans, -prices[i] + dfs(i + 1, 1))
return ans
return dfs(0, 0)
| Solution |
python | scrapy__scrapy | tests/test_spidermiddleware.py | {
"start": 19745,
"end": 21610
} | class ____(TestBaseAsyncSpiderMiddleware):
ITEM_TYPE = dict
MW_SIMPLE = ProcessSpiderOutputSimpleMiddleware
MW_ASYNCGEN = ProcessSpiderOutputAsyncGenMiddleware
MW_UNIVERSAL = ProcessSpiderOutputUniversalMiddleware
MW_EXC_SIMPLE = ProcessSpiderExceptionSimpleIterableMiddleware
MW_EXC_ASYNCGEN = ProcessSpiderExceptionAsyncIteratorMiddleware
def _callback(self) -> Any:
1 / 0
async def _test_asyncgen_nodowngrade(self, *mw_classes: type[Any]) -> None:
with pytest.raises(
_InvalidOutput,
match=r"Async iterable returned from .+ cannot be downgraded",
):
await self._get_middleware_result(*mw_classes)
@deferred_f_from_coro_f
async def test_exc_simple(self):
"""Simple exc mw"""
await self._test_simple_base(self.MW_EXC_SIMPLE)
@deferred_f_from_coro_f
async def test_exc_async(self):
"""Async exc mw"""
await self._test_asyncgen_base(self.MW_EXC_ASYNCGEN)
@deferred_f_from_coro_f
async def test_exc_simple_simple(self):
"""Simple exc mw -> simple output mw"""
await self._test_simple_base(self.MW_SIMPLE, self.MW_EXC_SIMPLE)
@deferred_f_from_coro_f
async def test_exc_async_async(self):
"""Async exc mw -> async output mw"""
await self._test_asyncgen_base(self.MW_ASYNCGEN, self.MW_EXC_ASYNCGEN)
@deferred_f_from_coro_f
async def test_exc_simple_async(self):
"""Simple exc mw -> async output mw; upgrade"""
await self._test_asyncgen_base(self.MW_ASYNCGEN, self.MW_EXC_SIMPLE)
@deferred_f_from_coro_f
async def test_exc_async_simple(self):
"""Async exc mw -> simple output mw; cannot work as downgrading is not supported"""
await self._test_asyncgen_nodowngrade(self.MW_SIMPLE, self.MW_EXC_ASYNCGEN)
| TestProcessSpiderException |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py | {
"start": 32746,
"end": 33445
} | class ____(graphene.Mutation):
"""Sets the concurrency limit for a given concurrency key."""
Output = graphene.NonNull(graphene.Boolean)
class Meta:
name = "SetConcurrencyLimitMutation"
class Arguments:
concurrencyKey = graphene.Argument(graphene.NonNull(graphene.String))
limit = graphene.Argument(graphene.NonNull(graphene.Int))
@capture_error
@check_permission(Permissions.EDIT_CONCURRENCY_LIMIT)
def mutate(self, graphene_info, concurrencyKey: str, limit: int):
graphene_info.context.instance.event_log_storage.set_concurrency_slots(
concurrencyKey, limit
)
return True
| GrapheneSetConcurrencyLimitMutation |
python | pytorch__pytorch | torch/_inductor/fx_passes/group_batch_fusion.py | {
"start": 50106,
"end": 50320
} | class ____(BatchPointwiseMathOpsPostGradFusion):
def __init__(self, **kwargs) -> None:
super().__init__(aten.div.Tensor, **kwargs)
@register_fusion("batch_aten_mul", pre_grad=False)
| BatchDivPostGradFusion |
python | dagster-io__dagster | python_modules/dagster-test/dagster_test/toys/input_managers.py | {
"start": 1143,
"end": 3499
} | class ____(PandasCsvIOManager):
def load_input(self, context) -> np.ndarray: # pyright: ignore[reportIncompatibleMethodOverride]
if context.upstream_output:
file_path = self._get_path(context.upstream_output)
df = np.genfromtxt(file_path, delimiter=",", dtype=None)
return df
else:
multiplier = context.config["multiplier"]
df = pd.DataFrame(
{
"ints": [10 * multiplier, 20 * multiplier, 30 * multiplier, 40 * multiplier],
"floats": [
10.0 * multiplier,
20.0 * multiplier,
30.0 * multiplier,
40.0 * multiplier,
],
"strings": ["ten", "twenty", "thirty", "forty"],
}
)
return df.to_numpy()
@io_manager(
config_schema={"base_dir": Field(Noneable(str), default_value=None, is_required=False)},
input_config_schema={"multiplier": Field(int, is_required=False, default_value=1)},
)
def numpy_io_manager(init_context):
return NumpyCsvIOManager(base_dir=init_context.resource_config["base_dir"])
@op
def make_a_df():
df = pd.DataFrame(
{
"ints": [1, 2, 3, 4],
"floats": [1.0, 2.0, 3.0, 4.0],
"strings": ["one", "two", "three", "four"],
}
)
return df
@op
def avg_ints(context, df):
avg = df["ints"].mean().item()
context.log.info(f"Dataframe with type {type(df)} has average of the ints is {avg}")
@op(ins={"df": In(input_manager_key="numpy_csv_mgr")})
def median_floats(context, df):
med = df["floats"].median().item()
context.log.info(f"Dataframe with type {type(df)} has median of the floats is {med}")
@op(
ins={"df": In(input_manager_key="numpy_csv_mgr")},
)
def count_rows(context, df: np.ndarray):
num_rows = df.shape[0]
context.log.info(f"Dataframe with type {type(df)} has {num_rows} rows")
@graph
def df_stats():
df = make_a_df()
avg_ints(df)
median_floats() # no output passed, will load using input manager
count_rows(df)
df_stats_job = df_stats.to_job(
name="df_stats_job",
resource_defs={
"io_manager": pandas_io_manager,
"numpy_csv_mgr": numpy_io_manager,
},
)
| NumpyCsvIOManager |
python | apache__airflow | providers/standard/src/airflow/providers/standard/operators/bash.py | {
"start": 1468,
"end": 11073
} | class ____(BaseOperator):
r"""
Execute a Bash script, command or set of commands.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BashOperator`
If BaseOperator.do_xcom_push is True, the last line written to stdout
will also be pushed to an XCom when the bash command completes
:param bash_command: The command, set of commands or reference to a
Bash script (must be '.sh' or '.bash') to be executed. (templated)
:param env: If env is not None, it must be a dict that defines the
environment variables for the new process; these are used instead
of inheriting the current process environment, which is the default
behavior. (templated)
:param append_env: If False(default) uses the environment variables passed in env params
and does not inherit the current process environment. If True, inherits the environment variables
from current passes and then environment variable passed by the user will either update the existing
inherited environment variables or the new variables gets appended to it
:param output_encoding: Output encoding of Bash command
:param skip_on_exit_code: If task exits with this exit code, leave the task
in ``skipped`` state (default: 99). If set to ``None``, any non-zero
exit code will be treated as a failure.
:param cwd: Working directory to execute the command in (templated).
If None (default), the command is run in a temporary directory.
To use current DAG folder as the working directory,
you might set template ``{{ task.dag.folder }}``.
When bash_command is a '.sh' or '.bash' file, Airflow must have write
access to the working directory. The script will be rendered (Jinja
template) into a new temporary file in this directory.
:param output_processor: Function to further process the output of the bash script
(default is lambda output: output).
Airflow will evaluate the exit code of the Bash command. In general, a non-zero exit code will result in
task failure and zero will result in task success.
Exit code ``99`` (or another set in ``skip_on_exit_code``)
will throw an :class:`airflow.exceptions.AirflowSkipException`, which will leave the task in ``skipped``
state. You can have all non-zero exit codes be treated as a failure by setting ``skip_on_exit_code=None``.
.. list-table::
:widths: 25 25
:header-rows: 1
* - Exit code
- Behavior
* - 0
- success
* - `skip_on_exit_code` (default: 99)
- raise :class:`airflow.exceptions.AirflowSkipException`
* - otherwise
- raise :class:`airflow.exceptions.AirflowException`
.. note::
Airflow will not recognize a non-zero exit code unless the whole shell exit with a non-zero exit
code. This can be an issue if the non-zero exit arises from a sub-command. The easiest way of
addressing this is to prefix the command with ``set -e;``
.. code-block:: python
bash_command = "set -e; python3 script.py '{{ data_interval_end }}'"
.. note::
To simply execute a ``.sh`` or ``.bash`` script (without any Jinja template), add a space after the
script name ``bash_command`` argument -- for example ``bash_command="my_script.sh "``. This
is because Airflow tries to load this file and process it as a Jinja template when
it ends with ``.sh`` or ``.bash``.
If you have Jinja template in your script, do not put any blank space. And add the script's directory
in the DAG's ``template_searchpath``. If you specify a ``cwd``, Airflow must have write access to
this directory. The script will be rendered (Jinja template) into a new temporary file in this directory.
.. warning::
Care should be taken with "user" input or when using Jinja templates in the
``bash_command``, as this bash operator does not perform any escaping or
sanitization of the command.
This applies mostly to using "dag_run" conf, as that can be submitted via
users in the Web UI. Most of the default template variables are not at
risk.
For example, do **not** do this:
.. code-block:: python
bash_task = BashOperator(
task_id="bash_task",
bash_command='echo "Here is the message: \'{{ dag_run.conf["message"] if dag_run else "" }}\'"',
)
Instead, you should pass this via the ``env`` kwarg and use double-quotes
inside the bash_command, as below:
.. code-block:: python
bash_task = BashOperator(
task_id="bash_task",
bash_command="echo \"here is the message: '$message'\"",
env={"message": '{{ dag_run.conf["message"] if dag_run else "" }}'},
)
.. versionadded:: 2.10.0
The `output_processor` parameter.
"""
template_fields: Sequence[str] = ("bash_command", "env", "cwd")
template_fields_renderers = {"bash_command": "bash", "env": "json"}
template_ext: Sequence[str] = (".sh", ".bash")
ui_color = "#f0ede4"
def __init__(
self,
*,
bash_command: str | ArgNotSet,
env: dict[str, str] | None = None,
append_env: bool = False,
output_encoding: str = "utf-8",
skip_on_exit_code: int | Container[int] | None = 99,
cwd: str | None = None,
output_processor: Callable[[str], Any] = lambda result: result,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bash_command = bash_command
self.env = env
self.output_encoding = output_encoding
self.skip_on_exit_code = (
skip_on_exit_code
if isinstance(skip_on_exit_code, Container)
else [skip_on_exit_code]
if skip_on_exit_code is not None
else []
)
self.cwd = cwd
self.append_env = append_env
self.output_processor = output_processor
self._is_inline_cmd = None
if isinstance(bash_command, str):
self._is_inline_cmd = self._is_inline_command(bash_command=bash_command)
@cached_property
def subprocess_hook(self):
"""Returns hook for running the bash command."""
return SubprocessHook()
def get_env(self, context) -> dict:
"""Build the set of environment variables to be exposed for the bash command."""
system_env = os.environ.copy()
env = self.env
if env is None:
env = system_env
else:
if self.append_env:
system_env.update(env)
env = system_env
airflow_context_vars = context_to_airflow_vars(context, in_env_var_format=True)
self.log.debug(
"Exporting env vars: %s",
" ".join(f"{k}={v!r}" for k, v in airflow_context_vars.items()),
)
env.update(airflow_context_vars)
return env
def execute(self, context: Context):
bash_path: str = shutil.which("bash") or "bash"
if self.cwd is not None:
if not os.path.exists(self.cwd):
raise AirflowException(f"Can not find the cwd: {self.cwd}")
if not os.path.isdir(self.cwd):
raise AirflowException(f"The cwd {self.cwd} must be a directory")
env = self.get_env(context)
if self._is_inline_cmd:
result = self._run_inline_command(bash_path=bash_path, env=env)
else:
result = self._run_rendered_script_file(bash_path=bash_path, env=env)
if result.exit_code in self.skip_on_exit_code:
raise AirflowSkipException(f"Bash command returned exit code {result.exit_code}. Skipping.")
if result.exit_code != 0:
raise AirflowException(
f"Bash command failed. The command returned a non-zero exit code {result.exit_code}."
)
return self.output_processor(result.output)
def _run_inline_command(self, bash_path: str, env: dict) -> SubprocessResult:
"""Pass the bash command as string directly in the subprocess."""
return self.subprocess_hook.run_command(
command=[bash_path, "-c", self.bash_command],
env=env,
output_encoding=self.output_encoding,
cwd=self.cwd,
)
def _run_rendered_script_file(self, bash_path: str, env: dict) -> SubprocessResult:
"""
Save the bash command into a file and execute this file.
This allows for longer commands, and prevents "Argument list too long error".
"""
with working_directory(cwd=self.cwd) as cwd:
with tempfile.NamedTemporaryFile(mode="w", dir=cwd, suffix=".sh") as file:
file.write(cast("str", self.bash_command))
file.flush()
bash_script = os.path.basename(file.name)
return self.subprocess_hook.run_command(
command=[bash_path, bash_script],
env=env,
output_encoding=self.output_encoding,
cwd=cwd,
)
@classmethod
def _is_inline_command(cls, bash_command: str) -> bool:
"""Return True if the bash command is an inline string. False if it's a bash script file."""
return not bash_command.endswith(tuple(cls.template_ext))
def on_kill(self) -> None:
self.subprocess_hook.send_sigterm()
| BashOperator |
python | scikit-learn__scikit-learn | sklearn/model_selection/_split.py | {
"start": 2578,
"end": 2832
} | class ____(_MetadataRequester):
"""A Mixin to ``groups`` by default.
This Mixin makes the object to request ``groups`` by default as ``True``.
.. versionadded:: 1.3
"""
__metadata_request__split = {"groups": True}
| GroupsConsumerMixin |
python | walkccc__LeetCode | solutions/3121. Count the Number of Special Characters II/3121.py | {
"start": 0,
"end": 419
} | class ____:
def numberOfSpecialChars(self, word: str) -> int:
lower = collections.defaultdict(bool)
upper = collections.defaultdict(bool)
for c in word:
if c.islower():
lower[c] = not upper[c.upper()]
else:
upper[c] = True
return sum(lower[a] and upper[b]
for a, b in zip(string.ascii_lowercase,
string.ascii_uppercase))
| Solution |
python | python-openxml__python-docx | src/docx/oxml/xmlchemy.py | {
"start": 5103,
"end": 7410
} | class ____(BaseAttribute):
"""Defines an optional attribute on a custom element class.
An optional attribute returns a default value when not present for reading. When
assigned |None|, the attribute is removed, but still returns the default value when
one is specified.
"""
def __init__(
self,
attr_name: str,
simple_type: Type[BaseXmlEnum] | Type[BaseSimpleType],
default: BaseXmlEnum | BaseSimpleType | str | bool | None = None,
):
super(OptionalAttribute, self).__init__(attr_name, simple_type)
self._default = default
@property
def _docstring(self):
"""String to use as `__doc__` attribute of attribute property."""
return (
f"{self._simple_type.__name__} type-converted value of"
f" ``{self._attr_name}`` attribute, or |None| (or specified default"
f" value) if not present. Assigning the default value causes the"
f" attribute to be removed from the element."
)
@property
def _getter(
self,
) -> Callable[[BaseOxmlElement], Any | None]:
"""Function suitable for `__get__()` method on attribute property descriptor."""
def get_attr_value(
obj: BaseOxmlElement,
) -> Any | None:
attr_str_value = obj.get(self._clark_name)
if attr_str_value is None:
return self._default
return self._simple_type.from_xml(attr_str_value)
get_attr_value.__doc__ = self._docstring
return get_attr_value
@property
def _setter(self) -> Callable[[BaseOxmlElement, Any], None]:
"""Function suitable for `__set__()` method on attribute property descriptor."""
def set_attr_value(obj: BaseOxmlElement, value: Any | None):
if value is None or value == self._default:
if self._clark_name in obj.attrib:
del obj.attrib[self._clark_name]
return
str_value = self._simple_type.to_xml(value)
if str_value is None:
if self._clark_name in obj.attrib:
del obj.attrib[self._clark_name]
return
obj.set(self._clark_name, str_value)
return set_attr_value
| OptionalAttribute |
python | joblib__joblib | joblib/externals/loky/reusable_executor.py | {
"start": 3511,
"end": 10863
} | class ____(ProcessPoolExecutor):
def __init__(
self,
submit_resize_lock,
max_workers=None,
context=None,
timeout=None,
executor_id=0,
job_reducers=None,
result_reducers=None,
initializer=None,
initargs=(),
env=None,
):
super().__init__(
max_workers=max_workers,
context=context,
timeout=timeout,
job_reducers=job_reducers,
result_reducers=result_reducers,
initializer=initializer,
initargs=initargs,
env=env,
)
self.executor_id = executor_id
self._submit_resize_lock = submit_resize_lock
@classmethod
def get_reusable_executor(
cls,
max_workers=None,
context=None,
timeout=10,
kill_workers=False,
reuse="auto",
job_reducers=None,
result_reducers=None,
initializer=None,
initargs=(),
env=None,
):
with _executor_lock:
global _executor, _executor_kwargs
executor = _executor
if max_workers is None:
if reuse is True and executor is not None:
max_workers = executor._max_workers
else:
max_workers = cpu_count()
elif max_workers <= 0:
raise ValueError(
f"max_workers must be greater than 0, got {max_workers}."
)
if isinstance(context, str):
context = get_context(context)
if context is not None and context.get_start_method() == "fork":
raise ValueError(
"Cannot use reusable executor with the 'fork' context"
)
kwargs = dict(
context=context,
timeout=timeout,
job_reducers=job_reducers,
result_reducers=result_reducers,
initializer=initializer,
initargs=initargs,
env=env,
)
if executor is None:
is_reused = False
mp.util.debug(
f"Create a executor with max_workers={max_workers}."
)
executor_id = _get_next_executor_id()
_executor_kwargs = kwargs
_executor = executor = cls(
_executor_lock,
max_workers=max_workers,
executor_id=executor_id,
**kwargs,
)
else:
if reuse == "auto":
reuse = kwargs == _executor_kwargs
if (
executor._flags.broken
or executor._flags.shutdown
or not reuse
or executor.queue_size < max_workers
):
if executor._flags.broken:
reason = "broken"
elif executor._flags.shutdown:
reason = "shutdown"
elif executor.queue_size < max_workers:
# Do not reuse the executor if the queue size is too
# small as this would lead to limited parallelism.
reason = "queue size is too small"
else:
reason = "arguments have changed"
mp.util.debug(
"Creating a new executor with max_workers="
f"{max_workers} as the previous instance cannot be "
f"reused ({reason})."
)
executor.shutdown(wait=True, kill_workers=kill_workers)
_executor = executor = _executor_kwargs = None
# Recursive call to build a new instance
return cls.get_reusable_executor(
max_workers=max_workers, **kwargs
)
else:
mp.util.debug(
"Reusing existing executor with "
f"max_workers={executor._max_workers}."
)
is_reused = True
executor._resize(max_workers)
return executor, is_reused
def submit(self, fn, *args, **kwargs):
with self._submit_resize_lock:
return super().submit(fn, *args, **kwargs)
def _resize(self, max_workers):
with self._submit_resize_lock:
if max_workers is None:
raise ValueError("Trying to resize with max_workers=None")
elif max_workers == self._max_workers:
return
if self._executor_manager_thread is None:
# If the executor_manager_thread has not been started
# then no processes have been spawned and we can just
# update _max_workers and return
self._max_workers = max_workers
return
self._wait_job_completion()
# Some process might have returned due to timeout so check how many
# children are still alive. Use the _process_management_lock to
# ensure that no process are spawned or timeout during the resize.
with self._processes_management_lock:
processes = list(self._processes.values())
nb_children_alive = sum(p.is_alive() for p in processes)
self._max_workers = max_workers
for _ in range(max_workers, nb_children_alive):
self._call_queue.put(None)
while (
len(self._processes) > max_workers and not self._flags.broken
):
time.sleep(1e-3)
self._adjust_process_count()
processes = list(self._processes.values())
while not all(p.is_alive() for p in processes):
time.sleep(1e-3)
def _wait_job_completion(self):
"""Wait for the cache to be empty before resizing the pool."""
# Issue a warning to the user about the bad effect of this usage.
if self._pending_work_items:
warnings.warn(
"Trying to resize an executor with running jobs: "
"waiting for jobs completion before resizing.",
UserWarning,
)
mp.util.debug(
f"Executor {self.executor_id} waiting for jobs completion "
"before resizing"
)
# Wait for the completion of the jobs
while self._pending_work_items:
time.sleep(1e-3)
def _setup_queues(self, job_reducers, result_reducers):
# As this executor can be resized, use a large queue size to avoid
# underestimating capacity and introducing overhead
# Also handle the case where the user set max_workers to a value larger
# than cpu_count(), to avoid limiting the number of parallel jobs.
min_queue_size = max(cpu_count(), self._max_workers)
self.queue_size = 2 * min_queue_size + EXTRA_QUEUED_CALLS
super()._setup_queues(
job_reducers, result_reducers, queue_size=self.queue_size
)
| _ReusablePoolExecutor |
python | Lightning-AI__lightning | src/lightning/pytorch/callbacks/prediction_writer.py | {
"start": 1414,
"end": 6087
} | class ____(Callback):
"""Base class to implement how the predictions should be stored.
Args:
write_interval: When to write.
Example::
import torch
from lightning.pytorch.callbacks import BasePredictionWriter
class CustomWriter(BasePredictionWriter):
def __init__(self, output_dir, write_interval):
super().__init__(write_interval)
self.output_dir = output_dir
def write_on_batch_end(
self, trainer, pl_module, prediction, batch_indices, batch, batch_idx, dataloader_idx
):
torch.save(prediction, os.path.join(self.output_dir, dataloader_idx, f"{batch_idx}.pt"))
def write_on_epoch_end(self, trainer, pl_module, predictions, batch_indices):
torch.save(predictions, os.path.join(self.output_dir, "predictions.pt"))
pred_writer = CustomWriter(output_dir="pred_path", write_interval="epoch")
trainer = Trainer(callbacks=[pred_writer])
model = BoringModel()
trainer.predict(model, return_predictions=False)
Example::
# multi-device inference example
import torch
from lightning.pytorch.callbacks import BasePredictionWriter
class CustomWriter(BasePredictionWriter):
def __init__(self, output_dir, write_interval):
super().__init__(write_interval)
self.output_dir = output_dir
def write_on_epoch_end(self, trainer, pl_module, predictions, batch_indices):
# this will create N (num processes) files in `output_dir` each containing
# the predictions of it's respective rank
torch.save(predictions, os.path.join(self.output_dir, f"predictions_{trainer.global_rank}.pt"))
# optionally, you can also save `batch_indices` to get the information about the data index
# from your prediction data
torch.save(batch_indices, os.path.join(self.output_dir, f"batch_indices_{trainer.global_rank}.pt"))
# or you can set `write_interval="batch"` and override `write_on_batch_end` to save
# predictions at batch level
pred_writer = CustomWriter(output_dir="pred_path", write_interval="epoch")
trainer = Trainer(accelerator="gpu", strategy="ddp", devices=8, callbacks=[pred_writer])
model = BoringModel()
trainer.predict(model, return_predictions=False)
"""
def __init__(self, write_interval: Literal["batch", "epoch", "batch_and_epoch"] = "batch") -> None:
if write_interval not in list(WriteInterval):
raise MisconfigurationException(f"`write_interval` should be one of {[i.value for i in WriteInterval]}.")
self.interval = WriteInterval(write_interval)
@override
def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: str) -> None:
if is_param_in_hook_signature(pl_module.predict_step, "dataloader_iter", explicit=True):
raise NotImplementedError("The `PredictionWriterCallback` does not support using `dataloader_iter`.")
def write_on_batch_end(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
prediction: Any,
batch_indices: Optional[Sequence[int]],
batch: Any,
batch_idx: int,
dataloader_idx: int,
) -> None:
"""Override with the logic to write a single batch."""
raise NotImplementedError()
def write_on_epoch_end(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
predictions: Sequence[Any],
batch_indices: Sequence[Any],
) -> None:
"""Override with the logic to write all batches."""
raise NotImplementedError()
@override
def on_predict_batch_end(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
outputs: Any,
batch: Any,
batch_idx: int,
dataloader_idx: int = 0,
) -> None:
if not self.interval.on_batch:
return
batch_indices = trainer.predict_loop.current_batch_indices
self.write_on_batch_end(trainer, pl_module, outputs, batch_indices, batch, batch_idx, dataloader_idx)
@override
def on_predict_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if not self.interval.on_epoch:
return
epoch_batch_indices = trainer.predict_loop.epoch_batch_indices
self.write_on_epoch_end(trainer, pl_module, trainer.predict_loop.predictions, epoch_batch_indices)
| BasePredictionWriter |
python | numba__numba | numba/tests/test_array_attr.py | {
"start": 7991,
"end": 11257
} | class ____(MemoryLeakMixin, TestCase):
def check_complex(self, pyfunc):
cfunc = njit(pyfunc)
# test 1D
size = 10
arr = np.arange(size) + np.arange(size) * 10j
self.assertPreciseEqual(pyfunc(arr), cfunc(arr))
# test 2D
arr = arr.reshape(2, 5)
self.assertPreciseEqual(pyfunc(arr), cfunc(arr))
def test_complex_real(self):
self.check_complex(array_real)
def test_complex_imag(self):
self.check_complex(array_imag)
def check_number_real(self, dtype):
pyfunc = array_real
cfunc = njit(pyfunc)
# test 1D
size = 10
arr = np.arange(size, dtype=dtype)
self.assertPreciseEqual(pyfunc(arr), cfunc(arr))
# test 2D
arr = arr.reshape(2, 5)
self.assertPreciseEqual(pyfunc(arr), cfunc(arr))
# test identity
self.assertEqual(arr.data, pyfunc(arr).data)
self.assertEqual(arr.data, cfunc(arr).data)
# test writable
real = cfunc(arr)
self.assertNotEqual(arr[0, 0], 5)
real[0, 0] = 5
self.assertEqual(arr[0, 0], 5)
def test_number_real(self):
"""
Testing .real of non-complex dtypes
"""
for dtype in [np.uint8, np.int32, np.float32, np.float64]:
self.check_number_real(dtype)
def check_number_imag(self, dtype):
pyfunc = array_imag
cfunc = njit(pyfunc)
# test 1D
size = 10
arr = np.arange(size, dtype=dtype)
self.assertPreciseEqual(pyfunc(arr), cfunc(arr))
# test 2D
arr = arr.reshape(2, 5)
self.assertPreciseEqual(pyfunc(arr), cfunc(arr))
# test are zeros
self.assertEqual(cfunc(arr).tolist(), np.zeros_like(arr).tolist())
# test readonly
imag = cfunc(arr)
with self.assertRaises(ValueError) as raises:
imag[0] = 1
self.assertEqual('assignment destination is read-only',
str(raises.exception))
def test_number_imag(self):
"""
Testing .imag of non-complex dtypes
"""
for dtype in [np.uint8, np.int32, np.float32, np.float64]:
self.check_number_imag(dtype)
def test_record_real(self):
rectyp = np.dtype([('real', np.float32), ('imag', np.complex64)])
arr = np.zeros(3, dtype=rectyp)
arr['real'] = np.random.random(arr.size)
arr['imag'] = np.random.random(arr.size) * 1.3j
# check numpy behavior
# .real is identity
self.assertIs(array_real(arr), arr)
# .imag is zero_like
self.assertEqual(array_imag(arr).tolist(), np.zeros_like(arr).tolist())
# check numba behavior
# it's most likely a user error, anyway
jit_array_real = njit(array_real)
jit_array_imag = njit(array_imag)
with self.assertRaises(TypingError) as raises:
jit_array_real(arr)
self.assertIn("cannot access .real of array of Record",
str(raises.exception))
with self.assertRaises(TypingError) as raises:
jit_array_imag(arr)
self.assertIn("cannot access .imag of array of Record",
str(raises.exception))
| TestRealImagAttr |
python | spack__spack | lib/spack/spack/vendor/attr/_make.py | {
"start": 91629,
"end": 95758
} | class ____:
"""
Stores a factory callable.
If passed as the default value to `attrs.field`, the factory is used to
generate a new value.
:param callable factory: A callable that takes either none or exactly one
mandatory positional argument depending on *takes_self*.
:param bool takes_self: Pass the partially initialized instance that is
being initialized as a positional argument.
.. versionadded:: 17.1.0 *takes_self*
"""
__slots__ = ("factory", "takes_self")
def __init__(self, factory, takes_self=False):
"""
`Factory` is part of the default machinery so if we want a default
value here, we have to implement it ourselves.
"""
self.factory = factory
self.takes_self = takes_self
def __getstate__(self):
"""
Play nice with pickle.
"""
return tuple(getattr(self, name) for name in self.__slots__)
def __setstate__(self, state):
"""
Play nice with pickle.
"""
for name, value in zip(self.__slots__, state):
setattr(self, name, value)
_f = [
Attribute(
name=name,
default=NOTHING,
validator=None,
repr=True,
cmp=None,
eq=True,
order=False,
hash=True,
init=True,
inherited=False,
)
for name in Factory.__slots__
]
Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f)
def make_class(name, attrs, bases=(object,), **attributes_arguments):
"""
A quick way to create a new class called *name* with *attrs*.
:param str name: The name for the new class.
:param attrs: A list of names or a dictionary of mappings of names to
attributes.
If *attrs* is a list or an ordered dict (`dict` on Python 3.6+,
`collections.OrderedDict` otherwise), the order is deduced from
the order of the names or attributes inside *attrs*. Otherwise the
order of the definition of the attributes is used.
:type attrs: `list` or `dict`
:param tuple bases: Classes that the new class will subclass.
:param attributes_arguments: Passed unmodified to `attr.s`.
:return: A new class with *attrs*.
:rtype: type
.. versionadded:: 17.1.0 *bases*
.. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
"""
if isinstance(attrs, dict):
cls_dict = attrs
elif isinstance(attrs, (list, tuple)):
cls_dict = {a: attrib() for a in attrs}
else:
raise TypeError("attrs argument must be a dict or a list.")
pre_init = cls_dict.pop("__attrs_pre_init__", None)
post_init = cls_dict.pop("__attrs_post_init__", None)
user_init = cls_dict.pop("__init__", None)
body = {}
if pre_init is not None:
body["__attrs_pre_init__"] = pre_init
if post_init is not None:
body["__attrs_post_init__"] = post_init
if user_init is not None:
body["__init__"] = user_init
type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body))
# For pickling to work, the __module__ variable needs to be set to the
# frame where the class is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
type_.__module__ = sys._getframe(1).f_globals.get(
"__name__", "__main__"
)
except (AttributeError, ValueError):
pass
# We do it here for proper warnings with meaningful stacklevel.
cmp = attributes_arguments.pop("cmp", None)
(
attributes_arguments["eq"],
attributes_arguments["order"],
) = _determine_attrs_eq_order(
cmp,
attributes_arguments.get("eq"),
attributes_arguments.get("order"),
True,
)
return _attrs(these=cls_dict, **attributes_arguments)(type_)
# These are required by within this module so we define them here and merely
# import into .validators / .converters.
@attrs(slots=True, hash=True)
| Factory |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.