language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | google__jax | tests/multiprocess/wait_barrier_test.py | {
"start": 712,
"end": 3391
} | class ____(jt_multiprocess.MultiProcessTest):
def test_only_participants_call_succeeds(self):
client = distributed.global_state.client
timeout_in_ms = 1000
# Only even process ids will participate in the barrier.
barrier_participants = []
for process_index in range(jax.process_count()):
if process_index % 2 == 0:
barrier_participants.append(process_index)
if jax.process_index() % 2 == 0:
client.wait_at_barrier(
'only_even_participants_call',
timeout_in_ms,
process_ids=barrier_participants,
)
# This test is intended to implicitly verify that no exceptions are raised
# when the barrier is called if only the barrier participants including
# each one of them call the barrier. Thus there are no explicit assertions.
def test_non_participant_calls_fails(self):
client = distributed.global_state.client
timeout_in_ms = 1000
process_group = []
for process_index in range(jax.process_count()):
if process_index % 2 == 0:
process_group.append(process_index)
# Processes 0, 2, 4 ... wait here.
# Processes 1, 3, 5 ... go ahead to the barrier call.
if jax.process_index() % 2 == 0:
client.blocking_key_value_get('sync', timeout_in_ms=1000)
# 1, 3, 5 ... arrive and hit an error as they are non-participating.
# 0, 2, 4 ... has not yet arrived here. They will arrive once 1 unblocks
# them after leaving the barrier. But when they arrive at the barrier, they
# would see the error state even though they are participating.
with self.assertRaisesRegex(
jax.errors.JaxRuntimeError,
r'INVALID_ARGUMENT: A non-participating task.*'
):
client.wait_at_barrier(
'non_participant_calls', timeout_in_ms, process_ids=process_group
)
# 1 unblocks 0, 2 and 4.
if jax.process_index() == 1:
client.key_value_set('sync', 'process 1 exiting')
def test_all_participate_succeeds(self):
client = distributed.global_state.client
timeout_in_ms = 1000
client.wait_at_barrier('all_processes_call', timeout_in_ms)
# This test checks that processes do wait in `wait_at_barrier`and do not
# leave until all participating processes arrive.
def test_one_participant_never_calls_fails(self):
client = distributed.global_state.client
timeout_in_ms = 1000
if jax.process_index() != 0:
with self.assertRaisesRegex(
jax.errors.JaxRuntimeError, r'DEADLINE_EXCEEDED: Barrier timed out.*'
):
client.wait_at_barrier('one_participant_never_calls', timeout_in_ms)
if __name__ == '__main__':
jt_multiprocess.main()
| WaitBarrierTest |
python | pytorch__pytorch | test/distributed/_shard/sharded_optim/test_sharded_optim.py | {
"start": 576,
"end": 1375
} | class ____(torch.nn.Module):
def __init__(self, spec=None, group=None):
super().__init__()
# Use same seed.
torch.manual_seed(0)
self.param = torch.nn.Parameter(torch.rand(5, 10))
if spec is not None:
self.sharded_param = torch.nn.Parameter(
sharded_tensor.rand(
spec, 20, 10, requires_grad=True, process_group=group
)
)
else:
self.sharded_param = torch.nn.Parameter(torch.rand(5, 10))
def forward(self, input):
if isinstance(self.sharded_param, sharded_tensor.ShardedTensor):
return self.param + self.sharded_param.local_shards()[0].tensor + input
else:
return self.sharded_param + self.param + input
| MyShardedModel |
python | aio-libs__aiohttp | aiohttp/web_urldispatcher.py | {
"start": 8282,
"end": 10253
} | class ____(AbstractResource):
def __init__(self, *, name: str | None = None) -> None:
super().__init__(name=name)
self._routes: dict[str, ResourceRoute] = {}
self._any_route: ResourceRoute | None = None
self._allowed_methods: set[str] = set()
def add_route(
self,
method: str,
handler: type[AbstractView] | Handler,
*,
expect_handler: _ExpectHandler | None = None,
) -> "ResourceRoute":
if route := self._routes.get(method, self._any_route):
raise RuntimeError(
"Added route will never be executed, "
f"method {route.method} is already "
"registered"
)
route_obj = ResourceRoute(method, handler, self, expect_handler=expect_handler)
self.register_route(route_obj)
return route_obj
def register_route(self, route: "ResourceRoute") -> None:
assert isinstance(
route, ResourceRoute
), f"Instance of Route class is required, got {route!r}"
if route.method == hdrs.METH_ANY:
self._any_route = route
self._allowed_methods.add(route.method)
self._routes[route.method] = route
async def resolve(self, request: Request) -> _Resolve:
if (match_dict := self._match(request.rel_url.path_safe)) is None:
return None, set()
if route := self._routes.get(request.method, self._any_route):
return UrlMappingMatchInfo(match_dict, route), self._allowed_methods
return None, self._allowed_methods
@abc.abstractmethod
def _match(self, path: str) -> dict[str, str] | None:
"""Return dict of path values if path matches this resource, otherwise None."""
def __len__(self) -> int:
return len(self._routes)
def __iter__(self) -> Iterator["ResourceRoute"]:
return iter(self._routes.values())
# TODO: implement all abstract methods
| Resource |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/cmake/package.py | {
"start": 378,
"end": 2301
} | class ____(Package):
"""A dummy package for the cmake build system."""
homepage = "https://www.cmake.org"
url = "https://cmake.org/files/v3.4/cmake-3.4.3.tar.gz"
tags = ["build-tools"]
executables = ["^cmake[0-9]*$"]
depends_on("c", type="build")
depends_on("cxx", type="build")
version(
"3.23.1",
md5="4cb3ff35b2472aae70f542116d616e63",
url="https://cmake.org/files/v3.4/cmake-3.4.3.tar.gz",
)
version(
"3.4.3",
md5="4cb3ff35b2472aae70f542116d616e63",
url="https://cmake.org/files/v3.4/cmake-3.4.3.tar.gz",
)
@classmethod
def determine_version(cls, exe):
output = Executable(exe)("--version", output=str, error=str)
match = re.search(r"cmake.*version\s+(\S+)", output)
return match.group(1) if match else None
def setup_build_environment(self, env: EnvironmentModifications) -> None:
spack_cc # Ensure spack module-scope variable is available
env.set("for_install", "for_install")
def setup_dependent_build_environment(
self, env: EnvironmentModifications, dependent_spec: Spec
) -> None:
env.set("from_cmake", "from_cmake")
def setup_dependent_package(self, module, dspec):
module.cmake = Executable(self.spec.prefix.bin.cmake)
module.ctest = Executable(self.spec.prefix.bin.ctest)
self.spec.from_cmake = "from_cmake"
module.from_cmake = "from_cmake"
self.spec.link_arg = "test link arg"
def install(self, spec, prefix):
mkdirp(prefix.bin)
check(
os.environ["for_install"] == "for_install",
"Couldn't read env var set in compile envieonmnt",
)
cmake_exe_ext = ".exe" if sys.platform == "win32" else ""
cmake_exe = join_path(prefix.bin, "cmake{}".format(cmake_exe_ext))
touch(cmake_exe)
set_executable(cmake_exe)
| Cmake |
python | getsentry__sentry | tests/sentry/digests/test_types.py | {
"start": 87,
"end": 542
} | class ____:
def test_missing_notification_uuid(self) -> None:
notification = Notification(mock.sentinel.rule, mock.sentinel.group)
assert notification.notification_uuid is None
def test_notification_uuid(self) -> None:
notification = Notification(
mock.sentinel.rule, mock.sentinel.group, notification_uuid=str(uuid.uuid4())
)
assert notification.notification_uuid is not None
| TestNotificationTuple |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 72451,
"end": 74726
} | class ____(Expr):
_parameters = ["frame", "mins", "maxes", "lens"]
@functools.cached_property
def _meta(self):
return self.frame._meta
def _divisions(self):
non_empties = [i for i, length in enumerate(self.lens) if length != 0]
if len(non_empties) == 0:
return (None, None)
return tuple(self.mins) + (self.maxes[-1],)
def _layer(self):
non_empties = [i for i, length in enumerate(self.lens) if length != 0]
# If all empty, collapse into one partition
if len(non_empties) == 0:
return {(self._name, 0): (self.frame._name, 0)}
# drop empty partitions by mapping each partition in a new graph to a particular
# partition on the old graph.
dsk = {
(self._name, i): (self.frame._name, div)
for i, div in enumerate(non_empties)
}
ddf_keys = list(dsk.values())
overlap = [
i for i in range(1, len(self.mins)) if self.mins[i] >= self.maxes[i - 1]
]
divisions = self.divisions
frames = []
for i in overlap:
# `frames` is a list of data from previous partitions that we may want to
# move to partition i. Here, we add "overlap" from the previous partition
# (i-1) to this list.
frames.append((get_overlap, ddf_keys[i - 1], divisions[i]))
# Make sure that any data added from partition i-1 to `frames` is removed
# from partition i-1.
dsk[(self._name, i - 1)] = (
drop_overlap,
dsk[(self._name, i - 1)],
divisions[i],
)
# We do not want to move "overlap" from the previous partition (i-1) into
# this partition (i) if the data from this partition will need to be moved
# to the next partition (i+1) anyway. If we concatenate data too early,
# we may lose rows (https://github.com/dask/dask/issues/6972).
if divisions[i] == divisions[i + 1] and i + 1 in overlap:
continue
frames.append(ddf_keys[i])
dsk[(self._name, i)] = (methods.concat, frames)
frames = []
return dsk
| ResolveOverlappingDivisions |
python | pypa__warehouse | tests/unit/test_referrer_policy.py | {
"start": 95,
"end": 837
} | class ____:
def test_referrer_policy(self):
response = pretend.stub(headers={})
handler = pretend.call_recorder(lambda request: response)
registry = pretend.stub()
tween = referrer_policy.referrer_policy_tween_factory(handler, registry)
request = pretend.stub(path="/project/foobar/")
assert tween(request) is response
assert response.headers == {"Referrer-Policy": "origin-when-cross-origin"}
def test_includeme():
config = pretend.stub(add_tween=pretend.call_recorder(lambda tween: None))
referrer_policy.includeme(config)
assert config.add_tween.calls == [
pretend.call("warehouse.referrer_policy.referrer_policy_tween_factory")
]
| TestReferrerPolicyTween |
python | donnemartin__system-design-primer | solutions/object_oriented_design/parking_lot/parking_lot.py | {
"start": 1663,
"end": 2448
} | class ____(object):
SPOTS_PER_ROW = 10
def __init__(self, floor, total_spots):
self.floor = floor
self.num_spots = total_spots
self.available_spots = 0
self.spots = [] # List of ParkingSpots
def spot_freed(self):
self.available_spots += 1
def park_vehicle(self, vehicle):
spot = self._find_available_spot(vehicle)
if spot is None:
return None
else:
spot.park_vehicle(vehicle)
return spot
def _find_available_spot(self, vehicle):
"""Find an available spot where vehicle can fit, or return None"""
pass
def _park_starting_at_spot(self, spot, vehicle):
"""Occupy starting at spot.spot_number to vehicle.spot_size."""
pass
| Level |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 204078,
"end": 204413
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "subject")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
subject = sgqlc.types.Field("Votable", graphql_name="subject")
| AddUpvotePayload |
python | getsentry__sentry | src/sentry/loader/dynamic_sdk_options.py | {
"start": 24,
"end": 672
} | class ____(str, Enum):
HAS_REPLAY = "hasReplay"
HAS_PERFORMANCE = "hasPerformance"
HAS_DEBUG = "hasDebug"
def get_dynamic_sdk_loader_option(project_key, option: DynamicSdkLoaderOption, default=False):
dynamic_sdk_loader_options = project_key.data.get("dynamicSdkLoaderOptions", {})
return dynamic_sdk_loader_options.get(option.value, default)
def get_default_loader_data(project):
dynamic_sdk_loader_options = project.get_option("sentry:default_loader_options", None)
if dynamic_sdk_loader_options is not None:
return {"dynamicSdkLoaderOptions": dynamic_sdk_loader_options}
return {}
| DynamicSdkLoaderOption |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/type/definition.py | {
"start": 9611,
"end": 11625
} | class ____(GraphQLType):
"""Union Type Definition
When a field can return one of a heterogeneous set of types, a Union type is used to describe what types are possible
as well as providing a function to determine which type is actually used when the field is resolved.
Example:
class PetType(GraphQLUnionType):
name = 'Pet'
types = [DogType, CatType]
def resolve_type(self, value):
if isinstance(value, Dog):
return DogType()
if isinstance(value, Cat):
return CatType()
"""
def __init__(self, name, types=None, resolve_type=None, description=None):
assert name, 'Type must be named.'
assert_valid_name(name)
self.name = name
self.description = description
if resolve_type is not None:
assert callable(resolve_type), '{} must provide "resolve_type" as a function.'.format(self)
self.resolve_type = resolve_type
self._types = types
@cached_property
def types(self):
return define_types(self, self._types)
def define_types(union_type, types):
if callable(types):
types = types()
assert isinstance(types, (list, tuple)) and len(
types) > 0, 'Must provide types for Union {}.'.format(union_type.name)
has_resolve_type_fn = callable(union_type.resolve_type)
for type in types:
assert isinstance(type, GraphQLObjectType), (
'{} may only contain Object types, it cannot contain: {}.'.format(union_type, type)
)
if not has_resolve_type_fn:
assert callable(type.is_type_of), (
'Union Type {} does not provide a "resolve_type" function '
'and possible Type {} does not provide a "is_type_of" '
'function. There is no way to resolve this possible type '
'during execution.'
).format(union_type, type)
return types
| GraphQLUnionType |
python | scipy__scipy | scipy/optimize/tests/test_optimize.py | {
"start": 108766,
"end": 112523
} | class ____:
# Tests that optimisation does not give up before trying requested
# number of iterations or evaluations. And that it does not succeed
# by exceeding the limits.
def setup_method(self):
self.funcalls = threading.local()
def slow_func(self, v):
if not hasattr(self.funcalls, 'c'):
self.funcalls.c = 0
self.funcalls.c += 1
r, t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0], v[1])
return np.sin(r*20 + t)+r*0.5
@pytest.mark.fail_slow(10)
def test_neldermead_limit(self):
self.check_limits("Nelder-Mead", 200)
def test_powell_limit(self):
self.check_limits("powell", 1000)
def check_limits(self, method, default_iters):
for start_v in [[0.1, 0.1], [1, 1], [2, 2]]:
for mfev in [50, 500, 5000]:
self.funcalls.c = 0
res = optimize.minimize(self.slow_func, start_v,
method=method,
options={"maxfev": mfev})
assert self.funcalls.c == res["nfev"]
if res["success"]:
assert res["nfev"] < mfev
else:
assert res["nfev"] >= mfev
for mit in [50, 500, 5000]:
res = optimize.minimize(self.slow_func, start_v,
method=method,
options={"maxiter": mit})
if res["success"]:
assert res["nit"] <= mit
else:
assert res["nit"] >= mit
for mfev, mit in [[50, 50], [5000, 5000], [5000, np.inf]]:
self.funcalls.c = 0
res = optimize.minimize(self.slow_func, start_v,
method=method,
options={"maxiter": mit,
"maxfev": mfev})
assert self.funcalls.c == res["nfev"]
if res["success"]:
assert res["nfev"] < mfev and res["nit"] <= mit
else:
assert res["nfev"] >= mfev or res["nit"] >= mit
for mfev, mit in [[np.inf, None], [None, np.inf]]:
self.funcalls.c = 0
res = optimize.minimize(self.slow_func, start_v,
method=method,
options={"maxiter": mit,
"maxfev": mfev})
assert self.funcalls.c == res["nfev"]
if res["success"]:
if mfev is None:
assert res["nfev"] < default_iters*2
else:
assert res["nit"] <= default_iters*2
else:
assert (res["nfev"] >= default_iters*2
or res["nit"] >= default_iters*2)
def test_result_x_shape_when_len_x_is_one():
def fun(x):
return x * x
def jac(x):
return 2. * x
def hess(x):
return np.array([[2.]])
methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'L-BFGS-B', 'TNC',
'COBYLA', 'COBYQA', 'SLSQP']
for method in methods:
res = optimize.minimize(fun, np.array([0.1]), method=method)
assert res.x.shape == (1,)
# use jac + hess
methods = ['trust-constr', 'dogleg', 'trust-ncg', 'trust-exact',
'trust-krylov', 'Newton-CG']
for method in methods:
res = optimize.minimize(fun, np.array([0.1]), method=method, jac=jac,
hess=hess)
assert res.x.shape == (1,)
| TestIterationLimits |
python | run-llama__llama_index | llama-index-integrations/indices/llama-index-indices-managed-vectara/llama_index/indices/managed/vectara/retriever.py | {
"start": 1168,
"end": 26437
} | class ____(BaseRetriever):
"""
Vectara Retriever.
Args:
index (VectaraIndex): the Vectara Index
similarity_top_k (int): number of top k results to return, defaults to 5.
offset (int): number of results to skip, defaults to 0.
lambda_val (Union[List[float], float]): for hybrid search.
0 = neural search only.
1 = keyword match only.
In between values are a linear interpolation.
Provide single value for one corpus or a list of values for each corpus.
semantics (Union[List[str], str]): Indicates whether the query is intended as a query or response.
Provide single value for one corpus or a list of values for each corpus.
custom_dimensions (Dict): Custom dimensions for the query.
See (https://docs.vectara.com/docs/learn/semantic-search/add-custom-dimensions)
for more details about usage.
Provide single dict for one corpus or a list of dicts for each corpus.
n_sentences_before (int):
number of sentences before the matched sentence to return in the node
n_sentences_after (int):
number of sentences after the matched sentence to return in the node
filter (Union[List[str], str]): metadata filter (if specified). Provide single string for one corpus
or a list of strings to specify the filter for each corpus (if multiple corpora).
reranker (str): reranker to use: none, mmr, slingshot/multilingual_reranker_v1, userfn, or chain.
rerank_k (int): number of results to fetch for Reranking, defaults to 50.
rerank_limit (int): maximum number of results to return after reranking, defaults to 50.
Don't specify this for chain reranking. Instead, put the "limit" parameter in the dict for each individual reranker.
rerank_cutoff (float): minimum score threshold for results to include after reranking, defaults to 0.
Don't specify this for chain reranking. Instead, put the "chain" parameter in the dict for each individual reranker.
mmr_diversity_bias (float): number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to minimum diversity and 1 to maximum diversity.
Defaults to 0.3.
udf_expression (str): the user defined expression for reranking results.
See (https://docs.vectara.com/docs/learn/user-defined-function-reranker)
for more details about syntax for udf reranker expressions.
rerank_chain (List[Dict]): a list of rerankers to be applied in a sequence and their associated parameters
for the chain reranker. Each element should specify the "type" of reranker (mmr, slingshot, userfn)
and any other parameters (e.g. "limit" or "cutoff" for any type, "diversity_bias" for mmr, and "user_function" for userfn).
If using slingshot/multilingual_reranker_v1, it must be first in the list.
summary_enabled (bool): whether to generate summaries or not. Defaults to False.
summary_response_lang (str): language to use for summary generation.
summary_num_results (int): number of results to use for summary generation.
summary_prompt_name (str): name of the prompt to use for summary generation.
To use Vectara's Mockingbird LLM designed specifically for RAG, set to "mockingbird-1.0-2024-07-16".
If you are indexing documents with tables, we recommend "vectara-summary-table-query-ext-dec-2024-gpt-4o".
See (https://docs.vectara.com/docs/learn/grounded-generation/select-a-summarizer) for all available prompts.
prompt_text (str): the custom prompt, using appropriate prompt variables and functions.
See (https://docs.vectara.com/docs/1.0/prompts/custom-prompts-with-metadata)
for more details.
max_response_chars (int): the desired maximum number of characters for the generated summary.
max_tokens (int): the maximum number of tokens to be returned by the LLM.
temperature (float): The sampling temperature; higher values lead to more randomness.
frequency_penalty (float): How much to penalize repeating tokens in the response, reducing likelihood of repeating the same line.
presence_penalty (float): How much to penalize repeating tokens in the response, increasing the diversity of topics.
citations_style (str): The style of the citations in the summary generation,
either "numeric", "html", "markdown", or "none". Defaults to None.
citations_url_pattern (str): URL pattern for html and markdown citations.
If non-empty, specifies the URL pattern to use for citations; e.g. "{doc.url}".
See (https://docs.vectara.com/docs/api-reference/search-apis/search
#citation-format-in-summary) for more details. Defaults to None.
citations_text_pattern (str): The displayed text for citations.
If not specified, numeric citations are displayed for text.
save_history (bool): Whether to save the query in history. Defaults to False.
"""
def __init__(
self,
index: VectaraIndex,
similarity_top_k: int = 10,
offset: int = 0,
lambda_val: Union[List[float], float] = 0.005,
semantics: Union[List[str], str] = "default",
custom_dimensions: Union[List[Dict], Dict] = {},
n_sentences_before: int = 2,
n_sentences_after: int = 2,
filter: Union[List[str], str] = "",
reranker: VectaraReranker = VectaraReranker.NONE,
rerank_k: int = 50,
rerank_limit: Optional[int] = None,
rerank_cutoff: Optional[float] = None,
mmr_diversity_bias: float = 0.3,
udf_expression: str = None,
rerank_chain: List[Dict] = None,
summary_enabled: bool = False,
summary_response_lang: str = "eng",
summary_num_results: int = 7,
summary_prompt_name: str = "vectara-summary-ext-24-05-med-omni",
prompt_text: Optional[str] = None,
max_response_chars: Optional[int] = None,
max_tokens: Optional[int] = None,
llm_name: Optional[str] = None,
temperature: Optional[float] = None,
frequency_penalty: Optional[float] = None,
presence_penalty: Optional[float] = None,
citations_style: Optional[str] = None,
citations_url_pattern: Optional[str] = None,
citations_text_pattern: Optional[str] = None,
save_history: bool = False,
callback_manager: Optional[CallbackManager] = None,
x_source_str: str = "llama_index",
**kwargs: Any,
) -> None:
"""Initialize params."""
self._index = index
self._similarity_top_k = similarity_top_k
self._offset = offset
self._lambda_val = lambda_val
self._semantics = semantics
self._custom_dimensions = custom_dimensions
self._n_sentences_before = n_sentences_before
self._n_sentences_after = n_sentences_after
self._filter = filter
self._citations_style = citations_style
self._citations_url_pattern = citations_url_pattern
self._citations_text_pattern = citations_text_pattern
self._save_history = save_history
self._conv_id = None
self._x_source_str = x_source_str
if reranker in [
VectaraReranker.MMR,
VectaraReranker.SLINGSHOT,
VectaraReranker.SLINGSHOT_ALT_NAME,
VectaraReranker.UDF,
VectaraReranker.CHAIN,
VectaraReranker.NONE,
]:
self._rerank = True
self._reranker = reranker
self._rerank_k = rerank_k
self._rerank_limit = rerank_limit
self._rerank_cutoff = rerank_cutoff
if self._reranker == VectaraReranker.MMR:
self._mmr_diversity_bias = mmr_diversity_bias
elif self._reranker == VectaraReranker.UDF:
self._udf_expression = udf_expression
elif self._reranker == VectaraReranker.CHAIN:
self._rerank_chain = rerank_chain
for sub_reranker in self._rerank_chain:
if sub_reranker["type"] in [
VectaraReranker.SLINGSHOT,
VectaraReranker.SLINGSHOT_ALT_NAME,
]:
sub_reranker["type"] = "customer_reranker"
sub_reranker["reranker_name"] = "Rerank_Multilingual_v1"
else:
self._rerank = False
if summary_enabled:
self._summary_enabled = True
self._summary_response_lang = summary_response_lang
self._summary_num_results = summary_num_results
self._summary_prompt_name = summary_prompt_name
self._prompt_text = prompt_text
self._max_response_chars = max_response_chars
self._max_tokens = max_tokens
self._llm_name = llm_name
self._temperature = temperature
self._frequency_penalty = frequency_penalty
self._presence_penalty = presence_penalty
else:
self._summary_enabled = False
super().__init__(callback_manager)
def _get_post_headers(self) -> dict:
"""Returns headers that should be attached to each post request."""
return {
"x-api-key": self._index._vectara_api_key,
"Content-Type": "application/json",
"X-Source": self._x_source_str,
}
@property
def similarity_top_k(self) -> int:
"""Return similarity top k."""
return self._similarity_top_k
@similarity_top_k.setter
def similarity_top_k(self, similarity_top_k: int) -> None:
"""Set similarity top k."""
self._similarity_top_k = similarity_top_k
def _retrieve(
self,
query_bundle: QueryBundle,
**kwargs: Any,
) -> List[NodeWithScore]:
"""
Retrieve top k most similar nodes.
Args:
query_bundle: Query Bundle
"""
return self._vectara_query(query_bundle, **kwargs)[0] # return top_nodes only
def _build_vectara_query_body(
self,
query_str: str,
**kwargs: Any,
) -> Dict:
data = {
"query": query_str,
"search": {
"offset": self._offset,
"limit": self._rerank_k if self._rerank else self._similarity_top_k,
"context_configuration": {
"sentences_before": self._n_sentences_before,
"sentences_after": self._n_sentences_after,
},
},
}
corpora_config = [
{"corpus_key": corpus_key}
for corpus_key in self._index._vectara_corpus_key.split(",")
]
for i in range(len(corpora_config)):
corpora_config[i]["custom_dimensions"] = (
self._custom_dimensions[i]
if isinstance(self._custom_dimensions, list)
else self._custom_dimensions
)
corpora_config[i]["metadata_filter"] = (
self._filter[i] if isinstance(self._filter, list) else self._filter
)
corpora_config[i]["lexical_interpolation"] = (
self._lambda_val[i]
if isinstance(self._lambda_val, list)
else self._lambda_val
)
corpora_config[i]["semantics"] = (
self._semantics[i]
if isinstance(self._semantics, list)
else self._semantics
)
data["search"]["corpora"] = corpora_config
if self._rerank:
rerank_config = {}
if self._reranker in [
VectaraReranker.SLINGSHOT,
VectaraReranker.SLINGSHOT_ALT_NAME,
]:
rerank_config["type"] = "customer_reranker"
rerank_config["reranker_name"] = "Rerank_Multilingual_v1"
else:
rerank_config["type"] = self._reranker
if self._reranker == VectaraReranker.MMR:
rerank_config["diversity_bias"] = self._mmr_diversity_bias
elif self._reranker == VectaraReranker.UDF:
rerank_config["user_function"] = self._udf_expression
elif self._reranker == VectaraReranker.CHAIN:
rerank_config["rerankers"] = self._rerank_chain
if self._rerank_limit:
rerank_config["limit"] = self._rerank_limit
if self._rerank_cutoff and self._reranker != VectaraReranker.CHAIN:
rerank_config["cutoff"] = self._rerank_cutoff
data["search"]["reranker"] = rerank_config
if self._summary_enabled:
summary_config = {
"response_language": self._summary_response_lang,
"max_used_search_results": self._summary_num_results,
"generation_preset_name": self._summary_prompt_name,
"enable_factual_consistency_score": True,
}
if self._prompt_text:
summary_config["prompt_template"] = self._prompt_text
if self._max_response_chars:
summary_config["max_response_characters"] = self._max_response_chars
model_parameters = {}
if self._max_tokens:
model_parameters["max_tokens"] = self._max_tokens
if self._temperature:
model_parameters["temperature"] = self._temperature
if self._frequency_penalty:
model_parameters["frequency_penalty"] = self._frequency_penalty
if self._presence_penalty:
model_parameters["presence_penalty"] = self._presence_penalty
if self._llm_name:
model_parameters["llm_name"] = self._llm_name
if len(model_parameters) > 0:
summary_config["model_parameters"] = model_parameters
citations_config = {}
if self._citations_style:
if self._citations_style in ["numeric", "none"]:
citations_config["style"] = self._citations_style
elif (
self._citations_style in ["html", "markdown"]
and self._citations_url_pattern
):
citations_config["style"] = self._citations_style
citations_config["url_pattern"] = self._citations_url_pattern
citations_config["text_pattern"] = self._citations_text_pattern
else:
_logger.warning(
f"Invalid citations style {self._citations_style}. Must be one of 'numeric', 'html', 'markdown', or 'none'."
)
if len(citations_config) > 0:
summary_config["citations"] = citations_config
data["generation"] = summary_config
data["save_history"] = self._save_history
return data
def _vectara_stream(
self,
query_bundle: QueryBundle,
chat: bool = False,
conv_id: Optional[str] = None,
verbose: bool = False,
callback_func: Callable[[List, Dict], None] = None,
**kwargs: Any,
) -> StreamingResponse:
"""
Query Vectara index to get for top k most similar nodes.
Args:
query_bundle: Query Bundle
chat: whether to use chat API in Vectara
conv_id: conversation ID, if adding to existing chat
"""
body = self._build_vectara_query_body(query_bundle.query_str)
body["stream_response"] = True
if verbose:
print(f"Vectara streaming query request body: {body}")
if chat:
body["chat"] = {"store": True}
if conv_id or self._conv_id:
conv_id = conv_id or self._conv_id
response = self._index._session.post(
headers=self._get_post_headers(),
url=f"{self._index._base_url}/v2/chats/{conv_id}/turns",
data=json.dumps(body),
timeout=self._index.vectara_api_timeout,
stream=True,
)
else:
response = self._index._session.post(
headers=self._get_post_headers(),
url=f"{self._index._base_url}/v2/chats",
data=json.dumps(body),
timeout=self._index.vectara_api_timeout,
stream=True,
)
else:
response = self._index._session.post(
headers=self._get_post_headers(),
url=f"{self._index._base_url}/v2/query",
data=json.dumps(body),
timeout=self._index.vectara_api_timeout,
stream=True,
)
if response.status_code != 200:
result = response.json()
if response.status_code == 400:
if "messages" in result:
_logger.error(
f"Query failed (code {response.status_code}), reason {result['messages'][0]}"
)
else:
_logger.error(
f"Query failed (code {response.status_code}), err response {result}"
)
return None
def process_chunks(response):
source_nodes = []
response_metadata = {}
def text_generator() -> TokenGen:
for line in response.iter_lines():
line = line.decode("utf-8")
if line:
key, value = line.split(":", 1)
if key == "data":
line = json.loads(value)
if line["type"] == "generation_chunk":
yield line["generation_chunk"]
elif line["type"] == "factual_consistency_score":
response_metadata["fcs"] = line[
"factual_consistency_score"
]
elif line["type"] == "search_results":
search_results = line["search_results"]
source_nodes.extend(
[
NodeWithScore(
node=Node(
text_resource=MediaResource(
text=search_result["text"]
),
id_=search_result["document_id"],
metadata={
# Metadata from the matched part
**search_result.get(
"part_metadata", {}
),
# Document-level metadata
"document": search_result.get(
"document_metadata", {}
),
},
),
score=search_result["score"],
)
for search_result in search_results[
: self._similarity_top_k
]
]
)
elif line["type"] == "chat_info":
self._conv_id = line["chat_id"]
response_metadata["chat_id"] = line["chat_id"]
if callback_func:
callback_func(source_nodes, response_metadata)
return text_generator(), source_nodes, response_metadata
response_chunks, response_nodes, response_metadata = process_chunks(response)
return StreamingResponse(
response_gen=response_chunks,
source_nodes=response_nodes,
metadata=response_metadata,
)
def _vectara_query(
self,
query_bundle: QueryBundle,
chat: bool = False,
conv_id: Optional[str] = None,
verbose: bool = False,
**kwargs: Any,
) -> Tuple[List[NodeWithScore], Dict, str]:
"""
Query Vectara index to get for top k most similar nodes.
Args:
query: Query Bundle
chat: whether to use chat API in Vectara
conv_id: conversation ID, if adding to existing chat
verbose: whether to print verbose output (e.g. for debugging)
Additional keyword arguments
Returns:
List[NodeWithScore]: list of nodes with scores
Dict: summary
str: conversation ID, if applicable
"""
data = self._build_vectara_query_body(query_bundle.query_str)
if verbose:
print(f"Vectara query request body: {data}")
if chat:
data["chat"] = {"store": True}
if conv_id:
response = self._index._session.post(
headers=self._get_post_headers(),
url=f"{self._index._base_url}/v2/chats/{conv_id}/turns",
data=json.dumps(data),
timeout=self._index.vectara_api_timeout,
)
else:
response = self._index._session.post(
headers=self._get_post_headers(),
url=f"{self._index._base_url}/v2/chats",
data=json.dumps(data),
timeout=self._index.vectara_api_timeout,
)
else:
response = self._index._session.post(
headers=self._get_post_headers(),
url=f"{self._index._base_url}/v2/query",
data=json.dumps(data),
timeout=self._index.vectara_api_timeout,
)
result = response.json()
if response.status_code != 200:
if "messages" in result:
_logger.error(
f"Query failed (code {response.status_code}), reason {result['messages'][0]}"
)
else:
_logger.error(
f"Query failed (code {response.status_code}), err response {result}"
)
return [], {"text": ""}, ""
if "warnings" in result:
_logger.warning(f"Query warning(s) {(', ').join(result['warnings'])}")
if verbose:
print(f"Vectara query response: {result}")
if self._summary_enabled:
summary = {
"text": result["answer"] if chat else result["summary"],
"fcs": result.get("factual_consistency_score"),
}
else:
summary = None
search_results = result["search_results"]
top_nodes = [
NodeWithScore(
node=Node(
text_resource=MediaResource(text=search_result["text"]),
id_=search_result["document_id"],
metadata={
# Metadata from the matched part
**search_result.get("part_metadata", {}),
# Document-level metadata
"document": search_result.get("document_metadata", {}),
},
),
score=search_result["score"],
)
for search_result in search_results[: self._similarity_top_k]
]
conv_id = result["chat_id"] if chat else None
return top_nodes, summary, conv_id
async def _avectara_query(
self,
query_bundle: QueryBundle,
chat: bool = False,
conv_id: Optional[str] = None,
verbose: bool = False,
**kwargs: Any,
) -> Tuple[List[NodeWithScore], Dict]:
"""
Asynchronously query Vectara index to get for top k most similar nodes.
Args:
query: Query Bundle
chat: whether to use chat API in Vectara
conv_id: conversation ID, if adding to existing chat
verbose: whether to print verbose output (e.g. for debugging)
Additional keyword arguments
Returns:
List[NodeWithScore]: list of nodes with scores
Dict: summary
"""
return await self._vectara_query(query_bundle, chat, conv_id, verbose, **kwargs)
| VectaraRetriever |
python | tensorflow__tensorflow | tensorflow/python/distribute/metrics_v1_test.py | {
"start": 3633,
"end": 17117
} | class ____(test.TestCase, parameterized.TestCase):
def _test_metric(self, distribution, dataset_fn, metric_fn, expected_fn):
with ops.Graph().as_default(), distribution.scope():
iterator = distribution.make_input_fn_iterator(lambda _: dataset_fn())
if strategy_test_lib.is_tpu_strategy(distribution):
def step_fn(ctx, inputs):
value, update = distribution.extended.call_for_each_replica(
metric_fn, args=(inputs,))
ctx.set_non_tensor_output(name="value", output=value)
return distribution.group(update)
ctx = distribution.extended.experimental_run_steps_on_iterator(
step_fn, iterator, iterations=distribution.extended.steps_per_run)
update = ctx.run_op
value = ctx.non_tensor_outputs["value"]
# In each run, we run multiple steps, and each steps consumes as many
# batches as number of replicas.
batches_per_update = (
distribution.num_replicas_in_sync *
distribution.extended.steps_per_run)
else:
value, update = distribution.extended.call_for_each_replica(
metric_fn, args=(iterator.get_next(),))
update = distribution.group(update)
# TODO(josh11b): Once we switch to using a global batch size for input,
# replace "distribution.num_replicas_in_sync" with "1".
batches_per_update = distribution.num_replicas_in_sync
self.evaluate(iterator.initializer)
self.evaluate(variables.local_variables_initializer())
batches_consumed = 0
for i in range(4):
self.evaluate(update)
batches_consumed += batches_per_update
self.assertAllClose(expected_fn(batches_consumed),
self.evaluate(value),
0.001,
msg="After update #" + str(i+1))
if batches_consumed >= 4: # Consume 4 input batches in total.
break
@combinations.generate(all_combinations() + tpu_combinations())
def testMean(self, distribution):
def _dataset_fn():
return dataset_ops.Dataset.range(1000).map(math_ops.to_float).batch(
4, drop_remainder=True)
def _expected_fn(num_batches):
# Mean(0..3) = 1.5, Mean(0..7) = 3.5, Mean(0..11) = 5.5, etc.
return num_batches * 2 - 0.5
self._test_metric(distribution, _dataset_fn, metrics.mean, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testAccuracy(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.accuracy(labels, predictions)
def _expected_fn(num_batches):
return [3./4, 3./8, 3./12, 4./16][num_batches - 1]
self._test_metric(
distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)
# TODO(priyag, jhseu): Enable TPU for this test once scatter_add is added
# for TPUMirroredVariable.
@combinations.generate(all_combinations())
def testMeanPerClassAccuracy(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.mean_per_class_accuracy(
labels, predictions, num_classes=5)
def _expected_fn(num_batches):
mean = lambda x: sum(x) / len(x)
return [mean([1., 1., 1., 0., 0.]),
mean([0.5, 0.5, 0.5, 0., 0.]),
mean([1./3, 1./3, 0.5, 0., 0.]),
mean([0.5, 1./3, 1./3, 0., 0.])][num_batches - 1]
self._test_metric(
distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)
# NOTE(priyag): This metric doesn't work on TPUs yet.
@combinations.generate(all_combinations())
def testMeanIOU(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.mean_iou(
labels, predictions, num_classes=5)
def _expected_fn(num_batches):
mean = lambda x: sum(x) / len(x)
return [mean([1./2, 1./1, 1./1, 0.]), # no class 4 in first batch
mean([1./4, 1./4, 1./3, 0., 0.]),
mean([1./6, 1./6, 1./5, 0., 0.]),
mean([2./8, 1./7, 1./7, 0., 0.])][num_batches - 1]
self._test_metric(
distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testMeanTensor(self, distribution):
def _dataset_fn():
dataset = dataset_ops.Dataset.range(1000).map(math_ops.to_float)
# Want to produce a fixed, known shape, so drop remainder when batching.
dataset = dataset.batch(4, drop_remainder=True)
return dataset
def _expected_fn(num_batches):
# Mean(0, 4, ..., 4 * num_batches - 4) == 2 * num_batches - 2
# Mean(1, 5, ..., 4 * num_batches - 3) == 2 * num_batches - 1
# Mean(2, 6, ..., 4 * num_batches - 2) == 2 * num_batches
# Mean(3, 7, ..., 4 * num_batches - 1) == 2 * num_batches + 1
first = 2. * num_batches - 2.
return [first, first + 1., first + 2., first + 3.]
self._test_metric(
distribution, _dataset_fn, metrics.mean_tensor, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testAUCROC(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.auc(labels, predictions, num_thresholds=8, curve="ROC",
summation_method="careful_interpolation")
def _expected_fn(num_batches):
return [0.5, 7./9, 0.8, 0.75][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testAUCPR(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.auc(labels, predictions, num_thresholds=8, curve="PR",
summation_method="careful_interpolation")
def _expected_fn(num_batches):
return [0.797267, 0.851238, 0.865411, 0.797267][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testFalseNegatives(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.false_negatives(labels, predictions)
def _expected_fn(num_batches):
return [1., 1., 2., 3.][num_batches - 1]
self._test_metric(
distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testFalseNegativesAtThresholds(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.false_negatives_at_thresholds(labels, predictions, [.5])
def _expected_fn(num_batches):
return [[1.], [1.], [2.], [3.]][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testTrueNegatives(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.true_negatives(labels, predictions)
def _expected_fn(num_batches):
return [0., 1., 2., 3.][num_batches - 1]
self._test_metric(
distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testTrueNegativesAtThresholds(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.true_negatives_at_thresholds(labels, predictions, [.5])
def _expected_fn(num_batches):
return [[0.], [1.], [2.], [3.]][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testFalsePositives(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.false_positives(labels, predictions)
def _expected_fn(num_batches):
return [1., 2., 2., 3.][num_batches - 1]
self._test_metric(
distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testFalsePositivesAtThresholds(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.false_positives_at_thresholds(labels, predictions, [.5])
def _expected_fn(num_batches):
return [[1.], [2.], [2.], [3.]][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testTruePositives(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.true_positives(labels, predictions)
def _expected_fn(num_batches):
return [1., 2., 3., 3.][num_batches - 1]
self._test_metric(
distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testTruePositivesAtThresholds(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.true_positives_at_thresholds(labels, predictions, [.5])
def _expected_fn(num_batches):
return [[1.], [2.], [3.], [3.]][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testPrecision(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.precision(labels, predictions)
def _expected_fn(num_batches):
return [0.5, 0.5, 0.6, 0.5][num_batches - 1]
self._test_metric(
distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testPrecisionAtThreshold(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.precision_at_thresholds(labels, predictions, [0.5])
def _expected_fn(num_batches):
return [[0.5], [0.5], [0.6], [0.5]][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testRecall(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.recall(labels, predictions)
def _expected_fn(num_batches):
return [0.5, 2./3, 0.6, 0.5][num_batches - 1]
self._test_metric(
distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testRecallAtThreshold(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.recall_at_thresholds(labels, predictions, [0.5])
def _expected_fn(num_batches):
return [[0.5], [2./3], [0.6], [0.5]][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testMeanSquaredError(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.mean_squared_error(labels, predictions)
def _expected_fn(num_batches):
return [0., 1./32, 0.208333, 0.15625][num_batches - 1]
self._test_metric(
distribution, _regression_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations() + tpu_combinations())
def testRootMeanSquaredError(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.root_mean_squared_error(labels, predictions)
def _expected_fn(num_batches):
return [0., 0.176777, 0.456435, 0.395285][num_batches - 1]
self._test_metric(
distribution, _regression_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testSensitivityAtSpecificity(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.sensitivity_at_specificity(labels, predictions, 0.8)
def _expected_fn(num_batches):
return [0.5, 2./3, 0.6, 0.5][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testSpecificityAtSensitivity(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.specificity_at_sensitivity(labels, predictions, 0.95)
def _expected_fn(num_batches):
return [0., 1./3, 0.5, 0.5][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
if __name__ == "__main__":
test.main()
| MetricsV1Test |
python | ansible__ansible | test/units/plugins/lookup/test_password.py | {
"start": 17037,
"end": 19873
} | class ____(BaseTestLookupModule):
@patch.object(PluginLoader, '_get_paths')
@patch('ansible.plugins.lookup.password._write_password_file')
def test_no_encrypt(self, mock_get_paths, mock_write_file):
mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three']
results = self.password_lookup.run([u'/path/to/somewhere'], None)
# FIXME: assert something useful
for result in results:
assert len(result) == DEFAULT_LENGTH
assert isinstance(result, str)
@patch.object(PluginLoader, '_get_paths')
@patch('ansible.plugins.lookup.password._write_password_file')
def test_password_already_created_no_encrypt(self, mock_get_paths, mock_write_file):
mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three']
password.os.path.exists = lambda x: x == to_bytes('/path/to/somewhere')
with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m:
results = self.password_lookup.run([u'/path/to/somewhere chars=anything'], None)
for result in results:
self.assertEqual(result, u'hunter42')
@patch.object(PluginLoader, '_get_paths')
@patch('ansible.plugins.lookup.password._write_password_file')
def test_only_a(self, mock_get_paths, mock_write_file):
mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three']
results = self.password_lookup.run([u'/path/to/somewhere chars=a'], None)
for result in results:
self.assertEqual(result, u'a' * DEFAULT_LENGTH)
@patch('time.sleep')
def test_lock_been_held(self, mock_sleep):
# pretend the lock file is here
def _already_exists(*args, **kwargs):
raise FileExistsError("The lock is busy, wait and try again.")
with (
pytest.raises(AnsibleError, match='^Password lookup cannot get the lock in 7 seconds.*'),
patch.object(password.os, 'open', _already_exists),
patch.object(password.os.path, 'exists', lambda *args, **kwargs: True),
):
# should timeout here
self.password_lookup.run([u'/path/to/somewhere chars=anything'], None)
def test_lock_not_been_held(self):
# pretend now there is password file but no lock
password.os.path.exists = lambda x: x == to_bytes('/path/to/somewhere')
with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m:
# should not timeout here
results = self.password_lookup.run([u'/path/to/somewhere chars=anything'], None)
for result in results:
self.assertEqual(result, u'hunter42')
@pytest.mark.skipif(passlib is None, reason='passlib must be installed to run these tests')
| TestLookupModuleWithoutPasslib |
python | PrefectHQ__prefect | src/prefect/utilities/visualization.py | {
"start": 2663,
"end": 2907
} | class ____:
def __init__(
self,
name: str,
upstream_tasks: Optional[list["VizTask"]] = None,
):
self.name = name
self.upstream_tasks: list[VizTask] = upstream_tasks if upstream_tasks else []
| VizTask |
python | realpython__materials | python-sqlite-sqlalchemy/project/examples/example_3/app/models.py | {
"start": 5493,
"end": 6409
} | class ____(db.Model):
__tablename__ = "invoice_items"
invoice_line_id = db.Column("InvoiceLineId", db.Integer, primary_key=True)
invoice_id = db.Column(
"InvoiceId",
db.ForeignKey("invoices.InvoiceId"),
nullable=False,
index=True,
)
track_id = db.Column(
"TrackId", db.ForeignKey("tracks.TrackId"), nullable=False, index=True
)
unit_price = db.Column("UnitPrice", db.Float, nullable=False)
quantity = db.Column("Quantity", db.Integer, nullable=False)
playlist_track = db.Table(
"playlist_track",
db.Column(
"PlaylistId",
db.Integer,
db.ForeignKey("playlists.PlaylistId"),
primary_key=True,
nullable=False,
),
db.Column(
"TrackId",
db.Integer,
db.ForeignKey("tracks.TrackId"),
primary_key=True,
nullable=False,
index=True,
),
)
| InvoiceItem |
python | huggingface__transformers | src/transformers/models/gpt2/configuration_gpt2.py | {
"start": 850,
"end": 8562
} | class ____(PreTrainedConfig):
"""
This is the configuration class to store the configuration of a [`GPT2Model`]. It is used to
instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the GPT-2
[openai-community/gpt2](https://huggingface.co/openai-community/gpt2) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50257):
Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GPT2Model`].
n_positions (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_embd (`int`, *optional*, defaults to 768):
Dimensionality of the embeddings and hidden states.
n_layer (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
n_inner (`int`, *optional*):
Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
activation_function (`str`, *optional*, defaults to `"gelu_new"`):
Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
resid_pdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the embeddings.
attn_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
summary_type (`string`, *optional*, defaults to `"cls_index"`):
Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`].
Has to be one of the following options:
- `"last"`: Take the last token hidden state (like XLNet).
- `"first"`: Take the first token hidden state (like BERT).
- `"mean"`: Take the mean of all tokens hidden states.
- `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
- `"attn"`: Not implemented now, use multi-head attention.
summary_use_proj (`bool`, *optional*, defaults to `True`):
Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`].
Whether or not to add a projection after the vector extraction.
summary_activation (`str`, *optional*):
Argument used when doing sequence summary. Used in for the multiple choice head in
[`GPT2DoubleHeadsModel`].
Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`].
Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
summary_first_dropout (`float`, *optional*, defaults to 0.1):
Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`].
The dropout ratio to be used after the projection and activation.
scale_attn_weights (`bool`, *optional*, defaults to `True`):
Scale attention weights by dividing by sqrt(hidden_size)..
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
bos_token_id (`int`, *optional*, defaults to 50256):
Id of the beginning of sentence token in the vocabulary.
eos_token_id (`int`, *optional*, defaults to 50256):
Id of the end of sentence token in the vocabulary.
scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
Whether to additionally scale attention weights by `1 / layer_idx + 1`.
reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
dot-product/softmax to float() when training with mixed precision.
Example:
```python
>>> from transformers import GPT2Config, GPT2Model
>>> # Initializing a GPT2 configuration
>>> configuration = GPT2Config()
>>> # Initializing a model (with random weights) from the configuration
>>> model = GPT2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "gpt2"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(
self,
vocab_size=50257,
n_positions=1024,
n_embd=768,
n_layer=12,
n_head=12,
n_inner=None,
activation_function="gelu_new",
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
summary_type="cls_index",
summary_use_proj=True,
summary_activation=None,
summary_proj_to_labels=True,
summary_first_dropout=0.1,
scale_attn_weights=True,
use_cache=True,
bos_token_id=50256,
eos_token_id=50256,
scale_attn_by_inverse_layer_idx=False,
reorder_and_upcast_attn=False,
**kwargs,
):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_first_dropout = summary_first_dropout
self.summary_proj_to_labels = summary_proj_to_labels
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
self.reorder_and_upcast_attn = reorder_and_upcast_attn
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
__all__ = ["GPT2Config"]
| GPT2Config |
python | ansible__ansible | lib/ansible/module_utils/_internal/_datatag/__init__.py | {
"start": 12746,
"end": 13249
} | class ____(AnsibleSerializableWrapper[datetime.time]):
__slots__ = _NO_INSTANCE_STORAGE
@classmethod
def _from_dict(cls: t.Type[_TAnsibleSerializable], d: t.Dict[str, t.Any]) -> datetime.time:
value = datetime.time.fromisoformat(d['iso8601'])
value.replace(fold=d['fold'])
return value
def _as_dict(self) -> t.Dict[str, t.Any]:
return dict(
iso8601=self._value.isoformat(),
fold=self._value.fold,
)
| AnsibleSerializableTime |
python | django-debug-toolbar__django-debug-toolbar | debug_toolbar/migrations/0001_initial.py | {
"start": 43,
"end": 703
} | class ____(migrations.Migration):
initial = True
operations = [
migrations.CreateModel(
name="HistoryEntry",
fields=[
(
"request_id",
models.UUIDField(primary_key=True, serialize=False),
),
("data", models.JSONField(default=dict)),
("created_at", models.DateTimeField(auto_now_add=True)),
],
options={
"verbose_name": "history entry",
"verbose_name_plural": "history entries",
"ordering": ["-created_at"],
},
),
]
| Migration |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/tensor_format_test.py | {
"start": 1173,
"end": 24127
} | class ____(test_util.TensorFlowTestCase):
def setUp(self):
np.set_printoptions(
precision=8, threshold=1000, edgeitems=3, linewidth=75)
def _checkTensorMetadata(self, tensor, annotations):
self.assertEqual(
{"dtype": tensor.dtype, "shape": tensor.shape},
annotations["tensor_metadata"])
# Regular expression for text representation of float numbers, possibly in
# engineering notation.
_ELEMENT_REGEX = re.compile(
r"([+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?|nan|inf|-inf)")
def _checkBeginIndicesAnnotations(self, out, a):
"""Check the beginning-index annotations of an ndarray representation.
Args:
out: An instance of RichTextLines representing a numpy.ndarray.
a: The numpy.ndarray being represented.
Raises:
ValueError: if any ellipses ("...") are found in the lines representing
the array.
"""
begin_line_num = 0
while not out.lines[begin_line_num].startswith("array"):
begin_line_num += 1
element_index = 0
for line_num in range(begin_line_num, len(out.lines)):
line = out.lines[line_num]
if "..." in line:
raise ValueError("Unexpected found ellipses in line representing array")
matches = re.finditer(self._ELEMENT_REGEX, line)
for line_item_index, _ in enumerate(matches):
subscripts = list(np.unravel_index(element_index, a.shape))
if line_item_index == 0:
self.assertEqual({tensor_format.BEGIN_INDICES_KEY: subscripts},
out.annotations[line_num])
element_index += 1
self.assertEqual(element_index, np.size(a))
def _checkTensorElementLocations(self, out, a):
"""Check the results of locate_tensor_element on an ndarray representation.
that represents a numpy.ndarray.
Args:
out: An instance of RichTextLines representing a numpy.ndarray.
a: The numpy.ndarray being represented.
Raises:
ValueError: if any ellipses ("...") are found in the lines representing
the array.
"""
# First, locate the beginning of the tensor value section.
begin_line_num = 0
while not out.lines[begin_line_num].startswith("array"):
begin_line_num += 1
# Second, find all matches to tensor-value regex.
element_index = 0
for line_num in range(begin_line_num, len(out.lines)):
line = out.lines[line_num]
if "..." in line:
raise ValueError("Unexpected found ellipses in line representing array")
matches = re.finditer(self._ELEMENT_REGEX, line)
for match in matches:
subscripts = list(np.unravel_index(element_index, a.shape))
is_omitted, row, start_col, end_col = (
tensor_format.locate_tensor_element(out, subscripts))
self.assertFalse(is_omitted)
self.assertEqual(line_num, row)
self.assertEqual(match.start(), start_col)
self.assertEqual(match.end(), end_col)
element_index += 1
self.assertEqual(element_index, np.size(a))
def _findFirst(self, lines, string):
"""Find first occurrence of a string in a list of strings."""
for i, line in enumerate(lines):
find_index = line.find(string)
if find_index >= 0:
return i, find_index
def _extractBoldNumbers(self, out, start_line):
"""Extract all numbers that have the bold font attribute.
Args:
out: An instance of RichTextLines.
start_line: 0-based index to start from.
Returns:
A list of floats.
"""
floats = []
for i in range(start_line, len(out.lines)):
if i not in out.font_attr_segs:
continue
line_attrs = out.font_attr_segs[i]
for begin, end, attr_value in line_attrs:
if attr_value == "bold":
floats.append(float(out.lines[i][begin:end]))
return floats
def testFormatZeroDimensionTensor(self):
a = np.array(42, dtype=np.int32)
out = tensor_format.format_tensor(a, "a")
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertTrue(out.lines[2].startswith("array(42"))
self._checkTensorMetadata(a, out.annotations)
def testFormatTensorHighlightsTensorNameWithoutDebugOp(self):
tensor_name = "a_tensor:0"
a = np.zeros(2)
out = tensor_format.format_tensor(
a, tensor_name, np_printoptions={"linewidth": 40})
self.assertEqual([(8, 8 + len(tensor_name), "bold")], out.font_attr_segs[0])
def testFormatTensorHighlightsTensorNameWithDebugOp(self):
tensor_name = "a_tensor:0"
debug_op = "DebugIdentity"
a = np.zeros(2)
out = tensor_format.format_tensor(
a, "%s:%s" % (tensor_name, debug_op), np_printoptions={"linewidth": 40})
self.assertEqual([(8, 8 + len(tensor_name), "bold"),
(8 + len(tensor_name) + 1,
8 + len(tensor_name) + 1 + len(debug_op), "yellow")],
out.font_attr_segs[0])
def testFormatTensor1DNoEllipsis(self):
a = np.zeros(20)
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 40})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorMetadata(a, out.annotations)
# Check annotations for beginning indices of the lines.
self._checkBeginIndicesAnnotations(out, a)
def testFormatTensor2DNoEllipsisNoRowBreak(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, "a")
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorMetadata(a, out.annotations)
self._checkBeginIndicesAnnotations(out, a)
def testFormatTensorSuppressingTensorName(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, None)
self.assertEqual(repr(a).split("\n"), out.lines)
self._checkTensorMetadata(a, out.annotations)
self._checkBeginIndicesAnnotations(out, a)
def testFormatTensorWithMetadata(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, "a", include_metadata=True)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["Tensor \"a\":",
" dtype: float64",
" shape: (4, 4)",
""], out.lines[:4])
self.assertEqual(repr(a).split("\n"), out.lines[4:])
self._checkTensorMetadata(a, out.annotations)
self._checkBeginIndicesAnnotations(out, a)
def testFormatTensor2DNoEllipsisWithRowBreak(self):
a = np.linspace(0.0, 1.0 - 1.0 / 40.0, 40).reshape([2, 20])
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 50})
self.assertEqual(
{"dtype": a.dtype, "shape": a.shape},
out.annotations["tensor_metadata"])
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorMetadata(a, out.annotations)
# Check annotations for the beginning indices of the lines.
self._checkBeginIndicesAnnotations(out, a)
def testFormatTensor3DNoEllipsis(self):
a = np.linspace(0.0, 1.0 - 1.0 / 24.0, 24).reshape([2, 3, 4])
out = tensor_format.format_tensor(a, "a")
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorMetadata(a, out.annotations)
self._checkBeginIndicesAnnotations(out, a)
def testFormatTensor3DNoEllipsisWithArgwhereHighlightWithMatches(self):
a = np.linspace(0.0, 1.0 - 1.0 / 24.0, 24).reshape([2, 3, 4])
lower_bound = 0.26
upper_bound = 0.5
def highlight_filter(x):
return np.logical_and(x > lower_bound, x < upper_bound)
highlight_options = tensor_format.HighlightOptions(
highlight_filter, description="between 0.26 and 0.5")
out = tensor_format.format_tensor(
a, "a", highlight_options=highlight_options)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["Tensor \"a\": "
"Highlighted(between 0.26 and 0.5): 5 of 24 element(s) (20.83%)",
""],
out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorMetadata(a, out.annotations)
# Check annotations for beginning indices of the lines.
self._checkBeginIndicesAnnotations(out, a)
self.assertAllClose(
[0.29166667, 0.33333333, 0.375, 0.41666667, 0.45833333],
self._extractBoldNumbers(out, 2))
def testFormatTensor3DNoEllipsisWithArgwhereHighlightWithNoMatches(self):
a = np.linspace(0.0, 1.0 - 1.0 / 24.0, 24).reshape([2, 3, 4])
def highlight_filter(x):
return x > 10.0
highlight_options = tensor_format.HighlightOptions(highlight_filter)
out = tensor_format.format_tensor(
a, "a", highlight_options=highlight_options)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["Tensor \"a\": Highlighted: 0 of 24 element(s) (0.00%)", ""],
out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorMetadata(a, out.annotations)
self._checkBeginIndicesAnnotations(out, a)
# Check font attribute segments for highlighted elements.
for i in range(2, len(out.lines)):
self.assertNotIn(i, out.font_attr_segs)
def testFormatTensorWithEllipses(self):
a = (np.arange(11 * 11 * 11) + 1000).reshape([11, 11, 11]).astype(np.int32)
out = tensor_format.format_tensor(
a, "a", False, np_printoptions={"threshold": 100, "edgeitems": 2})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorMetadata(a, out.annotations)
# Check annotations for beginning indices of the lines.
actual_row_0_0_0, _ = self._findFirst(out.lines, "1000")
self.assertEqual({tensor_format.BEGIN_INDICES_KEY: [0, 0, 0]},
out.annotations[actual_row_0_0_0])
actual_row_0_1_0, _ = self._findFirst(out.lines, "1011")
self.assertEqual({tensor_format.BEGIN_INDICES_KEY: [0, 1, 0]},
out.annotations[actual_row_0_1_0])
# Find the first line that is completely omitted.
omitted_line = 2
while not out.lines[omitted_line].strip().startswith("..."):
omitted_line += 1
self.assertEqual({tensor_format.OMITTED_INDICES_KEY: [0, 2, 0]},
out.annotations[omitted_line])
actual_row_10_10_0, _ = self._findFirst(out.lines, "2320")
self.assertEqual({tensor_format.BEGIN_INDICES_KEY: [10, 10, 0]},
out.annotations[actual_row_10_10_0])
# Find the last line that is completely omitted.
omitted_line = len(out.lines) - 1
while not out.lines[omitted_line].strip().startswith("..."):
omitted_line -= 1
self.assertEqual({tensor_format.OMITTED_INDICES_KEY: [10, 2, 0]},
out.annotations[omitted_line])
def testFormatUninitializedTensor(self):
tensor_proto = tensor_pb2.TensorProto(
dtype=types_pb2.DataType.Value("DT_FLOAT"),
tensor_shape=tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]))
out = tensor_format.format_tensor(
debug_data.InconvertibleTensorProto(tensor_proto, False), "a")
self.assertEqual(["Tensor \"a\":", "", "Uninitialized tensor:"],
out.lines[:3])
self.assertEqual(str(tensor_proto).split("\n"), out.lines[3:])
def testFormatResourceTypeTensor(self):
tensor_proto = tensor_pb2.TensorProto(
dtype=types_pb2.DataType.Value("DT_RESOURCE"),
tensor_shape=tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]))
out = tensor_format.format_tensor(
debug_data.InconvertibleTensorProto(tensor_proto), "a")
self.assertEqual(["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(str(tensor_proto).split("\n"), out.lines[2:])
def testLocateTensorElement1DNoEllipsis(self):
a = np.zeros(20)
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 40})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorElementLocations(out, a)
with self.assertRaisesRegex(ValueError, "Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [20])
with self.assertRaisesRegex(ValueError, "Indices contain negative"):
tensor_format.locate_tensor_element(out, [-1])
with self.assertRaisesRegex(ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [0, 0])
def testLocateTensorElement1DNoEllipsisBatchMode(self):
a = np.zeros(20)
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 40})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorElementLocations(out, a)
def testBatchModeWithErrors(self):
a = np.zeros(20)
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 40})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
with self.assertRaisesRegex(ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [[0, 0], [0]])
with self.assertRaisesRegex(ValueError, "Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [[0], [20]])
with self.assertRaisesRegex(ValueError,
r"Indices contain negative value\(s\)"):
tensor_format.locate_tensor_element(out, [[0], [-1]])
with self.assertRaisesRegex(
ValueError, "Input indices sets are not in ascending order"):
tensor_format.locate_tensor_element(out, [[5], [0]])
def testLocateTensorElement1DTinyAndNanValues(self):
a = np.ones([3, 3]) * 1e-8
a[1, 0] = np.nan
a[1, 2] = np.inf
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 100})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorElementLocations(out, a)
def testLocateTensorElement2DNoEllipsis(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, "a")
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
self._checkTensorElementLocations(out, a)
with self.assertRaisesRegex(ValueError, "Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [1, 4])
with self.assertRaisesRegex(ValueError, "Indices contain negative"):
tensor_format.locate_tensor_element(out, [-1, 2])
with self.assertRaisesRegex(ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [0])
def testLocateTensorElement2DNoEllipsisWithNumericSummary(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, "a", include_numeric_summary=True)
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self,
["Tensor \"a\":",
"",
"Numeric summary:",
"| 0 + | total |",
"| 1 15 | 16 |",
"| min max mean std |"],
out.lines[:6])
cli_test_utils.assert_array_lines_close(
self, [0.0, 0.9375, 0.46875, 0.28811076429], out.lines[6:7])
cli_test_utils.assert_array_lines_close(self, a, out.lines[8:])
self._checkTensorElementLocations(out, a)
with self.assertRaisesRegex(ValueError, "Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [1, 4])
with self.assertRaisesRegex(ValueError, "Indices contain negative"):
tensor_format.locate_tensor_element(out, [-1, 2])
with self.assertRaisesRegex(ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [0])
def testLocateTensorElement3DWithEllipses(self):
a = (np.arange(11 * 11 * 11) + 1000).reshape([11, 11, 11]).astype(np.int32)
out = tensor_format.format_tensor(
a, "a", False, np_printoptions={"threshold": 100, "edgeitems": 2})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
actual_row_0_0_0, actual_col_0_0_0 = self._findFirst(out.lines, "1000")
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [0, 0, 0])
self.assertFalse(is_omitted)
self.assertEqual(actual_row_0_0_0, row)
self.assertEqual(actual_col_0_0_0, start_col)
self.assertEqual(actual_col_0_0_0 + 4, end_col)
actual_row_0_0_10, _ = self._findFirst(out.lines, "1010")
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [0, 0, 10])
self.assertFalse(is_omitted)
self.assertEqual(actual_row_0_0_10, row)
self.assertIsNone(start_col) # Passes ellipsis.
self.assertIsNone(end_col)
actual_row_0_1_0, actual_col_0_1_0 = self._findFirst(out.lines, "1011")
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [0, 1, 0])
self.assertFalse(is_omitted)
self.assertEqual(actual_row_0_1_0, row)
self.assertEqual(actual_col_0_1_0, start_col)
self.assertEqual(actual_col_0_1_0 + 4, end_col)
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [0, 2, 0])
self.assertTrue(is_omitted) # In omitted line.
self.assertIsNone(start_col)
self.assertIsNone(end_col)
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [0, 2, 10])
self.assertTrue(is_omitted) # In omitted line.
self.assertIsNone(start_col)
self.assertIsNone(end_col)
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [0, 8, 10])
self.assertTrue(is_omitted) # In omitted line.
self.assertIsNone(start_col)
self.assertIsNone(end_col)
actual_row_0_10_1, actual_col_0_10_1 = self._findFirst(out.lines, "1111")
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [0, 10, 1])
self.assertFalse(is_omitted)
self.assertEqual(actual_row_0_10_1, row)
self.assertEqual(actual_col_0_10_1, start_col)
self.assertEqual(actual_col_0_10_1 + 4, end_col)
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [5, 1, 1])
self.assertTrue(is_omitted) # In omitted line.
self.assertIsNone(start_col)
self.assertIsNone(end_col)
actual_row_10_10_10, _ = self._findFirst(out.lines, "2330")
is_omitted, row, start_col, end_col = tensor_format.locate_tensor_element(
out, [10, 10, 10])
self.assertFalse(is_omitted)
self.assertEqual(actual_row_10_10_10, row)
self.assertIsNone(start_col) # Past ellipsis.
self.assertIsNone(end_col)
with self.assertRaisesRegex(ValueError, "Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [11, 5, 5])
with self.assertRaisesRegex(ValueError, "Indices contain negative"):
tensor_format.locate_tensor_element(out, [-1, 5, 5])
with self.assertRaisesRegex(ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [5, 5])
def testLocateTensorElement3DWithEllipsesBatchMode(self):
a = (np.arange(11 * 11 * 11) + 1000).reshape([11, 11, 11]).astype(np.int32)
out = tensor_format.format_tensor(
a, "a", False, np_printoptions={"threshold": 100,
"edgeitems": 2})
cli_test_utils.assert_lines_equal_ignoring_whitespace(
self, ["Tensor \"a\":", ""], out.lines[:2])
self.assertEqual(repr(a).split("\n"), out.lines[2:])
actual_row_0_0_0, actual_col_0_0_0 = self._findFirst(out.lines, "1000")
actual_row_0_0_10, _ = self._findFirst(out.lines, "1010")
actual_row_10_10_10, _ = self._findFirst(out.lines, "2330")
(are_omitted, rows, start_cols,
end_cols) = tensor_format.locate_tensor_element(out, [[0, 0, 0]])
self.assertEqual([False], are_omitted)
self.assertEqual([actual_row_0_0_0], rows)
self.assertEqual([actual_col_0_0_0], start_cols)
self.assertEqual([actual_col_0_0_0 + 4], end_cols)
(are_omitted, rows, start_cols,
end_cols) = tensor_format.locate_tensor_element(out,
[[0, 0, 0], [0, 0, 10]])
self.assertEqual([False, False], are_omitted)
self.assertEqual([actual_row_0_0_0, actual_row_0_0_10], rows)
self.assertEqual([actual_col_0_0_0, None], start_cols)
self.assertEqual([actual_col_0_0_0 + 4, None], end_cols)
(are_omitted, rows, start_cols,
end_cols) = tensor_format.locate_tensor_element(out,
[[0, 0, 0], [0, 2, 0]])
self.assertEqual([False, True], are_omitted)
self.assertEqual([2, 4], rows)
self.assertEqual(2, len(start_cols))
self.assertEqual(2, len(end_cols))
(are_omitted, rows, start_cols,
end_cols) = tensor_format.locate_tensor_element(out,
[[0, 0, 0], [10, 10, 10]])
self.assertEqual([False, False], are_omitted)
self.assertEqual([actual_row_0_0_0, actual_row_10_10_10], rows)
self.assertEqual([actual_col_0_0_0, None], start_cols)
self.assertEqual([actual_col_0_0_0 + 4, None], end_cols)
def testLocateTensorElementAnnotationsUnavailable(self):
tensor_proto = tensor_pb2.TensorProto(
dtype=types_pb2.DataType.Value("DT_FLOAT"),
tensor_shape=tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)]))
out = tensor_format.format_tensor(
debug_data.InconvertibleTensorProto(tensor_proto, False), "a")
self.assertEqual(["Tensor \"a\":", "", "Uninitialized tensor:"],
out.lines[:3])
with self.assertRaisesRegex(
AttributeError, "tensor_metadata is not available in annotations"):
tensor_format.locate_tensor_element(out, [0])
| RichTextLinesTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes1.py | {
"start": 207,
"end": 245
} | class ____:
C: type[A]
app = B()
| B |
python | Textualize__textual | src/textual/canvas.py | {
"start": 3309,
"end": 3980
} | class ____(Primitive):
"""A rectangle."""
origin: Offset
width: int
height: int
color: Color
line_type: CanvasLineType = "thin"
def render(self, canvas: Canvas) -> None:
origin = self.origin
width = self.width
height = self.height
color = self.color
line_type = self.line_type
HorizontalLine(origin, width, color, line_type).render(canvas)
HorizontalLine(origin + (0, height - 1), width, color, line_type).render(canvas)
VerticalLine(origin, height, color, line_type).render(canvas)
VerticalLine(origin + (width - 1, 0), height, color, line_type).render(canvas)
| Rectangle |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictReadOnly2.py | {
"start": 2376,
"end": 2502
} | class ____(TD6):
# This should generate an error because Literal[3] is
# not the same type as int.
a: Literal[3]
| TD7 |
python | kubernetes-client__python | kubernetes/base/dynamic/resource.py | {
"start": 10514,
"end": 13332
} | class ____(object):
""" A parsed instance of an API resource. It exists solely to
ease interaction with API objects by allowing attributes to
be accessed with '.' notation.
"""
def __init__(self, client, instance):
self.client = client
# If we have a list of resources, then set the apiVersion and kind of
# each resource in 'items'
kind = instance['kind']
if kind.endswith('List') and 'items' in instance:
kind = instance['kind'][:-4]
if not instance['items']:
instance['items'] = []
for item in instance['items']:
if 'apiVersion' not in item:
item['apiVersion'] = instance['apiVersion']
if 'kind' not in item:
item['kind'] = kind
self.attributes = self.__deserialize(instance)
self.__initialised = True
def __deserialize(self, field):
if isinstance(field, dict):
return ResourceField(params={
k: self.__deserialize(v) for k, v in field.items()
})
elif isinstance(field, (list, tuple)):
return [self.__deserialize(item) for item in field]
else:
return field
def __serialize(self, field):
if isinstance(field, ResourceField):
return {
k: self.__serialize(v) for k, v in field.__dict__.items()
}
elif isinstance(field, (list, tuple)):
return [self.__serialize(item) for item in field]
elif isinstance(field, ResourceInstance):
return field.to_dict()
else:
return field
def to_dict(self):
return self.__serialize(self.attributes)
def to_str(self):
return repr(self)
def __repr__(self):
return "ResourceInstance[{}]:\n {}".format(
self.attributes.kind,
' '.join(yaml.safe_dump(self.to_dict()).splitlines(True))
)
def __getattr__(self, name):
if not '_ResourceInstance__initialised' in self.__dict__:
return super(ResourceInstance, self).__getattr__(name)
return getattr(self.attributes, name)
def __setattr__(self, name, value):
if not '_ResourceInstance__initialised' in self.__dict__:
return super(ResourceInstance, self).__setattr__(name, value)
elif name in self.__dict__:
return super(ResourceInstance, self).__setattr__(name, value)
else:
self.attributes[name] = value
def __getitem__(self, name):
return self.attributes[name]
def __setitem__(self, name, value):
self.attributes[name] = value
def __dir__(self):
return dir(type(self)) + list(self.attributes.__dict__.keys())
| ResourceInstance |
python | huggingface__transformers | tests/models/dpr/test_modeling_dpr.py | {
"start": 1081,
"end": 6660
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=False,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
projection_dim=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.projection_dim = projection_dim
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return DPRConfig(
projection_dim=self.projection_dim,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
def create_and_check_context_encoder(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DPRContextEncoder(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
def create_and_check_question_encoder(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DPRQuestionEncoder(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
def create_and_check_reader(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = DPRReader(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
@require_torch
| DPRModelTester |
python | pypa__warehouse | tests/unit/email/test_init.py | {
"start": 143342,
"end": 147155
} | class ____:
@pytest.fixture
def _team(self, pyramid_user):
self.user = UserFactory.create()
EmailFactory.create(user=self.user, verified=True)
self.submitter = pyramid_user
self.team = TeamFactory.create(name="Example Team")
self.project_name = "exampleproject"
self.role = "Admin"
@pytest.mark.usefixtures("_team")
@pytest.mark.parametrize(
("email_template_name", "send_team_collaborator_email"),
[
("added-as-team-collaborator", email.send_added_as_team_collaborator_email),
(
"removed-as-team-collaborator",
email.send_removed_as_team_collaborator_email,
),
(
"role-changed-as-team-collaborator",
email.send_role_changed_as_team_collaborator_email,
),
("team-collaborator-added", email.send_team_collaborator_added_email),
("team-collaborator-removed", email.send_team_collaborator_removed_email),
(
"team-collaborator-role-changed",
email.send_team_collaborator_role_changed_email,
),
],
)
def test_send_team_collaborator_email(
self,
db_request,
make_email_renderers,
send_email,
email_template_name,
send_team_collaborator_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
email_template_name
)
if "removed" in email_template_name:
result = send_team_collaborator_email(
db_request,
self.user,
team=self.team,
submitter=self.submitter,
project_name=self.project_name,
)
else:
result = send_team_collaborator_email(
db_request,
self.user,
team=self.team,
submitter=self.submitter,
project_name=self.project_name,
role=self.role,
)
if "removed" in email_template_name:
assert result == {
"team_name": self.team.name,
"project": self.project_name,
"submitter": self.submitter.username,
}
else:
assert result == {
"team_name": self.team.name,
"project": self.project_name,
"submitter": self.submitter.username,
"role": self.role,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.user.name} <{self.user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.user.email,
"subject": subject_renderer.string_response,
"redact_ip": True,
},
},
)
]
| TestTeamCollaboratorEmails |
python | great-expectations__great_expectations | great_expectations/render/exceptions.py | {
"start": 107,
"end": 181
} | class ____(GreatExpectationsTypeError):
pass
| InvalidRenderedContentError |
python | lxml__lxml | test.py | {
"start": 11065,
"end": 14482
} | class ____(TextTestResult):
"""Customised TestResult.
It can show a progress bar, and displays tracebacks for errors and failures
as soon as they happen, in addition to listing them all at the end.
"""
__super = TextTestResult
__super_init = __super.__init__
__super_startTest = __super.startTest
__super_stopTest = __super.stopTest
__super_printErrors = __super.printErrors
def __init__(self, stream, descriptions, verbosity, count, cfg, hooks):
self.__super_init(stream, descriptions, verbosity)
self.count = count
self.cfg = cfg
self.hooks = hooks
if cfg.progress:
self.dots = False
self._lastWidth = 0
self._maxWidth = cfg.screen_width - len("xxxx/xxxx (xxx.x%): ") - 1
def startTest(self, test):
if self.cfg.progress:
# verbosity == 0: 'xxxx/xxxx (xxx.x%)'
# verbosity == 1: 'xxxx/xxxx (xxx.x%): test name'
# verbosity >= 2: 'xxxx/xxxx (xxx.x%): test name ... ok'
n = self.testsRun + 1
self.stream.write("\r%4d" % n)
if self.count:
self.stream.write("/%d (%5.1f%%)"
% (self.count, n * 100.0 / self.count))
if self.showAll: # self.cfg.verbosity == 1
self.stream.write(": ")
elif self.cfg.verbosity:
name = self.getShortDescription(test)
width = len(name)
if width < self._lastWidth:
name += " " * (self._lastWidth - width)
self.stream.write(": %s" % name)
self._lastWidth = width
self.stream.flush()
self.__super_startTest(test)
for hook in self.hooks:
hook.startTest(test)
def stopTest(self, test):
for hook in self.hooks:
hook.stopTest(test)
self.__super_stopTest(test)
def getShortDescription(self, test):
s = self.getDescription(test)
if len(s) > self._maxWidth:
# s is 'testname (package.module.class)'
# try to shorten it to 'testname (...age.module.class)'
# if it is still too long, shorten it to 'testnam...'
# limit case is 'testname (...)'
pos = s.find(" (")
if pos + len(" (...)") > self._maxWidth:
s = s[:self._maxWidth - 3] + "..."
else:
s = "%s...%s" % (s[:pos + 2], s[pos + 5 - self._maxWidth:])
return s
def printErrors(self):
if self.cfg.progress and not (self.dots or self.showAll):
self.stream.writeln()
self.__super_printErrors()
def formatError(self, err):
return "".join(traceback.format_exception(*err))
def printTraceback(self, kind, test, err):
self.stream.writeln()
self.stream.writeln()
self.stream.writeln("%s: %s" % (kind, test))
self.stream.writeln(self.formatError(err))
self.stream.writeln()
def addFailure(self, test, err):
if self.cfg.immediate_errors:
self.printTraceback("FAIL", test, err)
self.failures.append((test, self.formatError(err)))
def addError(self, test, err):
if self.cfg.immediate_errors:
self.printTraceback("ERROR", test, err)
self.errors.append((test, self.formatError(err)))
| CustomTestResult |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 58635,
"end": 59487
} | class ____(GenericFunction[int]):
r"""The ANSI COUNT aggregate function. With no arguments,
emits COUNT \*.
E.g.::
from sqlalchemy import func
from sqlalchemy import select
from sqlalchemy import table, column
my_table = table("some_table", column("id"))
stmt = select(func.count()).select_from(my_table)
Executing ``stmt`` would emit:
.. sourcecode:: sql
SELECT count(*) AS count_1
FROM some_table
"""
type = sqltypes.Integer()
inherit_cache = True
def __init__(
self,
expression: Union[
_ColumnExpressionArgument[Any], _StarOrOne, None
] = None,
**kwargs: Any,
) -> None:
if expression is None:
expression = literal_column("*")
super().__init__(expression, **kwargs)
| count |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_tensor.py | {
"start": 2837,
"end": 95635
} | class ____(
composite_tensor.CompositeTensor,
internal_types.NativeObject,
internal_types.RaggedTensor,
):
"""Represents a ragged tensor.
A `RaggedTensor` is a tensor with one or more *ragged dimensions*, which are
dimensions whose slices may have different lengths. For example, the inner
(column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged,
since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths.
Dimensions whose slices all have the same length are called *uniform
dimensions*. The outermost dimension of a `RaggedTensor` is always uniform,
since it consists of a single slice (and so there is no possibility for
differing slice lengths).
The total number of dimensions in a `RaggedTensor` is called its *rank*,
and the number of ragged dimensions in a `RaggedTensor` is called its
*ragged-rank*. A `RaggedTensor`'s ragged-rank is fixed at graph creation
time: it can't depend on the runtime values of `Tensor`s, and can't vary
dynamically for different session runs.
Note that the `__init__` constructor is private. Please use one of the
following methods to construct a `RaggedTensor`:
* `tf.RaggedTensor.from_row_lengths`
* `tf.RaggedTensor.from_value_rowids`
* `tf.RaggedTensor.from_row_splits`
* `tf.RaggedTensor.from_row_starts`
* `tf.RaggedTensor.from_row_limits`
* `tf.RaggedTensor.from_nested_row_splits`
* `tf.RaggedTensor.from_nested_row_lengths`
* `tf.RaggedTensor.from_nested_value_rowids`
### Potentially Ragged Tensors
Many ops support both `Tensor`s and `RaggedTensor`s
(see [tf.ragged](https://www.tensorflow.org/api_docs/python/tf/ragged) for a
full listing). The term "potentially ragged tensor" may be used to refer to a
tensor that might be either a `Tensor` or a `RaggedTensor`. The ragged-rank
of a `Tensor` is zero.
### Documenting RaggedTensor Shapes
When documenting the shape of a RaggedTensor, ragged dimensions can be
indicated by enclosing them in parentheses. For example, the shape of
a 3-D `RaggedTensor` that stores the fixed-size word embedding for each
word in a sentence, for each sentence in a batch, could be written as
`[num_sentences, (num_words), embedding_size]`. The parentheses around
`(num_words)` indicate that dimension is ragged, and that the length
of each element list in that dimension may vary for each item.
### Component Tensors
Internally, a `RaggedTensor` consists of a concatenated list of values that
are partitioned into variable-length rows. In particular, each `RaggedTensor`
consists of:
* A `values` tensor, which concatenates the variable-length rows into a
flattened list. For example, the `values` tensor for
`[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is `[3, 1, 4, 1, 5, 9, 2, 6]`.
* A `row_splits` vector, which indicates how those flattened values are
divided into rows. In particular, the values for row `rt[i]` are stored
in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.
Example:
>>> print(tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
### Alternative Row-Partitioning Schemes
In addition to `row_splits`, ragged tensors provide support for five other
row-partitioning schemes:
* `row_lengths`: a vector with shape `[nrows]`, which specifies the length
of each row.
* `value_rowids` and `nrows`: `value_rowids` is a vector with shape
`[nvals]`, corresponding one-to-one with `values`, which specifies
each value's row index. In particular, the row `rt[row]` consists of the
values `rt.values[j]` where `value_rowids[j]==row`. `nrows` is an
integer scalar that specifies the number of rows in the
`RaggedTensor`. (`nrows` is used to indicate trailing empty rows.)
* `row_starts`: a vector with shape `[nrows]`, which specifies the start
offset of each row. Equivalent to `row_splits[:-1]`.
* `row_limits`: a vector with shape `[nrows]`, which specifies the stop
offset of each row. Equivalent to `row_splits[1:]`.
* `uniform_row_length`: A scalar tensor, specifying the length of every
row. This row-partitioning scheme may only be used if all rows have
the same length.
Example: The following ragged tensors are equivalent, and all represent the
nested list `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]`.
>>> values = [3, 1, 4, 1, 5, 9, 2, 6]
>>> RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
>>> RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
>>> RaggedTensor.from_value_rowids(
... values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
>>> RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8])
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
>>> RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8])
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
>>> RaggedTensor.from_uniform_row_length(values, uniform_row_length=2)
<tf.RaggedTensor [[3, 1], [4, 1], [5, 9], [2, 6]]>
### Multiple Ragged Dimensions
`RaggedTensor`s with multiple ragged dimensions can be defined by using
a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor`
adds a single ragged dimension.
>>> inner_rt = RaggedTensor.from_row_splits( # =rt1 from above
... values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
>>> outer_rt = RaggedTensor.from_row_splits(
... values=inner_rt, row_splits=[0, 3, 3, 5])
>>> print(outer_rt.to_list())
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
>>> print(outer_rt.ragged_rank)
2
The factory function `RaggedTensor.from_nested_row_splits` may be used to
construct a `RaggedTensor` with multiple ragged dimensions directly, by
providing a list of `row_splits` tensors:
>>> RaggedTensor.from_nested_row_splits(
... flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
... nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list()
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
### Uniform Inner Dimensions
`RaggedTensor`s with uniform inner dimensions can be defined
by using a multidimensional `Tensor` for `values`.
>>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3], tf.int32),
... row_splits=[0, 2, 5])
>>> print(rt.to_list())
[[[1, 1, 1], [1, 1, 1]],
[[1, 1, 1], [1, 1, 1], [1, 1, 1]]]
>>> print(rt.shape)
(2, None, 3)
### Uniform Outer Dimensions
`RaggedTensor`s with uniform outer dimensions can be defined by using
one or more `RaggedTensor` with a `uniform_row_length` row-partitioning
tensor. For example, a `RaggedTensor` with shape `[2, 2, None]` can be
constructed with this method from a `RaggedTensor` values with shape
`[4, None]`:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> print(values.shape)
(4, None)
>>> rt6 = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> print(rt6)
<tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
>>> print(rt6.shape)
(2, 2, None)
Note that `rt6` only contains one ragged dimension (the innermost
dimension). In contrast, if `from_row_splits` is used to construct a similar
`RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions:
>>> rt7 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4])
>>> print(rt7.shape)
(2, None, None)
Uniform and ragged outer dimensions may be interleaved, meaning that a
tensor with any combination of ragged and uniform dimensions may be created.
For example, a RaggedTensor `t4` with shape `[3, None, 4, 8, None, 2]` could
be constructed as follows:
```python
t0 = tf.zeros([1000, 2]) # Shape: [1000, 2]
t1 = RaggedTensor.from_row_lengths(t0, [...]) # [160, None, 2]
t2 = RaggedTensor.from_uniform_row_length(t1, 8) # [20, 8, None, 2]
t3 = RaggedTensor.from_uniform_row_length(t2, 4) # [5, 4, 8, None, 2]
t4 = RaggedTensor.from_row_lengths(t3, [...]) # [3, None, 4, 8, None, 2]
```
"""
# =============================================================================
# Constructor (private)
# =============================================================================
@doc_controls.do_not_generate_docs
def __init__(self, values, row_partition, internal=False):
"""Creates a `RaggedTensor` with a specified partitioning for `values`.
This constructor is private -- please use one of the following ops to
build `RaggedTensor`s:
* `tf.RaggedTensor.from_row_lengths`
* `tf.RaggedTensor.from_value_rowids`
* `tf.RaggedTensor.from_row_splits`
* `tf.RaggedTensor.from_row_starts`
* `tf.RaggedTensor.from_row_limits`
* `tf.RaggedTensor.from_nested_row_splits`
* `tf.RaggedTensor.from_nested_row_lengths`
* `tf.RaggedTensor.from_nested_value_rowids`
Args:
values: A potentially ragged tensor of any dtype and shape `[nvals, ...]`.
row_partition: A `RowPartition` object, representing the arrangement of
the lists at the top level.
internal: True if the constructor is being called by one of the factory
methods. If false, an exception will be raised.
Raises:
ValueError: If internal = False. Note that this method is intended only
for internal use.
TypeError: If values is not a `RaggedTensor` or `Tensor`, or
row_partition is not a `RowPartition`.
"""
if not internal:
raise ValueError("RaggedTensor constructor is private; please use one "
"of the factory methods instead (e.g., "
"RaggedTensor.from_row_lengths())")
_assert_is_supported_ragged_values_type(values)
if not isinstance(row_partition, RowPartition):
raise TypeError(f"Argument `row_partition` must be a RowPartition. "
f"Received {row_partition}.")
# Validate shapes.
values.shape.with_rank_at_least(1)
if isinstance(values, RaggedTensor):
# pylint: disable=protected-access
assert row_partition.dtype == values._row_partition.dtype
self._values = values
self._row_partition = row_partition
# =============================================================================
# Factory Methods
# =============================================================================
@classmethod
def _from_row_partition(cls, values, row_partition, validate=True):
"""Creates a `RaggedTensor` with a row partition.
This is used as a way for RaggedTensors to share row partitions.
The outer dimension of values must be equal to `partition.nvals()`.
Args:
values: A potentially ragged tensor.
row_partition: a `RowPartition`: can be shared between tensors.
validate: If true, then use assertions to check that the arguments form a
valid `RaggedTensor`.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If partition.nvals() != _nrows(values)
"""
if not isinstance(row_partition, RowPartition):
raise TypeError(f"Argument `row_partition` must be a RowPartition. "
f"Received {row_partition}.")
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
values, row_partition = cls._convert_values_and_partition(
values, row_partition, "partition")
if row_partition._has_precomputed_value_rowids(): # pylint: disable=protected-access
value_rowids_shape = row_partition.value_rowids().shape
values.shape[:1].assert_is_compatible_with(value_rowids_shape)
if validate:
msg = "Arguments to _from_row_partition do not form a valid RaggedTensor"
nvals = _nrows(values, row_partition.dtype)
checks = [
check_ops.assert_equal(
math_ops.cast(row_partition.nvals(), row_partition.dtype),
nvals,
message=msg),
]
if not isinstance(values, RaggedTensor):
checks.append(check_ops.assert_rank_at_least(values, 1))
row_partition = row_partition._with_dependencies(checks) # pylint: disable=protected-access
return cls(values=values, internal=True, row_partition=row_partition)
@classmethod
@dispatch.add_dispatch_support
def from_value_rowids(cls,
values,
value_rowids,
nrows=None,
name=None,
validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `value_rowids`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [[values[i] for i in range(len(values)) if value_rowids[i] == row]
for row in range(nrows)]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds
one-to-one with `values`, and specifies each value's row index. Must be
nonnegative, and must be sorted in ascending order.
nrows: An integer scalar specifying the number of rows. This should be
specified if the `RaggedTensor` may containing empty training rows. Must
be greater than `value_rowids[-1]` (or zero if `value_rowids` is empty).
Defaults to `value_rowids[-1] + 1` (or zero if `value_rowids` is empty).
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If `nrows` is incompatible with `value_rowids`.
#### Example:
>>> print(tf.RaggedTensor.from_value_rowids(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... value_rowids=[0, 0, 0, 0, 2, 2, 2, 3],
... nrows=5))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
with ops.name_scope(name, "RaggedFromValueRowIds",
[values, value_rowids, nrows]):
row_partition = RowPartition.from_value_rowids(
value_rowids=value_rowids,
nrows=nrows,
validate=validate,
dtype_hint=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_splits(cls, values, row_splits, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_splits`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [values[row_splits[i]:row_splits[i + 1]]
for i in range(len(row_splits) - 1)]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_splits: A 1-D integer tensor with shape `[nrows+1]`. Must not be
empty, and must be sorted in ascending order. `row_splits[0]` must be
zero and `row_splits[-1]` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If `row_splits` is an empty list.
#### Example:
>>> print(tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
with ops.name_scope(name, "RaggedFromRowSplits", [values, row_splits]):
row_partition = RowPartition.from_row_splits(
row_splits=row_splits,
validate=validate,
dtype_hint=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_lengths(cls, values, row_lengths, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_lengths`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [[values.pop(0) for i in range(length)]
for length in row_lengths]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative. `sum(row_lengths)` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_lengths(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_lengths=[4, 0, 3, 1, 0]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
with ops.name_scope(name, "RaggedFromRowLengths", [values, row_lengths]):
row_partition = RowPartition.from_row_lengths(
row_lengths=row_lengths,
validate=validate,
dtype_hint=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_starts(cls, values, row_starts, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_starts`.
Equivalent to: `from_row_splits(values, concat([row_starts, nvals]))`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_starts: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative and sorted in ascending order. If `nrows>0`, then
`row_starts[0]` must be zero.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_starts(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_starts=[0, 4, 4, 7, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
with ops.name_scope(name, "RaggedFromRowStarts", [values, row_starts]):
values = _convert_to_ragged_tensor_values(values)
row_partition = RowPartition.from_row_starts(
row_starts=row_starts,
nvals=_nrows(values),
validate=validate,
dtype_hint=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_limits(cls, values, row_limits, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_limits`.
Equivalent to: `from_row_splits(values, concat([0, row_limits]))`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in
ascending order. If `nrows>0`, then `row_limits[-1]` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_limits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_limits=[4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
with ops.name_scope(name, "RaggedFromRowLimits", [values, row_limits]):
values = _convert_to_ragged_tensor_values(values)
row_partition = RowPartition.from_row_limits(
row_limits=row_limits,
validate=validate,
dtype_hint=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_uniform_row_length(cls,
values,
uniform_row_length,
nrows=None,
validate=True,
name=None):
"""Creates a `RaggedTensor` with rows partitioned by `uniform_row_length`.
This method can be used to create `RaggedTensor`s with multiple uniform
outer dimensions. For example, a `RaggedTensor` with shape `[2, 2, None]`
can be constructed with this method from a `RaggedTensor` values with shape
`[4, None]`:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> print(values.shape)
(4, None)
>>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> print(rt1)
<tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
>>> print(rt1.shape)
(2, 2, None)
Note that `rt1` only contains one ragged dimension (the innermost
dimension). In contrast, if `from_row_splits` is used to construct a similar
`RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions:
>>> rt2 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4])
>>> print(rt2.shape)
(2, None, None)
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
uniform_row_length: A scalar integer tensor. Must be nonnegative. The
size of the outer axis of `values` must be evenly divisible by
`uniform_row_length`.
nrows: The number of rows in the constructed RaggedTensor. If not
specified, then it defaults to `nvals/uniform_row_length` (or `0` if
`uniform_row_length==0`). `nrows` only needs to be specified if
`uniform_row_length` might be zero. `uniform_row_length*nrows` must be
`nvals`.
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
name: A name prefix for the RaggedTensor (optional).
Returns:
A `RaggedTensor` that corresponds with the python list defined by:
```python
result = [[values.pop(0) for i in range(uniform_row_length)]
for _ in range(nrows)]
```
`result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
with ops.name_scope(name, "RaggedFromUniformRowLength",
[values, uniform_row_length, nrows]):
values = _convert_to_ragged_tensor_values(values)
uniform_row_length = _convert_row_partition(
uniform_row_length, "UniformRowLength",
_get_optional_partition_dtype(values))
nvals = _nvals_uniform_row_length(values, uniform_row_length)
row_partition = RowPartition.from_uniform_row_length(
uniform_row_length=uniform_row_length,
nvals=nvals,
nrows=nrows,
validate=validate,
dtype_hint=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_nested_value_rowids(cls,
flat_values,
nested_value_rowids,
nested_nrows=None,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `value_rowids` tensors.
Equivalent to:
```python
result = flat_values
for (rowids, nrows) in reversed(zip(nested_value_rowids, nested_nrows)):
result = from_value_rowids(result, rowids, nrows)
```
Args:
flat_values: A potentially ragged tensor.
nested_value_rowids: A list of 1-D integer tensors. The `i`th tensor is
used as the `value_rowids` for the `i`th ragged dimension.
nested_nrows: A list of integer scalars. The `i`th scalar is used as the
`nrows` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_value_rowids` is empty).
Raises:
ValueError: If `len(nested_values_rowids) != len(nested_nrows)`.
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
if isinstance(nested_value_rowids, tensor_lib.Tensor):
raise TypeError(f"Argument `nested_value_rowids` must be a list of "
f"Tensors. Received {nested_value_rowids}.")
if nested_nrows is None:
nested_nrows = [None] * len(nested_value_rowids)
else:
if isinstance(nested_nrows, tensor_lib.Tensor):
raise TypeError(f"Argument `nested_nrows` must be a list of "
f"Tensors. Received {nested_nrows}.")
if len(nested_nrows) != len(nested_value_rowids):
raise ValueError(
f"Argument `nested_nrows` must have the same length as "
f"argument `nested_value_rowids`. len(nested_nrows) = "
f"{len(nested_nrows)} vs. len(nested_values_rowids) = "
f"{len(nested_value_rowids)}.")
with ops.name_scope(name, "RaggedFromNestedValueRowIds", [flat_values] +
list(nested_value_rowids) + list(nested_nrows)):
result = flat_values
for value_rowids, nrows in reversed(
list(zip(nested_value_rowids, nested_nrows))):
result = cls.from_value_rowids(
result, value_rowids, nrows, validate=validate)
return result
@classmethod
@dispatch.add_dispatch_support
def from_nested_row_splits(cls,
flat_values,
nested_row_splits,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `row_splits` tensors.
Equivalent to:
```python
result = flat_values
for row_splits in reversed(nested_row_splits):
result = from_row_splits(result, row_splits)
```
Args:
flat_values: A potentially ragged tensor.
nested_row_splits: A list of 1-D integer tensors. The `i`th tensor is
used as the `row_splits` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_row_splits` is empty).
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
if isinstance(nested_row_splits, tensor_lib.Tensor):
raise TypeError(f"Argument `nested_row_splits` must be a list of "
f"Tensors. Received {nested_row_splits}.")
with ops.name_scope(name, "RaggedFromNestedRowSplits",
[flat_values] + list(nested_row_splits)):
result = flat_values
for splits in reversed(nested_row_splits):
result = cls.from_row_splits(result, splits, validate=validate)
return result
@classmethod
@dispatch.add_dispatch_support
def from_nested_row_lengths(cls,
flat_values,
nested_row_lengths,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `row_lengths` tensors.
Equivalent to:
```python
result = flat_values
for row_lengths in reversed(nested_row_lengths):
result = from_row_lengths(result, row_lengths)
```
Args:
flat_values: A potentially ragged tensor.
nested_row_lengths: A list of 1-D integer tensors. The `i`th tensor is
used as the `row_lengths` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty).
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
if isinstance(nested_row_lengths, tensor_lib.Tensor):
raise TypeError(f"Argument `nested_row_lengths` must be a list of "
f"Tensors. Received {nested_row_lengths}.")
with ops.name_scope(name, "RaggedFromNestedRowlengths",
[flat_values] + list(nested_row_lengths)):
result = flat_values
for lengths in reversed(nested_row_lengths):
result = cls.from_row_lengths(result, lengths, validate=validate)
return result
@classmethod
def _from_nested_row_partitions(cls,
flat_values,
nested_row_partitions,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of row partitions.
Equivalent to:
```python
result = flat_values
for row_partition in reversed(nested_row_partitions):
result = _from_row_partition(result, row_partition)
```
Args:
flat_values: A potentially ragged tensor.
nested_row_partitions: A list of row partitions. The `i`th element is
used as the row partition for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty).
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
if isinstance(nested_row_partitions, RowPartition):
raise TypeError(f"Argument `nested_row_partitions` must be a list of "
f"RowPartitions. Received {nested_row_partitions}.")
if isinstance(nested_row_partitions, tensor_lib.Tensor):
raise TypeError(f"Argument `nested_row_partitions` must be a list of "
f"RowPartitions. Received {nested_row_partitions}.")
with ops.name_scope(name, "RaggedFromNestedRowPartitions",
[flat_values] + list(nested_row_partitions)):
result = flat_values
for partition in reversed(nested_row_partitions):
result = cls._from_row_partition(result, partition, validate=validate)
return result
@classmethod
def _convert_values_and_partition(cls, values, row_partition, name):
"""Converts `values` and `partition` to Tensors.
If `values` is a `RaggedTensor`, then converts `values` and `partition`
to have compatible row-partitioning dtypes. In particular, if any of the
row partitioning tensors are `int64`, then all of the other row
partitioning tensors will be cast to `int64` (if auto_cast_partition_dtype()
is true) or an error will be raised (if auto_cast_partition_dtype() is
false).
Args:
values: The `values` for the `RaggedTensor` being constructed.
row_partition: A RowPartition object for the `RaggedTensor` being
constructed.
name: The name of the RowPartition object.
Returns:
A tuple (values, partition).
"""
if not isinstance(row_partition, RowPartition):
raise TypeError(f"Argument `row_partition` must be a RowPartition. "
f"Received {row_partition}.")
if isinstance(values, RaggedTensor):
# pylint: disable=protected-access
if values._row_partition.dtype != row_partition.dtype:
if not ragged_config.auto_cast_partition_dtype():
# pylint: disable=protected-access
# TODO(edloper): get rid of the `name` parameter.
raise ValueError(
f"Argument `row_partition` of RaggedTensor with name: {name} "
f"must have same dtype as Argument `values`. "
f"({row_partition.dtype} vs. {values._row_partition.dtype}).")
values = values.with_row_splits_dtype(row_partition.dtype)
else:
values = _convert_to_ragged_tensor_values(values)
return (values, row_partition)
# =============================================================================
# Accessors
# =============================================================================
@property
def dtype(self):
"""The `DType` of values in this tensor."""
return self._values.dtype
@property
def shape(self):
"""The statically known shape of this ragged tensor.
Returns:
A `TensorShape` containing the statically known shape of this ragged
tensor. Ragged dimensions have a size of `None`.
Examples:
>>> tf.ragged.constant([[0], [1, 2]]).shape
TensorShape([2, None])
>>> tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape
TensorShape([2, None, 2])
"""
nrows = self._row_partition.static_nrows
ncols = self._row_partition.static_uniform_row_length
value_shape = self._values.shape[1:]
return tensor_shape.TensorShape([nrows, ncols]).concatenate(value_shape)
def get_shape(self) -> tensor_shape.TensorShape:
"""The statically known shape of this ragged tensor.
Returns:
A `TensorShape` containing the statically known shape of this ragged
tensor. Ragged dimensions have a size of `None`.
Alias for `shape` property.
Examples:
>>> tf.ragged.constant([[0], [1, 2]]).get_shape()
TensorShape([2, None])
>>> tf.ragged.constant(
... [[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).get_shape()
TensorShape([2, None, 2])
"""
return self.shape
@property
def ragged_rank(self):
"""The number of times the RaggedTensor's flat_values is partitioned.
Examples:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> values.ragged_rank
1
>>> rt = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> rt.ragged_rank
2
Returns:
A Python `int` indicating the number of times the underlying `flat_values`
Tensor has been partitioned to add a new dimension.
I.e., `tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank`.
"""
values_is_ragged = isinstance(self._values, RaggedTensor)
return self._values.ragged_rank + 1 if values_is_ragged else 1
@property
def values(self):
"""The concatenated rows for this ragged tensor.
`rt.values` is a potentially ragged tensor formed by flattening the two
outermost dimensions of `rt` into a single dimension.
`rt.values.shape = [nvals] + rt.shape[2:]` (where `nvals` is the
number of items in the outer two dimensions of `rt`).
`rt.ragged_rank = self.ragged_rank - 1`
Returns:
A potentially ragged tensor.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
"""
return self._values
@property
def _nested_row_partitions(self):
"""Returns the row partitions for this `RaggedTensor`."""
partitions = [self._row_partition]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
# pylint: disable=protected-access
partitions.append(rt_values._row_partition)
rt_values = rt_values.values
return tuple(partitions)
@property
def row_splits(self):
"""The row-split indices for this ragged tensor's `values`.
`rt.row_splits` specifies where the values for each row begin and end in
`rt.values`. In particular, the values for row `rt[i]` are stored in
the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.
Returns:
A 1-D integer `Tensor` with shape `[self.nrows+1]`.
The returned tensor is non-empty, and is sorted in ascending order.
`self.row_splits[0]` is zero, and `self.row_splits[-1]` is equal to
`self.values.shape[0]`.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.row_splits) # indices of row splits in rt.values
tf.Tensor([0 4 4 7 8 8], shape=(6,), dtype=int64)
"""
return self._row_partition.row_splits()
@property
def uniform_row_length(self):
"""The length of each row in this ragged tensor, or None if rows are ragged.
>>> rt1 = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> print(rt1.uniform_row_length) # rows are ragged.
None
>>> rt2 = tf.RaggedTensor.from_uniform_row_length(
... values=rt1, uniform_row_length=2)
>>> print(rt2)
<tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
>>> print(rt2.uniform_row_length) # rows are not ragged (all have size 2).
tf.Tensor(2, shape=(), dtype=int64)
A RaggedTensor's rows are only considered to be uniform (i.e. non-ragged)
if it can be determined statically (at graph construction time) that the
rows all have the same length.
Returns:
A scalar integer `Tensor`, specifying the length of every row in this
ragged tensor (for ragged tensors whose rows are uniform); or `None`
(for ragged tensors whose rows are ragged).
"""
return self._row_partition.uniform_row_length()
@property
def flat_values(self):
"""The innermost `values` tensor for this ragged tensor.
Concretely, if `rt.values` is a `Tensor`, then `rt.flat_values` is
`rt.values`; otherwise, `rt.flat_values` is `rt.values.flat_values`.
Conceptually, `flat_values` is the tensor formed by flattening the
outermost dimension and all of the ragged dimensions into a single
dimension.
`rt.flat_values.shape = [nvals] + rt.shape[rt.ragged_rank + 1:]`
(where `nvals` is the number of items in the flattened dimensions).
Returns:
A `Tensor`.
#### Example:
>>> rt = tf.ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
>>> print(rt.flat_values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
"""
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_values = rt_values.values
return rt_values
@property
def nested_row_splits(self):
"""A tuple containing the row_splits for all ragged dimensions.
`rt.nested_row_splits` is a tuple containing the `row_splits` tensors for
all ragged dimensions in `rt`, ordered from outermost to innermost. In
particular, `rt.nested_row_splits = (rt.row_splits,) + value_splits` where:
* `value_splits = ()` if `rt.values` is a `Tensor`.
* `value_splits = rt.values.nested_row_splits` otherwise.
Returns:
A `tuple` of 1-D integer `Tensor`s.
#### Example:
>>> rt = tf.ragged.constant(
... [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
>>> for i, splits in enumerate(rt.nested_row_splits):
... print('Splits for dimension %d: %s' % (i+1, splits.numpy()))
Splits for dimension 1: [0 3]
Splits for dimension 2: [0 3 3 5]
Splits for dimension 3: [0 4 4 7 8 8]
"""
rt_nested_splits = [self.row_splits]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_nested_splits.append(rt_values.row_splits)
rt_values = rt_values.values
return tuple(rt_nested_splits)
def value_rowids(self, name=None):
"""Returns the row indices for the `values` in this ragged tensor.
`rt.value_rowids()` corresponds one-to-one with the outermost dimension of
`rt.values`, and specifies the row containing each value. In particular,
the row `rt[row]` consists of the values `rt.values[j]` where
`rt.value_rowids()[j] == row`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer `Tensor` with shape `self.values.shape[:1]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
>>> print(rt.value_rowids()) # corresponds 1:1 with rt.values
tf.Tensor([0 0 0 0 2 2 2 3], shape=(8,), dtype=int64)
"""
with ops.name_scope(name, "RaggedValueRowIds", [self]):
return self._row_partition.value_rowids()
def nested_value_rowids(self, name=None):
"""Returns a tuple containing the value_rowids for all ragged dimensions.
`rt.nested_value_rowids` is a tuple containing the `value_rowids` tensors
for
all ragged dimensions in `rt`, ordered from outermost to innermost. In
particular, `rt.nested_value_rowids = (rt.value_rowids(),) + value_ids`
where:
* `value_ids = ()` if `rt.values` is a `Tensor`.
* `value_ids = rt.values.nested_value_rowids` otherwise.
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A `tuple` of 1-D integer `Tensor`s.
#### Example:
>>> rt = tf.ragged.constant(
... [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
>>> for i, ids in enumerate(rt.nested_value_rowids()):
... print('row ids for dimension %d: %s' % (i+1, ids.numpy()))
row ids for dimension 1: [0 0 0]
row ids for dimension 2: [0 0 0 2 2]
row ids for dimension 3: [0 0 0 0 2 2 2 3]
"""
with ops.name_scope(name, "RaggedNestedValueRowIds", [self]):
rt_nested_ids = [self.value_rowids()]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_nested_ids.append(rt_values.value_rowids())
rt_values = rt_values.values
return tuple(rt_nested_ids)
def nrows(self, out_type=None, name=None):
"""Returns the number of rows in this ragged tensor.
I.e., the size of the outermost dimension of the tensor.
Args:
out_type: `dtype` for the returned tensor. Defaults to
`self.row_splits.dtype`.
name: A name prefix for the returned tensor (optional).
Returns:
A scalar `Tensor` with dtype `out_type`.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.nrows()) # rt has 5 rows.
tf.Tensor(5, shape=(), dtype=int64)
"""
with ops.name_scope(name, "RaggedNRows", [self]):
if out_type is None:
return self._row_partition.nrows()
else:
return math_ops.cast(self._row_partition.nrows(), dtype=out_type)
def row_starts(self, name=None):
"""Returns the start indices for rows in this ragged tensor.
These indices specify where the values for each row begin in
`self.values`. `rt.row_starts()` is equal to `rt.row_splits[:-1]`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer Tensor with shape `[nrows]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
>>> print(rt.row_starts()) # indices of row starts in rt.values
tf.Tensor([0 4 4 7 8], shape=(5,), dtype=int64)
"""
with ops.name_scope(name, "RaggedRowStarts", [self]):
return self._row_partition.row_starts()
def row_limits(self, name=None):
"""Returns the limit indices for rows in this ragged tensor.
These indices specify where the values for each row end in
`self.values`. `rt.row_limits(self)` is equal to `rt.row_splits[:-1]`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer Tensor with shape `[nrows]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
>>> print(rt.row_limits()) # indices of row limits in rt.values
tf.Tensor([4 4 7 8 8], shape=(5,), dtype=int64)
"""
with ops.name_scope(name, "RaggedRowLimits", [self]):
return self._row_partition.row_limits()
  def row_lengths(self, axis=1, name=None):
    """Returns the lengths of the rows in this ragged tensor.

    `rt.row_lengths()[i]` indicates the number of values in the
    `i`th row of `rt`.

    Args:
      axis: An integer constant indicating the axis whose row lengths should be
        returned.
      name: A name prefix for the returned tensor (optional).

    Returns:
      A potentially ragged integer Tensor with shape `self.shape[:axis]`.

    Raises:
      ValueError: If `axis` is out of bounds.

    #### Example:

    >>> rt = tf.ragged.constant(
    ...     [[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []])
    >>> print(rt.row_lengths())  # lengths of rows in rt
    tf.Tensor([2 0 2 1 0], shape=(5,), dtype=int64)
    >>> print(rt.row_lengths(axis=2))  # lengths of axis=2 rows.
    <tf.RaggedTensor [[3, 1], [], [2, 1], [1], []]>
    """
    # Fast paths for the common literal axis values; these delegate directly to
    # the row partition and skip the name_scope / axis normalization below.
    if axis == 0:
      return self._row_partition.nrows()
    if axis == 1:
      return self._row_partition.row_lengths()
    with ops.name_scope(name, "RaggedRowLengths", [self]):
      # Resolves negative axis values (e.g. axis=-1) against self.shape.rank.
      axis = array_ops.get_positive_axis(
          axis, self.shape.rank, ndims_name="rank(self)")
      # axis 0/1 can still be reached here via negative axis arguments.
      if axis == 0:
        return self.nrows()
      elif axis == 1:
        splits = self.row_splits
        return splits[1:] - splits[:-1]
      elif isinstance(self.values, RaggedTensor):
        # Recurse into the nested ragged values, shifting the axis by one.
        return self.with_values(self.values.row_lengths(axis - 1))
      else:
        # `axis` indexes into the uniform inner dimensions: every row at that
        # level has the same length, taken from the values' dynamic shape.
        shape = array_ops.shape(self.values, out_type=self._row_partition.dtype)
        return self.with_values(
            array_ops.ones(shape[:axis - 1], self._row_partition.dtype) *
            shape[axis - 1])
def nested_row_lengths(self, name=None):
"""Returns a tuple containing the row_lengths for all ragged dimensions.
`rt.nested_row_lengths()` is a tuple containing the `row_lengths` tensors
for all ragged dimensions in `rt`, ordered from outermost to innermost.
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A `tuple` of 1-D integer `Tensors`. The length of the tuple is equal to
`self.ragged_rank`.
"""
with ops.name_scope(name, "RaggedNestedRowLengths", [self]):
rt_nested_row_lengths = []
rt = self
while isinstance(rt, RaggedTensor):
rt_nested_row_lengths.append(rt.row_lengths())
rt = rt.values
return tuple(rt_nested_row_lengths)
  def bounding_shape(self, axis=None, name=None, out_type=None):
    """Returns the tight bounding box shape for this `RaggedTensor`.

    Args:
      axis: An integer scalar or vector indicating which axes to return the
        bounding box for.  If not specified, then the full bounding box is
        returned.
      name: A name prefix for the returned tensor (optional).
      out_type: `dtype` for the returned tensor.  Defaults to
        `self.row_splits.dtype`.

    Returns:
      An integer `Tensor` (`dtype=self.row_splits.dtype`).  If `axis` is not
      specified, then `output` is a vector with
      `output.shape=[self.shape.ndims]`.  If `axis` is a scalar, then the
      `output` is a scalar.  If `axis` is a vector, then `output` is a vector,
      where `output[i]` is the bounding size for dimension `axis[i]`.

    #### Example:

    >>> rt = tf.ragged.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]])
    >>> rt.bounding_shape().numpy()
    array([5, 4])
    """
    if out_type is None:
      out_type = self._row_partition.dtype
    else:
      out_type = dtypes.as_dtype(out_type)
    with ops.name_scope(name, "RaggedBoundingBox", [self, axis]):
      nested_splits = self.nested_row_splits
      rt_flat_values = self.flat_values
      # Optimized special cases for when axis=0 or axis=1:
      if isinstance(axis, int):
        if axis == 0:
          # nrows == len(row_splits) - 1.
          return array_ops.shape(nested_splits[0], out_type=out_type)[0] - 1
        elif axis == 1:
          # maximum(..., 0) guards against a RaggedTensor with zero rows,
          # where reduce_max would return the dtype's minimum value.
          result = math_ops.maximum(math_ops.reduce_max(self.row_lengths()), 0)
          if out_type != self._row_partition.dtype:
            result = math_ops.cast(result, out_type)
          return result
      splits_shape = array_ops.shape(self.row_splits, out_type=out_type)
      flat_values_shape = array_ops.shape(rt_flat_values, out_type=out_type)
      # One bounding size per ragged dimension: the nrows, then the max row
      # length at each nested level (again clamped at 0 for empty splits).
      ragged_dimensions = [splits_shape[0] - 1] + [
          math_ops.maximum(math_ops.reduce_max(splits[1:] - splits[:-1]), 0)
          for splits in nested_splits
      ]
      inner_dimensions = flat_values_shape[1:]
      if out_type != self._row_partition.dtype:
        ragged_dimensions = [
            math_ops.cast(d, out_type) for d in ragged_dimensions
        ]
      bbox = array_ops.concat(
          [array_ops_stack.stack(ragged_dimensions), inner_dimensions], axis=0)
      # gather handles both scalar and vector `axis` arguments.
      return bbox if axis is None else array_ops.gather(bbox, axis)
# =============================================================================
# Transformation
# =============================================================================
def with_values(self, new_values):
"""Returns a copy of `self` with `values` replaced by `new_value`.
Preserves cached row-partitioning tensors such as `self.cached_nrows` and
`self.cached_value_rowids` if they have values.
Args:
new_values: Potentially ragged tensor to use as the `values` for the
returned `RaggedTensor`. Must have `rank > 0`, and must have the same
number of rows as `self.values`.
Returns:
A `RaggedTensor`. `result.rank = 1 + new_values.rank`.
`result.ragged_rank = 1 + new_values.ragged_rank`
"""
new_values = _convert_to_ragged_tensor_values(new_values)
new_values.shape.with_rank_at_least(1)
self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1])
if (isinstance(new_values, RaggedTensor) and
self._row_partition.dtype != new_values.row_splits.dtype):
if not ragged_config.auto_cast_partition_dtype():
raise ValueError("self and new_values have mismatched row_splits "
"dtypes; use RaggedTensor.with_row_splits_dtype() to "
"convert them to compatible dtypes.")
new_values = new_values.with_row_splits_dtype(dtypes.int64)
return self.with_row_splits_dtype(dtypes.int64).with_values(new_values)
return RaggedTensor(
values=new_values, row_partition=self._row_partition, internal=True)
def with_flat_values(self, new_values):
"""Returns a copy of `self` with `flat_values` replaced by `new_value`.
Preserves cached row-partitioning tensors such as `self.cached_nrows` and
`self.cached_value_rowids` if they have values.
Args:
new_values: Potentially ragged tensor that should replace
`self.flat_values`. Must have `rank > 0`, and must have the same number
of rows as `self.flat_values`.
Returns:
A `RaggedTensor`.
`result.rank = self.ragged_rank + new_values.rank`.
`result.ragged_rank = self.ragged_rank + new_values.ragged_rank`.
"""
if isinstance(self._values, RaggedTensor):
return self.with_values(self.values.with_flat_values(new_values))
else:
new_values = _convert_to_ragged_tensor_values(new_values)
return self.with_values(new_values)
def with_row_splits_dtype(self, dtype):
"""Returns a copy of this RaggedTensor with the given `row_splits` dtype.
For RaggedTensors with multiple ragged dimensions, the `row_splits` for all
nested `RaggedTensor` objects are cast to the given dtype.
Args:
dtype: The dtype for `row_splits`. One of `tf.int32` or `tf.int64`.
Returns:
A copy of this RaggedTensor, with the `row_splits` cast to the given
type.
"""
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.int32, dtypes.int64):
raise ValueError(f"Argument `row_splits` dtype must be int32 or int64. "
f"Received {dtype}.")
if self._row_partition.dtype == dtype:
return self
current_values = self._values
if isinstance(current_values, RaggedTensor):
return RaggedTensor(
values=current_values.with_row_splits_dtype(dtype),
row_partition=self._row_partition.with_dtype(dtype),
internal=True)
else:
return RaggedTensor(
values=current_values,
row_partition=self._row_partition.with_dtype(dtype),
internal=True)
def merge_dims(self, outer_axis, inner_axis):
"""Merges outer_axis...inner_axis into a single dimension.
Returns a copy of this RaggedTensor with the specified range of dimensions
flattened into a single dimension, with elements in row-major order.
#### Examples:
>>> rt = tf.ragged.constant([[[1, 2], [3]], [[4, 5, 6]]])
>>> print(rt.merge_dims(0, 1))
<tf.RaggedTensor [[1, 2], [3], [4, 5, 6]]>
>>> print(rt.merge_dims(1, 2))
<tf.RaggedTensor [[1, 2, 3], [4, 5, 6]]>
>>> print(rt.merge_dims(0, 2))
tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32)
To mimic the behavior of `np.flatten` (which flattens all dimensions), use
`rt.merge_dims(0, -1)`. To mimic the behavior of `tf.layers.Flatten` (which
flattens all dimensions except the outermost batch dimension), use
`rt.merge_dims(1, -1)`.
Args:
outer_axis: `int`: The first dimension in the range of dimensions to
merge. May be negative if `self.shape.rank` is statically known.
inner_axis: `int`: The last dimension in the range of dimensions to merge.
May be negative if `self.shape.rank` is statically known.
Returns:
A copy of this tensor, with the specified dimensions merged into a
single dimension. The shape of the returned tensor will be
`self.shape[:outer_axis] + [N] + self.shape[inner_axis + 1:]`, where `N`
is the total number of slices in the merged dimensions.
"""
outer_axis = array_ops.get_positive_axis(
outer_axis,
self.shape.rank,
axis_name="outer_axis",
ndims_name="rank(self)")
inner_axis = array_ops.get_positive_axis(
inner_axis,
self.shape.rank,
axis_name="inner_axis",
ndims_name="rank(self)")
if not outer_axis <= inner_axis:
raise ValueError(f"Expected outer_axis ({outer_axis}) to be less than or "
f"equal to inner_axis ({inner_axis}).")
return merge_dims(self, outer_axis, inner_axis)
  def _set_shape(self, shape):
    """Updates the static shape of `self` to be `shape`.

    * If a dimension of `shape` has known rank, and is encoded via
      partitioning, then this will update the corresponding partition to
      define `_uniform_row_length` and `nrows`.
    * If a dimension of `shape` has a known rank, and is encoded as one
      of the `flat_values` dimensions, then `flat_values.set_shape()` will
      be used to update its shape.

    Warning: Using this method to assert an incorrect shape for a RaggedTensor
    (i.e., one that's not consistent with its actual shape) can cause
    segmentation faults and very difficult-to-diagnose behavior.  Only use this
    method if you are certain that the shape is correct.

    Args:
      shape: `tf.TensorShape` specifying the shape for this `RaggedTensor`.
    """
    # TODO(edloper): Refactor this to not directly access private members
    # of RowPartition.
    # pylint: disable=protected-access

    shape = tensor_shape.as_shape(shape)
    if shape.rank is None:
      return  # Nothing to do.

    shape = shape.as_list()

    # Outermost dimension: nrows is encoded as len(row_splits) - 1, so the
    # splits vector of the outer partition has static length shape[0] + 1.
    if shape[0] is not None:
      self._row_partition._row_splits.set_shape(shape[0] + 1)

    # Partitioned (ragged/uniform) dimensions: mutate each RowPartition
    # in place so the static uniform row length and nrows are recorded.
    dtype = self._row_partition.dtype
    for i, partition in enumerate(self._nested_row_partitions):
      size = shape[i + 1]
      if size is not None:
        if partition._uniform_row_length is not None:
          old_row_length = tensor_util.constant_value(
              partition._uniform_row_length)
          if old_row_length is not None:
            if size == old_row_length:
              continue  # already have shape info for this axis.
            else:
              raise ValueError(f"Inconsistent size for axis {i + 1}: "
                               f"{old_row_length} vs. {size}.")
        partition._uniform_row_length = ops.convert_to_tensor(size, dtype)
        if partition._nrows is None:
          partition._nrows = array_ops.size(
              partition._row_splits, out_type=dtype) - 1

    # self.flat_values could be a CompositeTensor and doesn't have set_shape.
    if hasattr(self.flat_values, "set_shape"):
      # Inner dimensions: the leading (flattened) dim stays unknown; only the
      # trailing uniform dims are pinned down.
      flat_shape = tensor_shape.as_shape([None] + shape[self.ragged_rank + 1:])
      self.flat_values.set_shape(flat_shape)
# =============================================================================
# Tensor Type Conversions
# =============================================================================
  @classmethod
  @dispatch.add_dispatch_support
  def from_tensor(cls,
                  tensor,
                  lengths=None,
                  padding=None,
                  ragged_rank=1,
                  name=None,
                  row_splits_dtype=dtypes.int64):
    """Converts a `tf.Tensor` into a `RaggedTensor`.

    The set of absent/default values may be specified using a vector of lengths
    or a padding value (but not both).  If `lengths` is specified, then the
    output tensor will satisfy `output[row] = tensor[row][:lengths[row]]`. If
    'lengths' is a list of lists or tuple of lists, those lists will be used
    as nested row lengths. If `padding` is specified, then any row *suffix*
    consisting entirely of `padding` will be excluded from the returned
    `RaggedTensor`.  If neither `lengths` nor `padding` is specified, then the
    returned `RaggedTensor` will have no absent/default values.

    Examples:

    >>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
    >>> tf.RaggedTensor.from_tensor(dt)
    <tf.RaggedTensor [[5, 7, 0], [0, 3, 0], [6, 0, 0]]>
    >>> tf.RaggedTensor.from_tensor(dt, lengths=[1, 0, 3])
    <tf.RaggedTensor [[5], [], [6, 0, 0]]>

    >>> tf.RaggedTensor.from_tensor(dt, padding=0)
    <tf.RaggedTensor [[5, 7], [0, 3], [6]]>

    >>> dt = tf.constant([[[5, 0], [7, 0], [0, 0]],
    ...                   [[0, 0], [3, 0], [0, 0]],
    ...                   [[6, 0], [0, 0], [0, 0]]])
    >>> tf.RaggedTensor.from_tensor(dt, lengths=([2, 0, 3], [1, 1, 2, 0, 1]))
    <tf.RaggedTensor [[[5], [7]], [], [[6, 0], [], [0]]]>

    Args:
      tensor: The `Tensor` to convert.  Must have rank `ragged_rank + 1` or
        higher.
      lengths: An optional set of row lengths, specified using a 1-D integer
        `Tensor` whose length is equal to `tensor.shape[0]` (the number of rows
        in `tensor`).  If specified, then `output[row]` will contain
        `tensor[row][:lengths[row]]`.  Negative lengths are treated as zero. You
        may optionally pass a list or tuple of lengths to this argument, which
        will be used as nested row lengths to construct a ragged tensor with
        multiple ragged dimensions.
      padding: An optional padding value.  If specified, then any row suffix
        consisting entirely of `padding` will be excluded from the returned
        RaggedTensor.  `padding` is a `Tensor` with the same dtype as `tensor`
        and with `shape=tensor.shape[ragged_rank + 1:]`.
      ragged_rank: Integer specifying the ragged rank for the returned
        `RaggedTensor`.  Must be greater than zero.
      name: A name prefix for the returned tensors (optional).
      row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
        tensor.  One of `tf.int32` or `tf.int64`.

    Returns:
      A `RaggedTensor` with the specified `ragged_rank`.  The shape of the
      returned ragged tensor is compatible with the shape of `tensor`.

    Raises:
      ValueError: If both `lengths` and `padding` are specified.
      ValueError: If the rank of `tensor` is 0 or 1.
    """
    row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
    if lengths is not None and padding is not None:
      raise ValueError("Specify argument `lengths` or `padding`, but not both.")
    if not isinstance(ragged_rank, int):
      raise TypeError(f"Argument `ragged_rank` must be an int. "
                      f"Received {ragged_rank}.")
    if ragged_rank <= 0:
      raise ValueError(f"Argument `ragged_rank` must be greater than 0. "
                       f"Received {ragged_rank}.")

    with ops.name_scope(name, "RaggedFromTensor", [tensor, lengths, padding]):
      tensor = ops.convert_to_tensor(tensor, name="tensor")
      if tensor.shape.rank is not None and tensor.shape.rank < 2:
        raise ValueError(f"The rank of a RaggedTensor must be greater than 1, "
                         f"i.e., a list of scalars won't have ragged "
                         f"dimensions. Received argument `tensor` with rank "
                         f"{tensor.shape.rank}.")
      tensor.shape.with_rank_at_least(ragged_rank + 1)
      # Dynamic shape of the input, in the requested row_splits dtype.
      input_shape = array_ops.shape(tensor, out_type=row_splits_dtype)
      ncols = input_shape[1]

      # Handle nested row lengths.
      if (lengths is not None and isinstance(lengths, (list, tuple)) and
          len(lengths) and not isinstance(lengths[0], (int, float))):
        if ragged_rank not in (1, len(lengths)):
          # Note: we accept `ragged_rank=1` here because it's the default value;
          # i.e., if the user passes in a tuple of lengths, but doesn't specify
          # ragged_rank, then we should use that tuple to determine ragged_rank.
          # We only want to complain if they pass in an explicit ragged_rank
          # that doesn't match len(lengths).
          raise ValueError(f"If Argument `lengths` is a tuple of row_lengths, "
                           f"argument `ragged_rank` must be "
                           f"len(lengths): {len(lengths)}. Received "
                           f"ragged_rank: {ragged_rank}.")
        # Rather than reconstructing the tensor mask directly, we can
        # recreate it as a boolean RaggedTensor, then densify that and use
        # that as the mask to clear out the unused data in the passed tensor.
        tensor.shape.with_rank_at_least(len(lengths) + 1)
        num_tokens = math_ops.reduce_sum(lengths[-1])
        ones_mask = array_ops.ones([num_tokens], dtype=dtypes.bool)
        ragged_mask = cls.from_nested_row_lengths(
            ones_mask, lengths, validate=False)
        dense_ragged_mask = ragged_mask.to_tensor(default_value=False)
        masked_data = array_ops.boolean_mask(tensor, dense_ragged_mask)
        return cls.from_nested_row_lengths(masked_data, lengths, validate=False)

      # Handle ragged_rank>1 via recursion:
      # If the output should have multiple ragged dimensions, then first
      # flatten the tensor to eliminate all but the last ragged dimension,
      # and recursively convert that flattened tensor.  Then add on the splits
      # for the dimensions that we flattened out.
      if ragged_rank > 1:
        if tensor.shape.is_fully_defined():
          input_shape = tensor.shape.as_list()
          # The total number of elements in each  dimension.  E.g., if
          # input_shape=[3, 4, 5, 6], then dim[2] has 3*4*5 elements in total.
          dim_size = np.cumprod(input_shape)
          new_shape = [dim_size[ragged_rank - 1]] + input_shape[ragged_rank:]
        else:
          dim_size = math_ops.cumprod(input_shape)
          new_shape = array_ops.concat(
              [[dim_size[ragged_rank - 1]], input_shape[ragged_rank:]], axis=0)
        flattened = array_ops.reshape(tensor, new_shape)
        # Recursive call handles lengths/padding on the innermost ragged dim.
        result = cls.from_tensor(
            flattened, lengths, padding, row_splits_dtype=row_splits_dtype)

        # Wrap the result in one uniform partition per flattened-out dimension,
        # innermost first.
        for axis in range(ragged_rank - 1, 0, -1):
          dim_len = tensor_shape.dimension_at_index(tensor.shape, axis).value
          if dim_len is None:
            dim_len = input_shape[axis]
          else:
            dim_len = constant_op.constant(dim_len, row_splits_dtype)
          result = RaggedTensor.from_uniform_row_length(
              values=result,
              uniform_row_length=dim_len,
              nrows=dim_size[axis - 1],
              validate=False)
        return result

      # If padding was specified, then use it to find row lengths.
      if padding is not None:
        padding = ops.convert_to_tensor(
            padding, name="padding", dtype=tensor.dtype)
        padding.shape.assert_is_compatible_with(tensor.shape[2:])

        # Find places where the padding is equal to the tensor.  (This will
        # broadcast `padding` across the outermost 2 dimensions of `tensor`,
        # so `has_default_value.shape = tensor.shape`.)
        has_default_value = math_ops.equal(padding, tensor)

        # If the padding isn't a scalar, then require that all values in the
        # padding match each item in the tensor.  After this block of code,
        # `has_default.shape = tensor.shape[:2]`.  (Unfortunately, we can't just
        # use reduce_all for both cases, because when you pass an empty `axis`
        # list to reduce_all, it reduces all axes; but we want it to reduce no
        # axes -- i.e., to be a no-op.)
        tensor_rank = array_ops.rank(tensor)
        reduce_axis = math_ops.range(2, tensor_rank)
        has_default = cond.cond(
            tensor_rank > 2,
            lambda: math_ops.reduce_all(has_default_value, axis=reduce_axis),
            lambda: has_default_value)
        has_default.set_shape(tensor_shape.TensorShape([None, None]))
        has_default.set_shape(tensor.shape[:2])

        # Use has_default to find the length of each row: for each
        # non-default item in a row, calculate the length that the row needs to
        # have to include that item; and then take the max of those values
        # (across each row).
        has_nondefault = math_ops.logical_not(has_default)
        has_nondefault = math_ops.cast(has_nondefault, row_splits_dtype)
        length_for_nondefault_value = (
            has_nondefault *
            array_ops.expand_dims(math_ops.range(1, ncols + 1), 0))
        lengths = math_ops.reduce_max(length_for_nondefault_value, axis=1)

      if lengths is not None:
        # If we have lengths (either directly supplied, or computed from
        # paddings), then use those to construct splits; and then use masking
        # to get the corresponding values.
        lengths = ragged_util.convert_to_int_tensor(lengths, "lengths",
                                                    row_splits_dtype)
        lengths.shape.assert_has_rank(1)
        # Clamp lengths into [0, ncols] before building splits.
        lengths = math_ops.minimum(lengths, ncols)
        lengths = math_ops.maximum(lengths, 0)
        limits = math_ops.cumsum(lengths)
        splits = array_ops.concat(
            [array_ops.zeros([1], row_splits_dtype), limits], axis=0)
        mask = array_ops.sequence_mask(lengths, maxlen=ncols)
        values = array_ops.boolean_mask(tensor, mask)
        return cls.from_row_splits(values, splits, validate=False)

      # If neither padding nor lengths were specified, then create a splits
      # vector that contains no default values, and reshape the input tensor
      # to form the values for the RaggedTensor.
      values_shape = array_ops.concat(
          [[input_shape[0] * input_shape[1]], input_shape[2:]], axis=0)
      values = array_ops.reshape(tensor, values_shape)
      const_nrows = tensor_shape.dimension_at_index(tensor.shape, 0).value
      const_ncols = tensor_shape.dimension_at_index(tensor.shape, 1).value
      if const_nrows is not None:
        nrows = constant_op.constant(const_nrows, row_splits_dtype)
      else:
        nrows = input_shape[0]
      if const_ncols is not None:
        ncols = constant_op.constant(const_ncols, row_splits_dtype)
      else:
        ncols = input_shape[1]
      return RaggedTensor.from_uniform_row_length(
          values=values, uniform_row_length=ncols, nrows=nrows, validate=False)
  def to_tensor(self, default_value=None, name=None, shape=None):
    """Converts this `RaggedTensor` into a `tf.Tensor`.

    If `shape` is specified, then the result is padded and/or truncated to
    the specified shape.

    Examples:

    >>> rt = tf.ragged.constant([[9, 8, 7], [], [6, 5], [4]])
    >>> print(rt.to_tensor())
    tf.Tensor(
        [[9 8 7] [0 0 0] [6 5 0] [4 0 0]], shape=(4, 3), dtype=int32)
    >>> print(rt.to_tensor(shape=[5, 2]))
    tf.Tensor(
        [[9 8] [0 0] [6 5] [4 0] [0 0]], shape=(5, 2), dtype=int32)

    Args:
      default_value: Value to set for indices not specified in `self`. Defaults
        to zero.  `default_value` must be broadcastable to
        `self.shape[self.ragged_rank + 1:]`.
      name: A name prefix for the returned tensors (optional).
      shape: The shape of the resulting dense tensor.  In particular,
        `result.shape[i]` is `shape[i]` (if `shape[i]` is not None), or
        `self.bounding_shape(i)` (otherwise).`shape.rank` must be `None` or
        equal to `self.rank`.

    Returns:
      A `Tensor` with shape `ragged.bounding_shape(self)` and the
      values specified by the non-empty values in `self`.  Empty values are
      assigned `default_value`.
    """
    with ops.name_scope(name, "RaggedToTensor", [self, default_value, shape]):
      if default_value is not None:
        default_value = ops.convert_to_tensor(
            default_value, name="default_value", dtype=self.dtype)
      # Flatten self into (partition type, partition tensor) pairs that the
      # ragged_tensor_to_tensor kernel understands.
      type_tensor_pairs = _get_row_partition_type_tensor_pairs(self)
      row_partition_types = [x[0] for x in type_tensor_pairs]
      row_partition_tensors = [x[1] for x in type_tensor_pairs]
      if default_value is None:
        default_value = array_ops.zeros((), self.dtype)

      # A list/tuple shape mixing ints and Tensors must be stacked into a
      # single Tensor before it can be passed to the kernel.
      if (isinstance(shape, (list, tuple)) and
          any(isinstance(v, tensor_lib.Tensor) for v in shape) and
          all(isinstance(v, (int, tensor_lib.Tensor)) for v in shape)):
        shape = array_ops_stack.stack(shape)

      shape_tensor = _shape_as_tensor(shape, row_partition_tensors[0].dtype)
      tensor = gen_ragged_conversion_ops.ragged_tensor_to_tensor(
          shape=shape_tensor,
          values=self.flat_values,
          default_value=default_value,
          row_partition_types=row_partition_types,
          row_partition_tensors=row_partition_tensors,
      )

      ragged_shape = self.shape

      if ragged_shape.rank is not None and not isinstance(
          shape, tensor_lib.Tensor
      ):
        # Merge self.shape and shape, favoring the second one as it takes
        # into account potential padding added to the output.
        shape = tensor_shape.as_shape(shape)
        if shape.rank is None:
          output_shape = ragged_shape
        else:
          # At this point we can assume that shape.rank == ragged_shape.rank
          # because otherwise it would have failed earlier.
          output_shape = [
              s1 if s1 is not None else s2
              for (s1, s2) in zip(shape.as_list(), ragged_shape.as_list())
          ]
        tensor.set_shape(output_shape)

      return tensor
@classmethod
@dispatch.add_dispatch_support
def from_sparse(cls, st_input, name=None, row_splits_dtype=dtypes.int64):
"""Converts a 2D `tf.sparse.SparseTensor` to a `RaggedTensor`.
Each row of the `output` `RaggedTensor` will contain the explicit values
from the same row in `st_input`. `st_input` must be ragged-right. If not
it is not ragged-right, then an error will be generated.
Example:
>>> indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]]
>>> st = tf.sparse.SparseTensor(indices=indices,
... values=[1, 2, 3, 4, 5],
... dense_shape=[4, 3])
>>> tf.RaggedTensor.from_sparse(st).to_list()
[[1, 2, 3], [4], [], [5]]
Currently, only two-dimensional `SparseTensors` are supported.
Args:
st_input: The sparse tensor to convert. Must have rank 2.
name: A name prefix for the returned tensors (optional).
row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
tensor. One of `tf.int32` or `tf.int64`.
Returns:
A `RaggedTensor` with the same values as `st_input`.
`output.ragged_rank = rank(st_input) - 1`.
`output.shape = [st_input.dense_shape[0], None]`.
Raises:
ValueError: If the number of dimensions in `st_input` is not known
statically, or is not two.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if not sparse_tensor.is_sparse(st_input):
raise TypeError(f"Argument `st_input` must be of type SparseTensor, but "
f"is of type {type(st_input).__name__}.")
with ops.name_scope(name, "RaggedFromSparse", [st_input]):
st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(
st_input, name="st_input")
if st_input.dense_shape.shape.ndims is None:
static_rank_from_dense_shape = None
else:
static_rank_from_dense_shape = st_input.dense_shape.shape.dims[0].value
if st_input.indices.shape.ndims is None:
static_rank_from_indices = None
else:
static_rank_from_indices = st_input.indices.shape.dims[1].value
if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2:
raise ValueError("rank(st_input) must be 2.")
with ops.control_dependencies(
_assert_sparse_indices_are_ragged_right(st_input.indices)):
# Treat sparse row indices as segment ids to generate a splits tensor
# that we can pair with the sparse tensor values. (Ignore sparse column
# indices.)
segment_ids = math_ops.cast(st_input.indices[:, 0], row_splits_dtype)
num_segments = math_ops.cast(st_input.dense_shape[0], row_splits_dtype)
return cls.from_value_rowids(
st_input.values, segment_ids, num_segments, validate=False)
def to_sparse(self, name=None):
"""Converts this `RaggedTensor` into a `tf.sparse.SparseTensor`.
Example:
>>> rt = tf.ragged.constant([[1, 2, 3], [4], [], [5, 6]])
>>> print(rt.to_sparse())
SparseTensor(indices=tf.Tensor(
[[0 0] [0 1] [0 2] [1 0] [3 0] [3 1]],
shape=(6, 2), dtype=int64),
values=tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32),
dense_shape=tf.Tensor([4 3], shape=(2,), dtype=int64))
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A SparseTensor with the same values as `self`.
"""
with ops.name_scope(name, "RaggedToSparse", [self]):
result = gen_ragged_conversion_ops.ragged_tensor_to_sparse(
self.nested_row_splits, self.flat_values, name=name)
return sparse_tensor.SparseTensor(result.sparse_indices,
result.sparse_values,
result.sparse_dense_shape)
@classmethod
def _from_variant(cls,
variant,
dtype,
output_ragged_rank,
input_ragged_rank=None,
row_splits_dtype=dtypes.int64,
name=None):
"""Converts a `variant` Tensor into a `RaggedTensor`.
The input `variant` could be a scalar, meaning it encodes a single
`RaggedTensor` with ragged_rank `output_ragged_rank`. Alternatively it could
have an arbitrary rank, in which case each element is decoded into a
`RaggedTensor` with ragged_rank `input_ragged_rank` and these are then
stacked according to the input shape to output a single `RaggedTensor`
with ragged_rank `output_ragged_rank`. If `input_ragged_rank` is not
provided, it is inferred dynamically as `output_ragged_rank` -
`rank(variant)`. If `input_ragged_rank` is provided, the following must be
true: `output_ragged_rank` = `input_ragged_rank` + `rank(variant)`.
Example:
>>> rt = tf.ragged.constant([[0], [1, 2]])
>>> et = rt._to_variant()
>>> stacked_et = tf.stack([et, et])
>>> tf.RaggedTensor._from_variant( # scalar input.
... et, dtype=tf.int32, output_ragged_rank=1).to_list()
[[0], [1, 2]]
>>> tf.RaggedTensor._from_variant( # batched input.
... stacked_et, dtype=tf.int32, output_ragged_rank=2).to_list()
[[[0], [1, 2]], [[0], [1, 2]]]
Args:
variant: A `variant` Tensor representing an encoded (possibly
nested-batched) `RaggedTensor`.
dtype: The dtype of the encoded `RaggedTensor`.
output_ragged_rank: The expected ragged rank of the output `RaggedTensor`.
input_ragged_rank: The ragged rank of each encoded `RaggedTensor`. This is
optional and inferred dynamically if not provided.
row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor. One
of `tf.int32` or `tf.int64`.
name: A name prefix for the returned tensors (optional).
Returns:
A `RaggedTensor` of dtype `dtype` and ragged rank `output_ragged_rank`.
Raises:
ValueError: If the input rank is known, `input_ragged_rank` is provided
and `output_ragged_rank` = `input_ragged_rank` + `rank(variant)` does
not hold.
"""
variant = ops.convert_to_tensor(
variant, name="variant", dtype=dtypes.variant)
if (variant.shape.ndims is not None and input_ragged_rank is not None and
output_ragged_rank != input_ragged_rank + variant.shape.ndims):
raise ValueError(
f"Argument `output_ragged_rank` ({output_ragged_rank}) must be equal "
f"to `input_ragged_rank` + `variant.shape.ndims` "
f"({input_ragged_rank} + {variant.shape.ndims}).")
input_ragged_rank = -1 if input_ragged_rank is None else input_ragged_rank
with ops.name_scope(
name, "RaggedFromVariant",
[variant, dtype, input_ragged_rank, output_ragged_rank]):
result = gen_ragged_conversion_ops.ragged_tensor_from_variant(
variant, input_ragged_rank, max(output_ragged_rank, 0), dtype,
row_splits_dtype, name)
return cls.from_nested_row_splits(
result.output_dense_values,
result.output_nested_splits,
validate=False)
def _to_variant(self, batched_input=False, name=None):
"""Converts this `RaggedTensor` into a `variant` Tensor.
If `batched_input` is `True`, then the `RaggedTensor` is unbatched along the
zero-th dimension, each component `RaggedTensor` is encoded into a scalar
`variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.
If `batched_input` is `False`, then the `RaggedTensor` is encoded as is and
a scalar `variant` Tensor is returned.
Example:
>>> rt = tf.ragged.constant([[[0]], [[1]], [[2]]])
>>> rt._to_variant().shape.as_list()
[]
>>> rt._to_variant(batched_input=True).shape.as_list()
[3]
Args:
batched_input: If `True`, the `RaggedTensor` is unbatched and converted to
a `variant` vector. Set to `False` by default.
name: A name prefix for the returned tensors (optional).
Returns:
A `variant` Tensor that encodes this `RaggedTensor`.
"""
with ops.name_scope(name, "RaggedToVariant", [self, batched_input]):
return gen_ragged_conversion_ops.ragged_tensor_to_variant(
self.nested_row_splits, self.flat_values, batched_input, name)
# =============================================================================
# String Encoding
# =============================================================================
def __repr__(self):
if self._is_eager():
# The np.array2string in _formatter provides a separator argument, but
# doesn't handle recursive calls correctly. The np.printoptions handles
# recursive calls correctly, but doesn't provide a separator argument.
# Combines them together to print elements separated by comma, while
# avoiding the redundant array prefixes and dtypes. For example,
# the value of tf.ragged.constant([[1, 2], [3, 4]]) will look like
#
# [[1, 2],
# [3, 4]]
with np.printoptions(formatter={"all": _formatter}):
value_text = _formatter(self.numpy())
return f"<tf.RaggedTensor {value_text}>"
else:
return "tf.RaggedTensor(values=%s, row_splits=%s)" % (self.values,
self.row_splits)
# =============================================================================
# Eager Execution Mode
# =============================================================================
def numpy(self):
"""Returns a numpy `array` with the values for this `RaggedTensor`.
Requires that this `RaggedTensor` was constructed in eager execution mode.
Ragged dimensions are encoded using numpy `arrays` with `dtype=object` and
`rank=1`, where each element is a single row.
#### Examples
In the following example, the value returned by `RaggedTensor.numpy()`
contains three numpy `array` objects: one for each row (with `rank=1` and
`dtype=int64`), and one to combine them (with `rank=1` and `dtype=object`):
>>> tf.ragged.constant([[1, 2, 3], [4, 5]], dtype=tf.int64).numpy()
array([array([1, 2, 3]), array([4, 5])], dtype=object)
Uniform dimensions are encoded using multidimensional numpy `array`s. In
the following example, the value returned by `RaggedTensor.numpy()` contains
a single numpy `array` object, with `rank=2` and `dtype=int64`:
>>> tf.ragged.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int64).numpy()
array([[1, 2, 3], [4, 5, 6]])
Returns:
A numpy `array`.
"""
if not self._is_eager():
raise ValueError("RaggedTensor.numpy() is only supported in eager mode.")
values = self.values.numpy()
splits = self.row_splits.numpy()
rows = [values[splits[i]:splits[i + 1]] for i in range(len(splits) - 1)]
if not rows:
return np.zeros((0, 0) + values.shape[1:], dtype=values.dtype)
# Note: if `rows` have ragged lengths, then they will be stored in a
# np.ndarray with dtype=object and rank=1. If they have uniform lengths,
# they will be combined into a single np.ndarray with dtype=row.dtype and
# rank=row.rank+1.
#
# Manually set dtype as numpy now complains when given ragged rows.
has_variable_length_rows = any(len(row) != len(rows[0]) for row in rows)
dtype = np.object_ if has_variable_length_rows else None
return np.array(rows, dtype=dtype)
def to_list(self):
"""Returns a nested Python `list` with the values for this `RaggedTensor`.
Requires that `rt` was constructed in eager execution mode.
Returns:
A nested Python `list`.
"""
if not isinstance(self.row_splits, ops.EagerTensor):
raise ValueError("to_list can only be used in eager mode.")
row_splits = self.row_splits.numpy().tolist()
values = self.values
if isinstance(values, RaggedTensor):
return [
values[row_splits[i]:row_splits[i + 1]].to_list()
for i in range(len(row_splits) - 1)
]
else:
# Convert values to a Python list.
if hasattr(values, "numpy"):
values_as_list = values.numpy().tolist()
elif hasattr(values, "to_list"):
values_as_list = values.to_list()
else:
raise ValueError("values must be convertible to a list")
return [
values_as_list[row_splits[i]:row_splits[i + 1]]
for i in range(len(row_splits) - 1)
]
def _eager_value(self):
"""Returns a RaggedTensorValue for self. Requires self._is_eager()=true."""
value = self.flat_values.numpy()
for row_splits in reversed(self.nested_row_splits):
value = ragged_tensor_value.RaggedTensorValue(value, row_splits.numpy())
return value
def _is_eager(self):
"""Returns True if values & row_splits Tensors are all `EagerTensor`s."""
rt = self
while isinstance(rt, RaggedTensor):
if not isinstance(rt.row_splits, ops.EagerTensor):
return False
rt = rt.values
return isinstance(rt, ops.EagerTensor)
  # =============================================================================
  # Operators
  # =============================================================================
  # To avoid circular dependencies, we define stub methods for operators here,
  # and then override them when the ragged_operators module is imported.

  def _overloaded_operator(name):  # pylint: disable=no-self-argument
    """Returns a stub for operator `name` that errors until ragged_ops loads."""

    def stub(*args, **kwargs):
      # Placeholder body: any use before ragged_ops is imported is an error.
      del args, kwargs
      raise ValueError(
          f"You must import 'tensorflow.python.ops.ragged.ragged_ops' "
          f"before using RaggedTensor.{name}.")

    return stub

  # Indexing and comparison operators.
  __getitem__ = _overloaded_operator("__getitem__")
  __ge__ = _overloaded_operator("__ge__")
  __gt__ = _overloaded_operator("__gt__")
  __le__ = _overloaded_operator("__le__")
  __lt__ = _overloaded_operator("__lt__")
  # Logical/bitwise operators.
  __and__ = _overloaded_operator("__and__")
  __rand__ = _overloaded_operator("__rand__")
  __invert__ = _overloaded_operator("__invert__")
  __ror__ = _overloaded_operator("__ror__")
  __or__ = _overloaded_operator("__or__")
  __xor__ = _overloaded_operator("__xor__")
  __rxor__ = _overloaded_operator("__rxor__")
  # Arithmetic operators.
  __abs__ = _overloaded_operator("__abs__")
  __add__ = _overloaded_operator("__add__")
  __radd__ = _overloaded_operator("__radd__")
  __div__ = _overloaded_operator("__div__")
  __rdiv__ = _overloaded_operator("__rdiv__")
  __floordiv__ = _overloaded_operator("__floordiv__")
  __rfloordiv__ = _overloaded_operator("__rfloordiv__")
  __mod__ = _overloaded_operator("__mod__")
  __rmod__ = _overloaded_operator("__rmod__")
  __mul__ = _overloaded_operator("__mul__")
  __rmul__ = _overloaded_operator("__rmul__")
  __neg__ = _overloaded_operator("__neg__")
  __pow__ = _overloaded_operator("__pow__")
  __rpow__ = _overloaded_operator("__rpow__")
  __sub__ = _overloaded_operator("__sub__")
  __rsub__ = _overloaded_operator("__rsub__")
  __truediv__ = _overloaded_operator("__truediv__")
  __rtruediv__ = _overloaded_operator("__rtruediv__")
  # The factory is only needed while defining the stubs above; remove it so it
  # doesn't become a (callable) attribute of the class.
  del _overloaded_operator
# =============================================================================
# Name Scope
# =============================================================================
# This private function is used by ops.name_scope to ensure that all of the
# input tensors for the scope belong to the same graph. Defining this means
# that you may include `RaggedTensor` objects in the name_scope `values`
# list.
def _as_graph_element(self):
"""Convert `self` to a graph element."""
values = self.values
while isinstance(values, RaggedTensor):
values = values.values
return values
  # =============================================================================
  # Composite Tensor
  # =============================================================================

  @property
  def _type_spec(self):
    """Returns the `RaggedTensorSpec` describing this value."""
    return RaggedTensorSpec.from_value(self)

  def _shape_invariant_to_type_spec(self, shape):
    # Build a spec with the given (possibly relaxed) shape, keeping this
    # tensor's dtype, ragged_rank, and row_splits dtype.
    return RaggedTensorSpec(shape, self.dtype, self.ragged_rank,
                            self.row_splits.dtype)

  def consumers(self):
    """Returns consumers of this tensor; delegates to `self._consumers()`."""
    return self._consumers()

  # Composite-tensor gradient support (gradients flow through the values
  # component, per WithValuesCompositeTensorGradient).
  __composite_gradient__ = (
      composite_tensor_gradient.WithValuesCompositeTensorGradient())
def is_ragged(value):
  """Returns true if `value` is a ragged tensor or ragged tensor value."""
  ragged_types = (RaggedTensor, ragged_tensor_value.RaggedTensorValue)
  return isinstance(value, ragged_types)
def match_row_splits_dtypes(*tensors, **kwargs):
  """Return a copy of `tensors` with row_splits all having the same dtype.

  Args:
    *tensors: A list of Tensors or RaggedTensors.
    **kwargs: If 'return_dtype=True', then return a tuple (dtype, tensors),
      where `dtype` is the data type used by row-splits, and `tensors` is the
      converted list of `Tensors` and `RaggedTensors`.

  Returns:
    The converted list of `Tensors` and `RaggedTensors`.

  Raises:
    ValueError: If the inputs mix int32 and int64 row_splits and automatic
      casting of partition dtypes is disabled, or if an unexpected keyword
      argument is passed.
  """
  return_dtype = kwargs.pop("return_dtype", False)
  if kwargs:
    raise ValueError(f"Unexpected keyword args {kwargs}.")
  # Classify the row_splits dtypes present among the RaggedTensor inputs.
  # (Anything other than int32 is treated as int64, as in the op kernels.)
  has_int32 = any(
      t.row_splits.dtype == dtypes.int32
      for t in tensors if isinstance(t, RaggedTensor))
  has_int64 = any(
      t.row_splits.dtype != dtypes.int32
      for t in tensors if isinstance(t, RaggedTensor))
  if has_int32 and has_int64:
    # Mixed dtypes: upcast everything to int64 (if auto-casting is allowed).
    if not ragged_config.auto_cast_partition_dtype():
      raise ValueError("Input RaggedTensors have mismatched row_splits dtypes; "
                       "use RaggedTensor.with_row_splits_dtype() to convert "
                       "them to compatible dtypes.")
    dtype = dtypes.int64
    tensors = tuple(
        t.with_row_splits_dtype(dtypes.int64)
        if isinstance(t, RaggedTensor) else t
        for t in tensors)
  elif has_int32:
    dtype = dtypes.int32
  else:
    dtype = dtypes.int64

  return (dtype, tensors) if return_dtype else tensors
# ===============================================================================
# RaggedTensorSpec
# ===============================================================================
@tf_export("RaggedTensorSpec")
@type_spec_registry.register("tf.RaggedTensorSpec")
| RaggedTensor |
python | pyparsing__pyparsing | examples/inv_regex.py | {
"start": 2133,
"end": 2378
} | class ____:
def __init__(self, exprs):
self.exprs = exprs
def make_generator(self):
def alt_gen():
for e in self.exprs:
yield from e.make_generator()()
return alt_gen
| AlternativeEmitter |
python | pypa__warehouse | warehouse/logging.py | {
"start": 218,
"end": 3655
} | class ____(logging.Formatter):
def format(self, record):
# TODO: Figure out a better way of handling this besides just looking
# at the logger name, ideally this would have some way to
# really differentiate between log items which were logged by
# structlog and which were not.
if not record.name.startswith("warehouse."):
# TODO: Is there a better way to handle this? Maybe we can figure
# out a way to pass this through the structlog processors
# instead of manually duplicating the side effects here?
event_dict = {
"logger": record.name,
"level": record.levelname,
"event": record.msg,
"thread": threading.get_ident(),
}
record.msg = RENDERER(None, record.levelname, event_dict)
return super().format(record)
def _create_id(request):
return str(uuid.uuid4())
def _create_logger(request):
# This has to use **{} instead of just a kwarg because request.id is not
# an allowed kwarg name.
return request_logger.bind(**{"request.id": request.id})
def includeme(config):
# Configure the standard library logging
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"formatters": {"structlog": {"()": "warehouse.logging.StructlogFormatter"}},
"handlers": {
"primary": {
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout",
"formatter": "structlog",
},
},
"loggers": {
"datadog.dogstatsd": {"level": "ERROR"},
"gunicorn": {
"propagate": False,
"handlers": ["primary"],
"level": config.registry.settings.get("logging.level", "INFO"),
},
"gunicorn.access": {
"propagate": False,
"handlers": ["primary"],
"level": config.registry.settings.get("logging.level", "INFO"),
},
"gunicorn.server": {
"propagate": False,
"handlers": ["primary"],
"level": config.registry.settings.get("logging.level", "INFO"),
},
},
"root": {
"level": config.registry.settings.get("logging.level", "INFO"),
"handlers": ["primary"],
},
}
)
# Configure structlog
structlog.configure(
processors=[
structlog.stdlib.filter_by_level,
structlog.stdlib.add_logger_name,
structlog.stdlib.add_log_level,
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
RENDERER,
],
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
# Give every request a unique identifier
config.add_request_method(_create_id, name="id", reify=True)
# Add a log method to every request.
config.add_request_method(_create_logger, name="log", reify=True)
| StructlogFormatter |
python | pytoolz__toolz | toolz/functoolz.py | {
"start": 18815,
"end": 20995
} | class ____:
""" Creates a function that calls several functions with the same arguments
Takes several functions and returns a function that applies its arguments
to each of those functions then returns a tuple of the results.
Name comes from juxtaposition: the fact of two things being seen or placed
close together with contrasting effect.
>>> inc = lambda x: x + 1
>>> double = lambda x: x * 2
>>> juxt(inc, double)(10)
(11, 20)
>>> juxt([inc, double])(10)
(11, 20)
"""
__slots__ = ['funcs']
def __init__(self, *funcs):
if len(funcs) == 1 and not callable(funcs[0]):
funcs = funcs[0]
self.funcs = tuple(funcs)
def __call__(self, *args, **kwargs):
return tuple(func(*args, **kwargs) for func in self.funcs)
def __getstate__(self):
return self.funcs
def __setstate__(self, state):
self.funcs = state
def do(func, x):
""" Runs ``func`` on ``x``, returns ``x``
Because the results of ``func`` are not returned, only the side
effects of ``func`` are relevant.
Logging functions can be made by composing ``do`` with a storage function
like ``list.append`` or ``file.write``
>>> from toolz import compose
>>> from toolz.curried import do
>>> log = []
>>> inc = lambda x: x + 1
>>> inc = compose(inc, do(log.append))
>>> inc(1)
2
>>> inc(11)
12
>>> log
[1, 11]
"""
func(x)
return x
@curry
def flip(func, a, b):
""" Call the function call with the arguments flipped
This function is curried.
>>> def div(a, b):
... return a // b
...
>>> flip(div, 2, 6)
3
>>> div_by_two = flip(div, 2)
>>> div_by_two(4)
2
This is particularly useful for built in functions and functions defined
in C extensions that accept positional only arguments. For example:
isinstance, issubclass.
>>> data = [1, 'a', 'b', 2, 1.5, object(), 3]
>>> only_ints = list(filter(flip(isinstance, int), data))
>>> only_ints
[1, 2, 3]
"""
return func(b, a)
def return_none(exc):
""" Returns None.
"""
return None
| juxt |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 765137,
"end": 765956
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"created_at",
"email",
"is_primary",
"updated_at",
"user_account",
)
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
email = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="email")
is_primary = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isPrimary"
)
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
user_account = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseServerUserAccount), graphql_name="userAccount"
)
| EnterpriseServerUserAccountEmail |
python | sympy__sympy | sympy/matrices/expressions/matadd.py | {
"start": 823,
"end": 4928
} | class ____(MatrixExpr, Add):
"""A Sum of Matrix Expressions
MatAdd inherits from and operates like SymPy Add
Examples
========
>>> from sympy import MatAdd, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 5)
>>> B = MatrixSymbol('B', 5, 5)
>>> C = MatrixSymbol('C', 5, 5)
>>> MatAdd(A, B, C)
A + B + C
"""
is_MatAdd = True
identity = GenericZeroMatrix()
def __new__(cls, *args, evaluate=False, check=None, _sympify=True):
if not args:
return cls.identity
# This must be removed aggressively in the constructor to avoid
# TypeErrors from GenericZeroMatrix().shape
args = list(filter(lambda i: cls.identity != i, args))
if _sympify:
args = list(map(sympify, args))
if not all(isinstance(arg, MatrixExpr) for arg in args):
raise TypeError("Mix of Matrix and Scalar symbols")
obj = Basic.__new__(cls, *args)
if check is not None:
sympy_deprecation_warning(
"Passing check to MatAdd is deprecated and the check argument will be removed in a future version.",
deprecated_since_version="1.11",
active_deprecations_target='remove-check-argument-from-matrix-operations')
if check is not False:
validate(*args)
if evaluate:
obj = cls._evaluate(obj)
return obj
@classmethod
def _evaluate(cls, expr):
return canonicalize(expr)
@property
def shape(self):
return self.args[0].shape
def could_extract_minus_sign(self):
return _could_extract_minus_sign(self)
def expand(self, **kwargs):
expanded = super(MatAdd, self).expand(**kwargs)
return self._evaluate(expanded)
def _entry(self, i, j, **kwargs):
return Add(*[arg._entry(i, j, **kwargs) for arg in self.args])
def _eval_transpose(self):
return MatAdd(*[transpose(arg) for arg in self.args]).doit()
def _eval_adjoint(self):
return MatAdd(*[adjoint(arg) for arg in self.args]).doit()
def _eval_trace(self):
from .trace import trace
return Add(*[trace(arg) for arg in self.args]).doit()
def doit(self, **hints):
deep = hints.get('deep', True)
if deep:
args = [arg.doit(**hints) for arg in self.args]
else:
args = self.args
return canonicalize(MatAdd(*args))
def _eval_derivative(self, x):
# MatAdd does not remove ZeroMatrix unless you call .doit():
return super()._eval_derivative(x).doit()
def _eval_derivative_matrix_lines(self, x):
add_lines = [arg._eval_derivative_matrix_lines(x) for arg in self.args]
return [j for i in add_lines for j in i]
add.register_handlerclass((Add, MatAdd), MatAdd)
factor_of = lambda arg: arg.as_coeff_mmul()[0]
matrix_of = lambda arg: unpack(arg.as_coeff_mmul()[1])
def combine(cnt, mat):
if cnt == 1:
return mat
else:
return cnt * mat
def merge_explicit(matadd):
""" Merge explicit MatrixBase arguments
Examples
========
>>> from sympy import MatrixSymbol, eye, Matrix, MatAdd, pprint
>>> from sympy.matrices.expressions.matadd import merge_explicit
>>> A = MatrixSymbol('A', 2, 2)
>>> B = eye(2)
>>> C = Matrix([[1, 2], [3, 4]])
>>> X = MatAdd(A, B, C)
>>> pprint(X)
[1 0] [1 2]
A + [ ] + [ ]
[0 1] [3 4]
>>> pprint(merge_explicit(X))
[2 2]
A + [ ]
[3 5]
"""
groups = sift(matadd.args, lambda arg: isinstance(arg, MatrixBase))
if len(groups[True]) > 1:
return MatAdd(*(groups[False] + [reduce(operator.add, groups[True])]))
else:
return matadd
rules = (rm_id(lambda x: x == 0 or isinstance(x, ZeroMatrix)),
unpack,
flatten,
glom(matrix_of, factor_of, combine),
merge_explicit,
sort(default_sort_key))
canonicalize = exhaust(condition(lambda x: isinstance(x, MatAdd),
do_one(*rules)))
| MatAdd |
python | astropy__astropy | astropy/coordinates/builtin_frames/itrs.py | {
"start": 1091,
"end": 4268
} | class ____(BaseCoordinateFrame):
"""
A coordinate or frame in the International Terrestrial Reference System
(ITRS). This is approximately a geocentric system, although strictly it is
defined by a series of reference locations near the surface of the Earth (the ITRF).
For more background on the ITRS, see the references provided in the
:ref:`astropy:astropy-coordinates-seealso` section of the documentation.
This frame also includes frames that are defined *relative* to the center of the Earth,
but that are offset (in both position and velocity) from the center of the Earth. You
may see such non-geocentric coordinates referred to as "topocentric".
Topocentric ITRS frames are convenient for observations of near Earth objects where
stellar aberration is not included. One can merely subtract the observing site's
EarthLocation geocentric ITRS coordinates from the object's geocentric ITRS coordinates,
put the resulting vector into a topocentric ITRS frame and then transform to
`~astropy.coordinates.AltAz` or `~astropy.coordinates.HADec`. The other way around is
to transform an observed `~astropy.coordinates.AltAz` or `~astropy.coordinates.HADec`
position to a topocentric ITRS frame and add the observing site's EarthLocation geocentric
ITRS coordinates to yield the object's geocentric ITRS coordinates.
On the other hand, using ``transform_to`` to transform geocentric ITRS coordinates to
topocentric ITRS, observed `~astropy.coordinates.AltAz`, or observed
`~astropy.coordinates.HADec` coordinates includes the difference between stellar aberration
from the point of view of an observer at the geocenter and stellar aberration from the
point of view of an observer on the surface of the Earth. If the geocentric ITRS
coordinates of the object include stellar aberration at the geocenter (e.g. certain ILRS
ephemerides), then this is the way to go.
Note to ILRS ephemeris users: Astropy does not currently consider relativistic
effects of the Earth's gravatational field. Nor do the `~astropy.coordinates.AltAz`
or `~astropy.coordinates.HADec` refraction corrections compute the change in the
range due to the curved path of light through the atmosphere, so Astropy is no
substitute for the ILRS software in these respects.
"""
default_representation = CartesianRepresentation
default_differential = CartesianDifferential
obstime = TimeAttribute(
default=DEFAULT_OBSTIME, doc="The reference time (e.g., time of observation)"
)
location = EarthLocationAttribute(
default=EARTH_CENTER, doc="The location on Earth of the observer"
)
@property
def earth_location(self):
"""
The data in this frame as an `~astropy.coordinates.EarthLocation` class.
"""
cart = self.represent_as(CartesianRepresentation)
return EarthLocation(
x=cart.x + self.location.x,
y=cart.y + self.location.y,
z=cart.z + self.location.z,
)
# Self-transform is in intermediate_rotation_transforms.py with all the other
# ITRS transforms
| ITRS |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 218762,
"end": 219227
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, api_key: str):
"""Airbyte Source for Lemlist.
Documentation can be found at https://docs.airbyte.com/integrations/sources/lemlist
Args:
name (str): The name of the destination.
api_key (str): Lemlist API key.
"""
self.api_key = check.str_param(api_key, "api_key")
super().__init__("Lemlist", name)
| LemlistSource |
python | ray-project__ray | python/ray/serve/_private/storage/kv_store.py | {
"start": 484,
"end": 584
} | class ____(Exception):
def __init__(self, rpc_code):
self.rpc_code = rpc_code
| KVStoreError |
python | huggingface__transformers | src/transformers/models/afmoe/modular_afmoe.py | {
"start": 1633,
"end": 1694
} | class ____(LlamaRotaryEmbedding):
pass
| AfmoeRotaryEmbedding |
python | sphinx-doc__sphinx | sphinx/transforms/__init__.py | {
"start": 2280,
"end": 3283
} | class ____(Transformer):
"""A transformer for Sphinx."""
document: nodes.document
env: BuildEnvironment | None = None
def set_environment(self, env: BuildEnvironment) -> None:
self.env = env
def apply_transforms(self) -> None:
if isinstance(self.document, nodes.document):
if not hasattr(self.document.settings, 'env') and self.env:
self.document.settings.env = self.env
super().apply_transforms() # type: ignore[misc]
else:
# wrap the target node by document node during transforming
try:
from sphinx.util.docutils import new_document
document = new_document('')
if self.env:
document.settings.env = self.env
document += self.document
self.document = document
super().apply_transforms()
finally:
self.document = self.document[0]
| SphinxTransformer |
python | scikit-learn__scikit-learn | sklearn/utils/_tags.py | {
"start": 4403,
"end": 5547
} | class ____:
"""Tags for the classifier.
Parameters
----------
poor_score : bool, default=False
Whether the estimator fails to provide a "reasonable" test-set
score, which currently for classification is an accuracy of
0.83 on ``make_blobs(n_samples=300, random_state=0)``. The
datasets and values are based on current estimators in scikit-learn
and might be replaced by something more systematic.
multi_class : bool, default=True
Whether the classifier can handle multi-class
classification. Note that all classifiers support binary
classification. Therefore this flag indicates whether the
classifier is a binary-classifier-only or not.
See :term:`multi-class` in the glossary.
multi_label : bool, default=False
Whether the classifier supports multi-label output: a data point can
be predicted to belong to a variable number of classes.
See :term:`multi-label` in the glossary.
"""
poor_score: bool = False
multi_class: bool = True
multi_label: bool = False
@dataclass(slots=True)
| ClassifierTags |
python | facelessuser__pymdown-extensions | pymdownx/caret.py | {
"start": 4250,
"end": 5127
} | class ____(util.PatternSequenceProcessor):
"""Smart insert and sup processor."""
PATTERNS = [
util.PatSeqItem(re.compile(SMART_INS_SUP, re.DOTALL | re.UNICODE), 'double', 'ins,sup'),
util.PatSeqItem(re.compile(SMART_SUP_INS, re.DOTALL | re.UNICODE), 'double', 'sup,ins'),
util.PatSeqItem(re.compile(SMART_INS_SUP2, re.DOTALL | re.UNICODE), 'double', 'ins,sup'),
util.PatSeqItem(re.compile(SMART_INS_SUP3, re.DOTALL | re.UNICODE), 'double2', 'ins,sup'),
util.PatSeqItem(re.compile(SMART_INS, re.DOTALL | re.UNICODE), 'single', 'ins'),
util.PatSeqItem(re.compile(SMART_SUP_INS2, re.DOTALL | re.UNICODE), 'double2', 'sup,ins'),
util.PatSeqItem(re.compile(SUP2, re.DOTALL | re.UNICODE), 'single', 'sup', True),
util.PatSeqItem(re.compile(SUP, re.DOTALL | re.UNICODE), 'single', 'sup')
]
| CaretSmartProcessor |
python | kamyu104__LeetCode-Solutions | Python/count-nodes-with-the-highest-score.py | {
"start": 29,
"end": 1240
} | class ____(object):
def countHighestScoreNodes(self, parents):
"""
:type parents: List[int]
:rtype: int
"""
def iter_dfs(adj):
result = [0]*2
stk = [(1, (0, [0]))]
while stk:
step, args = stk.pop()
if step == 1:
i, ret = args
cnts = [[0] for _ in xrange(len(adj[i]))]
stk.append((2, (cnts, ret)))
for j, child in enumerate(adj[i]):
stk.append((1, (child, cnts[j])))
elif step == 2:
cnts, ret = args
ret[0] = sum(cnt[0] for cnt in cnts)+1
score = max((len(adj)-ret[0]), 1)*reduce(lambda x, y: x*y[0], cnts, 1)
if score > result[0]:
result[:] = [score, 1]
elif score == result[0]:
result[1] += 1
return result[1]
adj = [[] for _ in xrange(len(parents))] # Space: O(n)
for i in xrange(1, len(parents)):
adj[parents[i]].append(i)
return iter_dfs(adj)
# Time: O(n)
# Space: O(n)
| Solution |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 941722,
"end": 942409
} | class ____(PredicateComposition):
"""
LogicalNotPredicate schema wrapper.
Parameters
----------
not : str, dict, :class:`Predicate`, :class:`FieldGTPredicate`, :class:`FieldLTPredicate`, :class:`FieldGTEPredicate`, :class:`FieldLTEPredicate`, :class:`LogicalOrPredicate`, :class:`ParameterPredicate`, :class:`FieldEqualPredicate`, :class:`FieldOneOfPredicate`, :class:`FieldRangePredicate`, :class:`FieldValidPredicate`, :class:`LogicalAndPredicate`, :class:`LogicalNotPredicate`, :class:`PredicateComposition`
"""
_schema = {"$ref": "#/definitions/LogicalNot<Predicate>"}
def __init__(self, **kwds):
super().__init__(**kwds)
| LogicalNotPredicate |
python | Textualize__textual | tests/test_markdown.py | {
"start": 705,
"end": 850
} | class ____(Markdown):
def unhandled_token(self, token: Token) -> MarkdownBlock | None:
return UnhandledToken(self, token)
| FussyMarkdown |
python | kamyu104__LeetCode-Solutions | Python/count-number-of-distinct-integers-after-reverse-operations.py | {
"start": 495,
"end": 707
} | class ____(object):
def countDistinctIntegers(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return len({y for x in nums for y in (x, int(str(x)[::-1]))})
| Solution2 |
python | PyCQA__pylint | tests/functional/e/enum_subclasses.py | {
"start": 935,
"end": 1001
} | class ____(Enum):
def some_behavior(self):
pass
| BaseEnum |
python | huggingface__transformers | examples/modular-transformers/modular_add_function.py | {
"start": 561,
"end": 704
} | class ____(ZambaAttention):
def __init__(self):
pass
def forward(self):
_ = apply_rotary_pos_emb(1, 1, 1, 1)
| TestAttention |
python | astropy__astropy | astropy/modeling/core.py | {
"start": 115137,
"end": 115439
} | class ____(FittableModel):
"""
Base class for one-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
n_inputs = 1
n_outputs = 1
_separable = True
| Fittable1DModel |
python | openai__openai-python | src/openai/resources/conversations/conversations.py | {
"start": 16281,
"end": 16992
} | class ____:
def __init__(self, conversations: Conversations) -> None:
self._conversations = conversations
self.create = _legacy_response.to_raw_response_wrapper(
conversations.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
conversations.retrieve,
)
self.update = _legacy_response.to_raw_response_wrapper(
conversations.update,
)
self.delete = _legacy_response.to_raw_response_wrapper(
conversations.delete,
)
@cached_property
def items(self) -> ItemsWithRawResponse:
return ItemsWithRawResponse(self._conversations.items)
| ConversationsWithRawResponse |
python | sphinx-doc__sphinx | sphinx/directives/code.py | {
"start": 14332,
"end": 18701
} | class ____(SphinxDirective):
"""Like ``.. include:: :literal:``, but only warns if the include file is
not found, and does not raise errors. Also has several options for
selecting what to include.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {
'dedent': optional_int,
'linenos': directives.flag,
'lineno-start': int,
'lineno-match': directives.flag,
'tab-width': int,
'language': directives.unchanged_required,
'force': directives.flag,
'encoding': directives.encoding,
'pyobject': directives.unchanged_required,
'lines': directives.unchanged_required,
'start-after': directives.unchanged_required,
'end-before': directives.unchanged_required,
'start-at': directives.unchanged_required,
'end-at': directives.unchanged_required,
'prepend': directives.unchanged_required,
'append': directives.unchanged_required,
'emphasize-lines': directives.unchanged_required,
'caption': directives.unchanged,
'class': directives.class_option,
'name': directives.unchanged,
'diff': directives.unchanged_required,
}
def run(self) -> list[Node]:
document = self.state.document
if not document.settings.file_insertion_enabled:
return [
document.reporter.warning('File insertion disabled', line=self.lineno)
]
# convert options['diff'] to absolute path
if 'diff' in self.options:
_, path = self.env.relfn2path(self.options['diff'])
self.options['diff'] = path
try:
location = self.state_machine.get_source_and_line(self.lineno)
rel_filename, filename = self.env.relfn2path(self.arguments[0])
self.env.note_dependency(rel_filename)
reader = LiteralIncludeReader(filename, self.options, self.config)
text, lines = reader.read(location=location)
retnode: Element = nodes.literal_block(text, text, source=filename)
retnode['force'] = 'force' in self.options
self.set_source_info(retnode)
if self.options.get('diff'): # if diff is set, set udiff
retnode['language'] = 'udiff'
elif 'language' in self.options:
retnode['language'] = self.options['language']
if (
'linenos' in self.options
or 'lineno-start' in self.options
or 'lineno-match' in self.options
):
retnode['linenos'] = True
retnode['classes'] += self.options.get('class', [])
extra_args = retnode['highlight_args'] = {}
if 'emphasize-lines' in self.options:
hl_lines = parse_line_num_spec(self.options['emphasize-lines'], lines)
if any(i >= lines for i in hl_lines):
logger.warning(
__('line number spec is out of range(1-%d): %r'),
lines,
self.options['emphasize-lines'],
location=location,
)
extra_args['hl_lines'] = [x + 1 for x in hl_lines if x < lines]
extra_args['linenostart'] = reader.lineno_start
if 'caption' in self.options:
caption = self.options['caption'] or self.arguments[0]
retnode = container_wrapper(self, retnode, caption)
# retnode will be note_implicit_target that is linked from caption and numref.
# when options['name'] is provided, it should be primary ID.
self.add_name(retnode)
return [retnode]
except Exception as exc:
return [document.reporter.warning(exc, line=self.lineno)]
def setup(app: Sphinx) -> ExtensionMetadata:
directives.register_directive('highlight', Highlight)
directives.register_directive('code-block', CodeBlock)
directives.register_directive('sourcecode', CodeBlock)
directives.register_directive('literalinclude', LiteralInclude)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| LiteralInclude |
python | pypa__hatch | tests/backend/builders/test_config.py | {
"start": 53560,
"end": 67138
} | class ____:
def test_default(self, isolation):
builder = MockBuilder(str(isolation))
assert builder.config.dependencies == builder.config.dependencies == []
def test_target_not_array(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"dependencies": 9000}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(TypeError, match="Field `tool.hatch.build.targets.foo.dependencies` must be an array"):
_ = builder.config.dependencies
def test_target_dependency_not_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"dependencies": [9000]}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(
TypeError, match="Dependency #1 of field `tool.hatch.build.targets.foo.dependencies` must be a string"
):
_ = builder.config.dependencies
def test_global_not_array(self, isolation):
config = {"tool": {"hatch": {"build": {"dependencies": 9000}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(TypeError, match="Field `tool.hatch.build.dependencies` must be an array"):
_ = builder.config.dependencies
def test_global_dependency_not_string(self, isolation):
config = {"tool": {"hatch": {"build": {"dependencies": [9000]}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(TypeError, match="Dependency #1 of field `tool.hatch.build.dependencies` must be a string"):
_ = builder.config.dependencies
def test_hook_require_runtime_dependencies_not_boolean(self, isolation):
config = {"tool": {"hatch": {"build": {"hooks": {"foo": {"require-runtime-dependencies": 9000}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(
TypeError, match="Option `require-runtime-dependencies` of build hook `foo` must be a boolean"
):
_ = builder.config.dependencies
def test_hook_require_runtime_features_not_array(self, isolation):
config = {"tool": {"hatch": {"build": {"hooks": {"foo": {"require-runtime-features": 9000}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(TypeError, match="Option `require-runtime-features` of build hook `foo` must be an array"):
_ = builder.config.dependencies
def test_hook_require_runtime_features_feature_not_string(self, isolation):
config = {"tool": {"hatch": {"build": {"hooks": {"foo": {"require-runtime-features": [9000]}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(
TypeError, match="Feature #1 of option `require-runtime-features` of build hook `foo` must be a string"
):
_ = builder.config.dependencies
def test_hook_require_runtime_features_feature_empty_string(self, isolation):
config = {"tool": {"hatch": {"build": {"hooks": {"foo": {"require-runtime-features": [""]}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(
ValueError,
match="Feature #1 of option `require-runtime-features` of build hook `foo` cannot be an empty string",
):
_ = builder.config.dependencies
def test_hook_require_runtime_features_feature_unknown(self, isolation):
config = {
"project": {"name": "my_app", "version": "0.0.1"},
"tool": {"hatch": {"build": {"hooks": {"foo": {"require-runtime-features": ["foo_bar"]}}}}},
}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(
ValueError,
match=(
"Feature `foo-bar` of option `require-runtime-features` of build hook `foo` is not defined in "
"field `project.optional-dependencies`"
),
):
_ = builder.config.dependencies
def test_hook_dependencies_not_array(self, isolation):
config = {"tool": {"hatch": {"build": {"hooks": {"foo": {"dependencies": 9000}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(TypeError, match="Option `dependencies` of build hook `foo` must be an array"):
_ = builder.config.dependencies
def test_hook_dependency_not_string(self, isolation):
config = {"tool": {"hatch": {"build": {"hooks": {"foo": {"dependencies": [9000]}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(
TypeError, match="Dependency #1 of option `dependencies` of build hook `foo` must be a string"
):
_ = builder.config.dependencies
def test_correct(self, isolation):
config = {
"tool": {
"hatch": {
"build": {
"dependencies": ["bar"],
"hooks": {"foobar": {"dependencies": ["test1"]}},
"targets": {"foo": {"dependencies": ["baz"], "hooks": {"foobar": {"dependencies": ["test2"]}}}},
}
}
}
}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert builder.config.dependencies == ["baz", "bar", "test2"]
def test_require_runtime_dependencies(self, isolation):
config = {
"project": {"name": "my-app", "version": "0.0.1", "dependencies": ["foo"]},
"tool": {
"hatch": {
"build": {
"require-runtime-dependencies": True,
"dependencies": ["bar"],
"hooks": {"foobar": {"dependencies": ["test1"]}},
"targets": {"foo": {"dependencies": ["baz"], "hooks": {"foobar": {"dependencies": ["test2"]}}}},
}
}
},
}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert builder.config.dependencies == ["baz", "bar", "test2", "foo"]
def test_require_runtime_features(self, isolation):
config = {
"project": {"name": "my-app", "version": "0.0.1", "optional-dependencies": {"bar_baz": ["foo"]}},
"tool": {
"hatch": {
"build": {
"require-runtime-features": ["bar-baz"],
"dependencies": ["bar"],
"hooks": {"foobar": {"dependencies": ["test1"]}},
"targets": {"foo": {"dependencies": ["baz"], "hooks": {"foobar": {"dependencies": ["test2"]}}}},
}
}
},
}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert builder.config.dependencies == ["baz", "bar", "test2", "foo"]
def test_env_var_no_hooks(self, isolation):
config = {
"tool": {
"hatch": {
"build": {
"hooks": {
"foo": {"dependencies": ["foo"]},
"bar": {"dependencies": ["bar"]},
"baz": {"dependencies": ["baz"]},
},
}
}
}
}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with EnvVars({BuildEnvVars.NO_HOOKS: "true"}):
assert builder.config.dependencies == []
def test_hooks_enable_by_default(self, isolation):
config = {
"tool": {
"hatch": {
"build": {
"hooks": {
"foo": {"dependencies": ["foo"], "enable-by-default": False},
"bar": {"dependencies": ["bar"], "enable-by-default": False},
"baz": {"dependencies": ["baz"]},
},
}
}
}
}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert builder.config.dependencies == ["baz"]
def test_hooks_env_var_all_override_enable_by_default(self, isolation):
config = {
"tool": {
"hatch": {
"build": {
"hooks": {
"foo": {"dependencies": ["foo"], "enable-by-default": False},
"bar": {"dependencies": ["bar"], "enable-by-default": False},
"baz": {"dependencies": ["baz"]},
},
}
}
}
}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with EnvVars({BuildEnvVars.HOOKS_ENABLE: "true"}):
assert builder.config.dependencies == ["foo", "bar", "baz"]
def test_hooks_env_var_specific_override_enable_by_default(self, isolation):
config = {
"tool": {
"hatch": {
"build": {
"hooks": {
"foo": {"dependencies": ["foo"], "enable-by-default": False},
"bar": {"dependencies": ["bar"], "enable-by-default": False},
"baz": {"dependencies": ["baz"]},
},
}
}
}
}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with EnvVars({f"{BuildEnvVars.HOOK_ENABLE_PREFIX}FOO": "true"}):
assert builder.config.dependencies == ["foo", "baz"]
def test_hooks_require_runtime_dependencies(self, isolation):
config = {
"project": {"name": "my-app", "version": "0.0.1", "dependencies": ["baz"]},
"tool": {
"hatch": {
"build": {
"hooks": {
"foo": {"dependencies": ["foo"], "enable-by-default": False},
"bar": {"dependencies": ["bar"], "require-runtime-dependencies": True},
},
}
}
},
}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert builder.config.dependencies == ["bar", "baz"]
def test_hooks_require_runtime_dependencies_disabled(self, isolation):
config = {
"project": {"name": "my-app", "version": "0.0.1", "dependencies": ["baz"]},
"tool": {
"hatch": {
"build": {
"hooks": {
"foo": {
"dependencies": ["foo"],
"enable-by-default": False,
"require-runtime-dependencies": True,
},
"bar": {"dependencies": ["bar"]},
},
}
}
},
}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert builder.config.dependencies == ["bar"]
def test_hooks_require_runtime_features(self, isolation):
config = {
"project": {"name": "my-app", "version": "0.0.1", "optional-dependencies": {"foo_bar": ["baz"]}},
"tool": {
"hatch": {
"build": {
"hooks": {
"foo": {"dependencies": ["foo"], "enable-by-default": False},
"bar": {"dependencies": ["bar"], "require-runtime-features": ["foo-bar"]},
},
}
}
},
}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert builder.config.dependencies == ["bar", "baz"]
def test_hooks_require_runtime_features_disabled(self, isolation):
config = {
"project": {"name": "my-app", "version": "0.0.1", "optional-dependencies": {"foo_bar": ["baz"]}},
"tool": {
"hatch": {
"build": {
"hooks": {
"foo": {
"dependencies": ["foo"],
"enable-by-default": False,
"require-runtime-features": ["foo-bar"],
},
"bar": {"dependencies": ["bar"]},
},
}
}
},
}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert builder.config.dependencies == ["bar"]
| TestDependencies |
python | has2k1__plotnine | plotnine/themes/theme_linedraw.py | {
"start": 136,
"end": 1350
} | class ____(theme_bw):
"""
A theme with only black lines of various widths on white backgrounds
Parameters
----------
base_size : int
Base font size. All text sizes are a scaled versions of
the base font size.
base_family : str
Base font family. If `None`, use [](`plotnine.options.base_family`).
"""
def __init__(self, base_size=11, base_family=None):
super().__init__(base_size, base_family)
self += theme(
axis_text=element_text(color="black", size=base_size * 0.8),
axis_ticks=element_line(color="black", size=0.5),
axis_ticks_minor=element_blank(),
legend_key=element_rect(color="black", size=0.72),
panel_background=element_rect(fill="white"),
panel_border=element_rect(fill="none", color="black", size=1),
panel_grid_major=element_line(color="black", size=0.1),
panel_grid_minor=element_line(color="black", size=0.02),
strip_background=element_rect(fill="black", color="black", size=1),
strip_text_x=element_text(color="white"),
strip_text_y=element_text(color="white", angle=-90),
)
| theme_linedraw |
python | PrefectHQ__prefect | tests/blocks/test_core.py | {
"start": 87751,
"end": 87804
} | class ____(BaseModel):
block: BaseBlock
| ParentModel |
python | django__django | django/db/models/functions/text.py | {
"start": 2032,
"end": 3776
} | class ____(Func):
"""
Concatenate two arguments together. This is used by `Concat` because not
all backend databases support more than two arguments.
"""
function = "CONCAT"
def pipes_concat_sql(self, compiler, connection, **extra_context):
coalesced = self.coalesce()
return super(ConcatPair, coalesced).as_sql(
compiler,
connection,
template="(%(expressions)s)",
arg_joiner=" || ",
**extra_context,
)
as_sqlite = pipes_concat_sql
def as_postgresql(self, compiler, connection, **extra_context):
c = self.copy()
c.set_source_expressions(
[
(
expression
if isinstance(expression.output_field, (CharField, TextField))
else Cast(expression, TextField())
)
for expression in c.get_source_expressions()
]
)
return c.pipes_concat_sql(compiler, connection, **extra_context)
def as_mysql(self, compiler, connection, **extra_context):
# Use CONCAT_WS with an empty separator so that NULLs are ignored.
return super().as_sql(
compiler,
connection,
function="CONCAT_WS",
template="%(function)s('', %(expressions)s)",
**extra_context,
)
def coalesce(self):
# null on either side results in null for expression, wrap with
# coalesce
c = self.copy()
c.set_source_expressions(
[
Coalesce(expression, Value(""))
for expression in c.get_source_expressions()
]
)
return c
| ConcatPair |
python | pytorch__pytorch | tools/testing/target_determination/heuristics/historical_class_failure_correlation.py | {
"start": 554,
"end": 3027
} | class ____(HeuristicInterface):
"""
This heuristic prioritizes test classes that have historically tended to fail
when the files edited by current PR were modified.
"""
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
def get_prediction_confidence(self, tests: list[str]) -> TestPrioritizations:
ratings = _get_ratings_for_tests(set(tests))
test_ratings = {
TestRun(k): v for (k, v) in ratings.items() if TestRun(k).test_file in tests
}
return TestPrioritizations(tests, normalize_ratings(test_ratings, 0.25))
def _get_historical_test_class_correlations() -> dict[str, dict[str, float]]:
path = REPO_ROOT / ADDITIONAL_CI_FILES_FOLDER / TEST_CLASS_RATINGS_FILE
if not os.path.exists(path):
print(f"could not find path {path}")
return {}
with open(path) as f:
test_class_correlations = cast(dict[str, dict[str, float]], json.load(f))
return test_class_correlations
def _get_ratings_for_tests(
tests_to_run: set[str],
) -> dict[str, float]:
# Get the files edited
try:
changed_files = query_changed_files()
except Exception as e:
warn(f"Can't query changed test files due to {e}")
return {}
test_class_correlations = _get_historical_test_class_correlations()
if not test_class_correlations:
return {}
# Find the tests failures that are correlated with the edited files.
# Filter the list to only include tests we want to run.
ratings: dict[str, float] = defaultdict(float)
for file in changed_files:
for qualified_test_class, score in test_class_correlations.get(
file, {}
).items():
# qualified_test_class looks like "test_file::test_class"
test_file, test_class = qualified_test_class.split("::")
if test_file in tests_to_run:
ratings[qualified_test_class] += score
return ratings
def _rank_correlated_tests(
tests_to_run: list[str],
) -> list[str]:
# Find the tests failures that are correlated with the edited files.
# Filter the list to only include tests we want to run.
# pyrefly: ignore [bad-assignment]
tests_to_run = set(tests_to_run)
# pyrefly: ignore [bad-argument-type]
ratings = _get_ratings_for_tests(tests_to_run)
prioritize = sorted(ratings, key=lambda x: -ratings[x])
return prioritize
| HistoricalClassFailurCorrelation |
python | getsentry__sentry | tests/sentry/workflow_engine/handlers/condition/test_every_event.py | {
"start": 225,
"end": 663
} | class ____(ConditionTestCase):
payload = {"id": EveryEventCondition.id}
def test_dual_write(self) -> None:
# we will create the object but not write to the db
dcg = self.create_data_condition_group()
dc = self.translate_to_data_condition(self.payload, dcg)
assert dc.type == Condition.EVERY_EVENT
assert dc.comparison is True
assert dc.condition_result is True
| TestEveryEventCondition |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/base_env.py | {
"start": 6944,
"end": 10541
} | class ____(Mapping):
"""
Contains the data a batch of Agents collected when their episode
terminated. All Agents present in the TerminalSteps have ended their
episode.
- obs is a list of numpy arrays observations collected by the batch of
agent. Each obs has one extra dimension compared to DecisionStep: the
first dimension of the array corresponds to the batch size of the batch.
- reward is a float vector of length batch size. Corresponds to the
rewards collected by each agent since the last simulation step.
- interrupted is an array of booleans of length batch size. Is true if the
associated Agent was interrupted since the last decision step. For example, if the
Agent reached the maximum number of steps for the episode.
- agent_id is an int vector of length batch size containing unique
identifier for the corresponding Agent. This is used to track Agents
across simulation steps.
"""
def __init__(self, obs, reward, interrupted, agent_id, group_id, group_reward):
self.obs: List[np.ndarray] = obs
self.reward: np.ndarray = reward
self.interrupted: np.ndarray = interrupted
self.agent_id: np.ndarray = agent_id
self.group_id: np.ndarray = group_id
self.group_reward: np.ndarray = group_reward
self._agent_id_to_index: Optional[Dict[AgentId, int]] = None
@property
def agent_id_to_index(self) -> Dict[AgentId, int]:
"""
:returns: A Dict that maps agent_id to the index of those agents in
this TerminalSteps.
"""
if self._agent_id_to_index is None:
self._agent_id_to_index = {}
for a_idx, a_id in enumerate(self.agent_id):
self._agent_id_to_index[a_id] = a_idx
return self._agent_id_to_index
def __len__(self) -> int:
return len(self.agent_id)
def __getitem__(self, agent_id: AgentId) -> TerminalStep:
"""
returns the TerminalStep for a specific agent.
:param agent_id: The id of the agent
:returns: obs, reward, done, agent_id and optional action mask for a
specific agent
"""
if agent_id not in self.agent_id_to_index:
raise KeyError(f"agent_id {agent_id} is not present in the TerminalSteps")
agent_index = self._agent_id_to_index[agent_id] # type: ignore
agent_obs = []
for batched_obs in self.obs:
agent_obs.append(batched_obs[agent_index])
group_id = self.group_id[agent_index]
return TerminalStep(
obs=agent_obs,
reward=self.reward[agent_index],
interrupted=self.interrupted[agent_index],
agent_id=agent_id,
group_id=group_id,
group_reward=self.group_reward[agent_index],
)
def __iter__(self) -> Iterator[Any]:
yield from self.agent_id
@staticmethod
def empty(spec: "BehaviorSpec") -> "TerminalSteps":
"""
Returns an empty TerminalSteps.
:param spec: The BehaviorSpec for the TerminalSteps
"""
obs: List[np.ndarray] = []
for sen_spec in spec.observation_specs:
obs += [np.zeros((0,) + sen_spec.shape, dtype=np.float32)]
return TerminalSteps(
obs=obs,
reward=np.zeros(0, dtype=np.float32),
interrupted=np.zeros(0, dtype=bool),
agent_id=np.zeros(0, dtype=np.int32),
group_id=np.zeros(0, dtype=np.int32),
group_reward=np.zeros(0, dtype=np.float32),
)
| TerminalSteps |
python | scipy__scipy | scipy/interpolate/_interpolate.py | {
"start": 68291,
"end": 81898
} | class ____:
"""
Piecewise tensor product polynomial
The value at point ``xp = (x', y', z', ...)`` is evaluated by first
computing the interval indices `i` such that::
x[0][i[0]] <= x' < x[0][i[0]+1]
x[1][i[1]] <= y' < x[1][i[1]+1]
...
and then computing::
S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
* (xp[0] - x[0][i[0]])**m0
* ...
* (xp[n] - x[n][i[n]])**mn
for m0 in range(k[0]+1)
...
for mn in range(k[n]+1))
where ``k[j]`` is the degree of the polynomial in dimension j. This
representation is the piecewise multivariate power basis.
Parameters
----------
c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
Polynomial coefficients, with polynomial order `kj` and
`mj+1` intervals for each dimension `j`.
x : ndim-tuple of ndarrays, shapes (mj+1,)
Polynomial breakpoints for each dimension. These must be
sorted in increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : tuple of ndarrays
Breakpoints.
c : ndarray
Coefficients of the polynomials.
Methods
-------
__call__
derivative
antiderivative
integrate
integrate_1d
construct_fast
See also
--------
PPoly : piecewise polynomials in 1D
Notes
-----
High-order polynomials in the power basis can be numerically
unstable.
"""
def __init__(self, c, x, extrapolate=None):
self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
self.c = np.asarray(c)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
ndim = len(self.x)
if any(v.ndim != 1 for v in self.x):
raise ValueError("x arrays must all be 1-dimensional")
if any(v.size < 2 for v in self.x):
raise ValueError("x arrays must all contain at least 2 points")
if c.ndim < 2*ndim:
raise ValueError("c must have at least 2*len(x) dimensions")
if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
raise ValueError("x-coordinates are not in increasing order")
if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
raise ValueError("x and c do not agree on the number of intervals")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
@classmethod
def construct_fast(cls, c, x, extrapolate=None):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
``c`` and ``x`` must be arrays of the correct shape and type. The
``c`` array can only be of dtypes float and complex, and ``x``
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex128
else:
return np.float64
def _ensure_c_contiguous(self):
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
if not isinstance(self.x, tuple):
self.x = tuple(self.x)
def __call__(self, x, nu=None, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
nu : tuple, optional
Orders of derivatives to evaluate. Each must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
x = _ndim_coords_from_arrays(x)
x_shape = x.shape
x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float64)
if nu is None:
nu = np.zeros((ndim,), dtype=np.intc)
else:
nu = np.asarray(nu, dtype=np.intc)
if nu.ndim != 1 or nu.shape[0] != ndim:
raise ValueError("invalid number of derivative orders nu")
dim1 = prod(self.c.shape[:ndim])
dim2 = prod(self.c.shape[ndim:2*ndim])
dim3 = prod(self.c.shape[2*ndim:])
ks = np.array(self.c.shape[:ndim], dtype=np.intc)
out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
self.x,
ks,
x,
nu,
bool(extrapolate),
out)
return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
def _derivative_inplace(self, nu, axis):
"""
Compute 1-D derivative along a selected dimension in-place
May result to non-contiguous c array.
"""
if nu < 0:
return self._antiderivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
# reduce order
if nu == 0:
# noop
return
else:
sl = [slice(None)]*ndim
sl[axis] = slice(None, -nu, None)
c2 = self.c[tuple(sl)]
if c2.shape[axis] == 0:
# derivative of order 0 is zero
shp = list(c2.shape)
shp[axis] = 1
c2 = np.zeros(shp, dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
sl = [None]*c2.ndim
sl[axis] = slice(None)
c2 *= factor[tuple(sl)]
self.c = c2
def _antiderivative_inplace(self, nu, axis):
"""
Compute 1-D antiderivative along a selected dimension
May result to non-contiguous c array.
"""
if nu <= 0:
return self._derivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
perm = list(range(ndim))
perm[0], perm[axis] = perm[axis], perm[0]
perm = perm + list(range(ndim, self.c.ndim))
c = self.c.transpose(perm)
c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
dtype=c.dtype)
c2[:-nu] = c
# divide by the correct rising factorials
factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
perm2 = list(range(c2.ndim))
perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
c2 = c2.transpose(perm2)
c2 = c2.copy()
_ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
self.x[axis], nu-1)
c2 = c2.transpose(perm2)
c2 = c2.transpose(perm)
# Done
self.c = c2
def derivative(self, nu):
    """
    Construct a new piecewise polynomial representing the derivative.

    Parameters
    ----------
    nu : ndim-tuple of int
        Derivative orders, one per dimension. A negative entry requests
        the antiderivative along that dimension instead.

    Returns
    -------
    pp : NdPPoly
        Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
        representing the derivative of this polynomial.

    Notes
    -----
    Differentiation is applied segment by segment, so breakpoints where
    the polynomial is not differentiable are not treated specially. In
    each dimension the intervals are half-open, ``[a, b)``, except for
    the last one, which is closed ``[a, b]``.
    """
    # Work on a copy of the coefficients so this polynomial is untouched.
    result = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
    for dim, order in enumerate(nu):
        result._derivative_inplace(order, dim)
    result._ensure_c_contiguous()
    return result
def antiderivative(self, nu):
    """
    Construct a new piecewise polynomial representing the antiderivative.

    Antiderivative is also the indefinite integral of the function,
    and derivative is its inverse operation.

    Parameters
    ----------
    nu : ndim-tuple of int
        Antiderivative orders, one per dimension. A negative entry
        requests the derivative along that dimension instead.

    Returns
    -------
    pp : PPoly
        Piecewise polynomial of order k2 = k + n representing
        the antiderivative of this polynomial.

    Notes
    -----
    The antiderivative returned by this function is continuous and
    continuously differentiable to order n-1, up to floating point
    rounding error.
    """
    # Operate on a coefficient copy so the original polynomial survives.
    result = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
    for dim, order in enumerate(nu):
        result._antiderivative_inplace(order, dim)
    result._ensure_c_contiguous()
    return result
def integrate_1d(self, a, b, axis, extrapolate=None):
    r"""
    Compute NdPPoly representation for one dimensional definite integral

    The result is a piecewise polynomial representing the integral:

    .. math::

        p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)

    where the dimension integrated over is specified with the
    `axis` parameter.

    Parameters
    ----------
    a, b : float
        Lower and upper bound for integration.
    axis : int
        Dimension over which to compute the 1-D integrals
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
        and last intervals, or to return NaNs.

    Returns
    -------
    ig : NdPPoly or array-like
        Definite integral of the piecewise polynomial over [a, b].
        If the polynomial was 1D, an array is returned,
        otherwise, an NdPPoly object.
    """
    if extrapolate is None:
        extrapolate = self.extrapolate
    else:
        extrapolate = bool(extrapolate)

    ndim = len(self.x)
    axis = int(axis) % ndim

    # Permute so the coefficient dimension and the interval dimension of
    # the integrated variable lead: (k_axis, m_axis, everything else).
    coeff_dim = axis
    interval_dim = ndim + axis
    rest = [d for d in range(self.c.ndim)
            if d not in (coeff_dim, interval_dim)]
    c = self.c.transpose([coeff_dim, interval_dim] + rest)

    # Delegate to the 1-D machinery, flattening the trailing dimensions.
    p1d = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
                               self.x[axis],
                               extrapolate=extrapolate)
    out = p1d.integrate(a, b, extrapolate=extrapolate)

    # Construct result
    if ndim == 1:
        # No dimensions left to be piecewise in: return a plain array.
        return out.reshape(c.shape[2:])
    remaining_x = self.x[:axis] + self.x[axis+1:]
    return self.construct_fast(out.reshape(c.shape[2:]), remaining_x,
                               extrapolate=extrapolate)
def integrate(self, ranges, extrapolate=None):
    """
    Compute a definite integral over a piecewise polynomial.

    Parameters
    ----------
    ranges : ndim-tuple of 2-tuples float
        Sequence of lower and upper bounds for each dimension,
        ``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
        and last intervals, or to return NaNs.

    Returns
    -------
    ig : array_like
        Definite integral of the piecewise polynomial over
        [a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
    """
    ndim = len(self.x)

    if extrapolate is None:
        extrapolate = self.extrapolate
    else:
        extrapolate = bool(extrapolate)

    if not hasattr(ranges, '__len__') or len(ranges) != ndim:
        raise ValueError("Range not a sequence of correct length")

    self._ensure_c_contiguous()

    # Integrate out one dimension at a time via the 1-D PPoly routine,
    # which consumes the two leading dimensions of `c` each pass.
    c = self.c
    for n, (a, b) in enumerate(ranges):
        # After n dimensions have been consumed, the interval dimension
        # of dimension `n` sits at position ndim - n; move it next to
        # its coefficient dimension (position 0).
        order = list(range(c.ndim))
        order.insert(1, order.pop(ndim - n))
        c = c.transpose(order)

        p1d = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
        out = p1d.integrate(a, b, extrapolate=extrapolate)
        c = out.reshape(c.shape[2:])

    return c
| NdPPoly |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/managed_kafka.py | {
"start": 1909,
"end": 2118
} | class ____(BaseGoogleLink):
"""Helper class for constructing Apache Kafka Topic link."""
name = "Apache Kafka Topic"
key = "topic_conf"
format_str = MANAGED_KAFKA_TOPIC_LINK
| ApacheKafkaTopicLink |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-salesforce/source_salesforce/streams.py | {
"start": 19254,
"end": 21264
} | class ____(StreamSlicer):
def __init__(
self,
batched_substream: BatchedSubStream,
sync_mode: SyncMode,
cursor_field: Optional[List[str]],
stream_state: Optional[Mapping[str, Any]],
parend_id_field: str,
) -> None:
self._batched_substream = batched_substream
self._sync_mode = sync_mode
self._cursor_field = cursor_field
self._stream_state = stream_state
self._parend_id_field = parend_id_field
def get_request_params(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, Any]:
return {}
def get_request_headers(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, Any]:
return {}
def get_request_body_data(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Union[Mapping[str, Any], str]:
return {}
def get_request_body_json(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, Any]:
return {}
def stream_slices(self) -> Iterable[StreamSlice]:
for batched_parents in self._batched_substream.stream_slices(
sync_mode=self._sync_mode, cursor_field=self._cursor_field, stream_state=self._stream_state
):
yield StreamSlice(
partition={"parents": [parent[self._parend_id_field] for parent in batched_parents["parents"]]}, cursor_slice={}
)
| BulkParentStreamStreamSlicer |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 53827,
"end": 54809
} | class ____(Operation):
def call(self, x, y):
return backend.numpy.bitwise_left_shift(x, y)
def compute_output_spec(self, x, y):
if isinstance(y, int):
dtype = x.dtype
else:
dtype = dtypes.result_type(x.dtype, y.dtype)
return KerasTensor(x.shape, dtype=dtype)
@keras_export(
["keras.ops.bitwise_left_shift", "keras.ops.numpy.bitwise_left_shift"]
)
def bitwise_left_shift(x, y):
"""Shift the bits of an integer to the left.
Bits are shifted to the left by appending `y` 0s at the right of `x`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x` by `2**y`.
Args:
x: Input integer tensor.
y: Input integer tensor.
Returns:
Result tensor.
"""
if any_symbolic_tensors((x, y)):
return BitwiseLeftShift().symbolic_call(x, y)
return backend.numpy.bitwise_left_shift(x, y)
| BitwiseLeftShift |
python | kamyu104__LeetCode-Solutions | Python/number-of-visible-people-in-a-queue.py | {
"start": 564,
"end": 1118
} | class ____(object):
def canSeePersonsCount(self, heights):
"""
:type heights: List[int]
:rtype: List[int]
"""
result = [0]*len(heights)
stk = []
for i in reversed(xrange(len(heights))):
cnt = 0
while stk and heights[stk[-1]] < heights[i]:
stk.pop()
cnt += 1
result[i] = cnt+1 if stk else cnt
if stk and heights[stk[-1]] == heights[i]:
stk.pop()
stk.append(i)
return result
| Solution2 |
python | apache__thrift | lib/py/src/transport/TSocket.py | {
"start": 1641,
"end": 7927
} | class ____(TSocketBase):
"""Socket implementation of TTransport base."""
def __init__(self, host='localhost', port=9090, unix_socket=None,
socket_family=socket.AF_UNSPEC,
socket_keepalive=False):
"""Initialize a TSocket
@param host(str) The host to connect to.
@param port(int) The (TCP) port to connect to.
@param unix_socket(str) The filename of a unix socket to connect to.
(host and port will be ignored.)
@param socket_family(int) The socket family to use with this socket.
@param socket_keepalive(bool) enable TCP keepalive, default off.
"""
self.host = host
self.port = port
self.handle = None
self._unix_socket = unix_socket
self._timeout = None
self._socket_family = socket_family
self._socket_keepalive = socket_keepalive
def setHandle(self, h):
self.handle = h
def isOpen(self):
if self.handle is None:
return False
# this lets us cheaply see if the other end of the socket is still
# connected. if disconnected, we'll get EOF back (expressed as zero
# bytes of data) otherwise we'll get one byte or an error indicating
# we'd have to block for data.
#
# note that we're not doing this with socket.MSG_DONTWAIT because 1)
# it's linux-specific and 2) gevent-patched sockets hide EAGAIN from us
# when timeout is non-zero.
original_timeout = self.handle.gettimeout()
try:
self.handle.settimeout(0)
try:
peeked_bytes = self.handle.recv(1, socket.MSG_PEEK)
# the length will be zero if we got EOF (indicating connection closed)
if len(peeked_bytes) == 1:
return True
except (socket.error, OSError) as exc: # on modern python this is just BlockingIOError
if exc.errno in (errno.EWOULDBLOCK, errno.EAGAIN):
return True
except ValueError:
# SSLSocket fails on recv with non-zero flags; fallback to the old behavior
return True
finally:
self.handle.settimeout(original_timeout)
# The caller may assume that after isOpen() returns False, calling close()
# is not needed, so it is safer to close the socket here.
self.close()
return False
def setTimeout(self, ms):
if ms is None:
self._timeout = None
else:
self._timeout = ms / 1000.0
if self.handle is not None:
self.handle.settimeout(self._timeout)
def _do_open(self, family, socktype):
return socket.socket(family, socktype)
@property
def _address(self):
return self._unix_socket if self._unix_socket else '%s:%d' % (self.host, self.port)
def open(self):
if self.handle:
raise TTransportException(type=TTransportException.ALREADY_OPEN, message="already open")
try:
addrs = self._resolveAddr()
except socket.gaierror as gai:
msg = 'failed to resolve sockaddr for ' + str(self._address)
logger.exception(msg)
raise TTransportException(type=TTransportException.NOT_OPEN, message=msg, inner=gai)
# Preserve the last exception to report if all addresses fail.
last_exc = None
for family, socktype, _, _, sockaddr in addrs:
handle = self._do_open(family, socktype)
# TCP keep-alive
if self._socket_keepalive:
handle.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
handle.settimeout(self._timeout)
try:
handle.connect(sockaddr)
self.handle = handle
return
except socket.error as e:
handle.close()
logger.info('Could not connect to %s', sockaddr, exc_info=True)
last_exc = e
msg = 'Could not connect to any of %s' % list(map(lambda a: a[4],
addrs))
logger.error(msg)
raise TTransportException(type=TTransportException.NOT_OPEN, message=msg, inner=last_exc)
def read(self, sz):
try:
buff = self.handle.recv(sz)
# TODO: remove socket.timeout when 3.10 becomes the earliest version of python supported.
except (socket.timeout, TimeoutError) as e:
raise TTransportException(type=TTransportException.TIMED_OUT, message="read timeout", inner=e)
except socket.error as e:
if (e.args[0] == errno.ECONNRESET and
(sys.platform == 'darwin' or sys.platform.startswith('freebsd'))):
# freebsd and Mach don't follow POSIX semantic of recv
# and fail with ECONNRESET if peer performed shutdown.
# See corresponding comment and code in TSocket::read()
# in lib/cpp/src/transport/TSocket.cpp.
self.close()
# Trigger the check to raise the END_OF_FILE exception below.
buff = ''
else:
raise TTransportException(message="unexpected exception", inner=e)
if len(buff) == 0:
raise TTransportException(type=TTransportException.END_OF_FILE,
message='TSocket read 0 bytes')
return buff
def write(self, buff):
if not self.handle:
raise TTransportException(type=TTransportException.NOT_OPEN,
message='Transport not open')
sent = 0
have = len(buff)
while sent < have:
try:
plus = self.handle.send(buff)
if plus == 0:
raise TTransportException(type=TTransportException.END_OF_FILE,
message='TSocket sent 0 bytes')
sent += plus
buff = buff[plus:]
except socket.error as e:
raise TTransportException(message="unexpected exception", inner=e)
def flush(self):
pass
| TSocket |
python | google__jax | jax/_src/hijax.py | {
"start": 7173,
"end": 8051
} | class ____(metaclass=_BoxMeta): # noqa: F811
_val = None # always clobbered by __new__, but pytype likes this
# We want `Box(x)` to bind a primitive, so we override __new__ and provide a
# raw `_new` method below.
def __new__(cls, init_val=None):
(), treedef = tree_flatten(None)
box = new_box_p.bind(treedef=treedef)
box.set(init_val)
return box
@classmethod
def _new(cls, init_val):
new = super().__new__(cls)
new._val = init_val
return new
def get(self):
return box_get(self)
def set(self, val):
box_set(self, val)
def cur_qdd(self):
return self.type_state()
@property
def ty(self):
return BoxTy()
def type_state(self):
leaves, treedef = tree_flatten(self._val)
leaf_avals = tuple(map(core.typeof, leaves))
return BoxTypeState(leaf_avals, treedef)
register_hitype(Box, lambda b: b.ty)
| Box |
python | tensorflow__tensorflow | tensorflow/python/distribute/experimental/mirrored_strategy.py | {
"start": 1277,
"end": 4313
} | class ____(distribute_lib.Strategy):
"""Synchronous training across multiple replicas on one machine.
This strategy is typically used for training on one machine with multiple
accelerators (GPUs/TPUs).
For example, a variable created under a `MirroredStrategy` is a distributed
variable with layout replicated on each dimension. The variables will be
placed on the `mesh` that is specified in the __init__.
"""
def __init__(self, devices=None, cross_device_ops=None, *, mesh=None):
"""Synchronous training across multiple replicas on one machine.
Args:
devices: a list of device strings, such as ['/gpu:0', '/gpu:1']. If both
`mesh` and `devices` are None, all the available GPU/TPU will be used.
If no accelerators are found, CPU is used.
cross_device_ops: optional, a descendant of `CrossDeviceOps`. The value is
ignored at the moment, and support will be added later.
mesh: optional DTensor mesh for the computation. Note that either `mesh`
or `devices` should be provided, and not both. The mesh should be 1D,
and will be used to split the input data among that dimension.
"""
self._validate_init_args(mesh, devices)
if not mesh:
mesh = self._build_mesh_from_device_list(devices)
extended = dtensor_strategy_extended.DTensorStrategyExtended(
container_strategy=self, mesh=mesh)
super().__init__(extended)
self._mesh = mesh
self._devices = devices
@classmethod
def _validate_init_args(cls, mesh, devices):
if mesh and devices:
raise ValueError('Mesh and devices can not be provided at the same time. '
f'received mesh = {mesh}, devices = {devices}')
# For mirrored strategy, the mesh should be 1D, and only contains a batch
# dimension, we will use that dimension to shard the inputs.
if mesh and len(mesh.shape()) != 1:
raise ValueError('The mesh for MirroredStrategy must be 1D, received: '
f'{len(mesh.shape())}D')
@classmethod
def _build_mesh_from_device_list(cls, devices):
if devices:
device_type = tf_device.DeviceSpec.from_string(devices[0]).device_type
dtensor_util.initialize_accelerator_system_once(device_type)
mesh = mesh_util.create_mesh(
mesh_dims=[(dtensor_util.DEFAULT_BATCH_MESH_DIM_NAME, len(devices))],
devices=devices)
else:
# Trying to detect if there is any GPU/TPUs attached.
device_type = d_config.preferred_device_type()
devices = d_config.local_devices(device_type)
dtensor_util.initialize_accelerator_system_once(device_type)
mesh = mesh_util.create_mesh(
mesh_dims=[(dtensor_util.DEFAULT_BATCH_MESH_DIM_NAME, len(devices))],
device_type=device_type)
return mesh
def reduce(self, reduce_op, value, axis):
return dtensor_util.dtensor_reduce(self, reduce_op, value, axis)
@property
def mesh(self):
"""Returns the mesh used by the strategy."""
return self._mesh
| MirroredStrategy |
python | gevent__gevent | src/gevent/tests/test__util.py | {
"start": 7632,
"end": 10274
} | class ____(unittest.TestCase):
def test_time_sleep(self):
# A real blocking function
from time import sleep
# No time given, we detect the failure to switch immediately
with self.assertRaises(util._FailedToSwitch) as exc:
with util.assert_switches():
sleep(0.001)
message = str(exc.exception)
self.assertIn('To any greenlet in', message)
# Supply a max blocking allowed and exceed it
with self.assertRaises(util._FailedToSwitch):
with util.assert_switches(0.001):
sleep(0.1)
# Supply a max blocking allowed, and exit before that happens,
# but don't switch to the hub as requested
with self.assertRaises(util._FailedToSwitch) as exc:
with util.assert_switches(0.001, hub_only=True):
sleep(0)
message = str(exc.exception)
self.assertIn('To the hub in', message)
self.assertIn('(max allowed 0.0010 seconds)', message)
# Supply a max blocking allowed, and exit before that happens,
# and allow any switch (or no switch).
# Note that we need to use a relatively long duration;
# sleep(0) on Windows can actually take a substantial amount of time
# sometimes (more than 0.001s)
with util.assert_switches(1.0, hub_only=False):
sleep(0)
def test_no_switches_no_function(self):
# No blocking time given, no switch performed: exception
with self.assertRaises(util._FailedToSwitch):
with util.assert_switches():
pass
# blocking time given, for all greenlets, no switch performed: nothing
with util.assert_switches(max_blocking_time=1, hub_only=False):
pass
def test_exception_not_supressed(self):
with self.assertRaises(NameError):
with util.assert_switches():
raise NameError()
def test_nested(self):
from greenlet import gettrace
with util.assert_switches() as outer:
self.assertEqual(gettrace(), outer.tracer)
self.assertIsNotNone(outer.tracer.active_greenlet)
with util.assert_switches() as inner:
self.assertEqual(gettrace(), inner.tracer)
self.assertEqual(inner.tracer.previous_trace_function, outer.tracer)
inner.tracer('switch', (self, self))
self.assertIs(self, inner.tracer.active_greenlet)
self.assertIs(self, outer.tracer.active_greenlet)
self.assertEqual(gettrace(), outer.tracer)
| TestAssertSwitches |
python | getsentry__sentry | tests/sentry/web/frontend/test_data_secrecy_error.py | {
"start": 273,
"end": 2314
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.owner = self.create_user()
self.organization = self.create_organization(name="foo", owner=self.owner)
with assume_test_silo_mode(SiloMode.REGION):
self.organization.flags.prevent_superuser_access = True
self.organization.save()
def test_data_secrecy_renders_for_superuser_access(self) -> None:
user = self.create_user(is_superuser=True, is_staff=True)
self.create_identity_provider(type="dummy", external_id="1234")
self.login_as(user, organization_id=self.organization.id, superuser=True)
path = reverse("sentry-organization-issue-list", args=[self.organization.slug])
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/data-secrecy.html")
@override_options({"staff.ga-rollout": True})
def test_data_secrecy_does_not_render_for_staff_access(self) -> None:
user = self.create_user(is_superuser=True, is_staff=True)
self.create_identity_provider(type="dummy", external_id="1234")
self.login_as(user, organization_id=self.organization.id, staff=True)
path = reverse("sentry-organization-issue-list", args=[self.organization.slug])
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateNotUsed("sentry/data-secrecy.html")
def test_data_secrecy_does_not_render_for_regular_user(self) -> None:
user = self.create_user(is_superuser=False, is_staff=False)
self.create_member(user=user, organization=self.organization)
self.create_identity_provider(type="dummy", external_id="1234")
self.login_as(user, organization_id=self.organization.id)
path = reverse("sentry-organization-issue-list", args=[self.organization.slug])
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateNotUsed("sentry/data-secrecy.html")
| DataSecrecyErrorTest |
python | keon__algorithms | tests/test_strings.py | {
"start": 12502,
"end": 13217
} | class ____(unittest.TestCase):
"""[summary]
Test for the file validate_coordinates.py
Arguments:
unittest {[type]} -- [description]
"""
def test_valid(self):
valid_coordinates = ["-23, 25", "4, -3", "90, 180", "-90, -180"]
for coordinate in valid_coordinates:
self.assertTrue(is_valid_coordinates_0(coordinate))
def test_invalid(self):
invalid_coordinates = ["23.234, - 23.4234", "99.234, 12.324",
"6.325624, 43.34345.345", "0, 1,2",
"23.245, 1e1"]
for coordinate in invalid_coordinates:
self.assertFalse(is_valid_coordinates_0(coordinate))
| TestValidateCoordinates |
python | numpy__numpy | numpy/_core/tests/test__exceptions.py | {
"start": 1919,
"end": 2209
} | class ____:
def test_pickling(self):
""" Test that _UFuncNoLoopError can be pickled """
assert isinstance(pickle.dumps(_UFuncNoLoopError), bytes)
@pytest.mark.parametrize("args", [
(2, 1, None),
(2, 1, "test_prefix"),
("test message",),
])
| TestUFuncNoLoopError |
python | facebookresearch__faiss | tests/test_build_blocks.py | {
"start": 5426,
"end": 6999
} | class ____(unittest.TestCase):
def test_0s(self):
rs = np.random.RandomState(123)
m = rs.rand(40, 20).astype('float32')
m[5:10] = 0
comments = faiss.MatrixStats(m).comments
assert 'has 5 copies' in comments
assert '5 null vectors' in comments
def test_copies(self):
rs = np.random.RandomState(123)
m = rs.rand(40, 20).astype('float32')
m[::2] = m[1::2]
comments = faiss.MatrixStats(m).comments
assert '20 vectors are distinct' in comments
def test_dead_dims(self):
rs = np.random.RandomState(123)
m = rs.rand(40, 20).astype('float32')
m[:, 5:10] = 0
comments = faiss.MatrixStats(m).comments
assert '5 dimensions are constant' in comments
def test_rogue_means(self):
rs = np.random.RandomState(123)
m = rs.rand(40, 20).astype('float32')
m[:, 5:10] += 12345
comments = faiss.MatrixStats(m).comments
assert '5 dimensions are too large wrt. their variance' in comments
def test_normalized(self):
rs = np.random.RandomState(123)
m = rs.rand(40, 20).astype('float32')
faiss.normalize_L2(m)
comments = faiss.MatrixStats(m).comments
assert 'vectors are normalized' in comments
def test_hash(self):
cc = []
for _ in range(2):
rs = np.random.RandomState(123)
m = rs.rand(40, 20).astype('float32')
cc.append(faiss.MatrixStats(m).hash_value)
self.assertTrue(cc[0] == cc[1])
| TestMatrixStats |
python | django__django | tests/i18n/forms.py | {
"start": 496,
"end": 794
} | class ____(forms.ModelForm):
cents_paid = forms.DecimalField(max_digits=4, decimal_places=2, localize=True)
products_delivered = forms.IntegerField(localize=True)
date_added = forms.DateTimeField(localize=True)
class Meta:
model = Company
fields = "__all__"
| CompanyForm |
python | astropy__astropy | astropy/modeling/parameters.py | {
"start": 821,
"end": 3035
} | class ____(ParameterError):
"""Exception in declaration of class-level Parameters."""
def _tofloat(value):
"""Convert a parameter to float or float array."""
if np.iterable(value):
try:
value = np.asanyarray(value, dtype=float)
except (TypeError, ValueError):
# catch arrays with strings or user errors like different
# types of parameters in a parameter set
raise InputParameterError(
f"Parameter of {type(value)} could not be converted to float"
)
elif isinstance(value, Quantity):
# Quantities are fine as is
pass
elif isinstance(value, np.ndarray):
# A scalar/dimensionless array
value = float(value.item())
elif isinstance(value, (numbers.Number, np.number)) and not isinstance(value, bool):
value = float(value)
elif isinstance(value, bool):
raise InputParameterError(
"Expected parameter to be of numerical type, not boolean"
)
else:
raise InputParameterError(
f"Don't know how to convert parameter of {type(value)} to float"
)
return value
# Helpers for implementing operator overloading on Parameter
def _binary_arithmetic_operation(op, reflected=False):
@functools.wraps(op)
def wrapper(self, val):
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
if reflected:
return op(val, self_value)
else:
return op(self_value, val)
return wrapper
def _binary_comparison_operation(op):
@functools.wraps(op)
def wrapper(self, val):
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
return op(self_value, val)
return wrapper
def _unary_arithmetic_operation(op):
@functools.wraps(op)
def wrapper(self):
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
return op(self_value)
return wrapper
| ParameterDefinitionError |
python | spack__spack | lib/spack/spack/main.py | {
"start": 42581,
"end": 42669
} | class ____(Exception):
"""Raised when SpackCommand execution fails."""
| SpackCommandError |
python | getsentry__sentry | tests/sentry/models/test_commit.py | {
"start": 183,
"end": 3543
} | class ____(TestCase):
def test_multiple_matches_basic(self) -> None:
group = self.create_group()
group2 = self.create_group()
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
message=f"Foo Biz\n\nFixes {group.qualified_short_id} {group2.qualified_short_id}",
)
groups = commit.find_referenced_groups()
assert len(groups) == 2
assert group in groups
assert group2 in groups
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
message=f"Foo Biz\n\\Resolved {group.qualified_short_id} {group2.qualified_short_id}",
)
groups = commit.find_referenced_groups()
assert len(groups) == 2
assert group in groups
assert group2 in groups
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
message=f"Foo Biz\n\\Close {group.qualified_short_id} {group2.qualified_short_id}",
)
groups = commit.find_referenced_groups()
assert len(groups) == 2
assert group in groups
assert group2 in groups
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
message=f"Foo Biz\n\nFixes: {group.qualified_short_id}",
)
groups = commit.find_referenced_groups()
assert len(groups) == 1
assert group in groups
def test_multiple_matches_comma_separated(self) -> None:
group = self.create_group()
group2 = self.create_group()
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
message=f"Foo Biz\n\nFixes {group.qualified_short_id}, {group2.qualified_short_id}",
)
groups = commit.find_referenced_groups()
assert len(groups) == 2
assert group in groups
assert group2 in groups
def test_markdown_links(self) -> None:
group = self.create_group()
group2 = self.create_group()
repo = Repository.objects.create(name="example", organization_id=self.group.organization.id)
commit = Commit.objects.create(
key=sha1(uuid4().hex.encode("utf-8")).hexdigest(),
repository_id=repo.id,
organization_id=group.organization.id,
message=f"Foo Biz\n\nFixes [{group.qualified_short_id}](https://sentry.io/), [{group2.qualified_short_id}](https://sentry.io/)",
)
groups = commit.find_referenced_groups()
assert len(groups) == 2
assert group in groups
assert group2 in groups
| FindReferencedGroupsTest |
python | coleifer__peewee | tests/keys.py | {
"start": 14259,
"end": 14331
} | class ____(TestModel):
key = CharField(max_length=16, unique=True)
| FK_A |
python | pytorch__pytorch | test/test_extension_utils.py | {
"start": 151,
"end": 615
} | class ____:
@staticmethod
def is_available():
return True
@staticmethod
def is_autocast_enabled():
return True
@staticmethod
def get_autocast_dtype():
return torch.float16
@staticmethod
def set_autocast_enabled(enable):
pass
@staticmethod
def set_autocast_dtype(dtype):
pass
@staticmethod
def get_amp_supported_dtype():
return [torch.float16]
| DummyPrivateUse1Module |
python | getsentry__sentry | tests/sentry/stacktraces/test_in_app_normalization.py | {
"start": 6033,
"end": 7721
} | class ____(TestCase):
def test_macos_package_in_app_detection(self) -> None:
data: dict[str, Any] = {
"platform": "cocoa",
"debug_meta": {"images": []}, # omitted
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "-[CRLCrashAsyncSafeThread crash]",
"package": "/Users/haza/Library/Developer/Xcode/Archives/2017-06-19/CrashProbe 19-06-2017, 08.53.xcarchive/Products/Applications/CrashProbe.app/Contents/Frameworks/CrashLib.framework/Versions/A/CrashLib",
"instruction_addr": 4295098388,
},
{
"function": "[KSCrash ]",
"package": "/usr/lib/system/libdyld.dylib",
"instruction_addr": 4295098388,
},
]
},
"type": "NSRangeException",
}
]
},
"contexts": {"os": {"version": "10.12.5", "type": "os", "name": "macOS"}},
}
config = load_grouping_config(get_default_grouping_config_dict())
normalize_stacktraces_for_grouping(data, grouping_config=config)
frames = data["exception"]["values"][0]["stacktrace"]["frames"]
assert frames[0]["in_app"] is True
assert frames[1]["in_app"] is False
| MacOSInAppDetectionTest |
python | openai__openai-python | src/openai/types/beta/threads/runs/function_tool_call.py | {
"start": 609,
"end": 920
} | class ____(BaseModel):
id: str
"""The ID of the tool call object."""
function: Function
"""The definition of the function that was called."""
type: Literal["function"]
"""The type of tool call.
This is always going to be `function` for this type of tool call.
"""
| FunctionToolCall |
python | kamyu104__LeetCode-Solutions | Python/binary-tree-vertical-order-traversal.py | {
"start": 73,
"end": 585
} | class ____(object):
def verticalOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
cols = collections.defaultdict(list)
queue = [(root, 0)]
for node, i in queue:
if node:
cols[i].append(node.val)
queue += (node.left, i - 1), (node.right, i + 1)
return [cols[i] for i in xrange(min(cols.keys()),
max(cols.keys()) + 1)] if cols else []
| Solution |
python | imageio__imageio | imageio/plugins/freeimage.py | {
"start": 4858,
"end": 6178
} | class ____(FreeimageFormat):
"""A BMP format based on the Freeimage library.
This format supports grayscale, RGB and RGBA images.
The freeimage plugin requires a `freeimage` binary. If this binary
not available on the system, it can be downloaded manually from
<https://github.com/imageio/imageio-binaries> by either
- the command line script ``imageio_download_bin freeimage``
- the Python method ``imageio.plugins.freeimage.download()``
Parameters for saving
---------------------
compression : bool
Whether to compress the bitmap using RLE when saving. Default False.
It seems this does not always work, but who cares, you should use
PNG anyway.
"""
class Writer(FreeimageFormat.Writer):
def _open(self, flags=0, compression=False):
# Build flags from kwargs
flags = int(flags)
if compression:
flags |= IO_FLAGS.BMP_SAVE_RLE
else:
flags |= IO_FLAGS.BMP_DEFAULT
# Act as usual, but with modified flags
return FreeimageFormat.Writer._open(self, flags)
def _append_data(self, im, meta):
im = image_as_uint(im, bitdepth=8)
return FreeimageFormat.Writer._append_data(self, im, meta)
| FreeimageBmpFormat |
python | numba__numba | numba/core/types/npytypes.py | {
"start": 8517,
"end": 8955
} | class ____(SimpleIteratorType, MutableSequence):
"""
Type class for `ndarray.flat()` objects.
"""
def __init__(self, arrty):
self.array_type = arrty
yield_type = arrty.dtype
self.dtype = yield_type
name = "array.flat({arrayty})".format(arrayty=arrty)
super(NumpyFlatType, self).__init__(name, yield_type)
@property
def key(self):
return self.array_type
| NumpyFlatType |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/super1.py | {
"start": 140,
"end": 260
} | class ____:
@staticmethod
def method1():
pass
def method5(self) -> type:
return ClassA
| ClassA |
python | apache__airflow | devel-common/src/tests_common/_internals/capture_warnings.py | {
"start": 1824,
"end": 4809
} | class ____:
category: str
message: str
filename: str
lineno: int
when: WhenTypeDef
node_id: str | None = None
param_id: str | None = None
@classmethod
def from_record(
cls, warning_message: warnings.WarningMessage, root_path: Path, node_id: str | None, when: WhenTypeDef
) -> CapturedWarning:
category = warning_message.category.__name__
if (category_module := warning_message.category.__module__) != "builtins":
category = f"{category_module}.{category}"
param_id = None
if node_id:
# Remove parametrized part from the test node
node_id, _, param_part = node_id.partition("[")
if param_part:
param_id = param_part[:-1] or None
return cls(
category=category,
message=str(warning_message.message),
node_id=node_id,
param_id=param_id,
when=when,
filename=_resolve_warning_filepath(warning_message.filename, os.fspath(root_path)),
lineno=warning_message.lineno,
)
@classmethod
@contextmanager
def capture_warnings(
cls, when: WhenTypeDef, root_path: Path, node_id: str | None = None
) -> Generator[list[CapturedWarning], None, None]:
captured_records: list[CapturedWarning] = []
try:
with warnings.catch_warnings(record=True) as records:
if not sys.warnoptions:
warnings.filterwarnings("always", category=DeprecationWarning, append=True)
warnings.filterwarnings("always", category=PendingDeprecationWarning, append=True)
yield captured_records
finally:
captured_records.extend(
cls.from_record(rec, root_path=root_path, node_id=node_id, when=when) for rec in records
)
@property
def uniq_key(self):
return self.category, self.message, self.lineno, self.lineno
@property
def group(self) -> str:
"""
Determine in which type of files warning raises.
It depends on ``stacklevel`` in ``warnings.warn``, and if it is not set correctly,
it might refer to another file.
There is an assumption that airflow and all dependencies set it correct eventually.
But we should not use it to filter it out, only for show in different groups.
"""
if "/tests/" in self.filename:
return "tests"
if self.filename.startswith("airflow/"):
return "airflow"
if self.filename.startswith("providers/"):
return "providers"
return "other"
def dumps(self) -> str:
return json.dumps(asdict(self))
@classmethod
def loads(cls, obj: str) -> CapturedWarning:
return cls(**json.loads(obj))
def output(self, count: int) -> str:
return json.dumps({**asdict(self), "group": self.group, "count": count})
| CapturedWarning |
python | astropy__astropy | astropy/utils/masked/tests/test_masked.py | {
"start": 26062,
"end": 26778
} | class ____(MaskedItemTests):
@classmethod
def setup_class(cls):
super().setup_class()
cls.d = np.array(["aa", "bb"])
cls.mask_d = np.array([True, False])
cls.md = Masked(cls.d, cls.mask_d)
# Quantity, Longitude cannot hold strings.
def test_getitem_strings(self):
md = self.md.copy()
md0 = md[0]
assert md0.unmasked == self.d[0]
assert md0.mask
md_all = md[:]
assert_masked_equal(md_all, md)
def test_setitem_strings_np_ma_masked(self):
md = self.md.copy()
md[1] = np.ma.masked
assert_array_equal(md.unmasked, self.d)
assert_array_equal(md.mask, np.ones(2, bool))
| TestMaskedArrayItems |
python | doocs__leetcode | solution/0200-0299/0200.Number of Islands/Solution.py | {
"start": 0,
"end": 561
} | class ____:
def numIslands(self, grid: List[List[str]]) -> int:
def dfs(i, j):
grid[i][j] = '0'
for a, b in pairwise(dirs):
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n and grid[x][y] == '1':
dfs(x, y)
ans = 0
dirs = (-1, 0, 1, 0, -1)
m, n = len(grid), len(grid[0])
for i in range(m):
for j in range(n):
if grid[i][j] == '1':
dfs(i, j)
ans += 1
return ans
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 881593,
"end": 882003
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("PullRequestTimelineItem", graphql_name="node")
"""The item at the end of the edge."""
| PullRequestTimelineItemEdge |
python | openai__openai-python | tests/api_resources/containers/files/test_content.py | {
"start": 464,
"end": 3319
} | class ____:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_method_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/containers/container_id/files/file_id/content").mock(
return_value=httpx.Response(200, json={"foo": "bar"})
)
content = client.containers.files.content.retrieve(
file_id="file_id",
container_id="container_id",
)
assert isinstance(content, _legacy_response.HttpxBinaryResponseContent)
assert content.json() == {"foo": "bar"}
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_raw_response_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/containers/container_id/files/file_id/content").mock(
return_value=httpx.Response(200, json={"foo": "bar"})
)
response = client.containers.files.content.with_raw_response.retrieve(
file_id="file_id",
container_id="container_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
content = response.parse()
assert_matches_type(_legacy_response.HttpxBinaryResponseContent, content, path=["response"])
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_streaming_response_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/containers/container_id/files/file_id/content").mock(
return_value=httpx.Response(200, json={"foo": "bar"})
)
with client.containers.files.content.with_streaming_response.retrieve(
file_id="file_id",
container_id="container_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
content = response.parse()
assert_matches_type(bytes, content, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_path_params_retrieve(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
client.containers.files.content.with_raw_response.retrieve(
file_id="file_id",
container_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
client.containers.files.content.with_raw_response.retrieve(
file_id="",
container_id="container_id",
)
| TestContent |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 7178,
"end": 8223
} | class ____(Operation):
def call(self, x):
return backend.nn.silu(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(
[
"keras.ops.silu",
"keras.ops.nn.silu",
"keras.ops.swish",
"keras.ops.nn.swish",
]
)
def silu(x):
"""Sigmoid Linear Unit (SiLU) activation function, also known as Swish.
The SiLU activation function is computed by the sigmoid function multiplied
by its input. It is defined as `f(x) = x * sigmoid(x)`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0])
>>> keras.ops.sigmoid(x)
array([0.00247262, 0.7310586, 0.5, 0.7310586, 0.9975274], dtype=float32)
>>> keras.ops.silu(x)
array([-0.0148357, 0.7310586, 0.0, 0.7310586, 5.9851646], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Silu().symbolic_call(x)
return backend.nn.silu(x)
| Silu |
python | huggingface__transformers | src/transformers/models/d_fine/modeling_d_fine.py | {
"start": 9916,
"end": 15439
} | class ____(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper.
Here, we add position embeddings to the queries and keys (as explained in the Deformable DETR paper).
"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _reshape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_embeddings: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
batch_size, target_len, embed_dim = hidden_states.size()
# add position embeddings to the hidden states before projecting to queries and keys
if position_embeddings is not None:
hidden_states_original = hidden_states
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
# get queries, keys and values
query_states = self.q_proj(hidden_states) * self.scaling
key_states = self._reshape(self.k_proj(hidden_states), -1, batch_size)
value_states = self._reshape(self.v_proj(hidden_states_original), -1, batch_size)
proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
query_states = self._reshape(query_states, target_len, batch_size).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
source_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(
f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is"
f" {attn_weights.size()}"
)
# expand attention_mask
if attention_mask is not None:
# [seq_len, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len]
attention_mask = attention_mask.expand(batch_size, 1, *attention_mask.size())
if attention_mask is not None:
if attention_mask.size() != (batch_size, 1, target_len, source_len):
raise ValueError(
f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is"
f" {attention_mask.size()}"
)
if attention_mask.dtype == torch.bool:
attention_mask = torch.zeros_like(attention_mask, dtype=attn_weights.dtype).masked_fill_(
attention_mask, -torch.inf
)
attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask
attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped
| DFineMultiheadAttention |
python | pandas-dev__pandas | pandas/core/accessor.py | {
"start": 548,
"end": 1339
} | class ____:
_accessors: set[str] = set()
_hidden_attrs: frozenset[str] = frozenset()
@final
def _dir_deletions(self) -> set[str]:
"""
Delete unwanted __dir__ for this object.
"""
return self._accessors | self._hidden_attrs
def _dir_additions(self) -> set[str]:
"""
Add additional __dir__ for this object.
"""
return {accessor for accessor in self._accessors if hasattr(self, accessor)}
def __dir__(self) -> list[str]:
"""
Provide method name lookup and completion.
Notes
-----
Only provide 'public' methods.
"""
rv = set(super().__dir__())
rv = (rv - self._dir_deletions()) | self._dir_additions()
return sorted(rv)
| DirNamesMixin |
python | PrefectHQ__prefect | tests/utilities/schema_tools/test_validation.py | {
"start": 2801,
"end": 4057
} | class ____:
@pytest.fixture
def schema(self) -> dict:
return {
"title": "Parameters",
"type": "object",
"properties": {
"param": {"title": "param", "position": 0, "type": "integer"}
},
"required": ["param"],
}
@pytest.mark.parametrize(
"obj, expected",
[
({"param": 10}, True), # Valid integer
({"param": "not an integer"}, False), # Invalid type
({}, False), # Missing required field
({"param": None}, False), # Null value
],
)
def test_is_valid(self, schema, obj, expected):
assert is_valid(obj, schema) == expected
@pytest.mark.parametrize(
"obj, expected_errors",
[
({"param": 10}, []), # Valid integer
(
{"param": "not an integer"},
["'not an integer' is not of type 'integer'"],
), # Invalid type
({}, ["'param' is a required property"]), # Missing required field
],
)
def test_validate(self, schema, obj, expected_errors):
errors = validate(obj, schema)
assert [e.message for e in errors] == expected_errors
| TestNumber |
python | readthedocs__readthedocs.org | readthedocs/organizations/tests/test_forms.py | {
"start": 5550,
"end": 8424
} | class ____(OrganizationTestCase):
def test_create_organization_with_empty_slug(self):
data = {
"name": "往事",
"email": "test@example.org",
}
form = forms.OrganizationSignupForm(data, user=self.user)
self.assertFalse(form.is_valid())
self.assertEqual(
"This field is required.", form.errors["slug"][0]
)
def test_create_organization_with_invalid_unicode_slug(self):
data = {
"name": "往事",
"email": "test@example.org",
"slug": "-",
}
form = forms.OrganizationSignupForm(data, user=self.user)
self.assertFalse(form.is_valid())
self.assertEqual(
"Invalid slug, use more valid characters.", form.errors["slug"][0]
)
def test_create_organization_with_big_name(self):
data = {
"name": "a" * 33,
"email": "test@example.org",
}
form = forms.OrganizationSignupForm(data, user=self.user)
self.assertFalse(form.is_valid())
self.assertIn("at most 32 characters", form.errors["name"][0])
def test_create_organization_with_existent_slug(self):
data = {
"name": "Fauxzilla",
"email": "test@example.org",
"slug": "mozilla",
}
form = forms.OrganizationSignupForm(data, user=self.user)
# there is already an organization with the slug ``mozilla`` (lowercase)
self.assertFalse(form.is_valid())
self.assertEqual("Slug is already used by another organization", form.errors["slug"][0])
def test_create_organization_with_nonexistent_slug(self):
data = {
"name": "My New Organization",
"email": "test@example.org",
"slug": "my-new-organization",
}
form = forms.OrganizationSignupForm(data, user=self.user)
self.assertTrue(form.is_valid())
organization = form.save()
self.assertEqual(Organization.objects.filter(slug="my-new-organization").count(), 1)
def test_create_organization_with_invalid_slug(self):
data = {
"name": "My Org",
"email": "test@example.org",
"slug": "invalid-<slug>",
}
form = forms.OrganizationSignupForm(data, user=self.user)
self.assertFalse(form.is_valid())
self.assertIn("consisting of letters, numbers", form.errors["slug"][0])
def test_create_organization_with_dns_invalid_slug(self):
data = {
"name": "My Org",
"email": "test@example.org",
"slug": "-invalid_slug-",
}
form = forms.OrganizationSignupForm(data, user=self.user)
self.assertFalse(form.is_valid())
self.assertIn("Invalid slug, use suggested slug 'invalid-slug' instead", form.errors["slug"][0])
| OrganizationSignupTest |
python | readthedocs__readthedocs.org | readthedocs/api/v2/models.py | {
"start": 318,
"end": 1786
} | class ____(BaseAPIKeyManager):
# pylint: disable=arguments-differ
def create_key(self, project):
"""
Create a new API key for a project.
Build API keys are valid for
- project or default build time limit
- plus 25% to cleanup task once build is finished
- plus extra time to allow multiple retries (concurrency limit reached)
and can be revoked at any time by hitting the /api/v2/revoke/ endpoint.
"""
# delta = (
# project.container_time_limit or settings.BUILD_TIME_LIMIT
# ) * 1.25 + settings.RTD_BUILDS_RETRY_DELAY * settings.RTD_BUILDS_MAX_RETRIES
#
# Use 24 hours for now since we are hitting the expiry date and we shouldn't
# https://github.com/readthedocs/readthedocs.org/issues/12467
#
# NOTE: this is the maximum time this token will be valid, since the
# default behavior is to revoke from the builder itself when the build
# at `after_return` immediately before the build finishes
delta = 60 * 60 * 24 # 24h
expiry_date = timezone.now() + timedelta(seconds=delta)
name_max_length = self.model._meta.get_field("name").max_length
return super().create_key(
# Name is required, so we use the project slug for it.
name=project.slug[:name_max_length],
expiry_date=expiry_date,
project=project,
)
| BuildAPIKeyManager |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/prefect_dbt/cloud/exceptions.py | {
"start": 84,
"end": 188
} | class ____(DbtCloudException):
"""Raised when unable to retrieve dbt Cloud run"""
| DbtCloudGetRunFailed |
python | astropy__astropy | astropy/coordinates/builtin_frames/galactocentric.py | {
"start": 1235,
"end": 1730
} | class ____(MappingView):
"""
`~collections.abc.MappingView` with a read-only ``getitem`` through
`~types.MappingProxyType`.
"""
def __init__(self, mapping):
super().__init__(mapping)
self._mappingproxy = MappingProxyType(self._mapping) # read-only
def __getitem__(self, key):
"""Read-only ``getitem``."""
return self._mappingproxy[key]
def __deepcopy__(self, memo):
return copy.deepcopy(self._mapping, memo=memo)
| _StateProxy |
python | realpython__materials | django-todo-list/source_code_final/todo_app/views.py | {
"start": 2063,
"end": 2275
} | class ____(DeleteView):
model = ToDoList
# You have to use reverse_lazy() instead of reverse(),
# as the urls are not loaded when the file is imported.
success_url = reverse_lazy("index")
| ListDelete |
python | redis__redis-py | tests/test_multidb/test_failover.py | {
"start": 2223,
"end": 5646
} | class ____:
@pytest.mark.parametrize(
"mock_db",
[
{"weight": 0.2, "circuit": {"state": CBState.CLOSED}},
],
indirect=True,
)
def test_execute_returns_valid_database_with_failover_attempts(
self, mock_db, mock_fs
):
failover_attempts = 3
mock_fs.database.side_effect = [
NoValidDatabaseException,
NoValidDatabaseException,
NoValidDatabaseException,
mock_db,
]
executor = DefaultFailoverStrategyExecutor(
mock_fs, failover_attempts=failover_attempts, failover_delay=0.1
)
for i in range(failover_attempts + 1):
try:
database = executor.execute()
assert database == mock_db
except TemporaryUnavailableException as e:
assert e.args[0] == (
"No database connections currently available. "
"This is a temporary condition - please retry the operation."
)
sleep(0.11)
pass
assert mock_fs.database.call_count == 4
def test_execute_throws_exception_on_attempts_exceed(self, mock_fs):
failover_attempts = 3
mock_fs.database.side_effect = [
NoValidDatabaseException,
NoValidDatabaseException,
NoValidDatabaseException,
NoValidDatabaseException,
]
executor = DefaultFailoverStrategyExecutor(
mock_fs, failover_attempts=failover_attempts, failover_delay=0.1
)
with pytest.raises(NoValidDatabaseException):
for i in range(failover_attempts + 1):
try:
executor.execute()
except TemporaryUnavailableException as e:
assert e.args[0] == (
"No database connections currently available. "
"This is a temporary condition - please retry the operation."
)
sleep(0.11)
pass
assert mock_fs.database.call_count == 4
def test_execute_throws_exception_on_attempts_does_not_exceed_delay(self, mock_fs):
failover_attempts = 3
mock_fs.database.side_effect = [
NoValidDatabaseException,
NoValidDatabaseException,
NoValidDatabaseException,
NoValidDatabaseException,
]
executor = DefaultFailoverStrategyExecutor(
mock_fs, failover_attempts=failover_attempts, failover_delay=0.1
)
with pytest.raises(
TemporaryUnavailableException,
match=(
"No database connections currently available. "
"This is a temporary condition - please retry the operation."
),
):
for i in range(failover_attempts + 1):
try:
executor.execute()
except TemporaryUnavailableException as e:
assert e.args[0] == (
"No database connections currently available. "
"This is a temporary condition - please retry the operation."
)
if i == failover_attempts:
raise e
assert mock_fs.database.call_count == 4
| TestDefaultStrategyExecutor |
python | getsentry__sentry | src/sentry/testutils/helpers/backups.py | {
"start": 40163,
"end": 40561
} | class ____(TransactionTestCase, ExhaustiveFixtures):
"""
Instruments a database state that includes an instance of every Sentry model with every field
set to a non-default, non-null value. This is useful for exhaustive conformance testing. Unlike `BackupTestCase`, this completely resets the database between each test, which can be an expensive operation.
"""
| BackupTransactionTestCase |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.