language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/struct_store/container_builder.py | {
"start": 638,
"end": 5770
} | class ____:
"""
SQLContextContainerBuilder.
Build a SQLContextContainer that can be passed to the SQL index
during index construction or during query-time.
NOTE: if context_str is specified, that will be used as context
instead of context_dict
Args:
sql_database (SQLDatabase): SQL database
context_dict (Optional[Dict[str, str]]): context dict
"""
def __init__(
self,
sql_database: SQLDatabase,
context_dict: Optional[Dict[str, str]] = None,
context_str: Optional[str] = None,
):
"""Initialize params."""
self.sql_database = sql_database
# if context_dict provided, validate that all keys are valid table names
if context_dict is not None:
# validate context_dict keys are valid table names
context_keys = set(context_dict.keys())
if not context_keys.issubset(
set(self.sql_database.get_usable_table_names())
):
raise ValueError(
"Invalid context table names: "
f"{context_keys - set(self.sql_database.get_usable_table_names())}"
)
self.context_dict = context_dict or {}
# build full context from sql_database
self.full_context_dict = self._build_context_from_sql_database(
self.sql_database, current_context=self.context_dict
)
self.context_str = context_str
@classmethod
def from_documents(
cls,
documents_dict: Dict[str, List[BaseNode]],
sql_database: SQLDatabase,
**context_builder_kwargs: Any,
) -> "SQLContextContainerBuilder":
"""Build context from documents."""
context_builder = SQLDocumentContextBuilder(
sql_database, **context_builder_kwargs
)
context_dict = context_builder.build_all_context_from_documents(documents_dict)
return SQLContextContainerBuilder(sql_database, context_dict=context_dict)
def _build_context_from_sql_database(
self,
sql_database: SQLDatabase,
current_context: Optional[Dict[str, str]] = None,
) -> Dict[str, str]:
"""Get tables schema + optional context as a single string."""
current_context = current_context or {}
result_context = {}
for table_name in sql_database.get_usable_table_names():
table_desc = sql_database.get_single_table_info(table_name)
table_text = f"Schema of table {table_name}:\n{table_desc}\n"
if table_name in current_context:
table_text += f"Context of table {table_name}:\n"
table_text += current_context[table_name]
result_context[table_name] = table_text
return result_context
def _get_context_dict(self, ignore_db_schema: bool) -> Dict[str, str]:
"""Get full context dict."""
if ignore_db_schema:
return self.context_dict
else:
return self.full_context_dict
def derive_index_from_context(
self,
index_cls: Type[BaseIndex],
ignore_db_schema: bool = False,
**index_kwargs: Any,
) -> BaseIndex:
"""Derive index from context."""
full_context_dict = self._get_context_dict(ignore_db_schema)
context_docs = []
for table_name, context_str in full_context_dict.items():
doc = Document(text=context_str, metadata={"table_name": table_name})
context_docs.append(doc)
return index_cls.from_documents(
documents=context_docs,
**index_kwargs,
)
def query_index_for_context(
self,
index: BaseIndex,
query_str: QueryType,
query_tmpl: Optional[str] = DEFAULT_CONTEXT_QUERY_TMPL,
store_context_str: bool = True,
**index_kwargs: Any,
) -> str:
"""
Query index for context.
A simple wrapper around the index.query call which
injects a query template to specifically fetch table information,
and can store a context_str.
Args:
index (BaseIndex): index data structure
query_str (QueryType): query string
query_tmpl (Optional[str]): query template
store_context_str (bool): store context_str
"""
if query_tmpl is None:
context_query_str = query_str
else:
context_query_str = query_tmpl.format(orig_query_str=query_str)
query_engine = index.as_query_engine()
response = query_engine.query(context_query_str)
context_str = str(response)
if store_context_str:
self.context_str = context_str
return context_str
def build_context_container(
self, ignore_db_schema: bool = False
) -> SQLContextContainer:
"""Build index structure."""
full_context_dict = self._get_context_dict(ignore_db_schema)
return SQLContextContainer(
context_str=self.context_str,
context_dict=full_context_dict,
)
| SQLContextContainerBuilder |
python | langchain-ai__langchain | libs/langchain/langchain_classic/retrievers/document_compressors/chain_extract.py | {
"start": 1619,
"end": 4473
} | class ____(BaseDocumentCompressor):
"""LLM Chain Extractor.
Document compressor that uses an LLM chain to extract
the relevant parts of documents.
"""
llm_chain: Runnable
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Callbacks | None = None,
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output_ = self.llm_chain.invoke(_input, config={"callbacks": callbacks})
if isinstance(self.llm_chain, LLMChain):
output = output_[self.llm_chain.output_key]
if self.llm_chain.prompt.output_parser is not None:
output = self.llm_chain.prompt.output_parser.parse(output)
else:
output = output_
if len(output) == 0:
continue
compressed_docs.append(
Document(page_content=cast("str", output), metadata=doc.metadata),
)
return compressed_docs
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Callbacks | None = None,
) -> Sequence[Document]:
"""Compress page content of raw documents asynchronously."""
inputs = [self.get_input(query, doc) for doc in documents]
outputs = await self.llm_chain.abatch(inputs, {"callbacks": callbacks})
compressed_docs = []
for i, doc in enumerate(documents):
if len(outputs[i]) == 0:
continue
compressed_docs.append(
Document(page_content=outputs[i], metadata=doc.metadata),
)
return compressed_docs
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: PromptTemplate | None = None,
get_input: Callable[[str, Document], str] | None = None,
llm_chain_kwargs: dict | None = None, # noqa: ARG003
) -> LLMChainExtractor:
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
if _prompt.output_parser is not None:
parser = _prompt.output_parser
else:
parser = StrOutputParser()
llm_chain = _prompt | llm | parser
return cls(llm_chain=llm_chain, get_input=_get_input)
| LLMChainExtractor |
python | lepture__authlib | authlib/integrations/django_client/apps.py | {
"start": 2026,
"end": 3680
} | class ____(DjangoAppMixin, OAuth2Mixin, OpenIDMixin, BaseApp):
client_cls = OAuth2Session
def authorize_access_token(self, request, **kwargs):
"""Fetch access token in one step.
:param request: HTTP request instance from Django view.
:return: A token dict.
"""
if request.method == "GET":
error = request.GET.get("error")
if error:
description = request.GET.get("error_description")
raise OAuthError(error=error, description=description)
params = {
"code": request.GET.get("code"),
"state": request.GET.get("state"),
}
else:
params = {
"code": request.POST.get("code"),
"state": request.POST.get("state"),
}
state_data = self.framework.get_state_data(request.session, params.get("state"))
self.framework.clear_state_data(request.session, params.get("state"))
params = self._format_state_params(state_data, params)
claims_options = kwargs.pop("claims_options", None)
claims_cls = kwargs.pop("claims_cls", None)
leeway = kwargs.pop("leeway", 120)
token = self.fetch_access_token(**params, **kwargs)
if "id_token" in token and "nonce" in state_data:
userinfo = self.parse_id_token(
token,
nonce=state_data["nonce"],
claims_options=claims_options,
claims_cls=claims_cls,
leeway=leeway,
)
token["userinfo"] = userinfo
return token
| DjangoOAuth2App |
python | getsentry__sentry | tests/sentry/incidents/models/test_alert_rule.py | {
"start": 1246,
"end": 3067
} | class ____(TestCase):
def setUp(self) -> None:
self.alert_rule = self.create_alert_rule()
self.subscription = self.alert_rule.snuba_query.subscriptions.get()
def test_updated_subscription(self) -> None:
AlertRule.objects.get_for_subscription(self.subscription)
assert (
cache.get(AlertRule.objects.CACHE_SUBSCRIPTION_KEY % self.subscription.id)
== self.alert_rule
)
self.subscription.save()
assert cache.get(AlertRule.objects.CACHE_SUBSCRIPTION_KEY % self.subscription.id) is None
def test_deleted_subscription(self) -> None:
AlertRule.objects.get_for_subscription(self.subscription)
assert (
cache.get(AlertRule.objects.CACHE_SUBSCRIPTION_KEY % self.subscription.id)
== self.alert_rule
)
subscription_id = self.subscription.id
self.subscription.delete()
assert cache.get(AlertRule.objects.CACHE_SUBSCRIPTION_KEY % self.subscription.id) is None
# Add the subscription id back in so we don't use `None` in the lookup check.
self.subscription.id = subscription_id
with pytest.raises(AlertRule.DoesNotExist):
AlertRule.objects.get_for_subscription(self.subscription)
def test_deleted_alert_rule(self) -> None:
AlertRule.objects.get_for_subscription(self.subscription)
assert (
cache.get(AlertRule.objects.CACHE_SUBSCRIPTION_KEY % self.subscription.id)
== self.alert_rule
)
delete_alert_rule(self.alert_rule)
assert cache.get(AlertRule.objects.CACHE_SUBSCRIPTION_KEY % self.subscription.id) is None
with pytest.raises(AlertRule.DoesNotExist):
AlertRule.objects.get_for_subscription(self.subscription)
| IncidentClearSubscriptionCacheTest |
python | openai__gym | gym/wrappers/normalize.py | {
"start": 1730,
"end": 3728
} | class ____(gym.core.Wrapper):
"""This wrapper will normalize observations s.t. each coordinate is centered with unit variance.
Note:
The normalization depends on past trajectories and observations will not be normalized correctly if the wrapper was
newly instantiated or the policy was changed recently.
"""
def __init__(self, env: gym.Env, epsilon: float = 1e-8):
"""This wrapper will normalize observations s.t. each coordinate is centered with unit variance.
Args:
env (Env): The environment to apply the wrapper
epsilon: A stability parameter that is used when scaling the observations.
"""
super().__init__(env)
self.num_envs = getattr(env, "num_envs", 1)
self.is_vector_env = getattr(env, "is_vector_env", False)
if self.is_vector_env:
self.obs_rms = RunningMeanStd(shape=self.single_observation_space.shape)
else:
self.obs_rms = RunningMeanStd(shape=self.observation_space.shape)
self.epsilon = epsilon
def step(self, action):
"""Steps through the environment and normalizes the observation."""
obs, rews, terminateds, truncateds, infos = self.env.step(action)
if self.is_vector_env:
obs = self.normalize(obs)
else:
obs = self.normalize(np.array([obs]))[0]
return obs, rews, terminateds, truncateds, infos
def reset(self, **kwargs):
"""Resets the environment and normalizes the observation."""
obs, info = self.env.reset(**kwargs)
if self.is_vector_env:
return self.normalize(obs), info
else:
return self.normalize(np.array([obs]))[0], info
def normalize(self, obs):
"""Normalises the observation using the running mean and variance of the observations."""
self.obs_rms.update(obs)
return (obs - self.obs_rms.mean) / np.sqrt(self.obs_rms.var + self.epsilon)
| NormalizeObservation |
python | scipy__scipy | scipy/stats/tests/test_multivariate.py | {
"start": 179328,
"end": 187563
} | class ____:
@classmethod
def get_params(self, m):
rng = np.random.default_rng(28469824356873456)
alpha = rng.uniform(0, 100, size=2)
x = rng.integers(1, 20, size=(m, 2))
n = x.sum(axis=-1)
return rng, m, alpha, n, x
def test_frozen(self):
rng = np.random.default_rng(28469824356873456)
alpha = rng.uniform(0, 100, 10)
x = rng.integers(0, 10, 10)
n = np.sum(x, axis=-1)
d = dirichlet_multinomial(alpha, n)
assert_equal(d.logpmf(x), dirichlet_multinomial.logpmf(x, alpha, n))
assert_equal(d.pmf(x), dirichlet_multinomial.pmf(x, alpha, n))
assert_equal(d.mean(), dirichlet_multinomial.mean(alpha, n))
assert_equal(d.var(), dirichlet_multinomial.var(alpha, n))
assert_equal(d.cov(), dirichlet_multinomial.cov(alpha, n))
def test_pmf_logpmf_against_R(self):
# # Compare PMF against R's extraDistr ddirmnon
# # library(extraDistr)
# # options(digits=16)
# ddirmnom(c(1, 2, 3), 6, c(3, 4, 5))
x = np.array([1, 2, 3])
n = np.sum(x)
alpha = np.array([3, 4, 5])
res = dirichlet_multinomial.pmf(x, alpha, n)
logres = dirichlet_multinomial.logpmf(x, alpha, n)
ref = 0.08484162895927638
assert_allclose(res, ref)
assert_allclose(logres, np.log(ref))
assert res.shape == logres.shape == ()
# library(extraDistr)
# options(digits=16)
# ddirmnom(c(4, 3, 2, 0, 2, 3, 5, 7, 4, 7), 37,
# c(45.01025314, 21.98739582, 15.14851365, 80.21588671,
# 52.84935481, 25.20905262, 53.85373737, 4.88568118,
# 89.06440654, 20.11359466))
rng = np.random.default_rng(28469824356873456)
alpha = rng.uniform(0, 100, 10)
x = rng.integers(0, 10, 10)
n = np.sum(x, axis=-1)
res = dirichlet_multinomial(alpha, n).pmf(x)
logres = dirichlet_multinomial.logpmf(x, alpha, n)
ref = 3.65409306285992e-16
assert_allclose(res, ref)
assert_allclose(logres, np.log(ref))
def test_pmf_logpmf_support(self):
# when the sum of the category counts does not equal the number of
# trials, the PMF is zero
rng, m, alpha, n, x = self.get_params(1)
n += 1
assert_equal(dirichlet_multinomial(alpha, n).pmf(x), 0)
assert_equal(dirichlet_multinomial(alpha, n).logpmf(x), -np.inf)
rng, m, alpha, n, x = self.get_params(10)
i = rng.random(size=10) > 0.5
x[i] = np.round(x[i] * 2) # sum of these x does not equal n
assert_equal(dirichlet_multinomial(alpha, n).pmf(x)[i], 0)
assert_equal(dirichlet_multinomial(alpha, n).logpmf(x)[i], -np.inf)
assert np.all(dirichlet_multinomial(alpha, n).pmf(x)[~i] > 0)
assert np.all(dirichlet_multinomial(alpha, n).logpmf(x)[~i] > -np.inf)
def test_dimensionality_one(self):
# if the dimensionality is one, there is only one possible outcome
n = 6 # number of trials
alpha = [10] # concentration parameters
x = np.asarray([n]) # counts
dist = dirichlet_multinomial(alpha, n)
assert_equal(dist.pmf(x), 1)
assert_equal(dist.pmf(x+1), 0)
assert_equal(dist.logpmf(x), 0)
assert_equal(dist.logpmf(x+1), -np.inf)
assert_equal(dist.mean(), n)
assert_equal(dist.var(), 0)
assert_equal(dist.cov(), 0)
def test_n_is_zero(self):
# similarly, only one possible outcome if n is zero
n = 0
alpha = np.asarray([1., 1.])
x = np.asarray([0, 0])
dist = dirichlet_multinomial(alpha, n)
assert_equal(dist.pmf(x), 1)
assert_equal(dist.pmf(x+1), 0)
assert_equal(dist.logpmf(x), 0)
assert_equal(dist.logpmf(x+1), -np.inf)
assert_equal(dist.mean(), [0, 0])
assert_equal(dist.var(), [0, 0])
assert_equal(dist.cov(), [[0, 0], [0, 0]])
@pytest.mark.parametrize('method_name', ['pmf', 'logpmf'])
def test_against_betabinom_pmf(self, method_name):
rng, m, alpha, n, x = self.get_params(100)
method = getattr(dirichlet_multinomial(alpha, n), method_name)
ref_method = getattr(stats.betabinom(n, *alpha.T), method_name)
res = method(x)
ref = ref_method(x.T[0])
assert_allclose(res, ref)
@pytest.mark.parametrize('method_name', ['mean', 'var'])
def test_against_betabinom_moments(self, method_name):
rng, m, alpha, n, x = self.get_params(100)
method = getattr(dirichlet_multinomial(alpha, n), method_name)
ref_method = getattr(stats.betabinom(n, *alpha.T), method_name)
res = method()[:, 0]
ref = ref_method()
assert_allclose(res, ref)
def test_moments(self):
rng = np.random.default_rng(28469824356873456)
dim = 5
n = rng.integers(1, 100)
alpha = rng.random(size=dim) * 10
dist = dirichlet_multinomial(alpha, n)
# Generate a random sample from the distribution using NumPy
m = 100000
p = rng.dirichlet(alpha, size=m)
x = rng.multinomial(n, p, size=m)
assert_allclose(dist.mean(), np.mean(x, axis=0), rtol=5e-3)
assert_allclose(dist.var(), np.var(x, axis=0), rtol=1e-2)
assert dist.mean().shape == dist.var().shape == (dim,)
cov = dist.cov()
assert cov.shape == (dim, dim)
assert_allclose(cov, np.cov(x.T), rtol=2e-2)
assert_equal(np.diag(cov), dist.var())
assert np.all(scipy.linalg.eigh(cov)[0] > 0) # positive definite
def test_input_validation(self):
# valid inputs
x0 = np.array([1, 2, 3])
n0 = np.sum(x0)
alpha0 = np.array([3, 4, 5])
text = "`x` must contain only non-negative integers."
with assert_raises(ValueError, match=text):
dirichlet_multinomial.logpmf([1, -1, 3], alpha0, n0)
with assert_raises(ValueError, match=text):
dirichlet_multinomial.logpmf([1, 2.1, 3], alpha0, n0)
text = "`alpha` must contain only positive values."
with assert_raises(ValueError, match=text):
dirichlet_multinomial.logpmf(x0, [3, 0, 4], n0)
with assert_raises(ValueError, match=text):
dirichlet_multinomial.logpmf(x0, [3, -1, 4], n0)
text = "`n` must be a non-negative integer."
with assert_raises(ValueError, match=text):
dirichlet_multinomial.logpmf(x0, alpha0, 49.1)
with assert_raises(ValueError, match=text):
dirichlet_multinomial.logpmf(x0, alpha0, -1)
x = np.array([1, 2, 3, 4])
alpha = np.array([3, 4, 5])
text = "`x` and `alpha` must be broadcastable."
with assert_raises(ValueError, match=text):
dirichlet_multinomial.logpmf(x, alpha, x.sum())
@pytest.mark.parametrize('method', ['pmf', 'logpmf'])
def test_broadcasting_pmf(self, method):
alpha = np.array([[3, 4, 5], [4, 5, 6], [5, 5, 7], [8, 9, 10]])
n = np.array([[6], [7], [8]])
x = np.array([[1, 2, 3], [2, 2, 3]]).reshape((2, 1, 1, 3))
method = getattr(dirichlet_multinomial, method)
res = method(x, alpha, n)
assert res.shape == (2, 3, 4)
for i in range(len(x)):
for j in range(len(n)):
for k in range(len(alpha)):
res_ijk = res[i, j, k]
ref = method(x[i].squeeze(), alpha[k].squeeze(), n[j].squeeze())
assert_allclose(res_ijk, ref)
@pytest.mark.parametrize('method_name', ['mean', 'var', 'cov'])
def test_broadcasting_moments(self, method_name):
alpha = np.array([[3, 4, 5], [4, 5, 6], [5, 5, 7], [8, 9, 10]])
n = np.array([[6], [7], [8]])
method = getattr(dirichlet_multinomial, method_name)
res = method(alpha, n)
assert res.shape == (3, 4, 3) if method_name != 'cov' else (3, 4, 3, 3)
for j in range(len(n)):
for k in range(len(alpha)):
res_ijk = res[j, k]
ref = method(alpha[k].squeeze(), n[j].squeeze())
assert_allclose(res_ijk, ref)
| TestDirichletMultinomial |
python | huggingface__transformers | src/transformers/models/segformer/modeling_segformer.py | {
"start": 1121,
"end": 3506
} | class ____(ImageClassifierOutput):
"""
Base class for outputs of image classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
called feature maps) of the model at the output of each stage.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
# Copied from transformers.models.convnext.modeling_convnext.ConvNextDropPath with ConvNext->Segformer
| SegFormerImageClassifierOutput |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/auth/tokens.py | {
"start": 3210,
"end": 8717
} | class ____:
"""A class to fetch and sync a set of JSON Web Keys."""
url: str
fetched_at: float = 0
last_fetch_attempt_at: float = 0
client: httpx.AsyncClient = attrs.field(factory=httpx.AsyncClient)
_jwks: jwt.PyJWKSet | None = None
refresh_jwks: bool = True
refresh_interval_secs: int = 3600
refresh_retry_interval_secs: int = 10
def __repr__(self) -> str:
return f"JWKS(url={self.url}, fetched_at={self.fetched_at})"
@classmethod
def from_private_key(cls, *keys: AllowedPrivateKeys | tuple[AllowedPrivateKeys, str]):
obj = cls(url=os.devnull)
keyset = [
# Each `key` is either the key directly or `(key, "my-kid")`
key_to_jwk_dict(*key) if isinstance(key, tuple) else key_to_jwk_dict(key)
for key in keys
]
obj._jwks = jwt.PyJWKSet(keyset)
return obj
async def fetch_jwks(self) -> None:
if not self._should_fetch_jwks():
return
if self.url.startswith("http"):
data = await self._fetch_remote_jwks()
else:
data = self._fetch_local_jwks()
if not data:
return
self._jwks = jwt.PyJWKSet.from_dict(data)
log.debug("Fetched JWKS", url=self.url, keys=len(self._jwks.keys))
async def _fetch_remote_jwks(self) -> dict[str, Any] | None:
try:
log.debug(
"Fetching JWKS",
url=self.url,
last_fetched_secs_ago=int(time.monotonic() - self.fetched_at) if self.fetched_at else None,
)
if TYPE_CHECKING:
assert self.url
self.last_fetch_attempt_at = int(time.monotonic())
response = await self.client.get(self.url)
response.raise_for_status()
self.fetched_at = int(time.monotonic())
await response.aread()
await response.aclose()
return response.json()
except Exception:
log.exception("Failed to fetch remote JWKS", url=self.url)
return None
def _fetch_local_jwks(self) -> dict[str, Any] | None:
try:
with open(self.url) as jwks_file:
content = json.load(jwks_file)
self.fetched_at = int(time.monotonic())
return content
except Exception:
log.exception("Failed to read local JWKS", url=self.url)
return None
def _should_fetch_jwks(self) -> bool:
"""
Check if we need to fetch the JWKS based on the last fetch time and the refresh interval.
If the JWKS URL is local, we only fetch it once. For remote JWKS URLs we fetch it based
on the refresh interval if refreshing has been enabled with a minimum interval between
attempts. The fetcher functions set the fetched_at timestamp to the current monotonic time
when the JWKS is fetched.
"""
if not self.url.startswith("http"):
# Fetch local JWKS only if not already loaded
# This could be improved in future by looking at mtime of file.
return not self._jwks
# For remote fetches we check if the JWKS is not loaded (fetched_at = 0) or if the last fetch was more than
# refresh_interval_secs ago and the last fetch attempt was more than refresh_retry_interval_secs ago
now = time.monotonic()
return self.refresh_jwks and (
not self._jwks
or (
self.fetched_at == 0
or (
now - self.fetched_at > self.refresh_interval_secs
and now - self.last_fetch_attempt_at > self.refresh_retry_interval_secs
)
)
)
async def get_key(self, kid: str) -> jwt.PyJWK:
"""Fetch the JWKS and find the matching key for the token."""
await self.fetch_jwks()
if self._jwks:
return self._jwks[kid]
# It didn't load!
raise KeyError(f"Key ID {kid} not found in keyset")
def status(self):
# https://svcs.hynek.me/en/stable/core-concepts.html#health-checks
if not self._should_fetch_jwks():
# Up-to-date, we are healthy
return
if self.fetched_at == 0:
raise RuntimeError("JWKS never fetched")
last_successful_fetch = time.monotonic() - self.fetched_at
if last_successful_fetch > 3 * self.refresh_interval_secs:
raise RuntimeError(f"JWKS last fetched {last_successful_fetch}s ago")
def _conf_factory(section, key, **kwargs):
def factory() -> str:
from airflow.configuration import conf
return conf.get(section, key, **kwargs, suppress_warnings=True)
return factory
@overload
def _conf_list_factory(section, key, first_only: Literal[True], **kwargs) -> Callable[[], str]: ...
@overload
def _conf_list_factory(
section, key, first_only: Literal[False] = False, **kwargs
) -> Callable[[], list[str]]: ...
def _conf_list_factory(section, key, first_only: bool = False, **kwargs):
def factory() -> list[str] | str:
from airflow.configuration import conf
val = conf.getlist(section, key, **kwargs, suppress_warnings=True)
if first_only and val:
return val[0]
return val or []
return factory
def _to_list(val: str | list[str]) -> list[str]:
if isinstance(val, str):
val = [val]
return val
@attrs.define(kw_only=True)
| JWKS |
python | django-mptt__django-mptt | tests/myapp/models.py | {
"start": 1499,
"end": 1754
} | class ____(MPTTModel):
name = models.CharField(max_length=50, unique=True)
parent = TreeForeignKey(
"self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
)
def __str__(self):
return self.name
| Genre |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/assets.py | {
"start": 1285,
"end": 1473
} | class ____(StrictBaseModel):
"""Task inlet reference serializer for assets."""
dag_id: str
task_id: str
created_at: datetime
updated_at: datetime
| TaskInletAssetReference |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/components_tests/integration_tests/lib/duckdb_component/step_two.py | {
"start": 104,
"end": 1328
} | class ____(dg.Component, dg.Model, dg.Resolvable):
"""A component that allows you to write SQL without learning dbt or Dagster's concepts."""
csv_path: str
asset_key: str
def build_defs(self, context: ComponentLoadContext) -> dg.Definitions:
name = f"run_{self.asset_key}"
asset_specs = [dg.AssetSpec(key=self.asset_key)]
path = (context.path / Path(self.csv_path)).absolute()
assert path.exists(), f"Path {path} does not exist."
@dg.multi_asset(name=name, specs=asset_specs)
def _asset(context: dg.AssetExecutionContext):
return self.execute(context, str(path))
return dg.Definitions(assets=[_asset])
def execute(self, context: dg.AssetExecutionContext, csv_path: str):
# Connect to DuckDB
con = duckdb.connect()
query = f"SELECT * FROM '{csv_path}'"
# Read CSV from parent directory
df = con.execute(query).fetchdf()
md = df.head().to_markdown(index=False)
print(md) # noqa
return dg.MaterializeResult(
metadata={
"query": dg.MetadataValue.md(query),
"df": dg.MetadataValue.md(md),
},
)
| DuckDbComponent |
python | pydantic__pydantic | tests/benchmarks/shared.py | {
"start": 1285,
"end": 1383
} | class ____(BaseModel):
nested: NestedModel
optional_nested: Optional[NestedModel]
| OuterModel |
python | run-llama__llama_index | llama-index-finetuning/llama_index/finetuning/rerankers/cohere_reranker.py | {
"start": 246,
"end": 2754
} | class ____(BaseCohereRerankerFinetuningEngine):
"""Cohere Reranker Finetune Engine."""
def __init__(
self,
train_file_name: str = "train.jsonl",
val_file_name: Optional[str] = None,
model_name: str = "exp_finetune",
model_type: str = "RERANK",
base_model: str = "english",
api_key: Optional[str] = None,
) -> None:
"""Init params."""
# This will be None if 'cohere' module is not available
cohere_spec = importlib.util.find_spec("cohere")
if cohere_spec is not None:
import cohere
else:
# Raise an ImportError if 'cohere' is not installed
raise ImportError(
"Cannot import cohere. Please install the package using `pip install cohere`."
)
try:
self.api_key = api_key or os.environ["COHERE_API_KEY"]
except IndexError:
raise ValueError(
"Must pass in cohere api key or "
"specify via COHERE_API_KEY environment variable "
)
self._model = cohere.Client(self.api_key, client_name="llama_index")
self._train_file_name = train_file_name
self._val_file_name = val_file_name
self._model_name = model_name
self._model_type = model_type
self._base_model = base_model
self._finetune_model = None
def finetune(self) -> None:
"""Finetune model."""
from cohere.custom_model_dataset import JsonlDataset
if self._val_file_name:
# Uploading both train file and eval file
dataset = JsonlDataset(
train_file=self._train_file_name, eval_file=self._val_file_name
)
else:
# Single Train File Upload:
dataset = JsonlDataset(train_file=self._train_file_name)
self._finetune_model = self._model.create_custom_model(
name=self._model_name,
dataset=dataset,
model_type=self._model_type,
base_model=self._base_model,
)
def get_finetuned_model(self, top_n: int = 5) -> CohereRerank:
"""Gets finetuned model id."""
if self._finetune_model is None:
raise RuntimeError(
"Finetuned model is not set yet. Please run the finetune method first."
)
return CohereRerank(
model=self._finetune_model.id, top_n=top_n, api_key=self.api_key
)
| CohereRerankerFinetuneEngine |
python | ray-project__ray | python/ray/tests/test_autoscaler.py | {
"start": 2004,
"end": 2732
} | class ____(str, Enum):
"""Potential outcomes of DrainNode calls, each of which is handled
differently by the autoscaler.
"""
# Return a reponse indicating all nodes were successfully drained.
Succeeded = "Succeeded"
# Return response indicating at least one node failed to be drained.
NotAllDrained = "NotAllDrained"
# Return an unimplemented gRPC error, indicating an old GCS.
Unimplemented = "Unimplemented"
# Raise a generic unexpected RPC error.
GenericRpcError = "GenericRpcError"
# Raise a generic unexpected exception.
GenericException = "GenericException"
# Tell the autoscaler to fail finding ips during drain
FailedToFindIp = "FailedToFindIp"
| DrainNodeOutcome |
python | scikit-learn__scikit-learn | sklearn/preprocessing/_discretization.py | {
"start": 593,
"end": 20973
} | class ____(TransformerMixin, BaseEstimator):
"""
Bin continuous data into intervals.
Read more in the :ref:`User Guide <preprocessing_discretization>`.
.. versionadded:: 0.20
Parameters
----------
n_bins : int or array-like of shape (n_features,), default=5
The number of bins to produce. Raises ValueError if ``n_bins < 2``.
encode : {'onehot', 'onehot-dense', 'ordinal'}, default='onehot'
Method used to encode the transformed result.
- 'onehot': Encode the transformed result with one-hot encoding
and return a sparse matrix. Ignored features are always
stacked to the right.
- 'onehot-dense': Encode the transformed result with one-hot encoding
and return a dense array. Ignored features are always
stacked to the right.
- 'ordinal': Return the bin identifier encoded as an integer value.
strategy : {'uniform', 'quantile', 'kmeans'}, default='quantile'
Strategy used to define the widths of the bins.
- 'uniform': All bins in each feature have identical widths.
- 'quantile': All bins in each feature have the same number of points.
- 'kmeans': Values in each bin have the same nearest center of a 1D
k-means cluster.
For an example of the different strategies see:
:ref:`sphx_glr_auto_examples_preprocessing_plot_discretization_strategies.py`.
quantile_method : {"inverted_cdf", "averaged_inverted_cdf",
"closest_observation", "interpolated_inverted_cdf", "hazen",
"weibull", "linear", "median_unbiased", "normal_unbiased"},
default="linear"
Method to pass on to np.percentile calculation when using
strategy="quantile". Only `averaged_inverted_cdf` and `inverted_cdf`
support the use of `sample_weight != None` when subsampling is not
active.
.. versionadded:: 1.7
dtype : {np.float32, np.float64}, default=None
The desired data-type for the output. If None, output dtype is
consistent with input dtype. Only np.float32 and np.float64 are
supported.
.. versionadded:: 0.24
subsample : int or None, default=200_000
Maximum number of samples, used to fit the model, for computational
efficiency.
`subsample=None` means that all the training samples are used when
computing the quantiles that determine the binning thresholds.
Since quantile computation relies on sorting each column of `X` and
that sorting has an `n log(n)` time complexity,
it is recommended to use subsampling on datasets with a
very large number of samples.
.. versionchanged:: 1.3
The default value of `subsample` changed from `None` to `200_000` when
`strategy="quantile"`.
.. versionchanged:: 1.5
The default value of `subsample` changed from `None` to `200_000` when
`strategy="uniform"` or `strategy="kmeans"`.
random_state : int, RandomState instance or None, default=None
Determines random number generation for subsampling.
Pass an int for reproducible results across multiple function calls.
See the `subsample` parameter for more details.
See :term:`Glossary <random_state>`.
.. versionadded:: 1.1
Attributes
----------
bin_edges_ : ndarray of ndarray of shape (n_features,)
The edges of each bin. Contain arrays of varying shapes ``(n_bins_, )``
Ignored features will have empty arrays.
n_bins_ : ndarray of shape (n_features,), dtype=np.int64
Number of bins per feature. Bins whose width are too small
(i.e., <= 1e-8) are removed with a warning.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
Binarizer : Class used to bin values as ``0`` or
``1`` based on a parameter ``threshold``.
Notes
-----
For a visualization of discretization on different datasets refer to
:ref:`sphx_glr_auto_examples_preprocessing_plot_discretization_classification.py`.
On the effect of discretization on linear models see:
:ref:`sphx_glr_auto_examples_preprocessing_plot_discretization.py`.
In bin edges for feature ``i``, the first and last values are used only for
``inverse_transform``. During transform, bin edges are extended to::
np.concatenate([-np.inf, bin_edges_[i][1:-1], np.inf])
You can combine ``KBinsDiscretizer`` with
:class:`~sklearn.compose.ColumnTransformer` if you only want to preprocess
part of the features.
``KBinsDiscretizer`` might produce constant features (e.g., when
``encode = 'onehot'`` and certain bins do not contain any data).
These features can be removed with feature selection algorithms
(e.g., :class:`~sklearn.feature_selection.VarianceThreshold`).
Examples
--------
>>> from sklearn.preprocessing import KBinsDiscretizer
>>> X = [[-2, 1, -4, -1],
... [-1, 2, -3, -0.5],
... [ 0, 3, -2, 0.5],
... [ 1, 4, -1, 2]]
>>> est = KBinsDiscretizer(
... n_bins=3, encode='ordinal', strategy='uniform'
... )
>>> est.fit(X)
KBinsDiscretizer(...)
>>> Xt = est.transform(X)
>>> Xt # doctest: +SKIP
array([[ 0., 0., 0., 0.],
[ 1., 1., 1., 0.],
[ 2., 2., 2., 1.],
[ 2., 2., 2., 2.]])
Sometimes it may be useful to convert the data back into the original
feature space. The ``inverse_transform`` function converts the binned
data into the original feature space. Each value will be equal to the mean
of the two bin edges.
>>> est.bin_edges_[0]
array([-2., -1., 0., 1.])
>>> est.inverse_transform(Xt)
array([[-1.5, 1.5, -3.5, -0.5],
[-0.5, 2.5, -2.5, -0.5],
[ 0.5, 3.5, -1.5, 0.5],
[ 0.5, 3.5, -1.5, 1.5]])
While this preprocessing step can be an optimization, it is important
to note the array returned by ``inverse_transform`` will have an internal type
of ``np.float64`` or ``np.float32``, denoted by the ``dtype`` input argument.
This can drastically increase the memory usage of the array. See the
:ref:`sphx_glr_auto_examples_cluster_plot_face_compress.py`
where `KBinsDescretizer` is used to cluster the image into bins and increases
the size of the image by 8x.
"""
_parameter_constraints: dict = {
"n_bins": [Interval(Integral, 2, None, closed="left"), "array-like"],
"encode": [StrOptions({"onehot", "onehot-dense", "ordinal"})],
"strategy": [StrOptions({"uniform", "quantile", "kmeans"})],
"quantile_method": [
StrOptions(
{
"warn",
"inverted_cdf",
"averaged_inverted_cdf",
"closest_observation",
"interpolated_inverted_cdf",
"hazen",
"weibull",
"linear",
"median_unbiased",
"normal_unbiased",
}
)
],
"dtype": [Options(type, {np.float64, np.float32}), None],
"subsample": [Interval(Integral, 1, None, closed="left"), None],
"random_state": ["random_state"],
}
def __init__(
self,
n_bins=5,
*,
encode="onehot",
strategy="quantile",
quantile_method="warn",
dtype=None,
subsample=200_000,
random_state=None,
):
self.n_bins = n_bins
self.encode = encode
self.strategy = strategy
self.quantile_method = quantile_method
self.dtype = dtype
self.subsample = subsample
self.random_state = random_state
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None, sample_weight=None):
"""
Fit the estimator.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to be discretized.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
sample_weight : ndarray of shape (n_samples,)
Contains weight values to be associated with each sample.
.. versionadded:: 1.3
.. versionchanged:: 1.7
Added support for strategy="uniform".
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X, dtype="numeric")
if self.dtype in (np.float64, np.float32):
output_dtype = self.dtype
else: # self.dtype is None
output_dtype = X.dtype
n_samples, n_features = X.shape
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if self.subsample is not None and n_samples > self.subsample:
# Take a subsample of `X`
# When resampling, it is important to subsample **with replacement** to
# preserve the distribution, in particular in the presence of a few data
# points with large weights. You can check this by setting `replace=False`
# in sklearn.utils.test.test_indexing.test_resample_weighted and check that
# it fails as a justification for this claim.
X = resample(
X,
replace=True,
n_samples=self.subsample,
random_state=self.random_state,
sample_weight=sample_weight,
)
# Since we already used the weights when resampling when provided,
# we set them back to `None` to avoid accounting for the weights twice
# in subsequent operations to compute weight-aware bin edges with
# quantiles or k-means.
sample_weight = None
n_features = X.shape[1]
n_bins = self._validate_n_bins(n_features)
bin_edges = np.zeros(n_features, dtype=object)
# TODO(1.9): remove and switch to quantile_method="averaged_inverted_cdf"
# by default.
quantile_method = self.quantile_method
if self.strategy == "quantile" and quantile_method == "warn":
warnings.warn(
"The current default behavior, quantile_method='linear', will be "
"changed to quantile_method='averaged_inverted_cdf' in "
"scikit-learn version 1.9 to naturally support sample weight "
"equivalence properties by default. Pass "
"quantile_method='averaged_inverted_cdf' explicitly to silence this "
"warning.",
FutureWarning,
)
quantile_method = "linear"
if (
self.strategy == "quantile"
and quantile_method not in ["inverted_cdf", "averaged_inverted_cdf"]
and sample_weight is not None
):
raise ValueError(
"When fitting with strategy='quantile' and sample weights, "
"quantile_method should either be set to 'averaged_inverted_cdf' or "
f"'inverted_cdf', got quantile_method='{quantile_method}' instead."
)
if self.strategy != "quantile" and sample_weight is not None:
# Prepare a mask to filter out zero-weight samples when extracting
# the min and max values of each columns which are needed for the
# "uniform" and "kmeans" strategies.
nnz_weight_mask = sample_weight != 0
else:
# Otherwise, all samples are used. Use a slice to avoid creating a
# new array.
nnz_weight_mask = slice(None)
for jj in range(n_features):
column = X[:, jj]
col_min = column[nnz_weight_mask].min()
col_max = column[nnz_weight_mask].max()
if col_min == col_max:
warnings.warn(
"Feature %d is constant and will be replaced with 0." % jj
)
n_bins[jj] = 1
bin_edges[jj] = np.array([-np.inf, np.inf])
continue
if self.strategy == "uniform":
bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)
elif self.strategy == "quantile":
percentile_levels = np.linspace(0, 100, n_bins[jj] + 1)
# method="linear" is the implicit default for any numpy
# version. So we keep it version independent in that case by
# using an empty param dict.
percentile_kwargs = {}
if quantile_method != "linear" and sample_weight is None:
percentile_kwargs["method"] = quantile_method
if sample_weight is None:
bin_edges[jj] = np.asarray(
np.percentile(column, percentile_levels, **percentile_kwargs),
dtype=np.float64,
)
else:
average = (
True if quantile_method == "averaged_inverted_cdf" else False
)
bin_edges[jj] = _weighted_percentile(
column, sample_weight, percentile_levels, average=average
)
elif self.strategy == "kmeans":
from sklearn.cluster import KMeans # fixes import loops
# Deterministic initialization with uniform spacing
uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5
# 1D k-means procedure
km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1)
centers = km.fit(
column[:, None], sample_weight=sample_weight
).cluster_centers_[:, 0]
# Must sort, centers may be unsorted even with sorted init
centers.sort()
bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]
# Remove bins whose width are too small (i.e., <= 1e-8)
if self.strategy in ("quantile", "kmeans"):
mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8
bin_edges[jj] = bin_edges[jj][mask]
if len(bin_edges[jj]) - 1 != n_bins[jj]:
warnings.warn(
"Bins whose width are too small (i.e., <= "
"1e-8) in feature %d are removed. Consider "
"decreasing the number of bins." % jj
)
n_bins[jj] = len(bin_edges[jj]) - 1
self.bin_edges_ = bin_edges
self.n_bins_ = n_bins
if "onehot" in self.encode:
self._encoder = OneHotEncoder(
categories=[np.arange(i) for i in self.n_bins_],
sparse_output=self.encode == "onehot",
dtype=output_dtype,
)
# Fit the OneHotEncoder with toy datasets
# so that it's ready for use after the KBinsDiscretizer is fitted
self._encoder.fit(np.zeros((1, len(self.n_bins_))))
return self
def _validate_n_bins(self, n_features):
"""Returns n_bins_, the number of bins per feature."""
orig_bins = self.n_bins
if isinstance(orig_bins, Integral):
return np.full(n_features, orig_bins, dtype=int)
n_bins = check_array(orig_bins, dtype=int, copy=True, ensure_2d=False)
if n_bins.ndim > 1 or n_bins.shape[0] != n_features:
raise ValueError("n_bins must be a scalar or array of shape (n_features,).")
bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)
violating_indices = np.where(bad_nbins_value)[0]
if violating_indices.shape[0] > 0:
indices = ", ".join(str(i) for i in violating_indices)
raise ValueError(
"{} received an invalid number "
"of bins at indices {}. Number of bins "
"must be at least 2, and must be an int.".format(
KBinsDiscretizer.__name__, indices
)
)
return n_bins
def transform(self, X):
"""
Discretize the data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to be discretized.
Returns
-------
Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64}
Data in the binned space. Will be a sparse matrix if
`self.encode='onehot'` and ndarray otherwise.
"""
check_is_fitted(self)
# check input and attribute dtypes
dtype = (np.float64, np.float32) if self.dtype is None else self.dtype
Xt = validate_data(self, X, copy=True, dtype=dtype, reset=False)
bin_edges = self.bin_edges_
for jj in range(Xt.shape[1]):
Xt[:, jj] = np.searchsorted(bin_edges[jj][1:-1], Xt[:, jj], side="right")
if self.encode == "ordinal":
return Xt
dtype_init = None
if "onehot" in self.encode:
dtype_init = self._encoder.dtype
self._encoder.dtype = Xt.dtype
try:
Xt_enc = self._encoder.transform(Xt)
finally:
# revert the initial dtype to avoid modifying self.
self._encoder.dtype = dtype_init
return Xt_enc
def inverse_transform(self, X):
"""
Transform discretized data back to original feature space.
Note that this function does not regenerate the original data
due to discretization rounding.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Transformed data in the binned space.
Returns
-------
X_original : ndarray, dtype={np.float32, np.float64}
Data in the original feature space.
"""
check_is_fitted(self)
if "onehot" in self.encode:
X = self._encoder.inverse_transform(X)
Xinv = check_array(X, copy=True, dtype=(np.float64, np.float32))
n_features = self.n_bins_.shape[0]
if Xinv.shape[1] != n_features:
raise ValueError(
"Incorrect number of features. Expecting {}, received {}.".format(
n_features, Xinv.shape[1]
)
)
for jj in range(n_features):
bin_edges = self.bin_edges_[jj]
bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5
Xinv[:, jj] = bin_centers[(Xinv[:, jj]).astype(np.int64)]
return Xinv
def get_feature_names_out(self, input_features=None):
"""Get output feature names.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "n_features_in_")
input_features = _check_feature_names_in(self, input_features)
if hasattr(self, "_encoder"):
return self._encoder.get_feature_names_out(input_features)
# ordinal encoding
return input_features
| KBinsDiscretizer |
python | keras-team__keras | keras/src/optimizers/adam.py | {
"start": 158,
"end": 5439
} | class ____(optimizer.Optimizer):
"""Optimizer that implements the Adam algorithm.
Adam optimization is a stochastic gradient descent method that is based on
adaptive estimation of first-order and second-order moments.
According to
[Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
the method is "*computationally
efficient, has little memory requirement, invariant to diagonal rescaling of
gradients, and is well suited for problems that are large in terms of
data/parameters*".
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates. Defaults to
`0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 2nd moment estimates. Defaults to
`0.999`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults
to `1e-7`.
amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm
from the paper "On the Convergence of Adam and beyond". Defaults
to `False`.
{{base_optimizer_keyword_args}}
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adam",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.amsgrad = amsgrad
def build(self, var_list):
"""Initialize optimizer variables.
Adam optimizer has 3 types of variables: momentums, velocities and
velocity_hat (only set when amsgrad is applied),
Args:
var_list: list of model variables to build Adam variables on.
"""
if self.built:
return
super().build(var_list)
self._momentums, self._velocities = self.add_optimizer_variables(
var_list, ["momentum", "velocity"]
)
if self.amsgrad:
self._velocity_hats = self.add_optimizer_variables(
var_list, "velocity_hat"
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
local_step = ops.cast(self.iterations + 1, variable.dtype)
beta_1_power = ops.power(
ops.cast(self.beta_1, variable.dtype), local_step
)
beta_2_power = ops.power(
ops.cast(self.beta_2, variable.dtype), local_step
)
m = self._momentums[self._get_variable_index(variable)]
v = self._velocities[self._get_variable_index(variable)]
alpha = lr * ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)
self.assign_add(
m, ops.multiply(ops.subtract(gradient, m), 1 - self.beta_1)
)
self.assign_add(
v,
ops.multiply(
ops.subtract(ops.square(gradient), v), 1 - self.beta_2
),
)
if self.amsgrad:
v_hat = self._velocity_hats[self._get_variable_index(variable)]
self.assign(v_hat, ops.maximum(v_hat, v))
v = v_hat
self.assign_sub(
variable,
ops.divide(
ops.multiply(m, alpha), ops.add(ops.sqrt(v), self.epsilon)
),
)
def get_config(self):
config = super().get_config()
config.update(
{
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
"amsgrad": self.amsgrad,
}
)
return config
Adam.__doc__ = Adam.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| Adam |
python | Textualize__textual | src/textual/css/_style_properties.py | {
"start": 2097,
"end": 3789
} | class ____(Generic[PropertyGetType, PropertySetType]):
"""Descriptor that abstracts away common machinery for other style descriptors.
Args:
default: The default value (or a factory thereof) of the property.
layout: Whether to refresh the node layout on value change.
refresh_children: Whether to refresh the node children on value change.
"""
def __init__(
self,
default: PropertyGetType,
layout: bool = False,
refresh_children: bool = False,
) -> None:
self.default = default
self.layout = layout
self.refresh_children = refresh_children
def validate_value(self, value: object) -> PropertyGetType:
"""Validate the setter value.
Args:
value: The value being set.
Returns:
The value to be set.
"""
# Raise StyleValueError here
return cast(PropertyGetType, value)
def __set_name__(self, owner: StylesBase, name: str) -> None:
self.name = name
def __get__(
self, obj: StylesBase, objtype: type[StylesBase] | None = None
) -> PropertyGetType:
return obj.get_rule(self.name, self.default) # type: ignore[return-value]
def __set__(self, obj: StylesBase, value: PropertySetType | None) -> None:
_rich_traceback_omit = True
if value is None:
obj.clear_rule(self.name)
obj.refresh(layout=self.layout, children=self.refresh_children)
return
new_value = self.validate_value(value)
if obj.set_rule(self.name, new_value):
obj.refresh(layout=self.layout, children=self.refresh_children)
| GenericProperty |
python | coleifer__peewee | tests/postgres.py | {
"start": 9536,
"end": 14507
} | class ____(ModelTestCase):
database = db
requires = [ArrayModel]
def create_sample(self):
return ArrayModel.create(
tags=['alpha', 'beta', 'gamma', 'delta'],
ints=[[1, 2], [3, 4], [5, 6]])
def test_index_expression(self):
data = (
(['a', 'b', 'c'], []),
(['b', 'c', 'd', 'e'], []))
am_ids = []
for tags, ints in data:
am = ArrayModel.create(tags=tags, ints=ints)
am_ids.append(am.id)
last_tag = fn.array_upper(ArrayModel.tags, 1)
query = ArrayModel.select(ArrayModel.tags[last_tag]).tuples()
self.assertEqual(sorted([t for t, in query]), ['c', 'e'])
q = ArrayModel.select().where(ArrayModel.tags[last_tag] < 'd')
self.assertEqual([a.id for a in q], [am_ids[0]])
q = ArrayModel.select().where(ArrayModel.tags[last_tag] > 'd')
self.assertEqual([a.id for a in q], [am_ids[1]])
def test_hashable_objectslice(self):
ArrayModel.create(tags=[], ints=[[0, 1], [2, 3]])
ArrayModel.create(tags=[], ints=[[4, 5], [6, 7]])
n = (ArrayModel
.update({ArrayModel.ints[0][0]: ArrayModel.ints[0][0] + 1})
.execute())
self.assertEqual(n, 2)
am1, am2 = ArrayModel.select().order_by(ArrayModel.id)
self.assertEqual(am1.ints, [[1, 1], [2, 3]])
self.assertEqual(am2.ints, [[5, 5], [6, 7]])
def test_array_get_set(self):
am = self.create_sample()
am_db = ArrayModel.get(ArrayModel.id == am.id)
self.assertEqual(am_db.tags, ['alpha', 'beta', 'gamma', 'delta'])
self.assertEqual(am_db.ints, [[1, 2], [3, 4], [5, 6]])
def test_array_equality(self):
am1 = ArrayModel.create(tags=['t1'], ints=[[1, 2]])
am2 = ArrayModel.create(tags=['t2'], ints=[[3, 4]])
obj = ArrayModel.get(ArrayModel.tags == ['t1'])
self.assertEqual(obj.id, am1.id)
self.assertEqual(obj.tags, ['t1'])
obj = ArrayModel.get(ArrayModel.ints == [[3, 4]])
self.assertEqual(obj.id, am2.id)
obj = ArrayModel.get(ArrayModel.tags != ['t1'])
self.assertEqual(obj.id, am2.id)
def test_array_db_value(self):
am = ArrayModel.create(tags=('foo', 'bar'), ints=[])
am_db = ArrayModel.get(ArrayModel.id == am.id)
self.assertEqual(am_db.tags, ['foo', 'bar'])
def test_array_search(self):
def assertAM(where, *instances):
query = (ArrayModel
.select()
.where(where)
.order_by(ArrayModel.id))
self.assertEqual([x.id for x in query], [x.id for x in instances])
am = self.create_sample()
am2 = ArrayModel.create(tags=['alpha', 'beta'], ints=[[1, 1]])
am3 = ArrayModel.create(tags=['delta'], ints=[[3, 4]])
am4 = ArrayModel.create(tags=['中文'], ints=[[3, 4]])
am5 = ArrayModel.create(tags=['中文', '汉语'], ints=[[3, 4]])
AM = ArrayModel
T = AM.tags
assertAM((Value('beta') == fn.ANY(T)), am, am2)
assertAM((Value('delta') == fn.Any(T)), am, am3)
assertAM(Value('omega') == fn.Any(T))
# Check the contains operator.
assertAM(SQL("tags::text[] @> ARRAY['beta']"), am, am2)
# Use the nicer API.
assertAM(T.contains('beta'), am, am2)
assertAM(T.contains('omega', 'delta'))
assertAM(T.contains('汉语'), am5)
assertAM(T.contains('alpha', 'delta'), am)
assertAM(T.contained_by('alpha', 'beta', 'delta'), am2, am3)
assertAM(T.contained_by('alpha', 'beta', 'gamma', 'delta'),
am, am2, am3)
# Check for any.
assertAM(T.contains_any('beta'), am, am2)
assertAM(T.contains_any('中文'), am4, am5)
assertAM(T.contains_any('omega', 'delta'), am, am3)
assertAM(T.contains_any('alpha', 'delta'), am, am2, am3)
def test_array_index_slice(self):
self.create_sample()
AM = ArrayModel
I, T = AM.ints, AM.tags
row = AM.select(T[1].alias('arrtags')).dicts().get()
self.assertEqual(row['arrtags'], 'beta')
row = AM.select(T[2:4].alias('foo')).dicts().get()
self.assertEqual(row['foo'], ['gamma', 'delta'])
row = AM.select(I[1][1].alias('ints')).dicts().get()
self.assertEqual(row['ints'], 4)
row = AM.select(I[1:2][0].alias('ints')).dicts().get()
self.assertEqual(row['ints'], [[3], [5]])
@requires_models(DecimalArray)
def test_field_kwargs(self):
vl1, vl2 = [Dc('3.1'), Dc('1.3')], [Dc('3.14'), Dc('1')]
da1, da2 = [DecimalArray.create(values=vl) for vl in (vl1, vl2)]
da1_db = DecimalArray.get(DecimalArray.id == da1.id)
da2_db = DecimalArray.get(DecimalArray.id == da2.id)
self.assertEqual(da1_db.values, [Dc('3.1'), Dc('1.3')])
self.assertEqual(da2_db.values, [Dc('3.1'), Dc('1.0')])
| TestArrayField |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_geometry_distance_to_address_to_be_between.py | {
"start": 670,
"end": 6883
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.geometry.distance_to_address"
condition_value_keys = (
"column_shape_format",
"place",
"geocoder",
"geocoder_config",
"min_value",
"max_value",
"strict_min",
"strict_max",
"units",
)
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs): # noqa: C901 - 24
column_shape_format = kwargs.get("column_shape_format")
place = kwargs.get("place")
geocoder = kwargs.get("geocoder")
geocoder_config = kwargs.get("geocoder_config")
min_value = kwargs.get("min_value")
max_value = kwargs.get("max_value")
strict_min = kwargs.get("strict_min")
strict_max = kwargs.get("strict_max")
units = kwargs.get("units")
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None") # noqa: TRY003
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value") # noqa: TRY003
if geocoder not in ["nominatim", "pickpoint", "openmapquest"]:
raise NotImplementedError("The geocoder is not implemented for this method.")
# find the reference shape with the geocoder.
if geocoder is not None:
try:
# Specify the default parameters for Nominatim and run query. User is responsible for config and query params otherwise.
query_params = dict(exactly_one=True, geometry="wkt")
location = cls.geocode(geocoder, geocoder_config, place, query_params)
except Exception:
raise Exception( # noqa: TRY002, TRY003
"Geocoding configuration and query failed to produce a valid result."
)
else:
raise Exception( # noqa: TRY002, TRY003
"A valid geocoder must be provided for this method. See GeoPy for reference."
)
# Load the column into a pygeos Geometry vector from numpy array (Series not supported).
if column_shape_format == "wkt":
shape_test = geos.from_wkt(column.to_numpy(), on_invalid="ignore")
elif column_shape_format == "wkb":
shape_test = geos.from_wkb(column.to_numpy(), on_invalid="ignore")
elif column_shape_format == "lonlat":
shape_df = pd.DataFrame(column.to_list(), columns=("lon", "lat"))
shape_test = geos.points(shape_df.lon, y=shape_df.lat)
elif column_shape_format == "latlon":
shape_df = pd.DataFrame(column.to_list(), columns=("lat", "lon"))
shape_test = geos.points(shape_df.lon, y=shape_df.lat)
else:
raise NotImplementedError("Column values shape format not implemented.")
# verify that all shapes are points and if not, convert to centroid point.
points_test = pd.Series(shape_test)
if not points_test.apply(lambda x: geos.get_type_id(x) == 0).all():
points_test = points_test.map(geos.centroid)
# convert the geos point to a geopy point.
points_test = points_test.apply(lambda x: lonlat(geos.get_x(x), geos.get_y(x)))
if location is None:
raise Exception("Geocoding failed to return a result.") # noqa: TRY002, TRY003
else:
point_ref = lonlat(location.longitude, location.latitude)
# calculate the distance between the points using geopy
if units in ["km", "kilometers", "kilometres", "kilometer", "kilometre"]:
column_dist = points_test.apply(lambda p: distance(p, point_ref).km)
elif units in ["m", "meters", "metres", "meter", "metre"]:
column_dist = points_test.apply(lambda p: distance(p, point_ref).m)
elif units in ["mi", "miles", "mile"]:
column_dist = points_test.apply(lambda p: distance(p, point_ref).mi)
elif units in ["ft", "feet", "foot"]:
column_dist = points_test.apply(lambda p: distance(p, point_ref).ft)
else:
raise NotImplementedError(
"Unit conversion has not yet been implemented. Please use one of km, m, mi, ft"
)
# Evaluate the between statement (from column_values_between.py)
if min_value is None:
if strict_max:
return column_dist < max_value
else:
return column_dist <= max_value
elif max_value is None:
if strict_min:
return min_value < column_dist
else:
return min_value <= column_dist
else:
if strict_min and strict_max:
return (min_value < column_dist) & (column_dist < max_value)
elif strict_min:
return (min_value < column_dist) & (column_dist <= max_value)
elif strict_max:
return (min_value <= column_dist) & (column_dist < max_value)
else:
return (min_value <= column_dist) & (column_dist <= max_value)
@staticmethod
def geocode(geocoder, config, query, query_config):
cls = geocoders.get_geocoder_for_service(geocoder)
geolocator = cls(**config)
location = geolocator.geocode(query, **query_config)
return location
# #This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# #This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesGeometryDistanceToAddress |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 40218,
"end": 40694
} | class ____:
def __init__(
self, info: milvus_types.AnalyzerResult, with_hash: bool = False, with_detail: bool = False
) -> None:
if not with_detail and not with_hash:
self.tokens = [token.token for token in info.tokens]
else:
self.tokens = [AnalyzeToken(token, with_hash, with_detail) for token in info.tokens]
def __str__(self) -> str:
return str(self.tokens)
__repr__ = __str__
@dataclass
| AnalyzeResult |
python | kamyu104__LeetCode-Solutions | Python/find-the-middle-index-in-array.py | {
"start": 29,
"end": 343
} | class ____(object):
def findMiddleIndex(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
total = sum(nums)
accu = 0
for i, x in enumerate(nums):
if accu*2 == total-x:
return i
accu += x
return -1
| Solution |
python | kamyu104__LeetCode-Solutions | Python/permutation-sequence.py | {
"start": 71,
"end": 544
} | class ____(object):
def getPermutation(self, n, k):
"""
:type n: int
:type k: int
:rtype: str
"""
seq, k, fact = "", k - 1, math.factorial(n - 1)
perm = [i for i in xrange(1, n + 1)]
for i in reversed(xrange(n)):
curr = perm[k / fact]
seq += str(curr)
perm.remove(curr)
if i > 0:
k %= fact
fact /= i
return seq
| Solution |
python | walkccc__LeetCode | solutions/892. Surface Area of 3D Shapes/892.py | {
"start": 0,
"end": 365
} | class ____:
def surfaceArea(self, grid: list[list[int]]) -> int:
ans = 0
for i in range(len(grid)):
for j in range(len(grid)):
if grid[i][j]:
ans += grid[i][j] * 4 + 2
if i > 0:
ans -= min(grid[i][j], grid[i - 1][j]) * 2
if j > 0:
ans -= min(grid[i][j], grid[i][j - 1]) * 2
return ans
| Solution |
python | prabhupant__python-ds | data_structures/binary_trees/print_nodes_with_no_sibling.py | {
"start": 0,
"end": 488
} | class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def no_siblings(root):
if root is None:
return
if root.left is not None and root.right is not None:
no_siblings(root.left)
no_siblings(root.right)
elif root.right is not None:
print(root.right.val)
no_siblings(root.right)
elif root.left is not None:
print(root.left.val)
no_siblings(root.left)
| Node |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_server_tool_use_block_param.py | {
"start": 575,
"end": 1230
} | class ____(TypedDict, total=False):
id: Required[str]
input: Required[Dict[str, object]]
name: Required[
Literal[
"web_search",
"web_fetch",
"code_execution",
"bash_code_execution",
"text_editor_code_execution",
"tool_search_tool_regex",
"tool_search_tool_bm25",
]
]
type: Required[Literal["server_tool_use"]]
cache_control: Optional[BetaCacheControlEphemeralParam]
"""Create a cache control breakpoint at this content block."""
caller: Caller
"""Tool invocation directly from the model."""
| BetaServerToolUseBlockParam |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/struct_store/json_query.py | {
"start": 3412,
"end": 9036
} | class ____(BaseQueryEngine):
"""
GPT JSON Query Engine.
Converts natural language to JSON Path queries.
Args:
json_value (JSONType): JSON value
json_schema (JSONType): JSON schema
json_path_prompt (BasePromptTemplate): The JSON Path prompt to use.
output_processor (Callable): The output processor that executes the
JSON Path query.
output_kwargs (dict): Additional output processor kwargs for the
output_processor function.
verbose (bool): Whether to print verbose output.
"""
def __init__(
self,
json_value: JSONType,
json_schema: JSONType,
llm: Optional[LLM] = None,
json_path_prompt: Optional[BasePromptTemplate] = None,
output_processor: Optional[Callable] = None,
output_kwargs: Optional[dict] = None,
synthesize_response: bool = True,
response_synthesis_prompt: Optional[BasePromptTemplate] = None,
verbose: bool = False,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._json_value = json_value
self._json_schema = json_schema
self._llm = llm or Settings.llm
self._json_path_prompt = json_path_prompt or DEFAULT_JSON_PATH_PROMPT
self._output_processor = output_processor or default_output_processor
self._output_kwargs = output_kwargs or {}
self._verbose = verbose
self._synthesize_response = synthesize_response
self._response_synthesis_prompt = (
response_synthesis_prompt or DEFAULT_RESPONSE_SYNTHESIS_PROMPT
)
super().__init__(callback_manager=Settings.callback_manager)
def _get_prompts(self) -> Dict[str, Any]:
"""Get prompts."""
return {
"json_path_prompt": self._json_path_prompt,
"response_synthesis_prompt": self._response_synthesis_prompt,
}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "json_path_prompt" in prompts:
self._json_path_prompt = prompts["json_path_prompt"]
if "response_synthesis_prompt" in prompts:
self._response_synthesis_prompt = prompts["response_synthesis_prompt"]
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
def _get_schema_context(self) -> str:
"""Get JSON schema context."""
return json.dumps(self._json_schema)
def _query(self, query_bundle: QueryBundle) -> Response:
"""Answer a query."""
schema = self._get_schema_context()
json_path_response_str = self._llm.predict(
self._json_path_prompt,
schema=schema,
query_str=query_bundle.query_str,
)
if self._verbose:
print_text(
f"> JSONPath Instructions:\n```\n{json_path_response_str}\n```\n"
)
json_path_output = self._output_processor(
json_path_response_str,
self._json_value,
**self._output_kwargs,
)
# removes JSONPath: prefix from returned JSON path prompt call
if self._json_path_prompt == DEFAULT_JSON_PATH_PROMPT:
json_path_response_str = default_output_response_parser(
json_path_response_str
)
if self._verbose:
print_text(f"> JSONPath Output: {json_path_output}\n")
if self._synthesize_response:
response_str = self._llm.predict(
self._response_synthesis_prompt,
query_str=query_bundle.query_str,
json_schema=self._json_schema,
json_path=json_path_response_str,
json_path_value=json_path_output,
)
else:
response_str = json.dumps(json_path_output)
response_metadata = {
"json_path_response_str": json_path_response_str,
}
return Response(response=response_str, metadata=response_metadata)
async def _aquery(self, query_bundle: QueryBundle) -> Response:
schema = self._get_schema_context()
json_path_response_str = await self._llm.apredict(
self._json_path_prompt,
schema=schema,
query_str=query_bundle.query_str,
)
# removes JSONPath: prefix from returned JSON path prompt call
if self._json_path_prompt == DEFAULT_JSON_PATH_PROMPT:
json_path_response_str = default_output_response_parser(
json_path_response_str
)
if self._verbose:
print_text(
f"> JSONPath Instructions:\n```\n{json_path_response_str}\n```\n"
)
json_path_output = self._output_processor(
json_path_response_str,
self._json_value,
**self._output_kwargs,
)
if self._verbose:
print_text(f"> JSONPath Output: {json_path_output}\n")
if self._synthesize_response:
response_str = await self._llm.apredict(
self._response_synthesis_prompt,
query_str=query_bundle.query_str,
json_schema=self._json_schema,
json_path=json_path_response_str,
json_path_value=json_path_output,
)
else:
response_str = json.dumps(json_path_output)
response_metadata = {
"json_path_response_str": json_path_response_str,
}
return Response(response=response_str, metadata=response_metadata)
| JSONQueryEngine |
python | pallets__itsdangerous | tests/test_itsdangerous/test_timed.py | {
"start": 758,
"end": 2996
} | class ____(FreezeMixin, TestSigner):
@pytest.fixture()
def signer_factory(self):
return partial(TimestampSigner, secret_key="secret-key")
def test_max_age(self, signer, ts, freeze):
signed = signer.sign("value")
freeze.tick()
assert signer.unsign(signed, max_age=10) == b"value"
freeze.tick(timedelta(seconds=10))
with pytest.raises(SignatureExpired) as exc_info:
signer.unsign(signed, max_age=10)
assert exc_info.value.date_signed == ts
def test_return_timestamp(self, signer, ts):
signed = signer.sign("value")
assert signer.unsign(signed, return_timestamp=True) == (b"value", ts)
def test_timestamp_missing(self, signer):
other = Signer("secret-key")
signed = other.sign("value")
with pytest.raises(BadTimeSignature) as exc_info:
signer.unsign(signed)
assert "missing" in str(exc_info.value)
assert exc_info.value.date_signed is None
def test_malformed_timestamp(self, signer):
other = Signer("secret-key")
signed = other.sign(b"value.____________")
with pytest.raises(BadTimeSignature) as exc_info:
signer.unsign(signed)
assert "Malformed" in str(exc_info.value)
assert exc_info.value.date_signed is None
def test_malformed_future_timestamp(self, signer):
signed = b"value.TgPVoaGhoQ.AGBfQ6G6cr07byTRt0zAdPljHOY"
with pytest.raises(BadTimeSignature) as exc_info:
signer.unsign(signed)
assert "Malformed" in str(exc_info.value)
assert exc_info.value.date_signed is None
def test_future_age(self, signer):
signed = signer.sign("value")
with freeze_time("1971-05-31"):
with pytest.raises(SignatureExpired) as exc_info:
signer.unsign(signed, max_age=10)
assert isinstance(exc_info.value.date_signed, datetime)
def test_sig_error_date_signed(self, signer):
signed = signer.sign("my string").replace(b"my", b"other", 1)
with pytest.raises(BadTimeSignature) as exc_info:
signer.unsign(signed)
assert isinstance(exc_info.value.date_signed, datetime)
| TestTimestampSigner |
python | google__jax | jax/experimental/mosaic/gpu/constraints.py | {
"start": 13073,
"end": 16151
} | class ____:
"""States that `source` layout must be transferable across memory spaces to `target` layout."""
source: Expression
target: Expression
# TODO(allanrenucci): Can this be derived from the layouts?
shape: tuple[int, ...]
def supported_tmem_transfers(
self, packing: int
) -> list[tuple[tcgen05.TMEMLayout, fa.FragmentedLayout]]:
"""Returns the list of supported TMEM <-> Register transfers."""
assert len(self.shape) == 2
columns = self.shape[1]
tmem_default_layout = tcgen05.tmem_default_layout(packing)
return [
(tmem_default_layout, fa.TCGEN05_LAYOUT),
(tmem_default_layout, fa.TMEM_NATIVE_LAYOUT),
(tcgen05.tmem_half_lane_layout(columns, packing), fa.WGMMA_LAYOUT),
(
tcgen05.tmem_m64_collective_layout(columns, packing),
tcgen05.fa_m64_collective_layout(columns),
),
]
def _is_valid_tmem_transfer(
self, tmem_layout: tcgen05.TMEMLayout, reg_layout: fa.FragmentedLayout
) -> bool:
packing = tmem_layout.vector_length
return (tmem_layout, reg_layout) in self.supported_tmem_transfers(packing)
def _is_valid_smem_transfer(
self,
smem_layout: lc.TileTransform | None,
reg_layout: fa.FragmentedLayout,
) -> bool:
# TODO(b/447079781): This is way too restrictive. We need to make it more
# precise by:
# - Consider whether the op is annotated with optimized copies or not.
# - If copies do not have to be optimized, always return True.
# - If copies have to be optimized, determine if the transfer is optimal by
# calling fragmented_array.plan_tiled_transfer.
if inference_utils.is_mma_layout(reg_layout):
return smem_layout is not None and len(smem_layout.tiling) == 2
return smem_layout is None
def holds(self) -> bool | None:
"""Returns whether the constraint holds.
Returns `None` if the constraint can't be checked.
"""
source = self.source
target = self.target
if isinstance(source, TMEMLayout) and isinstance(target, RegisterLayout):
return self._is_valid_tmem_transfer(source.value, target.value)
if isinstance(target, TMEMLayout) and isinstance(source, RegisterLayout):
return self._is_valid_tmem_transfer(target.value, source.value)
if isinstance(source, TMEMLayout) and isinstance(target, TMEMLayout):
return source == target
if isinstance(source, SMEMTiling) and isinstance(target, RegisterLayout):
return self._is_valid_smem_transfer(source.value, target.value)
if isinstance(target, SMEMTiling) and isinstance(source, RegisterLayout):
return self._is_valid_smem_transfer(target.value, source.value)
if isinstance(target, Constant) and isinstance(source, Constant):
source_type = type(source).__name__
target_type = type(target).__name__
raise NotImplementedError(f"Unsupported transfer: {source_type} -> {target_type}")
return None
def __str__(self):
return f"IsTransferable({self.source} ⟶ {self.target})"
@dataclasses.dataclass(frozen=True)
| IsTransferable |
python | allegroai__clearml | clearml/backend_api/services/v2_23/workers.py | {
"start": 44766,
"end": 47526
} | class ____(Request):
"""
Returns count of active company workers in the selected time range.
:param from_date: Starting time (in seconds from epoch) for collecting
statistics
:type from_date: float
:param to_date: Ending time (in seconds from epoch) for collecting statistics
:type to_date: float
:param interval: Time interval in seconds for a single statistics point. The
minimal value is 1
:type interval: int
"""
_service = "workers"
_action = "get_activity_report"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"from_date": {
"description": "Starting time (in seconds from epoch) for collecting statistics",
"type": "number",
},
"interval": {
"description": "Time interval in seconds for a single statistics point. The minimal value is 1",
"type": "integer",
},
"to_date": {
"description": "Ending time (in seconds from epoch) for collecting statistics",
"type": "number",
},
},
"required": ["from_date", "to_date", "interval"],
"type": "object",
}
def __init__(self, from_date: float, to_date: float, interval: int, **kwargs: Any) -> None:
super(GetActivityReportRequest, self).__init__(**kwargs)
self.from_date = from_date
self.to_date = to_date
self.interval = interval
@schema_property("from_date")
def from_date(self) -> float:
return self._property_from_date
@from_date.setter
def from_date(self, value: float) -> None:
if value is None:
self._property_from_date = None
return
self.assert_isinstance(value, "from_date", six.integer_types + (float,))
self._property_from_date = value
@schema_property("to_date")
def to_date(self) -> float:
return self._property_to_date
@to_date.setter
def to_date(self, value: float) -> None:
if value is None:
self._property_to_date = None
return
self.assert_isinstance(value, "to_date", six.integer_types + (float,))
self._property_to_date = value
@schema_property("interval")
def interval(self) -> int:
return self._property_interval
@interval.setter
def interval(self, value: int) -> None:
if value is None:
self._property_interval = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "interval", six.integer_types)
self._property_interval = value
| GetActivityReportRequest |
python | getsentry__sentry | src/sentry/consumers/__init__.py | {
"start": 24767,
"end": 25497
} | class ____(ProcessingStrategyFactory):
"""
This wrapper is used to validate the schema of the event before
passing to the rest of the pipeline. Since the message is currently decoded
twice, it should only be run in dev or on a small fraction of prod data.
"""
def __init__(self, topic: str, enforce_schema: bool, inner: ProcessingStrategyFactory) -> None:
self.topic = topic
self.enforce_schema = enforce_schema
self.inner = inner
def create_with_partitions(self, commit, partitions) -> ProcessingStrategy:
rv = self.inner.create_with_partitions(commit, partitions)
return ValidateSchema(self.topic, self.enforce_schema, rv)
| ValidateSchemaStrategyFactoryWrapper |
python | pypa__setuptools | setuptools/msvc.py | {
"start": 3900,
"end": 8383
} | class ____:
"""
Microsoft Visual Studio related registry information.
Parameters
----------
platform_info: PlatformInfo
"PlatformInfo" instance.
"""
HKEYS = (
winreg.HKEY_USERS,
winreg.HKEY_CURRENT_USER,
winreg.HKEY_LOCAL_MACHINE,
winreg.HKEY_CLASSES_ROOT,
)
def __init__(self, platform_info: PlatformInfo) -> None:
self.pi = platform_info
@property
def visualstudio(self) -> LiteralString:
"""
Microsoft Visual Studio root registry key.
Return
------
str
Registry key
"""
return 'VisualStudio'
@property
def sxs(self) -> LiteralString:
"""
Microsoft Visual Studio SxS registry key.
Return
------
str
Registry key
"""
return os.path.join(self.visualstudio, 'SxS')
@property
def vc(self) -> LiteralString:
"""
Microsoft Visual C++ VC7 registry key.
Return
------
str
Registry key
"""
return os.path.join(self.sxs, 'VC7')
@property
def vs(self) -> LiteralString:
"""
Microsoft Visual Studio VS7 registry key.
Return
------
str
Registry key
"""
return os.path.join(self.sxs, 'VS7')
@property
def vc_for_python(self) -> LiteralString:
"""
Microsoft Visual C++ for Python registry key.
Return
------
str
Registry key
"""
return r'DevDiv\VCForPython'
@property
def microsoft_sdk(self) -> LiteralString:
"""
Microsoft SDK registry key.
Return
------
str
Registry key
"""
return 'Microsoft SDKs'
@property
def windows_sdk(self) -> LiteralString:
"""
Microsoft Windows/Platform SDK registry key.
Return
------
str
Registry key
"""
return os.path.join(self.microsoft_sdk, 'Windows')
@property
def netfx_sdk(self) -> LiteralString:
"""
Microsoft .NET Framework SDK registry key.
Return
------
str
Registry key
"""
return os.path.join(self.microsoft_sdk, 'NETFXSDK')
@property
def windows_kits_roots(self) -> LiteralString:
"""
Microsoft Windows Kits Roots registry key.
Return
------
str
Registry key
"""
return r'Windows Kits\Installed Roots'
@overload
def microsoft(self, key: LiteralString, x86: bool = False) -> LiteralString: ...
@overload
def microsoft(self, key: str, x86: bool = False) -> str: ... # type: ignore[misc]
def microsoft(self, key: str, x86: bool = False) -> str:
"""
Return key in Microsoft software registry.
Parameters
----------
key: str
Registry key path where look.
x86: bool
Force x86 software registry.
Return
------
str
Registry key
"""
node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'
return os.path.join('Software', node64, 'Microsoft', key)
def lookup(self, key: str, name: str) -> str | None:
"""
Look for values in registry in Microsoft software registry.
Parameters
----------
key: str
Registry key path where look.
name: str
Value name to find.
Return
------
str | None
value
"""
key_read = winreg.KEY_READ
openkey = winreg.OpenKey
closekey = winreg.CloseKey
ms = self.microsoft
for hkey in self.HKEYS:
bkey = None
try:
bkey = openkey(hkey, ms(key), 0, key_read)
except OSError:
if not self.pi.current_is_x86():
try:
bkey = openkey(hkey, ms(key, True), 0, key_read)
except OSError:
continue
else:
continue
try:
return winreg.QueryValueEx(bkey, name)[0]
except OSError:
pass
finally:
if bkey:
closekey(bkey)
return None
| RegistryInfo |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/action_info.py | {
"start": 154,
"end": 831
} | class ____(NamedTuple):
"""
A NamedTuple containing actions and related quantities to the policy forward
pass. Additionally contains the agent ids in the corresponding DecisionStep
:param action: The action output of the policy
:param env_action: The possibly clipped action to be executed in the environment
:param outputs: Dict of all quantities associated with the policy forward pass
:param agent_ids: List of int agent ids in DecisionStep
"""
action: Any
env_action: Any
outputs: ActionInfoOutputs
agent_ids: List[AgentId]
@staticmethod
def empty() -> "ActionInfo":
return ActionInfo([], [], {}, [])
| ActionInfo |
python | sqlalchemy__sqlalchemy | test/engine/test_processors.py | {
"start": 8763,
"end": 9057
} | class ____(_DistillArgsTest):
@classmethod
def setup_test_class(cls):
from sqlalchemy.engine import _util_cy
from sqlalchemy.util.langhelpers import load_uncompiled_module
_py_util = load_uncompiled_module(_util_cy)
cls.module = _py_util
| PyDistillArgsTest |
python | great-expectations__great_expectations | tests/integration/test_utils/data_source_config/databricks.py | {
"start": 2856,
"end": 3247
} | class ____(BaseSettings):
databricks_token: str
databricks_host: str
databricks_http_path: str
def connection_string(self, schema: str) -> str:
return (
"databricks://token:"
f"{self.databricks_token}@{self.databricks_host}:443"
f"?http_path={self.databricks_http_path}&catalog=ci&schema={schema}"
)
| DatabricksConnectionConfig |
python | huggingface__transformers | src/transformers/models/instructblipvideo/configuration_instructblipvideo.py | {
"start": 10507,
"end": 15539
} | class ____(PreTrainedConfig):
r"""
[`InstructBlipVideoConfig`] is the configuration class to store the configuration of a
[`InstructBlipVideoForConditionalGeneration`]. It is used to instantiate a Instructblipvideo model according to the specified
arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with
the defaults will yield a similar configuration to that of the Instructblipvideo
[Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipVideoVisionConfig`].
qformer_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipVideoQFormerConfig`].
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize any [`PreTrainedConfig`].
num_query_tokens (`int`, *optional*, defaults to 32):
The number of query tokens passed through the Transformer.
video_token_index (`int`, *optional*):
Token index of special video token.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import (
... InstructBlipVideoVisionConfig,
... InstructBlipVideoQFormerConfig,
... OPTConfig,
... InstructBlipVideoConfig,
... InstructBlipVideoForConditionalGeneration,
... )
>>> # Initializing a InstructBlipVideoConfig with Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipVideoConfig()
>>> # Initializing a InstructBlipVideoForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipVideoForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a InstructBlipVideoConfig from a InstructBlipVideoVisionConfig, InstructBlipVideoQFormerConfig and any PreTrainedConfig
>>> # Initializing Instructblipvideo vision, Instructblipvideo Q-Former and language model configurations
>>> vision_config = InstructBlipVideoVisionConfig()
>>> qformer_config = InstructBlipVideoQFormerConfig()
>>> text_config = OPTConfig()
>>> config = InstructBlipVideoConfig(vision_config=vision_config, qformer_config=qformer_config, text_config=text_config)
```"""
model_type = "instructblipvideo"
attribute_map = {
"video_token_id": "video_token_index",
}
sub_configs = {
"text_config": AutoConfig,
"qformer_config": InstructBlipVideoQFormerConfig,
"vision_config": InstructBlipVideoVisionConfig,
}
def __init__(
self,
vision_config=None,
qformer_config=None,
text_config=None,
num_query_tokens=32,
video_token_index=None,
**kwargs,
):
if text_config is None:
text_config = CONFIG_MAPPING["opt"]()
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
elif isinstance(text_config, dict):
text_model_type = text_config.get("model_type", "opt")
text_config = CONFIG_MAPPING[text_model_type](**text_config)
if qformer_config is None:
qformer_config = InstructBlipVideoQFormerConfig()
logger.info("qformer_config is None. Initializing the InstructBlipVideoQFormerConfig with default values.")
elif isinstance(qformer_config, dict):
qformer_config = InstructBlipVideoQFormerConfig(**qformer_config)
if vision_config is None:
vision_config = InstructBlipVideoVisionConfig()
logger.info(
"`vision_config` is `None`. initializing the `InstructBlipVideoVisionConfig` with default values."
)
elif isinstance(vision_config, dict):
vision_config = InstructBlipVideoVisionConfig(**vision_config)
self.text_config = text_config
self.vision_config = vision_config
self.qformer_config = qformer_config
self.num_query_tokens = num_query_tokens
self.video_token_index = video_token_index
self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
self.initializer_factor = 1.0
self.initializer_range = 0.02
super().__init__(**kwargs)
__all__ = ["InstructBlipVideoConfig", "InstructBlipVideoQFormerConfig", "InstructBlipVideoVisionConfig"]
| InstructBlipVideoConfig |
python | pytorch__pytorch | test/dynamo/test_export.py | {
"start": 1143,
"end": 116364
} | class ____(torch._dynamo.test_case.TestCase):
# TODO(voz): Refactor to a shared test function.
# The tests in this file are a little redundant,
# They all take a func, run it with eager, then export it, then compare
def test_export(self):
def pre_attention_state_ops(input, mems, state):
lc_key = state[0]
lc_val = state[1]
bar = []
for _ in range(4):
bar2 = []
for _ in range(3):
bar2.append(
lc_key + lc_val + torch.tensor([0.1, 0.25, 0.4, 0.5, 0.1])
)
bar.append(bar2)
return bar
def func():
mems = torch.tensor([[[1.8364, 0.2724, -1.4917, -0.4367, 0.8640]]])
state = [
torch.tensor([[[1.0517, 0.3848, -0.6472, 0.0823, 0.9116]]]),
torch.tensor([[[1.0517, 0.3848, -0.6472, 0.0823, 0.9116]]]),
]
i = torch.tensor(
[
[0.0313, -0.1487, -0.3846, -0.5321],
[-1.7073, 1.3331, -0.0890, -1.4935],
[-0.8314, -0.1862, -0.5935, 1.5232],
]
)
return pre_attention_state_ops(i, mems, state)
opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
real_result = opt_func()
torch._dynamo.reset()
exported = torch._dynamo.export(func)()
out_graph = exported[0]
dynamo_result = out_graph()
self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_no_tensor_computation_fail(self):
with self.assertRaisesRegex(
AssertionError,
"Failed to produce a graph",
):
inp = [torch.randn(3)]
inp2 = 2
inps = [inp, inp2]
def func(x, y):
return x
torch._dynamo.export(func, same_signature=False)(*inps)
def test_no_tensor_computation(self):
inp = [torch.randn(3)]
inp2 = 2
inps = [inp, inp2]
def func(x, y):
return x
opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
real_result = opt_func(*inps)
torch._dynamo.reset()
exported = torch._dynamo.export(func)(*inps)
out_graph = exported[0]
dynamo_result = out_graph(*inps)
self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
self.assertExpectedInline(
out_graph.code.strip(),
"""\
def forward(self, x, y):
arg0, arg1, = fx_pytree.tree_flatten_spec(([x, y], {}), self._in_spec)
x = arg0
return pytree.tree_unflatten([x], self._out_spec)""",
)
def test_no_tensor_computation_2(self):
inp = torch.randn(3)
inp2 = 2
inps = [inp, inp2]
def func(x, y):
return y
opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
real_result = opt_func(*inps)
torch._dynamo.reset()
exported = torch._dynamo.export(func)(*inps)
out_graph = exported[0]
dynamo_result = out_graph(*inps)
self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
self.assertExpectedInline(
out_graph.code.strip(),
"""\
def forward(self, x, y):
arg0, arg1, = fx_pytree.tree_flatten_spec(([x, y], {}), self._in_spec)
x = arg0
return pytree.tree_unflatten([2], self._out_spec)""",
)
def test_export_mismatched_out(self):
def func(x):
y = x + 1
return ([x, x], (y, y))
opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
real_result = opt_func(torch.tensor([[[1.3737, 0.1]]]))
torch._dynamo.reset()
exported = torch._dynamo.export(func)(torch.tensor([[[1.3737, 0.1]]]))
out_graph = exported[0]
dynamo_result = out_graph(torch.tensor([[[1.3737, 0.1]]]))
self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_export_shape_control_flow_1(self):
def func(x):
if x.shape[0] > 10:
return x.cos()
return x.sin()
opt_func = torch.compile(func, backend="eager")
real_result = opt_func(torch.ones(6, 4))
torch._dynamo.reset()
exported = torch._dynamo.export(func)(torch.ones(6, 4))
out_graph, out_guards = exported
dynamo_result = out_graph(torch.ones(6, 4))
from torch._guards import GuardSource
self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
hit = False
for guard in out_guards:
if guard.source == GuardSource.SHAPE_ENV:
hit = True
self.assertExpectedInline(
guard.code_list,
"""["L['x'].stride()[0] == L['x'].size()[1]", "L['x'].stride()[1] == 1", "L['x'].storage_offset() == 0", "2 <= L['x'].size()[0] and L['x'].size()[0] <= 10", "2 <= L['x'].size()[1]"]""", # noqa: B950
)
break
self.assertTrue(hit)
def test_export_control_flow_with_getattr(self):
class Animal(Enum):
COW = "moo"
class MyModule(torch.nn.Module):
def __init__(self, a):
super().__init__()
self.a = a
def forward(self, x):
if self.a == Animal.COW.value:
return x * x
else:
raise ValueError("bad")
module = MyModule("moo")
input = (torch.ones(4, 3),)
resA = module(*input)
graph, _ = torch._dynamo.export(module)(*input)
resB = graph(*input)
self.assertTrue(torch._dynamo.utils.same(resA, resB))
def test_export_graph_bypass(self):
inp = [
torch.tensor([0.1, 0.1]),
torch.tensor([0.2, 0.2]),
torch.tensor([0.3, 0.3]),
]
def func(x):
first = x[2]
second = x[2]
return first * second
opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
real_result = opt_func(inp)
torch._dynamo.reset()
exported = torch._dynamo.export(func)(inp)
out_graph = exported[0]
dynamo_result = out_graph(inp)
self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_list_unpack(self):
inp = [
torch.tensor([0.1, 0.1]),
torch.tensor([0.2, 0.2]),
torch.tensor([0.3, 0.3]),
]
def func(x):
first = x[2]
second = x[2]
return x[0], first * second, x[1], x[2]
opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
real_result = opt_func(inp)
torch._dynamo.reset()
exported = torch._dynamo.export(func)(inp)
out_graph = exported[0]
dynamo_result = out_graph(inp)
self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_export_with_shallow_list_copy_wo_side_effects(self):
def f(x):
y = x.copy()
return y[0] + y[1]
inp = [torch.tensor([1.3, 3.77, 0.1]), torch.tensor([8.7, 6.23, 9.9])]
gm = torch._dynamo.export(f, aten_graph=True, tracing_mode="symbolic")(
inp
).graph_module
self.assertTrue(torch._dynamo.utils.same(gm(inp), f(inp)))
def test_export_with_shallow_list_copy_with_side_effects(self):
def f(x):
y = x.copy()
x[0] = x[1]
y.append(torch.tensor([[100]]))
return x[0] + x[1], y[0] + y[1], y[2]
inp = [torch.tensor([1.3, 3.77, 0.1]), torch.tensor([8.7, 6.23, 9.9])]
gm = torch._dynamo.export(f, aten_graph=True, tracing_mode="symbolic")(
inp
).graph_module
res = gm(inp)
ref = f(inp)
self.assertTrue(torch._dynamo.utils.same(res, ref))
self.assertEqual(res[0], res[1])
def test_export_mismatched_out_2(self):
def func(x):
y = x + 1
return ([x, x], (y, y))
opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
real_result = opt_func(torch.tensor([[[1.3737, 0.1]]]))
torch._dynamo.reset()
exported = torch._dynamo.export(func)(torch.tensor([[[1.3737, 0.1]]]))
out_graph = exported[0]
dynamo_result = out_graph(torch.tensor([[[1.3737, 0.1]]]))
self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_export_graph_with_list(self):
inp = [
torch.tensor([0.1, 0.1]),
torch.tensor([0.2, 0.2]),
torch.tensor([0.3, 0.3]),
torch.tensor([0.4, 0.4]),
]
def func(x):
first = x[2]
second = x[2]
return first * second, x
opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
real_result = opt_func(inp)
torch._dynamo.reset()
exported = torch._dynamo.export(func)(inp)
out_graph = exported[0]
dynamo_result = out_graph(inp)
self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_export_graph_with_complex_reorder(self):
    """Export a fn whose outputs reorder and recombine multiple list elements."""
    inp = [
        torch.tensor([0.1, 0.1]),
        torch.tensor([0.2, 0.2]),
        torch.tensor([0.3, 0.3]),
        torch.tensor([0.4, 0.4]),
    ]

    def func(x):
        first = x[0]
        second = x[1]
        third = x[2]
        # Outputs deliberately out of input order, mixing passthroughs and products.
        return third, first, second, first * second, first * third

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(inp)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func)(inp)
    out_graph = exported[0]
    dynamo_result = out_graph(inp)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_immutable_list_dict(self):
    """Compile an exported module returning fx-immutable list/dict containers."""

    class M(torch.nn.Module):
        def forward(self, x1, x2):
            return [x1 + x2], {"moo1": x1 * x1, "moo2": x2 * x2}

    x1 = torch.randn(2, 3)
    x2 = torch.randn(2, 3)
    model = M()
    # make_fx produces immutable_list/immutable_dict outputs in the traced graph.
    fx_model = make_fx(
        model,
        tracing_mode="symbolic",
        _allow_non_fake_inputs=True,
        _error_on_data_dependent_ops=True,
    )(*[x1, x2])
    ep = torch.export.export(fx_model, (x1, x2))
    res = torch.compile(ep.module(), dynamic=True, fullgraph=True)(x1, x2)
    self.assertTrue(torch._dynamo.utils.same(res, M()(x1, x2)))
def test_dupes(self):
    """Export a fn that returns the same tensor twice (duplicate outputs)."""
    inp = torch.tensor([0.1, 0.1])

    def func(x):
        y = x + 1
        return y, y

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(inp)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func)(inp)
    out_graph = exported[0]
    dynamo_result = out_graph(inp)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_dupes_2(self):
    """Duplicate-output export test.

    NOTE(review): body is identical to ``test_dupes`` above — possibly an
    intentional regression pair, but worth confirming it was not meant to
    cover a different variant.
    """
    inp = torch.tensor([0.1, 0.1])

    def func(x):
        y = x + 1
        return y, y

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(inp)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func)(inp)
    out_graph = exported[0]
    dynamo_result = out_graph(inp)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_dupes_and_bypass(self):
    """Export with duplicate outputs plus an untouched (bypassed) input in the outputs."""
    inp = torch.tensor([0.1, 0.1])
    inp2 = torch.tensor([0.4, 0.4])
    inps = [inp, inp2]

    def func(x, z):
        y = x + 1
        return y, y, z  # z passes straight through

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_dupes_and_bypass_with_non_tensor_arg(self):
    """Dupes + bypass export where one argument is a plain Python int, not a tensor."""
    inp = torch.tensor([0.1, 0.1])
    inp2 = torch.tensor([0.1, 0.1])
    inp3 = 4  # non-tensor arg; gets specialized/burned into the graph
    inps = [inp, inp2, inp3]

    def func(x, z, k):
        y = x + k
        return y, y, z

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_dupes_and_bypass_reorder_with_non_tensor_arg(self):
    """Same as the previous test but with the bypassed tensor moved to output slot 0."""
    inp = torch.tensor([0.1, 0.1])
    inp2 = torch.tensor([0.1, 0.1])
    inp3 = 4
    inps = [inp, inp2, inp3]

    def func(x, z, k):
        y = x + k
        return z, y, y  # bypass first, then the duplicated result

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
@config.patch(capture_scalar_outputs=True)
def test_dupes_and_bypass_with_non_tensor_output(self):
    """Dupes + bypass export where one output is a Python scalar from .item().

    Needs capture_scalar_outputs=True so Dynamo traces .item() instead of
    graph-breaking on it.
    """
    inp = torch.tensor([0.1, 0.1])
    inp2 = torch.tensor([0.1, 0.1])
    inp3 = 4
    inps = [inp, inp2, inp3]

    def func(x, z, k):
        y = x + k
        return y[0].item(), y, z

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_zeroes_in_and_out_different_shape_on_test(self):
    """Export with zero-filled example inputs, then run the graph on random inputs."""
    inp = torch.zeros(10)
    inp2 = torch.zeros(10)
    inp3 = torch.zeros(10)
    inps = [inp, inp2, inp3]
    # Different values (same shapes) at call time than at export time.
    inps_rand = [torch.randn(10), torch.randn(10), torch.randn(10)]

    def func(a, b, c):
        # Deeply nested list output structure.
        return [[a], [b, c], [a + b], [[c + c]]]

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps_rand)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps_rand)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
@config.patch(capture_scalar_outputs=True)
def test_zeroes_in_new_shape_scalar_out(self):
    """Export a fn whose only output is a Python scalar built from .item() calls."""
    inp = torch.zeros(10)
    inp2 = torch.zeros(10)
    inp3 = torch.zeros(10)
    inps = [inp, inp2, inp3]
    inps_rand = [torch.randn(10), torch.randn(10), torch.randn(10)]

    def func(a, b, c):
        return a[0].item() + b[0].item() + c[0].item()

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps_rand)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps_rand)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
@config.patch(capture_scalar_outputs=True)
def test_zeroes_in_new_shape_scalar_out_permute(self):
    """Scalar-output export with inputs read out of order and one read twice."""
    inp = torch.zeros(10)
    inp2 = torch.zeros(10)
    inp3 = torch.zeros(10)
    inps = [inp, inp2, inp3]
    inps_rand = [torch.randn(10), torch.randn(10), torch.randn(10)]

    def func(a, b, c):
        # b, c first, then a twice — exercises permuted/duplicated scalar reads.
        return b[0].item() + c[0].item() + a[0].item() + a[0].item()

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps_rand)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps_rand)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
@config.patch(capture_scalar_outputs=True)
def test_zeroes_in_new_shape_scalar_out_permute_dupe_and_bypass(self):
    """Scalar output sandwiched between two passthroughs of the same input tensor."""
    inp = torch.zeros(10)
    inp2 = torch.zeros(10)
    inp3 = torch.zeros(10)
    inps = [inp, inp2, inp3]
    inps_rand = [torch.randn(10), torch.randn(10), torch.randn(10)]

    def func(a, b, c):
        return a, b[0].item() + c[0].item() + a[0].item() + a[0].item(), a

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps_rand)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps_rand)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_func_return(self):
    """Export a fn that defines and calls a closure over a local tensor."""
    inp = torch.zeros(10)
    inp2 = torch.zeros(10)
    inp3 = torch.zeros(10)
    inps = [inp, inp2, inp3]
    inps_rand = [torch.randn(10), torch.randn(10), torch.randn(10)]

    def func(a, b, c):
        x = a + b + c

        def func2(y):
            return x * y  # closes over x

        return func2(x)

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps_rand)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps_rand)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_dict_return(self):
    """Export a fn that returns its result wrapped in a dict."""
    inp = torch.zeros(10)
    inp2 = torch.zeros(10)
    inp3 = torch.zeros(10)
    inps = [inp, inp2, inp3]
    inps_rand = [torch.randn(10), torch.randn(10), torch.randn(10)]

    def func(a, b, c):
        x = a + b + c
        return {"a": x}

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps_rand)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps_rand)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_export_with_aten_graph(self):
    """Export (aten_graph=True) a zero-arg fn building a nested list of tensors in loops."""

    def pre_attention_state_ops(input, mems, state):
        # Builds a 4x3 nested list of tensors from the state pair; `input` and
        # `mems` are unused here — they mirror a real model signature.
        lc_key = state[0]
        lc_val = state[1]
        bar = []
        for _ in range(4):
            bar2 = []
            for _ in range(3):
                bar2.append(
                    lc_key + lc_val + torch.tensor([0.1, 0.25, 0.4, 0.5, 0.1])
                )
            bar.append(bar2)
        return bar

    def func():
        mems = torch.tensor([[[1.8364, 0.2724, -1.4917, -0.4367, 0.8640]]])
        state = [
            torch.tensor([[[1.0517, 0.3848, -0.6472, 0.0823, 0.9116]]]),
            torch.tensor([[[1.0517, 0.3848, -0.6472, 0.0823, 0.9116]]]),
        ]
        i = torch.tensor(
            [
                [0.0313, -0.1487, -0.3846, -0.5321],
                [-1.7073, 1.3331, -0.0890, -1.4935],
                [-0.8314, -0.1862, -0.5935, 1.5232],
            ]
        )
        return pre_attention_state_ops(i, mems, state)

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func()
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)()
    out_graph = exported[0]
    dynamo_result = out_graph()
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_export_no_tensor_computation_with_aten_graph(self):
    """Export (aten_graph=True) a fn that just returns one input untouched.

    Also pins the exact generated forward() code: only a passthrough, no ops.
    """
    inp = [torch.randn(3)]
    inp2 = 2
    inps = [inp, inp2]

    def func(x, y):
        return x  # y is intentionally unused

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
    self.assertExpectedInline(
        out_graph.code.strip(),
        """\
def forward(self, x, y):
    arg0, arg1, = fx_pytree.tree_flatten_spec(([x, y], {}), self._in_spec)
    arg0_1 = arg0
    return pytree.tree_unflatten([arg0_1], self._out_spec)""",
    )
def test_no_tensor_computation_2_with_aten_graph(self):
    """Export (aten_graph=True) a fn returning only the non-tensor arg.

    The int gets burned into the graph as the constant 2 in the output spec.
    """
    inp = torch.randn(3)
    inp2 = 2
    inps = [inp, inp2]

    def func(x, y):
        return y  # x is intentionally unused

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
    self.assertExpectedInline(
        out_graph.code.strip(),
        """\
def forward(self, x, y):
    arg0, arg1, = fx_pytree.tree_flatten_spec(([x, y], {}), self._in_spec)
    arg0_1 = arg0
    return pytree.tree_unflatten([2], self._out_spec)""",
    )
def test_export_mismatched_out_with_aten_graph(self):
    """aten_graph variant: export a fn returning a mixed list/tuple output structure."""

    def func(x):
        y = x + 1
        return ([x, x], (y, y))

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(torch.tensor([[[1.3737, 0.1]]]))
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(
        torch.tensor([[[1.3737, 0.1]]])
    )
    out_graph = exported[0]
    dynamo_result = out_graph(torch.tensor([[[1.3737, 0.1]]]))
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_export_graph_bypass_with_aten_graph(self):
    """aten_graph variant: only one list element is used; the rest are bypassed."""
    inp = [
        torch.tensor([0.1, 0.1]),
        torch.tensor([0.2, 0.2]),
        torch.tensor([0.3, 0.3]),
    ]

    def func(x):
        first = x[2]
        second = x[2]
        return first * second

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(inp)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(inp)
    out_graph = exported[0]
    dynamo_result = out_graph(inp)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_list_unpack_with_aten_graph(self):
    """aten_graph variant: outputs mix raw list elements with a computed product."""
    inp = [
        torch.tensor([0.1, 0.1]),
        torch.tensor([0.2, 0.2]),
        torch.tensor([0.3, 0.3]),
    ]

    def func(x):
        first = x[2]
        second = x[2]
        return x[0], first * second, x[1], x[2]

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(inp)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(inp)
    out_graph = exported[0]
    dynamo_result = out_graph(inp)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_export_mismatched_out_2_with_aten_graph(self):
    """aten_graph twin of test_export_mismatched_out_2 (nested list/tuple outputs)."""

    def func(x):
        y = x + 1
        return ([x, x], (y, y))

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(torch.tensor([[[1.3737, 0.1]]]))
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(
        torch.tensor([[[1.3737, 0.1]]])
    )
    out_graph = exported[0]
    dynamo_result = out_graph(torch.tensor([[[1.3737, 0.1]]]))
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_export_graph_with_list_with_aten_graph(self):
    """aten_graph twin of test_export_graph_with_list (returns the input list too)."""
    inp = [
        torch.tensor([0.1, 0.1]),
        torch.tensor([0.2, 0.2]),
        torch.tensor([0.3, 0.3]),
        torch.tensor([0.4, 0.4]),
    ]

    def func(x):
        first = x[2]
        second = x[2]
        return first * second, x

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(inp)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(inp)
    out_graph = exported[0]
    dynamo_result = out_graph(inp)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_export_graph_with_complex_reorder_with_aten_graph(self):
    """aten_graph twin of test_export_graph_with_complex_reorder."""
    inp = [
        torch.tensor([0.1, 0.1]),
        torch.tensor([0.2, 0.2]),
        torch.tensor([0.3, 0.3]),
        torch.tensor([0.4, 0.4]),
    ]

    def func(x):
        first = x[0]
        second = x[1]
        third = x[2]
        return third, first, second, first * second, first * third

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(inp)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(inp)
    out_graph = exported[0]
    dynamo_result = out_graph(inp)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_dupes_with_aten_graph(self):
    """aten_graph twin of test_dupes (same tensor returned twice)."""
    inp = torch.tensor([0.1, 0.1])

    def func(x):
        y = x + 1
        return y, y

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(inp)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(inp)
    out_graph = exported[0]
    dynamo_result = out_graph(inp)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_dupes_2_with_aten_graph(self):
    """aten_graph duplicate-output test.

    NOTE(review): body is identical to ``test_dupes_with_aten_graph`` —
    mirrors the eager-side test_dupes/test_dupes_2 duplication above.
    """
    inp = torch.tensor([0.1, 0.1])

    def func(x):
        y = x + 1
        return y, y

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(inp)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(inp)
    out_graph = exported[0]
    dynamo_result = out_graph(inp)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_dupes_and_bypass_with_aten_graph(self):
    """aten_graph twin of test_dupes_and_bypass."""
    inp = torch.tensor([0.1, 0.1])
    inp2 = torch.tensor([0.4, 0.4])
    inps = [inp, inp2]

    def func(x, z):
        y = x + 1
        return y, y, z

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_dupes_and_bypass_with_non_tensor_arg_with_aten_graph(self):
    """aten_graph twin of test_dupes_and_bypass_with_non_tensor_arg."""
    inp = torch.tensor([0.1, 0.1])
    inp2 = torch.tensor([0.1, 0.1])
    inp3 = 4
    inps = [inp, inp2, inp3]

    def func(x, z, k):
        y = x + k
        return y, y, z

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_dupes_and_bypass_reorder_with_non_tensor_arg_with_aten_graph(self):
    """aten_graph twin of test_dupes_and_bypass_reorder_with_non_tensor_arg."""
    inp = torch.tensor([0.1, 0.1])
    inp2 = torch.tensor([0.1, 0.1])
    inp3 = 4
    inps = [inp, inp2, inp3]

    def func(x, z, k):
        y = x + k
        return z, y, y

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
@config.patch(capture_scalar_outputs=True)
def test_dupes_and_bypass_with_non_tensor_output_with_aten_graph(self):
    """Scalar-output dupes/bypass export.

    NOTE(review): despite the ``_with_aten_graph`` name, the export call below
    does NOT pass ``aten_graph=True`` (cf. the sibling tests at the same
    indentation). Confirm whether this is intentional (e.g. scalar outputs
    unsupported under aten lowering) or a copy-paste omission.
    """
    inp = torch.tensor([0.1, 0.1])
    inp2 = torch.tensor([0.1, 0.1])
    inp3 = 4
    inps = [inp, inp2, inp3]

    def func(x, z, k):
        y = x + k
        return y[0].item(), y, z

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_zeroes_in_and_out_different_shape_on_test_with_aten_graph(self):
    """aten_graph twin of test_zeroes_in_and_out_different_shape_on_test."""
    inp = torch.zeros(10)
    inp2 = torch.zeros(10)
    inp3 = torch.zeros(10)
    inps = [inp, inp2, inp3]
    inps_rand = [torch.randn(10), torch.randn(10), torch.randn(10)]

    def func(a, b, c):
        return [[a], [b, c], [a + b], [[c + c]]]

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps_rand)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps_rand)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_func_return_with_aten_graph(self):
    """aten_graph twin of test_func_return (closure over a local tensor)."""
    inp = torch.zeros(10)
    inp2 = torch.zeros(10)
    inp3 = torch.zeros(10)
    inps = [inp, inp2, inp3]
    inps_rand = [torch.randn(10), torch.randn(10), torch.randn(10)]

    def func(a, b, c):
        x = a + b + c

        def func2(y):
            return x * y

        return func2(x)

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps_rand)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps_rand)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_dict_return_with_aten_graph(self):
    """aten_graph twin of test_dict_return."""
    inp = torch.zeros(10)
    inp2 = torch.zeros(10)
    inp3 = torch.zeros(10)
    inps = [inp, inp2, inp3]
    inps_rand = [torch.randn(10), torch.randn(10), torch.randn(10)]

    def func(a, b, c):
        x = a + b + c
        return {"a": x}

    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps_rand)
    torch._dynamo.reset()
    exported = torch._dynamo.export(func, aten_graph=True)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps_rand)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_export_with_stack_trace(self):
    """Every non-placeholder/output node of an exported graph must carry
    stack_trace and nn_module_stack / source_fn_stack metadata; the aten
    export additionally carries val and original_aten."""
    inp = torch.randn(4, 4)

    class MyBlock(torch.nn.Module):
        def forward(self, x):
            x = torch.nn.functional.linear(x, torch.randn(4, 4))
            return torch.cos(x).relu() + 1

    class MyModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.block = MyBlock()

        def forward(self, x):
            out = self.block(x)
            return out

    exported = torch._dynamo.export(MyModule(), aten_graph=False)(inp)
    out_graph = exported[0]
    for node in out_graph.graph.nodes:
        if node.op not in {"placeholder", "output"}:
            self.assertTrue(node.stack_trace is not None)
            self.assertTrue(node.meta["nn_module_stack"] is not None)
            self.assertTrue(node.meta["source_fn_stack"] is not None)

    torch._dynamo.reset()

    # aten export: call_function nodes get extra metadata (val, original_aten).
    exported = torch._dynamo.export(MyModule(), aten_graph=True)(inp)
    out_graph = exported[0]
    for node in out_graph.graph.nodes:
        if node.op == "call_function":
            self.assertTrue(node.stack_trace is not None)
            self.assertTrue(node.meta["nn_module_stack"] is not None)
            self.assertTrue(node.meta["source_fn_stack"] is not None)
            self.assertTrue(node.meta["val"] is not None)
            self.assertTrue(node.meta["original_aten"] is not None)
def test_export_preserves_nn_module_stack_for_get_attr(self):
    """get_attr nodes (one param + one buffer access) keep nn_module_stack
    metadata in both the eager and aten exported graphs."""
    inp = torch.randn(4, 4)

    class MyBlock(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.weight = torch.nn.Parameter(torch.ones(1, 1))
            self.buffer = torch.nn.Buffer(torch.ones(1, 1))

        def forward(self, x):
            x = torch.nn.functional.linear(x, torch.randn(4, 4))
            return torch.cos(x).relu() + self.weight + self.buffer

    class MyModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.block = MyBlock()

        def forward(self, x):
            out = self.block(x)
            return out

    m = MyModule()
    exported = torch._dynamo.export(m, aten_graph=False)(inp)
    out_graph = exported[0]
    attr_access_count = 0
    for node in out_graph.graph.nodes:
        if node.op == "get_attr":
            attr_access_count += 1
            self.assertTrue(node.meta["nn_module_stack"] is not None)
    # Exactly two attribute accesses: self.weight and self.buffer.
    self.assertEqual(attr_access_count, 2)

    torch._dynamo.reset()

    exported = torch._dynamo.export(m, aten_graph=True)(inp)
    out_graph = exported[0]
    attr_access_count = 0
    for node in out_graph.graph.nodes:
        if node.op == "get_attr":
            attr_access_count += 1
            self.assertTrue(node.meta["nn_module_stack"] is not None)
    self.assertEqual(attr_access_count, 2)
def test_export_compare_optimize_with_make_fx(self):
    """Exported aten graph must agree with make_fx run both through a Dynamo
    backend and directly on the function."""
    inp = torch.tensor([0.1, 0.1])
    linear = torch.nn.Linear(2, 2)

    def func(x):
        x = x + 1
        y = x.t()
        y = y.relu()
        y = linear(y)  # captures the module-level Linear as a closure
        return y

    exported = torch._dynamo.export(func, aten_graph=True)(inp)
    out_graph = exported[0]
    export_result = out_graph(inp)

    torch._dynamo.reset()

    def compiler(gm, sample_inputs):
        # Backend that re-traces the Dynamo graph with make_fx per call.
        def fw(*args):
            aten_gm = make_fx(gm)(*args)
            return aten_gm(*args)

        return fw

    opt_func = torch.compile(func, backend=compiler, fullgraph=True, dynamic=True)
    make_fx_result_through_backend = opt_func(inp)

    fx_g = make_fx(func)(inp)
    make_fx_result_through_direct = fx_g(inp)

    self.assertTrue(
        torch._dynamo.utils.same(make_fx_result_through_backend, export_result)
    )
    self.assertTrue(
        torch._dynamo.utils.same(make_fx_result_through_direct, export_result)
    )
def test_export_with_constant_method_on_module(self):
    """assume_constant_result on a method: torch.nonzero's result from export
    time is baked in, so later inputs don't change the graph's output."""

    class MyModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 2))
            self.linear = torch.nn.Linear(2, 2)

        @torch._dynamo.assume_constant_result
        def helper_fn(self, x):
            return torch.nonzero(x)

        def forward(self, x):
            y = torch.sin(x)  # overwritten below; exercises dead-value capture
            x = self.linear(x)
            y = self.helper_fn(x)
            return y

    module = MyModule()
    real_result = module(torch.tensor([[1.0, 0], [0, 0]]))
    module = MyModule()
    graph, _ = torch._dynamo.export(module)(torch.tensor([[0.0, 0], [0, 0]]))
    result = graph(torch.tensor([[1.0, 0.0], [0, 0]]))
    self.assertTrue(torch._dynamo.utils.same(result, real_result))
    result = graph(torch.tensor([[1, 0], [0.25, 0.25]]))
    self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_method_on_module_invoke_twice(self):
    """assume_constant_result method invoked twice in one forward; both calls
    fold to the same export-time constant."""

    class MyModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 2))
            self.linear = torch.nn.Linear(2, 2)

        @torch._dynamo.assume_constant_result
        def helper_fn(self, x):
            return torch.nonzero(x)

        def forward(self, x):
            y = torch.sin(x)
            x = self.linear(x)
            y = self.helper_fn(x) + self.helper_fn(x)
            return y

    module = MyModule()
    real_result = module(torch.tensor([[1.0, 0], [0, 0]]))
    module = MyModule()
    graph, _ = torch._dynamo.export(module)(torch.tensor([[0.0, 0], [0, 0]]))
    result = graph(torch.tensor([[1.0, 0.0], [0, 0]]))
    self.assertTrue(torch._dynamo.utils.same(result, real_result))
    result = graph(torch.tensor([[1, 0], [0.25, 0.25]]))
    self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_free_function(self):
@torch._dynamo.assume_constant_result
def helper_fn(x):
return torch.nonzero(x)
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.param = torch.nn.Parameter(torch.rand(4, 2))
self.linear = torch.nn.Linear(2, 2)
@torch._dynamo.assume_constant_result
def helper_fn(self, x):
return torch.nonzero(x)
def forward(self, x):
y = torch.sin(x)
x = self.linear(x)
y = helper_fn(x) + self.helper_fn(x)
return y
module = MyModule()
real_result = module(torch.tensor([[1.0, 0], [0, 0]]))
module = MyModule()
graph, _ = torch._dynamo.export(module)(torch.tensor([[0.0, 0], [0, 0]]))
result = graph(torch.tensor([[1.0, 0.0], [0, 0]]))
self.assertTrue(torch._dynamo.utils.same(result, real_result))
result = graph(torch.tensor([[1, 0], [0.25, 0.25]]))
self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_global_function(self):
    """A module-level constant-result function (defined elsewhere in this
    file) is folded at export; calling it twice concatenates its result."""

    class MyModule(torch.nn.Module):
        def forward(self):
            a = dynamo_assume_constant_result_global_function()
            b = dynamo_assume_constant_result_global_function()
            return a + b

    module = MyModule()
    graph, _ = torch._dynamo.export(module)()
    result = graph()
    # The global helper evidently returns "test"; two calls concatenate.
    self.assertEqual(result, "testtest")
def test_export_with_constant_free_function_and_class_method(self):
@torch._dynamo.assume_constant_result
def helper_fn(x):
return torch.nonzero(x)
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.param = torch.nn.Parameter(torch.rand(4, 2))
self.linear = torch.nn.Linear(2, 2)
def forward(self, x):
y = torch.sin(x)
x = self.linear(x)
y = helper_fn(x)
return y
module = MyModule()
real_result = module(torch.tensor([[1.0, 0], [0, 0]]))
module = MyModule()
graph, _ = torch._dynamo.export(module)(torch.tensor([[0.0, 0], [0, 0]]))
result = graph(torch.tensor([[1.0, 0.0], [0, 0]]))
self.assertTrue(torch._dynamo.utils.same(result, real_result))
result = graph(torch.tensor([[1, 0], [0.25, 0.25]]))
self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_free_function_and_class_method_multiarg(self):
@torch._dynamo.assume_constant_result
def helper_fn(x):
return torch.nonzero(x)
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.param = torch.nn.Parameter(torch.rand(4, 2))
self.linear = torch.nn.Linear(2, 2)
def forward(self, x, z):
y = torch.sin(x)
x = self.linear(x)
y = helper_fn(x) + helper_fn(z)
return y
module = MyModule()
real_result = module(
torch.tensor([[1.0, 0], [0, 0]]), torch.tensor([[1.0, 0], [0, 0]])
)
module = MyModule()
graph, _ = torch._dynamo.export(module)(
torch.tensor([[0.0, 0], [0, 0]]), torch.tensor([[1.0, 0], [0, 0]])
)
result = graph(
torch.tensor([[1.0, 0.0], [0, 0]]), torch.tensor([[1.0, 0.0], [0, 0]])
)
self.assertTrue(torch._dynamo.utils.same(result, real_result))
result = graph(
torch.tensor([[1, 0], [0.25, 0.25]]), torch.tensor([[1, 0], [0.25, 0.25]])
)
self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_free_function_and_class_method_multiarg_diff(self):
@torch._dynamo.assume_constant_result
def helper_fn(x):
return torch.nonzero(x)
class MyModule(torch.nn.Module):
def forward(self, x, z):
y = helper_fn(x) + helper_fn(z)
return y
module = MyModule()
real_result = module(
torch.tensor([[1.0, 0], [0, 0]]), torch.tensor([[1.0, 0], [0, 0]])
)
module = MyModule()
graph, _ = torch._dynamo.export(module)(
torch.tensor([[0.0, 0], [0, 0]]), torch.tensor([[0.0, 0], [0.5, 0]])
)
result = graph(
torch.tensor([[1.0, 0.0], [0, 0]]), torch.tensor([[0.0, 1.0], [0, 0]])
)
self.assertTrue(torch._dynamo.utils.same(result, real_result))
result = graph(
torch.tensor([[1, 0], [0.25, 0.25]]),
torch.tensor([[0.33, 0.33], [0.25, 0.25]]),
)
self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_tuple_nonzero(self):
    """Constant-result helper returning a tuple of tensors; iterate and scale."""

    class MyModule(torch.nn.Module):
        @torch._dynamo.assume_constant_result
        def helper_fn(self, x):
            return (torch.nonzero(x), torch.nonzero(x))

        def forward(self, x):
            y = torch.tensor([0.5])
            elements = self.helper_fn(x)
            all_y = []
            for element in elements:
                for item in element:
                    all_y.append(y * item)
            return all_y

    module = MyModule()
    real_result = module(torch.tensor([1.0, 1.0]))
    graph, _ = torch._dynamo.export(module)(torch.tensor([1.0, 1.0]))

    # Tensor input can be almost anything here, and the result will capture what we
    # made constant at compile time.
    result = graph(torch.tensor([[[1.0, 0], [0, 0]], [[1.0, 0], [0, 0]]]))
    self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_list_nonzero(self):
    """Same as the tuple test above, but the helper returns a list."""

    class MyModule(torch.nn.Module):
        @torch._dynamo.assume_constant_result
        def helper_fn(self, x):
            return [torch.nonzero(x), torch.nonzero(x)]

        def forward(self, x):
            y = torch.tensor([0.5])
            elements = self.helper_fn(x)
            all_y = []
            for element in elements:
                for item in element:
                    all_y.append(y * item)
            return all_y

    module = MyModule()
    real_result = module(torch.tensor([1.0, 1.0]))
    graph, _ = torch._dynamo.export(module)(torch.tensor([1.0, 1.0]))

    # Tensor input can be almost anything here, and the result will capture what we
    # made constant at compile time.
    result = graph(torch.tensor([[[1.0, 0], [0, 0]], [[1.0, 0], [0, 0]]]))
    self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_list_nonzero_free_function(self):
@torch._dynamo.assume_constant_result
def helper_fn(x):
return [torch.nonzero(x), torch.nonzero(x)]
class MyModule(torch.nn.Module):
def forward(self, x):
y = torch.tensor([0.5])
elements = helper_fn(x)
all_y = []
for element in elements:
for item in element:
all_y.append(y * item)
return all_y
module = MyModule()
real_result = module(torch.tensor([1.0, 1.0]))
graph, _ = torch._dynamo.export(module)(torch.tensor([1.0, 1.0]))
# Tensor input can be almost anything here, and the result will capture what we
# made constant at compile time.
result = graph(torch.tensor([[[1.0, 0], [0, 0]], [[1.0, 0], [0, 0]]]))
self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_dict_values(self):
    """Constant-result helper returning a dict; values are read by key."""

    class MyModule(torch.nn.Module):
        @torch._dynamo.assume_constant_result
        def helper_fn(self, x):
            return {"x": x, "x^2": x * x}

        def forward(self, x):
            y = torch.tensor([0.5])
            elements = self.helper_fn(x)
            y = y * elements["x"]
            y = y * elements["x^2"]
            return y

    module = MyModule()
    real_result = module(torch.tensor([2.0, 2.0]))
    graph, _ = torch._dynamo.export(module)(torch.tensor([2.0, 2.0]))

    # Tensor input can be almost anything here, and the result will capture what we
    # made constant at compile time.
    result = graph(torch.tensor([[[1.0, 0], [0, 0]], [[1.0, 0], [0, 0]]]))
    self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_none_control_flow(self):
    """Constant-result helper returning None drives an `is None` branch;
    export specializes forward to the None path."""

    class MyModule(torch.nn.Module):
        @torch._dynamo.assume_constant_result
        def helper_fn(self, x):
            if x.item() < 0:
                return None
            else:
                return x

        def forward(self, x):
            y = torch.tensor([0.5])
            x = self.helper_fn(x)
            if x is None:
                return y
            return y * x

    module = MyModule()
    real_result = module(torch.tensor([-1]))

    # X is negative, so .item() < 0, which means we return y
    self.assertEqual(real_result, torch.tensor([0.5]))

    graph, _ = torch._dynamo.export(module)(torch.tensor([-1]))
    result = graph(torch.tensor([2]))
    # X is positive, but we compiled helper_fn to return None, so it will still return y
    self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_not_none_control_flow(self):
    """Opposite specialization: export with a positive input bakes in the
    non-None branch (y * x)."""

    class MyModule(torch.nn.Module):
        @torch._dynamo.assume_constant_result
        def helper_fn(self, x):
            if x.item() < 0:
                return None
            else:
                return x

        def forward(self, x):
            y = torch.tensor([0.5])
            x = self.helper_fn(x)
            if x is None:
                return y
            return y * x

    module = MyModule()
    real_result = module(torch.tensor([2]))

    # X is positive, so .item() > 0, which means we return y * x
    self.assertEqual(real_result, torch.tensor([1.0]))

    graph, _ = torch._dynamo.export(module)(torch.tensor([2]))
    result = graph(torch.tensor([-0.5]))
    # X is negative, but we compiled helper_fn to return x, so it will still return y * x
    self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_none_control_flow_free_func(self):
@torch._dynamo.assume_constant_result
def helper_fn(x):
if x.item() < 0:
return None
else:
return x
class MyModule(torch.nn.Module):
def forward(self, x):
y = torch.tensor([0.5])
x = helper_fn(x)
if x is None:
return y
return y * x
module = MyModule()
real_result = module(torch.tensor([-1]))
# X is negative, so .item() < 0, which means we return y
self.assertEqual(real_result, torch.tensor([0.5]))
graph, _ = torch._dynamo.export(module)(torch.tensor([-1]))
result = graph(torch.tensor([2]))
# X is positive, but we compiled helper_fn to return None, so it will still return y
self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_not_none_control_flow_pos(self):
    """Non-None branch specialization on a method helper (positive export input).

    NOTE(review): body matches ``test_export_with_constant_not_none_control_flow``
    above — likely an intentional near-duplicate, worth confirming.
    """

    class MyModule(torch.nn.Module):
        @torch._dynamo.assume_constant_result
        def helper_fn(self, x):
            if x.item() < 0:
                return None
            else:
                return x

        def forward(self, x):
            y = torch.tensor([0.5])
            x = self.helper_fn(x)
            if x is None:
                return y
            return y * x

    module = MyModule()
    real_result = module(torch.tensor([2]))

    # X is positive, so .item() > 0, which means we return y * x
    self.assertEqual(real_result, torch.tensor([1.0]))

    graph, _ = torch._dynamo.export(module)(torch.tensor([2]))
    result = graph(torch.tensor([-0.5]))
    # X is negative, but we compiled helper_fn to return x, so it will still return y * x
    self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_not_none_control_flow_free_func(self):
@torch._dynamo.assume_constant_result
def helper_fn(x):
if x.item() < 0:
return None
else:
return x
class MyModule(torch.nn.Module):
def forward(self, x):
y = torch.tensor([0.5])
x = helper_fn(x)
if x is None:
return y
return y * x
module = MyModule()
real_result = module(torch.tensor([2]))
# X is positive, so .item() > 0, which means we return y * x
self.assertEqual(real_result, torch.tensor([1.0]))
graph, _ = torch._dynamo.export(module)(torch.tensor([2]))
result = graph(torch.tensor([-0.5]))
# X is negative, but we compiled helper_fn to return x, so it will still return y * x
self.assertTrue(torch._dynamo.utils.same(result, real_result))
def test_export_with_constant_not_return_const(self):
class MyModule(torch.nn.Module):
@torch._dynamo.assume_constant_result
def helper_fn(self, x):
return self.val
def forward(self, x):
y = torch.tensor([0.5])
x = self.helper_fn(x)
if x == "A":
return y
return -1
module = MyModule()
module.val = "A"
resA = module(torch.tensor([2]))
graph, _ = torch._dynamo.export(module)(torch.tensor([2]))
module.val = "B"
resB = graph(torch.tensor([2]))
self.assertTrue(torch._dynamo.utils.same(resA, resB))
def test_export_with_builtin_op_on_assume_constant(self):
    """A builtin comparison (<) applied to an assume_constant_result value is
    itself treated as constant, so the data-dependent-looking branch exports."""

    @torch._dynamo.assume_constant_result
    def get_y(y) -> torch.Tensor:
        return y

    class Bob(torch.nn.Module):
        def __init__(self, p, val) -> None:
            super().__init__()
            self.p = p
            self.y = torch.nn.Parameter(torch.tensor(val))

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # This only looks dynamic but it's actually a constant value
            if get_y(self.y) < self.p:
                return torch.cat([x, x])
            else:
                return x

    model = Bob(0.5, 0.3)
    inp = torch.ones(3, 4)
    graph, _ = torch._dynamo.export(model)(inp)
    self.assertEqual(model(inp), graph(inp))

def test_export_with_constant_in_unspecialized_nn_module(self):
    """assume_constant_result also works when Dynamo tracks the module as an
    UnspecializedNNModuleVariable (triggered by mutating self in forward)."""

    class Module(torch.nn.Module):
        def __init__(self, y):
            super().__init__()
            self.y = y

        @torch._dynamo.assume_constant_result
        def check(self):
            return self.y[0].item() == 1

        def forward(self, x):
            # This line leads to module obj being tracked as UnspecializedNNModuleVariable in dynamo
            self.device = x.device
            if self.check():
                return x + 1
            else:
                return x + 2

    model = Module(torch.tensor([1]))
    inp = torch.ones(3, 4)
    graph, _ = torch._dynamo.export(model)(inp)
    self.assertEqual(model(inp), graph(inp))
def test_export_decomp(self):
    """A decomposition_table entry replaces aten.t in the exported aten graph;
    with no table the two aten.t calls remain."""

    def f(x):
        return x.t() + x.t()

    def nop(x):
        return x.cos()

    graph, _ = torch._dynamo.export(
        f,
        aten_graph=True,
        decomposition_table={torch.ops.aten.t.default: nop},
    )(torch.randn(5))
    # aten.t was decomposed away by the table entry.
    self.assertEqual(
        len([n for n in graph.graph.nodes if n.target == torch.ops.aten.t.default]),
        0,
    )

    graph, _ = torch._dynamo.export(f, aten_graph=True, decomposition_table=None)(
        torch.randn(5)
    )
    # Without a table, both aten.t calls survive.
    self.assertEqual(
        len([n for n in graph.graph.nodes if n.target == torch.ops.aten.t.default]),
        2,
    )

def test_export_decomp_asserts_bad_args(self):
    """Passing a decomposition_table with aten_graph=False is rejected."""

    def f(x):
        return x.t() + x.t()

    def nop(x):
        return x.cos()

    with self.assertRaises(AssertionError):
        torch._dynamo.export(
            f,
            (torch.randn(5)),
            aten_graph=False,
            decomposition_table={torch.ops.aten.t.default: nop},
        )
@config.patch(capture_scalar_outputs=True)
def test_export_with_module_layer(self):
    """cond branches may close over a module submodule (self.linear); the
    exported graph stays correct for new inputs (no specialization)."""
    from functorch.experimental.control_flow import cond

    class Module(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(3, 3)

        def forward(self, pred, x):
            def true_fn(val):
                return self.linear(val) * torch.tensor(2)

            def false_fn(val):
                return self.linear(val) * torch.tensor(-1)

            return cond(pred, true_fn, false_fn, [x])

    mod = Module()
    x = torch.randn([3, 3])
    pred = torch.tensor(x[0][0].item() < 0)
    real_result = mod.forward(pred, x)

    torch._dynamo.reset()

    exported = torch._dynamo.export(mod.forward)(pred, x)
    out_graph = exported[0]

    dynamo_result = out_graph(pred, x)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))

    # New X, just to show we did not specialize
    x = x * -1
    pred = torch.tensor(x[0][0].item() < 0)
    real_result_2 = mod.forward(pred, x)
    dynamo_result_2 = out_graph(pred, x)
    self.assertTrue(torch._dynamo.utils.same(real_result_2, dynamo_result_2))

@config.patch(capture_scalar_outputs=True)
def test_export_with_cond_branches_calling_methods(self):
    """cond branch functions may be bound methods that call other methods."""
    from functorch.experimental.control_flow import cond

    class Module(torch.nn.Module):
        # ok
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(3, 3)

        def t(self, val):
            return val + 1

        def f(self, val):
            return val - 1

        def true_fn(self, val):
            return self.linear(val) + self.t(val)

        def false_fn(self, val):
            return self.linear(val) - self.f(val)

        def forward(self, pred, x):
            return cond(pred, self.true_fn, self.false_fn, [x])

    mod = Module()
    x = torch.randn([3, 3])
    pred = torch.tensor(x[0][0].item() < 0)
    real_result = mod.forward(pred, x)
    out_graph, _ = torch._dynamo.export(mod.forward)(pred, x)
    dynamo_result = out_graph(pred, x)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
@config.patch(capture_scalar_outputs=True)
def test_export_with_cond_closure(self):
    """cond branches defined as closures inside forward export correctly,
    including branches that capture self.linear and extra operands."""
    from functorch.experimental.control_flow import cond

    class Foo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()

        def forward(self, pred, x):
            def true_fn(x):
                return x * 2

            def false_fn(x):
                return x - 2

            return cond(pred, true_fn, false_fn, [x])

    class Bar(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()

        def forward(self, pred, x):
            def true_fn(x):
                return x * 2

            def false_fn(x):
                return x - 2

            # Operand is an expression, not a plain placeholder.
            return cond(pred, true_fn, false_fn, [x + 1])

    class FooBar(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(3, 3)

        def forward(self, pred, x):
            y = x + x

            def true_fn(x, y):
                return self.linear(x) * (x + y)

            def false_fn(x, y):
                return x * (y - x)

            return cond(pred, true_fn, false_fn, [x, y])

    for Module in [Foo, Bar, FooBar]:
        mod = Module()
        x = torch.randn([3, 3], requires_grad=True)
        pred = torch.tensor(x[0][0].item() < 0)
        real_result = mod.forward(pred, x)
        out_graph, _ = torch._dynamo.export(mod.forward)(pred, x)
        dynamo_result = out_graph(pred, x)
        self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))

def test_export_with_cond_with_closed_function(self):
    """cond branches may call functions captured from the enclosing scope."""

    def hello(x):
        return x + 1

    def hi(x):
        return x + 2

    def foo(pred, x):
        def true_fn(x):
            return hello(x)

        def false_fn(x):
            return hi(x)

        return cond(pred, true_fn, false_fn, [x])

    x = torch.randn(5)
    pred = x[0] > 0
    real_result = foo(pred, x)
    out_graph, _ = torch._dynamo.export(foo)(pred, x)
    dynamo_result = out_graph(pred, x)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_export_with_cond_dynamic_shape_pred(self):
    """A shape-derived predicate (x.shape[0] <= 2) becomes a symbolic cond in
    the exported graph (both list and tuple operand forms); graph code is
    pinned with assertExpectedInline."""
    from functorch.experimental.control_flow import cond

    class Module(torch.nn.Module):
        def forward(self, x):
            def true_fn(x):
                return x + x

            def false_fn(x):
                return x[:2].clone()

            return cond(x.shape[0] <= 2, true_fn, false_fn, [x])

    class Module2(torch.nn.Module):
        def forward(self, x):
            def true_fn(x):
                return x + x

            def false_fn(x):
                return x[:2].clone()

            return cond(x.shape[0] <= 2, true_fn, false_fn, (x,))

    mods = [Module(), Module2()]
    for mod in mods:
        x = torch.randn(2, 2)
        out_graph, _ = torch._dynamo.export(mod)(x)
        self.assertExpectedInline(
            out_graph.code.strip(),
            """\
def forward(self, x):
    arg0, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
    l_x_ = arg0
    sym_size_int = torch.ops.aten.sym_size.int(l_x_, 0)
    le = sym_size_int <= 2; sym_size_int = None
    cond_true_0 = self.cond_true_0
    cond_false_0 = self.cond_false_0
    cond = torch.ops.higher_order.cond(le, cond_true_0, cond_false_0, (l_x_,)); le = cond_true_0 = cond_false_0 = l_x_ = None
    getitem_3 = cond[0]
    sym_size_int_1 = torch.ops.aten.sym_size.int(getitem_3, 0); getitem_3 = None
    ge = sym_size_int_1 >= 2; sym_size_int_1 = None
    _assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 2 on node 'ge'"); ge = _assert_scalar_default = None
    getitem_2 = cond[0]; cond = None
    return pytree.tree_unflatten([getitem_2], self._out_spec)""",  # noqa: B950
        )
        self.assertExpectedInline(
            out_graph.cond_true_0.code.strip(),
            """\
def forward(self, l_x_):
    l_x__1 = l_x_
    add = l_x__1 + l_x__1; l_x__1 = None
    return (add,)""",
        )
        self.assertExpectedInline(
            out_graph.cond_false_0.code.strip(),
            """\
def forward(self, l_x_):
    l_x__1 = l_x_
    getitem = l_x__1[slice(None, 2, None)]; l_x__1 = None
    clone = getitem.clone(); getitem = None
    return (clone,)""",
        )
        # We could successfully export branches that return different sizes
        torch._dynamo.export(mod)(torch.randn(3, 2))

        # We specialize into one of the branches since predicate is a python boolean.
        test_x = torch.randn(3, 2)
        mod(test_x)
def test_export_with_map_cond(self):
    """map over a cond body exports and generalizes to a different batch size
    and predicate value than the traced ones."""
    from functorch.experimental.control_flow import cond, map

    class Module(torch.nn.Module):
        def inner(self, x, pred):
            def true_fn(x):
                return x + x

            def false_fn(x):
                return x * x

            return cond(pred, true_fn, false_fn, [x])

        def forward(self, pred, xs):
            def body(x, pred):
                return self.inner(x, pred)

            return map(body, xs, pred)

    mod = Module()
    x = torch.randn(3, 2, 1)
    pred_x = torch.tensor(True)

    y = torch.randn(4, 3, 2)
    pred_y = torch.tensor(False)
    real_result = mod(pred_y, y)

    out_graph, _ = torch._dynamo.export(mod)(pred_x, x)
    self.assertEqual(real_result, out_graph(pred_y, y))

def test_export_with_map_zero_sized_tensor(self):
    """Mapping over a zero-sized leading dimension raises Unsupported."""
    from functorch.experimental.control_flow import map

    class Module(torch.nn.Module):
        def forward(self, xs):
            def body(x):
                return x + 1

            return map(body, xs)

    mod = Module()
    xs = torch.randn(0, 2)
    with self.assertRaisesRegex(
        torch._dynamo.exc.Unsupported,
        "Observed exception",
    ):
        torch._dynamo.export(mod)(xs)
def test_export_meta_val(self):
    """Every placeholder in an aten-graph export carries a "val" meta entry."""

    def f(x, y, z):
        return x * y + z

    gm, _ = torch._dynamo.export(
        f,
        aten_graph=True,
    )(
        torch.ones(3, 2),
        torch.zeros(3, 2),
        torch.ones(3, 2),
    )
    for node in gm.graph.nodes:
        if node.op == "placeholder":
            self.assertIn("val", node.meta)

def test_input_container_type(self):
    """Container-typed inputs/outputs (list arg, dict result) round-trip
    through export."""

    def f(x: torch.Tensor, y: list[torch.Tensor]) -> dict[str, torch.Tensor]:
        return {"a": x.sum() + sum(y).sum()}

    inp = (torch.randn(6, 5), [torch.randn(6, 5), torch.randn(6, 5)])

    gm, _ = torch._dynamo.export(f, aten_graph=True)(*inp)

    self.assertEqual(gm(*inp), f(*inp))

@config.patch(assume_static_by_default=False)
def test_export_symbolic_shape(self):
    """With dynamic shapes on, size-dependent output shapes produce
    aten.sym_size nodes in the exported graph."""

    def f(x: torch.Tensor) -> torch.Tensor:
        return torch.empty(x.shape[0] * 2)

    inp = (torch.randn(6, 5),)
    gm, _ = torch._dynamo.export(f, aten_graph=True)(*inp)
    has_sym_size = False
    for node in gm.graph.nodes:
        if node.target is torch.ops.aten.sym_size.int:
            has_sym_size = True

    self.assertTrue(has_sym_size)
@config.patch(assume_static_by_default=False)
def test_dynamic_slicing(self):
    """Shape-dependent slicing exports dynamically: the aten graph lowers to
    two aten.slice calls, the torch-level graph keeps one getitem."""

    def f(x):
        return x[: x.shape[0] - 2, x.shape[1] - 1 :: 2]

    gm_aten_mode, _ = torch._dynamo.export(f, aten_graph=True)(torch.randn(4, 5))

    inp = torch.randn(6, 7)
    self.assertEqual(gm_aten_mode(inp).shape, f(inp).shape)

    count = 0
    # aten graph should flatten getitem calls to actual
    # slice kernel call.
    for node in gm_aten_mode.graph.nodes:
        if (
            node.op == "call_function"
            and node.target == torch.ops.aten.slice.Tensor
        ):
            count += 1

    self.assertEqual(count, 2)

    gm_torch_mode, _ = torch._dynamo.export(f, aten_graph=False)(torch.randn(4, 5))

    # In torch mode, the graph should contain 3 getitem methods
    # one for x.shape[0]-2 and one for x.shape[1]-1 and one for slice
    # this is because Tensor class has its' own getitem method
    # which gets translated to aten.Slice later.
    count = 0
    for node in gm_torch_mode.graph.nodes:
        if node.op == "call_function" and node.target == operator.getitem:
            count += 1

    self.assertEqual(count, 1)
    self.assertEqual(gm_torch_mode(inp).shape, f(inp).shape)

@config.patch(capture_scalar_outputs=True)
def test_dynamic_slicing_simple(self):
    """A full-range slice(None, None, None) exports and is identity-correct."""

    def f(x):
        return x[slice(None, None, None)]

    gm, _ = torch._dynamo.export(f, aten_graph=True)(torch.randn(4, 5))

    inp = torch.randn(6, 7)
    self.assertEqual(gm(inp), f(inp))
def test_pre_dispatch_simple(self):
    """pre_dispatch=True with fake tracing keeps composite ops (matmul,
    ones_like) undecomposed; graph code is pinned with assertExpectedInline."""

    def f(x):
        y = torch.ones_like(x)
        return torch.matmul(x, y)

    gm, _ = torch._dynamo.export(
        f,
        aten_graph=True,
        pre_dispatch=True,
        tracing_mode="fake",
    )(
        torch.randn(5, 5),
    )

    inp = torch.randn(6, 6)
    self.assertEqual(gm(inp), f(inp))

    self.assertExpectedInline(
        gm.code.strip(),
        """\
def forward(self, x):
    arg0, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
    arg0_1 = arg0
    ones_like = torch.ops.aten.ones_like.default(arg0_1, pin_memory = False)
    matmul = torch.ops.aten.matmul.default(arg0_1, ones_like); arg0_1 = ones_like = None
    return pytree.tree_unflatten([matmul], self._out_spec)""",
    )

@patch.object(torch._dynamo.config, "capture_scalar_outputs", True)
def test_export_cond_in_aten_symbolic(self):
    """cond with bound-method branches exports to an aten graph in symbolic
    mode and matches eager results."""

    class ConditionOp(torch.nn.Module):
        def true_fn(self, x, y):
            return x * y

        def false_fn(self, x, y):
            return x + y

        def forward(self, pred, x, y):
            return cond(pred, self.true_fn, self.false_fn, [x, y])

    model = ConditionOp()
    inp = (
        torch.tensor(False),
        torch.randn(4, 4),
        torch.randn(4, 4),
    )
    gm, _ = torch._dynamo.export(model, aten_graph=True)(*inp)

    gm.print_readable()

    self.assertEqual(gm(*inp), model(*inp))
def test_export_with_kwargs(self):
    """Export preserves original argument names for positional, *args,
    keyword-only, and **kwargs parameters (checked by the shared helper)."""

    def fn_with_kwargs(pos0, tuple0, *myargs, mykw0=None, **mykwargs):
        out = pos0
        for arg in tuple0:
            out *= arg
        for arg in myargs:
            out *= arg
        out *= mykw0
        out *= mykwargs["input0"] * mykwargs["input1"]
        return out

    mykwargs = {"input0": torch.randn(4), "input1": torch.randn(4)}
    tuple0 = (torch.randn(4), torch.randn(4))
    mykw0 = torch.randn(4)
    pos0 = torch.randn(4)
    myargs = [torch.randn(4), torch.randn(4)]

    # *args entries are flattened to myargs_0/myargs_1; **mykwargs to its keys.
    expected_argument_names = [
        "pos0",
        "tuple0",
        "myargs_0",
        "myargs_1",
        "mykw0",
        "input0",
        "input1",
    ]
    self._test_export_preserving_original_signature(
        fn_with_kwargs,
        expected_argument_names,
        pos0,
        tuple0,
        *myargs,
        mykw0=mykw0,
        **mykwargs,
    )

def test_export_with_kwargs_and_empty_args(self):
    """Name preservation with only keyword arguments (no positional args)."""

    def fn_with_kwargs(mykw0=None, **mykwargs):
        out = mykw0
        out *= mykwargs["input0"] * mykwargs["input1"]
        return out

    mykwargs = {"input0": torch.randn(4), "input1": torch.randn(4)}
    mykw0 = torch.randn(4)

    expected_argument_names = ["mykw0"] + list(mykwargs.keys())
    self._test_export_preserving_original_signature(
        fn_with_kwargs, expected_argument_names, mykw0, **mykwargs
    )

def test_export_with_args_and_empty_kwargs(self):
    """Name preservation with only positional and *args (no kwargs)."""

    def fn_with_kwargs(pos0, tuple0, *myargs):
        out = pos0
        for arg in tuple0:
            out *= arg
        for arg in myargs:
            out *= arg
        return out

    tuple0 = (torch.randn(4), torch.randn(4))
    pos0 = torch.randn(4)
    myargs = [torch.randn(4), torch.randn(4)]

    expected_argument_names = ["pos0", "tuple0", "myargs_0", "myargs_1"]
    self._test_export_preserving_original_signature(
        fn_with_kwargs, expected_argument_names, pos0, tuple0, *myargs
    )
@common_utils.parametrize(
    "default_value",
    [
        common_utils.subtest(None, name="None"),
        common_utils.subtest(42.0, name="float"),
        common_utils.subtest(
            # FIXME: AssertionError: Dynamo input and output is a strict subset of traced input/output
            torch.randn(4),
            name="tensor",
            decorators=[unittest.expectedFailure],
        ),
        common_utils.subtest(
            # FIXME: AssertionError: Dynamo input and output is a strict subset of traced input/output
            (torch.randn(4),),
            name="tuple",
            decorators=[unittest.expectedFailure],
        ),
    ],
)
def test_export_with_args_with_default(self, default_value):
    """A defaulted positional arg that is not passed is dropped from the
    exported signature; tensor/tuple defaults currently expected-fail."""

    def fn(pos0, pos1_default=default_value):
        out = pos0
        if pos1_default is None:
            pos1_default = torch.randn(4)
        if isinstance(pos1_default, tuple):
            pos1_default = pos1_default[0]
        out *= pos1_default
        return out

    pos0 = torch.randn(4)
    expected_argument_names = ["pos0"]
    self._test_export_preserving_original_signature(
        fn, expected_argument_names, pos0
    )

@common_utils.parametrize(
    "default_value",
    [
        common_utils.subtest(None, name="None"),
        common_utils.subtest(42.0, name="float"),
        common_utils.subtest(
            # FIXME: AssertionError: Dynamo input and output is a strict subset of traced input/output
            torch.randn(4),
            name="tensor",
            decorators=[unittest.expectedFailure],
        ),
        common_utils.subtest(
            # FIXME: AssertionError: Dynamo input and output is a strict subset of traced input/output
            (torch.randn(4),),
            name="tuple",
            decorators=[unittest.expectedFailure],
        ),
    ],
)
def test_export_with_kwargs_with_default(self, default_value):
    """Same as above for a defaulted keyword-only arg that is not passed."""

    def fn(pos0, *, kw0, kw1_default=default_value, **kwargs):
        out = pos0
        out += kw0
        if kw1_default is None:
            kw1_default = torch.randn(4)
        elif isinstance(kw1_default, tuple):
            kw1_default = kw1_default[0]
        out += kw1_default
        out += kwargs["kw2"]
        return out

    pos0 = torch.randn(4)
    kw0 = torch.randn(4)
    kw2 = torch.randn(4)

    args = (pos0,)
    kwargs = {"kw0": kw0, "kw2": kw2}
    expected_argument_names = ["pos0", "kw0", "kw2"]
    self._test_export_preserving_original_signature(
        fn, expected_argument_names, *args, **kwargs
    )
def test_export_with_wrapped_fn(self):
    # To ensure dynamo.export is robust to wrapped functions
    # when it cannot use `inspect` to retrieve original signature
    # info.
    def _fn(pos0, pos1=1.0, *args, kw0, kw1=2.0, **kwargs):
        out = pos0
        out += pos1
        out += kw0
        out += kw1
        for arg in args:
            out += arg
        for kwarg in kwargs.values():
            out += kwarg
        return out

    def wrapped_fn(*args, **kwargs):
        return _fn(*args, **kwargs)

    pos0 = torch.randn(4)
    kw0 = torch.randn(4)
    args = (pos0, torch.randn(4), torch.randn(4))
    kwargs = {"kw0": kw0, "kw2": torch.randn(4)}
    # Wrapper hides the real signature, so args become args_0..args_N.
    expected_argument_names = [f"args_{i}" for i in range(len(args))] + list(
        kwargs.keys()
    )

    self._test_export_preserving_original_signature(
        wrapped_fn, expected_argument_names, *args, **kwargs
    )

def test_export_with_functools_wrapped_method(self):
    """functools.wraps preserves the decorated method's real signature, so
    export recovers the original parameter names."""

    def test_decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        return wrapper

    class MyModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()

        def forward(self, x):
            return x

        @test_decorator
        def method_to_test(self, pos0, pos1=1.0, *args, kw0, kw1=2.0, **kwargs):
            out = pos0
            out += pos1
            out += kw0
            out += kw1
            for arg in args:
                out += arg
            for kwarg in kwargs.values():
                out += kwarg
            return out

    pos0 = torch.randn(4)
    pos1 = torch.randn(4)
    unnamed_pos = torch.randn(4)
    kw0 = torch.randn(4)
    args = (pos0, pos1, unnamed_pos)
    kwargs = {"kw0": kw0, "kw2": torch.randn(4), "unnamed_kw": torch.randn(4)}
    expected_argument_names = [
        "pos0",
        "pos1",
        "args_0",  # 3rd unnamed positional argument
    ] + list(kwargs.keys())
    m = MyModule()

    self._test_export_preserving_original_signature(
        m.method_to_test, expected_argument_names, *args, **kwargs
    )

def test_export_with_functools_wrapped_fn(self):
    """A functools-wrapped fn re-wrapped by a plain *args/**kwargs wrapper
    again loses names, falling back to args_0..args_N."""

    def test_decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        return wrapper

    @test_decorator
    def _fn(pos0, pos1=1.0, *args, kw0, kw1=2.0, **kwargs):
        out = pos0
        out += pos1
        out += kw0
        out += kw1
        for arg in args:
            out += arg
        for kwarg in kwargs.values():
            out += kwarg
        return out

    def wrapped_fn(*args, **kwargs):
        return _fn(*args, **kwargs)

    pos0 = torch.randn(4)
    kw0 = torch.randn(4)
    args = (pos0, torch.randn(4), torch.randn(4))
    kwargs = {"kw0": kw0, "kw2": torch.randn(4)}
    expected_argument_names = [f"args_{i}" for i in range(len(args))] + list(
        kwargs.keys()
    )

    self._test_export_preserving_original_signature(
        wrapped_fn, expected_argument_names, *args, **kwargs
    )
def _test_export_preserving_original_signature(
    self, fn, expected_argument_names: Sequence[str], *args, **kwargs
):
    """Shared helper: export fn (torch mode), check the exported graph gives
    the same result as eager and that its forward() keeps the expected
    argument names."""
    torch._dynamo.reset()
    exported = torch._dynamo.export(
        fn,
        *args,
        **kwargs,
        aten_graph=False,
    )

    out_graph = exported[0]
    dynamo_result = out_graph(*args, **kwargs)
    real_result = fn(*args, **kwargs)
    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))

    # Check that the exported graph preserves same argument names.
    self.assertEqual(
        inspect.getfullargspec(out_graph.forward).args[1:], expected_argument_names
    )
def test_dataclass_input_output(self):
    """Unregistered dataclasses are rejected as export inputs and outputs
    with a pytree-flattenable UserError."""
    from dataclasses import dataclass

    @dataclass
    class Tensors:
        x: torch.Tensor
        y: torch.Tensor

    def f(t):
        return t.x + t.y

    with self.assertRaisesRegex(
        UserError,
        "It looks like one of the inputs with type .*Tensors.* "
        "is not supported or pytree-flattenable",
    ):
        torch._dynamo.export(f, aten_graph=False)(
            Tensors(x=torch.randn(10), y=torch.randn(10))
        )

    def f(x, y):
        return Tensors(x=x.sin(), y=y.cos())

    with self.assertRaisesRegex(
        UserError,
        "It looks like one of the outputs with type .*Tensors.* "
        "is not supported or pytree-flattenable",
    ):
        torch._dynamo.export(f, aten_graph=False)(torch.randn(10), torch.randn(10))

def test_empty(self):
    """Exporting an identity fn and a zero-argument module both work."""

    def f(x):
        return x

    exported = torch._dynamo.export(f)(torch.randn(3, 3))
    out_graph = exported[0]
    inp = torch.randn(3, 3)
    self.assertTrue(torch._dynamo.utils.same(inp, out_graph(inp)))

    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.a = torch.ones(3, 3)

        def forward(self):
            return self.a

    exported = torch._dynamo.export(M())()
    out_graph = exported[0]
    self.assertTrue(torch._dynamo.utils.same(torch.ones(3, 3), out_graph()))

def test_export_meta(self):
    """A module built and run entirely on the meta device exports and matches
    eager output."""

    class MyModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.p = torch.nn.Parameter(torch.ones(2, 3))

        def forward(self, x):
            return self.p + x

    with torch.device("meta"):
        m = MyModule()

    inp = torch.ones(2, 3, device="meta")
    exported = torch._dynamo.export(m)(inp)
    out_graph = exported[0]
    dynamo_result = out_graph(inp)
    self.assertEqual(dynamo_result, m(inp))
def test_constraint_violation_error_messages(self):
    """Constraint-violation errors mention the offending dims: derived
    relations (dim0 = 2*dim1), specialization to a constant, and guards that
    narrow a declared range."""

    class Foo(torch.nn.Module):
        def forward(self, x):
            # Relates the two dims: forces dim0 = 2*dim1.
            if x.shape[0] == x.shape[1] * 2:
                return x + 1
            else:
                return x + 2

    foo = Foo()

    t = torch.zeros([8, 4])
    dim0 = torch.export.Dim("dim0", min=3, max=10)
    dim1 = torch.export.Dim("dim1")
    dynamic_shapes = {"x": (dim0, dim1)}

    with self.assertRaisesRegex(
        torch._dynamo.exc.UserError,
        "Constraints violated .*!(.*\n)*.*"
        "by dim0 = 2\\*dim1(.*\n)*.*"
        "Not all values of dim1 .* satisfy the generated guard 2 <= .* and .* <= 5(.*\n)*.*",
    ):
        torch.export.export(foo, (t,), dynamic_shapes=dynamic_shapes, strict=True)

    class Bar(torch.nn.Module):
        def forward(self, x):
            # Specializes dim0 to the constant 5.
            if x.shape[0] == 5:
                return x + 1
            else:
                return x + 2

    bar = Bar()

    t = torch.zeros([5])
    dim0 = torch.export.Dim("dim0", min=3, max=8)
    dynamic_shapes = {"x": (dim0,)}
    with self.assertRaisesRegex(
        torch._dynamo.exc.UserError,
        "You marked.*but your code specialized it to be a constant.*"
        "If you're using Dim.DYNAMIC, replace it with either Dim.STATIC or Dim.AUTO",
    ):
        torch.export.export(bar, (t,), dynamic_shapes=dynamic_shapes, strict=True)

    class Qux(torch.nn.Module):
        def forward(self, x):
            # Guards narrow dim0 to (5, 10), tighter than the declared range.
            if x.shape[0] > 5 and x.shape[0] < 10:
                return x + 1
            else:
                return x + 2

    qux = Qux()

    t = torch.zeros([7])
    dim0 = torch.export.Dim("dim0", min=3, max=8)
    dynamic_shapes = {"x": (dim0,)}
    with self.assertRaisesRegex(
        torch._dynamo.exc.UserError,
        "Not all values.*satisfy the generated guard",
    ):
        torch.export.export(qux, (t,), dynamic_shapes=dynamic_shapes, strict=True)

def test_untracked_inputs_in_constraints(self):
    """A dynamic dim on an input that the forward never uses still allows a
    different size for that input at call time (no specialization error)."""
    from copy import copy

    class Foo(torch.nn.Module):
        def forward(self, x, y):
            return y + 1

    foo = Foo()

    x = torch.randn(2)
    y = torch.randn(5, 4)

    dim0_x, dim0_y = torch.export.dims("dim0_x", "dim0_y")
    dynamic_shapes = {"x": {0: dim0_x}, "y": {0: dim0_y}}

    example_inputs = (copy(x), y)
    ep = torch.export.export(
        foo, example_inputs, dynamic_shapes=dynamic_shapes, strict=True
    )
    ep.module()(torch.randn(3), y)  # no specialization error
def test_export_raise_guard_full_constraint(self):
    """Marking dim 0 dynamic conflicts with a branch that fully specializes
    it (== 3), raising ConstraintViolationError."""
    y = torch.randn([3, 3, 3])

    def my_dyn_fn(x):
        if x.shape[0] == 3:
            return x.sin()
        return x.cos()

    torch._dynamo.export(my_dyn_fn)(y)

    with self.assertRaises(ConstraintViolationError):
        torch._dynamo.export(
            my_dyn_fn, dynamic_shapes=({0: torch.export.Dim("dimx")},)
        )(y)

def test_export_module_specify_constraints_signature(self):
    """Module variant of the previous test: the error names the dim and the
    specialized value (dimx = 3)."""
    y = torch.randn([3, 3, 3])

    class Mod(torch.nn.Module):
        def forward(self, x):
            if x.shape[0] == 3:
                return x.sin()
            return x.cos()

    mod = Mod()
    torch._dynamo.export(mod)(y)

    with self.assertRaisesRegex(ConstraintViolationError, "dimx = 3"):
        torch._dynamo.export(mod, dynamic_shapes=({0: torch.export.Dim("dimx")},))(
            y
        )

def test_export_raise_guard_partial_constraint(self):
    """A range guard (> 3) on a dim marked fully dynamic also violates."""
    y = torch.randn([3, 3, 3])

    def my_dyn_fn(x):
        if x.shape[0] > 3:
            return x.sin()
        return x.cos()

    torch._dynamo.export(my_dyn_fn)(y)

    with self.assertRaises(ConstraintViolationError):
        torch._dynamo.export(
            my_dyn_fn, dynamic_shapes=({0: torch.export.Dim("dimx")},)
        )(y)

def test_export_raise_on_relationship(self):
    """Equality guards across different axes of different inputs violate
    unless the dynamic_shapes spec places the shared dim on the guarded axes."""
    y = torch.randn([3, 3, 3])

    def my_dyn_fn(a, b, c):
        if a.shape[0] == b.shape[1] == c.shape[2]:
            return a.sin()
        return a.cos()

    torch._dynamo.export(my_dyn_fn)(y, y, y)
    dim = torch.export.Dim("dim")
    dynamic_shapes = ({0: dim}, {0: dim}, {0: dim})
    with self.assertRaises(ConstraintViolationError):
        torch._dynamo.export(my_dyn_fn, dynamic_shapes=dynamic_shapes)(y, y, y)
    # Shared dim on the axes the guard actually relates: OK.
    dynamic_shapes = ({0: dim}, {1: dim}, {2: dim})
    torch._dynamo.export(my_dyn_fn, dynamic_shapes=dynamic_shapes)(y, y, y)

def test_export_no_raise(self):
    """A guard on a dim that is NOT marked dynamic (shape[1]) does not
    conflict with dynamic dim 0."""
    y = torch.randn([3, 3, 3])

    def my_dyn_fn(a, b, c):
        if a.shape[1] == 3:
            return a.cos()
        return a * b * c

    torch._dynamo.export(my_dyn_fn)(y, y, y)
    dim = torch.export.Dim("dim")
    dynamic_shapes = ({0: dim}, {0: dim}, {0: dim})
    torch._dynamo.export(my_dyn_fn, dynamic_shapes=dynamic_shapes)(y, y, y)
def test_export_multi_dynamic_dim_unsafe_relationship(self):
    """a.shape[0] == c.shape[0] requires a and c to share one Dim; distinct
    dims violate, reusing dimx for dimz succeeds."""
    x = torch.randn([3, 3, 3])
    y = torch.randn([2, 2, 2])
    z = torch.randn([3, 3, 3])

    def my_dyn_fn(a, b, c):
        if a.shape[0] == c.shape[0]:
            return a.cos()
        return a * c, b

    torch._dynamo.export(my_dyn_fn)(x, y, z)
    dimx, dimy, dimz = torch.export.dims("dimx", "dimy", "dimz")
    dynamic_shapes = ({0: dimx}, {0: dimy}, {0: dimz})
    with self.assertRaises(ConstraintViolationError):
        torch._dynamo.export(my_dyn_fn, dynamic_shapes=dynamic_shapes)(x, y, z)
    dimz = dimx
    dynamic_shapes = ({0: dimx}, {0: dimy}, {0: dimz})
    torch._dynamo.export(my_dyn_fn, dynamic_shapes=dynamic_shapes)(x, y, z)

def test_remove_redundant_dynamic_dim_in_error_message(self):
    """The violation message suggests the minimal fix (dim0_b = dim0_a) for
    dims related through a dict-valued input."""

    class Foo(torch.nn.Module):
        def forward(self, x, y):
            if x.shape[0] == y["k"].shape[0]:
                return x + 1
            else:
                return x - 1

    foo = Foo()

    a = torch.randn(3)
    b = torch.randn(3)
    dim0_a, dim0_b = torch.export.dims("dim0_a", "dim0_b")
    with self.assertRaisesRegex(torch._dynamo.exc.UserError, "dim0_b = dim0_a"):
        torch.export.export(
            foo,
            (a, {"k": b}),
            dynamic_shapes={"x": {0: dim0_a}, "y": {"k": {0: dim0_b}}},
            strict=True,
        )

def test_enforce_equalities(self):
    """Shared Dim objects enforce size equalities across inputs: mismatched
    sizes error with a clear message; matched sizes export with equal symbolic
    dims in placeholder metadata."""

    class Bar(torch.nn.Module):
        def forward(self, x, y):
            return torch.matmul(x, y)

    bar = Bar()

    batch, size = torch.export.dims("batch", "size")
    dynamic_shapes = {"x": (batch, size, size), "y": (batch, size, size)}
    x = torch.randn(10, 3, 3)
    y = torch.randn(10, 3, 4)
    with self.assertRaisesRegex(
        torch._dynamo.exc.UserError,
        ".*y.*size.*2.* = 4 is not equal to .*x.*size.*1.* = 3",
    ):
        with torch._export.config.patch(use_new_tracer_experimental=True):
            torch.export.export(
                bar, (x, y), dynamic_shapes=dynamic_shapes, strict=True
            )
    y = torch.randn(10, 3, 3)
    with torch._export.config.patch(use_new_tracer_experimental=True):
        ebar = torch.export.export(
            bar, (x, y), dynamic_shapes=dynamic_shapes, strict=True
        )
    for node in ebar.graph_module.graph.nodes:
        if node.op == "placeholder":
            shape = node.meta["val"].shape
            # Both "size" axes must carry the same symbolic dim.
            self.assertEqual(shape[1], shape[2])
@torch._dynamo.config.patch(
    capture_dynamic_output_shape_ops=True,
    specialize_int=True,
    capture_scalar_outputs=True,
)
def test_export_preserve_constraints_as_metadata_tensor(self):
    """torch._check range constraints on a data-dependent shape (nonzero)
    are accepted during symbolic export."""

    def f(x):
        b = x.nonzero()
        torch._check(b.shape[0] >= 2)
        torch._check(b.shape[0] <= 5)
        return b

    y = torch.tensor([8, 8, 6])
    torch._dynamo.export(
        f,
        aten_graph=True,
        tracing_mode="symbolic",
    )(y)

@config.patch(
    capture_dynamic_output_shape_ops=True,
    specialize_int=True,
    capture_scalar_outputs=True,
)
def test_exported_graph_serialization(self):
    """An exported graph with dynamic-shape metadata can be torch.save'd."""

    def f(x, y):
        b = x.item()
        return torch.empty((b, y.shape[0]))

    x = torch.tensor([3])
    y = torch.randn([8, 8, 6])
    example_inputs = [x, y]
    dynamic_shapes = (None, {0: torch.export.Dim("dimy", min=6, max=10)})
    gm, _ = torch._dynamo.export(
        f,
        dynamic_shapes=dynamic_shapes,
        aten_graph=True,
        tracing_mode="symbolic",
    )(*example_inputs)

    # Ensure the exported graph module with metadata is serializable,
    # metadata won't be saved in the serialized module
    buffer = io.BytesIO()
    torch.save(gm, buffer)
def test_export_dynamic_dim_not_1(self):
    """A dim traced as 1 cannot be marked dynamic when a branch guards on
    shape[0] != 1 (specialization to 1 violates)."""
    x = torch.randn([1, 1, 1])

    def my_dyn_fn(a):
        if a.shape[0] != 1:
            return a.cos()
        return a * a

    torch._dynamo.export(my_dyn_fn)(x)
    with self.assertRaises(ConstraintViolationError):
        torch._dynamo.export(
            my_dyn_fn, dynamic_shapes=({0: torch.export.Dim("dimx")},)
        )(x)

def test_symbool(self):
    """A symbolic bool (shape comparison) fed to scalar_tensor exports and
    evaluates correctly for a different input size."""

    def f(x):
        a = torch.scalar_tensor(x.shape[0] > 4)
        return x.sin().sum() + a.sum()

    gm, _ = torch._dynamo.export(f, aten_graph=True)(torch.ones(6, 4))
    self.assertEqual(gm(torch.ones(3, 4)), f(torch.ones(3, 4)))

def test_export_multi_dynamic_dim_constraint(self):
    """Marking all dims of one input dynamic violates an inter-input equality
    guard unless the related input shares the same Dim."""
    x = torch.randn([3, 3, 3])
    y = torch.randn([2, 2, 2])
    z = torch.randn([3, 3, 3])

    def my_dyn_fn(a, b, c):
        if a.shape[0] == c.shape[0]:
            return a.cos()
        return a * c, b

    torch._dynamo.export(my_dyn_fn)(x, y, z)
    dimx_0, dimx_1, dimx_2 = torch.export.dims("dimx_0", "dimx_1", "dimx_2")
    dynamic_shapes = ({0: dimx_0, 1: dimx_1, 2: dimx_2}, None, None)
    with self.assertRaises(ConstraintViolationError):
        torch._dynamo.export(my_dyn_fn, dynamic_shapes=dynamic_shapes)(x, y, z)
    dynamic_shapes = ({0: dimx_0, 1: dimx_1, 2: dimx_2}, None, {0: dimx_0})
    torch._dynamo.export(my_dyn_fn, dynamic_shapes=dynamic_shapes)(x, y, z)

def test_export_dynamic_dim_range_constraint(self):
    """Guards must hold over the declared Dim range: > 3 fits [5, 6]; > 5
    does not and violates."""
    x = torch.ones(6, 4, 4)
    dynamic_shapes = ({0: torch.export.Dim("dimx", min=5, max=6)},)

    def foo(x):
        if x.shape[0] > 3:  # ok
            return x.sin()
        return x.cos()

    torch._dynamo.export(
        foo,
        dynamic_shapes=dynamic_shapes,
        aten_graph=True,
    )(x)

    def bar(x):
        if x.shape[0] > 5:  # error
            return x.sin()
        return x.cos()

    with self.assertRaises(ConstraintViolationError):
        torch._dynamo.export(
            bar,
            dynamic_shapes=dynamic_shapes,
            aten_graph=True,
        )(x)
def test_trivial_constraint(self):
    """Divisibility guards: non-trivial ones violate a dynamic Dim; a
    trivially-true identity ((2n+2) % (n+1) == 0) does not."""

    class Foo(torch.nn.Module):
        def forward(self, x):
            # complex divisibility condition
            if (2 * x.shape[0] + 3) % (x.shape[0] - 3) == 0:
                return x + 1
            else:
                return x - 1

    foo = Foo()

    class Bar(torch.nn.Module):
        def forward(self, x):
            # trivially true
            if (2 * x.shape[0] + 2) % (x.shape[0] + 1) == 0:
                return x + 1
            else:
                return x - 1

    bar = Bar()

    class Qux(torch.nn.Module):
        def forward(self, x):
            # simple divisibility condition (not trivially true)
            if (3 * x.shape[0]) % 2 == 0:
                return x + 1
            else:
                return x - 1

    qux = Qux()

    x = torch.randn(12)
    dim0 = torch.export.Dim("dim0", max=100)
    dynamic_shapes = {"x": (dim0,)}
    with self.assertRaisesRegex(
        torch._dynamo.exc.UserError,
        r"Constraints violated \(dim0\)",
    ):
        torch.export.export(foo, (x,), dynamic_shapes=dynamic_shapes, strict=True)

    torch.export.export(bar, (x,), dynamic_shapes=dynamic_shapes, strict=True)

    with self.assertRaisesRegex(
        torch._dynamo.exc.UserError,
        r"Constraints violated \(dim0\)",
    ):
        torch.export.export(qux, (x,), dynamic_shapes=dynamic_shapes, strict=True)

def test_list_contains(self):
    """assert size in [list] traces through export with dynamic shapes on."""

    def func(x):
        assert x.size(-1) in [4, 5, 6], "bad"
        return x + x

    inps = (torch.randn(1, 5),)
    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps)

    torch._dynamo.reset()

    exported = torch._dynamo.export(func, aten_graph=True)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps)

    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))

def test_list_not_contains(self):
    """`not in` assertions (on a size and on constant strings) also trace."""

    def func(x):
        assert x.size(0) not in [4, 5, 6], "bad1"
        assert "monkey" not in ["cow", "pig"], "bad2"
        return x + x

    inps = (torch.randn(1, 5),)
    opt_func = torch.compile(func, backend="eager", fullgraph=True, dynamic=True)
    real_result = opt_func(*inps)

    torch._dynamo.reset()

    exported = torch._dynamo.export(func, aten_graph=True)(*inps)
    out_graph = exported[0]
    dynamo_result = out_graph(*inps)

    self.assertTrue(torch._dynamo.utils.same(real_result, dynamo_result))
def test_export_identity(self):
    """An identity function exports and returns its input unchanged."""
    inp = torch.tensor([0.1, 0.1])

    def func(x):
        return x

    torch._dynamo.reset()
    exported, _ = torch._dynamo.export(func)(inp)
    dynamo_result = exported(inp)
    self.assertTrue(torch._dynamo.utils.same(inp, dynamo_result))

def test_export_specialized_int(self):
    """int and float module attributes used in forward are burned into the
    graph: only the tensor input remains as a placeholder."""

    class Foo(torch.nn.Module):
        def __init__(
            self,
            input_dim,
        ):
            super().__init__()
            self.torch_module = torch.nn.LayerNorm(
                input_dim, eps=1e-5, elementwise_affine=True
            )
            self.int_val = 100

        def forward(self, input):
            return input.cos() * self.int_val * self.torch_module.eps

    mod = Foo(128)
    inp = torch.randn(3, 128)

    # In export, int & float in forward should always be specialized
    gm, _ = torch._dynamo.export(mod, aten_graph=True)(inp)
    count = 0
    for node in gm.graph.nodes:
        if node.op == "placeholder":
            count += 1
    self.assertEqual(count, 1)

def test_export_with_nonzero_static(self):
    """nonzero_static with a module-attribute size exports and produces
    outputs of exactly that static size."""

    class BasicModule(torch.nn.Module):
        def __init__(self, static_size):
            super().__init__()
            self.static_size = static_size

        def forward(self, x):
            return torch.nonzero_static(x, size=self.static_size)

    input_tensors = torch.tensor([6, 8]), torch.zeros(2, 3)
    static_sizes = 3, 4
    for input_tensor, static_size in zip(input_tensors, static_sizes):
        m = BasicModule(static_size)
        gm, _ = torch._dynamo.export(m, aten_graph=True)(input_tensor)
        res = gm(input_tensor)
        self.assertEqual(res.size(0), static_size)
        self.assertTrue(
            torch._dynamo.utils.same(
                res, torch.nonzero_static(input_tensor, size=static_size)
            )
        )

def test_export_pass_arg_by_name(self):
    """The exported graph accepts the original argument passed by keyword."""

    class BasicModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.my_lin = torch.nn.Linear(3, 4, bias=True)

        def forward(self, x):
            return self.my_lin(x)

    mod, input_tensor = BasicModule(), torch.randn(2, 3)
    gm, _ = torch._dynamo.export(mod, aten_graph=True)(input_tensor)
    ref = mod(x=input_tensor)
    res = gm(x=input_tensor)
    self.assertTrue(torch._dynamo.utils.same(ref, res))

def test_export_pass_arg_by_name_star_args(self):
    """*args-based forward exports and accepts multiple positional inputs."""

    class BasicModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.my_lin = torch.nn.Linear(3, 4, bias=True)

        def forward(self, *args):
            return self.my_lin(args[0]) * self.my_lin(args[1])

    mod, input_tensor, input_tensor2 = (
        BasicModule(),
        torch.randn(2, 3),
        torch.randn(2, 3),
    )
    gm, _ = torch._dynamo.export(mod, aten_graph=True)(input_tensor, input_tensor2)
    ref = mod(input_tensor, input_tensor2)
    res = gm(input_tensor, input_tensor2)
    self.assertTrue(torch._dynamo.utils.same(ref, res))
def test_export_dynamic_dim_cleanup(self):
y = torch.randn([3, 3, 3])
def my_dyn_fn(x):
return x.cos()
torch._dynamo.export(my_dyn_fn, dynamic_shapes=({0: torch.export.Dim("dim")},))(
y
)
@config.patch(capture_dynamic_output_shape_ops=True)
def test_export_dynamic_control_flow_error(self):
def f(x):
if x.nonzero() > 3:
return x.cos()
return x.sin()
with self.assertRaisesRegex(
torch._dynamo.exc.Unsupported,
"Data-dependent branching",
):
torch._dynamo.export(f, aten_graph=True)(torch.randn(5, 6))
@config.patch(assume_static_by_default=False)
def test_export_persist_assert(self):
def f(x):
assert x[0].sum() > 4, "Shape must be more than 4"
return x.cos() + x.sin()
gm, _ = torch._dynamo.export(f, aten_graph=True, tracing_mode="symbolic")(
torch.ones(5, 4, 6)
)
def has_aten_op(gm, op):
for node in gm.graph.nodes:
if node.target == op:
return True
return False
self.assertTrue(has_aten_op(gm, torch.ops.aten._assert_async.msg))
gm.graph.eliminate_dead_code()
gm.recompile()
self.assertTrue(has_aten_op(gm, torch.ops.aten._assert_async.msg))
with self.assertRaisesRegex(RuntimeError, "Shape must be more than 4"):
gm(torch.zeros(3, 4, 5))
@common_utils.parametrize(
"type_fn",
[
common_utils.subtest(type, name="builtin"),
common_utils.subtest(lambda obj: obj.__class__, name="attr"),
],
)
def test_access_class_method_from_user_class(self, type_fn):
class A:
@classmethod
def func(cls):
return torch.Tensor([4, 5])
def f(x):
a = A()
return x.sum() + type_fn(a).func().sum()
gm, _ = torch._dynamo.export(f, aten_graph=True)(torch.ones(6, 4))
self.assertEqual(f(torch.ones(6, 4)), gm(torch.ones(6, 4)))
def test_not_functionalize(self):
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buffer1 = torch.nn.Buffer(torch.ones(6, 2))
def forward(self, x):
x.add_(2)
return x.sum() + self.buffer1.sum()
example_inputs = (torch.ones(1, 2, 3),)
gm, _ = torch._dynamo.export(
Foo(),
aten_graph=True,
tracing_mode="symbolic",
)(*example_inputs)
count = 0
for node in gm.graph.nodes:
if node.target == torch.ops.aten.add_.Tensor:
count += 1
self.assertEqual(count, 1)
test_inp = (torch.ones(1, 2, 3),)
test_inp_v2 = (torch.ones(1, 2, 3),)
self.assertEqual(gm(*test_inp), Foo()(*test_inp_v2))
def test_round_dynamic_shapes(self):
def f(x):
return x[: round(x.shape[0] / 2)]
gm, _ = torch._dynamo.export(f, aten_graph=True)(torch.ones(6, 4))
self.assertEqual(f(torch.ones(6, 4)), gm(torch.ones(6, 4)))
def test_cond_supported_pred_types(self):
def true_fn(x):
return x.cos()
def false_fn(x):
return x.sin()
def f_pred_traced_as_symnode_var(x):
return cond(x.shape[0] > 2, true_fn, false_fn, [x])
def f_pred_traced_as_tensor_var(x):
return cond(x.all(), true_fn, false_fn, [x])
def f_pred_complex_expression_traced_as_symnode_var(x):
return cond(
x.dim() > 1 and x.shape[1] > 5 and x.shape[1] <= 10,
true_fn,
false_fn,
[x],
)
example_inputs = (torch.rand(5, 8),)
for f in [
f_pred_traced_as_symnode_var,
f_pred_traced_as_tensor_var,
f_pred_complex_expression_traced_as_symnode_var,
]:
gm, _ = torch._dynamo.export(f, aten_graph=True)(*example_inputs)
self.assertEqual(gm(*example_inputs), f(*example_inputs))
def test_sum_param(self):
# Setting a new attribute inside forward()
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = torch.randn(3, 2)
def forward(self, x):
self.b = 2
return x.sum() + self.a.sum() + self.b
torch._dynamo.export(Foo())(torch.randn(3, 2))
def test_mixed_real_and_fake_inputs(self):
class _TestPattern(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.bn = torch.nn.BatchNorm2d(1)
def forward(self, input):
running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
scale_factor = self.bn.weight / running_std
weight_shape = [1] * len(self.conv.weight.shape)
weight_shape[0] = -1
bias_shape = [1] * len(self.conv.weight.shape)
bias_shape[1] = -1
scaled_weight = self.conv.weight * scale_factor.reshape(weight_shape)
zero_bias = torch.zeros_like(self.conv.bias, dtype=input.dtype)
conv = self.conv._conv_forward(input, scaled_weight, zero_bias)
conv_orig = conv / scale_factor.reshape(bias_shape)
conv_orig = conv_orig + self.conv.bias.reshape(bias_shape)
conv = self.bn(conv_orig)
return conv
example_inputs = (torch.randn(1, 1, 3, 3),)
torch._dynamo.export(
_TestPattern(),
aten_graph=True,
)(*example_inputs)
@config.patch(
capture_dynamic_output_shape_ops=True,
capture_scalar_outputs=True,
assume_static_by_default=False,
)
def test_sym_contains(self):
def f(x, y):
return x.size(0) in y
gm, _ = torch._dynamo.export(f, aten_graph=True)(torch.ones(2), torch.ones(3))
true_inp = (torch.Tensor([6, 4, 5]), torch.ones(6, 4).add_(5))
false_inp = (torch.Tensor([6, 4, 5]), torch.ones(6, 4).add_(2))
self.assertEqual(gm(*true_inp), f(*true_inp))
self.assertEqual(gm(*false_inp), f(*false_inp))
def test_cond_raise_user_error_on_missing_args(self):
def true_fn(x):
return x.cos()
def false_fn(x):
return x.sin()
def f(x):
return cond(x.shape[0] > 10, true_fn, false_fn)
# Now we allow torch.cond to handle empty args
example_inputs = (torch.rand(5),)
with self.assertRaisesRegex(
TypeError,
r"false_fn\(\) missing 1 required positional argument: 'x'",
):
f(*example_inputs)
def test_cond_raise_user_error_on_unsupported_pred(self):
def f_unsupported_pred(x):
pred = torch.nn.Module()
return cond(pred, lambda x: x.sin(), lambda x: x.cos(), [x])
example_inputs = (torch.rand(5),)
with self.assertRaisesRegex(
RuntimeError,
"Expected pred to be bool or tensor, but got Module()",
):
f_unsupported_pred(*example_inputs)
def test_cond_raise_user_error_on_non_list_operands(self):
def f_non_list_operands(x):
return cond(torch.tensor(True), lambda x: x.sin(), lambda x: x.cos(), x)
example_inputs = (torch.rand(5),)
with self.assertRaisesRegex(
RuntimeError,
r"Expect operands to be a tuple of possibly nested dict/list/tuple",
):
f_non_list_operands(*example_inputs)
def test_cond_raise_user_error_on_non_tensor_operands(self):
def f_non_tensor_operands(x):
a: float = 3.14
return cond(
torch.tensor(1234), lambda x, a: x.sin(), lambda x, a: x.cos(), [x, a]
)
example_inputs = (torch.rand(5),)
with self.assertRaisesRegex(
RuntimeError,
r"Expect operands to be a tuple of possibly nested dict/list/tuple",
):
f_non_tensor_operands(*example_inputs)
def test_cond_raise_user_error_on_branch_args_mismatch(self):
def true_fn(x, y):
return x.sin()
def false_fn(x):
return x.cos()
def f_branch_args_mismatch(x, y):
return cond(torch.tensor([[[[True]]]]), true_fn, false_fn, [x, y])
example_inputs = (torch.rand(5), torch.rand(2))
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
"Cond doesn't work unless it is captured completely with torch.compil",
):
torch._dynamo.export(
f_branch_args_mismatch,
aten_graph=True,
)(
*example_inputs,
)
@config.patch(suppress_errors=True)
def test_uncaptured_higher_order_op_error_not_suppresed(self):
def true_fn(x, y):
return x.sin()
def false_fn(x):
return x.cos()
def f_branch_args_mismatch(x, y):
return cond(torch.tensor([[[[100]]]]), true_fn, false_fn, [x, y])
example_inputs = (torch.rand(5), torch.rand(2))
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
"Cond doesn't work unless it is captured completely with torch.compile",
):
torch._dynamo.export(
f_branch_args_mismatch,
aten_graph=True,
)(
*example_inputs,
)
def test_cond_raise_user_error_on_branch_return_non_tensor(self):
def f_branch_return_non_tensor(x):
return cond(x.shape[0] <= 5, lambda x: 3.14, lambda x: 3.14, [x])
example_inputs = (torch.rand(5),)
with self.assertRaisesRegex(
torch._dynamo.exc.UncapturedHigherOrderOpError,
"Cond doesn't work unless it is captured completely with torch.compile",
):
torch._dynamo.export(
f_branch_return_non_tensor,
aten_graph=True,
)(*example_inputs)
def test_cond_raise_user_error_on_branch_return_multiple_tensors(self):
def f_branch_return_multiple_tensors(pred, x, y):
return cond(
pred,
lambda x: (x.clone(), x.clone()),
lambda x: (x.clone(), x.clone()),
[y],
)
example_inputs = (torch.tensor(True), torch.randn(4), torch.randn(2))
gm, _ = torch._dynamo.export(
f_branch_return_multiple_tensors,
aten_graph=True,
)(*example_inputs)
self.assertEqual(
gm(*example_inputs), f_branch_return_multiple_tensors(*example_inputs)
)
def test_multiple_outputs_op_with_evaluator(self):
class TopKModel(torch.nn.Module):
def forward(self, x):
values, _ = torch.topk(x, 3)
return torch.sum(values)
x = torch.arange(1.0, 6.0, requires_grad=True)
torch._dynamo.export(TopKModel())(x)
def test_cond_raise_user_error_on_mismatch_return_length(self):
def true_fn(x):
return x.clone()
def false_fn(x):
return (x.clone(), x.clone())
def f_mismatch_return_length(x):
return cond(torch.tensor(100), true_fn, false_fn, [x])
example_inputs = (torch.rand(5),)
with self.assertRaisesRegex(
torch._dynamo.exc.TorchRuntimeError,
"Unmatched output spec from torch.cond branches",
):
torch._dynamo.export(
f_mismatch_return_length,
aten_graph=True,
)(*example_inputs)
def test_cond_raise_user_error_on_mismatch_return_tensor_meta(self):
def true_fn(x):
return torch.tensor([[3], [2]])
def false_fn(x):
return torch.tensor([3.14])
def f_return_tensor_mismatch(x):
return cond(x.shape[0] < 3, true_fn, false_fn, [x])
example_inputs = (torch.rand(5),)
with self.assertRaisesRegex(
torch._dynamo.exc.TorchRuntimeError,
"When merging two branches' output in torch.cond",
):
torch._dynamo.export(f_return_tensor_mismatch, aten_graph=True)(
*example_inputs,
)
def test_byte_tensor_does_not_crash(self):
# See https://github.com/pytorch/pytorch/issues/100455
def func(text):
tensor = torch.ByteTensor(list(bytes(text, "utf8")))
return tensor + tensor
text = "".join(chr(a % 90 + 40) for a in range(111))
opt_func = torch.compile(func, backend="eager", dynamic=True)
for i in [99, 100]:
input = text[:i]
opt_func(input)
def test_export_defaults_ok(self):
class DynamicSliceExportMod(torch.nn.Module):
def forward(self, x):
results = []
for i in range(4):
results.append(x[: x.size(0) - i, i : x.size(2), i:3])
return tuple(results)
gm, _ = torch._dynamo.export(DynamicSliceExportMod(), aten_graph=True)(
torch.randn(5, 5, 5),
)
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, x):
arg0, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
arg0_1 = arg0
sym_size_int = torch.ops.aten.sym_size.int(arg0_1, 0)
slice_1 = torch.ops.aten.slice.Tensor(arg0_1, 2, 0, 3)
sub = sym_size_int - 1
slice_2 = torch.ops.aten.slice.Tensor(arg0_1, 0, 0, sub); sub = None
slice_3 = torch.ops.aten.slice.Tensor(slice_2, 1, 1, sym_size_int); slice_2 = None
slice_4 = torch.ops.aten.slice.Tensor(slice_3, 2, 1, 3); slice_3 = None
sub_1 = sym_size_int - 2
slice_5 = torch.ops.aten.slice.Tensor(arg0_1, 0, 0, sub_1); sub_1 = None
slice_6 = torch.ops.aten.slice.Tensor(slice_5, 1, 2, sym_size_int); slice_5 = None
slice_7 = torch.ops.aten.slice.Tensor(slice_6, 2, 2, 3); slice_6 = None
sub_2 = sym_size_int - 3
slice_8 = torch.ops.aten.slice.Tensor(arg0_1, 0, 0, sub_2); arg0_1 = sub_2 = None
slice_9 = torch.ops.aten.slice.Tensor(slice_8, 1, 3, sym_size_int); slice_8 = sym_size_int = None
slice_10 = torch.ops.aten.slice.Tensor(slice_9, 2, 3, 3); slice_9 = None
return pytree.tree_unflatten([slice_1, slice_4, slice_7, slice_10], self._out_spec)""",
)
def test_capture_symbolic_tracing_simple_within_fake_mode(self):
from torch._dynamo.output_graph import config
def f(x):
y = torch.randn(3)
return x + x * y
with fake_tensor.FakeTensorMode(
shape_env=ShapeEnv(
allow_scalar_outputs=config.capture_scalar_outputs,
allow_dynamic_output_shape_ops=config.capture_dynamic_output_shape_ops,
),
):
x = torch.randn(3)
for aten_graph in [True, False]:
gm, _ = torch._dynamo.export(f, aten_graph=aten_graph)(x)
self.assertTrue(
isinstance(gm, torch.fx.GraphModule),
msg="test_capture_symbolic_tracing_simple_within_fake_mode_aten_graph_"
+ str(aten_graph),
)
def test_export_with_symbool_inputs(self):
def f(pred: bool, x: torch.Tensor):
if pred:
return x.sin()
else:
return x.cos()
x = torch.randn([3, 4])
def test_symbool_guards(
f, size_tests, exp_graph, exp_guard_code, exp_shape_env_guards
):
shape_env = ShapeEnv()
with fake_tensor.FakeTensorMode(
shape_env=shape_env,
) as fake_mode:
fake_x = fake_mode.from_tensor(
x,
symbolic_context=StatelessSymbolicContext(
dynamic_sizes=[DimDynamic.DYNAMIC for _ in range(x.dim())],
),
)
for i, size in enumerate(size_tests):
pred = fake_x.size(0) == size
gm, guards = torch._dynamo.export(f)(pred, x)
actual = normalize_gm(gm.print_readable(print_output=False))
# TODO: This is naughty, EXPECTTEST_ACCEPT=1 doesn't work
self.assertExpectedInline(actual, exp_graph[i].format(size=size))
dynamo_shape_env_guards = [
guard
for guard in guards
if guard.guard_types is not None
and "SHAPE_ENV" in guard.guard_types
]
self.assertEqual(len(dynamo_shape_env_guards), 1)
guard_code_on_predicate = [
code
for code in dynamo_shape_env_guards[0].code_list
if "L['pred']" in code
]
self.assertEqual(guard_code_on_predicate, exp_guard_code[i])
outter_shape_env_guards = [
str(guard.expr) for guard in shape_env.guards
]
self.assertEqual(outter_shape_env_guards, exp_shape_env_guards[i])
true_graph = """\
| ExportTests |
python | ansible__ansible | test/units/module_utils/datatag/test_datatag.py | {
"start": 5163,
"end": 7597
} | class ____(AnsibleProfileJSONDecoder):
_profile = RoundTripEverything
@pytest.mark.parametrize("untaggable_instance", [None, True, False])
def test_silent_untaggable(untaggable_instance):
post_tag = ExampleSingletonTag().tag(untaggable_instance)
assert post_tag is untaggable_instance
def test_try_tag() -> None:
untaggable_value = object()
taggable_value = "Hello"
assert ExampleSingletonTag().try_tag(untaggable_value) is untaggable_value
assert ExampleSingletonTag.is_tagged_on(ExampleSingletonTag().try_tag(taggable_value))
def no_op() -> None:
"""No-op function."""
@pytest.mark.parametrize("untaggable_instance", [object(), no_op])
def test_fatal_untaggable(untaggable_instance):
with pytest.raises(NotTaggableError):
ExampleSingletonTag().tag(untaggable_instance)
def test_ensure_empty_mapping_singleton() -> None:
assert type(_EMPTY_INTERNAL_TAGS_MAPPING)() is _EMPTY_INTERNAL_TAGS_MAPPING
assert copy.copy(_EMPTY_INTERNAL_TAGS_MAPPING) is _EMPTY_INTERNAL_TAGS_MAPPING
assert copy.deepcopy(_EMPTY_INTERNAL_TAGS_MAPPING) is _EMPTY_INTERNAL_TAGS_MAPPING
def test_get_tags_mapping_from_magicmock() -> None:
assert _try_get_internal_tags_mapping(unittest.mock.MagicMock()) is _EMPTY_INTERNAL_TAGS_MAPPING
def test_unexpected_reduce_type() -> None:
with pytest.raises(TypeError):
ExampleSingletonTag().tag("")._reduce("str") # type: ignore
_str_override_method_args: t.Dict[str, t.Tuple[tuple, t.Dict[str, t.Any]]] = {
'partition': ((' ',), {}),
'removeprefix': ((' ',), {}),
'removesuffix': ((' ',), {}),
}
def test_tag_types() -> None:
value = ExampleSingletonTag().tag(AnotherExampleSingletonTag().tag("hi"))
assert AnsibleTagHelper.tag_types(value) == {ExampleSingletonTag, AnotherExampleSingletonTag}
assert AnsibleTagHelper.tag_types("hi") is _empty_frozenset
def test_deprecated_invalid_date_type() -> None:
with pytest.raises(TypeError):
Deprecated(msg="test", date=42) # type: ignore
def test_tag_with_invalid_tag_type() -> None:
with pytest.raises(TypeError):
AnsibleTagHelper.tag("", ["not a tag"]) # type: ignore
def test_tag_value_type_specified_untagged() -> None:
value = AnsibleTagHelper.tag(iter((1, 2, 3)), tuple(), value_type=list)
assert isinstance(value, list)
assert value == [1, 2, 3]
@dataclasses.dataclass(frozen=True)
| RoundTripEverythingDecoder |
python | pypa__pipenv | pipenv/vendor/click/types.py | {
"start": 7427,
"end": 11333
} | class ____(ParamType):
"""The choice type allows a value to be checked against a fixed set
of supported values. All of these values have to be strings.
You should only pass a list or tuple of choices. Other iterables
(like generators) may lead to surprising results.
The resulting value will always be one of the originally passed choices
regardless of ``case_sensitive`` or any ``ctx.token_normalize_func``
being specified.
See :ref:`choice-opts` for an example.
:param case_sensitive: Set to false to make choices case
insensitive. Defaults to true.
"""
name = "choice"
def __init__(self, choices: t.Sequence[str], case_sensitive: bool = True) -> None:
self.choices = choices
self.case_sensitive = case_sensitive
def to_info_dict(self) -> t.Dict[str, t.Any]:
info_dict = super().to_info_dict()
info_dict["choices"] = self.choices
info_dict["case_sensitive"] = self.case_sensitive
return info_dict
def get_metavar(self, param: "Parameter") -> str:
choices_str = "|".join(self.choices)
# Use curly braces to indicate a required argument.
if param.required and param.param_type_name == "argument":
return f"{{{choices_str}}}"
# Use square braces to indicate an option or optional argument.
return f"[{choices_str}]"
def get_missing_message(self, param: "Parameter") -> str:
return _("Choose from:\n\t{choices}").format(choices=",\n\t".join(self.choices))
def convert(
self, value: t.Any, param: t.Optional["Parameter"], ctx: t.Optional["Context"]
) -> t.Any:
# Match through normalization and case sensitivity
# first do token_normalize_func, then lowercase
# preserve original `value` to produce an accurate message in
# `self.fail`
normed_value = value
normed_choices = {choice: choice for choice in self.choices}
if ctx is not None and ctx.token_normalize_func is not None:
normed_value = ctx.token_normalize_func(value)
normed_choices = {
ctx.token_normalize_func(normed_choice): original
for normed_choice, original in normed_choices.items()
}
if not self.case_sensitive:
normed_value = normed_value.casefold()
normed_choices = {
normed_choice.casefold(): original
for normed_choice, original in normed_choices.items()
}
if normed_value in normed_choices:
return normed_choices[normed_value]
choices_str = ", ".join(map(repr, self.choices))
self.fail(
ngettext(
"{value!r} is not {choice}.",
"{value!r} is not one of {choices}.",
len(self.choices),
).format(value=value, choice=choices_str, choices=choices_str),
param,
ctx,
)
def __repr__(self) -> str:
return f"Choice({list(self.choices)})"
def shell_complete(
self, ctx: "Context", param: "Parameter", incomplete: str
) -> t.List["CompletionItem"]:
"""Complete choices that start with the incomplete value.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from pipenv.vendor.click.shell_completion import CompletionItem
str_choices = map(str, self.choices)
if self.case_sensitive:
matched = (c for c in str_choices if c.startswith(incomplete))
else:
incomplete = incomplete.lower()
matched = (c for c in str_choices if c.lower().startswith(incomplete))
return [CompletionItem(c) for c in matched]
| Choice |
python | huggingface__transformers | tests/models/video_llama_3/test_processing_video_llama_3.py | {
"start": 1371,
"end": 13201
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = VideoLlama3Processor
model_id = "lkhl/VideoLLaMA3-2B-Image-HF"
@classmethod
def _setup_from_pretrained(cls, model_id, **kwargs):
return super()._setup_from_pretrained(model_id, patch_size=4, max_pixels=56 * 56, min_pixels=28 * 28, **kwargs)
@classmethod
def _setup_test_attributes(cls, processor):
cls.image_token = processor.image_token
def prepare_image_inputs(self, batch_size: int | None = None):
"""This function prepares a list of PIL images for testing"""
if batch_size is None:
return prepare_image_inputs()[0]
if batch_size < 1:
raise ValueError("batch_size must be greater than 0")
return prepare_image_inputs() * batch_size
# Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_get_num_vision_tokens
def test_get_num_vision_tokens(self):
"Tests general functionality of the helper used internally in vLLM"
processor = self.get_processor()
output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)])
self.assertTrue("num_image_tokens" in output)
self.assertEqual(len(output["num_image_tokens"]), 3)
self.assertTrue("num_image_patches" in output)
self.assertEqual(len(output["num_image_patches"]), 3)
@require_torch
@require_av
def _test_apply_chat_template(
self,
modality: str,
batch_size: int,
return_tensors: str,
input_name: str,
processor_name: str,
input_data: list[str],
):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
if processor_name not in self.processor_class.get_attributes():
self.skipTest(f"{processor_name} attribute not present in {self.processor_class}")
batch_messages = [
[
{
"role": "user",
"content": [{"type": "text", "text": "Describe this."}],
},
]
] * batch_size
# Test that jinja can be applied
formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False)
self.assertEqual(len(formatted_prompt), batch_size)
# Test that tokenizing with template and directly with `self.tokenizer` gives same output
formatted_prompt_tokenized = processor.apply_chat_template(
batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors
)
add_special_tokens = True
if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token):
add_special_tokens = False
tok_output = processor.tokenizer(
formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens
)
expected_output = tok_output.input_ids
self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist())
# Test that kwargs passed to processor's `__call__` are actually used
tokenized_prompt_100 = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
padding="max_length",
truncation=True,
return_tensors=return_tensors,
max_length=100,
)
self.assertEqual(len(tokenized_prompt_100[0]), 100)
# Test that `return_dict=True` returns text related inputs in the dict
out_dict_text = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors=return_tensors,
)
self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"]))
self.assertEqual(len(out_dict_text["input_ids"]), batch_size)
self.assertEqual(len(out_dict_text["attention_mask"]), batch_size)
# Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict
for idx, url in enumerate(input_data[:batch_size]):
batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}]
out_dict = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors=return_tensors,
num_frames=2, # by default no more than 2 frames, otherwise too slow
)
input_name = getattr(self, input_name)
self.assertTrue(input_name in out_dict)
self.assertEqual(len(out_dict["input_ids"]), batch_size)
self.assertEqual(len(out_dict["attention_mask"]), batch_size)
if modality == "video":
# qwen pixels don't scale with bs same way as other models, calculate expected video token count based on video_grid_thw
expected_video_token_count = 0
for thw in out_dict["video_grid_thw"]:
expected_video_token_count += thw[0] * thw[1] * thw[2]
mm_len = expected_video_token_count
else:
mm_len = batch_size * 192
self.assertEqual(len(out_dict[input_name]), mm_len)
return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list}
for k in out_dict:
self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors])
@require_av
def test_apply_chat_template_video_frame_sampling(self):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
signature = inspect.signature(processor.__call__)
if "videos" not in {*signature.parameters.keys()} or (
signature.parameters.get("videos") is not None
and signature.parameters["videos"].annotation == inspect._empty
):
self.skipTest("Processor doesn't accept videos at input")
messages = [
[
{
"role": "user",
"content": [
{"type": "video"},
{"type": "text", "text": "What is shown in this video?"},
],
},
]
]
formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
self.assertEqual(len(formatted_prompt), 1)
formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids
self.assertListEqual(expected_output, formatted_prompt_tokenized)
out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)
self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"])
# Add video URL for return dict and load with `num_frames` arg
messages[0][0]["content"][0] = {
"type": "video",
"url": "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Big_Buck_Bunny_720_10s_10MB.mp4",
}
num_frames = 3
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
num_frames=num_frames,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 180)
# Load with `fps` arg
fps = 1
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
fps=fps,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 80)
# Load with `fps` and `num_frames` args, should raise an error
with self.assertRaises(ValueError):
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
fps=fps,
num_frames=num_frames,
)
# Load without any arg should load the whole video
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 1200)
# Load video as a list of frames (i.e. images). NOTE: each frame should have same size
# because we assume they come from one video
messages[0][0]["content"][0] = {
"type": "video",
"url": [
"https://www.ilankelman.org/stopsigns/australia.jpg",
"https://www.ilankelman.org/stopsigns/australia.jpg",
],
}
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 192)
# When the inputs are frame URLs/paths we expect that those are already
# sampled and will raise an error is asked to sample again.
with self.assertRaisesRegex(
ValueError, "Sampling frames from a list of images is not supported! Set `do_sample_frames=False`"
):
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
do_sample_frames=True,
)
def test_kwargs_overrides_custom_image_processor_kwargs(self):
processor = self.get_processor()
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input, return_tensors="pt")
self.assertEqual(inputs[self.images_input_name].shape[0], 52)
inputs = processor(text=input_str, images=image_input, max_pixels=56 * 56 * 4, return_tensors="pt")
self.assertEqual(inputs[self.images_input_name].shape[0], 52)
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = self.get_processor()
input_str = self.prepare_text_inputs(batch_size=2, modalities="image")
image_input = self.prepare_image_inputs(batch_size=2)
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=None,
padding=True,
)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=True,
padding=True,
max_length=20,
)
| VideoLlama3ProcessorTest |
python | numba__numba | numba/tests/test_random.py | {
"start": 4627,
"end": 6940
} | class ____(BaseTest):
"""
Test low-level internals of the implementation.
"""
def _check_get_set_state(self, ptr):
state = _helperlib.rnd_get_state(ptr)
i, ints = state
self.assertIsInstance(i, int)
self.assertIsInstance(ints, list)
self.assertEqual(len(ints), N)
j = (i * 100007) % N
ints = [i * 3 for i in range(N)]
# Roundtrip
_helperlib.rnd_set_state(ptr, (j, ints))
self.assertEqual(_helperlib.rnd_get_state(ptr), (j, ints))
def _check_shuffle(self, ptr):
# We test shuffling against CPython
r = random.Random()
ints, index = _copy_py_state(r, ptr)
# Force shuffling in CPython generator
for i in range(index, N + 1, 2):
r.random()
_helperlib.rnd_shuffle(ptr)
# Check new integer keys
mt = r.getstate()[1]
ints, index = mt[:-1], mt[-1]
self.assertEqual(_helperlib.rnd_get_state(ptr)[1], list(ints))
def _check_init(self, ptr):
# We use the same integer seeding as Numpy
# (CPython is different: it treats the integer as a byte array)
r = np.random.RandomState()
for i in [0, 1, 125, 2**32 - 5]:
# Need to cast to a C-sized int (for Numpy <= 1.7)
r.seed(np.uint32(i))
st = r.get_state()
ints = list(st[1])
index = st[2]
assert index == N # sanity check
_helperlib.rnd_seed(ptr, i)
self.assertEqual(_helperlib.rnd_get_state(ptr), (index, ints))
def _check_perturb(self, ptr):
states = []
for i in range(10):
# Initialize with known state
_helperlib.rnd_seed(ptr, 0)
# Perturb with entropy
_helperlib.rnd_seed(ptr, os.urandom(512))
states.append(tuple(_helperlib.rnd_get_state(ptr)[1]))
# No two identical states
self.assertEqual(len(set(states)), len(states))
def test_get_set_state(self):
self._check_get_set_state(get_py_state_ptr())
def test_shuffle(self):
self._check_shuffle(get_py_state_ptr())
def test_init(self):
self._check_init(get_py_state_ptr())
def test_perturb(self):
self._check_perturb(get_py_state_ptr())
| TestInternals |
python | matplotlib__matplotlib | lib/matplotlib/cbook.py | {
"start": 655,
"end": 3464
} | class ____:
"""
A class to carry exception information around.
This is used to store and later raise exceptions. It's an alternative to
directly storing Exception instances that circumvents traceback-related
issues: caching tracebacks can keep user's objects in local namespaces
alive indefinitely, which can lead to very surprising memory issues for
users and result in incorrect tracebacks.
"""
def __init__(self, cls, *args, notes=None):
self._cls = cls
self._args = args
self._notes = notes if notes is not None else []
@classmethod
def from_exception(cls, exc):
return cls(type(exc), *exc.args, notes=getattr(exc, "__notes__", []))
def to_exception(self):
exc = self._cls(*self._args)
for note in self._notes:
exc.add_note(note)
return exc
def _get_running_interactive_framework():
"""
Return the interactive framework whose event loop is currently running, if
any, or "headless" if no event loop can be started, or None.
Returns
-------
Optional[str]
One of the following values: "qt", "gtk3", "gtk4", "wx", "tk",
"macosx", "headless", ``None``.
"""
# Use ``sys.modules.get(name)`` rather than ``name in sys.modules`` as
# entries can also have been explicitly set to None.
QtWidgets = (
sys.modules.get("PyQt6.QtWidgets")
or sys.modules.get("PySide6.QtWidgets")
or sys.modules.get("PyQt5.QtWidgets")
or sys.modules.get("PySide2.QtWidgets")
)
if QtWidgets and QtWidgets.QApplication.instance():
return "qt"
Gtk = sys.modules.get("gi.repository.Gtk")
if Gtk:
if Gtk.MAJOR_VERSION == 4:
from gi.repository import GLib
if GLib.main_depth():
return "gtk4"
if Gtk.MAJOR_VERSION == 3 and Gtk.main_level():
return "gtk3"
wx = sys.modules.get("wx")
if wx and wx.GetApp():
return "wx"
tkinter = sys.modules.get("tkinter")
if tkinter:
codes = {tkinter.mainloop.__code__, tkinter.Misc.mainloop.__code__}
for frame in sys._current_frames().values():
while frame:
if frame.f_code in codes:
return "tk"
frame = frame.f_back
# Preemptively break reference cycle between locals and the frame.
del frame
macosx = sys.modules.get("matplotlib.backends._macosx")
if macosx and macosx.event_loop_is_running():
return "macosx"
if not _c_internal_utils.display_is_valid():
return "headless"
return None
def _exception_printer(exc):
if _get_running_interactive_framework() in ["headless", None]:
raise exc
else:
traceback.print_exc()
| _ExceptionInfo |
python | openai__openai-python | src/openai/types/beta/assistant_stream_event.py | {
"start": 5820,
"end": 6051
} | class ____(BaseModel):
data: Message
"""
Represents a message within a
[thread](https://platform.openai.com/docs/api-reference/threads).
"""
event: Literal["thread.message.incomplete"]
| ThreadMessageIncomplete |
python | prabhupant__python-ds | data_structures/binary_trees/top_view.py | {
"start": 77,
"end": 909
} | class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
self.col = None
def top_view(root):
if not root:
return
queue = []
col = 0
d = {}
queue.append(root)
root.col = col
while queue:
root = queue.pop(0)
col = root.col
if col not in d:
d[col] = root.val
if root.left:
queue.append(root.left)
root.left.col = col - 1
if root.right:
queue.append(root.right)
root.right.col = col + 1
for i in sorted(d):
print(d[i], end=" ")
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.right = Node(4)
root.left.right.right = Node(5)
root.left.right.right.right = Node(6)
top_view(root) | Node |
python | rapidsai__cudf | python/cudf/cudf/core/udf/masked_typing.py | {
"start": 14016,
"end": 15086
} | class ____(AbstractTemplate):
"""
Typing for the builtin function abs. Returns the same
type as input except for boolean values which are converted
to integer.
This follows the expected result from the builtin abs function
which differs from numpy - np.abs returns a bool whereas abs
itself performs the cast.
"""
def generic(self, args, kws):
if isinstance(args[0], MaskedType):
if isinstance(args[0].value_type, (StringView, UDFString)):
# reject string types
return
else:
return_type = self.context.resolve_function_type(
self.key, (args[0].value_type,), kws
).return_type
if return_type in types.signed_domain:
# promote to unsigned to avoid overflow
return_type = from_dtype(np.dtype("u" + return_type.name))
return nb_signature(MaskedType(return_type), args[0])
@cuda_decl_registry.register_global(api.pack_return)
| MaskedScalarAbsoluteValue |
python | readthedocs__readthedocs.org | readthedocs/oauth/services/githubapp.py | {
"start": 1320,
"end": 25125
} | class ____(Service):
vcs_provider_slug = GITHUB_APP
allauth_provider = GitHubAppProvider
supports_build_status = True
supports_clone_token = True
supports_commenting = True
def __init__(self, installation: GitHubAppInstallation):
self.installation = installation
@cached_property
def gh_app_client(self):
return get_gh_app_client()
@lru_cache
def get_app_installation(self) -> GHInstallation:
"""
Return the installation object from the GitHub API.
Useful to interact with installation related endpoints.
If the installation is no longer accessible, this will raise a GithubException.
"""
return self.gh_app_client.get_app_installation(
self.installation.installation_id,
)
@cached_property
def installation_client(self) -> Github:
"""Return a client authenticated as the GitHub installation to interact with the GH API."""
return self.gh_app_client.get_github_for_installation(self.installation.installation_id)
@classmethod
def for_project(cls, project):
"""
Return a GitHubAppService for the installation linked to the project.
Since this service only works for projects that have a remote repository,
and are linked to a GitHub App installation,
this returns only one service or None.
"""
if not project.remote_repository or not project.remote_repository.github_app_installation:
return None
yield cls(project.remote_repository.github_app_installation)
@classmethod
def for_user(cls, user):
"""
Return a GitHubAppService for each installation accessible to the user.
In order to get the installations accessible to the user, we need to use
the GitHub API authenticated as the user, making use of the user's access token
(not the installation token).
See:
- https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/authenticating-with-a-github-app-on-behalf-of-a-user
- https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/generating-a-user-access-token-for-a-github-app
- https://docs.github.com/en/rest/apps/installations?apiVersion=2022-11-28#list-app-installations-accessible-to-the-user-access-token
.. note::
If the installation wasn't in our database, we create it
(but we don't sync the repositories, since the caller should be responsible for that).
.. note::
User access tokens expire after 8 hours, but our OAuth2 client should handle refreshing the token.
But, the refresh token expires after 6 months, in order to refresh that token,
the user needs to sign in using GitHub again (just a normal sign in, not a re-authorization or sign-up).
"""
social_accounts = SocialAccount.objects.filter(
user=user,
provider=cls.allauth_provider.id,
)
for account in social_accounts:
oauth2_client = get_oauth2_client(account)
resp = oauth2_client.get("https://api.github.com/user/installations")
if resp.status_code != 200:
log.info(
"Failed to fetch installations from GitHub",
user=user,
account_id=account.uid,
status_code=resp.status_code,
response=resp.json(),
)
continue
for gh_installation in resp.json()["installations"]:
(
installation,
_,
) = GitHubAppInstallation.objects.get_or_create_installation(
installation_id=gh_installation["id"],
target_id=gh_installation["target_id"],
target_type=gh_installation["target_type"],
extra_data={"installation": gh_installation},
)
yield cls(installation)
@classmethod
def sync_user_access(cls, user):
"""
Sync the user's access to the provider repositories and organizations.
Since we are using a GitHub App, we don't have a way to check all the repositories and organizations
the user has access to or if it lost access to a repository or organization.
Our webhooks should keep permissions in sync, but just in case,
we first sync the repositories from all installations accessible to the user (refresh access to new repositories),
and then we sync each repository the user has access to (check if the user lost access to a repository, or his access level changed).
This method is called when the user logs in or when the user manually clicks on "Sync repositories".
"""
has_error = False
# Refresh access to all installations accessible to the user.
for service in cls.for_user(user):
try:
service.sync()
except SyncServiceError:
# Don't stop the sync if one installation fails,
# as we should try to sync all installations.
has_error = True
# Update the access to each repository the user has access to.
queryset = RemoteRepository.objects.filter(
remote_repository_relations__user=user,
vcs_provider=cls.vcs_provider_slug,
).select_related("github_app_installation")
# Group by github_app_installation, so we don't create multiple clients.
grouped_installations = groupby(
queryset,
key=lambda x: x.github_app_installation,
)
for installation, remote_repos in grouped_installations:
service = cls(installation)
service.update_or_create_repositories(
[int(remote_repo.remote_id) for remote_repo in remote_repos]
)
# Update access to each organization the user has access to.
queryset = RemoteOrganization.objects.filter(
remote_organization_relations__user=user,
vcs_provider=cls.vcs_provider_slug,
)
for remote_organization in queryset:
remote_repo = remote_organization.repositories.select_related(
"github_app_installation"
).first()
# NOTE: this should never happen, unless our data is out of sync
# (we delete orphaned organizations when deleting projects).
if not remote_repo:
log.info(
"Remote organization without repositories detected, deleting.",
organization_login=remote_organization.slug,
remote_id=remote_organization.remote_id,
)
remote_organization.delete()
continue
service = cls(remote_repo.github_app_installation)
service.update_or_create_organization(remote_organization.slug)
if has_error:
raise SyncServiceError()
def sync(self):
"""
Sync all repositories and organizations that are accessible to the installation.
Repositories that are no longer accessible to the installation are removed from the database
only if they are not linked to a project. This is in case the user wants to grant access to the repository again.
If a remote organization doesn't have any repositories after removing the repositories,
we remove the organization from the database.
"""
try:
app_installation = self.get_app_installation()
except GithubException as e:
log.info(
"Failed to get installation",
installation_id=self.installation.installation_id,
exc_info=True,
)
if e.status == 404:
# The app was uninstalled, we remove the installation from the database.
self.installation.delete()
raise SyncServiceError()
if app_installation.suspended_at is not None:
log.info(
"Installation is suspended",
installation_id=self.installation.installation_id,
suspended_at=app_installation.suspended_at,
)
# The installation is suspended, we don't have access to it anymore,
# so we just delete it from the database.
self.installation.delete()
raise SyncServiceError()
remote_repositories = []
for gh_repo in app_installation.get_repos():
remote_repo = self._create_or_update_repository_from_gh(gh_repo)
if remote_repo:
remote_repositories.append(remote_repo)
repos_to_delete = self.installation.repositories.exclude(
pk__in=[repo.pk for repo in remote_repositories],
).values_list("remote_id", flat=True)
self.installation.delete_repositories(repos_to_delete)
def update_repository(self, remote_repository: RemoteRepository):
"""
Update a single repository from the given remote repository.
.. note::
Unlike the other providers, this method doesn't update the
`remote_repository` object itself. If you need the updated object,
fetch it again from the database.
"""
self.update_or_create_repositories([remote_repository.remote_id])
def update_or_create_repositories(self, repository_ids: list[int]):
"""Update or create repositories from the given list of repository IDs."""
repositories_to_delete = []
for repository_id in repository_ids:
try:
# NOTE: we save the repository ID as a string in our database,
# in order for PyGithub to use the API to fetch the repository by ID (not by name).
# it needs to be an integer, so just in case we cast it to an integer.
repo = self.installation_client.get_repo(int(repository_id))
# GitHub will send some events from all repositories in the organization (like the members event),
# even from those that don't have the app installed. For private repositories, the previous API
# call will fail, but for public repositories we can still hit the API successfully, so we make
# an additional check using the GitHub App API, which will raise a GithubException with a 404
# status code if the app is not installed on the repository.
if not repo.private:
self.gh_app_client.get_repo_installation(owner=repo.owner.login, repo=repo.name)
except GithubException as e:
log.info(
"Failed to fetch repository from GitHub",
repository_id=repository_id,
exc_info=True,
)
# if we lost access to the repository,
# we remove the repository from the database,
# and clean up the collaborators and relations.
if e.status in [404, 403]:
repositories_to_delete.append(repository_id)
continue
self._create_or_update_repository_from_gh(repo)
if repositories_to_delete:
self.installation.delete_repositories(repositories_to_delete)
def _create_or_update_repository_from_gh(
self, gh_repo: GHRepository
) -> RemoteRepository | None:
"""
Create or update a remote repository from a GitHub repository object.
We also sync the collaborators of the repository with the database,
and create or update the organization of the repository.
"""
target_id = self.installation.target_id
target_type = self.installation.target_type
# NOTE: All the repositories should be owned by the installation account.
# The following condition should never happen, unless the previous assumption is wrong.
if gh_repo.owner.id != target_id or gh_repo.owner.type != target_type:
log.exception(
"Repository owner does not match the installation account",
repository_id=gh_repo.id,
repository_owner_id=gh_repo.owner.id,
installation_target_id=target_id,
installation_target_type=target_type,
)
return
remote_repo, _ = RemoteRepository.objects.get_or_create(
remote_id=str(gh_repo.id),
vcs_provider=self.vcs_provider_slug,
)
remote_repo.name = gh_repo.name
remote_repo.full_name = gh_repo.full_name
remote_repo.description = gh_repo.description
remote_repo.avatar_url = gh_repo.owner.avatar_url or self.default_user_avatar_url
remote_repo.ssh_url = gh_repo.ssh_url
remote_repo.html_url = gh_repo.html_url
remote_repo.private = gh_repo.private
remote_repo.default_branch = gh_repo.default_branch
remote_repo.clone_url = gh_repo.clone_url
# NOTE: Only one installation of our APP should give access to a repository.
# The following condition should only happen if our data is out of sync.
if (
remote_repo.github_app_installation
and remote_repo.github_app_installation != self.installation
):
log.info(
"Repository linked to another installation. Our data may be out of sync.",
repository_id=remote_repo.remote_id,
old_installation_id=remote_repo.github_app_installation.installation_id,
new_installation_id=self.installation.installation_id,
)
remote_repo.github_app_installation = self.installation
remote_repo.organization = None
if gh_repo.owner.type == GitHubAccountType.ORGANIZATION:
# NOTE: The owner object doesn't have all attributes of an organization,
# so we need to fetch the organization object.
remote_repo.organization = self.update_or_create_organization(gh_repo.owner.login)
remote_repo.save()
self._resync_collaborators(gh_repo, remote_repo)
return remote_repo
# NOTE: normally, this should cache only one organization at a time, but just in case...
@lru_cache(maxsize=50)
def _get_gh_organization(self, login: str) -> GHOrganization:
"""Get a GitHub organization object given its login identifier."""
return self.installation_client.get_organization(login)
# NOTE: normally, this should cache only one organization at a time, but just in case...
@lru_cache(maxsize=50)
def update_or_create_organization(self, login: str) -> RemoteOrganization:
"""
Create or update a remote organization from its login identifier.
We also sync the members of the organization with the database.
This doesn't sync the repositories of the organization,
since the installation is the one that lists the repositories it has access to.
This method is cached, since we need to update the organization only once per sync of an installation.
"""
gh_org = self._get_gh_organization(login)
remote_org, _ = RemoteOrganization.objects.get_or_create(
remote_id=str(gh_org.id),
vcs_provider=self.vcs_provider_slug,
)
remote_org.slug = gh_org.login
remote_org.name = gh_org.name
# NOTE: do we need the email of the organization?
remote_org.email = gh_org.email
remote_org.avatar_url = gh_org.avatar_url or self.default_org_avatar_url
remote_org.url = gh_org.html_url
remote_org.save()
self._resync_organization_members(gh_org, remote_org)
return remote_org
def _resync_collaborators(self, gh_repo: GHRepository, remote_repo: RemoteRepository):
"""
Sync collaborators of a repository with the database.
This method will remove collaborators that are no longer in the list.
See https://docs.github.com/en/rest/collaborators/collaborators?apiVersion=2022-11-28#list-repository-collaborators.
"""
collaborators = {
collaborator.id: collaborator for collaborator in gh_repo.get_collaborators()
}
remote_repo_relations_ids = []
for account in self._get_social_accounts(collaborators.keys()):
remote_repo_relation, _ = RemoteRepositoryRelation.objects.get_or_create(
remote_repository=remote_repo,
user=account.user,
account=account,
)
remote_repo_relation.admin = collaborators[int(account.uid)].permissions.admin
remote_repo_relation.save()
remote_repo_relations_ids.append(remote_repo_relation.pk)
# Remove collaborators that are no longer in the list.
RemoteRepositoryRelation.objects.filter(
remote_repository=remote_repo,
).exclude(
pk__in=remote_repo_relations_ids,
).delete()
def _get_social_accounts(self, ids):
"""Get social accounts given a list of GitHub user IDs."""
return SocialAccount.objects.filter(
uid__in=ids,
provider=self.allauth_provider.id,
).select_related("user")
def _resync_organization_members(self, gh_org: GHOrganization, remote_org: RemoteOrganization):
"""
Sync members of an organization with the database.
This method will remove members that are no longer in the list.
"""
members = {member.id: member for member in gh_org.get_members()}
remote_org_relations_ids = []
for account in self._get_social_accounts(members.keys()):
remote_org_relation, _ = RemoteOrganizationRelation.objects.get_or_create(
remote_organization=remote_org,
user=account.user,
account=account,
)
remote_org_relations_ids.append(remote_org_relation.pk)
# Remove members that are no longer in the list.
RemoteOrganizationRelation.objects.filter(
remote_organization=remote_org,
).exclude(
pk__in=remote_org_relations_ids,
).delete()
def send_build_status(self, *, build, commit, status):
"""
Create a commit status on GitHub for the given build.
See https://docs.github.com/en/rest/commits/statuses?apiVersion=2022-11-28#create-a-commit-status.
"""
project = build.project
remote_repo = project.remote_repository
if status == BUILD_STATUS_SUCCESS:
target_url = build.version.get_absolute_url()
else:
target_url = build.get_full_url()
state = SELECT_BUILD_STATUS[status]["github"]
description = SELECT_BUILD_STATUS[status]["description"]
context = f"{settings.RTD_BUILD_STATUS_API_NAME}:{project.slug}"
try:
# NOTE: we use the lazy option to avoid fetching the repository object,
# since we only need the object to interact with the commit status API.
gh_repo = self.installation_client.get_repo(int(remote_repo.remote_id), lazy=True)
gh_repo.get_commit(commit).create_status(
state=state,
target_url=target_url,
description=description,
context=context,
)
return True
except GithubException:
log.info(
"Failed to send build status to GitHub",
project=project.slug,
build=build.pk,
commit=commit,
status=status,
exc_info=True,
)
return False
def get_clone_token(self, project):
"""
Return a token for HTTP-based Git access to the repository.
The token is scoped to have read-only access to the content of the repository attached to the project.
The token expires after one hour (this is given by GitHub and can't be changed).
See:
- https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/authenticating-as-a-github-app-installation
- https://docs.github.com/en/rest/apps/apps?apiVersion=2022-11-28#create-an-installation-access-token-for-an-app
"""
try:
# TODO: Use self.gh_app_client.get_access_token instead,
# once https://github.com/PyGithub/PyGithub/pull/3287 is merged.
_, response = self.gh_app_client.requester.requestJsonAndCheck(
"POST",
f"/app/installations/{self.installation.installation_id}/access_tokens",
headers=self.gh_app_client._get_headers(),
input={
"repository_ids": [int(project.remote_repository.remote_id)],
"permissions": {
"contents": "read",
},
},
)
token = response["token"]
return f"x-access-token:{token}"
except GithubException:
log.info(
"Failed to get clone token for project",
installation_id=self.installation.installation_id,
project=project.slug,
exc_info=True,
)
return None
def setup_webhook(self, project, integration=None) -> bool:
"""When using a GitHub App, we don't need to set up a webhook."""
return True
def update_webhook(self, project, integration=None) -> bool:
"""When using a GitHub App, we don't need to set up a webhook."""
return True
def post_comment(self, build, comment: str, create_new: bool = True):
"""
Post a comment on the pull request attached to the build.
Since repositories can be linked to multiple projects, we post a comment per project.
We use an HTML comment to identify the comment for the project.
"""
project = build.project
version = build.version
if not version.is_external:
raise ValueError("Only versions from pull requests can have comments posted.")
remote_repo = project.remote_repository
# NOTE: we use the lazy option to avoid fetching the repository object,
# since we only need the object to interact with the commit status API.
gh_repo = self.installation_client.get_repo(int(remote_repo.remote_id), lazy=True)
gh_issue = gh_repo.get_issue(int(version.verbose_name))
existing_gh_comment = None
comment_marker = f"<!-- readthedocs-{project.pk} -->"
for gh_comment in gh_issue.get_comments():
# Get the comment where the author is us, and the comment belongs to the project.
# The login of the author is the name of the GitHub App, with the "[bot]" suffix.
if (
gh_comment.user.login == f"{settings.GITHUB_APP_NAME}[bot]"
and comment_marker in gh_comment.body
):
existing_gh_comment = gh_comment
break
comment = f"{comment_marker}\n{comment}"
if existing_gh_comment:
existing_gh_comment.edit(body=comment)
elif create_new:
gh_issue.create_comment(body=comment)
else:
log.debug(
"No comment to update, skipping commenting",
project=project.slug,
build=build.pk,
)
| GitHubAppService |
python | run-llama__llama_index | llama-index-integrations/postprocessor/llama-index-postprocessor-presidio/llama_index/postprocessor/presidio/base.py | {
"start": 2133,
"end": 4238
} | class ____(BaseNodePostprocessor):
"""
presidio PII Node processor.
Uses a presidio to analyse PIIs.
"""
pii_node_info_key: str = "__pii_node_info__"
entity_mapping: Dict[str, Dict] = Field(default_factory=dict)
mapping: Dict[str, str] = Field(default_factory=dict)
presidio_entities: List = Field(default_factory=list)
@classmethod
def class_name(cls) -> str:
return "PresidioPIINodePostprocessor"
def mask_pii(self, text: str) -> Tuple[str, Dict]:
analyzer = AnalyzerEngine()
results = analyzer.analyze(
text=text, language="en", entities=self.presidio_entities
)
engine = AnonymizerEngine()
engine.add_anonymizer(EntityTypeCountAnonymizer)
new_text = engine.anonymize(
text=text,
analyzer_results=results,
operators={
"DEFAULT": OperatorConfig(
"EntityTypeCountAnonymizer",
{
"entity_mapping": self.entity_mapping,
"deanonymize_mapping": self.mapping,
},
)
},
)
return new_text.text
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Postprocess nodes."""
# swap out text from nodes, with the original node mappings
new_nodes = []
for node_with_score in nodes:
node = node_with_score.node
new_text = self.mask_pii(node.get_content(metadata_mode=MetadataMode.LLM))
new_node = deepcopy(node)
new_node.excluded_embed_metadata_keys.append(self.pii_node_info_key)
new_node.excluded_llm_metadata_keys.append(self.pii_node_info_key)
new_node.metadata[self.pii_node_info_key] = self.mapping
new_node.set_content(new_text)
new_nodes.append(NodeWithScore(node=new_node, score=node_with_score.score))
return new_nodes
| PresidioPIINodePostprocessor |
python | huggingface__transformers | src/transformers/models/emu3/modeling_emu3.py | {
"start": 30868,
"end": 33478
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
base_channels = config.base_channels
in_channels = config.in_channels
double_latent = config.double_latent
latent_channels = config.latent_channels
channel_multiplier = config.channel_multiplier
out_channels = 2 * latent_channels if double_latent else latent_channels
block_in = base_channels * channel_multiplier[-1]
self.conv_in = torch.nn.Conv2d(in_channels, base_channels, kernel_size=3, stride=1, padding=1)
self.down_block = Emu3VQVAEDownBlock(config)
self.middle_block = Emu3VQVAEMiddleBlock(config, block_in)
self.norm_out = torch.nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
self.conv_out = torch.nn.Conv2d(
block_in,
out_channels,
kernel_size=3,
stride=1,
padding=1,
)
temporal_down_blocks = int(math.log2(config.temporal_downsample_factor))
self.time_conv = nn.ModuleList()
self.time_res_stack = nn.ModuleList()
for i in range(temporal_down_blocks):
conv = Emu3VQVAETemporalDownsample(out_channels, out_channels)
self.time_conv.append(conv)
for _ in range(config.num_res_blocks):
time_res_conv = Emu3VQVAETemporalResnetBlock(
in_channels=out_channels,
out_channels=out_channels,
)
self.time_res_stack.append(time_res_conv)
def forward(self, pixel_values: torch.LongTensor):
temporal_dim = pixel_values.shape[1]
pixel_values = pixel_values.reshape(-1, *pixel_values.shape[2:])
# downsampling & middle
hidden_states = self.conv_in(pixel_values)
hidden_states = self.down_block(hidden_states)
hidden_states = self.middle_block(hidden_states)
# end
hidden_states = self.norm_out(hidden_states)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv_out(hidden_states)
hidden_states = hidden_states.reshape(-1, temporal_dim, *hidden_states.shape[1:])
hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
# temporal convs
for conv in self.time_conv:
hidden_states = conv(hidden_states)
hidden_states *= torch.sigmoid(hidden_states)
for layer in self.time_res_stack:
hidden_states = layer(hidden_states)
hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
return hidden_states
| Emu3VQVAEEncoder |
python | Netflix__metaflow | test/core/tests/resume_ubf_foreach_join.py | {
"start": 72,
"end": 1743
} | class ____(MetaflowTest):
"""
Resuming from a foreach join should work.
Check that data changes in all downstream steps after resume.
"""
RESUME = True
PRIORITY = 3
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@steps(0, ["start"])
def step_start(self):
self.data = "start"
self.after = False
@steps(0, ["parallel-split"], required=True)
def split(self):
self.my_node_index = None
@steps(0, ["parallel-step"], required=True)
def inner(self):
from metaflow import current
assert_equals(4, current.parallel.num_nodes)
self.my_node_index = current.parallel.node_index
assert_equals(self.my_node_index, self.input)
@steps(0, ["join"], required=True)
def join(self, inputs):
if is_resumed():
self.data = "resume"
got = sorted([inp.my_node_index for inp in inputs])
assert_equals(list(range(4)), got)
self.after = True
else:
self.data = "run"
raise ResumeFromHere()
@steps(2, ["all"])
def step_all(self):
if self.after:
assert_equals("resume", self.data)
else:
assert_equals("start", self.data)
def check_results(self, flow, checker):
from itertools import product
checker.assert_artifact("start", "data", "start")
checker.assert_artifact("end", "data", "resume")
| ResumeUBFJoinTest |
python | pola-rs__polars | pyo3-polars/example/derive_expression/expression_lib/expression_lib/extension.py | {
"start": 1666,
"end": 2149
} | class ____:
def __init__(self, expr: pl.Expr):
self._expr = expr
def __getattr__(self, attr: str) -> Callable[..., pl.Expr]:
if attr in ("change_time_zone", "is_leap_year"):
def func(*args: Any, **kwargs: Any) -> pl.Expr:
return getattr(date_util, attr)(self._expr, *args, **kwargs)
return func
raise AttributeError(f"{self.__class__} has no attribute {attr}")
@pl.api.register_expr_namespace("panic")
| DateUtil |
python | huggingface__transformers | src/transformers/models/smolvlm/image_processing_smolvlm.py | {
"start": 1997,
"end": 10203
} | class ____(ImagesKwargs, total=False):
"""
do_image_splitting (`bool`, *optional*, defaults to `True`):
Whether to split the image into sub-images concatenated with the original image. They are split into patches
such that each patch has a size of `max_image_size["height"]` x `max_image_size["width"]`.
max_image_size (`Dict`, *optional*, defaults to `{"longest_edge": 364}`):
Maximum resolution of the patches of images accepted by the model. This is a dictionary containing the key "longest_edge".
return_row_col_info (`bool`, *optional*, defaults to `False`):
Whether to return the row and column information of the images.
"""
do_image_splitting: bool
max_image_size: dict[str, int]
return_row_col_info: bool
MAX_IMAGE_SIZE = 4096 # 4k resolution as absolute maximum
def _resize_output_size_rescale_to_max_len(
height: int, width: int, min_len: Optional[int] = 1, max_len: Optional[int] = None
) -> tuple[int, int]:
"""
Get the output size of the image after resizing given a dictionary specifying the max and min sizes.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
min_len (`int`, *optional*, defaults to 1):
Minimum size of the output image.
max_len (`int`, *optional*, defaults to the maximum size of the image):
Maximum size of the output image.
Returns:
The output size of the image after resizing.
"""
max_len = max(height, width) if max_len is None else max_len
aspect_ratio = width / height
if width >= height:
width = max_len
height = int(width / aspect_ratio)
if height % 2 != 0:
height += 1
elif height > width:
height = max_len
width = int(height * aspect_ratio)
if width % 2 != 0:
width += 1
# Avoid resizing to a size smaller than min_len
height = max(height, min_len)
width = max(width, min_len)
return height, width
def _resize_output_size_scale_below_upper_bound(
height: int, width: int, max_len: Optional[dict[str, int]] = None
) -> tuple[int, int]:
"""
Get the output size of the image after resizing given a dictionary specifying the max and min sizes.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
max_len (`dict[str, int]`, *optional*, defaults to the maximum size of the image):
Defines the maximum dimensions of the image.
Returns:
The output size of the image after resizing.
"""
max_len = max(height, width) if max_len is None else max_len
aspect_ratio = width / height
if width >= height and width > max_len:
width = max_len
height = int(width / aspect_ratio)
elif height > width and height > max_len:
height = max_len
width = int(height * aspect_ratio)
# Avoid resizing to a size smaller than 1
height = max(height, 1)
width = max(width, 1)
return height, width
def get_resize_output_image_size(
image,
resolution_max_side: int,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> tuple[int, int]:
"""
Get the output size of the image after resizing given a dictionary specifying the max and min sizes.
Args:
image (`np.ndarray`):
Image to resize.
resolution_max_side (`int`):
The longest edge of the image will be resized to this value. The shortest edge will be resized to keep the
input aspect ratio.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
The output size of the image after resizing.
"""
height, width = get_image_size(image, channel_dim=input_data_format)
# Find the output size, when rescaling the longest edge to max_len and preserving the aspect ratio
height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=resolution_max_side)
# Find the output size when scaling the image to be below the MAX_IMAGE_SIZE
height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=MAX_IMAGE_SIZE)
return height, width
def get_max_height_width(
images_list: list[list[np.ndarray]], input_data_format: Optional[Union[str, ChannelDimension]] = None
) -> list[int]:
"""
Get the maximum height and width across all images in a batch.
"""
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images_list[0][0], num_channels=(1, 3, 4))
max_height = max_width = float("-inf")
for images in images_list:
for image in images:
height, width = get_image_size(image, channel_dim=input_data_format)
max_height = max(height, max_height)
max_width = max(width, max_width)
return (max_height, max_width)
def make_pixel_mask(
image: np.ndarray, output_size: tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
) -> np.ndarray:
"""
Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
Args:
image (`np.ndarray`):
Image to make the pixel mask for.
output_size (`tuple[int, int]`):
Output size of the mask.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
mask = np.zeros(output_size, dtype=np.int64)
mask[:input_height, :input_width] = 1
return mask
def convert_to_rgb(
image: np.ndarray,
palette: Optional[PIL.ImagePalette.ImagePalette] = None,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> ImageInput:
"""
Converts an image to RGB format.
Args:
image (`np.ndarray`):
The image to convert.
palette (list[int], *optional*):
The palette to use if given.
data_format (ChannelDimension or str, *optional*):
The channel dimension format for the output image. If not provided, it will be the same as the input image.
input_data_format (ChannelDimension or str, *optional*):
The channel dimension format of the input image.
"""
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image, num_channels=(1, 3, 4))
# For all transformations, we want to keep the same data format as the input image unless otherwise specified.
# The resized image from PIL will always have channels last, so find the input format first.
data_format = input_data_format if data_format is None else data_format
mode = "P" if palette is not None else None
image = to_pil_image(image, image_mode=mode, input_data_format=input_data_format)
if image.mode == "P" and palette is not None:
image.putpalette(palette)
image_rgba = image.convert("RGBA")
background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
alpha_composite = Image.alpha_composite(background, image_rgba)
alpha_composite = alpha_composite.convert("RGB")
output_array = np.array(alpha_composite)
# The image is always in channels last format after converting from a PIL image
output_array = to_channel_dimension_format(output_array, data_format, input_channel_dim=ChannelDimension.LAST)
return output_array
# FIXME Amy: make a more general crop function that isn't just centre crop
def _crop(
image: np.ndarray,
w1: int,
h1: int,
w2: int,
h2: int,
data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
if data_format is None:
data_format = infer_channel_dimension_format(image, num_channels=(1, 3, 4))
if data_format == ChannelDimension.FIRST:
image = image[:, h1:h2, w1:w2]
elif data_format == ChannelDimension.LAST:
image = image[h1:h2, w1:w2, :]
else:
raise ValueError("Invalid channel dimension format.")
return image
| SmolVLMImageProcessorKwargs |
python | facebookresearch__faiss | tests/test_io.py | {
"start": 7873,
"end": 9650
} | class ____(unittest.TestCase):
"""
test write_VectorTransform using IOWriter Pointer
and read_VectorTransform using file name
"""
def test_write_vector_transform(self):
d, n = 32, 1000
x = np.random.uniform(size=(n, d)).astype('float32')
quantizer = faiss.IndexFlatL2(d)
index = faiss.IndexIVFSpectralHash(quantizer, d, n, 8, 1.0)
index.train(x)
index.add(x)
fd, fname = tempfile.mkstemp()
os.close(fd)
try:
writer = faiss.FileIOWriter(fname)
faiss.write_VectorTransform(index.vt, writer)
del writer
vt = faiss.read_VectorTransform(fname)
assert vt.d_in == index.vt.d_in
assert vt.d_out == index.vt.d_out
assert vt.is_trained
finally:
if os.path.exists(fname):
os.unlink(fname)
"""
test write_VectorTransform using file name
and read_VectorTransform using IOWriter Pointer
"""
def test_read_vector_transform(self):
d, n = 32, 1000
x = np.random.uniform(size=(n, d)).astype('float32')
quantizer = faiss.IndexFlatL2(d)
index = faiss.IndexIVFSpectralHash(quantizer, d, n, 8, 1.0)
index.train(x)
index.add(x)
fd, fname = tempfile.mkstemp()
os.close(fd)
try:
faiss.write_VectorTransform(index.vt, fname)
reader = faiss.FileIOReader(fname)
vt = faiss.read_VectorTransform(reader)
del reader
assert vt.d_in == index.vt.d_in
assert vt.d_out == index.vt.d_out
assert vt.is_trained
finally:
if os.path.exists(fname):
os.unlink(fname)
| Test_IO_VectorTransform |
python | ray-project__ray | rllib/algorithms/dreamerv3/dreamerv3_rl_module.py | {
"start": 812,
"end": 2958
} | class ____(RLModule, abc.ABC):
@override(RLModule)
def setup(self):
super().setup()
# Gather model-relevant settings.
T = self.model_config["batch_length_T"]
symlog_obs = do_symlog_obs(
self.observation_space,
self.model_config.get("symlog_obs", "auto"),
)
model_size = self.model_config["model_size"]
# Build encoder and decoder from catalog.
self.encoder = self.catalog.build_encoder(framework=self.framework)
self.decoder = self.catalog.build_decoder(framework=self.framework)
# Build the world model (containing encoder and decoder).
self.world_model = WorldModel(
model_size=model_size,
observation_space=self.observation_space,
action_space=self.action_space,
batch_length_T=T,
encoder=self.encoder,
decoder=self.decoder,
symlog_obs=symlog_obs,
)
input_size = get_gru_units(model_size) + get_num_z_classes(
model_size
) * get_num_z_categoricals(model_size)
self.actor = ActorNetwork(
input_size=input_size,
action_space=self.action_space,
model_size=model_size,
)
self.critic = CriticNetwork(
input_size=input_size,
model_size=model_size,
)
# Build the final dreamer model (containing the world model).
self.dreamer_model = DreamerModel(
model_size=self.model_config["model_size"],
action_space=self.action_space,
world_model=self.world_model,
actor=self.actor,
critic=self.critic,
# horizon=horizon_H,
# gamma=gamma,
)
self.action_dist_cls = self.catalog.get_action_dist_cls(
framework=self.framework
)
# Initialize the critic EMA net:
self.critic.init_ema()
@override(RLModule)
def get_initial_state(self) -> Dict:
# Use `DreamerModel`'s `get_initial_state` method.
return self.dreamer_model.get_initial_state()
| DreamerV3RLModule |
python | coleifer__peewee | tests/regressions.py | {
"start": 46200,
"end": 46276
} | class ____(TestModel):
name = TextField()
mtype = ModelTypeField()
| MTF |
python | kamyu104__LeetCode-Solutions | Python/minimize-hamming-distance-after-swap-operations.py | {
"start": 29,
"end": 1278
} | class ____(object):
def minimumHammingDistance(self, source, target, allowedSwaps):
"""
:type source: List[int]
:type target: List[int]
:type allowedSwaps: List[List[int]]
:rtype: int
"""
def iter_flood_fill(adj, node, lookup, idxs):
stk = [node]
while stk:
node = stk.pop()
if node in lookup:
continue
lookup.add(node)
idxs.append(node)
for child in adj[node]:
stk.append(child)
adj = [set() for i in xrange(len(source))]
for i, j in allowedSwaps:
adj[i].add(j)
adj[j].add(i)
result = 0
lookup = set()
for i in xrange(len(source)):
if i in lookup:
continue
idxs = []
iter_flood_fill(adj, i, lookup, idxs)
source_cnt = collections.Counter([source[i] for i in idxs])
target_cnt = collections.Counter([target[i] for i in idxs])
diff = source_cnt-target_cnt
result += sum(diff.itervalues())
return result
# Time: O(n * α(n)) ~= O(n)
# Space: O(n)
import collections
| Solution |
python | spack__spack | lib/spack/spack/repo.py | {
"start": 16442,
"end": 17198
} | class ____(Indexer):
"""Lifecycle methods for virtual package providers."""
def _create(self) -> "spack.provider_index.ProviderIndex":
return spack.provider_index.ProviderIndex(repository=self.repository)
def read(self, stream):
self.index = spack.provider_index.ProviderIndex.from_json(stream, self.repository)
def update(self, pkg_fullname):
name = pkg_fullname.split(".")[-1]
is_virtual = (
not self.repository.exists(name) or self.repository.get_pkg_class(name).virtual
)
if is_virtual:
return
self.index.remove_provider(pkg_fullname)
self.index.update(pkg_fullname)
def write(self, stream):
self.index.to_json(stream)
| ProviderIndexer |
python | apache__airflow | airflow-core/src/airflow/models/crypto.py | {
"start": 2128,
"end": 3792
} | class ____:
"""
A wrapper around the real Fernet to set is_encrypted to True.
This class is only used internally to avoid changing the interface of
the get_fernet function.
"""
from cryptography.fernet import Fernet, MultiFernet
is_encrypted = True
def __init__(self, fernet: MultiFernet):
self._fernet = fernet
def decrypt(self, msg: bytes | str, ttl: int | None = None) -> bytes:
"""Decrypt with Fernet."""
return self._fernet.decrypt(msg, ttl)
def encrypt(self, msg: bytes) -> bytes:
"""Encrypt with Fernet."""
return self._fernet.encrypt(msg)
def rotate(self, msg: bytes | str) -> bytes:
"""Rotate the Fernet key for the given message."""
return self._fernet.rotate(msg)
@cache
def get_fernet() -> FernetProtocol:
"""
Deferred load of Fernet key.
This function could fail either because Cryptography is not installed
or because the Fernet key is invalid.
:return: Fernet object
:raises: airflow.exceptions.AirflowException if there's a problem trying to load Fernet
"""
from cryptography.fernet import Fernet, MultiFernet
try:
fernet_key = conf.get("core", "FERNET_KEY")
if not fernet_key:
log.warning("empty cryptography key - values will not be stored encrypted.")
return _NullFernet()
fernet = MultiFernet([Fernet(fernet_part.encode("utf-8")) for fernet_part in fernet_key.split(",")])
return _RealFernet(fernet)
except (ValueError, TypeError) as value_error:
raise AirflowException(f"Could not create Fernet object: {value_error}")
| _RealFernet |
python | readthedocs__readthedocs.org | readthedocs/api/v3/serializers.py | {
"start": 8565,
"end": 9586
} | class ____(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
"projects-versions-detail",
kwargs={
"parent_lookup_project__slug": obj.project.slug,
"version_slug": obj.slug,
},
)
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse(
"projects-versions-builds-list",
kwargs={
"parent_lookup_project__slug": obj.project.slug,
"parent_lookup_version__slug": obj.slug,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
"projects-detail",
kwargs={
"project_slug": obj.project.slug,
},
)
return self._absolute_url(path)
| VersionLinksSerializer |
python | walkccc__LeetCode | solutions/2944. Minimum Number of Coins for Fruits/2944-2.py | {
"start": 0,
"end": 446
} | class ____:
def minimumCoins(self, prices: list[int]) -> int:
n = len(prices)
# Stores (dp[i], i), where dp[i] is the minimum number of coins to acquire
# fruits[i:] (0-indexed).
minHeap = [(0, n)]
ans = 0
for i in range(n - 1, -1, -1):
while minHeap and minHeap[0][1] > (i + 1) * 2:
heapq.heappop(minHeap)
ans = prices[i] + minHeap[0][0]
heapq.heappush(minHeap, (ans, i))
return ans
| Solution |
python | pandas-dev__pandas | pandas/core/indexes/base.py | {
"start": 7649,
"end": 270082
} | class ____(IndexOpsMixin, PandasObject):
"""
Immutable sequence used for indexing and alignment.
The basic object storing axis labels for all pandas objects.
.. versionchanged:: 2.0.0
Index can hold all numpy numeric dtypes (except float16). Previously only
int64/uint64/float64 dtypes were accepted.
Parameters
----------
data : array-like (1-dimensional)
An array-like structure containing the data for the index. This could be a
Python list, a NumPy array, or a pandas Series.
dtype : str, numpy.dtype, or ExtensionDtype, optional
Data type for the output Index. If not specified, this will be
inferred from `data`.
See the :ref:`user guide <basics.dtypes>` for more usages.
copy : bool, default False
Copy input data.
name : object
Name to be stored in the index.
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible.
See Also
--------
RangeIndex : Index implementing a monotonic integer range.
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical Index.
IntervalIndex : An Index of :class:`Interval` s.
DatetimeIndex : Index of datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
PeriodIndex : Index of Period data.
Notes
-----
An Index instance can **only** contain hashable objects.
An Index instance *can not* hold numpy float16 dtype.
Examples
--------
>>> pd.Index([1, 2, 3])
Index([1, 2, 3], dtype='int64')
>>> pd.Index(list("abc"))
Index(['a', 'b', 'c'], dtype='str')
>>> pd.Index([1, 2, 3], dtype="uint8")
Index([1, 2, 3], dtype='uint8')
"""
# similar to __array_priority__, positions Index after Series and DataFrame
# but before ExtensionArray. Should NOT be overridden by subclasses.
__pandas_priority__ = 2000
# Cython methods; see github.com/cython/cython/issues/2647
# for why we need to wrap these instead of making them class attributes
# Moreover, cython will choose the appropriate-dtyped sub-function
# given the dtypes of the passed arguments
@final
def _left_indexer_unique(self, other: Self) -> npt.NDArray[np.intp]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
# similar but not identical to ov.searchsorted(sv)
return libjoin.left_join_indexer_unique(sv, ov)
@final
def _left_indexer(
self, other: Self
) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov)
joined = self._from_join_target(joined_ndarray)
return joined, lidx, ridx
@final
def _inner_indexer(
self, other: Self
) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov)
joined = self._from_join_target(joined_ndarray)
return joined, lidx, ridx
@final
def _outer_indexer(
self, other: Self
) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
# Caller is responsible for ensuring other.dtype == self.dtype
sv = self._get_join_target()
ov = other._get_join_target()
joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov)
joined = self._from_join_target(joined_ndarray)
return joined, lidx, ridx
_typ: str = "index"
_data: ExtensionArray | np.ndarray
_data_cls: type[ExtensionArray] | tuple[type[np.ndarray], type[ExtensionArray]] = (
np.ndarray,
ExtensionArray,
)
_id: object | None = None
_name: Hashable = None
# MultiIndex.levels previously allowed setting the index name. We
# don't allow this anymore, and raise if it happens rather than
# failing silently.
_no_setting_name: bool = False
_comparables: list[str] = ["name"]
_attributes: list[str] = ["name"]
@cache_readonly
def _can_hold_strings(self) -> bool:
return not is_numeric_dtype(self.dtype)
_engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = {
np.dtype(np.int8): libindex.Int8Engine,
np.dtype(np.int16): libindex.Int16Engine,
np.dtype(np.int32): libindex.Int32Engine,
np.dtype(np.int64): libindex.Int64Engine,
np.dtype(np.uint8): libindex.UInt8Engine,
np.dtype(np.uint16): libindex.UInt16Engine,
np.dtype(np.uint32): libindex.UInt32Engine,
np.dtype(np.uint64): libindex.UInt64Engine,
np.dtype(np.float32): libindex.Float32Engine,
np.dtype(np.float64): libindex.Float64Engine,
np.dtype(np.complex64): libindex.Complex64Engine,
np.dtype(np.complex128): libindex.Complex128Engine,
}
@property
def _engine_type(
self,
) -> type[libindex.IndexEngine | libindex.ExtensionEngine]:
return self._engine_types.get(self.dtype, libindex.ObjectEngine)
# whether we support partial string indexing. Overridden
# in DatetimeIndex and PeriodIndex
_supports_partial_string_indexing = False
_accessors = {"str"}
str = Accessor("str", StringMethods)
_references: BlockValuesRefs | None = None
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
data=None,
dtype=None,
copy: bool = False,
name=None,
tupleize_cols: bool = True,
) -> Self:
from pandas.core.indexes.range import RangeIndex
name = maybe_extract_name(name, data, cls)
if dtype is not None:
dtype = pandas_dtype(dtype)
data_dtype = getattr(data, "dtype", None)
refs = None
if not copy and isinstance(data, (ABCSeries, Index)):
refs = data._references
# range
if isinstance(data, (range, RangeIndex)):
result = RangeIndex(start=data, copy=copy, name=name)
if dtype is not None:
return result.astype(dtype, copy=False)
# error: Incompatible return value type (got "MultiIndex",
# expected "Self")
return result # type: ignore[return-value]
elif is_ea_or_datetimelike_dtype(dtype):
# non-EA dtype indexes have special casting logic, so we punt here
if isinstance(data, (set, frozenset)):
data = list(data)
elif is_ea_or_datetimelike_dtype(data_dtype):
pass
elif isinstance(data, (np.ndarray, ABCMultiIndex)):
if isinstance(data, ABCMultiIndex):
data = data._values
if data.dtype.kind not in "iufcbmM":
# GH#11836 we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
data = com.asarray_tuplesafe(data, dtype=_dtype_obj)
elif isinstance(data, (ABCSeries, Index)):
# GH 56244: Avoid potential inference on object types
pass
elif is_scalar(data):
raise cls._raise_scalar_data_error(data)
elif hasattr(data, "__array__"):
return cls(np.asarray(data), dtype=dtype, copy=copy, name=name)
elif not is_list_like(data) and not isinstance(data, memoryview):
# 2022-11-16 the memoryview check is only necessary on some CI
# builds, not clear why
raise cls._raise_scalar_data_error(data)
else:
if tupleize_cols:
# GH21470: convert iterable to list before determining if empty
if is_iterator(data):
data = list(data)
if data and all(isinstance(e, tuple) for e in data):
# we must be all tuples, otherwise don't construct
# 10697
from pandas.core.indexes.multi import MultiIndex
# error: Incompatible return value type (got "MultiIndex",
# expected "Self")
return MultiIndex.from_tuples( # type: ignore[return-value]
data, names=name
)
# other iterable of some kind
if not isinstance(data, (list, tuple)):
# we allow set/frozenset, which Series/sanitize_array does not, so
# cast to list here
data = list(data)
if len(data) == 0:
# unlike Series, we default to object dtype:
data = np.array(data, dtype=object)
if len(data) and isinstance(data[0], tuple):
# Ensure we get 1-D array of tuples instead of 2D array.
data = com.asarray_tuplesafe(data, dtype=_dtype_obj)
try:
arr = sanitize_array(data, None, dtype=dtype, copy=copy)
except ValueError as err:
if "index must be specified when data is not list-like" in str(err):
raise cls._raise_scalar_data_error(data) from err
if "Data must be 1-dimensional" in str(err):
raise ValueError("Index data must be 1-dimensional") from err
raise
arr = ensure_wrapped_if_datetimelike(arr)
klass = cls._dtype_to_subclass(arr.dtype)
arr = klass._ensure_array(arr, arr.dtype, copy=False)
return klass._simple_new(arr, name, refs=refs)
@classmethod
def _ensure_array(cls, data, dtype, copy: bool):
"""
Ensure we have a valid array to pass to _simple_new.
"""
if data.ndim > 1:
# GH#13601, GH#20285, GH#27125
raise ValueError("Index data must be 1-dimensional")
elif dtype == np.float16:
# float16 not supported (no indexing engine)
raise NotImplementedError("float16 indexes are not supported")
if copy:
# asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
data = data.copy()
return data
@final
@classmethod
def _dtype_to_subclass(cls, dtype: DtypeObj):
# Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
if isinstance(dtype, ExtensionDtype):
return dtype.index_class
if dtype.kind == "M":
from pandas import DatetimeIndex
return DatetimeIndex
elif dtype.kind == "m":
from pandas import TimedeltaIndex
return TimedeltaIndex
elif dtype.kind == "O":
# NB: assuming away MultiIndex
return Index
elif issubclass(dtype.type, str) or is_numeric_dtype(dtype):
return Index
raise NotImplementedError(dtype)
# NOTE for new Index creation:
# - _simple_new: It returns new Index with the same type as the caller.
# All metadata (such as name) must be provided by caller's responsibility.
# Using _shallow_copy is recommended because it fills these metadata
# otherwise specified.
# - _shallow_copy: It returns new Index with the same type (using
# _simple_new), but fills caller's metadata otherwise specified. Passed
# kwargs will overwrite corresponding metadata.
# See each method's docstring.
@classmethod
def _simple_new(
cls,
values: ArrayLike,
name: Hashable | None = None,
refs: BlockValuesRefs | None = None,
) -> Self:
"""
We require that we have a dtype compat for the values. If we are passed
a non-dtype compat, then coerce using the constructor.
Must be careful not to recurse.
"""
assert isinstance(values, cls._data_cls), type(values)
result = object.__new__(cls)
result._data = values
result._name = name
result._cache = {}
result._reset_identity()
if refs is not None:
result._references = refs
else:
result._references = BlockValuesRefs()
result._references.add_index_reference(result)
return result
@classmethod
def _with_infer(cls, *args, **kwargs):
"""
Constructor that uses the 1.0.x behavior inferring numeric dtypes
for ndarray[object] inputs.
"""
result = cls(*args, **kwargs)
if result.dtype == _dtype_obj and not result._is_multi:
# error: Argument 1 to "maybe_convert_objects" has incompatible type
# "Union[ExtensionArray, ndarray[Any, Any]]"; expected
# "ndarray[Any, Any]"
values = lib.maybe_convert_objects(result._values) # type: ignore[arg-type]
if values.dtype.kind in "iufb":
return Index(values, name=result.name)
return result
@cache_readonly
def _constructor(self) -> type[Self]:
return type(self)
@final
def _maybe_check_unique(self) -> None:
"""
Check that an Index has no duplicates.
This is typically only called via
`NDFrame.flags.allows_duplicate_labels.setter` when it's set to
True (duplicates aren't allowed).
Raises
------
DuplicateLabelError
When the index is not unique.
"""
if not self.is_unique:
msg = """Index has duplicates."""
duplicates = self._format_duplicate_message()
msg += f"\n{duplicates}"
raise DuplicateLabelError(msg)
@final
def _format_duplicate_message(self) -> DataFrame:
"""
Construct the DataFrame for a DuplicateLabelError.
This returns a DataFrame indicating the labels and positions
of duplicates in an index. This should only be called when it's
already known that duplicates are present.
Examples
--------
>>> idx = pd.Index(["a", "b", "a"])
>>> idx._format_duplicate_message()
positions
label
a [0, 2]
"""
from pandas import Series
duplicates = self[self.duplicated(keep="first")].unique()
assert len(duplicates)
out = (
Series(np.arange(len(self)), copy=False)
.groupby(self, observed=False)
.agg(list)[duplicates]
)
if self._is_multi:
# test_format_duplicate_labels_message_multi
# error: "Type[Index]" has no attribute "from_tuples" [attr-defined]
out.index = type(self).from_tuples(out.index) # type: ignore[attr-defined]
if self.nlevels == 1:
out = out.rename_axis("label")
return out.to_frame(name="positions")
# --------------------------------------------------------------------
# Index Internals Methods
def _shallow_copy(self, values, name: Hashable = no_default) -> Self:
"""
Create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence.
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
name : Label, defaults to self.name
"""
name = self._name if name is no_default else name
return self._simple_new(values, name=name, refs=self._references)
def _view(self) -> Self:
"""
fastpath to make a shallow copy, i.e. new object with same data.
"""
result = self._simple_new(self._values, name=self._name, refs=self._references)
result._cache = self._cache
return result
@final
def _rename(self, name: Hashable) -> Self:
"""
fastpath for rename if new name is already validated.
"""
result = self._view()
result._name = name
return result
@final
def is_(self, other) -> bool:
"""
More flexible, faster check like ``is`` but that works through views.
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
Other object to compare against.
Returns
-------
bool
True if both have same underlying data, False otherwise.
See Also
--------
Index.identical : Works like ``Index.is_`` but also checks metadata.
Examples
--------
>>> idx1 = pd.Index(["1", "2", "3"])
>>> idx1.is_(idx1.view())
True
>>> idx1.is_(idx1.copy())
False
"""
if self is other:
return True
elif not hasattr(other, "_id"):
return False
elif self._id is None or other._id is None:
return False
else:
return self._id is other._id
@final
def _reset_identity(self) -> None:
"""
Initializes or resets ``_id`` attribute with new object.
"""
self._id = object()
@final
def _cleanup(self) -> None:
if "_engine" in self._cache:
self._engine.clear_mapping()
@cache_readonly
def _engine(
self,
) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine:
# For base class (object dtype) we get ObjectEngine
target_values = self._get_engine_target()
if isinstance(self._values, ArrowExtensionArray) and self.dtype.kind in "Mm":
import pyarrow as pa
pa_type = self._values._pa_array.type
if pa.types.is_timestamp(pa_type):
target_values = self._values._to_datetimearray()
return libindex.DatetimeEngine(target_values._ndarray)
elif pa.types.is_duration(pa_type):
target_values = self._values._to_timedeltaarray()
return libindex.TimedeltaEngine(target_values._ndarray)
if isinstance(target_values, ExtensionArray):
if isinstance(target_values, (BaseMaskedArray, ArrowExtensionArray)):
try:
return _masked_engines[target_values.dtype.name](target_values)
except KeyError:
# Not supported yet e.g. decimal
pass
elif self._engine_type is libindex.ObjectEngine:
return libindex.ExtensionEngine(target_values)
target_values = cast(np.ndarray, target_values)
# to avoid a reference cycle, bind `target_values` to a local variable, so
# `self` is not passed into the lambda.
if target_values.dtype == bool:
return libindex.BoolEngine(target_values)
elif target_values.dtype == np.complex64:
return libindex.Complex64Engine(target_values)
elif target_values.dtype == np.complex128:
return libindex.Complex128Engine(target_values)
elif needs_i8_conversion(self.dtype):
# We need to keep M8/m8 dtype when initializing the Engine,
# but don't want to change _get_engine_target bc it is used
# elsewhere
# error: Item "ExtensionArray" of "Union[ExtensionArray,
# ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr]
target_values = self._data._ndarray # type: ignore[union-attr]
elif is_string_dtype(self.dtype) and not is_object_dtype(self.dtype):
return libindex.StringObjectEngine(target_values, self.dtype.na_value) # type: ignore[union-attr]
# error: Argument 1 to "ExtensionEngine" has incompatible type
# "ndarray[Any, Any]"; expected "ExtensionArray"
return self._engine_type(target_values) # type: ignore[arg-type]
@final
@cache_readonly
def _dir_additions_for_owner(self) -> set[str_t]:
"""
Add the string-like labels to the owner dataframe/series dir output.
If this is a MultiIndex, it's first level values are used.
"""
return {
c
for c in self.unique(level=0)[: get_option("display.max_dir_items")]
if isinstance(c, str) and c.isidentifier()
}
# --------------------------------------------------------------------
# Array-Like Methods
# ndarray compat
def __len__(self) -> int:
"""
Return the length of the Index.
"""
return len(self._data)
def __array__(self, dtype=None, copy=None) -> np.ndarray:
"""
The array interface, return my values.
"""
if copy is None:
# Note, that the if branch exists for NumPy 1.x support
return np.asarray(self._data, dtype=dtype)
return np.array(self._data, dtype=dtype, copy=copy)
def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs):
if any(isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs):
return NotImplemented
result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
if "out" in kwargs:
# e.g. test_dti_isub_tdi
return arraylike.dispatch_ufunc_with_out(
self, ufunc, method, *inputs, **kwargs
)
if method == "reduce":
result = arraylike.dispatch_reduction_ufunc(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
new_inputs = [x if x is not self else x._values for x in inputs]
result = getattr(ufunc, method)(*new_inputs, **kwargs)
if ufunc.nout == 2:
# i.e. np.divmod, np.modf, np.frexp
return tuple(self.__array_wrap__(x) for x in result)
elif method == "reduce":
result = lib.item_from_zerodim(result)
return result
elif is_scalar(result):
# e.g. matmul
return result
if result.dtype == np.float16:
result = result.astype(np.float32)
return self.__array_wrap__(result)
@final
def __array_wrap__(self, result, context=None, return_scalar=False):
"""
Gets called after a ufunc and other functions e.g. np.split.
"""
result = lib.item_from_zerodim(result)
if np.ndim(result) > 1:
# Reached in plotting tests with e.g. np.nonzero(index)
return result
return Index(result, name=self.name)
@cache_readonly
def dtype(self) -> DtypeObj:
"""
Return the dtype object of the underlying data.
See Also
--------
Index.inferred_type: Return a string of the type inferred from the values.
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.dtype
dtype('int64')
"""
return self._data.dtype
@final
def ravel(self, order: str_t = "C") -> Self:
"""
Return a view on self.
Parameters
----------
order : {'K', 'A', 'C', 'F'}, default 'C'
Specify the memory layout of the view. This parameter is not
implemented currently.
Returns
-------
Index
A view on self.
See Also
--------
numpy.ndarray.ravel : Return a flattened array.
Examples
--------
>>> s = pd.Series([1, 2, 3], index=["a", "b", "c"])
>>> s.index.ravel()
Index(['a', 'b', 'c'], dtype='object')
"""
return self[:]
    def view(self, cls=None):
        """
        Return a view of the Index with the specified dtype or a new Index instance.
        This method returns a view of the calling Index object if no arguments are
        provided. If a dtype is specified through the `cls` argument, it attempts
        to return a view of the Index with the specified dtype. Note that viewing
        the Index as a different dtype reinterprets the underlying data, which can
        lead to unexpected results for non-numeric or incompatible dtype conversions.
        Parameters
        ----------
        cls : data-type or ndarray sub-class, optional
            Data-type descriptor of the returned view, e.g., float32 or int16.
            Omitting it results in the view having the same data-type as `self`.
            This argument can also be specified as an ndarray sub-class,
            e.g., np.int64 or np.float32 which then specifies the type of
            the returned object.
        Returns
        -------
        Index or ndarray
            A view of the Index. If `cls` is None, the returned object is an Index
            view with the same dtype as the calling object. If a numeric `cls` is
            specified an ndarray view with the new dtype is returned.
        Raises
        ------
        ValueError
            If attempting to change to a dtype in a way that is not compatible with
            the original dtype's memory layout, for example, viewing an 'int64' Index
            as 'str'.
        See Also
        --------
        Index.copy : Returns a copy of the Index.
        numpy.ndarray.view : Returns a new view of array with the same data.
        Examples
        --------
        >>> idx = pd.Index([-1, 0, 1])
        >>> idx.view()
        Index([-1, 0, 1], dtype='int64')
        >>> idx.view(np.uint64)
        array([18446744073709551615,                    0,                    1],
              dtype=uint64)
        Viewing as 'int32' or 'float32' reinterprets the memory, which may lead to
        unexpected behavior:
        >>> idx.view("float32")
        array([   nan,    nan, 0.e+00, 0.e+00, 1.e-45, 0.e+00], dtype=float32)
        """
        # we need to see if we are subclassing an
        # index type here
        if cls is not None:
            dtype = cls
            if isinstance(cls, str):
                # string dtype spec -> resolve to a real dtype object first
                dtype = pandas_dtype(cls)
            if needs_i8_conversion(dtype):
                # datetime64/timedelta64 targets get an Index subclass view,
                # not a raw ndarray
                idx_cls = self._dtype_to_subclass(dtype)
                arr = self.array.view(dtype)
                if isinstance(arr, ExtensionArray):
                    # here we exclude non-supported dt64/td64 dtypes
                    return idx_cls._simple_new(
                        arr, name=self.name, refs=self._references
                    )
                return arr
            result = self._data.view(cls)
        else:
            result = self._view()
        if isinstance(result, Index):
            # keep the engine-cache identity of the original Index
            result._id = self._id
        return result
    def astype(self, dtype: Dtype, copy: bool = True):
        """
        Create an Index with values cast to dtypes.
        The class of a new Index is determined by dtype. When conversion is
        impossible, a TypeError exception is raised.
        Parameters
        ----------
        dtype : numpy dtype or pandas type
            Note that any signed integer `dtype` is treated as ``'int64'``,
            and any unsigned integer `dtype` is treated as ``'uint64'``,
            regardless of the size.
        copy : bool, default True
            By default, astype always returns a newly allocated object.
            If copy is set to False and internal requirements on dtype are
            satisfied, the original data is used to create a new Index
            or the original Index is returned.
        Returns
        -------
        Index
            Index with values cast to specified dtype.
        See Also
        --------
        Index.dtype: Return the dtype object of the underlying data.
        Index.dtypes: Return the dtype object of the underlying data.
        Index.convert_dtypes: Convert columns to the best possible dtypes.
        Examples
        --------
        >>> idx = pd.Index([1, 2, 3])
        >>> idx
        Index([1, 2, 3], dtype='int64')
        >>> idx.astype("float")
        Index([1.0, 2.0, 3.0], dtype='float64')
        """
        if dtype is not None:
            dtype = pandas_dtype(dtype)
        if self.dtype == dtype:
            # Ensure that self.astype(self.dtype) is self
            return self.copy() if copy else self
        values = self._data
        if isinstance(values, ExtensionArray):
            # EA knows how to cast itself; rewrite error messages so they
            # name the Index class rather than the array class
            with rewrite_exception(type(values).__name__, type(self).__name__):
                new_values = values.astype(dtype, copy=copy)
        elif isinstance(dtype, ExtensionDtype):
            cls = dtype.construct_array_type()
            # Note: for RangeIndex and CategoricalDtype self vs self._values
            # behaves differently here.
            new_values = cls._from_sequence(self, dtype=dtype, copy=copy)
        else:
            # GH#13149 specifically use astype_array instead of astype
            new_values = astype_array(values, dtype=dtype, copy=copy)
        # pass copy=False because any copying will be done in the astype above
        result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False)
        if (
            not copy
            and self._references is not None
            and astype_is_view(self.dtype, dtype)
        ):
            # Copy-on-Write: the cast shares memory with self, so register
            # the new Index against the same reference tracker
            result._references = self._references
            result._references.add_index_reference(result)
        return result
_index_shared_docs["take"] = """
Return a new %(klass)s of the values selected by the indices.
For internal compatibility with numpy arrays.
Parameters
----------
indices : array-like
Indices to be taken.
axis : {0 or 'index'}, optional
The axis over which to select values, always 0 or 'index'.
allow_fill : bool, default True
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
other negative values raise a ``ValueError``.
fill_value : scalar, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 are regarded as NA. If Index doesn't hold NA, raise ValueError.
**kwargs
Required for compatibility with numpy.
Returns
-------
Index
An index formed of elements at the given indices. Will be the same
type as self, except for RangeIndex.
See Also
--------
numpy.ndarray.take: Return an array formed from the
elements of a at the given indices.
Examples
--------
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.take([2, 2, 1, 2])
Index(['c', 'c', 'b', 'c'], dtype='str')
"""
    def take(
        self,
        indices,
        axis: Axis = 0,
        allow_fill: bool = True,
        fill_value=None,
        **kwargs,
    ) -> Self:
        """
        Return a new Index of the values selected by the indices.
        For internal compatibility with numpy arrays.
        Parameters
        ----------
        indices : array-like
            Indices to be taken.
        axis : {0 or 'index'}, optional
            The axis over which to select values, always 0 or 'index'.
        allow_fill : bool, default True
            How to handle negative values in `indices`.
            * False: negative values in `indices` indicate positional indices
              from the right (the default). This is similar to
              :func:`numpy.take`.
            * True: negative values in `indices` indicate
              missing values. These values are set to `fill_value`. Any
              other negative values raise a ``ValueError``.
        fill_value : scalar, default None
            If allow_fill=True and fill_value is not None, indices specified by
            -1 are regarded as NA. If Index doesn't hold NA, raise ValueError.
        **kwargs
            Required for compatibility with numpy.
        Returns
        -------
        Index
            An index formed of elements at the given indices. Will be the same
            type as self, except for RangeIndex.
        See Also
        --------
        numpy.ndarray.take: Return an array formed from the
            elements of a at the given indices.
        Examples
        --------
        >>> idx = pd.Index(["a", "b", "c"])
        >>> idx.take([2, 2, 1, 2])
        Index(['c', 'c', 'b', 'c'], dtype='str')
        """
        if kwargs:
            # reject unsupported numpy-compat kwargs (e.g. mode=)
            nv.validate_take((), kwargs)
        if is_scalar(indices):
            raise TypeError("Expected indices to be array-like")
        indices = ensure_platform_int(indices)
        allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)
        if indices.ndim == 1 and lib.is_range_indexer(indices, len(self)):
            # fast path: taking every element in order is just a copy
            return self.copy()
        # Note: we discard fill_value and use self._na_value, only relevant
        # in the case where allow_fill is True and fill_value is not None
        values = self._values
        if isinstance(values, np.ndarray):
            taken = algos.take(
                values, indices, allow_fill=allow_fill, fill_value=self._na_value
            )
        else:
            # algos.take passes 'axis' keyword which not all EAs accept
            taken = values.take(
                indices, allow_fill=allow_fill, fill_value=self._na_value
            )
        # _constructor so RangeIndex -> Index with an int64 dtype
        return self._constructor._simple_new(taken, name=self.name)
@final
def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool:
"""
We only use pandas-style take when allow_fill is True _and_
fill_value is not None.
"""
if allow_fill and fill_value is not None:
# only fill if we are passing a non-None fill_value
if self._can_hold_na:
if (indices < -1).any():
raise ValueError(
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
else:
cls_name = type(self).__name__
raise ValueError(
f"Unable to fill values because {cls_name} cannot contain NA"
)
else:
allow_fill = False
return allow_fill
def repeat(self, repeats, axis: None = None) -> Self:
"""
Repeat elements of an Index.
Returns a new Index where each element of the current Index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
Index.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
Index
Newly created Index with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
Examples
--------
>>> idx = pd.Index(["a", "b", "c"])
>>> idx
Index(['a', 'b', 'c'], dtype='object')
>>> idx.repeat(2)
Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object')
>>> idx.repeat([1, 2, 3])
Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object')
"""
repeats = ensure_platform_int(repeats)
nv.validate_repeat((), {"axis": axis})
res_values = self._values.repeat(repeats)
# _constructor so RangeIndex-> Index with an int64 dtype
return self._constructor._simple_new(res_values, name=self.name)
# --------------------------------------------------------------------
# Copying Methods
def copy(
self,
name: Hashable | None = None,
deep: bool = False,
) -> Self:
"""
Make a copy of this object.
Name is set on the new object.
Parameters
----------
name : Label, optional
Set name for new object.
deep : bool, default False
If True attempts to make a deep copy of the Index.
Else makes a shallow copy.
Returns
-------
Index
Index refer to new object which is a copy of this object.
See Also
--------
Index.delete: Make new Index with passed location(-s) deleted.
Index.drop: Make new Index with passed list of labels deleted.
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
Examples
--------
>>> idx = pd.Index(["a", "b", "c"])
>>> new_idx = idx.copy()
>>> idx is new_idx
False
"""
name = self._validate_names(name=name, deep=deep)[0]
if deep:
new_data = self._data.copy()
new_index = type(self)._simple_new(new_data, name=name)
else:
new_index = self._rename(name=name)
return new_index
    @final
    def __copy__(self) -> Self:
        # copy.copy() hook: shallow copy, same as Index.copy(deep=False).
        return self.copy(deep=False)
    @final
    def __deepcopy__(self, memo=None) -> Self:
        """
        Parameters
        ----------
        memo, default None
            Standard signature. Unused
        """
        # copy.deepcopy() hook: delegates to Index.copy(deep=True).
        return self.copy(deep=True)
# --------------------------------------------------------------------
# Rendering Methods
@final
def __repr__(self) -> str_t:
"""
Return a string representation for this object.
"""
klass_name = type(self).__name__
data = self._format_data()
attrs = self._format_attrs()
attrs_str = [f"{k}={v}" for k, v in attrs]
prepr = ", ".join(attrs_str)
return f"{klass_name}({data}{prepr})"
    @property
    def _formatter_func(self):
        """
        Return the formatter function.
        """
        # Plain Index uses the generic pretty-printer; subclasses may override.
        return default_pprint
    @final
    def _format_data(self, name=None) -> str_t:
        """
        Return the formatted data as a unicode string.
        """
        # do we want to justify (only do so for non-objects)
        is_justify = True
        if self.inferred_type == "string":
            is_justify = False
        elif isinstance(self.dtype, CategoricalDtype):
            # narrow the type for mypy; string categories are not justified
            self = cast("CategoricalIndex", self)
            if is_string_dtype(self.categories.dtype):
                is_justify = False
        elif isinstance(self, ABCRangeIndex):
            # We will do the relevant formatting via attrs
            return ""
        return format_object_summary(
            self,
            self._formatter_func,
            is_justify=is_justify,
            name=name,
            line_break_each_value=self._is_multi,
        )
    def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]:
        """
        Return a list of tuples of the (attr,formatted_value).
        """
        attrs: list[tuple[str_t, str_t | int | bool | None]] = []
        if not self._is_multi:
            # single-level Index shows its dtype and (optional) name
            attrs.append(("dtype", f"'{self.dtype}'"))
            if self.name is not None:
                attrs.append(("name", default_pprint(self.name)))
        elif self._is_multi and any(x is not None for x in self.names):
            # MultiIndex shows plural 'names' only when at least one is set
            attrs.append(("names", default_pprint(self.names)))
        max_seq_items = get_option("display.max_seq_items") or len(self)
        if len(self) > max_seq_items:
            # only display a length attribute when the repr is truncated
            attrs.append(("length", len(self)))
        return attrs
@final
def _get_level_names(self) -> range | Sequence[Hashable]:
"""
Return a name or list of names with None replaced by the level number.
"""
if self._is_multi:
return maybe_sequence_to_range(
[
level if name is None else name
for level, name in enumerate(self.names)
]
)
else:
return range(1) if self.name is None else [self.name]
    @final
    def _mpl_repr(self) -> np.ndarray:
        # how to represent ourselves to matplotlib
        if isinstance(self.dtype, np.dtype) and self.dtype.kind != "M":
            # plain numpy dtypes (except datetime64) can be handed over as-is
            return cast(np.ndarray, self.values)
        # datetime-like and extension dtypes are converted to object values
        return self.astype(object, copy=False)._values
_default_na_rep = "NaN"
@final
def _format_flat(
self,
*,
include_name: bool,
formatter: Callable | None = None,
) -> list[str_t]:
"""
Render a string representation of the Index.
"""
header = []
if include_name:
header.append(
pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
if self.name is not None
else ""
)
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header=header, na_rep=self._default_na_rep)
    def _format_with_header(self, *, header: list[str_t], na_rep: str_t) -> list[str_t]:
        """Format the values as strings and prepend the given header lines."""
        from pandas.io.formats.format import format_array
        values = self._values
        if (
            is_object_dtype(values.dtype)
            or is_string_dtype(values.dtype)
            or isinstance(self.dtype, (IntervalDtype, CategoricalDtype))
        ):
            # TODO: why do we need different justify for these cases?
            justify = "all"
        else:
            justify = "left"
        # passing leading_space=False breaks test_format_missing,
        # test_index_repr_in_frame_with_nan, but would otherwise make
        # trim_front unnecessary
        formatted = format_array(values, None, justify=justify)
        result = trim_front(formatted)
        return header + result
    def _get_values_for_csv(
        self,
        *,
        na_rep: str_t = "",
        decimal: str_t = ".",
        float_format=None,
        date_format=None,
        quoting=None,
    ) -> npt.NDArray[np.object_]:
        """Convert the values to an object ndarray of strings for CSV output."""
        # thin wrapper: all formatting logic lives in get_values_for_csv
        return get_values_for_csv(
            self._values,
            na_rep=na_rep,
            decimal=decimal,
            float_format=float_format,
            date_format=date_format,
            quoting=quoting,
        )
def _summary(self, name=None) -> str_t:
"""
Return a summarized representation.
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
if len(self) > 0:
head = self[0]
if hasattr(head, "format") and not isinstance(head, str):
head = head.format()
elif needs_i8_conversion(self.dtype):
# e.g. Timedelta, display as values, not quoted
head = self._formatter_func(head).replace("'", "")
tail = self[-1]
if hasattr(tail, "format") and not isinstance(tail, str):
tail = tail.format()
elif needs_i8_conversion(self.dtype):
# e.g. Timedelta, display as values, not quoted
tail = self._formatter_func(tail).replace("'", "")
index_summary = f", {head} to {tail}"
else:
index_summary = ""
if name is None:
name = type(self).__name__
return f"{name}: {len(self)} entries{index_summary}"
# --------------------------------------------------------------------
# Conversion Methods
    def to_flat_index(self) -> Self:
        """
        Identity method.
        This is implemented for compatibility with subclass implementations
        when chaining.
        Returns
        -------
        pd.Index
            Caller.
        See Also
        --------
        MultiIndex.to_flat_index : Subclass implementation.
        """
        # a single-level Index is already "flat"
        return self
@final
def to_series(self, index=None, name: Hashable | None = None) -> Series:
"""
Create a Series with both index and values equal to the index keys.
Useful with map for returning an indexer based on an index.
Parameters
----------
index : Index, optional
Index of resulting Series. If None, defaults to original index.
name : str, optional
Name of resulting Series. If None, defaults to name of original
index.
Returns
-------
Series
The dtype will be based on the type of the Index values.
See Also
--------
Index.to_frame : Convert an Index to a DataFrame.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(["Ant", "Bear", "Cow"], name="animal")
By default, the original index and original name is reused.
>>> idx.to_series()
animal
Ant Ant
Bear Bear
Cow Cow
Name: animal, dtype: object
To enforce a new index, specify new labels to ``index``:
>>> idx.to_series(index=[0, 1, 2])
0 Ant
1 Bear
2 Cow
Name: animal, dtype: object
To override the name of the resulting column, specify ``name``:
>>> idx.to_series(name="zoo")
animal
Ant Ant
Bear Bear
Cow Cow
Name: zoo, dtype: object
"""
from pandas import Series
if index is None:
index = self._view()
if name is None:
name = self.name
return Series(self._values.copy(), index=index, name=name)
    def to_frame(
        self, index: bool = True, name: Hashable = lib.no_default
    ) -> DataFrame:
        """
        Create a DataFrame with a column containing the Index.
        Parameters
        ----------
        index : bool, default True
            Set the index of the returned DataFrame as the original Index.
        name : object, defaults to index.name
            The passed name should substitute for the index name (if it has
            one).
        Returns
        -------
        DataFrame
            DataFrame containing the original Index data.
        See Also
        --------
        Index.to_series : Convert an Index to a Series.
        Series.to_frame : Convert Series to DataFrame.
        Examples
        --------
        >>> idx = pd.Index(["Ant", "Bear", "Cow"], name="animal")
        >>> idx.to_frame()
               animal
        animal
        Ant       Ant
        Bear     Bear
        Cow       Cow
        By default, the original Index is reused. To enforce a new Index:
        >>> idx.to_frame(index=False)
            animal
        0   Ant
        1  Bear
        2   Cow
        To override the name of the resulting column, specify `name`:
        >>> idx.to_frame(index=False, name="zoo")
            zoo
        0   Ant
        1  Bear
        2   Cow
        """
        from pandas import DataFrame
        # lib.no_default sentinel distinguishes "not passed" from name=None
        if name is lib.no_default:
            result_name = self._get_level_names()
        else:
            result_name = Index([name])  # type: ignore[assignment]
        result = DataFrame(self, copy=False)
        result.columns = result_name
        if index:
            result.index = self
        return result
# --------------------------------------------------------------------
# Name-Centric Methods
    @property
    def name(self) -> Hashable:
        """
        Return Index or MultiIndex name.
        Returns
        -------
        label (hashable object)
            The name of the Index.
        See Also
        --------
        Index.set_names: Able to set new names partially and by level.
        Index.rename: Able to set new names partially and by level.
        Series.name: Corresponding Series property.
        Examples
        --------
        >>> idx = pd.Index([1, 2, 3], name="x")
        >>> idx
        Index([1, 2, 3], dtype='int64', name='x')
        >>> idx.name
        'x'
        """
        # backed by the private _name attribute
        return self._name
    @name.setter
    def name(self, value: Hashable) -> None:
        if self._no_setting_name:
            # Used in MultiIndex.levels to avoid silently ignoring name updates.
            raise RuntimeError(
                "Cannot set name on a level of a MultiIndex. Use "
                "'MultiIndex.set_names' instead."
            )
        # validation only; the return value is intentionally discarded
        maybe_extract_name(value, None, type(self))
        self._name = value
@final
def _validate_names(
self, name=None, names=None, deep: bool = False
) -> list[Hashable]:
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
if names is None and name is None:
new_names = deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
new_names = names
elif not is_list_like(name):
new_names = [name]
else:
new_names = name
if len(new_names) != len(self.names):
raise ValueError(
f"Length of new names must be {len(self.names)}, got {len(new_names)}"
)
# All items in 'new_names' need to be hashable
validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name")
return new_names
    def _get_default_index_names(
        self, names: Hashable | Sequence[Hashable] | None = None, default=None
    ) -> list[Hashable]:
        """
        Get names of index.
        Parameters
        ----------
        names : int, str or 1-dimensional list, default None
            Index names to set.
        default : str
            Default name of index.
        Raises
        ------
        ValueError
            if names not str or list-like
        """
        from pandas.core.indexes.multi import MultiIndex
        if names is not None:
            # normalize a scalar name to a one-element list
            if isinstance(names, (int, str)):
                names = [names]
        if not isinstance(names, list) and names is not None:
            raise ValueError("Index names must be str or 1-dimensional list")
        if not names:
            # fall back to the existing names, filling in `default` if unset
            if isinstance(self, MultiIndex):
                names = com.fill_missing_names(self.names)
            else:
                names = [default] if self.name is None else [self.name]
        return names
    def _get_names(self) -> FrozenList:
        """
        Get names on index.
        This method returns a FrozenList containing the names of the object.
        It's primarily intended for internal use.
        Returns
        -------
        FrozenList
            A FrozenList containing the object's names, contains None if the object
            does not have a name.
        See Also
        --------
        Index.name : Index name as a string, or None for MultiIndex.
        Examples
        --------
        >>> idx = pd.Index([1, 2, 3], name="x")
        >>> idx.names
        FrozenList(['x'])
        >>> idx = pd.Index([1, 2, 3], name=("x", "y"))
        >>> idx.names
        FrozenList([('x', 'y')])
        If the index does not have a name set:
        >>> idx = pd.Index([1, 2, 3])
        >>> idx.names
        FrozenList([None])
        """
        # plain Index: always a single-element, immutable list
        return FrozenList((self.name,))
    def _set_names(self, values, *, level=None) -> None:
        """
        Set new names on index. Each name has to be a hashable type.
        Parameters
        ----------
        values : str or sequence
            name(s) to set
        level : int, level name, or sequence of int/level names (default None)
            If the index is a MultiIndex (hierarchical), level(s) to set (None
            for all levels). Otherwise level must be None
            NOTE: unused here; accepted for MultiIndex signature compatibility.
        Raises
        ------
        TypeError if each name is not hashable.
        """
        if not is_list_like(values):
            raise ValueError("Names must be a list-like")
        if len(values) != 1:
            # a plain Index has exactly one level, hence one name
            raise ValueError(f"Length of new names must be 1, got {len(values)}")
        # GH 20527
        # All items in 'name' need to be hashable:
        validate_all_hashable(*values, error_name=f"{type(self).__name__}.name")
        self._name = values[0]
names = property(fset=_set_names, fget=_get_names)
    @overload
    def set_names(self, names, *, level=..., inplace: Literal[False] = ...) -> Self: ...
    @overload
    def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: ...
    @overload
    def set_names(self, names, *, level=..., inplace: bool = ...) -> Self | None: ...
    def set_names(self, names, *, level=None, inplace: bool = False) -> Self | None:
        """
        Set Index or MultiIndex name.
        Able to set new names partially and by level.
        Parameters
        ----------
        names : Hashable or a sequence of the previous or dict-like for MultiIndex
            Name(s) to set.
        level : int, Hashable or a sequence of the previous, optional
            If the index is a MultiIndex and names is not dict-like, level(s) to set
            (None for all levels). Otherwise level must be None.
        inplace : bool, default False
            Modifies the object directly, instead of creating a new Index or
            MultiIndex.
        Returns
        -------
        Index or None
            The same type as the caller or None if ``inplace=True``.
        See Also
        --------
        Index.rename : Able to set new names without level.
        Examples
        --------
        >>> idx = pd.Index([1, 2, 3, 4])
        >>> idx
        Index([1, 2, 3, 4], dtype='int64')
        >>> idx.set_names("quarter")
        Index([1, 2, 3, 4], dtype='int64', name='quarter')
        >>> idx = pd.MultiIndex.from_product([["python", "cobra"], [2018, 2019]])
        >>> idx
        MultiIndex([('python', 2018),
                    ('python', 2019),
                    ( 'cobra', 2018),
                    ( 'cobra', 2019)],
                   )
        >>> idx = idx.set_names(["kind", "year"])
        >>> idx.set_names("species", level=0)
        MultiIndex([('python', 2018),
                    ('python', 2019),
                    ( 'cobra', 2018),
                    ( 'cobra', 2019)],
                   names=['species', 'year'])
        When renaming levels with a dict, levels can not be passed.
        >>> idx.set_names({"kind": "snake"})
        MultiIndex([('python', 2018),
                    ('python', 2019),
                    ( 'cobra', 2018),
                    ( 'cobra', 2019)],
                   names=['snake', 'year'])
        """
        if level is not None and not isinstance(self, ABCMultiIndex):
            raise ValueError("Level must be None for non-MultiIndex")
        if level is not None and not is_list_like(level) and is_list_like(names):
            raise TypeError("Names must be a string when a single level is provided.")
        if not is_list_like(names) and level is None and self.nlevels > 1:
            raise TypeError("Must pass list-like as `names`.")
        if is_dict_like(names) and not isinstance(self, ABCMultiIndex):
            raise TypeError("Can only pass dict-like as `names` for MultiIndex.")
        if is_dict_like(names) and level is not None:
            raise TypeError("Can not pass level for dictlike `names`.")
        if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None:
            # Transform dict to list of new names and corresponding levels
            level, names_adjusted = [], []
            for i, name in enumerate(self.names):
                if name in names.keys():
                    level.append(i)
                    names_adjusted.append(names[name])
            names = names_adjusted
        if not is_list_like(names):
            # normalize scalar name / level into one-element lists
            names = [names]
        if level is not None and not is_list_like(level):
            level = [level]
        if inplace:
            idx = self
        else:
            # _view shares data but allows setting names on the new object
            idx = self._view()
        idx._set_names(names, level=level)
        if not inplace:
            return idx
        return None
@overload
def rename(self, name, *, inplace: Literal[False] = ...) -> Self: ...
@overload
def rename(self, name, *, inplace: Literal[True]) -> None: ...
def rename(self, name, *, inplace: bool = False) -> Self | None:
"""
Alter Index or MultiIndex name.
Able to set new names without level. Defaults to returning new index.
Length of names must match number of levels in MultiIndex.
Parameters
----------
name : Hashable or a sequence of the previous
Name(s) to set.
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index or None
The same type as the caller or None if ``inplace=True``.
See Also
--------
Index.set_names : Able to set new names partially and by level.
Examples
--------
>>> idx = pd.Index(["A", "C", "A", "B"], name="score")
>>> idx.rename("grade")
Index(['A', 'C', 'A', 'B'], dtype='object', name='grade')
>>> idx = pd.MultiIndex.from_product(
... [["python", "cobra"], [2018, 2019]], names=["kind", "year"]
... )
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['kind', 'year'])
>>> idx.rename(["species", "year"])
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['species', 'year'])
>>> idx.rename("species")
Traceback (most recent call last):
TypeError: Must pass list-like as `names`.
"""
return self.set_names([name], inplace=inplace)
# --------------------------------------------------------------------
# Level-Centric Methods
    @property
    def nlevels(self) -> int:
        """
        Number of levels.
        """
        # a non-Multi Index always has exactly one level
        return 1
    def _sort_levels_monotonic(self) -> Self:
        """
        Compat with MultiIndex.
        """
        # no-op for a single-level Index
        return self
@final
def _validate_index_level(self, level) -> None:
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError(
"Too many levels: Index has only 1 level, "
f"{level} is not a valid level number"
)
if level > 0:
raise IndexError(
f"Too many levels: Index has only 1 level, not {level + 1}"
)
elif level != self.name:
raise KeyError(
f"Requested level ({level}) does not match index name ({self.name})"
)
    def _get_level_number(self, level) -> int:
        # A flat Index has exactly one level, so any valid level maps to 0.
        self._validate_index_level(level)
        return 0
def sortlevel(
self,
level=None,
ascending: bool | list[bool] = True,
sort_remaining=None,
na_position: NaPosition = "first",
) -> tuple[Self, np.ndarray]:
"""
For internal compatibility with the Index API.
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : bool, default True
False to sort in descending order
na_position : {'first' or 'last'}, default 'first'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
.. versionadded:: 2.1.0
level, sort_remaining are compat parameters
Returns
-------
Index
"""
if not isinstance(ascending, (list, bool)):
raise TypeError(
"ascending must be a single bool value or"
"a list of bool values of length 1"
)
if isinstance(ascending, list):
if len(ascending) != 1:
raise TypeError("ascending must be a list of bool values of length 1")
ascending = ascending[0]
if not isinstance(ascending, bool):
raise TypeError("ascending must be a bool value")
return self.sort_values(
return_indexer=True, ascending=ascending, na_position=na_position
)
    def _get_level_values(self, level) -> Index:
        """
        Return an Index of values for requested level.
        This is primarily useful to get an individual level of values from a
        MultiIndex, but is provided on Index as well for compatibility.
        Parameters
        ----------
        level : int or str
            It is either the integer position or the name of the level.
        Returns
        -------
        Index
            Calling object, as there is only one level in the Index.
        See Also
        --------
        MultiIndex.get_level_values : Get values for a level of a MultiIndex.
        Notes
        -----
        For Index, level should be 0, since there are no multiple levels.
        Examples
        --------
        >>> idx = pd.Index(list("abc"))
        >>> idx
        Index(['a', 'b', 'c'], dtype='object')
        Get level values by supplying `level` as integer:
        >>> idx.get_level_values(0)
        Index(['a', 'b', 'c'], dtype='object')
        """
        # validation raises for anything other than level 0 / the index name
        self._validate_index_level(level)
        return self
    # public alias for the private implementation
    get_level_values = _get_level_values
@final
def droplevel(self, level: IndexLabel = 0):
"""
Return index with requested level(s) removed.
If resulting index has only 1 level left, the result will be
of Index type, not MultiIndex. The original index is not modified inplace.
Parameters
----------
level : int, str, or list-like, default 0
If a string is given, must be the name of a level
If list-like, elements must be names or indexes of levels.
Returns
-------
Index or MultiIndex
Returns an Index or MultiIndex object, depending on the resulting index
after removing the requested level(s).
See Also
--------
Index.dropna : Return Index without NA/NaN values.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
... [[1, 2], [3, 4], [5, 6]], names=["x", "y", "z"]
... )
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
names=['x', 'y', 'z'])
>>> mi.droplevel()
MultiIndex([(3, 5),
(4, 6)],
names=['y', 'z'])
>>> mi.droplevel(2)
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.droplevel("z")
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.droplevel(["x", "y"])
Index([5, 6], dtype='int64', name='z')
"""
if not isinstance(level, (tuple, list)):
level = [level]
levnums = sorted((self._get_level_number(lev) for lev in level), reverse=True)
return self._drop_level_numbers(levnums)
    @final
    def _drop_level_numbers(self, levnums: list[int]):
        """
        Drop MultiIndex levels by level _number_, not name.
        """
        if not levnums and not isinstance(self, ABCMultiIndex):
            # nothing to drop on a flat Index
            return self
        if len(levnums) >= self.nlevels:
            raise ValueError(
                f"Cannot remove {len(levnums)} levels from an index with "
                f"{self.nlevels} levels: at least one level must be left."
            )
        # The two checks above guarantee that here self is a MultiIndex
        self = cast("MultiIndex", self)
        new_levels = list(self.levels)
        new_codes = list(self.codes)
        new_names = list(self.names)
        # NOTE: relies on droplevel passing levnums sorted descending so
        # pop(i) does not shift the positions of levels yet to be removed.
        for i in levnums:
            new_levels.pop(i)
            new_codes.pop(i)
            new_names.pop(i)
        if len(new_levels) == 1:
            # collapse to a flat Index
            lev = new_levels[0]
            if len(lev) == 0:
                # If lev is empty, lev.take will fail GH#42055
                if len(new_codes[0]) == 0:
                    # GH#45230 preserve RangeIndex here
                    #  see test_reset_index_empty_rangeindex
                    result = lev[:0]
                else:
                    res_values = algos.take(lev._values, new_codes[0], allow_fill=True)
                    # _constructor instead of type(lev) for RangeIndex compat GH#35230
                    result = lev._constructor._simple_new(res_values, name=new_names[0])
            else:
                # set nan if needed
                mask = new_codes[0] == -1
                result = new_levels[0].take(new_codes[0])
                if mask.any():
                    result = result.putmask(mask, np.nan)
                result._name = new_names[0]
            return result
        else:
            from pandas.core.indexes.multi import MultiIndex
            return MultiIndex(
                levels=new_levels,
                codes=new_codes,
                names=new_names,
                verify_integrity=False,
            )
# --------------------------------------------------------------------
# Introspection Methods
@cache_readonly
@final
def _can_hold_na(self) -> bool:
if isinstance(self.dtype, ExtensionDtype):
return self.dtype._can_hold_na
if self.dtype.kind in "iub":
return False
return True
@property
def is_monotonic_increasing(self) -> bool:
"""
Return a boolean if the values are equal or increasing.
Returns
-------
bool
See Also
--------
Index.is_monotonic_decreasing : Check if the values are equal or decreasing.
Examples
--------
>>> pd.Index([1, 2, 3]).is_monotonic_increasing
True
>>> pd.Index([1, 2, 2]).is_monotonic_increasing
True
>>> pd.Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self) -> bool:
"""
Return a boolean if the values are equal or decreasing.
Returns
-------
bool
See Also
--------
Index.is_monotonic_increasing : Check if the values are equal or increasing.
Examples
--------
>>> pd.Index([3, 2, 1]).is_monotonic_decreasing
True
>>> pd.Index([3, 2, 2]).is_monotonic_decreasing
True
>>> pd.Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
@final
@property
def _is_strictly_monotonic_increasing(self) -> bool:
"""
Return if the index is strictly monotonic increasing
(only increasing) values.
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_unique and self.is_monotonic_increasing
@final
@property
def _is_strictly_monotonic_decreasing(self) -> bool:
"""
Return if the index is strictly monotonic decreasing
(only decreasing) values.
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
@cache_readonly
def is_unique(self) -> bool:
"""
Return if the index has unique values.
Returns
-------
bool
See Also
--------
Index.has_duplicates : Inverse method that checks if it has duplicate values.
Examples
--------
>>> idx = pd.Index([1, 5, 7, 7])
>>> idx.is_unique
False
>>> idx = pd.Index([1, 5, 7])
>>> idx.is_unique
True
>>> idx = pd.Index(["Watermelon", "Orange", "Apple", "Watermelon"]).astype(
... "category"
... )
>>> idx.is_unique
False
>>> idx = pd.Index(["Orange", "Apple", "Watermelon"]).astype("category")
>>> idx.is_unique
True
"""
return self._engine.is_unique
@final
@property
def has_duplicates(self) -> bool:
"""
Check if the Index has duplicate values.
Returns
-------
bool
Whether or not the Index has duplicate values.
See Also
--------
Index.is_unique : Inverse method that checks if it has unique values.
Examples
--------
>>> idx = pd.Index([1, 5, 7, 7])
>>> idx.has_duplicates
True
>>> idx = pd.Index([1, 5, 7])
>>> idx.has_duplicates
False
>>> idx = pd.Index(["Watermelon", "Orange", "Apple", "Watermelon"]).astype(
... "category"
... )
>>> idx.has_duplicates
True
>>> idx = pd.Index(["Orange", "Apple", "Watermelon"]).astype("category")
>>> idx.has_duplicates
False
"""
return not self.is_unique
    @cache_readonly
    def inferred_type(self) -> str_t:
        """
        Return a string of the type inferred from the values.

        See Also
        --------
        Index.dtype : Return the dtype object of the underlying data.

        Examples
        --------
        >>> idx = pd.Index([1, 2, 3])
        >>> idx
        Index([1, 2, 3], dtype='int64')
        >>> idx.inferred_type
        'integer'
        """
        # skipna=False so missing values participate in the inference.
        return lib.infer_dtype(self._values, skipna=False)
@cache_readonly
@final
def _is_all_dates(self) -> bool:
"""
Whether or not the index values only consist of dates.
"""
if needs_i8_conversion(self.dtype):
return True
elif self.dtype != _dtype_obj:
# TODO(ExtensionIndex): 3rd party EA might override?
# Note: this includes IntervalIndex, even when the left/right
# contain datetime-like objects.
return False
elif self._is_multi:
return False
return is_datetime_array(ensure_object(self._values))
    @final
    @cache_readonly
    def _is_multi(self) -> bool:
        """
        Cached check equivalent to isinstance(self, MultiIndex)
        """
        # Uses the ABC so pandas.core.indexes.multi need not be imported here.
        return isinstance(self, ABCMultiIndex)
# --------------------------------------------------------------------
# Pickle Methods
def __reduce__(self):
d = {"data": self._data, "name": self.name}
return _new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Null Handling Methods
@cache_readonly
def _na_value(self):
"""The expected NA value to use with this index."""
dtype = self.dtype
if isinstance(dtype, np.dtype):
if dtype.kind in "mM":
return NaT
return np.nan
return dtype.na_value
@cache_readonly
def _isnan(self) -> npt.NDArray[np.bool_]:
"""
Return if each value is NaN.
"""
if self._can_hold_na:
return isna(self)
else:
# shouldn't reach to this condition by checking hasnans beforehand
values = np.empty(len(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def hasnans(self) -> bool:
"""
Return True if there are any NaNs.
Enables various performance speedups.
Returns
-------
bool
See Also
--------
Index.isna : Detect missing values.
Index.dropna : Return Index without NA/NaN values.
Index.fillna : Fill NA/NaN values with the specified value.
Examples
--------
>>> s = pd.Series([1, 2, 3], index=["a", "b", None])
>>> s
a 1
b 2
None 3
dtype: int64
>>> s.index.hasnans
True
"""
if self._can_hold_na:
return bool(self._isnan.any())
else:
return False
@final
def isna(self) -> npt.NDArray[np.bool_]:
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get
mapped to ``True`` values.
Everything else get mapped to ``False`` values. Characters such as
empty strings `''` or :attr:`numpy.inf` are not considered NA values.
Returns
-------
numpy.ndarray[bool]
A boolean array of whether my values are NA.
See Also
--------
Index.notna : Boolean inverse of isna.
Index.dropna : Omit entries with missing values.
isna : Top-level isna.
Series.isna : Detect missing values in Series object.
Examples
--------
Show which entries in a pandas.Index are NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.nan])
>>> idx
Index([5.2, 6.0, nan], dtype='float64')
>>> idx.isna()
array([False, False, True])
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(["black", "", "red", None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.isna()
array([False, False, False, True])
For datetimes, `NaT` (Not a Time) is considered as an NA value.
>>> idx = pd.DatetimeIndex(
... [pd.Timestamp("1940-04-25"), pd.Timestamp(""), None, pd.NaT]
... )
>>> idx
DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[s]', freq=None)
>>> idx.isna()
array([False, True, True, True])
"""
return self._isnan
isnull = isna
@final
def notna(self) -> npt.NDArray[np.bool_]:
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to ``True``. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values.
NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``
values.
Returns
-------
numpy.ndarray[bool]
Boolean array to indicate which entries are not NA.
See Also
--------
Index.notnull : Alias of notna.
Index.isna: Inverse of notna.
notna : Top-level notna.
Examples
--------
Show which entries in an Index are not NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.nan])
>>> idx
Index([5.2, 6.0, nan], dtype='float64')
>>> idx.notna()
array([ True, True, False])
Empty strings are not considered NA values. None is considered a NA
value.
>>> idx = pd.Index(["black", "", "red", None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.notna()
array([ True, True, True, False])
"""
return ~self.isna()
notnull = notna
def fillna(self, value):
"""
Fill NA/NaN values with the specified value.
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-likes.
Returns
-------
Index
NA/NaN values replaced with `value`.
See Also
--------
DataFrame.fillna : Fill NaN values of a DataFrame.
Series.fillna : Fill NaN Values of a Series.
Examples
--------
>>> idx = pd.Index([np.nan, np.nan, 3])
>>> idx.fillna(0)
Index([0.0, 0.0, 3.0], dtype='float64')
"""
if not is_scalar(value):
raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}")
if self.hasnans:
result = self.putmask(self._isnan, value)
# no need to care metadata other than name
# because it can't have freq if it has NaTs
# _with_infer needed for test_fillna_categorical
return Index._with_infer(result, name=self.name)
return self._view()
def dropna(self, how: AnyAll = "any") -> Self:
"""
Return Index without NA/NaN values.
Parameters
----------
how : {'any', 'all'}, default 'any'
If the Index is a MultiIndex, drop the value when any or all levels
are NaN.
Returns
-------
Index
Returns an Index object after removing NA/NaN values.
See Also
--------
Index.fillna : Fill NA/NaN values with the specified value.
Index.isna : Detect missing values.
Examples
--------
>>> idx = pd.Index([1, np.nan, 3])
>>> idx.dropna()
Index([1.0, 3.0], dtype='float64')
"""
if how not in ("any", "all"):
raise ValueError(f"invalid how option: {how}")
if self.hasnans:
res_values = self._values[~self._isnan]
return type(self)._simple_new(res_values, name=self.name)
return self._view()
# --------------------------------------------------------------------
# Uniqueness Methods
def unique(self, level: Hashable | None = None) -> Self:
"""
Return unique values in the index.
Unique values are returned in order of appearance, this does NOT sort.
Parameters
----------
level : int or hashable, optional
Only return values from specified level (for MultiIndex).
If int, gets the level by integer position, else by level name.
Returns
-------
Index
Unique values in the index.
See Also
--------
unique : Numpy array of unique values in that column.
Series.unique : Return unique values of Series object.
Examples
--------
>>> idx = pd.Index([1, 1, 2, 3, 3])
>>> idx.unique()
Index([1, 2, 3], dtype='int64')
"""
if level is not None:
self._validate_index_level(level)
if self.is_unique:
return self._view()
result = super().unique()
return self._shallow_copy(result)
def drop_duplicates(self, *, keep: DropKeep = "first") -> Self:
"""
Return Index with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
Returns
-------
Index
A new Index object with the duplicate values removed.
See Also
--------
Series.drop_duplicates : Equivalent method on Series.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Index.duplicated : Related method on Index, indicating duplicate
Index values.
Examples
--------
Generate a pandas.Index with duplicate values.
>>> idx = pd.Index(["llama", "cow", "llama", "beetle", "llama", "hippo"])
The `keep` parameter controls which duplicate values are removed.
The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> idx.drop_duplicates(keep="first")
Index(['llama', 'cow', 'beetle', 'hippo'], dtype='object')
The value 'last' keeps the last occurrence for each set of duplicated
entries.
>>> idx.drop_duplicates(keep="last")
Index(['cow', 'beetle', 'llama', 'hippo'], dtype='object')
The value ``False`` discards all sets of duplicated entries.
>>> idx.drop_duplicates(keep=False)
Index(['cow', 'beetle', 'hippo'], dtype='object')
"""
if self.is_unique:
return self._view()
return super().drop_duplicates(keep=keep)
def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]:
"""
Indicate duplicate index values.
Duplicated values are indicated as ``True`` values in the resulting
array. Either all duplicates, all except the first, or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
The value or values in a set of duplicates to mark as missing.
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
np.ndarray[bool]
A numpy array of boolean values indicating duplicate index values.
See Also
--------
Series.duplicated : Equivalent method on pandas.Series.
DataFrame.duplicated : Equivalent method on pandas.DataFrame.
Index.drop_duplicates : Remove duplicate values from Index.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set to False and all others to True:
>>> idx = pd.Index(["llama", "cow", "llama", "beetle", "llama"])
>>> idx.duplicated()
array([False, False, True, False, True])
which is equivalent to
>>> idx.duplicated(keep="first")
array([False, False, True, False, True])
By using 'last', the last occurrence of each set of duplicated values
is set on False and all others on True:
>>> idx.duplicated(keep="last")
array([ True, False, True, False, False])
By setting keep on ``False``, all duplicates are True:
>>> idx.duplicated(keep=False)
array([ True, False, True, False, True])
"""
if self.is_unique:
# fastpath available bc we are immutable
return np.zeros(len(self), dtype=bool)
return self._duplicated(keep=keep)
# --------------------------------------------------------------------
# Arithmetic & Logical Methods
    def __iadd__(self, other):
        # alias for __add__: Index is immutable, so in-place addition just
        # rebinds the caller's name to a new Index.
        return self + other
@final
def __bool__(self) -> NoReturn:
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
# --------------------------------------------------------------------
# Set Operation Methods
def _get_reconciled_name_object(self, other):
"""
If the result of a set operation will be self,
return self, unless the name changes, in which
case make a shallow copy of self.
"""
name = get_op_result_name(self, other)
if self.name is not name:
return self.rename(name)
return self
@final
def _validate_sort_keyword(self, sort) -> None:
if sort not in [None, False, True]:
raise ValueError(
"The 'sort' keyword only takes the values of "
f"None, True, or False; {sort} was passed."
)
@final
def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]:
"""
With mismatched timezones, cast both to UTC.
"""
# Caller is responsible for checking
# `self.dtype != other.dtype`
if (
isinstance(self, ABCDatetimeIndex)
and isinstance(other, ABCDatetimeIndex)
and self.tz is not None
and other.tz is not None
):
# GH#39328, GH#45357, GH#60080
# If both timezones are the same, no need to convert to UTC
if self.tz == other.tz:
return self, other
else:
left = self.tz_convert("UTC")
right = other.tz_convert("UTC")
return left, right
return self, other
    @final
    def union(self, other, sort: bool | None = None):
        """
        Form the union of two Index objects.

        If the Index objects are incompatible, both Index objects will be
        cast to dtype('object') first.

        Parameters
        ----------
        other : Index or array-like
            Index or an array-like object containing elements to form the union
            with the original Index.
        sort : bool or None, default None
            Whether to sort the resulting Index.

            * None : Sort the result, except when

              1. `self` and `other` are equal.
              2. `self` or `other` has length 0.
              3. Some values in `self` or `other` cannot be compared.
                 A RuntimeWarning is issued in this case.

            * False : do not sort the result.
            * True : Sort the result (which may raise TypeError).

        Returns
        -------
        Index
            Returns a new Index object with all unique elements from both the original
            Index and the `other` Index.

        See Also
        --------
        Index.unique : Return unique values in the index.
        Index.intersection : Form the intersection of two Index objects.
        Index.difference : Return a new Index with elements of index not in `other`.

        Examples
        --------
        Union matching dtypes

        >>> idx1 = pd.Index([1, 2, 3, 4])
        >>> idx2 = pd.Index([3, 4, 5, 6])
        >>> idx1.union(idx2)
        Index([1, 2, 3, 4, 5, 6], dtype='int64')

        Union mismatched dtypes

        >>> idx1 = pd.Index(["a", "b", "c", "d"])
        >>> idx2 = pd.Index([1, 2, 3, 4])
        >>> idx1.union(idx2)
        Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object')

        MultiIndex case

        >>> idx1 = pd.MultiIndex.from_arrays(
        ...     [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]]
        ... )
        >>> idx2 = pd.MultiIndex.from_arrays(
        ...     [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]]
        ... )
        >>> idx1.union(idx2)
        MultiIndex([(1, 'Blue'),
                    (1,  'Red'),
                    (2, 'Blue'),
                    (2, 'Green'),
                    (2,  'Red'),
                    (3, 'Green'),
                    (3,  'Red')],
                   )
        >>> idx1.union(idx2, sort=False)
        MultiIndex([(1,   'Red'),
                    (1,  'Blue'),
                    (2,   'Red'),
                    (2,  'Blue'),
                    (3,   'Red'),
                    (3, 'Green'),
                    (2, 'Green')],
                   )
        """
        self._validate_sort_keyword(sort)
        self._assert_can_do_setop(other)
        other, result_name = self._convert_can_do_setop(other)

        if self.dtype != other.dtype:
            if (
                isinstance(self, ABCMultiIndex)
                and not is_object_dtype(_unpack_nested_dtype(other))
                and len(other) > 0
            ):
                raise NotImplementedError(
                    "Can only union MultiIndex with MultiIndex or Index of tuples, "
                    "try mi.to_flat_index().union(other) instead."
                )
            self, other = self._dti_setop_align_tzs(other, "union")

            # Mismatched dtypes: cast both sides to a common dtype, then
            # recurse so the matched-dtype path below handles the rest.
            dtype = self._find_common_type_compat(other)
            left = self.astype(dtype, copy=False)
            right = other.astype(dtype, copy=False)
            return left.union(right, sort=sort)

        elif not len(other) or self.equals(other):
            # NB: whether this (and the `if not len(self)` check below) come before
            #  or after the dtype equality check above affects the returned dtype
            result = self._get_reconciled_name_object(other)
            if sort is True:
                return result.sort_values()
            return result

        elif not len(self):
            result = other._get_reconciled_name_object(self)
            if sort is True:
                return result.sort_values()
            return result

        # General case: subclasses customize union behavior via _union.
        result = self._union(other, sort=sort)

        return self._wrap_setop_result(other, result)
    def _union(self, other: Index, sort: bool | None):
        """
        Specific union logic should go here. In subclasses, union behavior
        should be overwritten here rather than in `self.union`.

        Parameters
        ----------
        other : Index or array-like
        sort : False or None, default False
            Whether to sort the resulting index.

            * True : sort the result
            * False : do not sort the result.
            * None : sort the result, except when `self` and `other` are equal
              or when the values cannot be compared.

        Returns
        -------
        Index
        """
        lvals = self._values
        rvals = other._values

        if (
            sort in (None, True)
            and (self.is_unique or other.is_unique)
            and self._can_use_libjoin
            and other._can_use_libjoin
        ):
            # Both are monotonic and at least one is unique, so can use outer join
            #  (actually don't need either unique, but without this restriction
            #  test_union_same_value_duplicated_in_both fails)
            try:
                return self._outer_indexer(other)[0]
            except TypeError:
                # incomparable objects; should only be for object dtype
                value_list = list(lvals)

                # worth making this faster? a very unusual case
                value_set = set(lvals)
                value_list.extend(x for x in rvals if x not in value_set)
                # If objects are unorderable, we must have object dtype.
                return np.array(value_list, dtype=object)

        elif not other.is_unique:
            # other has duplicates
            result_dups = algos.union_with_duplicates(self, other)
            return _maybe_try_sort(result_dups, sort)

        # The rest of this method is analogous to Index._intersection_via_get_indexer

        # Self may have duplicates; other already checked as unique
        # find indexes of things in "other" that are not in "self"
        if self._index_as_unique:
            indexer = self.get_indexer(other)
            missing = (indexer == -1).nonzero()[0]
        else:
            missing = algos.unique1d(self.get_indexer_non_unique(other)[1])

        result: Index | MultiIndex | ArrayLike
        if self._is_multi:
            # Preserve MultiIndex to avoid losing dtypes
            result = self.append(other.take(missing))

        else:
            if len(missing) > 0:
                other_diff = rvals.take(missing)
                result = concat_compat((lvals, other_diff))
            else:
                result = lvals

        if not self.is_monotonic_increasing or not other.is_monotonic_increasing:
            # if both are monotonic then result should already be sorted
            result = _maybe_try_sort(result, sort)

        return result
@final
def _wrap_setop_result(self, other: Index, result) -> Index:
name = get_op_result_name(self, other)
if isinstance(result, Index):
if result.name != name:
result = result.rename(name)
else:
result = self._shallow_copy(result, name=name)
return result
    @final
    def intersection(self, other, sort: bool = False):
        # default sort keyword is different here from other setops intentionally
        #  done in GH#25063
        """
        Form the intersection of two Index objects.

        This returns a new Index with elements common to the index and `other`.

        Parameters
        ----------
        other : Index or array-like
            An Index or an array-like object containing elements to form the
            intersection with the original Index.
        sort : True, False or None, default False
            Whether to sort the resulting index.

            * None : sort the result, except when `self` and `other` are equal
              or when the values cannot be compared.
            * False : do not sort the result.
            * True : Sort the result (which may raise TypeError).

        Returns
        -------
        Index
            Returns a new Index object with elements common to both the original Index
            and the `other` Index.

        See Also
        --------
        Index.union : Form the union of two Index objects.
        Index.difference : Return a new Index with elements of index not in other.
        Index.isin : Return a boolean array where the index values are in values.

        Examples
        --------
        >>> idx1 = pd.Index([1, 2, 3, 4])
        >>> idx2 = pd.Index([3, 4, 5, 6])
        >>> idx1.intersection(idx2)
        Index([3, 4], dtype='int64')
        """
        self._validate_sort_keyword(sort)
        self._assert_can_do_setop(other)
        other, result_name = self._convert_can_do_setop(other)

        if self.dtype != other.dtype:
            self, other = self._dti_setop_align_tzs(other, "intersection")

        if self.equals(other):
            # Equal indexes: intersect trivially, deduplicating if needed.
            if not self.is_unique:
                result = self.unique()._get_reconciled_name_object(other)
            else:
                result = self._get_reconciled_name_object(other)
            if sort is True:
                result = result.sort_values()
            return result

        if len(self) == 0 or len(other) == 0:
            # fastpath; we need to be careful about having commutativity

            if self._is_multi or other._is_multi:
                # _convert_can_do_setop ensures that we have both or neither
                # We retain self.levels
                return self[:0].rename(result_name)

            dtype = self._find_common_type_compat(other)
            if self.dtype == dtype:
                # Slicing allows us to retain DTI/TDI.freq, RangeIndex

                # Note: self[:0] vs other[:0] affects
                #  1) which index's `freq` we get in DTI/TDI cases
                #     This may be a historical artifact, i.e. no documented
                #     reason for this choice.
                #  2) The `step` we get in RangeIndex cases
                if len(self) == 0:
                    return self[:0].rename(result_name)
                else:
                    return other[:0].rename(result_name)

            return Index([], dtype=dtype, name=result_name)

        elif not self._should_compare(other):
            # We can infer that the intersection is empty.
            if isinstance(self, ABCMultiIndex):
                return self[:0].rename(result_name)
            return Index([], name=result_name)

        elif self.dtype != other.dtype:
            # Cast both to a common dtype and recurse into the matched path.
            dtype = self._find_common_type_compat(other)
            this = self.astype(dtype, copy=False)
            other = other.astype(dtype, copy=False)
            return this.intersection(other, sort=sort)

        result = self._intersection(other, sort=sort)
        return self._wrap_intersection_result(other, result)
    def _intersection(self, other: Index, sort: bool = False):
        """
        intersection specialized to the case with matching dtypes.
        """
        if self._can_use_libjoin and other._can_use_libjoin:
            # Fast path: libjoin inner indexer on monotonic values.
            try:
                res_indexer, indexer, _ = self._inner_indexer(other)
            except TypeError:
                # non-comparable; should only be for object dtype
                pass
            else:
                # TODO: algos.unique1d should preserve DTA/TDA
                if is_numeric_dtype(self.dtype):
                    # This is faster, because Index.unique() checks for uniqueness
                    # before calculating the unique values.
                    res = algos.unique1d(res_indexer)
                else:
                    result = self.take(indexer)
                    res = result.drop_duplicates()  # type: ignore[assignment]
                return ensure_wrapped_if_datetimelike(res)

        # Fallback: generic get_indexer-based intersection.
        res_values = self._intersection_via_get_indexer(other, sort=sort)
        res_values = _maybe_try_sort(res_values, sort)
        return res_values
    def _wrap_intersection_result(self, other, result):
        """Hook for wrapping an intersection result in an Index."""
        # We will override for MultiIndex to handle empty results
        return self._wrap_setop_result(other, result)
    @final
    def _intersection_via_get_indexer(
        self, other: Index | MultiIndex, sort
    ) -> ArrayLike | MultiIndex:
        """
        Find the intersection of two Indexes using get_indexer.

        Returns
        -------
        np.ndarray or ExtensionArray or MultiIndex
            The returned array will be unique.
        """
        left_unique = self.unique()
        right_unique = other.unique()

        # even though we are unique, we need get_indexer_for for IntervalIndex
        indexer = left_unique.get_indexer_for(right_unique)

        # -1 marks values of `other` not found in `self`
        mask = indexer != -1

        taker = indexer.take(mask.nonzero()[0])
        if sort is False:
            # sort bc we want the elements in the same order they are in self
            # unnecessary in the case with sort=None bc we will sort later
            taker = np.sort(taker)

        result: MultiIndex | ExtensionArray | np.ndarray
        if isinstance(left_unique, ABCMultiIndex):
            # keep the MultiIndex wrapper so per-level dtypes are preserved
            result = left_unique.take(taker)
        else:
            result = left_unique.take(taker)._values
        return result
    @final
    def difference(self, other, sort: bool | None = None):
        """
        Return a new Index with elements of index not in `other`.

        This is the set difference of two Index objects.

        Parameters
        ----------
        other : Index or array-like
            Index object or an array-like object containing elements to be compared
            with the elements of the original Index.
        sort : bool or None, default None
            Whether to sort the resulting index. By default, the
            values are attempted to be sorted, but any TypeError from
            incomparable elements is caught by pandas.

            * None : Attempt to sort the result, but catch any TypeErrors
              from comparing incomparable elements.
            * False : Do not sort the result.
            * True : Sort the result (which may raise TypeError).

        Returns
        -------
        Index
            Returns a new Index object containing elements that are in the original
            Index but not in the `other` Index.

        See Also
        --------
        Index.symmetric_difference : Compute the symmetric difference of two Index
            objects.
        Index.intersection : Form the intersection of two Index objects.

        Examples
        --------
        >>> idx1 = pd.Index([2, 1, 3, 4])
        >>> idx2 = pd.Index([3, 4, 5, 6])
        >>> idx1.difference(idx2)
        Index([1, 2], dtype='int64')
        >>> idx1.difference(idx2, sort=False)
        Index([2, 1], dtype='int64')
        """
        self._validate_sort_keyword(sort)
        self._assert_can_do_setop(other)
        other, result_name = self._convert_can_do_setop(other)

        # Note: we do NOT call _dti_setop_align_tzs here, as there
        #  is no requirement that .difference be commutative, so it does
        #  not cast to object.

        if self.equals(other):
            # Note: we do not (yet) sort even if sort=None GH#24959
            return self[:0].rename(result_name)

        if len(other) == 0:
            # Note: we do not (yet) sort even if sort=None GH#24959
            result = self.unique().rename(result_name)
            if sort is True:
                return result.sort_values()
            return result

        if not self._should_compare(other):
            # Nothing matches -> difference is everything
            result = self.unique().rename(result_name)
            if sort is True:
                return result.sort_values()
            return result

        result = self._difference(other, sort=sort)
        return self._wrap_difference_result(other, result)
    def _difference(self, other, sort):
        """Core set-difference once fast paths have been ruled out."""
        # overridden by RangeIndex
        this = self
        if isinstance(self, ABCCategoricalIndex) and self.hasnans and other.hasnans:
            # NOTE(review): presumably this drops our NaNs so that a NaN shared
            # with `other` is excluded from the result — confirm against tests.
            this = this.dropna()
        other = other.unique()
        # keep positions of `this` whose value is absent from `other`
        the_diff = this[other.get_indexer_for(this) == -1]
        the_diff = the_diff if this.is_unique else the_diff.unique()
        the_diff = _maybe_try_sort(the_diff, sort)
        return the_diff
    def _wrap_difference_result(self, other, result):
        """Hook for wrapping a difference result in an Index."""
        # We will override for MultiIndex to handle empty results
        return self._wrap_setop_result(other, result)
    def symmetric_difference(
        self,
        other,
        result_name: abc.Hashable | None = None,
        sort: bool | None = None,
    ):
        """
        Compute the symmetric difference of two Index objects.

        Parameters
        ----------
        other : Index or array-like
            Index or an array-like object with elements to compute the symmetric
            difference with the original Index.
        result_name : str
            A string representing the name of the resulting Index, if desired.
        sort : bool or None, default None
            Whether to sort the resulting index. By default, the
            values are attempted to be sorted, but any TypeError from
            incomparable elements is caught by pandas.

            * None : Attempt to sort the result, but catch any TypeErrors
              from comparing incomparable elements.
            * False : Do not sort the result.
            * True : Sort the result (which may raise TypeError).

        Returns
        -------
        Index
            Returns a new Index object containing elements that appear in either the
            original Index or the `other` Index, but not both.

        See Also
        --------
        Index.difference : Return a new Index with elements of index not in other.
        Index.union : Form the union of two Index objects.
        Index.intersection : Form the intersection of two Index objects.

        Notes
        -----
        ``symmetric_difference`` contains elements that appear in either
        ``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
        ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
        dropped.

        Examples
        --------
        >>> idx1 = pd.Index([1, 2, 3, 4])
        >>> idx2 = pd.Index([2, 3, 4, 5])
        >>> idx1.symmetric_difference(idx2)
        Index([1, 5], dtype='int64')
        """
        self._validate_sort_keyword(sort)
        self._assert_can_do_setop(other)
        other, result_name_update = self._convert_can_do_setop(other)
        if result_name is None:
            result_name = result_name_update

        if self.dtype != other.dtype:
            self, other = self._dti_setop_align_tzs(other, "symmetric_difference")

        if not self._should_compare(other):
            # Nothing comparable: symmetric difference degenerates to union.
            return self.union(other, sort=sort).rename(result_name)

        elif self.dtype != other.dtype:
            # Cast both sides to a common dtype, then recurse.
            dtype = self._find_common_type_compat(other)
            this = self.astype(dtype, copy=False)
            that = other.astype(dtype, copy=False)
            return this.symmetric_difference(that, sort=sort).rename(result_name)

        this = self.unique()
        other = other.unique()
        indexer = this.get_indexer_for(other)

        # {this} minus {other}
        common_indexer = indexer.take((indexer != -1).nonzero()[0])
        left_indexer = np.setdiff1d(
            np.arange(this.size), common_indexer, assume_unique=True
        )
        left_diff = this.take(left_indexer)

        # {other} minus {this}
        right_indexer = (indexer == -1).nonzero()[0]
        right_diff = other.take(right_indexer)

        res_values = left_diff.append(right_diff)
        result = _maybe_try_sort(res_values, sort)

        if not self._is_multi:
            return Index(result, name=result_name, dtype=res_values.dtype)
        else:
            left_diff = cast("MultiIndex", left_diff)
            if len(result) == 0:
                # result might be an Index, if other was an Index
                return left_diff.remove_unused_levels().set_names(result_name)
            return result.set_names(result_name)
@final
def _assert_can_do_setop(self, other) -> bool:
if not is_list_like(other):
raise TypeError("Input must be Index or array-like")
return True
def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]:
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = get_op_result_name(self, other)
return other, result_name
# --------------------------------------------------------------------
# Indexing Methods
    def get_loc(self, key):
        """
        Get integer location, slice or boolean mask for requested label.

        Parameters
        ----------
        key : label
            The key to check its location if it is present in the index.

        Returns
        -------
        int if unique index, slice if monotonic index, else mask
            Integer location, slice or boolean mask.

        Raises
        ------
        KeyError
            If the key is not present in the index.
        InvalidIndexError
            If the key is a slice or contains a slice (not a valid label).

        See Also
        --------
        Index.get_slice_bound : Calculate slice bound that corresponds to
            given label.
        Index.get_indexer : Computes indexer and mask for new index given
            the current index.
        Index.get_non_unique : Returns indexer and masks for new index given
            the current index.
        Index.get_indexer_for : Returns an indexer even when non-unique.

        Examples
        --------
        >>> unique_index = pd.Index(list("abc"))
        >>> unique_index.get_loc("b")
        1

        >>> monotonic_index = pd.Index(list("abbc"))
        >>> monotonic_index.get_loc("b")
        slice(1, 3, None)

        >>> non_monotonic_index = pd.Index(list("abcb"))
        >>> non_monotonic_index.get_loc("b")
        array([False,  True, False,  True])
        """
        casted_key = self._maybe_cast_indexer(key)
        try:
            return self._engine.get_loc(casted_key)
        except KeyError as err:
            # Slices are never valid labels; surface that as InvalidIndexError
            # rather than a plain missing-key error.
            if isinstance(casted_key, slice) or (
                isinstance(casted_key, abc.Iterable)
                and any(isinstance(x, slice) for x in casted_key)
            ):
                raise InvalidIndexError(key) from err
            # Re-raise with the original (uncast) key for a clearer message.
            raise KeyError(key) from err
        except TypeError:
            # If we have a listlike key, _check_indexing_error will raise
            #  InvalidIndexError. Otherwise we fall through and re-raise
            #  the TypeError.
            self._check_indexing_error(key)
            raise
    @final
    def get_indexer(
        self,
        target,
        method: ReindexMethod | None = None,
        limit: int | None = None,
        tolerance=None,
    ) -> npt.NDArray[np.intp]:
        """
        Compute indexer and mask for new index given the current index.

        The indexer should be then used as an input to ndarray.take to align the
        current data to the new index.

        Parameters
        ----------
        target : Index
            An iterable containing the values to be used for computing indexer.
        method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
            * default: exact matches only.
            * pad / ffill: find the PREVIOUS index value if no exact match.
            * backfill / bfill: use NEXT index value if no exact match
            * nearest: use the NEAREST index value if no exact match. Tied
              distances are broken by preferring the larger index value.
        limit : int, optional
            Maximum number of consecutive labels in ``target`` to match for
            inexact matches.
        tolerance : optional
            Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
            satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
            Tolerance may be a scalar value, which applies the same tolerance
            to all values, or list-like, which applies variable tolerance per
            element. List-like includes list, tuple, array, Series, and must be
            the same size as the index and its dtype must exactly match the
            index's type.

        Returns
        -------
        np.ndarray[np.intp]
            Integers from 0 to n - 1 indicating that the index at these
            positions matches the corresponding target values. Missing values
            in the target are marked by -1.

        See Also
        --------
        Index.get_indexer_for : Returns an indexer even when non-unique.
        Index.get_indexer_non_unique : Returns indexer and masks for new index
            given the current index.

        Notes
        -----
        Returns -1 for unmatched values, for further explanation see the
        example below.

        Examples
        --------
        >>> index = pd.Index(["c", "a", "b"])
        >>> index.get_indexer(["a", "b", "x"])
        array([ 1,  2, -1])

        Notice that the return value is an array of locations in ``index``
        and ``x`` is marked by -1, as it is not in ``index``.
        """
        method = clean_reindex_fill_method(method)
        # keep the raw target so genuine NaNs can be told apart from NaNs
        # introduced by casting below (GH#45361)
        orig_target = target
        target = self._maybe_cast_listlike_indexer(target)

        self._check_indexing_method(method, limit, tolerance)

        if not self._index_as_unique:
            raise InvalidIndexError(self._requires_unique_msg)

        if len(target) == 0:
            return np.array([], dtype=np.intp)

        if not self._should_compare(target) and not self._should_partial_index(target):
            # IntervalIndex get special treatment bc numeric scalars can be
            # matched to Interval scalars
            return self._get_indexer_non_comparable(target, method=method, unique=True)

        if isinstance(self.dtype, CategoricalDtype):
            # _maybe_cast_listlike_indexer ensures target has our dtype
            # (could improve perf by doing _should_compare check earlier?)
            assert self.dtype == target.dtype

            indexer = self._engine.get_indexer(target.codes)
            if self.hasnans and target.hasnans:
                # After _maybe_cast_listlike_indexer, target elements which do not
                # belong to some category are changed to NaNs
                # Mask to track actual NaN values compared to inserted NaN values
                # GH#45361
                target_nans = isna(orig_target)
                loc = self.get_loc(np.nan)
                mask = target.isna()
                indexer[target_nans] = loc
                indexer[mask & ~target_nans] = -1
            return indexer

        if isinstance(target.dtype, CategoricalDtype):
            # potential fastpath
            # get an indexer for unique categories then propagate to codes via take_nd
            # get_indexer instead of _get_indexer needed for MultiIndex cases
            # e.g. test_append_different_columns_types
            categories_indexer = self.get_indexer(target.categories)

            indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1)

            if (not self._is_multi and self.hasnans) and target.hasnans:
                # Exclude MultiIndex because hasnans raises NotImplementedError
                # we should only get here if we are unique, so loc is an integer
                # GH#41934
                loc = self.get_loc(np.nan)
                mask = target.isna()
                indexer[mask] = loc

            return ensure_platform_int(indexer)

        # e.g. an object/datetime mismatch may be reconciled; if so, redo the
        # lookup on the reconciled pair.
        pself, ptarget = self._maybe_downcast_for_indexing(target)
        if pself is not self or ptarget is not target:
            return pself.get_indexer(
                ptarget, method=method, limit=limit, tolerance=tolerance
            )

        if self.dtype == target.dtype and self.equals(target):
            # Only call equals if we have same dtype to avoid inference/casting
            return np.arange(len(target), dtype=np.intp)

        if self.dtype != target.dtype and not self._should_partial_index(target):
            # _should_partial_index e.g. IntervalIndex with numeric scalars
            # that can be matched to Interval scalars.
            dtype = self._find_common_type_compat(target)

            this = self.astype(dtype, copy=False)
            target = target.astype(dtype, copy=False)
            return this._get_indexer(
                target, method=method, limit=limit, tolerance=tolerance
            )

        return self._get_indexer(target, method, limit, tolerance)
def _get_indexer(
self,
target: Index,
method: str_t | None = None,
limit: int | None = None,
tolerance=None,
) -> npt.NDArray[np.intp]:
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
if method in ["pad", "backfill"]:
indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == "nearest":
indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
if target._is_multi and self._is_multi:
engine = self._engine
# error: Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]"
# has no attribute "_extract_level_codes"
tgt_values = engine._extract_level_codes( # type: ignore[union-attr]
target
)
else:
tgt_values = target._get_engine_target()
indexer = self._engine.get_indexer(tgt_values)
return ensure_platform_int(indexer)
@final
def _should_partial_index(self, target: Index) -> bool:
"""
Should we attempt partial-matching indexing?
"""
if isinstance(self.dtype, IntervalDtype):
if isinstance(target.dtype, IntervalDtype):
return False
# "Index" has no attribute "left"
return self.left._should_compare(target) # type: ignore[attr-defined]
return False
@final
def _check_indexing_method(
self,
method: str_t | None,
limit: int | None = None,
tolerance=None,
) -> None:
"""
Raise if we have a get_indexer `method` that is not supported or valid.
"""
if method not in [None, "bfill", "backfill", "pad", "ffill", "nearest"]:
# in practice the clean_reindex_fill_method call would raise
# before we get here
raise ValueError("Invalid fill method") # pragma: no cover
if self._is_multi:
if method == "nearest":
raise NotImplementedError(
"method='nearest' not implemented yet "
"for MultiIndex; see GitHub issue 9365"
)
if method in ("pad", "backfill"):
if tolerance is not None:
raise NotImplementedError(
"tolerance not implemented yet for MultiIndex"
)
if isinstance(self.dtype, (IntervalDtype, CategoricalDtype)):
# GH#37871 for now this is only for IntervalIndex and CategoricalIndex
if method is not None:
raise NotImplementedError(
f"method {method} not yet implemented for {type(self).__name__}"
)
if method is None:
if tolerance is not None:
raise ValueError(
"tolerance argument only valid if doing pad, "
"backfill or nearest reindexing"
)
if limit is not None:
raise ValueError(
"limit argument only valid if doing pad, "
"backfill or nearest reindexing"
)
def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray:
# override this method on subclasses
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError("list-like tolerance size must match target index size")
elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number):
if tolerance.ndim > 0:
raise ValueError(
f"tolerance argument for {type(self).__name__} with dtype "
f"{self.dtype} must contain numeric elements if it is list type"
)
raise ValueError(
f"tolerance argument for {type(self).__name__} with dtype {self.dtype} "
f"must be numeric if it is a scalar: {tolerance!r}"
)
return tolerance
    @final
    def _get_fill_indexer(
        self, target: Index, method: str_t, limit: int | None = None, tolerance=None
    ) -> npt.NDArray[np.intp]:
        """
        Pad/backfill indexer for ``target`` against this index.

        Fast C-level pad/backfill is used when both sides are monotonic
        increasing ndarrays; otherwise falls back to a searchsorted-based
        implementation.
        """
        if self._is_multi:
            if not (self.is_monotonic_increasing or self.is_monotonic_decreasing):
                raise ValueError("index must be monotonic increasing or decreasing")
            # Encode both MultiIndexes through a shared engine so level codes
            # are comparable, then recurse on the flat encoded indexes.
            encoded = self.append(target)._engine.values  # type: ignore[union-attr]
            self_encoded = Index(encoded[: len(self)])
            target_encoded = Index(encoded[len(self) :])
            return self_encoded._get_fill_indexer(
                target_encoded, method, limit, tolerance
            )

        if self.is_monotonic_increasing and target.is_monotonic_increasing:
            target_values = target._get_engine_target()
            own_values = self._get_engine_target()
            if not isinstance(target_values, np.ndarray) or not isinstance(
                own_values, np.ndarray
            ):
                raise NotImplementedError

            if method == "pad":
                indexer = libalgos.pad(own_values, target_values, limit=limit)
            else:
                # i.e. "backfill"
                indexer = libalgos.backfill(own_values, target_values, limit=limit)
        else:
            indexer = self._get_fill_indexer_searchsorted(target, method, limit)
        if tolerance is not None and len(self):
            # Drop (set to -1) matches farther than `tolerance` from target.
            indexer = self._filter_indexer_tolerance(target, indexer, tolerance)
        return indexer
    @final
    def _get_fill_indexer_searchsorted(
        self, target: Index, method: str_t, limit: int | None = None
    ) -> npt.NDArray[np.intp]:
        """
        Fallback pad/backfill get_indexer that works for monotonic decreasing
        indexes and non-monotonic targets.
        """
        if limit is not None:
            raise ValueError(
                f"limit argument for {method!r} method only well-defined "
                "if index and target are monotonic"
            )

        side: Literal["left", "right"] = "left" if method == "pad" else "right"

        # find exact matches first (this simplifies the algorithm)
        indexer = self.get_indexer(target)
        nonexact = indexer == -1
        # fill only the positions with no exact match, in place
        indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side)
        if side == "left":
            # searchsorted returns "indices into a sorted array such that,
            # if the corresponding elements in v were inserted before the
            # indices, the order of a would be preserved".
            # Thus, we need to subtract 1 to find values to the left.
            indexer[nonexact] -= 1
            # This also mapped not found values (values of 0 from
            # np.searchsorted) to -1, which conveniently is also our
            # sentinel for missing values
        else:
            # Mark indices to the right of the largest value as not found
            indexer[indexer == len(self)] = -1
        return indexer
    @final
    def _get_nearest_indexer(
        self, target: Index, limit: int | None, tolerance
    ) -> npt.NDArray[np.intp]:
        """
        Get the indexer for the nearest index labels; requires an index with
        values that can be subtracted from each other (e.g., not strings or
        tuples).
        """
        if not len(self):
            # Empty index: pad yields all -1, which is the correct answer.
            return self._get_fill_indexer(target, "pad")

        left_indexer = self.get_indexer(target, "pad", limit=limit)
        right_indexer = self.get_indexer(target, "backfill", limit=limit)

        left_distances = self._difference_compat(target, left_indexer)
        right_distances = self._difference_compat(target, right_indexer)

        # Strict `lt` means a distance tie picks the backfill neighbor, i.e.
        # the larger index value (per the get_indexer docstring); `le`
        # presumably mirrors that preference for a non-increasing index —
        # TODO(review): confirm the decreasing-index tie behavior.
        op = operator.lt if self.is_monotonic_increasing else operator.le

        indexer = np.where(
            # error: Argument 1&2 has incompatible type "Union[ExtensionArray,
            # ndarray[Any, Any]]"; expected "Union[SupportsDunderLE,
            # SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]"
            op(left_distances, right_distances)  # type: ignore[arg-type]
            | (right_indexer == -1),
            left_indexer,
            right_indexer,
        )
        if tolerance is not None:
            indexer = self._filter_indexer_tolerance(target, indexer, tolerance)
        return indexer
@final
def _filter_indexer_tolerance(
self,
target: Index,
indexer: npt.NDArray[np.intp],
tolerance,
) -> npt.NDArray[np.intp]:
distance = self._difference_compat(target, indexer)
return np.where(distance <= tolerance, indexer, -1)
@final
def _difference_compat(
self, target: Index, indexer: npt.NDArray[np.intp]
) -> ArrayLike:
# Compatibility for PeriodArray, for which __sub__ returns an ndarray[object]
# of DateOffset objects, which do not support __abs__ (and would be slow
# if they did)
if isinstance(self.dtype, PeriodDtype):
# Note: we only get here with matching dtypes
own_values = cast("PeriodArray", self._data)._ndarray
target_values = cast("PeriodArray", target._data)._ndarray
diff = own_values[indexer] - target_values
else:
# error: Unsupported left operand type for - ("ExtensionArray")
diff = self._values[indexer] - target._values # type: ignore[operator]
return abs(diff)
# --------------------------------------------------------------------
# Indexer Conversion Methods
@final
def _validate_positional_slice(self, key: slice) -> None:
"""
For positional indexing, a slice must have either int or None
for each of start, stop, and step.
"""
self._validate_indexer("positional", key.start, "iloc")
self._validate_indexer("positional", key.stop, "iloc")
self._validate_indexer("positional", key.step, "iloc")
    def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]):
        """
        Convert a slice indexer.

        By definition, these are labels unless 'iloc' is passed in.
        Floats are not allowed as the start, step, or stop of the slice.

        Parameters
        ----------
        key : label of the slice bound
        kind : {'loc', 'getitem'}
        """
        # potentially cast the bounds to integers
        start, stop, step = key.start, key.stop, key.step

        # figure out if this is a positional indexer
        is_index_slice = is_valid_positional_slice(key)

        # TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able
        # to simplify this.

        if kind == "getitem":
            # called from the getitem slicers, validate that we are in fact integers
            if is_index_slice:
                # In this case the _validate_indexer checks below are redundant
                return key
            elif self.dtype.kind in "iu":
                # Note: these checks are redundant if we know is_index_slice
                self._validate_indexer("slice", key.start, "getitem")
                self._validate_indexer("slice", key.stop, "getitem")
                self._validate_indexer("slice", key.step, "getitem")
                return key

        # convert the slice to an indexer here; checking that the user didn't
        # pass a positional slice to loc
        is_positional = is_index_slice and self._should_fallback_to_positional

        # if we are mixed and have integers
        if is_positional:
            try:
                # Validate start & stop
                if start is not None:
                    self.get_loc(start)
                if stop is not None:
                    self.get_loc(stop)
                # Both bounds resolve as labels, so treat it as label-based.
                is_positional = False
            except KeyError:
                pass

        if com.is_null_slice(key):
            # It doesn't matter if we are positional or label based
            indexer = key
        elif is_positional:
            if kind == "loc":
                # GH#16121, GH#24612, GH#31810
                raise TypeError(
                    "Slicing a positional slice with .loc is not allowed, "
                    "Use .loc with labels or .iloc with positions instead.",
                )
            indexer = key
        else:
            indexer = self.slice_indexer(start, stop, step)

        return indexer
@final
def _raise_invalid_indexer(
self,
form: Literal["slice", "positional"],
key,
reraise: lib.NoDefault | None | Exception = lib.no_default,
) -> None:
"""
Raise consistent invalid indexer message.
"""
msg = (
f"cannot do {form} indexing on {type(self).__name__} with these "
f"indexers [{key}] of type {type(key).__name__}"
)
if reraise is not lib.no_default:
raise TypeError(msg) from reraise
raise TypeError(msg)
# --------------------------------------------------------------------
# Reindex Methods
@final
def _validate_can_reindex(self, indexer: np.ndarray) -> None:
"""
Check if we are allowing reindexing with this particular indexer.
Parameters
----------
indexer : an integer ndarray
Raises
------
ValueError if its a duplicate axis
"""
# trying to reindex on an axis with duplicates
if not self._index_as_unique and len(indexer):
raise ValueError("cannot reindex on an axis with duplicate labels")
    def reindex(
        self,
        target,
        method: ReindexMethod | None = None,
        level=None,
        limit: int | None = None,
        tolerance: float | None = None,
    ) -> tuple[Index, npt.NDArray[np.intp] | None]:
        """
        Create index with target's values.

        Parameters
        ----------
        target : an iterable
            An iterable containing the values to be used for creating the new index.
        method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
            * default: exact matches only.
            * pad / ffill: find the PREVIOUS index value if no exact match.
            * backfill / bfill: use NEXT index value if no exact match
            * nearest: use the NEAREST index value if no exact match. Tied
              distances are broken by preferring the larger index value.
        level : int, optional
            Level of multiindex.
        limit : int, optional
            Maximum number of consecutive labels in ``target`` to match for
            inexact matches.
        tolerance : int, float, or list-like, optional
            Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
            satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
            Tolerance may be a scalar value, which applies the same tolerance
            to all values, or list-like, which applies variable tolerance per
            element. List-like includes list, tuple, array, Series, and must be
            the same size as the index and its dtype must exactly match the
            index's type.

        Returns
        -------
        new_index : pd.Index
            Resulting index.
        indexer : np.ndarray[np.intp] or None
            Indices of output values in original index.

        Raises
        ------
        TypeError
            If ``method`` passed along with ``level``.
        ValueError
            If non-unique multi-index
        ValueError
            If non-unique index and ``method`` or ``limit`` passed.

        See Also
        --------
        Series.reindex : Conform Series to new index with optional filling logic.
        DataFrame.reindex : Conform DataFrame to new index with optional filling logic.

        Examples
        --------
        >>> idx = pd.Index(["car", "bike", "train", "tractor"])
        >>> idx
        Index(['car', 'bike', 'train', 'tractor'], dtype='object')
        >>> idx.reindex(["car", "bike"])
        (Index(['car', 'bike'], dtype='object'), array([0, 1]))
        """
        # GH6552: preserve names when reindexing to non-named target
        # (i.e. neither Index nor Series).
        preserve_names = not hasattr(target, "name")

        # GH7774: preserve dtype/tz if target is empty and not an Index.
        if is_iterator(target):
            target = list(target)
        if not isinstance(target, Index) and len(target) == 0:
            if level is not None and self._is_multi:
                # Empty slice of the requested level keeps that level's dtype.
                # "Index" has no attribute "levels"; maybe "nlevels"?
                idx = self.levels[level]  # type: ignore[attr-defined]
            else:
                idx = self
            target = idx[:0]
        else:
            target = ensure_index(target)

        if level is not None and (
            isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex)
        ):
            if method is not None:
                raise TypeError("Fill method not supported if level passed")

            # TODO: tests where passing `keep_order=not self._is_multi`
            # makes a difference for non-MultiIndex case
            target, indexer, _ = self._join_level(
                target, level, how="right", keep_order=not self._is_multi
            )
        else:
            if self.equals(target):
                # Identical labels: no realignment needed.
                indexer = None
            else:
                if self._index_as_unique:
                    indexer = self.get_indexer(
                        target, method=method, limit=limit, tolerance=tolerance
                    )
                elif self._is_multi:
                    raise ValueError("cannot handle a non-unique multi-index!")
                elif not self.is_unique:
                    # GH#42568
                    raise ValueError("cannot reindex on an axis with duplicate labels")
                else:
                    indexer, _ = self.get_indexer_non_unique(target)

        target = self._wrap_reindex_result(target, indexer, preserve_names)
        return target, indexer
def _wrap_reindex_result(self, target, indexer, preserve_names: bool):
target = self._maybe_preserve_names(target, preserve_names)
return target
def _maybe_preserve_names(self, target: IndexT, preserve_names: bool) -> IndexT:
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy(deep=False)
target.name = self.name
return target
    @final
    def _reindex_non_unique(
        self, target: Index
    ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]:
        """
        Create a new index with target's values (move/add/delete values as
        necessary) use with non-unique Index and a possibly non-unique target.

        Parameters
        ----------
        target : an iterable

        Returns
        -------
        new_index : pd.Index
            Resulting index.
        indexer : np.ndarray[np.intp]
            Indices of output values in original index.
        new_indexer : np.ndarray[np.intp] or None
        """
        target = ensure_index(target)
        if len(target) == 0:
            # GH#13691
            return self[:0], np.array([], dtype=np.intp), None

        indexer, missing = self.get_indexer_non_unique(target)
        # `check` marks target positions that found a match in self
        check = indexer != -1
        new_labels: Index | np.ndarray = self.take(indexer[check])
        new_indexer = None

        if len(missing):
            length = np.arange(len(indexer), dtype=np.intp)

            missing = ensure_platform_int(missing)
            missing_labels = target.take(missing)
            missing_indexer = length[~check]
            cur_labels = self.take(indexer[check]).values
            cur_indexer = length[check]

            # Interleave matched and missing labels back into target order.
            # Index constructor below will do inference
            new_labels = np.empty((len(indexer),), dtype=object)
            new_labels[cur_indexer] = cur_labels
            new_labels[missing_indexer] = missing_labels

            # GH#38906
            if not len(self):
                new_indexer = np.arange(0, dtype=np.intp)

            # a unique indexer
            elif target.is_unique:
                # see GH5553, make sure we use the right indexer
                new_indexer = np.arange(len(indexer), dtype=np.intp)
                new_indexer[cur_indexer] = np.arange(len(cur_labels))
                new_indexer[missing_indexer] = -1

            # we have a non_unique selector, need to use the original
            # indexer here
            else:
                # need to retake to have the same size as the indexer
                indexer[~check] = -1

                # reset the new indexer to account for the new size
                new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp)
                new_indexer[~check] = -1

        if not isinstance(self, ABCMultiIndex):
            new_index = Index(new_labels, name=self.name)
        else:
            new_index = type(self).from_tuples(new_labels, names=self.names)
        return new_index, indexer, new_indexer
# --------------------------------------------------------------------
# Join Methods
@overload
def join(
self,
other: Index,
*,
how: JoinHow = ...,
level: Level = ...,
return_indexers: Literal[True],
sort: bool = ...,
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ...
@overload
def join(
self,
other: Index,
*,
how: JoinHow = ...,
level: Level = ...,
return_indexers: Literal[False] = ...,
sort: bool = ...,
) -> Index: ...
@overload
def join(
self,
other: Index,
*,
how: JoinHow = ...,
level: Level = ...,
return_indexers: bool = ...,
sort: bool = ...,
) -> (
Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]
): ...
    @final
    @_maybe_return_indexers
    def join(
        self,
        other: Index,
        *,
        how: JoinHow = "left",
        level: Level | None = None,
        return_indexers: bool = False,
        sort: bool = False,
    ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
        """
        Compute join_index and indexers to conform data structures to the new index.

        Parameters
        ----------
        other : Index
            The other index on which join is performed.
        how : {'left', 'right', 'inner', 'outer'}
        level : int or level name, default None
            It is either the integer position or the name of the level.
        return_indexers : bool, default False
            Whether to return the indexers or not for both the index objects.
        sort : bool, default False
            Sort the join keys lexicographically in the result Index. If False,
            the order of the join keys depends on the join type (how keyword).

        Returns
        -------
        join_index, (left_indexer, right_indexer)
            The new index.

        See Also
        --------
        DataFrame.join : Join columns with `other` DataFrame either on index
            or on a key.
        DataFrame.merge : Merge DataFrame or named Series objects with a
            database-style join.

        Examples
        --------
        >>> idx1 = pd.Index([1, 2, 3])
        >>> idx2 = pd.Index([4, 5, 6])
        >>> idx1.join(idx2, how="outer")
        Index([1, 2, 3, 4, 5, 6], dtype='int64')
        >>> idx1.join(other=idx2, how="outer", return_indexers=True)
        (Index([1, 2, 3, 4, 5, 6], dtype='int64'),
        array([ 0,  1,  2, -1, -1, -1]), array([-1, -1, -1,  0,  1,  2]))
        """
        if not isinstance(other, Index):
            warnings.warn(
                f"Passing {type(other).__name__} to {type(self).__name__}.join "
                "is deprecated and will raise in a future version. "
                "Pass an Index instead.",
                Pandas4Warning,
                stacklevel=find_stack_level(),
            )
            other = ensure_index(other)
        # outer joins are always sorted
        sort = sort or how == "outer"

        if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex):
            if (self.tz is None) ^ (other.tz is None):
                # Raise instead of casting to object below.
                raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex")

        if not self._is_multi and not other._is_multi:
            # We have specific handling for MultiIndex below
            pself, pother = self._maybe_downcast_for_indexing(other)
            if pself is not self or pother is not other:
                return pself.join(
                    pother, how=how, level=level, return_indexers=True, sort=sort
                )

        # try to figure out the join level
        # GH3662
        if level is None and (self._is_multi or other._is_multi):
            # have the same levels/names so a simple join
            if self.names == other.names:
                pass
            else:
                return self._join_multi(other, how=how)

        # join on the level
        if level is not None and (self._is_multi or other._is_multi):
            return self._join_level(other, level, how=how)

        if len(self) == 0 or len(other) == 0:
            try:
                return self._join_empty(other, how, sort)
            except TypeError:
                # object dtype; non-comparable objects
                pass

        if self.dtype != other.dtype:
            # Align to a common dtype, then join on equal dtypes.
            dtype = self._find_common_type_compat(other)
            this = self.astype(dtype, copy=False)
            other = other.astype(dtype, copy=False)
            return this.join(other, how=how, return_indexers=True)
        elif (
            isinstance(self, ABCCategoricalIndex)
            and isinstance(other, ABCCategoricalIndex)
            and not self.ordered
            and not self.categories.equals(other.categories)
        ):
            # dtypes are "equal" but categories are in different order
            other = Index(other._values.reorder_categories(self.categories))

        _validate_join_method(how)

        # Fastpath: monotonic, libjoin-compatible inputs with at least one
        # unique side can use the C merge-join implementation.
        if (
            self.is_monotonic_increasing
            and other.is_monotonic_increasing
            and self._can_use_libjoin
            and other._can_use_libjoin
            and (self.is_unique or other.is_unique)
        ):
            try:
                return self._join_monotonic(other, how=how)
            except TypeError:
                # object dtype; non-comparable objects
                pass
        elif not self.is_unique or not other.is_unique:
            return self._join_non_unique(other, how=how, sort=sort)

        return self._join_via_get_indexer(other, how, sort)
def _join_empty(
self, other: Index, how: JoinHow, sort: bool
) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
assert len(self) == 0 or len(other) == 0
_validate_join_method(how)
lidx: np.ndarray | None
ridx: np.ndarray | None
if len(other):
how = cast(JoinHow, {"left": "right", "right": "left"}.get(how, how))
join_index, ridx, lidx = other._join_empty(self, how, sort)
elif how in ["left", "outer"]:
if sort and not self.is_monotonic_increasing:
lidx = self.argsort()
join_index = self.take(lidx)
else:
lidx = None
join_index = self._view()
ridx = np.broadcast_to(np.intp(-1), len(join_index))
else:
join_index = other._view()
lidx = np.array([], dtype=np.intp)
ridx = None
return join_index, lidx, ridx
    @final
    def _join_via_get_indexer(
        self, other: Index, how: JoinHow, sort: bool
    ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
        # Fallback if we do not have any fastpaths available based on
        # uniqueness/monotonicity

        # Note: at this point we have checked matching dtypes

        lindexer: npt.NDArray[np.intp] | None
        rindexer: npt.NDArray[np.intp] | None

        if how == "left":
            if sort:
                join_index, lindexer = self.sort_values(return_indexer=True)
                rindexer = other.get_indexer_for(join_index)
                return join_index, lindexer, rindexer
            else:
                join_index = self
        elif how == "right":
            if sort:
                join_index, rindexer = other.sort_values(return_indexer=True)
                lindexer = self.get_indexer_for(join_index)
                return join_index, lindexer, rindexer
            else:
                join_index = other
        elif how == "inner":
            join_index = self.intersection(other, sort=sort)
        elif how == "outer":
            try:
                join_index = self.union(other, sort=sort)
            except TypeError:
                # Non-comparable objects: build the unsorted union, then sort
                # best-effort.
                join_index = self.union(other)
                try:
                    join_index = _maybe_try_sort(join_index, sort)
                except TypeError:
                    pass

        names = other.names if how == "right" else self.names
        if join_index.names != names:
            join_index = join_index.set_names(names)

        # Identity check: when the result IS one of the inputs, that side
        # needs no indexer (None signals "take as-is").
        if join_index is self:
            lindexer = None
        else:
            lindexer = self.get_indexer_for(join_index)
        if join_index is other:
            rindexer = None
        else:
            rindexer = other.get_indexer_for(join_index)
        return join_index, lindexer, rindexer
    @final
    def _join_multi(self, other: Index, how: JoinHow):
        from pandas.core.indexes.multi import MultiIndex
        from pandas.core.reshape.merge import restore_dropped_levels_multijoin

        # figure out join names
        self_names_list = list(self.names)
        other_names_list = list(other.names)
        self_names_order = self_names_list.index
        other_names_order = other_names_list.index
        self_names = set(self_names_list)
        other_names = set(other_names_list)
        overlap = self_names & other_names

        # need at least 1 in common
        if not overlap:
            raise ValueError("cannot join with no overlapping index names")

        if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
            # Drop the non-matching levels from left and right respectively
            ldrop_names = sorted(self_names - overlap, key=self_names_order)
            rdrop_names = sorted(other_names - overlap, key=other_names_order)

            # if only the order differs
            if not len(ldrop_names + rdrop_names):
                self_jnlevels = self
                other_jnlevels = other.reorder_levels(self.names)
            else:
                self_jnlevels = self.droplevel(ldrop_names)
                other_jnlevels = other.droplevel(rdrop_names)

            # Join left and right
            # Join on same leveled multi-index frames is supported
            join_idx, lidx, ridx = self_jnlevels.join(
                other_jnlevels, how=how, return_indexers=True
            )

            # Restore the dropped levels
            # Returned index level order is
            # common levels, ldrop_names, rdrop_names
            dropped_names = ldrop_names + rdrop_names

            # error: Argument 5/6 to "restore_dropped_levels_multijoin" has
            # incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any
            # ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]"
            levels, codes, names = restore_dropped_levels_multijoin(
                self,
                other,
                dropped_names,
                join_idx,
                lidx,  # type: ignore[arg-type]
                ridx,  # type: ignore[arg-type]
            )

            # Re-create the multi-index
            multi_join_idx = MultiIndex(
                levels=levels, codes=codes, names=names, verify_integrity=False
            )

            multi_join_idx = multi_join_idx.remove_unused_levels()

            # maintain the order of the index levels
            if how == "right":
                level_order = other_names_list + ldrop_names
            else:
                level_order = self_names_list + rdrop_names
            multi_join_idx = multi_join_idx.reorder_levels(level_order)

            return multi_join_idx, lidx, ridx

        # pick any one of the overlapping names as the join level
        jl = next(iter(overlap))

        # Case where only one index is multi
        # make the indices into mi's that match
        flip_order = False
        if isinstance(self, MultiIndex):
            self, other = other, self
            flip_order = True
            # flip if join method is right or left
            flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"}
            how = flip.get(how, how)

        level = other.names.index(jl)
        result = self._join_level(other, level, how=how)

        if flip_order:
            # undo the operand swap in the returned indexers
            return result[0], result[2], result[1]
        return result
    @final
    def _join_non_unique(
        self, other: Index, how: JoinHow = "left", sort: bool = False
    ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
        from pandas.core.reshape.merge import get_join_indexers_non_unique

        # We only get here if dtypes match
        assert self.dtype == other.dtype

        left_idx, right_idx = get_join_indexers_non_unique(
            self._values, other._values, how=how, sort=sort
        )

        if how == "right":
            join_index = other.take(right_idx)
        else:
            join_index = self.take(left_idx)

        if how == "outer":
            # Positions with no left match (-1) take their labels from the
            # right side instead.
            mask = left_idx == -1
            if mask.any():
                right = other.take(right_idx)
                join_index = join_index.putmask(mask, right)

        if isinstance(join_index, ABCMultiIndex) and how == "outer":
            # test_join_index_levels
            join_index = join_index._sort_levels_monotonic()
        return join_index, left_idx, right_idx
    @final
    def _join_level(
        self, other: Index, level, how: JoinHow = "left", keep_order: bool = True
    ) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
        """
        Join this Index with `other` on a single level of a MultiIndex.

        The join method *only* affects the level of the resulting
        MultiIndex. Otherwise it just exactly aligns the Index data to the
        labels of the level in the MultiIndex.

        If ```keep_order == True```, the order of the data indexed by the
        MultiIndex will not be changed; otherwise, it will tie out
        with `other`.

        Exactly one of self/other may be a MultiIndex (a TypeError is raised
        when both are), and the flat side must be unique.
        """
        from pandas.core.indexes.multi import MultiIndex
        def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]:
            """
            Returns sorter for the inner most level while preserving the
            order of higher levels.

            Parameters
            ----------
            labels : list[np.ndarray]
                Each ndarray has signed integer dtype, not necessarily identical.

            Returns
            -------
            np.ndarray[np.intp]
            """
            if labels[0].size == 0:
                return np.empty(0, dtype=np.intp)
            if len(labels) == 1:
                return get_group_index_sorter(ensure_platform_int(labels[0]))
            # find indexers of beginning of each set of
            # same-key labels w.r.t all but last level
            tic = labels[0][:-1] != labels[0][1:]
            for lab in labels[1:-1]:
                tic |= lab[:-1] != lab[1:]
            starts = np.hstack(([True], tic, [True])).nonzero()[0]
            lab = ensure_int64(labels[-1])
            return lib.get_level_sorter(lab, ensure_platform_int(starts))
        if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
            raise TypeError("Join on level between two MultiIndex objects is ambiguous")
        left, right = self, other
        # Normalize so that `left` is always the MultiIndex; flip the join
        # direction accordingly and swap the indexers back at the end.
        flip_order = not isinstance(self, MultiIndex)
        if flip_order:
            left, right = right, left
            flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"}
            how = flip.get(how, how)
        assert isinstance(left, MultiIndex)
        level = left._get_level_number(level)
        old_level = left.levels[level]
        if not right.is_unique:
            raise NotImplementedError(
                "Index._join_level on non-unique index is not implemented"
            )
        # Join the level's categories against the flat index; the level
        # indexers map old level positions to positions in the joined level
        # (None means "unchanged").
        new_level, left_lev_indexer, right_lev_indexer = old_level.join(
            right, how=how, return_indexers=True
        )
        if left_lev_indexer is None:
            # The level itself is unchanged; at most re-sort the rows.
            if keep_order or len(left) == 0:
                left_indexer = None
                join_index = left
            else:  # sort the leaves
                left_indexer = _get_leaf_sorter(left.codes[: level + 1])
                join_index = left[left_indexer]
        else:
            left_lev_indexer = ensure_platform_int(left_lev_indexer)
            # rev_indexer maps old level positions -> new level positions,
            # with -1 for categories dropped by the join.
            rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level))
            old_codes = left.codes[level]
            taker = old_codes[old_codes != -1]
            new_lev_codes = rev_indexer.take(taker)
            new_codes = list(left.codes)
            new_codes[level] = new_lev_codes
            new_levels = list(left.levels)
            new_levels[level] = new_level
            if keep_order:  # just drop missing values. o.w. keep order
                left_indexer = np.arange(len(left), dtype=np.intp)
                left_indexer = cast(np.ndarray, left_indexer)
                mask = new_lev_codes != -1
                if not mask.all():
                    new_codes = [lab[mask] for lab in new_codes]
                    left_indexer = left_indexer[mask]
            else:  # tie out the order with other
                if level == 0:  # outer most level, take the fast route
                    max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max()
                    ngroups = 1 + max_new_lev
                    left_indexer, counts = libalgos.groupsort_indexer(
                        new_lev_codes, ngroups
                    )
                    # missing values are placed first; drop them!
                    left_indexer = left_indexer[counts[0] :]
                    new_codes = [lab[left_indexer] for lab in new_codes]
                else:  # sort the leaves
                    mask = new_lev_codes != -1
                    mask_all = mask.all()
                    if not mask_all:
                        new_codes = [lab[mask] for lab in new_codes]
                    left_indexer = _get_leaf_sorter(new_codes[: level + 1])
                    new_codes = [lab[left_indexer] for lab in new_codes]
                    # left_indexers are w.r.t masked frame.
                    # reverse to original frame!
                    if not mask_all:
                        left_indexer = mask.nonzero()[0][left_indexer]
            join_index = MultiIndex(
                levels=new_levels,
                codes=new_codes,
                names=left.names,
                verify_integrity=False,
            )
        if right_lev_indexer is not None:
            # Translate the result's level codes into positions in `right`.
            right_indexer = right_lev_indexer.take(join_index.codes[level])
        else:
            right_indexer = join_index.codes[level]
        if flip_order:
            left_indexer, right_indexer = right_indexer, left_indexer
        left_indexer = (
            None if left_indexer is None else ensure_platform_int(left_indexer)
        )
        right_indexer = (
            None if right_indexer is None else ensure_platform_int(right_indexer)
        )
        return join_index, left_indexer, right_indexer
    def _join_monotonic(
        self, other: Index, how: JoinHow = "left"
    ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
        """
        Join via the libjoin fastpaths.

        Returns the joined Index and optional left/right indexers; a None
        indexer means "take that side as-is".
        """
        # We only get here with (caller is responsible for ensuring):
        #  1) matching dtypes
        #  2) both monotonic increasing
        #  3) other.is_unique or self.is_unique
        assert other.dtype == self.dtype
        assert self._can_use_libjoin and other._can_use_libjoin
        if self.equals(other):
            # This is a convenient place for this check, but its correctness
            # does not depend on monotonicity, so it could go earlier
            # in the calling method.
            ret_index = other if how == "right" else self
            return ret_index, None, None
        ridx: npt.NDArray[np.intp] | None
        lidx: npt.NDArray[np.intp] | None
        if how == "left":
            if other.is_unique:
                # We can perform much better than the general case
                join_index = self
                lidx = None
                ridx = self._left_indexer_unique(other)
            else:
                join_array, lidx, ridx = self._left_indexer(other)
                join_index, lidx, ridx = self._wrap_join_result(
                    join_array, other, lidx, ridx, how
                )
        elif how == "right":
            if self.is_unique:
                # We can perform much better than the general case
                join_index = other
                lidx = other._left_indexer_unique(self)
                ridx = None
            else:
                # right join of self/other == left join of other/self;
                # note the swapped operands and swapped returned indexers.
                join_array, ridx, lidx = other._left_indexer(self)
                join_index, lidx, ridx = self._wrap_join_result(
                    join_array, other, lidx, ridx, how
                )
        elif how == "inner":
            join_array, lidx, ridx = self._inner_indexer(other)
            join_index, lidx, ridx = self._wrap_join_result(
                join_array, other, lidx, ridx, how
            )
        elif how == "outer":
            join_array, lidx, ridx = self._outer_indexer(other)
            join_index, lidx, ridx = self._wrap_join_result(
                join_array, other, lidx, ridx, how
            )
        lidx = None if lidx is None else ensure_platform_int(lidx)
        ridx = None if ridx is None else ensure_platform_int(ridx)
        return join_index, lidx, ridx
    def _wrap_join_result(
        self,
        joined: ArrayLike,
        other: Self,
        lidx: npt.NDArray[np.intp] | None,
        ridx: npt.NDArray[np.intp] | None,
        how: JoinHow,
    ) -> tuple[Self, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
        """
        Build the joined Index from raw libjoin output, normalizing no-op
        indexers to None and reusing self/other where possible.
        """
        assert other.dtype == self.dtype
        # An indexer that is exactly 0..n-1 is a no-op; drop it so we can
        # return the existing object below.
        if lidx is not None and lib.is_range_indexer(lidx, len(self)):
            lidx = None
        if ridx is not None and lib.is_range_indexer(ridx, len(other)):
            ridx = None
        # return self or other if possible to maintain cached attributes
        if lidx is None:
            join_index = self
        elif ridx is None:
            join_index = other
        else:
            join_index = self._constructor._with_infer(joined, dtype=self.dtype)
        # Result names follow the "driving" side of the join.
        names = other.names if how == "right" else self.names
        if join_index.names != names:
            join_index = join_index.set_names(names)
        return join_index, lidx, ridx
    @final
    @cache_readonly
    def _can_use_libjoin(self) -> bool:
        """
        Whether we can use the fastpaths implemented in _libs.join.

        This is driven by whether (in monotonic increasing cases that are
        guaranteed not to have NAs) we can convert to an np.ndarray without
        making a copy. If we cannot, this negates the performance benefit
        of using libjoin.
        """
        if not self.is_monotonic_increasing:
            # The libjoin functions all assume monotonicity.
            return False
        if type(self) is Index:
            # excludes EAs, but include masks; we get here with monotonic
            # values only, meaning no NA
            return (
                isinstance(self.dtype, np.dtype)
                or isinstance(self._values, (ArrowExtensionArray, BaseMaskedArray))
                or (
                    isinstance(self.dtype, StringDtype)
                    and self.dtype.storage == "python"
                )
            )
        # Exclude index types where the conversion to numpy converts to object dtype,
        # which negates the performance benefit of libjoin
        # Subclasses should override to return False if _get_join_target is
        # not zero-copy.
        # TODO: exclude RangeIndex (which allocates memory)?
        # Doing so seems to break test_concat_datetime_timezone
        return not isinstance(self, (ABCIntervalIndex, ABCMultiIndex))
# --------------------------------------------------------------------
# Uncategorized Methods
@property
def values(self) -> ArrayLike:
"""
Return an array representing the data in the Index.
.. warning::
We recommend using :attr:`Index.array` or
:meth:`Index.to_numpy`, depending on whether you need
a reference to the underlying data or a NumPy array.
.. versionchanged:: 3.0.0
The returned array is read-only.
Returns
-------
array: numpy.ndarray or ExtensionArray
See Also
--------
Index.array : Reference to the underlying data.
Index.to_numpy : A NumPy array representing the underlying data.
Examples
--------
For :class:`pandas.Index`:
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.values
array([1, 2, 3])
For :class:`pandas.IntervalIndex`:
>>> idx = pd.interval_range(start=0, end=5)
>>> idx.values
<IntervalArray>
[(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
Length: 5, dtype: interval[int64, right]
"""
data = self._data
if isinstance(data, np.ndarray):
data = data.view()
data.flags.writeable = False
return data
    @cache_readonly
    @doc(IndexOpsMixin.array)
    def array(self) -> ExtensionArray:
        # Always hand out an ExtensionArray; a plain ndarray backing gets
        # wrapped in NumpyExtensionArray first.
        array = self._data
        if isinstance(array, np.ndarray):
            from pandas.core.arrays.numpy_ import NumpyExtensionArray
            array = NumpyExtensionArray(array)
        # Return a read-only view so the Index's data cannot be mutated
        # through this public accessor.
        array = array.view()
        array._readonly = True
        return array
    @property
    def _values(self) -> ExtensionArray | np.ndarray:
        """
        The best array representation.

        This is an ndarray or ExtensionArray.

        ``_values`` are consistent between ``Series`` and ``Index``.

        It may differ from the public '.values' method.

        index             | values          | _values       |
        ----------------- | --------------- | ------------- |
        Index             | ndarray         | ndarray       |
        CategoricalIndex  | Categorical     | Categorical   |
        DatetimeIndex     | ndarray[M8ns]   | DatetimeArray |
        DatetimeIndex[tz] | ndarray[M8ns]   | DatetimeArray |
        PeriodIndex       | ndarray[object] | PeriodArray   |
        IntervalIndex     | IntervalArray   | IntervalArray |

        See Also
        --------
        values : Values
        """
        # Direct reference to the backing array, no copy and no
        # read-only wrapping (unlike the public `.values`).
        return self._data
    def _get_engine_target(self) -> ArrayLike:
        """
        Get the ndarray or ExtensionArray that we can pass to the IndexEngine
        constructor.
        """
        vals = self._values
        if isinstance(vals, StringArray):
            # GH#45652 much more performant than ExtensionEngine
            return vals._ndarray
        if isinstance(vals, ArrowExtensionArray) and self.dtype.kind in "Mm":
            import pyarrow as pa
            # Arrow-backed timestamps/durations are handed to the engine as
            # their i8 (integer) representation.
            pa_type = vals._pa_array.type
            if pa.types.is_timestamp(pa_type):
                vals = vals._to_datetimearray()
                return vals._ndarray.view("i8")
            elif pa.types.is_duration(pa_type):
                vals = vals._to_timedeltaarray()
                return vals._ndarray.view("i8")
        if (
            type(self) is Index
            and isinstance(self._values, ExtensionArray)
            and not isinstance(self._values, BaseMaskedArray)
            and not (
                isinstance(self._values, ArrowExtensionArray)
                and is_numeric_dtype(self.dtype)
                # Exclude decimal
                and self.dtype.kind != "O"
            )
        ):
            # TODO(ExtensionIndex): remove special-case, just use self._values
            return self._values.astype(object)
        return vals
    @final
    def _get_join_target(self) -> np.ndarray:
        """
        Get the ndarray or ExtensionArray that we can pass to the join
        functions.

        Only called when ``_can_use_libjoin`` is True, which is what makes
        the NA-free shortcuts below valid.
        """
        if isinstance(self._values, BaseMaskedArray):
            # This is only used if our array is monotonic, so no NAs present
            return self._values._data
        elif (
            isinstance(self._values, ArrowExtensionArray)
            and self.dtype.kind not in "mM"
        ):
            # This is only used if our array is monotonic, so no missing values
            # present
            # "mM" cases will go through _get_engine_target and cast to i8
            return self._values.to_numpy()
        # TODO: exclude ABCRangeIndex case here as it copies
        target = self._get_engine_target()
        if not isinstance(target, np.ndarray):
            raise ValueError("_can_use_libjoin should return False.")
        return target
    def _from_join_target(self, result: np.ndarray) -> ArrayLike:
        """
        Cast the ndarray returned from one of the libjoin.foo_indexer functions
        back to type(self._data).
        """
        if isinstance(self.values, BaseMaskedArray):
            # libjoin only ran on NA-free data (see _can_use_libjoin), so an
            # all-False mask is correct here.
            return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_))
        elif isinstance(self.values, (ArrowExtensionArray, StringArray)):
            return type(self.values)._from_sequence(result, dtype=self.dtype)
        return result
@doc(IndexOpsMixin._memory_usage)
def memory_usage(self, deep: bool = False) -> int:
result = self._memory_usage(deep=deep)
# include our engine hashtable, only if it's already cached
if "_engine" in self._cache:
result += self._engine.sizeof(deep=deep)
return result
@final
def where(self, cond, other=None) -> Index:
"""
Replace values where the condition is False.
The replacement is taken from other.
Parameters
----------
cond : bool array-like with the same length as self
Condition to select the values on.
other : scalar, or array-like, default None
Replacement if the condition is False.
Returns
-------
pandas.Index
A copy of self with values replaced from other
where the condition is False.
See Also
--------
Series.where : Same method for Series.
DataFrame.where : Same method for DataFrame.
Examples
--------
>>> idx = pd.Index(["car", "bike", "train", "tractor"])
>>> idx
Index(['car', 'bike', 'train', 'tractor'], dtype='object')
>>> idx.where(idx.isin(["car", "train"]), "other")
Index(['car', 'other', 'train', 'other'], dtype='object')
"""
if isinstance(self, ABCMultiIndex):
raise NotImplementedError(
".where is not supported for MultiIndex operations"
)
cond = np.asarray(cond, dtype=bool)
return self.putmask(~cond, other)
# construction helpers
@final
@classmethod
def _raise_scalar_data_error(cls, data):
# We return the TypeError so that we can raise it from the constructor
# in order to keep mypy happy
raise TypeError(
f"{cls.__name__}(...) must be called with a collection of some "
f"kind, {repr(data) if not isinstance(data, np.generic) else str(data)} "
"was passed"
)
    def _validate_fill_value(self, value):
        """
        Check if the value can be inserted into our array without casting,
        and convert it to an appropriate native type if necessary.

        Raises
        ------
        TypeError
            If the value cannot be inserted into an array of this dtype.
        """
        dtype = self.dtype
        if isinstance(dtype, np.dtype) and dtype.kind not in "mM":
            if isinstance(value, tuple) and dtype != object:
                # GH#54385
                raise TypeError
            try:
                return np_can_hold_element(dtype, value)
            except LossySetitemError as err:
                # re-raise as TypeError for consistency
                raise TypeError from err
        elif not can_hold_element(self._values, value):
            raise TypeError
        return value
@cache_readonly
def _is_memory_usage_qualified(self) -> bool:
"""
Return a boolean if we need a qualified .info display.
"""
return is_object_dtype(self.dtype) or (
is_string_dtype(self.dtype) and self.dtype.storage == "python" # type: ignore[union-attr]
)
def __contains__(self, key: Any) -> bool:
"""
Return a boolean indicating whether the provided key is in the index.
Parameters
----------
key : label
The key to check if it is present in the index.
Returns
-------
bool
Whether the key search is in the index.
Raises
------
TypeError
If the key is not hashable.
See Also
--------
Index.isin : Returns an ndarray of boolean dtype indicating whether the
list-like key is in the index.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Index([1, 2, 3, 4], dtype='int64')
>>> 2 in idx
True
>>> 6 in idx
False
"""
hash(key)
try:
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
# https://github.com/python/typeshed/issues/2148#issuecomment-520783318
# Incompatible types in assignment (expression has type "None", base class
# "object" defined the type as "Callable[[object], int]")
__hash__: ClassVar[None] # type: ignore[assignment]
    @final
    def __setitem__(self, key, value) -> None:
        # Index is immutable: reject any item assignment.
        raise TypeError("Index does not support mutable operations")
    def __getitem__(self, key):
        """
        Override numpy.ndarray's __getitem__ method to work as desired.

        This function adds lists and Series as valid boolean indexers
        (ndarrays only supports ndarray with dtype=bool).

        If resulting ndim != 1, plain ndarray is returned instead of
        corresponding `Index` subclass.
        """
        getitem = self._data.__getitem__
        # unwrap 0-dim ndarray keys to the contained scalar
        key = lib.item_from_zerodim(key)
        if is_integer(key) or is_float(key):
            # GH#44051 exclude bool, which would return a 2d ndarray
            key = com.cast_scalar_indexer(key)
            return getitem(key)
        if isinstance(key, slice):
            # This case is separated from the conditional above to avoid
            # pessimization com.is_bool_indexer and ndim checks.
            return self._getitem_slice(key)
        if com.is_bool_indexer(key):
            # if we have list[bools, length=1e5] then doing this check+convert
            # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__
            # time below from 3.8 ms to 496 µs
            # if we already have ndarray[bool], the overhead is 1.4 µs or .25%
            if isinstance(getattr(key, "dtype", None), ExtensionDtype):
                # EA-backed bool masks: NA entries are treated as False
                key = key.to_numpy(dtype=bool, na_value=False)
            else:
                key = np.asarray(key, dtype=bool)
            if not isinstance(self.dtype, ExtensionDtype):
                if len(key) == 0 and len(key) != len(self):
                    raise ValueError(
                        "The length of the boolean indexer cannot be 0 "
                        "when the Index has length greater than 0."
                    )
        result = getitem(key)
        # Because we ruled out integer above, we always get an arraylike here
        if result.ndim > 1:
            disallow_ndim_indexing(result)
        # NB: Using _constructor._simple_new would break if MultiIndex
        # didn't override __getitem__
        return self._constructor._simple_new(result, name=self._name)
    def _getitem_slice(self, slobj: slice) -> Self:
        """
        Fastpath for __getitem__ when we know we have a slice.
        """
        res = self._data[slobj]
        result = type(self)._simple_new(res, name=self._name, refs=self._references)
        if "_engine" in self._cache:
            # Reuse the already-built engine: deriving the sliced engine is
            # cheaper than rebuilding it from scratch for the new index.
            reverse = slobj.step is not None and slobj.step < 0
            result._engine._update_from_sliced(self._engine, reverse=reverse)  # type: ignore[union-attr]
        return result
@final
def _can_hold_identifiers_and_holds_name(self, name) -> bool:
"""
Faster check for ``name in self`` when we know `name` is a Python
identifier (e.g. in NDFrame.__getattr__, which hits this to support
. key lookup). For indexes that can't hold identifiers (everything
but object & categorical) we just return False.
https://github.com/pandas-dev/pandas/issues/19764
"""
if (
is_object_dtype(self.dtype)
or is_string_dtype(self.dtype)
or isinstance(self.dtype, CategoricalDtype)
):
return name in self
return False
def append(self, other: Index | Sequence[Index]) -> Index:
"""
Append a collection of Index options together.
Parameters
----------
other : Index or list/tuple of indices
Single Index or a collection of indices, which can be either a list or a
tuple.
Returns
-------
Index
Returns a new Index object resulting from appending the provided other
indices to the original Index.
See Also
--------
Index.insert : Make new Index inserting new item at location.
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx.append(pd.Index([4]))
Index([1, 2, 3, 4], dtype='int64')
"""
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat += list(other)
else:
# error: Argument 1 to "append" of "list" has incompatible type
# "Union[Index, Sequence[Index]]"; expected "Index"
to_concat.append(other) # type: ignore[arg-type]
for obj in to_concat:
if not isinstance(obj, Index):
raise TypeError("all inputs must be Index")
names = {obj.name for obj in to_concat}
name = None if len(names) > 1 else self.name
return self._concat(to_concat, name)
def _concat(self, to_concat: list[Index], name: Hashable) -> Index:
"""
Concatenate multiple Index objects.
"""
to_concat_vals = [x._values for x in to_concat]
result = concat_compat(to_concat_vals)
return Index._with_infer(result, name=name)
    def putmask(self, mask, value) -> Index:
        """
        Return a new Index of the values set with the mask.

        Parameters
        ----------
        mask : np.ndarray[bool]
            Array of booleans denoting where values in the original
            data are not ``NA``.
        value : scalar
            Scalar value to use to fill holes (e.g. 0).
            This value cannot be a list-likes.

        Returns
        -------
        Index
            A new Index of the values set with the mask.

        See Also
        --------
        numpy.putmask : Changes elements of an array
            based on conditional and input values.

        Examples
        --------
        >>> idx1 = pd.Index([1, 2, 3])
        >>> idx2 = pd.Index([5, 6, 7])
        >>> idx1.putmask([True, False, False], idx2)
        Index([5, 2, 3], dtype='int64')
        """
        mask, noop = validate_putmask(self._values, mask)
        if noop:
            # nothing to set: return an unchanged copy
            return self.copy()
        if self.dtype != object and is_valid_na_for_dtype(value, self.dtype):
            # e.g. None -> np.nan, see also Block._standardize_fill_value
            value = self._na_value
        try:
            converted = self._validate_fill_value(value)
        except (LossySetitemError, ValueError, TypeError) as err:
            if is_object_dtype(self.dtype):  # pragma: no cover
                raise err
            # Value doesn't fit this dtype: upcast to a common dtype and
            # retry there. See also: Block.coerce_to_target_dtype
            dtype = self._find_common_type_compat(value)
            if dtype == self.dtype:
                # GH#56376 avoid RecursionError
                raise AssertionError(
                    "Something has gone wrong. Please report a bug at "
                    "github.com/pandas-dev/pandas"
                ) from err
            return self.astype(dtype).putmask(mask, value)
        values = self._values.copy()
        if isinstance(values, np.ndarray):
            converted = setitem_datetimelike_compat(values, mask.sum(), converted)
            np.putmask(values, mask, converted)
        else:
            # Note: we use the original value here, not converted, as
            # _validate_fill_value is not idempotent
            values._putmask(mask, value)
        return self._shallow_copy(values)
    def equals(self, other: Any) -> bool:
        """
        Determine if two Index object are equal.

        The things that are being compared are:

        * The elements inside the Index object.
        * The order of the elements inside the Index object.

        Parameters
        ----------
        other : Any
            The other object to compare against.

        Returns
        -------
        bool
            True if "other" is an Index and it has the same elements and order
            as the calling index; False otherwise.

        See Also
        --------
        Index.identical: Checks that object attributes and types are also equal.
        Index.has_duplicates: Check if the Index has duplicate values.
        Index.is_unique: Return if the index has unique values.

        Examples
        --------
        >>> idx1 = pd.Index([1, 2, 3])
        >>> idx1
        Index([1, 2, 3], dtype='int64')
        >>> idx1.equals(pd.Index([1, 2, 3]))
        True

        The elements inside are compared

        >>> idx2 = pd.Index(["1", "2", "3"])
        >>> idx2
        Index(['1', '2', '3'], dtype='object')
        >>> idx1.equals(idx2)
        False

        The order is compared

        >>> ascending_idx = pd.Index([1, 2, 3])
        >>> ascending_idx
        Index([1, 2, 3], dtype='int64')
        >>> descending_idx = pd.Index([3, 2, 1])
        >>> descending_idx
        Index([3, 2, 1], dtype='int64')
        >>> ascending_idx.equals(descending_idx)
        False

        The dtype is *not* compared

        >>> int64_idx = pd.Index([1, 2, 3], dtype="int64")
        >>> int64_idx
        Index([1, 2, 3], dtype='int64')
        >>> uint64_idx = pd.Index([1, 2, 3], dtype="uint64")
        >>> uint64_idx
        Index([1, 2, 3], dtype='uint64')
        >>> int64_idx.equals(uint64_idx)
        True
        """
        if self.is_(other):
            # identical object fastpath
            return True
        if not isinstance(other, Index):
            return False
        if len(self) != len(other):
            # quickly return if the lengths are different
            return False
        if isinstance(self.dtype, StringDtype) and other.dtype != self.dtype:
            # TODO(infer_string) can we avoid this special case?
            # special case for object behavior
            return other.equals(self.astype(object))
        if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype):
            # if other is not object, use other's logic for coercion
            return other.equals(self)
        if isinstance(other, ABCMultiIndex):
            # d-level MultiIndex can equal d-tuple Index
            return other.equals(self)
        if isinstance(self._values, ExtensionArray):
            # Dispatch to the ExtensionArray's .equals method.
            if not isinstance(other, type(self)):
                return False
            earr = cast(ExtensionArray, self._data)
            return earr.equals(other._data)
        if isinstance(other.dtype, ExtensionDtype):
            # All EA-backed Index subclasses override equals
            return other.equals(self)
        return array_equivalent(self._values, other._values)
@final
def identical(self, other) -> bool:
"""
Similar to equals, but checks that object attributes and types are also equal.
Parameters
----------
other : Index
The Index object you want to compare with the current Index object.
Returns
-------
bool
If two Index objects have equal elements and same type True,
otherwise False.
See Also
--------
Index.equals: Determine if two Index object are equal.
Index.has_duplicates: Check if the Index has duplicate values.
Index.is_unique: Return if the index has unique values.
Examples
--------
>>> idx1 = pd.Index(["1", "2", "3"])
>>> idx2 = pd.Index(["1", "2", "3"])
>>> idx2.identical(idx1)
True
>>> idx1 = pd.Index(["1", "2", "3"], name="A")
>>> idx2 = pd.Index(["1", "2", "3"], name="B")
>>> idx2.identical(idx1)
False
"""
return (
self.equals(other)
and all(
getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables
)
and type(self) == type(other)
and self.dtype == other.dtype
)
@final
def asof(self, label):
"""
Return the label from the index, or, if not present, the previous one.
Assuming that the index is sorted, return the passed index label if it
is in the index, or return the previous index label if the passed one
is not in the index.
Parameters
----------
label : object
The label up to which the method returns the latest index label.
Returns
-------
object
The passed label if it is in the index. The previous label if the
passed label is not in the sorted index or `NaN` if there is no
such label.
See Also
--------
Series.asof : Return the latest value in a Series up to the
passed index.
merge_asof : Perform an asof merge (similar to left join but it
matches on nearest key rather than equal key).
Index.get_loc : An `asof` is a thin wrapper around `get_loc`
with method='pad'.
Examples
--------
`Index.asof` returns the latest index label up to the passed label.
>>> idx = pd.Index(["2013-12-31", "2014-01-02", "2014-01-03"])
>>> idx.asof("2014-01-01")
'2013-12-31'
If the label is in the index, the method returns the passed label.
>>> idx.asof("2014-01-02")
'2014-01-02'
If all of the labels in the index are later than the passed label,
NaN is returned.
>>> idx.asof("1999-01-02")
nan
If the index is not sorted, an error is raised.
>>> idx_not_sorted = pd.Index(["2013-12-31", "2015-01-02", "2014-01-03"])
>>> idx_not_sorted.asof("2013-12-31")
Traceback (most recent call last):
ValueError: index must be monotonic increasing or decreasing
"""
self._searchsorted_monotonic(label) # validate sortedness
try:
loc = self.get_loc(label)
except (KeyError, TypeError) as err:
# KeyError -> No exact match, try for padded
# TypeError -> passed e.g. non-hashable, fall through to get
# the tested exception message
indexer = self.get_indexer([label], method="pad")
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError("asof requires scalar valued input") from err
loc = indexer.item()
if loc == -1:
return self._na_value
else:
if isinstance(loc, slice):
return self[loc][-1]
return self[loc]
def asof_locs(
self, where: Index, mask: npt.NDArray[np.bool_]
) -> npt.NDArray[np.intp]:
"""
Return the locations (indices) of labels in the index.
As in the :meth:`pandas.Index.asof`, if the label (a particular entry in
``where``) is not in the index, the latest index label up to the
passed label is chosen and its index returned.
If all of the labels in the index are later than a label in ``where``,
-1 is returned.
``mask`` is used to ignore ``NA`` values in the index during calculation.
Parameters
----------
where : Index
An Index consisting of an array of timestamps.
mask : np.ndarray[bool]
Array of booleans denoting where values in the original
data are not ``NA``.
Returns
-------
np.ndarray[np.intp]
An array of locations (indices) of the labels from the index
which correspond to the return values of :meth:`pandas.Index.asof`
for every element in ``where``.
See Also
--------
Index.asof : Return the label from the index, or, if not present, the
previous one.
Examples
--------
>>> idx = pd.date_range("2023-06-01", periods=3, freq="D")
>>> where = pd.DatetimeIndex(
... ["2023-05-30 00:12:00", "2023-06-01 00:00:00", "2023-06-02 23:59:59"]
... )
>>> mask = np.ones(3, dtype=bool)
>>> idx.asof_locs(where, mask)
array([-1, 0, 1])
We can use ``mask`` to ignore certain values in the index during calculation.
>>> mask[1] = False
>>> idx.asof_locs(where, mask)
array([-1, 0, 0])
"""
# error: No overload variant of "searchsorted" of "ndarray" matches argument
# types "Union[ExtensionArray, ndarray[Any, Any]]", "str"
# TODO: will be fixed when ExtensionArray.searchsorted() is fixed
locs = self._values[mask].searchsorted(
where._values,
side="right", # type: ignore[call-overload]
)
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self), dtype=np.intp)[mask].take(locs)
first_value = self._values[mask.argmax()]
result[(locs == 0) & (where._values < first_value)] = -1
return result
    @overload
    def sort_values(
        self,
        *,
        return_indexer: Literal[False] = ...,
        ascending: bool = ...,
        na_position: NaPosition = ...,
        key: Callable | None = ...,
    ) -> Self: ...
    @overload
    def sort_values(
        self,
        *,
        return_indexer: Literal[True],
        ascending: bool = ...,
        na_position: NaPosition = ...,
        key: Callable | None = ...,
    ) -> tuple[Self, np.ndarray]: ...
    @overload
    def sort_values(
        self,
        *,
        return_indexer: bool = ...,
        ascending: bool = ...,
        na_position: NaPosition = ...,
        key: Callable | None = ...,
    ) -> Self | tuple[Self, np.ndarray]: ...
    def sort_values(
        self,
        *,
        return_indexer: bool = False,
        ascending: bool = True,
        na_position: NaPosition = "last",
        key: Callable | None = None,
    ) -> Self | tuple[Self, np.ndarray]:
        """
        Return a sorted copy of the index.

        Return a sorted copy of the index, and optionally return the indices
        that sorted the index itself.

        Parameters
        ----------
        return_indexer : bool, default False
            Should the indices that would sort the index be returned.
        ascending : bool, default True
            Should the index values be sorted in an ascending order.
        na_position : {'first' or 'last'}, default 'last'
            Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
            the end.
        key : callable, optional
            If not None, apply the key function to the index values
            before sorting. This is similar to the `key` argument in the
            builtin :meth:`sorted` function, with the notable difference that
            this `key` function should be *vectorized*. It should expect an
            ``Index`` and return an ``Index`` of the same shape.

        Returns
        -------
        sorted_index : pandas.Index
            Sorted copy of the index.
        indexer : numpy.ndarray, optional
            The indices that the index itself was sorted by.

        See Also
        --------
        Series.sort_values : Sort values of a Series.
        DataFrame.sort_values : Sort values in a DataFrame.

        Examples
        --------
        >>> idx = pd.Index([10, 100, 1, 1000])
        >>> idx
        Index([10, 100, 1, 1000], dtype='int64')

        Sort values in ascending order (default behavior).

        >>> idx.sort_values()
        Index([1, 10, 100, 1000], dtype='int64')

        Sort values in descending order, and also get the indices `idx` was
        sorted by.

        >>> idx.sort_values(ascending=False, return_indexer=True)
        (Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))
        """
        if key is None and (
            (ascending and self.is_monotonic_increasing)
            or (not ascending and self.is_monotonic_decreasing)
        ):
            # Fastpath: already sorted in the requested direction; return a
            # copy (with the identity indexer if requested).
            if return_indexer:
                indexer = np.arange(len(self), dtype=np.intp)
                return self.copy(), indexer
            else:
                return self.copy()
        # GH 35584. Sort missing values according to na_position kwarg
        # ignore na_position for MultiIndex
        if not isinstance(self, ABCMultiIndex):
            _as = nargsort(
                items=self, ascending=ascending, na_position=na_position, key=key
            )
        else:
            idx = cast(Index, ensure_key_mapped(self, key))
            _as = idx.argsort(na_position=na_position)
            if not ascending:
                _as = _as[::-1]
        sorted_index = self.take(_as)
        if return_indexer:
            return sorted_index, _as
        else:
            return sorted_index
def shift(self, periods: int = 1, freq=None) -> Self:
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or str, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.Index
Shifted index.
See Also
--------
Series.shift : Shift values of Series.
Notes
-----
This method is only implemented for datetime-like index classes,
i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.
Examples
--------
Put the first 5 month starts of 2011 into an index.
>>> month_starts = pd.date_range("1/1/2011", periods=5, freq="MS")
>>> month_starts
DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',
'2011-05-01'],
dtype='datetime64[ns]', freq='MS')
Shift the index by 10 days.
>>> month_starts.shift(10, freq="D")
DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',
'2011-05-11'],
dtype='datetime64[ns]', freq=None)
The default value of `freq` is the `freq` attribute of the index,
which is 'MS' (month start) in this example.
>>> month_starts.shift(10)
DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',
'2012-03-01'],
dtype='datetime64[ns]', freq='MS')
"""
raise NotImplementedError(
f"This method is only implemented for DatetimeIndex, PeriodIndex and "
f"TimedeltaIndex; Got type {type(self).__name__}"
)
def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
    """
    Return the integer indices that would sort the index.
    Parameters
    ----------
    *args
        Passed to `numpy.ndarray.argsort`.
    **kwargs
        Passed to `numpy.ndarray.argsort`.
    Returns
    -------
    np.ndarray[np.intp]
        Integer indices that would sort the index if used as
        an indexer.
    See Also
    --------
    numpy.argsort : Similar method for NumPy arrays.
    Index.sort_values : Return sorted copy of Index.
    Examples
    --------
    >>> idx = pd.Index(["b", "a", "d", "c"])
    >>> idx
    Index(['b', 'a', 'd', 'c'], dtype='object')
    >>> order = idx.argsort()
    >>> order
    array([1, 0, 3, 2])
    >>> idx[order]
    Index(['a', 'b', 'c', 'd'], dtype='object')
    """
    # This works for either ndarray or EA, is overridden
    # by RangeIndex, MultIIndex
    # All positional/keyword arguments are forwarded unchanged to the
    # underlying array's argsort.
    return self._data.argsort(*args, **kwargs)
def _check_indexing_error(self, key) -> None:
    """Raise ``InvalidIndexError`` if *key* is not a scalar."""
    # Non-scalar keys would only fail later, after a costly conversion
    # to numpy arrays, so reject them up front - GH29926.
    if is_scalar(key):
        return
    raise InvalidIndexError(key)
@cache_readonly
def _should_fallback_to_positional(self) -> bool:
    """
    Should an integer key be treated as positional?
    """
    # Integer keys are labels (not positions) only on numeric-ish indexes.
    numeric_like = ("integer", "mixed-integer", "floating", "complex")
    return self.inferred_type not in numeric_like
_index_shared_docs["get_indexer_non_unique"] = """
Compute indexer and mask for new index given the current index.
The indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
An iterable containing the values to be used for computing indexer.
Returns
-------
indexer : np.ndarray[np.intp]
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
missing : np.ndarray[np.intp]
An indexer into the target of the values not found.
These correspond to the -1 in the indexer array.
See Also
--------
Index.get_indexer : Computes indexer and mask for new index given
the current index.
Index.get_indexer_for : Returns an indexer even when non-unique.
Examples
--------
>>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
>>> index.get_indexer_non_unique(['b', 'b'])
(array([1, 3, 4, 1, 3, 4]), array([], dtype=int64))
In the example below there are no matched values.
>>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
>>> index.get_indexer_non_unique(['q', 'r', 't'])
(array([-1, -1, -1]), array([0, 1, 2]))
For this reason, the returned ``indexer`` contains only integers equal to -1.
It demonstrates that there's no match between the index and the ``target``
values at these positions. The mask [0, 1, 2] in the return value shows that
the first, second, and third elements are missing.
Notice that the return value is a tuple contains two items. In the example
below the first item is an array of locations in ``index``. The second
item is a mask shows that the first and third elements are missing.
>>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
>>> index.get_indexer_non_unique(['f', 'b', 's'])
(array([-1, 1, 3, 4, -1]), array([0, 2]))
"""
def get_indexer_non_unique(
    self, target
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
    """
    Compute indexer and mask for new index given the current index.
    The indexer should be then used as an input to ndarray.take to align the
    current data to the new index.
    Parameters
    ----------
    target : Index
        An iterable containing the values to be used for computing indexer.
    Returns
    -------
    indexer : np.ndarray[np.intp]
        Integers from 0 to n - 1 indicating that the index at these
        positions matches the corresponding target values. Missing values
        in the target are marked by -1.
    missing : np.ndarray[np.intp]
        An indexer into the target of the values not found.
        These correspond to the -1 in the indexer array.
    See Also
    --------
    Index.get_indexer : Computes indexer and mask for new index given
        the current index.
    Index.get_indexer_for : Returns an indexer even when non-unique.
    Examples
    --------
    >>> index = pd.Index(["c", "b", "a", "b", "b"])
    >>> index.get_indexer_non_unique(["b", "b"])
    (array([1, 3, 4, 1, 3, 4]), array([], dtype=int64))
    In the example below there are no matched values.
    >>> index = pd.Index(["c", "b", "a", "b", "b"])
    >>> index.get_indexer_non_unique(["q", "r", "t"])
    (array([-1, -1, -1]), array([0, 1, 2]))
    For this reason, the returned ``indexer`` contains only integers equal to -1.
    It demonstrates that there's no match between the index and the ``target``
    values at these positions. The mask [0, 1, 2] in the return value shows that
    the first, second, and third elements are missing.
    Notice that the return value is a tuple contains two items. In the example
    below the first item is an array of locations in ``index``. The second
    item is a mask shows that the first and third elements are missing.
    >>> index = pd.Index(["c", "b", "a", "b", "b"])
    >>> index.get_indexer_non_unique(["f", "b", "s"])
    (array([-1, 1, 3, 4, -1]), array([0, 2]))
    """
    target = self._maybe_cast_listlike_indexer(target)
    # Incomparable dtypes can never match: short-circuit with all -1s.
    if not self._should_compare(target) and not self._should_partial_index(target):
        # _should_partial_index e.g. IntervalIndex with numeric scalars
        # that can be matched to Interval scalars.
        return self._get_indexer_non_comparable(target, method=None, unique=False)
    pself, ptarget = self._maybe_downcast_for_indexing(target)
    if pself is not self or ptarget is not target:
        # One side was recast for comparability; redo lookup on new pair.
        return pself.get_indexer_non_unique(ptarget)
    if self.dtype != target.dtype:
        # TODO: if object, could use infer_dtype to preempt costly
        # conversion if still non-comparable?
        dtype = self._find_common_type_compat(target)
        this = self.astype(dtype, copy=False)
        that = target.astype(dtype, copy=False)
        return this.get_indexer_non_unique(that)
    # TODO: get_indexer has fastpaths for both Categorical-self and
    # Categorical-target. Can we do something similar here?
    # Note: _maybe_downcast_for_indexing ensures we never get here
    # with MultiIndex self and non-Multi target
    if self._is_multi and target._is_multi:
        engine = self._engine
        # Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" has
        # no attribute "_extract_level_codes"
        tgt_values = engine._extract_level_codes(target)  # type: ignore[union-attr]
    else:
        tgt_values = target._get_engine_target()
    indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
    return ensure_platform_int(indexer), ensure_platform_int(missing)
@final
def get_indexer_for(self, target) -> npt.NDArray[np.intp]:
    """
    Guaranteed return of an indexer even when non-unique.
    This dispatches to get_indexer or get_indexer_non_unique
    as appropriate.
    Parameters
    ----------
    target : Index
        An iterable containing the values to be used for computing indexer.
    Returns
    -------
    np.ndarray[np.intp]
        List of indices.
    See Also
    --------
    Index.get_indexer : Computes indexer and mask for new index given
        the current index.
    Index.get_indexer_non_unique : Returns indexer and masks for new index
        given the current index.
    Examples
    --------
    >>> idx = pd.Index([np.nan, "var1", np.nan])
    >>> idx.get_indexer_for([np.nan])
    array([0, 2])
    """
    # Unique (or unique-for-indexing, see _index_as_unique) indexes use
    # the plain get_indexer fastpath; otherwise drop the `missing` part.
    if self._index_as_unique:
        return self.get_indexer(target)
    indexer, _ = self.get_indexer_non_unique(target)
    return indexer
def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]:
    """
    Analogue to get_indexer that raises if any elements are missing.
    Returns the reindexed keys (as an Index) together with the integer
    indexer into self; raises KeyError (via _raise_if_missing) when any
    requested label is absent.
    """
    keyarr = key
    if not isinstance(keyarr, Index):
        # tuples must survive as single labels, hence tuplesafe
        keyarr = com.asarray_tuplesafe(keyarr)
    if self._index_as_unique:
        indexer = self.get_indexer_for(keyarr)
        keyarr = self.reindex(keyarr)[0]
    else:
        keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr)
    self._raise_if_missing(keyarr, indexer, axis_name)
    keyarr = self.take(indexer)
    if isinstance(key, Index):
        # GH 42790 - Preserve name from an Index
        keyarr.name = key.name
    if lib.is_np_dtype(keyarr.dtype, "mM") or isinstance(
        keyarr.dtype, DatetimeTZDtype
    ):
        # DTI/TDI.take can infer a freq in some cases when we dont want one
        if isinstance(key, list) or (
            isinstance(key, type(self))
            # "Index" has no attribute "freq"
            and key.freq is None  # type: ignore[attr-defined]
        ):
            # error: "Index" has no attribute "_with_freq"; maybe "_with_infer"?
            keyarr = keyarr._with_freq(None)  # type: ignore[attr-defined]
    return keyarr, indexer
def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None:
    """
    Check that indexer can be used to return a result.
    e.g. at least one element was found,
    unless the list of keys was actually empty.
    Parameters
    ----------
    key : list-like
        Targeted labels (only used to show correct error message).
    indexer: array-like of booleans
        Indices corresponding to the key,
        (with -1 indicating not found).
    axis_name : str
    Raises
    ------
    KeyError
        If at least one key was requested but none was found.
    """
    # An empty request can never be "missing".
    if len(key) == 0:
        return
    missing_mask = indexer < 0
    nmissing = missing_mask.sum()
    if not nmissing:
        return
    if nmissing == len(indexer):
        # nothing at all matched
        raise KeyError(f"None of [{key}] are in the [{axis_name}]")
    not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique())
    raise KeyError(f"{not_found} not in index")
@overload
def _get_indexer_non_comparable(
    self, target: Index, method, unique: Literal[True] = ...
) -> npt.NDArray[np.intp]: ...
@overload
def _get_indexer_non_comparable(
    self, target: Index, method, unique: Literal[False]
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
@overload
def _get_indexer_non_comparable(
    self, target: Index, method, unique: bool = True
) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
@final
def _get_indexer_non_comparable(
    self, target: Index, method, unique: bool = True
) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
    """
    Called from get_indexer or get_indexer_non_unique when the target
    is of a non-comparable dtype.
    For get_indexer lookups with method=None, get_indexer is an _equality_
    check, so non-comparable dtypes mean we will always have no matches.
    For get_indexer lookups with a method, get_indexer is an _inequality_
    check, so non-comparable dtypes mean we will always raise TypeError.
    Parameters
    ----------
    target : Index
    method : str or None
    unique : bool, default True
        * True if called from get_indexer.
        * False if called from get_indexer_non_unique.
    Raises
    ------
    TypeError
        If doing an inequality check, i.e. method is not None.
    """
    if method is not None:
        other_dtype = _unpack_nested_dtype(target)
        raise TypeError(f"Cannot compare dtypes {self.dtype} and {other_dtype}")
    # all-(-1) indexer: nothing in target can be found in self
    no_matches = -1 * np.ones(target.shape, dtype=np.intp)
    if unique:
        # This is for get_indexer
        return no_matches
    else:
        # This is for get_indexer_non_unique
        # every target position is "missing"
        missing = np.arange(len(target), dtype=np.intp)
        return no_matches, missing
@property
def _index_as_unique(self) -> bool:
    """
    Whether we should treat this as unique for the sake of
    get_indexer vs get_indexer_non_unique.
    For IntervalIndex compat.
    """
    # Base implementation: simply defer to actual uniqueness.
    return self.is_unique
# Error message used when an operation requires a unique index.
_requires_unique_msg = "Reindexing only valid with uniquely valued Index objects"
@final
def _maybe_downcast_for_indexing(self, other: Index) -> tuple[Index, Index]:
    """
    When dealing with an object-dtype Index and a non-object Index, see
    if we can upcast the object-dtype one to improve performance.
    Returns a (self, other) pair, possibly with one or both recast to a
    mutually comparable form.  NOTE: branch order matters here.
    """
    if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex):
        if (
            self.tz is not None
            and other.tz is not None
            and not tz_compare(self.tz, other.tz)
        ):
            # standardize on UTC
            return self.tz_convert("UTC"), other.tz_convert("UTC")
    elif self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex):
        try:
            return type(other)(self), other
        except OutOfBoundsDatetime:
            # dates outside datetime64 range: leave both untouched
            return self, other
    elif self.inferred_type == "timedelta" and isinstance(other, ABCTimedeltaIndex):
        # TODO: we dont have tests that get here
        return type(other)(self), other
    elif self.dtype.kind == "u" and other.dtype.kind == "i":
        # GH#41873
        if other.min() >= 0:
            # lookup min as it may be cached
            # TODO: may need itemsize check if we have non-64-bit Indexes
            return self, other.astype(self.dtype)
    elif self._is_multi and not other._is_multi:
        try:
            # "Type[Index]" has no attribute "from_tuples"
            other = type(self).from_tuples(other)  # type: ignore[attr-defined]
        except (TypeError, ValueError):
            # let's instead try with a straight Index
            self = Index(self._values)
    if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype):
        # Reverse op so we dont need to re-implement on the subclasses
        other, self = other._maybe_downcast_for_indexing(self)
    return self, other
@final
def _find_common_type_compat(self, target) -> DtypeObj:
    """
    Implementation of find_common_type that adjusts for Index-specific
    special cases.
    """
    target_dtype, _ = infer_dtype_from(target)
    if isinstance(target, tuple):
        # GH#54385
        return np.dtype(object)
    if using_string_dtype():
        # special case: if left or right is a zero-length RangeIndex or
        # Index[object], those can be created by the default empty constructors
        # -> for that case ignore this dtype and always return the other
        # (https://github.com/pandas-dev/pandas/pull/60797)
        from pandas.core.indexes.range import RangeIndex
        if len(self) == 0 and (
            isinstance(self, RangeIndex) or self.dtype == np.object_
        ):
            return target_dtype
        if (
            isinstance(target, Index)
            and len(target) == 0
            and (isinstance(target, RangeIndex) or target_dtype == np.object_)
        ):
            return self.dtype
    # special case: if one dtype is uint64 and the other a signed int, return object
    # See https://github.com/pandas-dev/pandas/issues/26778 for discussion
    # Now it's:
    # * float | [u]int -> float
    # * uint64 | signed int -> object
    # We may change union(float | [u]int) to go to object.
    if self.dtype == "uint64" or target_dtype == "uint64":
        if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype(
            target_dtype
        ):
            return _dtype_obj
    dtype = find_result_type(self.dtype, target)
    # e.g. keep Categorical when one side is categorical of the other
    dtype = common_dtype_categorical_compat([self, target], dtype)
    return dtype
@final
def _should_compare(self, other: Index) -> bool:
    """
    Check if `self == other` can ever have non-False entries.
    """
    # NB: we use inferred_type rather than is_bool_dtype to catch
    # object_dtype_of_bool and categorical[object_dtype_of_bool] cases
    bool_vs_numeric = (
        other.inferred_type == "boolean" and is_any_real_numeric_dtype(self.dtype)
    ) or (
        self.inferred_type == "boolean" and is_any_real_numeric_dtype(other.dtype)
    )
    if bool_vs_numeric:
        # GH#16877 Treat boolean labels passed to a numeric index as not
        # found. Without this fix False and True would be treated as 0 and 1
        # respectively.
        return False
    dtype = _unpack_nested_dtype(other)
    if self._is_comparable_dtype(dtype):
        return True
    # object/string dtypes may hold anything, so comparison could match
    return is_object_dtype(dtype) or is_string_dtype(dtype)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
    """
    Can we compare values of the given dtype to our own?
    """
    if self.dtype.kind == "b":
        # bool indexes only compare meaningfully against bool dtypes
        return dtype.kind == "b"
    if is_numeric_dtype(self.dtype):
        # any numeric dtype can compare against any other numeric dtype
        return is_numeric_dtype(dtype)
    # TODO: this was written assuming we only get here with object-dtype,
    # which is no longer correct. Can we specialize for EA?
    return True
@final
def groupby(self, values) -> PrettyDict[Hashable, Index]:
    """
    Group the index labels by a given array of values.
    Parameters
    ----------
    values : array
        Values used to determine the groups.
    Returns
    -------
    dict
        {group name -> group labels}
    """
    # TODO: if we are a MultiIndex, we can do better
    # that converting to tuples
    if isinstance(values, ABCMultiIndex):
        values = values._values
    # Categorical gives us a cheap value -> positions reverse mapping
    positions_by_group = Categorical(values)._reverse_indexer()
    # translate positions back into labels from self
    return PrettyDict(
        {name: self.take(locs) for name, locs in positions_by_group.items()}
    )
def map(self, mapper, na_action: Literal["ignore"] | None = None):
    """
    Map values using an input mapping or function.
    Parameters
    ----------
    mapper : function, dict, or Series
        Mapping correspondence.
    na_action : {None, 'ignore'}
        If 'ignore', propagate NA values, without passing them to the
        mapping correspondence.
    Returns
    -------
    Union[Index, MultiIndex]
        The output of the mapping function applied to the index.
        If the function returns a tuple with more than one element
        a MultiIndex will be returned.
    See Also
    --------
    Index.where : Replace values where the condition is False.
    Examples
    --------
    >>> idx = pd.Index([1, 2, 3])
    >>> idx.map({1: "a", 2: "b", 3: "c"})
    Index(['a', 'b', 'c'], dtype='object')
    Using `map` with a function:
    >>> idx = pd.Index([1, 2, 3])
    >>> idx.map("I am a {}".format)
    Index(['I am a 1', 'I am a 2', 'I am a 3'], dtype='object')
    >>> idx = pd.Index(["a", "b", "c"])
    >>> idx.map(lambda x: x.upper())
    Index(['A', 'B', 'C'], dtype='object')
    """
    from pandas.core.indexes.multi import MultiIndex
    new_values = self._map_values(mapper, na_action=na_action)
    # we can return a MultiIndex
    if new_values.size and isinstance(new_values[0], tuple):
        if isinstance(self, MultiIndex):
            names = self.names
        elif self.name:
            # broadcast the single name across all new levels
            names = [self.name] * len(new_values[0])
        else:
            names = None
        return MultiIndex.from_tuples(new_values, names=names)
    dtype = None
    if not new_values.size:
        # empty
        dtype = self.dtype
    elif isinstance(new_values, Categorical):
        # cast_pointwise_result is unnecessary
        dtype = new_values.dtype
    else:
        # use an empty slice of self to get the right array type for casting
        if isinstance(self, MultiIndex):
            arr = self[:0].to_flat_index().array
        else:
            arr = self[:0].array
        # e.g. if we are floating and new_values is all ints, then we
        # don't want to cast back to floating. But if we are UInt64
        # and new_values is all ints, we want to try.
        new_values = arr._cast_pointwise_result(new_values)
        dtype = new_values.dtype
    return Index(new_values, dtype=dtype, copy=False, name=self.name)
# TODO: De-duplicate with map, xref GH#32349
@final
def _transform_index(self, func, *, level=None) -> Index:
    """
    Apply function to all values found in index.
    This includes transforming multiindex entries separately.
    Only apply function to one level of the MultiIndex if level is specified.
    """
    if not isinstance(self, ABCMultiIndex):
        # flat index: transform every label directly
        transformed = [func(x) for x in self]
        return Index(transformed, name=self.name, tupleize_cols=False)
    # MultiIndex: transform the requested level(s), keep the rest as-is
    new_levels = []
    for i in range(self.nlevels):
        level_values = self.get_level_values(i)
        if level is None or i == level:
            level_values = level_values.map(func)
        new_levels.append(level_values)
    return type(self).from_arrays(new_levels)
def isin(self, values, level: str_t | int | None = None) -> npt.NDArray[np.bool_]:
    """
    Return a boolean array where the index values are in `values`.
    Compute boolean array of whether each index value is found in the
    passed set of values. The length of the returned boolean array matches
    the length of the index.
    Parameters
    ----------
    values : set or list-like
        Sought values.
    level : str or int, optional
        Name or position of the index level to use (if the index is a
        `MultiIndex`).
    Returns
    -------
    np.ndarray[bool]
        NumPy array of boolean values.
    See Also
    --------
    Series.isin : Same for Series.
    DataFrame.isin : Same method for DataFrames.
    Notes
    -----
    In the case of `MultiIndex` you must either specify `values` as a
    list-like object containing tuples that are the same length as the
    number of levels, or specify `level`. Otherwise it will raise a
    ``ValueError``.
    If `level` is specified:
    - if it is the name of one *and only one* index level, use that level;
    - otherwise it should be a number indicating level position.
    Examples
    --------
    >>> idx = pd.Index([1, 2, 3])
    >>> idx
    Index([1, 2, 3], dtype='int64')
    Check whether each index value in a list of values.
    >>> idx.isin([1, 4])
    array([ True, False, False])
    >>> midx = pd.MultiIndex.from_arrays(
    ...     [[1, 2, 3], ["red", "blue", "green"]], names=["number", "color"]
    ... )
    >>> midx
    MultiIndex([(1, 'red'),
                (2, 'blue'),
                (3, 'green')],
               names=['number', 'color'])
    Check whether the strings in the 'color' level of the MultiIndex
    are in a list of colors.
    >>> midx.isin(["red", "orange", "yellow"], level="color")
    array([ True, False, False])
    To check across the levels of a MultiIndex, pass a list of tuples:
    >>> midx.isin([(1, "red"), (3, "red")])
    array([ True, False, False])
    """
    # level is validation-only here; MultiIndex overrides the actual
    # per-level lookup behavior.
    if level is not None:
        self._validate_index_level(level)
    return algos.isin(self._values, values)
def _get_string_slice(self, key: str_t):
    """Slice positions matching a partial string key; not supported here."""
    # this is for partial string indexing,
    # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
    raise NotImplementedError
def slice_indexer(
    self,
    start: Hashable | None = None,
    end: Hashable | None = None,
    step: int | None = None,
) -> slice:
    """
    Compute the slice indexer for input labels and step.
    Index needs to be ordered and unique.
    Parameters
    ----------
    start : label, default None
        If None, defaults to the beginning.
    end : label, default None
        If None, defaults to the end.
    step : int, default None
        If None, defaults to 1.
    Returns
    -------
    slice
        A slice object.
    Raises
    ------
    KeyError : If key does not exist, or key is not unique and index is
        not ordered.
    See Also
    --------
    Index.slice_locs : Computes slice locations for input labels.
    Index.get_slice_bound : Retrieves slice bound that corresponds to given label.
    Notes
    -----
    This function assumes that the data is sorted, so use at your own peril.
    Examples
    --------
    This is a method on all index types. For example you can do:
    >>> idx = pd.Index(list("abcd"))
    >>> idx.slice_indexer(start="b", end="c")
    slice(1, 3, None)
    >>> idx = pd.MultiIndex.from_arrays([list("abcd"), list("efgh")])
    >>> idx.slice_indexer(start="b", end=("c", "g"))
    slice(1, 3, None)
    """
    start_slice, end_slice = self.slice_locs(start, end, step=step)
    # return a slice
    # slice_locs should return scalar positions; guard against
    # non-unique/boolean results sneaking through.
    if not is_scalar(start_slice):
        raise AssertionError("Start slice bound is non-scalar")
    if not is_scalar(end_slice):
        raise AssertionError("End slice bound is non-scalar")
    return slice(start_slice, end_slice, step)
def _maybe_cast_indexer(self, key):
    """
    If we have a float key and are not a floating index, then try to cast
    to an int if equivalent.
    """
    # A float NaN key against a masked-float index should look up the
    # dtype's own NA value instead.
    is_nan_key = is_float(key) and np.isnan(key)
    if is_nan_key and isinstance(self.dtype, FloatingDtype) and is_nan_na():
        # TODO: better place to do this?
        return self.dtype.na_value
    return key
def _maybe_cast_listlike_indexer(self, target) -> Index:
    """
    Analogue to maybe_cast_indexer for get_indexer instead of get_loc.
    Returns *target* coerced to an Index, adjusting dtype/NA handling
    when target was a plain list-like (i.e. had no dtype of its own).
    """
    target_index = ensure_index(target)
    if (
        not hasattr(target, "dtype")
        and self.dtype == object
        and target_index.dtype == "string"
    ):
        # If we started with a list-like, avoid inference to string dtype if self
        # is object dtype (coercing to string dtype will alter the missing values)
        target_index = Index(target, dtype=self.dtype)
    elif (
        not hasattr(target, "dtype")
        and isinstance(self.dtype, StringDtype)
        and self.dtype.na_value is np.nan
        and using_string_dtype()
    ):
        # Fill missing values to ensure consistent missing value representation
        target_index = target_index.fillna(np.nan)
    return target_index
@final
def _validate_indexer(
    self,
    form: Literal["positional", "slice"],
    key,
    kind: Literal["getitem", "iloc"],
) -> None:
    """
    If we are positional indexer, validate that we have appropriate
    typed bounds must be an integer.
    """
    # Only integers (or None) are acceptable positional bounds.
    if lib.is_int_or_none(key):
        return
    self._raise_invalid_indexer(form, key)
def _maybe_cast_slice_bound(self, label, side: str_t):
    """
    This function should be overloaded in subclasses that allow non-trivial
    casting on label-slice bounds, e.g. datetime-like indices allowing
    strings containing formatted datetimes.
    Parameters
    ----------
    label : object
    side : {'left', 'right'}
    Returns
    -------
    label : object
    Notes
    -----
    Value of `side` parameter should be validated in caller.
    """
    # We are a plain index here (sub-class override this method if they
    # wish to have special treatment for floats/ints, e.g. datetimelike Indexes
    if is_numeric_dtype(self.dtype):
        # numeric indexes take numeric labels as-is (after NaN handling)
        return self._maybe_cast_indexer(label)
    # reject them, if index does not contain label
    if (is_float(label) or is_integer(label)) and label not in self:
        self._raise_invalid_indexer("slice", label)
    return label
def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"):
    """Locate *label*'s insertion point in a monotonic index."""
    if self.is_monotonic_increasing:
        return self.searchsorted(label, side=side)
    if self.is_monotonic_decreasing:
        # np.searchsorted expects ascending sort order, have to reverse
        # everything for it to work (element ordering, search side and
        # resulting value).
        mirrored_side = "right" if side == "left" else "left"
        pos = self[::-1].searchsorted(label, side=mirrored_side)
        return len(self) - pos
    raise ValueError("index must be monotonic increasing or decreasing")
def get_slice_bound(self, label, side: Literal["left", "right"]) -> int:
    """
    Calculate slice bound that corresponds to given label.
    Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
    of given label.
    Parameters
    ----------
    label : object
        The label for which to calculate the slice bound.
    side : {'left', 'right'}
        if 'left' return leftmost position of given label.
        if 'right' return one-past-the-rightmost position of given label.
    Returns
    -------
    int
        Index of label.
    See Also
    --------
    Index.get_loc : Get integer location, slice or boolean mask for requested
        label.
    Examples
    --------
    >>> idx = pd.RangeIndex(5)
    >>> idx.get_slice_bound(3, "left")
    3
    >>> idx.get_slice_bound(3, "right")
    4
    If ``label`` is non-unique in the index, an error will be raised.
    >>> idx_duplicate = pd.Index(["a", "b", "a", "c", "d"])
    >>> idx_duplicate.get_slice_bound("a", "left")
    Traceback (most recent call last):
    KeyError: Cannot get left slice bound for non-unique label: 'a'
    """
    if side not in ("left", "right"):
        raise ValueError(
            "Invalid value for side kwarg, must be either "
            f"'left' or 'right': {side}"
        )
    original_label = label
    # For datetime indices label may be a string that has to be converted
    # to datetime boundary according to its resolution.
    label = self._maybe_cast_slice_bound(label, side)
    # we need to look up the label
    try:
        slc = self.get_loc(label)
    except KeyError as err:
        # label absent: fall back to its sorted insertion position
        try:
            return self._searchsorted_monotonic(label, side)
        except ValueError:
            # raise the original KeyError
            raise err from None
    if isinstance(slc, np.ndarray):
        # get_loc may return a boolean array, which
        # is OK as long as they are representable by a slice.
        assert is_bool_dtype(slc.dtype)
        slc = lib.maybe_booleans_to_slice(slc.view("u1"))
        if isinstance(slc, np.ndarray):
            # still an array -> positions are non-contiguous
            raise KeyError(
                f"Cannot get {side} slice bound for non-unique "
                f"label: {original_label!r}"
            )
    if isinstance(slc, slice):
        if side == "left":
            return slc.start
        else:
            return slc.stop
    else:
        # scalar position
        if side == "right":
            return slc + 1
        else:
            return slc
def slice_locs(
    self,
    start: SliceType = None,
    end: SliceType = None,
    step: int | None = None,
) -> tuple[int, int]:
    """
    Compute slice locations for input labels.
    Parameters
    ----------
    start : label, default None
        If None, defaults to the beginning.
    end : label, default None
        If None, defaults to the end.
    step : int, defaults None
        If None, defaults to 1.
    Returns
    -------
    tuple[int, int]
        Returns a tuple of two integers representing the slice locations for the
        input labels within the index.
    See Also
    --------
    Index.get_loc : Get location for a single label.
    Notes
    -----
    This method only works if the index is monotonic or unique.
    Examples
    --------
    >>> idx = pd.Index(list("abcd"))
    >>> idx.slice_locs(start="b", end="c")
    (1, 3)
    >>> idx = pd.Index(list("bcde"))
    >>> idx.slice_locs(start="a", end="c")
    (0, 2)
    """
    inc = step is None or step >= 0
    if not inc:
        # If it's a reverse slice, temporarily swap bounds.
        start, end = end, start
    # GH 16785: If start and end happen to be date strings with UTC offsets
    # attempt to parse and check that the offsets are the same
    if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)):
        try:
            ts_start = Timestamp(start)
            ts_end = Timestamp(end)
        except (ValueError, TypeError):
            # not parseable as timestamps; skip the offset check
            pass
        else:
            if not tz_compare(ts_start.tzinfo, ts_end.tzinfo):
                raise ValueError("Both dates must have the same UTC offset")
    start_slice = None
    if start is not None:
        start_slice = self.get_slice_bound(start, "left")
    if start_slice is None:
        start_slice = 0
    end_slice = None
    if end is not None:
        end_slice = self.get_slice_bound(end, "right")
    if end_slice is None:
        end_slice = len(self)
    if not inc:
        # Bounds at this moment are swapped, swap them back and shift by 1.
        #
        # slice_locs('B', 'A', step=-1): s='B', e='A'
        #
        #              s='A'                 e='B'
        # AFTER SWAP:    |                     |
        #                v ------------------> V
        #           -----------------------------------
        #           | | |A|A|A|A| | | | | |B|B| | | | |
        #           -----------------------------------
        #              ^ <------------------ ^
        # SHOULD BE:   |                     |
        #           end=s-1              start=e-1
        #
        end_slice, start_slice = start_slice - 1, end_slice - 1
        # i == -1 triggers ``len(self) + i`` selection that points to the
        # last element, not before-the-first one, subtracting len(self)
        # compensates that.
        if end_slice == -1:
            end_slice -= len(self)
        if start_slice == -1:
            start_slice -= len(self)
    return start_slice, end_slice
def delete(
    self, loc: int | np.integer | list[int] | npt.NDArray[np.integer]
) -> Self:
    """
    Make new Index with passed location(-s) deleted.
    Parameters
    ----------
    loc : int or list of int
        Location of item(-s) which will be deleted.
        Use a list of locations to delete more than one value at the same time.
    Returns
    -------
    Index
        Will be same type as self, except for RangeIndex.
    See Also
    --------
    numpy.delete : Delete any rows and column from NumPy array (ndarray).
    Examples
    --------
    >>> idx = pd.Index(["a", "b", "c"])
    >>> idx.delete(1)
    Index(['a', 'c'], dtype='str')
    >>> idx = pd.Index(["a", "b", "c"])
    >>> idx.delete([0, 2])
    Index(['b'], dtype='str')
    """
    vals = self._values
    remaining: ArrayLike
    if isinstance(vals, np.ndarray):
        # TODO(__array_function__): special casing will be unnecessary
        remaining = np.delete(vals, loc)
    else:
        remaining = vals.delete(loc)
    # _constructor so RangeIndex-> Index with an int64 dtype
    return self._constructor._simple_new(remaining, name=self.name)
def insert(self, loc: int, item) -> Index:
    """
    Make new Index inserting new item at location.
    Follows Python numpy.insert semantics for negative values.
    Parameters
    ----------
    loc : int
        The integer location where the new item will be inserted.
    item : object
        The new item to be inserted into the Index.
    Returns
    -------
    Index
        Returns a new Index object resulting from inserting the specified item at
        the specified location within the original Index.
    See Also
    --------
    Index.append : Append a collection of Indexes together.
    Examples
    --------
    >>> idx = pd.Index(["a", "b", "c"])
    >>> idx.insert(1, "x")
    Index(['a', 'x', 'b', 'c'], dtype='str')
    """
    item = lib.item_from_zerodim(item)
    if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object:
        # normalize any NA-like to this dtype's canonical NA value
        item = self._na_value
    arr = self._values
    if using_string_dtype() and len(self) == 0 and self.dtype == np.object_:
        # special case: if we are an empty object-dtype Index, also
        # take into account the inserted item for the resulting dtype
        # (https://github.com/pandas-dev/pandas/pull/60797)
        dtype = self._find_common_type_compat(item)
        if dtype != self.dtype:
            return self.astype(dtype).insert(loc, item)
    try:
        if isinstance(arr, ExtensionArray):
            res_values = arr.insert(loc, item)
            return type(self)._simple_new(res_values, name=self.name)
        else:
            item = self._validate_fill_value(item)
    except (TypeError, ValueError, LossySetitemError):
        # e.g. trying to insert an integer into a DatetimeIndex
        # We cannot keep the same dtype, so cast to the (often object)
        # minimal shared dtype before doing the insert.
        dtype = self._find_common_type_compat(item)
        if dtype == self.dtype:
            # EA's might run into recursion errors if loc is invalid
            raise
        return self.astype(dtype).insert(loc, item)
    if arr.dtype != object or not isinstance(
        item, (tuple, np.datetime64, np.timedelta64)
    ):
        # with object-dtype we need to worry about numpy incorrectly casting
        # dt64/td64 to integer, also about treating tuples as sequences
        # special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550
        casted = arr.dtype.type(item)
        new_values = np.insert(arr, loc, casted)
    else:
        # error: No overload variant of "insert" matches argument types
        # "ndarray[Any, Any]", "int", "None"
        new_values = np.insert(arr, loc, None)  # type: ignore[call-overload]
        # insert a placeholder, then assign the item directly to avoid
        # numpy's coercion; adjust negative loc for the grown array
        loc = loc if loc >= 0 else loc - 1
        new_values[loc] = item
    # GH#51363 stopped doing dtype inference here
    out = Index(new_values, dtype=new_values.dtype, name=self.name)
    return out
def drop(
    self,
    labels: Index | np.ndarray | Iterable[Hashable],
    errors: IgnoreRaise = "raise",
) -> Index:
    """
    Make new Index with passed list of labels deleted.
    Parameters
    ----------
    labels : array-like or scalar
        Array-like object or a scalar value, representing the labels to be removed
        from the Index.
    errors : {'ignore', 'raise'}, default 'raise'
        If 'ignore', suppress error and existing labels are dropped.
    Returns
    -------
    Index
        Will be same type as self, except for RangeIndex.
    Raises
    ------
    KeyError
        If not all of the labels are found in the selected axis
    See Also
    --------
    Index.dropna : Return Index without NA/NaN values.
    Index.drop_duplicates : Return Index with duplicate values removed.
    Examples
    --------
    >>> idx = pd.Index(["a", "b", "c"])
    >>> idx.drop(["a"])
    Index(['b', 'c'], dtype='object')
    """
    if not isinstance(labels, Index):
        # avoid materializing e.g. RangeIndex
        arr_dtype = "object" if self.dtype == "object" else None
        labels = com.index_labels_to_array(labels, dtype=arr_dtype)
    indexer = self.get_indexer_for(labels)
    mask = indexer == -1
    if mask.any():
        if errors != "ignore":
            raise KeyError(f"{labels[mask].tolist()} not found in axis")
        # keep only the positions of labels that were actually found
        indexer = indexer[~mask]
    return self.delete(indexer)
@final
def infer_objects(self, copy: bool = True) -> Index:
"""
If we have an object dtype, try to infer a non-object dtype.
Parameters
----------
copy : bool, default True
Whether to make a copy in cases where no inference occurs.
Returns
-------
Index
An Index with a new dtype if the dtype was inferred
or a shallow copy if the dtype could not be inferred.
See Also
--------
Index.inferred_type: Return a string of the type inferred from the values.
Examples
--------
>>> pd.Index(["a", 1]).infer_objects()
Index(['a', '1'], dtype='object')
>>> pd.Index([1, 2], dtype="object").infer_objects()
Index([1, 2], dtype='int64')
"""
if self._is_multi:
raise NotImplementedError(
"infer_objects is not implemented for MultiIndex. "
"Use index.to_frame().infer_objects() instead."
)
if self.dtype != object:
return self.copy() if copy else self
values = self._values
values = cast("npt.NDArray[np.object_]", values)
res_values = lib.maybe_convert_objects(
values,
convert_non_numeric=True,
)
if copy and res_values is values:
return self.copy()
result = Index(res_values, name=self.name)
if not copy and res_values is values and self._references is not None:
result._references = self._references
result._references.add_index_reference(result)
return result
@final
def diff(self, periods: int = 1) -> Index:
"""
Computes the difference between consecutive values in the Index object.
If periods is greater than 1, computes the difference between values that
are `periods` number of positions apart.
Parameters
----------
periods : int, optional
The number of positions between the current and previous
value to compute the difference with. Default is 1.
Returns
-------
Index
A new Index object with the computed differences.
Examples
--------
>>> import pandas as pd
>>> idx = pd.Index([10, 20, 30, 40, 50])
>>> idx.diff()
Index([nan, 10.0, 10.0, 10.0, 10.0], dtype='float64')
"""
return Index(self.to_series().diff(periods))
def round(self, decimals: int = 0) -> Self:
"""
Round each value in the Index to the given number of decimals.
Parameters
----------
decimals : int, optional
Number of decimal places to round to. If decimals is negative,
it specifies the number of positions to the left of the decimal point.
Returns
-------
Index
A new Index with the rounded values.
Examples
--------
>>> import pandas as pd
>>> idx = pd.Index([10.1234, 20.5678, 30.9123, 40.4567, 50.7890])
>>> idx.round(decimals=2)
Index([10.12, 20.57, 30.91, 40.46, 50.79], dtype='float64')
"""
return self._constructor(self.to_series().round(decimals))
# --------------------------------------------------------------------
# Generated Arithmetic, Comparison, and Unary Methods
def _cmp_method(self, other, op):
"""
Wrapper used to dispatch comparison operations.
"""
if self.is_(other):
# fastpath
if op in {operator.eq, operator.le, operator.ge}:
arr = np.ones(len(self), dtype=bool)
if self._can_hold_na and not isinstance(self, ABCMultiIndex):
# TODO: should set MultiIndex._can_hold_na = False?
arr[self.isna()] = False
return arr
elif op is operator.ne:
arr = np.zeros(len(self), dtype=bool)
if self._can_hold_na and not isinstance(self, ABCMultiIndex):
arr[self.isna()] = True
return arr
if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len(
self
) != len(other):
raise ValueError("Lengths must match to compare")
if not isinstance(other, ABCMultiIndex):
other = extract_array(other, extract_numpy=True)
else:
other = np.asarray(other)
result = ops.comparison_op(self._values, other, op)
return result
@final
def _logical_method(self, other, op):
res_name = ops.get_op_result_name(self, other)
lvalues = self._values
rvalues = extract_array(other, extract_numpy=True, extract_range=True)
res_values = ops.logical_op(lvalues, rvalues, op)
return self._construct_result(res_values, name=res_name, other=other)
@final
def _construct_result(self, result, name, other):
if isinstance(result, tuple):
return (
Index(result[0], name=name, dtype=result[0].dtype),
Index(result[1], name=name, dtype=result[1].dtype),
)
return Index(result, name=name, dtype=result.dtype)
def _arith_method(self, other, op):
if (
isinstance(other, Index)
and is_object_dtype(other.dtype)
and type(other) is not Index
):
# We return NotImplemented for object-dtype index *subclasses* so they have
# a chance to implement ops before we unwrap them.
# See https://github.com/pandas-dev/pandas/issues/31109
return NotImplemented
return super()._arith_method(other, op)
@final
def _unary_method(self, op):
result = op(self._values)
return Index(result, name=self.name)
def __abs__(self) -> Index:
return self._unary_method(operator.abs)
def __neg__(self) -> Index:
return self._unary_method(operator.neg)
def __pos__(self) -> Index:
return self._unary_method(operator.pos)
def __invert__(self) -> Index:
# GH#8875
return self._unary_method(operator.inv)
# --------------------------------------------------------------------
# Reductions
def any(self, *args, **kwargs):
"""
Return whether any element is Truthy.
Parameters
----------
*args
Required for compatibility with numpy.
**kwargs
Required for compatibility with numpy.
Returns
-------
bool or array-like (if axis is specified)
A single element array-like may be converted to bool.
See Also
--------
Index.all : Return whether all elements are True.
Series.all : Return whether all elements are True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to True because these are not equal to zero.
Examples
--------
>>> index = pd.Index([0, 1, 2])
>>> index.any()
True
>>> index = pd.Index([0, 0, 0])
>>> index.any()
False
"""
nv.validate_any(args, kwargs)
self._maybe_disable_logical_methods("any")
vals = self._values
if not isinstance(vals, np.ndarray):
# i.e. EA, call _reduce instead of "any" to get TypeError instead
# of AttributeError
return vals._reduce("any")
return np.any(vals)
def all(self, *args, **kwargs):
"""
Return whether all elements are Truthy.
Parameters
----------
*args
Required for compatibility with numpy.
**kwargs
Required for compatibility with numpy.
Returns
-------
bool or array-like (if axis is specified)
A single element array-like may be converted to bool.
See Also
--------
Index.any : Return whether any element in an Index is True.
Series.any : Return whether any element in a Series is True.
Series.all : Return whether all elements in a Series are True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to True because these are not equal to zero.
Examples
--------
True, because nonzero integers are considered True.
>>> pd.Index([1, 2, 3]).all()
True
False, because ``0`` is considered False.
>>> pd.Index([0, 1, 2]).all()
False
"""
nv.validate_all(args, kwargs)
self._maybe_disable_logical_methods("all")
vals = self._values
if not isinstance(vals, np.ndarray):
# i.e. EA, call _reduce instead of "all" to get TypeError instead
# of AttributeError
return vals._reduce("all")
return np.all(vals)
@final
def _maybe_disable_logical_methods(self, opname: str_t) -> None:
"""
raise if this Index subclass does not support any or all.
"""
if isinstance(self, ABCMultiIndex):
raise TypeError(f"cannot perform {opname} with {type(self).__name__}")
@Appender(IndexOpsMixin.argmin.__doc__)
def argmin(
self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs
) -> int:
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
if not self._is_multi and self.hasnans:
if not skipna:
raise ValueError("Encountered an NA value with skipna=False")
elif self._isnan.all():
raise ValueError("Encountered all NA values")
return super().argmin(skipna=skipna)
@Appender(IndexOpsMixin.argmax.__doc__)
def argmax(
self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs
) -> int:
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
if not self._is_multi and self.hasnans:
if not skipna:
raise ValueError("Encountered an NA value with skipna=False")
elif self._isnan.all():
raise ValueError("Encountered all NA values")
return super().argmax(skipna=skipna)
def min(self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs):
"""
Return the minimum value of the Index.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(["c", "b", "a"])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([("a", "b"), (2, 1)])
>>> idx.min()
('a', 1)
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
if len(self) and self.is_monotonic_increasing:
# quick check
first = self[0]
if not isna(first):
return first
if not self._is_multi and self.hasnans:
# Take advantage of cache
mask = self._isnan
if not skipna or mask.all():
return self._na_value
if not self._is_multi and not isinstance(self._values, np.ndarray):
return self._values._reduce(name="min", skipna=skipna)
return nanops.nanmin(self._values, skipna=skipna)
def max(self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs):
"""
Return the maximum value of the Index.
Parameters
----------
axis : int, optional
For compatibility with NumPy. Only 0 or None are allowed.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(["c", "b", "a"])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([("a", "b"), (2, 1)])
>>> idx.max()
('b', 2)
"""
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
if len(self) and self.is_monotonic_increasing:
# quick check
last = self[-1]
if not isna(last):
return last
if not self._is_multi and self.hasnans:
# Take advantage of cache
mask = self._isnan
if not skipna or mask.all():
return self._na_value
if not self._is_multi and not isinstance(self._values, np.ndarray):
return self._values._reduce(name="max", skipna=skipna)
return nanops.nanmax(self._values, skipna=skipna)
# --------------------------------------------------------------------
@final
@property
def shape(self) -> Shape:
"""
Return a tuple of the shape of the underlying data.
See Also
--------
Index.size: Return the number of elements in the underlying data.
Index.ndim: Number of dimensions of the underlying data, by definition 1.
Index.dtype: Return the dtype object of the underlying data.
Index.values: Return an array representing the data in the Index.
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.shape
(3,)
"""
# See GH#27775, GH#27384 for history/reasoning in how this is defined.
return (len(self),)
def maybe_sequence_to_range(sequence) -> Any | range:
"""
Convert a 1D, non-pandas sequence to a range if possible.
Returns the input if not possible.
Parameters
----------
sequence : 1D sequence
names : sequence of str
Returns
-------
Any : input or range
"""
if isinstance(sequence, (range, ExtensionArray)):
return sequence
elif len(sequence) == 1 or lib.infer_dtype(sequence, skipna=False) != "integer":
return sequence
elif isinstance(sequence, (ABCSeries, Index)) and not (
isinstance(sequence.dtype, np.dtype) and sequence.dtype.kind == "i"
):
return sequence
if len(sequence) == 0:
return range(0)
try:
np_sequence = np.asarray(sequence, dtype=np.int64)
except OverflowError:
return sequence
diff = np_sequence[1] - np_sequence[0]
if diff == 0:
return sequence
elif len(sequence) == 2 or lib.is_sequence_range(np_sequence, diff):
return range(np_sequence[0], np_sequence[-1] + diff, diff)
else:
return sequence
def ensure_index_from_sequences(sequences, names=None) -> Index:
"""
Construct an index from sequences of data.
A single sequence returns an Index. Many sequences returns a
MultiIndex.
Parameters
----------
sequences : sequence of sequences
names : sequence of str
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> ensure_index_from_sequences([[1, 2, 4]], names=["name"])
Index([1, 2, 4], dtype='int64', name='name')
>>> ensure_index_from_sequences([["a", "a"], ["a", "b"]], names=["L1", "L2"])
MultiIndex([('a', 'a'),
('a', 'b')],
names=['L1', 'L2'])
See Also
--------
ensure_index
"""
from pandas.core.indexes.api import default_index
from pandas.core.indexes.multi import MultiIndex
if len(sequences) == 0:
return default_index(0)
elif len(sequences) == 1:
if names is not None:
names = names[0]
return Index(maybe_sequence_to_range(sequences[0]), name=names)
else:
# TODO: Apply maybe_sequence_to_range to sequences?
return MultiIndex.from_arrays(sequences, names=names)
def ensure_index(index_like: Axes, copy: bool = False) -> Index:
"""
Ensure that we have an index from some index-like object.
Parameters
----------
index_like : sequence
An Index or other sequence
copy : bool, default False
Returns
-------
index : Index or MultiIndex
See Also
--------
ensure_index_from_sequences
Examples
--------
>>> ensure_index(["a", "b"])
Index(['a', 'b'], dtype='str')
>>> ensure_index([("a", "a"), ("b", "c")])
Index([('a', 'a'), ('b', 'c')], dtype='object')
>>> ensure_index([["a", "a"], ["b", "c"]])
MultiIndex([('a', 'b'),
('a', 'c')],
)
"""
if isinstance(index_like, Index):
if copy:
index_like = index_like.copy()
return index_like
if isinstance(index_like, ABCSeries):
name = index_like.name
return Index(index_like, name=name, copy=copy)
if is_iterator(index_like):
index_like = list(index_like)
if isinstance(index_like, list):
if type(index_like) is not list:
# must check for exactly list here because of strict type
# check in clean_index_list
index_like = list(index_like)
if index_like and lib.is_all_arraylike(index_like):
from pandas.core.indexes.multi import MultiIndex
return MultiIndex.from_arrays(index_like)
else:
return Index(index_like, copy=copy, tupleize_cols=False)
else:
return Index(index_like, copy=copy)
def trim_front(strings: list[str]) -> list[str]:
"""
Trims leading spaces evenly among all strings.
Examples
--------
>>> trim_front([" a", " b"])
['a', 'b']
>>> trim_front([" a", " "])
['a', '']
"""
if not strings:
return strings
smallest_leading_space = min(len(x) - len(x.lstrip()) for x in strings)
if smallest_leading_space > 0:
strings = [x[smallest_leading_space:] for x in strings]
return strings
def _validate_join_method(method: str) -> None:
if method not in ["left", "right", "inner", "outer"]:
raise ValueError(f"do not recognize join method {method}")
def maybe_extract_name(name, obj, cls) -> Hashable:
"""
If no name is passed, then extract it from data, validating hashability.
"""
if name is None and isinstance(obj, (Index, ABCSeries)):
# Note we don't just check for "name" attribute since that would
# pick up e.g. dtype.name
name = obj.name
# GH#29069
if not is_hashable(name):
raise TypeError(f"{cls.__name__}.name must be a hashable type")
return name
def get_unanimous_names(*indexes: Index) -> tuple[Hashable, ...]:
"""
Return common name if all indices agree, otherwise None (level-by-level).
Parameters
----------
indexes : list of Index objects
Returns
-------
list
A list representing the unanimous 'names' found.
"""
name_tups = (tuple(i.names) for i in indexes)
name_sets = ({*ns} for ns in zip_longest(*name_tups))
names = tuple(ns.pop() if len(ns) == 1 else None for ns in name_sets)
return names
def _unpack_nested_dtype(other: Index) -> DtypeObj:
"""
When checking if our dtype is comparable with another, we need
to unpack CategoricalDtype to look at its categories.dtype.
Parameters
----------
other : Index
Returns
-------
np.dtype or ExtensionDtype
"""
dtype = other.dtype
if isinstance(dtype, CategoricalDtype):
# If there is ever a SparseIndex, this could get dispatched
# here too.
return dtype.categories.dtype
elif isinstance(dtype, ArrowDtype):
# GH 53617
import pyarrow as pa
if pa.types.is_dictionary(dtype.pyarrow_dtype):
other = other[:0].astype(ArrowDtype(dtype.pyarrow_dtype.value_type))
return other.dtype
def _maybe_try_sort(result: Index | ArrayLike, sort: bool | None):
if sort is not False:
try:
# error: Incompatible types in assignment (expression has type
# "Union[ExtensionArray, ndarray[Any, Any], Index, Series,
# Tuple[Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series],
# ndarray[Any, Any]]]", variable has type "Union[Index,
# Union[ExtensionArray, ndarray[Any, Any]]]")
result = algos.safe_sort(result) # type: ignore[assignment]
except TypeError as err:
if sort is True:
raise
warnings.warn(
f"{err}, sort order is undefined for incomparable objects.",
RuntimeWarning,
stacklevel=find_stack_level(),
)
return result
def get_values_for_csv(
values: ArrayLike,
*,
date_format,
na_rep: str = "nan",
quoting=None,
float_format=None,
decimal: str = ".",
) -> npt.NDArray[np.object_]:
"""
Convert to types which can be consumed by the standard library's
csv.writer.writerows.
"""
if isinstance(values, Categorical) and values.categories.dtype.kind in "Mm":
# GH#40754 Convert categorical datetimes to datetime array
values = algos.take_nd(
values.categories._values,
ensure_platform_int(values._codes),
fill_value=na_rep,
)
values = ensure_wrapped_if_datetimelike(values)
if isinstance(values, (DatetimeArray, TimedeltaArray)):
if values.ndim == 1:
result = values._format_native_types(na_rep=na_rep, date_format=date_format)
result = result.astype(object, copy=False)
return result
# GH#21734 Process every column separately, they might have different formats
results_converted = []
for i in range(len(values)):
result = values[i, :]._format_native_types(
na_rep=na_rep, date_format=date_format
)
results_converted.append(result.astype(object, copy=False))
return np.vstack(results_converted)
elif isinstance(values.dtype, PeriodDtype):
# TODO: tests that get here in column path
values = cast("PeriodArray", values)
res = values._format_native_types(na_rep=na_rep, date_format=date_format)
return res
elif isinstance(values.dtype, IntervalDtype):
# TODO: tests that get here in column path
values = cast("IntervalArray", values)
mask = values.isna()
if not quoting:
result = np.asarray(values).astype(str)
else:
result = np.array(values, dtype=object, copy=True)
result[mask] = na_rep
return result
elif values.dtype.kind == "f" and not isinstance(values.dtype, SparseDtype):
# see GH#13418: no special formatting is desired at the
# output (important for appropriate 'quoting' behaviour),
# so do not pass it through the FloatArrayFormatter
if float_format is None and decimal == ".":
mask = isna(values)
if not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype="object")
values[mask] = na_rep
values = values.astype(object, copy=False)
return values
from pandas.io.formats.format import FloatArrayFormatter
formatter = FloatArrayFormatter(
values,
na_rep=na_rep,
float_format=float_format,
decimal=decimal,
quoting=quoting,
fixed_width=False,
)
res = formatter.get_result_as_array()
res = res.astype(object, copy=False)
return res
elif isinstance(values, ExtensionArray):
mask = isna(values)
new_values = np.asarray(values.astype(object))
new_values[mask] = na_rep
return new_values
else:
mask = isna(values)
itemsize = writers.word_len(na_rep)
if values.dtype != _dtype_obj and not quoting and itemsize:
values = values.astype(str)
if values.dtype.itemsize / np.dtype("U1").itemsize < itemsize:
# enlarge for the na_rep
values = values.astype(f"<U{itemsize}")
else:
values = np.array(values, dtype="object")
values[mask] = na_rep
values = values.astype(object, copy=False)
return values
| Index |
python | django__django | tests/model_formsets/models.py | {
"start": 60,
"end": 229
} | class ____(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ("name",)
def __str__(self):
return self.name
| Author |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_hawaii_zip.py | {
"start": 737,
"end": 1735
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_hawaii_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_hawaii_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidHawaiiZip |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_cloud_sql.py | {
"start": 2497,
"end": 25931
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.cloudsql_hook = CloudSQLHook(api_version="v1", gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
def test_instance_import_exception(self, mock_get_credentials):
self.cloudsql_hook.get_conn = mock.Mock(
side_effect=HttpError(resp=httplib2.Response({"status": 400}), content=b"Error content")
)
with pytest.raises(AirflowException) as ctx:
self.cloudsql_hook.import_instance(instance="instance", body={})
err = ctx.value
assert "Importing instance " in str(err)
assert mock_get_credentials.call_count == 1
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
def test_instance_export_exception(self, mock_get_credentials):
self.cloudsql_hook.get_conn = mock.Mock(
side_effect=HttpError(resp=httplib2.Response({"status": 400}), content=b"Error content")
)
with pytest.raises(HttpError) as ctx:
self.cloudsql_hook.export_instance(instance="instance", body={})
err = ctx.value
assert err.resp.status == 400
assert mock_get_credentials.call_count == 1
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_instance_import(self, wait_for_operation_to_complete, get_conn, mock_get_credentials):
import_method = get_conn.return_value.instances.return_value.import_
execute_method = import_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook.import_instance(instance="instance", body={})
import_method.assert_called_once_with(body={}, instance="instance", project="example-project")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id="example-project", operation_name="operation_id"
)
assert mock_get_credentials.call_count == 1
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_instance_export(self, wait_for_operation_to_complete, get_conn, mock_get_credentials):
export_method = get_conn.return_value.instances.return_value.export
execute_method = export_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook.export_instance(instance="instance", body={})
export_method.assert_called_once_with(body={}, instance="instance", project="example-project")
execute_method.assert_called_once_with(num_retries=5)
assert mock_get_credentials.call_count == 1
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_instance_export_with_in_progress_retry(self, wait_for_operation_to_complete, get_conn):
export_method = get_conn.return_value.instances.return_value.export
execute_method = export_method.return_value.execute
execute_method.side_effect = [
HttpError(
resp=httplib2.Response({"status": 429}),
content=b"Internal Server Error",
),
{"name": "operation_id"},
]
with pytest.raises(HttpError):
self.cloudsql_hook.export_instance(project_id="example-project", instance="instance", body={})
wait_for_operation_to_complete.assert_not_called()
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_get_instance(self, wait_for_operation_to_complete, get_conn, mock_get_credentials):
get_method = get_conn.return_value.instances.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "instance"}
wait_for_operation_to_complete.return_value = None
res = self.cloudsql_hook.get_instance(instance="instance")
assert res is not None
assert res["name"] == "instance"
get_method.assert_called_once_with(instance="instance", project="example-project")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
assert mock_get_credentials.call_count == 1
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_create_instance(self, wait_for_operation_to_complete, get_conn, mock_get_credentials):
insert_method = get_conn.return_value.instances.return_value.insert
execute_method = insert_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook.create_instance(body={})
insert_method.assert_called_once_with(body={}, project="example-project")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
assert mock_get_credentials.call_count == 1
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_create_instance_with_in_progress_retry(
self, wait_for_operation_to_complete, get_conn, mock_get_credentials
):
insert_method = get_conn.return_value.instances.return_value.insert
execute_method = insert_method.return_value.execute
execute_method.side_effect = [
HttpError(
resp=httplib2.Response({"status": 429}),
content=b"Internal Server Error",
),
{"name": "operation_id"},
]
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook.create_instance(body={})
assert mock_get_credentials.call_count == 1
assert insert_method.call_count == 2
assert execute_method.call_count == 2
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_patch_instance_with_in_progress_retry(
self, wait_for_operation_to_complete, get_conn, mock_get_credentials
):
patch_method = get_conn.return_value.instances.return_value.patch
execute_method = patch_method.return_value.execute
execute_method.side_effect = [
HttpError(
resp=httplib2.Response({"status": 429}),
content=b"Internal Server Error",
),
{"name": "operation_id"},
]
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook.patch_instance(instance="instance", body={})
assert mock_get_credentials.call_count == 1
assert patch_method.call_count == 2
assert execute_method.call_count == 2
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_patch_instance(self, wait_for_operation_to_complete, get_conn, mock_get_credentials):
patch_method = get_conn.return_value.instances.return_value.patch
execute_method = patch_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook.patch_instance(instance="instance", body={})
patch_method.assert_called_once_with(body={}, instance="instance", project="example-project")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
assert mock_get_credentials.call_count == 1
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_delete_instance(self, wait_for_operation_to_complete, get_conn, mock_get_credentials):
delete_method = get_conn.return_value.instances.return_value.delete
execute_method = delete_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook.delete_instance(instance="instance")
delete_method.assert_called_once_with(instance="instance", project="example-project")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project", time_to_sleep=5
)
assert mock_get_credentials.call_count == 1
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_delete_instance_with_in_progress_retry(
self, wait_for_operation_to_complete, get_conn, mock_get_credentials
):
delete_method = get_conn.return_value.instances.return_value.delete
execute_method = delete_method.return_value.execute
execute_method.side_effect = [
HttpError(
resp=httplib2.Response({"status": 429}),
content=b"Internal Server Error",
),
{"name": "operation_id"},
]
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook.delete_instance(instance="instance")
assert mock_get_credentials.call_count == 1
assert delete_method.call_count == 2
assert execute_method.call_count == 2
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project", time_to_sleep=5
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_instance_clone(self, wait_for_operation_to_complete, get_conn, mock_get_credentials):
clone_method = get_conn.return_value.instances.return_value.clone
execute_method = clone_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
body = {
"cloneContext": {
"kind": "sql#cloneContext",
"destinationInstanceName": "clonedInstance",
}
}
self.cloudsql_hook.clone_instance(instance="instance", body=body)
clone_method.assert_called_once_with(instance="instance", project="example-project", body=body)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
assert mock_get_credentials.call_count == 1
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_get_database(self, wait_for_operation_to_complete, get_conn, mock_get_credentials):
get_method = get_conn.return_value.databases.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "database"}
wait_for_operation_to_complete.return_value = None
res = self.cloudsql_hook.get_database(database="database", instance="instance")
assert res is not None
assert res["name"] == "database"
get_method.assert_called_once_with(
instance="instance", database="database", project="example-project"
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
assert mock_get_credentials.call_count == 1
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_create_database(self, wait_for_operation_to_complete, get_conn, mock_get_credentials):
insert_method = get_conn.return_value.databases.return_value.insert
execute_method = insert_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook.create_database(instance="instance", body={})
insert_method.assert_called_once_with(body={}, instance="instance", project="example-project")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
assert mock_get_credentials.call_count == 1
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_create_database_with_in_progress_retry(
self, wait_for_operation_to_complete, get_conn, mock_get_credentials
):
insert_method = get_conn.return_value.databases.return_value.insert
execute_method = insert_method.return_value.execute
execute_method.side_effect = [
HttpError(
resp=httplib2.Response({"status": 429}),
content=b"Internal Server Error",
),
{"name": "operation_id"},
]
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook.create_database(instance="instance", body={})
assert mock_get_credentials.call_count == 1
assert insert_method.call_count == 2
assert execute_method.call_count == 2
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_patch_database(self, wait_for_operation_to_complete, get_conn, mock_get_credentials):
patch_method = get_conn.return_value.databases.return_value.patch
execute_method = patch_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook.patch_database(instance="instance", database="database", body={})
patch_method.assert_called_once_with(
body={}, database="database", instance="instance", project="example-project"
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
assert mock_get_credentials.call_count == 1
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_patch_database_with_in_progress_retry(
self, wait_for_operation_to_complete, get_conn, mock_get_credentials
):
patch_method = get_conn.return_value.databases.return_value.patch
execute_method = patch_method.return_value.execute
execute_method.side_effect = [
HttpError(
resp=httplib2.Response({"status": 429}),
content=b"Internal Server Error",
),
{"name": "operation_id"},
]
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook.patch_database(instance="instance", database="database", body={})
assert mock_get_credentials.call_count == 1
assert patch_method.call_count == 2
assert execute_method.call_count == 2
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_delete_database(self, wait_for_operation_to_complete, get_conn, mock_get_credentials):
delete_method = get_conn.return_value.databases.return_value.delete
execute_method = delete_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook.delete_database(instance="instance", database="database")
delete_method.assert_called_once_with(
database="database", instance="instance", project="example-project"
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
assert mock_get_credentials.call_count == 1
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_credentials_and_project_id",
return_value=(mock.MagicMock(), "example-project"),
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook._wait_for_operation_to_complete")
def test_delete_database_with_in_progress_retry(
self, wait_for_operation_to_complete, get_conn, mock_get_credentials
):
delete_method = get_conn.return_value.databases.return_value.delete
execute_method = delete_method.return_value.execute
execute_method.side_effect = [
HttpError(
resp=httplib2.Response({"status": 429}),
content=b"Internal Server Error",
),
{"name": "operation_id"},
]
wait_for_operation_to_complete.return_value = None
self.cloudsql_hook.delete_database(instance="instance", database="database")
assert mock_get_credentials.call_count == 1
assert delete_method.call_count == 2
assert execute_method.call_count == 2
wait_for_operation_to_complete.assert_called_once_with(
operation_name="operation_id", project_id="example-project"
)
| TestGcpSqlHookDefaultProjectId |
python | getsentry__sentry | src/sentry/models/savedsearch.py | {
"start": 1385,
"end": 1977
} | class ____:
ORGANIZATION = "organization"
OWNER = "owner"
OWNER_PINNED = "owner_pinned"
@classmethod
def as_choices(cls) -> list[tuple[str, Any]]:
# Note that the pinned value may not always be a visibility we want to
# expose. The pinned search API explicitly will set this visibility,
# but the saved search API should not allow it to be set
return [
(cls.ORGANIZATION, _("Organization")),
(cls.OWNER, _("Only for me")),
(cls.OWNER_PINNED, _("My Pinned Search")),
]
@region_silo_model
| Visibility |
python | huggingface__transformers | src/transformers/models/bridgetower/processing_bridgetower.py | {
"start": 1337,
"end": 2274
} | class ____(ProcessorMixin):
r"""
Constructs a BridgeTower processor which wraps a Roberta tokenizer and BridgeTower image processor into a single
processor.
[`BridgeTowerProcessor`] offers all the functionalities of [`BridgeTowerImageProcessor`] and
[`RobertaTokenizerFast`]. See the docstring of [`~BridgeTowerProcessor.__call__`] and
[`~BridgeTowerProcessor.decode`] for more information.
Args:
image_processor (`BridgeTowerImageProcessor`):
An instance of [`BridgeTowerImageProcessor`]. The image processor is a required input.
tokenizer (`RobertaTokenizerFast`):
An instance of ['RobertaTokenizerFast`]. The tokenizer is a required input.
"""
valid_processor_kwargs = BridgeTowerProcessorKwargs
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
__all__ = ["BridgeTowerProcessor"]
| BridgeTowerProcessor |
python | pytorch__pytorch | test/test_throughput_benchmark.py | {
"start": 654,
"end": 1098
} | class ____(torch.nn.Module):
def __init__(self, D_in, H, D_out):
super().__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.linear2 = torch.nn.Linear(2 * H, D_out)
def forward(self, x1, x2):
h1_relu = self.linear1(x1).clamp(min=0)
h2_relu = self.linear1(x2).clamp(min=0)
cat = torch.cat((h1_relu, h2_relu), 1)
y_pred = self.linear2(cat)
return y_pred
| TwoLayerNetModule |
python | sympy__sympy | sympy/stats/rv.py | {
"start": 2477,
"end": 3110
} | class ____(RandomDomain):
"""
A single variable and its domain.
See Also
========
sympy.stats.crv.SingleContinuousDomain
sympy.stats.frv.SingleFiniteDomain
"""
def __new__(cls, symbol, set):
assert symbol.is_Symbol
return Basic.__new__(cls, symbol, set)
@property
def symbol(self):
return self.args[0]
@property
def symbols(self):
return FiniteSet(self.symbol)
def __contains__(self, other):
if len(other) != 1:
return False
sym, val = tuple(other)[0]
return self.symbol == sym and val in self.set
| SingleDomain |
python | getsentry__sentry | tests/sentry/seer/endpoints/test_group_autofix_update.py | {
"start": 267,
"end": 5223
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.group = self.create_group()
self.url = f"/api/0/issues/{self.group.id}/autofix/update/"
@patch(
"sentry.seer.endpoints.group_autofix_update.get_seer_org_acknowledgement", return_value=True
)
@patch("sentry.seer.endpoints.group_autofix_update.requests.post")
def test_autofix_update_successful(
self, mock_post: MagicMock, mock_get_seer_org_acknowledgement: MagicMock
) -> None:
mock_post.return_value.status_code = 202
mock_post.return_value.json.return_value = {}
response = self.client.post(
self.url,
data={
"run_id": 123,
"payload": {
"type": "select_root_cause",
"cause_id": 456,
},
},
format="json",
)
assert response.status_code == status.HTTP_202_ACCEPTED
expected_body = orjson.dumps(
{
"run_id": 123,
"payload": {
"type": "select_root_cause",
"cause_id": 456,
},
"invoking_user": {
"id": self.user.id,
"display_name": self.user.get_display_name(),
},
}
)
expected_url = f"{settings.SEER_AUTOFIX_URL}/v1/automation/autofix/update"
expected_headers = {
"content-type": "application/json;charset=utf-8",
**sign_with_seer_secret(expected_body),
}
mock_post.assert_called_once_with(
expected_url,
data=expected_body,
headers=expected_headers,
)
@patch(
"sentry.seer.endpoints.group_autofix_update.get_seer_org_acknowledgement", return_value=True
)
@patch("sentry.seer.endpoints.group_autofix_update.requests.post")
def test_autofix_update_failure(
self, mock_post: MagicMock, mock_get_seer_org_acknowledgement: MagicMock
) -> None:
mock_post.return_value.raise_for_status.side_effect = Exception("Failed to update")
response = self.client.post(
self.url,
data={
"run_id": 123,
"payload": {
"type": "select_root_cause",
"cause_id": 456,
},
},
format="json",
)
assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR
@patch(
"sentry.seer.endpoints.group_autofix_update.get_seer_org_acknowledgement", return_value=True
)
def test_autofix_update_missing_parameters(
self, mock_get_seer_org_acknowledgement: MagicMock
) -> None:
response = self.client.post(self.url, data={}, format="json")
assert response.status_code == status.HTTP_400_BAD_REQUEST
@patch(
"sentry.seer.endpoints.group_autofix_update.get_seer_org_acknowledgement",
return_value=False,
)
def test_autofix_update_org_not_acknowledged(
self, mock_get_seer_org_acknowledgement: MagicMock
) -> None:
"""Test that a 403 is returned when the organization hasn't acknowledged Seer."""
response = self.client.post(
self.url,
data={
"run_id": 123,
"payload": {
"type": "select_root_cause",
"cause_id": 456,
},
},
format="json",
)
assert response.status_code == status.HTTP_403_FORBIDDEN
assert (
response.data["error"]
== "Seer has not been enabled for this organization. Please open an issue at sentry.io/issues and set up Seer."
)
@patch(
"sentry.seer.endpoints.group_autofix_update.get_seer_org_acknowledgement", return_value=True
)
@patch("sentry.seer.endpoints.group_autofix_update.requests.post")
def test_autofix_update_updates_last_triggered_field(
self, mock_post, mock_get_seer_org_acknowledgement
):
"""Test that a successful call updates the seer_autofix_last_triggered field."""
mock_post.return_value.status_code = 202
mock_post.return_value.json.return_value = {}
self.group.refresh_from_db()
assert self.group.seer_autofix_last_triggered is None
response = self.client.post(
self.url,
data={
"run_id": 456,
"payload": {
"type": "some_update",
"data": "value",
},
},
format="json",
)
assert response.status_code == status.HTTP_202_ACCEPTED
self.group.refresh_from_db()
assert isinstance(self.group.seer_autofix_last_triggered, datetime)
| TestGroupAutofixUpdate |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/transformer_nlp_seq.py | {
"start": 2184,
"end": 3560
} | class ____(nn.Module):
"""Adds (optionally learned) positional embeddings to the inputs.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
"""
config: TransformerConfig
@nn.compact
def __call__(self, inputs):
"""Applies AddPositionEmbs module.
By default this layer uses a fixed sinusoidal embedding table. If a
learned position embedding is desired, pass an initializer to
posemb_init in the configuration.
Args:
inputs: input data.
Returns:
output: `(bs, timesteps, in_dim)`
"""
config = self.config
# inputs.shape is (batch_size, seq_len, emb_dim)
assert inputs.ndim == 3, ('Number of dimensions should be 3,'
' but it is: %d' % inputs.ndim)
length = inputs.shape[1]
pos_emb_shape = (1, config.max_len, inputs.shape[-1])
if config.posemb_init is None:
# Use a fixed (non-learned) sinusoidal position embedding.
pos_embedding = sinusoidal_init(max_len=config.max_len)(None,
pos_emb_shape,
None)
else:
pos_embedding = self.param('pos_embedding', config.posemb_init,
pos_emb_shape)
pe = pos_embedding[:, :length, :]
return inputs + pe
| AddPositionEmbs |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/app.py | {
"start": 11199,
"end": 13627
} | class ____:
"""
A helper class to make it possible to run the ExecutionAPI "in-process".
The sync version of this makes use of a2wsgi which runs the async loop in a separate thread. This is
needed so that we can use the sync httpx client
"""
_app: FastAPI | None = None
_cm: AsyncExitStack | None = None
@cached_property
def app(self):
if not self._app:
from airflow.api_fastapi.common.dagbag import create_dag_bag
from airflow.api_fastapi.execution_api.app import create_task_execution_api_app
from airflow.api_fastapi.execution_api.deps import (
JWTBearerDep,
JWTBearerTIPathDep,
)
from airflow.api_fastapi.execution_api.routes.connections import has_connection_access
from airflow.api_fastapi.execution_api.routes.variables import has_variable_access
from airflow.api_fastapi.execution_api.routes.xcoms import has_xcom_access
self._app = create_task_execution_api_app()
# Set up dag_bag in app state for dependency injection
self._app.state.dag_bag = create_dag_bag()
async def always_allow(): ...
self._app.dependency_overrides[JWTBearerDep.dependency] = always_allow
self._app.dependency_overrides[JWTBearerTIPathDep.dependency] = always_allow
self._app.dependency_overrides[has_connection_access] = always_allow
self._app.dependency_overrides[has_variable_access] = always_allow
self._app.dependency_overrides[has_xcom_access] = always_allow
return self._app
@cached_property
def transport(self) -> httpx.WSGITransport:
import asyncio
import httpx
from a2wsgi import ASGIMiddleware
middleware = ASGIMiddleware(self.app)
# https://github.com/abersheeran/a2wsgi/discussions/64
async def start_lifespan(cm: AsyncExitStack, app: FastAPI):
await cm.enter_async_context(app.router.lifespan_context(app))
self._cm = AsyncExitStack()
asyncio.run_coroutine_threadsafe(start_lifespan(self._cm, self.app), middleware.loop)
return httpx.WSGITransport(app=middleware) # type: ignore[arg-type]
@cached_property
def atransport(self) -> httpx.ASGITransport:
import httpx
return httpx.ASGITransport(app=self.app)
| InProcessExecutionAPI |
python | PyCQA__pylint | tests/functional/o/overridden_final_method_py38.py | {
"start": 241,
"end": 447
} | class ____(Base):
def my_method(self): # [overridden-final-method]
pass
# Check for crash on method definitions not at top level of class
# https://github.com/pylint-dev/pylint/issues/5648
| Subclass |
python | readthedocs__readthedocs.org | readthedocs/api/v3/mixins.py | {
"start": 8010,
"end": 8122
} | class ____:
def get_queryset(self):
return self.model.objects.api(self.request.user)
| RemoteQuerySetMixin |
python | python__mypy | mypyc/irbuild/context.py | {
"start": 5694,
"end": 7448
} | class ____(ImplicitClass):
"""Contains information about implicit generator function classes."""
def __init__(self, ir: ClassIR) -> None:
super().__init__(ir)
# This register holds the label number that the '__next__' function should go to the next
# time it is called.
self._next_label_reg: Value | None = None
self._next_label_target: AssignmentTarget | None = None
# These registers hold the error values for the generator object for the case that the
# 'throw' function is called.
self.exc_regs: tuple[Value, Value, Value] | None = None
# Holds the arg passed to send
self.send_arg_reg: Value | None = None
# Holds the PyObject ** pointer through which return value can be passed
# instead of raising StopIteration(ret_value) (only if not NULL). This
# is used for faster native-to-native calls.
self.stop_iter_value_reg: Value | None = None
# The switch block is used to decide which instruction to go using the value held in the
# next-label register.
self.switch_block = BasicBlock()
self.continuation_blocks: list[BasicBlock] = []
@property
def next_label_reg(self) -> Value:
assert self._next_label_reg is not None
return self._next_label_reg
@next_label_reg.setter
def next_label_reg(self, reg: Value) -> None:
self._next_label_reg = reg
@property
def next_label_target(self) -> AssignmentTarget:
assert self._next_label_target is not None
return self._next_label_target
@next_label_target.setter
def next_label_target(self, target: AssignmentTarget) -> None:
self._next_label_target = target
| GeneratorClass |
python | psf__requests | tests/test_utils.py | {
"start": 6032,
"end": 8044
} | class ____:
"""Ensures that IP addresses are correctly matches with ranges
in no_proxy variable.
"""
@pytest.fixture(autouse=True, params=["no_proxy", "NO_PROXY"])
def no_proxy(self, request, monkeypatch):
monkeypatch.setenv(
request.param, "192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1"
)
@pytest.mark.parametrize(
"url",
(
"http://192.168.0.1:5000/",
"http://192.168.0.1/",
"http://172.16.1.1/",
"http://172.16.1.1:5000/",
"http://localhost.localdomain:5000/v1.0/",
),
)
def test_bypass(self, url):
assert get_environ_proxies(url, no_proxy=None) == {}
@pytest.mark.parametrize(
"url",
(
"http://192.168.1.1:5000/",
"http://192.168.1.1/",
"http://www.requests.com/",
),
)
def test_not_bypass(self, url):
assert get_environ_proxies(url, no_proxy=None) != {}
@pytest.mark.parametrize(
"url",
(
"http://192.168.1.1:5000/",
"http://192.168.1.1/",
"http://www.requests.com/",
),
)
def test_bypass_no_proxy_keyword(self, url):
no_proxy = "192.168.1.1,requests.com"
assert get_environ_proxies(url, no_proxy=no_proxy) == {}
@pytest.mark.parametrize(
"url",
(
"http://192.168.0.1:5000/",
"http://192.168.0.1/",
"http://172.16.1.1/",
"http://172.16.1.1:5000/",
"http://localhost.localdomain:5000/v1.0/",
),
)
def test_not_bypass_no_proxy_keyword(self, url, monkeypatch):
# This is testing that the 'no_proxy' argument overrides the
# environment variable 'no_proxy'
monkeypatch.setenv("http_proxy", "http://proxy.example.com:3128/")
no_proxy = "192.168.1.1,requests.com"
assert get_environ_proxies(url, no_proxy=no_proxy) != {}
| TestGetEnvironProxies |
python | numba__numba | numba/tests/test_types.py | {
"start": 20196,
"end": 20997
} | class ____(unittest.TestCase):
def test_record_type_equiv(self):
rec_dt = np.dtype([('a', np.int32), ('b', np.float32)])
rec_ty = typeof(rec_dt)
art1 = rec_ty[::1]
arr = np.zeros(5, dtype=rec_dt)
art2 = typeof(arr)
self.assertEqual(art2.dtype.dtype, rec_ty.dtype)
self.assertEqual(art1, art2)
def test_user_specified(self):
rec_dt = np.dtype([('a', np.int32), ('b', np.float32)])
rec_type = typeof(rec_dt)
@jit((rec_type[:],), nopython=True)
def foo(x):
return x['a'], x['b']
arr = np.zeros(1, dtype=rec_dt)
arr[0]['a'] = 123
arr[0]['b'] = 32.1
a, b = foo(arr)
self.assertEqual(a, arr[0]['a'])
self.assertEqual(b, arr[0]['b'])
| TestRecordDtype |
python | apache__airflow | task-sdk/tests/conftest.py | {
"start": 7846,
"end": 8512
} | class ____(Protocol):
def __call__(
self,
dag_id: str = ...,
run_id: str = ...,
logical_date: str | datetime = ...,
data_interval_start: str | datetime = ...,
data_interval_end: str | datetime = ...,
clear_number: int = ...,
start_date: str | datetime = ...,
run_after: str | datetime = ...,
run_type: str = ...,
task_reschedule_count: int = ...,
conf: dict[str, Any] | None = ...,
should_retry: bool = ...,
max_tries: int = ...,
consumed_asset_events: Sequence[AssetEventDagRunReference] = ...,
) -> TIRunContext: ...
| MakeTIContextCallable |
python | spack__spack | lib/spack/spack/relocate_text.py | {
"start": 10865,
"end": 11145
} | class ____(spack.error.SpackError):
def __init__(self, msg):
msg += (
" To fix this, compile with more padding "
"(config:install_tree:padded_length), or install to a shorter prefix."
)
super().__init__(msg)
| BinaryTextReplaceError |
python | apache__avro | lang/py/avro/protocol.py | {
"start": 5889,
"end": 12062
} | class ____:
"""
A message has attributes:
- a doc, an optional description of the message,
- a request, a list of named, typed parameter schemas (this has the same form as the fields of a record declaration);
- a response schema;
- an optional union of declared error schemas. The effective union has "string" prepended to the declared union, to permit transmission of undeclared "system" errors. For example, if the declared error union is ["AccessError"], then the effective union is ["string", "AccessError"]. When no errors are declared, the effective error union is ["string"]. Errors are serialized using the effective union; however, a protocol's JSON declaration contains only the declared union.
- an optional one-way boolean parameter.
A request parameter list is processed equivalently to an anonymous record. Since record field lists may vary between reader and writer, request parameters may also differ between the caller and responder, and such differences are resolved in the same manner as record field differences.
The one-way parameter may only be true when the response type is "null" and no errors are listed.
"""
__slots__ = ["_errors", "_name", "_request", "_response", "_validate_names"]
def __init__(
self,
name: str,
request: Sequence[Mapping[str, object]],
response: Union[str, object],
errors: Optional[Sequence[str]] = None,
names: Optional[avro.name.Names] = None,
validate_names: bool = True,
) -> None:
self._name = name
names = names or avro.name.Names(validate_names=validate_names)
self._request = _parse_request(request, names, validate_names)
self._response = _parse_response(response, names, validate_names)
self._errors = _parse_errors(errors or [], names, validate_names)
self._validate_names = validate_names
@property
def name(self) -> str:
return self._name
@property
def request(self) -> avro.schema.RecordSchema:
return self._request
@property
def response(self) -> avro.schema.Schema:
return self._response
@property
def errors(self) -> avro.schema.ErrorUnionSchema:
return self._errors
def __str__(self) -> str:
return json.dumps(self.to_json())
def to_json(self, names: Optional[avro.name.Names] = None) -> "MessageObject":
names = names or avro.name.Names(validate_names=self._validate_names)
try:
to_dump = MessageObject()
except NameError:
to_dump = {}
to_dump["request"] = self.request.to_json(names)
to_dump["response"] = self.response.to_json(names)
if self.errors:
to_dump["errors"] = self.errors.to_json(names)
return to_dump
def __eq__(self, that: object) -> bool:
return all(hasattr(that, prop) and getattr(self, prop) == getattr(that, prop) for prop in self.__class__.__slots__)
def _parse_request(request: Sequence[Mapping[str, object]], names: avro.name.Names, validate_names: bool = True) -> avro.schema.RecordSchema:
if not isinstance(request, Sequence):
raise avro.errors.ProtocolParseException(f"Request property not a list: {request}")
return avro.schema.RecordSchema(None, None, request, names, "request", validate_names=validate_names)
def _parse_response(response: Union[str, object], names: avro.name.Names, validate_names: bool = True) -> avro.schema.Schema:
return (isinstance(response, str) and names.get_name(response)) or avro.schema.make_avsc_object(response, names, validate_names=validate_names)
def _parse_errors(errors: Sequence[str], names: avro.name.Names, validate_names: bool = True) -> avro.schema.ErrorUnionSchema:
"""Even if errors is empty, we still want an ErrorUnionSchema with "string" in it."""
if not isinstance(errors, Sequence):
raise avro.errors.ProtocolParseException(f"Errors property not a list: {errors}")
errors_for_parsing = {"type": "error_union", "declared_errors": errors}
return cast(avro.schema.ErrorUnionSchema, avro.schema.make_avsc_object(errors_for_parsing, names, validate_names=validate_names))
def make_avpr_object(json_data: "ProtocolObject", validate_names: bool = True) -> Protocol:
"""Build Avro Protocol from data parsed out of JSON string."""
if not hasattr(json_data, "get"):
raise avro.errors.ProtocolParseException(f"Not a JSON object: {json_data}")
name = json_data["protocol"]
namespace = json_data.get("namespace")
types = json_data.get("types")
messages = json_data.get("messages")
return Protocol(name, namespace, types, messages, validate_names)
def parse(json_string: str, validate_names: bool = True) -> Protocol:
"""Constructs the Protocol from the JSON text."""
try:
protocol_object = json.loads(json_string)
except ValueError:
raise avro.errors.ProtocolParseException(f"Error parsing JSON: {json_string}")
return make_avpr_object(protocol_object, validate_names)
def _parse_types(types: Sequence[str], type_names: avro.name.Names, validate_names: bool = True) -> Sequence[avro.schema.NamedSchema]:
schemas = []
for type_ in types:
schema = avro.schema.make_avsc_object(type_, type_names, validate_names=validate_names)
if isinstance(schema, avro.schema.NamedSchema):
schemas.append(schema)
continue
raise avro.errors.ProtocolParseException(f"Type {type_} not an enum, fixed, record, or error.")
return schemas
def _parse_messages(message_objects: Mapping[str, "MessageObject"], names: avro.name.Names, validate_names: bool = True) -> Mapping[str, Message]:
messages = {}
for name, body in message_objects.items():
if not hasattr(body, "get"):
raise avro.errors.ProtocolParseException(f'Message name "{name}" has non-object body {body}.')
request = body["request"]
response = body["response"]
errors = body.get("errors")
messages[name] = Message(name, request, response, errors, names, validate_names=validate_names)
return messages
| Message |
python | pypa__warehouse | tests/unit/utils/test_paginate.py | {
"start": 1592,
"end": 1872
} | class ____:
def __init__(self, fake):
self.fake = fake
self.range = slice(None)
def __getitem__(self, range):
self.range = range
return self
def execute(self):
return FakeResult6(self.fake[self.range], len(self.fake))
| FakeQuery6 |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-vllm/llama_index/llms/vllm/base.py | {
"start": 977,
"end": 10713
} | class ____(LLM):
r"""
Vllm LLM.
This class runs a vLLM model locally.
Examples:
`pip install llama-index-llms-vllm`
```python
from llama_index.llms.vllm import Vllm
# specific functions to format for mistral instruct
def messages_to_prompt(messages):
prompt = "\n".join([str(x) for x in messages])
return f"<s>[INST] {prompt} [/INST] </s>\n"
def completion_to_prompt(completion):
return f"<s>[INST] {completion} [/INST] </s>\n"
llm = Vllm(
model="mistralai/Mistral-7B-Instruct-v0.1",
tensor_parallel_size=4,
max_new_tokens=256,
vllm_kwargs={"swap_space": 1, "gpu_memory_utilization": 0.5},
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
)
llm.complete(
"What is a black hole?"
)
```
"""
model: Optional[str] = Field(description="The HuggingFace Model to use.")
temperature: float = Field(description="The temperature to use for sampling.")
tensor_parallel_size: Optional[int] = Field(
default=1,
description="The number of GPUs to use for distributed execution with tensor parallelism.",
)
trust_remote_code: Optional[bool] = Field(
default=True,
description="Trust remote code (e.g., from HuggingFace) when downloading the model and tokenizer.",
)
n: int = Field(
default=1,
description="Number of output sequences to return for the given prompt.",
)
best_of: Optional[int] = Field(
default=None,
description="Number of output sequences that are generated from the prompt.",
)
presence_penalty: float = Field(
default=0.0,
description="Float that penalizes new tokens based on whether they appear in the generated text so far.",
)
frequency_penalty: float = Field(
default=0.0,
description="Float that penalizes new tokens based on their frequency in the generated text so far.",
)
top_p: float = Field(
default=1.0,
description="Float that controls the cumulative probability of the top tokens to consider.",
)
top_k: int = Field(
default=-1,
description="Integer that controls the number of top tokens to consider.",
)
stop: Optional[List[str]] = Field(
default=None,
description="List of strings that stop the generation when they are generated.",
)
ignore_eos: bool = Field(
default=False,
description="Whether to ignore the EOS token and continue generating tokens after the EOS token is generated.",
)
max_new_tokens: int = Field(
default=512,
description="Maximum number of tokens to generate per output sequence.",
)
logprobs: Optional[int] = Field(
default=None,
description="Number of log probabilities to return per output token.",
)
dtype: str = Field(
default="auto",
description="The data type for the model weights and activations.",
)
download_dir: Optional[str] = Field(
default=None,
description="Directory to download and load the weights. (Default to the default cache dir of huggingface)",
)
vllm_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Holds any model parameters valid for `vllm.LLM` call not explicitly specified.",
)
api_url: str = Field(description="The api url for vllm server")
is_chat_model: bool = Field(
default=False,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
_client: Any = PrivateAttr()
def __init__(
self,
model: str = "facebook/opt-125m",
temperature: float = 1.0,
tensor_parallel_size: int = 1,
trust_remote_code: bool = False,
n: int = 1,
best_of: Optional[int] = None,
presence_penalty: float = 0.0,
frequency_penalty: float = 0.0,
top_p: float = 1.0,
top_k: int = -1,
stop: Optional[List[str]] = None,
ignore_eos: bool = False,
max_new_tokens: int = 512,
logprobs: Optional[int] = None,
dtype: str = "auto",
download_dir: Optional[str] = None,
vllm_kwargs: Dict[str, Any] = {},
api_url: Optional[str] = "",
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
is_chat_model: Optional[bool] = False,
) -> None:
callback_manager = callback_manager or CallbackManager([])
super().__init__(
model=model,
temperature=temperature,
n=n,
best_of=best_of,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
top_p=top_p,
top_k=top_k,
stop=stop,
ignore_eos=ignore_eos,
max_new_tokens=max_new_tokens,
logprobs=logprobs,
dtype=dtype,
download_dir=download_dir,
vllm_kwargs=vllm_kwargs,
api_url=api_url,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
is_chat_model=is_chat_model,
)
if not api_url:
try:
from vllm import LLM as VLLModel
except ImportError:
raise ImportError(
"Could not import vllm python package. "
"Please install it with `pip install vllm`."
)
self._client = VLLModel(
model=model,
tensor_parallel_size=tensor_parallel_size,
trust_remote_code=trust_remote_code,
dtype=dtype,
download_dir=download_dir,
**vllm_kwargs,
)
else:
self._client = None
@classmethod
def class_name(cls) -> str:
return "Vllm"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(model_name=self.model, is_chat_model=self.is_chat_model)
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"temperature": self.temperature,
"max_tokens": self.max_new_tokens,
"n": self.n,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"best_of": self.best_of,
"ignore_eos": self.ignore_eos,
"stop": self.stop,
"logprobs": self.logprobs,
"top_k": self.top_k,
"top_p": self.top_p,
}
return {**base_kwargs}
@atexit.register
def close():
import torch
import gc
if torch.cuda.is_available():
gc.collect()
torch.cuda.empty_cache()
torch.cuda.synchronize()
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
**self._model_kwargs,
**kwargs,
}
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
kwargs = kwargs if kwargs else {}
prompt = self.messages_to_prompt(messages)
completion_response = self.complete(prompt, **kwargs)
return completion_response_to_chat_response(completion_response)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
kwargs = kwargs if kwargs else {}
params = {**self._model_kwargs, **kwargs}
from vllm import SamplingParams
# build sampling parameters
sampling_params = SamplingParams(**params)
outputs = self._client.generate([prompt], sampling_params)
return CompletionResponse(text=outputs[0].outputs[0].text)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
raise (ValueError("Not Implemented"))
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise (ValueError("Not Implemented"))
@llm_chat_callback()
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
kwargs = kwargs if kwargs else {}
return self.chat(messages, **kwargs)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
kwargs = kwargs if kwargs else {}
return self.complete(prompt, **kwargs)
@llm_chat_callback()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
raise (ValueError("Not Implemented"))
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
raise (ValueError("Not Implemented"))
| Vllm |
python | huggingface__transformers | src/transformers/models/udop/tokenization_udop.py | {
"start": 8602,
"end": 50619
} | class ____(TokenizersBackend):
"""
Construct a "fast" UDOP tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
[`LayoutXLMTokenizer`] and [`T5Tokenizer`]. Based on
[BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
sep_token_box (`list[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
extra_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
Extra special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
def __init__(
self,
eos_token="</s>",
sep_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
sep_token_box=[1000, 1000, 1000, 1000],
pad_token_box=[0, 0, 0, 0],
pad_token_label=-100,
only_label_first_subword=True,
extra_special_tokens=None,
vocab=None,
**kwargs,
):
if "additional_special_tokens" in kwargs and "extra_special_tokens" not in kwargs:
kwargs["extra_special_tokens"] = kwargs.pop("additional_special_tokens")
if extra_special_tokens is not None:
kwargs["extra_special_tokens"] = extra_special_tokens
if vocab is None:
vocab_scores = [(str(pad_token), 0.0), (str(eos_token), 0.0), (str(unk_token), 0.0), ("▁", -2.0)]
elif isinstance(vocab, dict):
vocab_scores = [(str(token), float(score)) for token, score in vocab.items()]
elif isinstance(vocab, list) and len(vocab) > 0:
if isinstance(vocab[0], (tuple, list)):
vocab_scores = [(str(token), float(score)) for token, score in vocab]
else:
vocab_scores = [(str(token), 0.0) for token in vocab]
unk_id = 2
for idx, (token, _) in enumerate(vocab_scores):
if token == str(unk_token):
unk_id = idx
break
self._tokenizer = Tokenizer(
Unigram(
vocab_scores,
unk_id=unk_id,
byte_fallback=False,
)
)
self._tokenizer.normalizer = None
self._tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
[
pre_tokenizers.WhitespaceSplit(),
pre_tokenizers.Metaspace(replacement="▁", prepend_scheme="always", split=True),
]
)
self._tokenizer.decoder = decoders.Metaspace(replacement="▁", prepend_scheme="always", split=True)
super().__init__(
tokenizer_object=self._tokenizer,
eos_token=eos_token,
sep_token=sep_token,
unk_token=unk_token,
pad_token=pad_token,
sep_token_box=sep_token_box,
pad_token_box=pad_token_box,
pad_token_label=pad_token_label,
only_label_first_subword=only_label_first_subword,
**kwargs,
)
self._tokenizer.post_processor = processors.TemplateProcessing(
single=["$A", "</s>"],
pair=["$A", "</s>", "$B", "</s>"],
special_tokens=[
("</s>", self.eos_token_id),
],
)
self.sep_token_box = sep_token_box
self.pad_token_box = pad_token_box
self.pad_token_label = pad_token_label
self.only_label_first_subword = only_label_first_subword
self.init_kwargs["vocab"] = vocab
self._tokenizer.encode_special_tokens = self.split_special_tokens
@add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]] = None,
boxes: Optional[Union[list[list[int]], list[list[list[int]]]]] = None,
word_labels: Optional[Union[list[int], list[list[int]]]] = None,
text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
text_pair_target: Optional[
Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]
] = None,
**kwargs,
) -> BatchEncoding:
if text is None and text_target is None:
raise ValueError("You need to specify either `text` or `text_target`.")
if text is not None:
# The context manager will send the inputs as normal texts and not text_target, but we shouldn't change the
# input mode in this case.
if not self._in_target_context_manager and hasattr(self, "_switch_to_input_mode"):
self._switch_to_input_mode()
encodings = self.call_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, **kwargs)
if text_target is not None:
if hasattr(self, "_switch_to_target_mode"):
self._switch_to_target_mode()
target_encodings = self._encode_plus(
text=text_target,
text_pair=text_pair_target,
**kwargs,
)
# Leave back tokenizer in input mode
if hasattr(self, "_switch_to_input_mode"):
self._switch_to_input_mode()
if text_target is None:
return encodings
elif text is None:
return target_encodings
else:
encodings["labels"] = target_encodings["input_ids"]
return encodings
@add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING)
def call_boxes(
self,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]],
text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]] = None,
boxes: Optional[Union[list[list[int]], list[list[list[int]]]]] = None,
word_labels: Optional[Union[list[int], list[list[int]]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`list[list[int]]`, `list[list[list[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`list[int]`, `list[list[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
"""
# Input type checking for clearer error
def _is_valid_text_input(t):
if isinstance(t, str):
# Strings are fine
return True
elif isinstance(t, (list, tuple)):
# List are fine as long as they are...
if len(t) == 0:
# ... empty
return True
elif isinstance(t[0], str):
# ... list of strings
return True
elif isinstance(t[0], (list, tuple)):
# ... list with an empty list or with a list of strings
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False
if text_pair is not None:
# in case text + text_pair are provided, text = questions, text_pair = words
if not _is_valid_text_input(text):
raise ValueError("text input must of type `str` (single example) or `list[str]` (batch of examples). ")
if not isinstance(text_pair, (list, tuple)):
raise ValueError(
"words must of type `list[str]` (single pretokenized example), "
"or `list[list[str]]` (batch of pretokenized examples)."
)
else:
# in case only text is provided => must be words
if not isinstance(text, (list, tuple)):
raise ValueError(
"Words must of type `list[str]` (single pretokenized example), "
"or `list[list[str]]` (batch of pretokenized examples)."
)
if text_pair is not None:
is_batched = isinstance(text, (list, tuple))
else:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
words = text if text_pair is None else text_pair
if boxes is None:
raise ValueError("You must provide corresponding bounding boxes")
if is_batched:
if len(words) != len(boxes):
raise ValueError("You must provide words and boxes for an equal amount of examples")
for words_example, boxes_example in zip(words, boxes):
if len(words_example) != len(boxes_example):
raise ValueError("You must provide as many words as there are bounding boxes")
else:
if len(words) != len(boxes):
raise ValueError("You must provide as many words as there are bounding boxes")
if is_batched:
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(
f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
f" {len(text_pair)}."
)
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
is_pair = bool(text_pair is not None)
return self.batch_encode_plus_boxes(
batch_text_or_text_pairs=batch_text_or_text_pairs,
is_pair=is_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
return self.encode_plus_boxes(
text=text,
text_pair=text_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> list[str]:
batched_input = [(text, pair)] if pair else [text]
self._tokenizer.encode_special_tokens = kwargs.pop("split_special_tokens", self.split_special_tokens)
encodings = self._tokenizer.encode_batch(
batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
)
return encodings[0].tokens
def batch_encode_plus_boxes(
self,
batch_text_or_text_pairs: Union[
list[TextInput],
list[TextInputPair],
list[PreTokenizedInput],
],
is_pair: Optional[bool] = None,
boxes: Optional[list[list[list[int]]]] = None,
word_labels: Optional[list[list[int]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
"""
Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.
<Tip warning={true}>
This method is deprecated, `__call__` should be used instead.
</Tip>
Args:
batch_text_or_text_pairs (`list[str]`, `list[tuple[str, str]]`, `list[list[str]]`, `list[tuple[list[str], list[str]]]`, and for not-fast tokenizers, also `list[list[int]]`, `list[tuple[list[int], list[int]]]`):
Batch of sequences or pair of sequences to be encoded. This can be a list of
string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see
details in `encode_plus`).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._batch_encode_plus_boxes(
batch_text_or_text_pairs=batch_text_or_text_pairs,
is_pair=is_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _batch_encode_plus_boxes(
self,
batch_text_or_text_pairs: Union[
list[TextInput],
list[TextInputPair],
list[PreTokenizedInput],
],
is_pair: Optional[bool] = None,
boxes: Optional[list[list[list[int]]]] = None,
word_labels: Optional[list[list[int]]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
if not isinstance(batch_text_or_text_pairs, list):
raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
# Set the truncation and padding strategy and restore the initial configuration
self.set_truncation_and_padding(
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
)
if is_pair:
batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
encodings = self._tokenizer.encode_batch(
batch_text_or_text_pairs,
add_special_tokens=add_special_tokens,
is_pretokenized=True, # we set this to True as LayoutLMv2 always expects pretokenized inputs
)
# Convert encoding to dict
# `Tokens` has type: tuple[
# list[dict[str, list[list[int]]]] or list[dict[str, 2D-Tensor]],
# list[EncodingFast]
# ]
# with nested dimensions corresponding to batch, overflows, sequence length
tokens_and_encodings = [
self._convert_encoding(
encoding=encoding,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=True
if word_labels is not None
else return_offsets_mapping, # we use offsets to create the labels
return_length=return_length,
verbose=verbose,
)
for encoding in encodings
]
# Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
# From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
# (we say ~ because the number of overflow varies with the example in the batch)
#
# To match each overflowing sample with the original sample in the batch
# we add an overflow_to_sample_mapping array (see below)
sanitized_tokens = {}
for key in tokens_and_encodings[0][0]:
stack = [e for item, _ in tokens_and_encodings for e in item[key]]
sanitized_tokens[key] = stack
sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
# If returning overflowing tokens, we need to return a mapping
# from the batch idx to the original sample
if return_overflowing_tokens:
overflow_to_sample_mapping = []
for i, (toks, _) in enumerate(tokens_and_encodings):
overflow_to_sample_mapping += [i] * len(toks["input_ids"])
sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
for input_ids in sanitized_tokens["input_ids"]:
self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
# create the token boxes
token_boxes = []
for batch_index in range(len(sanitized_tokens["input_ids"])):
if return_overflowing_tokens:
original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
else:
original_index = batch_index
token_boxes_example = []
for id, sequence_id, word_id in zip(
sanitized_tokens["input_ids"][batch_index],
sanitized_encodings[batch_index].sequence_ids,
sanitized_encodings[batch_index].word_ids,
):
if word_id is not None:
if is_pair and sequence_id == 0:
token_boxes_example.append(self.pad_token_box)
else:
token_boxes_example.append(boxes[original_index][word_id])
else:
if id == self.sep_token_id:
token_boxes_example.append(self.sep_token_box)
elif id == self.pad_token_id:
token_boxes_example.append(self.pad_token_box)
else:
raise ValueError("Id not recognized")
token_boxes.append(token_boxes_example)
sanitized_tokens["bbox"] = token_boxes
# optionally, create the labels
if word_labels is not None:
labels = []
for batch_index in range(len(sanitized_tokens["input_ids"])):
if return_overflowing_tokens:
original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
else:
original_index = batch_index
labels_example = []
previous_token_empty = False
for id, offset, word_id in zip(
sanitized_tokens["input_ids"][batch_index],
sanitized_tokens["offset_mapping"][batch_index],
sanitized_encodings[batch_index].word_ids,
):
if word_id is not None:
if self.only_label_first_subword:
if offset[0] == 0 and not previous_token_empty:
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
labels_example.append(word_labels[original_index][word_id])
else:
labels_example.append(self.pad_token_label)
else:
labels_example.append(word_labels[original_index][word_id])
if self.decode(id) == "":
previous_token_empty = True
else:
previous_token_empty = False
else:
labels_example.append(self.pad_token_label)
labels.append(labels_example)
sanitized_tokens["labels"] = labels
# finally, remove offsets if the user didn't want them
if not return_offsets_mapping:
del sanitized_tokens["offset_mapping"]
return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
def _encode_plus_boxes(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[list[list[int]]] = None,
word_labels: Optional[list[int]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[bool] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
# make it a batched input
# 2 options:
# 1) only text, in case text must be a list of str
# 2) text + text_pair, in which case text = str and text_pair a list of str
batched_input = [(text, text_pair)] if text_pair else [text]
batched_boxes = [boxes]
batched_word_labels = [word_labels] if word_labels is not None else None
batched_output = self._batch_encode_plus_boxes(
batched_input,
is_pair=bool(text_pair is not None),
boxes=batched_boxes,
word_labels=batched_word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
# Return tensor is None, then we can remove the leading batch axis
# Overflowing tokens are returned as a batch of output so we keep them in this case
if return_tensors is None and not return_overflowing_tokens:
batched_output = BatchEncoding(
{
key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
for key, value in batched_output.items()
},
batched_output.encodings,
)
self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
return batched_output
def encode_boxes(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
boxes: Optional[list[list[int]]] = None,
word_labels: Optional[list[list[int]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
) -> list[int]:
"""
Args:
Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing
`self.convert_tokens_to_ids(self.tokenize(text))`.
text (`str`, `list[str]` or `list[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
`tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
text_pair (`str`, `list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
"""
encoded_inputs = self.encode_plus_boxes(
text,
text_pair=text_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
return_tensors=return_tensors,
**kwargs,
)
return encoded_inputs["input_ids"]
def encode_plus_boxes(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[list[list[int]]] = None,
word_labels: Optional[list[list[int]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences.
<Tip warning={true}>
This method is deprecated, `__call__` should be used instead.
</Tip>
Args:
text (`str`, `list[str]` or (for non-fast tokenizers) `list[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
`tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
text_pair (`str`, `list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._encode_plus_boxes(
text=text,
text_pair=text_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _pad(
self,
encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side (`str`, *optional*):
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
# Initialize attention mask if not present.
if return_attention_mask and "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
padding_side = padding_side if padding_side is not None else self.padding_side
if padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if "bbox" in encoded_inputs:
encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
if "labels" in encoded_inputs:
encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if "bbox" in encoded_inputs:
encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
if "labels" in encoded_inputs:
encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError("Invalid padding strategy:" + str(padding_side))
return encoded_inputs
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLM-RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return token_ids_0 + [self.sep_token_id]
sep = [self.sep_token_id]
return token_ids_0 + sep + token_ids_1 + sep
def create_token_type_ids_from_sequences(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
if token_ids_1 is None:
return len(token_ids_0 + sep) * [0]
return len(token_ids_0 + sep + token_ids_1 + sep) * [0]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
"""
Save the tokenizer vocabulary files. For TokenizersBackend, the tokenizer.json file is saved
by the base class. This method returns an empty tuple since we only use tokenizer.json.
"""
# The base class handles saving tokenizer.json in _save_pretrained
# We don't need to save vocab_file since we only use tokenizer.json
return ()
__all__ = ["UdopTokenizer"]
| UdopTokenizer |
python | doocs__leetcode | solution/1300-1399/1370.Increasing Decreasing String/Solution.py | {
"start": 0,
"end": 331
} | class ____:
def sortString(self, s: str) -> str:
cnt = Counter(s)
cs = ascii_lowercase + ascii_lowercase[::-1]
ans = []
while len(ans) < len(s):
for c in cs:
if cnt[c]:
ans.append(c)
cnt[c] -= 1
return "".join(ans)
| Solution |
python | langchain-ai__langchain | libs/core/langchain_core/embeddings/embeddings.py | {
"start": 132,
"end": 2405
} | class ____(ABC):
"""Interface for embedding models.
This is an interface meant for implementing text embedding models.
Text embedding models are used to map text to a vector (a point in n-dimensional
space).
Texts that are similar will usually be mapped to points that are close to each
other in this space. The exact details of what's considered "similar" and how
"distance" is measured in this space are dependent on the specific embedding model.
This abstraction contains a method for embedding a list of documents and a method
for embedding a query text. The embedding of a query text is expected to be a single
vector, while the embedding of a list of documents is expected to be a list of
vectors.
Usually the query embedding is identical to the document embedding, but the
abstraction allows treating them independently.
In addition to the synchronous methods, this interface also provides asynchronous
versions of the methods.
By default, the asynchronous methods are implemented using the synchronous methods;
however, implementations may choose to override the asynchronous methods with
an async native implementation for performance reasons.
"""
@abstractmethod
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Embed search docs.
Args:
texts: List of text to embed.
Returns:
List of embeddings.
"""
@abstractmethod
def embed_query(self, text: str) -> list[float]:
"""Embed query text.
Args:
text: Text to embed.
Returns:
Embedding.
"""
async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
"""Asynchronous Embed search docs.
Args:
texts: List of text to embed.
Returns:
List of embeddings.
"""
return await run_in_executor(None, self.embed_documents, texts)
async def aembed_query(self, text: str) -> list[float]:
"""Asynchronous Embed query text.
Args:
text: Text to embed.
Returns:
Embedding.
"""
return await run_in_executor(None, self.embed_query, text)
| Embeddings |
python | aimacode__aima-python | mdp.py | {
"start": 13120,
"end": 16758
} | class ____:
"""Matrix operations class"""
@staticmethod
def add(A, B):
"""Add two matrices A and B"""
res = []
for i in range(len(A)):
row = []
for j in range(len(A[0])):
row.append(A[i][j] + B[i][j])
res.append(row)
return res
@staticmethod
def scalar_multiply(a, B):
"""Multiply scalar a to matrix B"""
for i in range(len(B)):
for j in range(len(B[0])):
B[i][j] = a * B[i][j]
return B
@staticmethod
def multiply(A, B):
"""Multiply two matrices A and B element-wise"""
matrix = []
for i in range(len(B)):
row = []
for j in range(len(B[0])):
row.append(B[i][j] * A[j][i])
matrix.append(row)
return matrix
@staticmethod
def matmul(A, B):
"""Inner-product of two matrices"""
return [[sum(ele_a * ele_b for ele_a, ele_b in zip(row_a, col_b)) for col_b in list(zip(*B))] for row_a in A]
@staticmethod
def transpose(A):
"""Transpose a matrix"""
return [list(i) for i in zip(*A)]
def pomdp_value_iteration(pomdp, epsilon=0.1):
"""Solving a POMDP by value iteration."""
U = {'': [[0] * len(pomdp.states)]}
count = 0
while True:
count += 1
prev_U = U
values = [val for action in U for val in U[action]]
value_matxs = []
for i in values:
for j in values:
value_matxs.append([i, j])
U1 = defaultdict(list)
for action in pomdp.actions:
for u in value_matxs:
u1 = Matrix.matmul(Matrix.matmul(pomdp.t_prob[int(action)],
Matrix.multiply(pomdp.e_prob[int(action)], Matrix.transpose(u))),
[[1], [1]])
u1 = Matrix.add(Matrix.scalar_multiply(pomdp.gamma, Matrix.transpose(u1)), [pomdp.rewards[int(action)]])
U1[action].append(u1[0])
U = pomdp.remove_dominated_plans_fast(U1)
# replace with U = pomdp.remove_dominated_plans(U1) for accurate calculations
if count > 10:
if pomdp.max_difference(U, prev_U) < epsilon * (1 - pomdp.gamma) / pomdp.gamma:
return U
__doc__ += """
>>> pi = best_policy(sequential_decision_environment, value_iteration(sequential_decision_environment, .01))
>>> sequential_decision_environment.to_arrows(pi)
[['>', '>', '>', '.'], ['^', None, '^', '.'], ['^', '>', '^', '<']]
>>> from utils import print_table
>>> print_table(sequential_decision_environment.to_arrows(pi))
> > > .
^ None ^ .
^ > ^ <
>>> print_table(sequential_decision_environment.to_arrows(policy_iteration(sequential_decision_environment)))
> > > .
^ None ^ .
^ > ^ <
""" # noqa
"""
s = { 'a' : { 'plan1' : [(0.2, 'a'), (0.3, 'b'), (0.3, 'c'), (0.2, 'd')],
'plan2' : [(0.4, 'a'), (0.15, 'b'), (0.45, 'c')],
'plan3' : [(0.2, 'a'), (0.5, 'b'), (0.3, 'c')],
},
'b' : { 'plan1' : [(0.2, 'a'), (0.6, 'b'), (0.2, 'c'), (0.1, 'd')],
'plan2' : [(0.6, 'a'), (0.2, 'b'), (0.1, 'c'), (0.1, 'd')],
'plan3' : [(0.3, 'a'), (0.3, 'b'), (0.4, 'c')],
},
'c' : { 'plan1' : [(0.3, 'a'), (0.5, 'b'), (0.1, 'c'), (0.1, 'd')],
'plan2' : [(0.5, 'a'), (0.3, 'b'), (0.1, 'c'), (0.1, 'd')],
'plan3' : [(0.1, 'a'), (0.3, 'b'), (0.1, 'c'), (0.5, 'd')],
},
}
"""
| Matrix |
python | numba__numba | numba/core/untyped_passes.py | {
"start": 6537,
"end": 7620
} | class ____(FunctionPass):
_name = "inline_closure_likes"
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
# Ensure we have an IR and type information.
assert state.func_ir
# if the return type is a pyobject, there's no type info available and
# no ability to resolve certain typed function calls in the array
# inlining code, use this variable to indicate
typed_pass = not isinstance(state.return_type, types.misc.PyObject)
from numba.core.inline_closurecall import InlineClosureCallPass
inline_pass = InlineClosureCallPass(
state.func_ir,
state.flags.auto_parallel,
state.parfor_diagnostics.replaced_fns,
typed_pass)
inline_pass.run()
# Remove all Dels, and re-run postproc
post_proc = postproc.PostProcessor(state.func_ir)
post_proc.run()
fixup_var_define_in_scope(state.func_ir.blocks)
return True
@register_pass(mutates_CFG=True, analysis_only=False)
| InlineClosureLikes |
python | pyodide__pyodide | src/py/_pyodide/_core_docs.py | {
"start": 55128,
"end": 56242
} | class ____:
"""The type of the Python representation of the JavaScript null object"""
def __new__(cls):
return jsnull
def __repr__(self):
return "jsnull"
def __bool__(self):
return False
typeof = "object"
#: The Python representation of the JavaScript null object.
jsnull: JsNull = object.__new__(JsNull)
from json import encoder
encoder._JSNULL = jsnull # type:ignore[attr-defined]
__all__ = [
"ConversionError",
"InternalError",
"JsArray",
"JsAsyncGenerator",
"JsAsyncIterable",
"JsAsyncIterator",
"JsBuffer",
"JsCallableDoubleProxy",
"JsDoubleProxy",
"JsException",
"JsFetchResponse",
"JsGenerator",
"JsIterable",
"JsIterator",
"JsMap",
"JsMutableMap",
"JsPromise",
"JsProxy",
"JsDomElement",
"JsCallable",
"JsOnceCallable",
"JsTypedArray",
"JsWeakRef",
"ToJsConverter",
"run_sync",
"can_run_sync",
"create_once_callable",
"create_proxy",
"destroy_proxies",
"to_js",
"JsNull",
"jsnull",
]
__name__ = _save_name
del _save_name
| JsNull |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_first_valid_index.py | {
"start": 158,
"end": 2575
} | class ____:
def test_first_valid_index_single_nan(self, frame_or_series):
# GH#9752 Series/DataFrame should both return None, not raise
obj = frame_or_series([np.nan])
assert obj.first_valid_index() is None
assert obj.iloc[:0].first_valid_index() is None
@pytest.mark.parametrize(
"empty", [DataFrame(), Series(dtype=object), Series([], index=[], dtype=object)]
)
def test_first_valid_index_empty(self, empty):
# GH#12800
assert empty.last_valid_index() is None
assert empty.first_valid_index() is None
@pytest.mark.parametrize(
"data,idx,expected_first,expected_last",
[
({"A": [1, 2, 3]}, [1, 1, 2], 1, 2),
({"A": [1, 2, 3]}, [1, 2, 2], 1, 2),
({"A": [1, 2, 3, 4]}, ["d", "d", "d", "d"], "d", "d"),
({"A": [1, np.nan, 3]}, [1, 1, 2], 1, 2),
({"A": [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
({"A": [1, np.nan, 3]}, [1, 2, 2], 1, 2),
],
)
def test_first_last_valid_frame(self, data, idx, expected_first, expected_last):
# GH#21441
df = DataFrame(data, index=idx)
assert expected_first == df.first_valid_index()
assert expected_last == df.last_valid_index()
@pytest.mark.parametrize(
"index",
[Index([str(i) for i in range(20)]), date_range("2020-01-01", periods=20)],
)
def test_first_last_valid(self, index):
mat = np.random.default_rng(2).standard_normal(len(index))
mat[:5] = np.nan
mat[-5:] = np.nan
frame = DataFrame({"foo": mat}, index=index)
assert frame.first_valid_index() == frame.index[5]
assert frame.last_valid_index() == frame.index[-6]
ser = frame["foo"]
assert ser.first_valid_index() == frame.index[5]
assert ser.last_valid_index() == frame.index[-6]
@pytest.mark.parametrize(
"index",
[Index([str(i) for i in range(10)]), date_range("2020-01-01", periods=10)],
)
def test_first_last_valid_all_nan(self, index):
# GH#17400: no valid entries
frame = DataFrame(np.nan, columns=["foo"], index=index)
assert frame.last_valid_index() is None
assert frame.first_valid_index() is None
ser = frame["foo"]
assert ser.first_valid_index() is None
assert ser.last_valid_index() is None
| TestFirstValidIndex |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_name/invalid_name_property.py | {
"start": 264,
"end": 573
} | class ____:
"""Test properties with attr-rgx and property-classes options."""
@property
def FOO(self):
pass
@property
def bar(self): # [invalid-name]
pass
@abc.abstractproperty
def BAZ(self):
pass
@custom_prop
def QUX(self):
pass
| FooClass |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 75000,
"end": 76263
} | class ____(
BlackwellTMATemplateConfigMixin, ScaledMMConfigMixin
):
"""
Scaled Blackwell TMA-specific mixin that extends ScaledMMConfigMixin with TMA functionality.
This is for scaled MM templates that use device TMA on Blackwell.
This inherits from ScaledMMConfigMixin, which inherits the scale_mm_epilogue, and adds TMA-specific options.
"""
# pyrefly: ignore [bad-override]
def _filter_configs(self, configs: list[BaseConfig]) -> list[BaseConfig]:
"""
Warp specialization-specific filtering (BlackwellTMATemplateConfigMixin)
(compilation issues occur in some versions of Triton)
- num_warps < 4 unsafe for warpspec
- num_stages < 2 unsafe for warpspec
TMA-specific filtering:
- block_k >= 32 required for TMA (requires inner-most dimension >= 32)
"""
configs = [c for c in configs if c.block_k >= 32]
return super()._filter_configs(configs)
# Template-specific heuristic classes using multiple inheritance
@register_template_heuristic(
mm_template.uid,
"cuda",
register=torch.version.hip is None,
)
@register_template_heuristic(
bmm_template.uid,
"cuda",
register=torch.version.hip is None,
)
| ScaledBlackwellTMAConfigMixin |
python | ethereum__web3.py | web3/exceptions.py | {
"start": 5334,
"end": 5444
} | class ____(Web3Exception):
"""
Raised when the number of log topics is mismatched.
"""
| LogTopicError |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/repository/github_client.py | {
"start": 2902,
"end": 3875
} | class ____(DataClassJsonMixin):
"""
Dataclass for the response from the Github API's getBranch endpoint.
Attributes:
- commit (Commit): Commit object for the branch.
"""
@dataclass
class Commit(DataClassJsonMixin):
"""Dataclass for the commit object in the branch. (commit.commit)."""
@dataclass
class Commit(DataClassJsonMixin):
"""Dataclass for the commit object in the commit. (commit.commit.tree)."""
@dataclass
class Tree(DataClassJsonMixin):
"""
Dataclass for the tree object in the commit.
Usage: commit.commit.tree.sha
"""
sha: str
tree: Tree
commit: Commit
@dataclass
class Links(DataClassJsonMixin):
_self: str = field(metadata=config(field_name="self"))
html: str
commit: Commit
name: str
_links: Links
| GitBranchResponseModel |
python | tensorflow__tensorflow | tensorflow/python/eager/backprop_test.py | {
"start": 2422,
"end": 54088
} | class ____(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def testAggregateGradients(self):
def fn(x):
ind1 = constant_op.constant(np.array([0, 1]))
ind2 = constant_op.constant(np.array([2, 3]))
ind3 = constant_op.constant(np.array([1, 3]))
g1 = embedding_ops.embedding_lookup(x, ind1)
g2 = embedding_ops.embedding_lookup(x, ind2)
g3 = embedding_ops.embedding_lookup(x, ind3)
return g1 * g2 * g3
var_np = np.random.rand(4, 2).astype(np.float32)
var = constant_op.constant(var_np)
grad = backprop.gradients_function(fn, [0])(var)[0]
grad = self.evaluate(ops.convert_to_tensor(grad))
if not context.executing_eagerly():
tf_var = array_ops.constant(var_np, dtypes.float32)
tf_ind1 = array_ops.constant([0, 1])
tf_ind2 = array_ops.constant([2, 3])
tf_ind3 = array_ops.constant([1, 3])
tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
tf_y = tf_g1 * tf_g2 * tf_g3
tf_grad = gradients.gradients(tf_y, [tf_var])[0]
tf_dense_grad = math_ops.unsorted_segment_sum(tf_grad.values,
tf_grad.indices,
tf_grad.dense_shape[0])
self.assertAllClose(grad, self.evaluate(tf_dense_grad))
@test_util.run_in_graph_and_eager_modes
def testAggregateGradientsWithTensor(self):
def fn(x):
ind1 = constant_op.constant(np.array([0, 1]))
# A mixture of IndexedSlices and dense tensor to aggregate.
g1 = embedding_ops.embedding_lookup(x, ind1)
g2 = math_ops.reduce_sum(x * constant_op.constant(2.0))
return g1 * g2
var_np = np.random.rand(4, 2).astype(np.float32)
var = constant_op.constant(var_np)
grad = backprop.gradients_function(fn, [0])(var)[0]
grad = self.evaluate(ops.convert_to_tensor(grad))
if not context.executing_eagerly():
tf_var = array_ops.constant(var_np, dtypes.float32)
tf_ind1 = array_ops.constant([0, 1])
tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
tf_g2 = math_ops.reduce_sum(tf_var * 2.0, axis=(0, 1))
tf_y = tf_g1 * tf_g2
tf_grad = gradients.gradients(tf_y, [tf_var])[0]
self.assertAllClose(grad, tf_grad)
def testImplicitGradWithResourceVariable(self):
x = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(1.0), name='x')
def fn():
b = constant_op.constant(2.0)
c = math_ops.add(x.value(), b)
return math_ops.add(c, constant_op.constant(3.0))
grads_and_vars = backprop.implicit_grad(fn)()
self.assertAllEqual(grads_and_vars[0][0], 1.0)
self.assertAllEqual(id(grads_and_vars[0][1]), id(x))
@parameterized.named_parameters([('Function', def_function.function),
('NoFunction', lambda f: f)])
def testNoOpBehaviorConsistent(self, decorator):
@decorator
def f(x):
# Test all different types of no-ops
x1 = array_ops.identity(x)
x2 = math_ops.add_v2(x, 0)
x3 = math_ops.subtract(x, 0)
x4 = math_ops.multiply(x, 1)
with backprop.GradientTape() as t:
t.watch(x)
t.watch(x1)
t.watch(x2)
t.watch(x3)
t.watch(x4)
y1 = x * 2.
y2 = x1 * 3.
y3 = x2 * 3.
y4 = x3 * 3.
y5 = x4 * 3.
loss = y1 + y2 + y3 + y4 + y5
return t.gradient(loss, [x, x1, x2, x3, x4])
self.assertAllClose([2., 3., 3., 3., 3.], f(constant_op.constant(10.)))
def testResourceHandleOutputWithoutHandleData(self):
# This is a bit of a weird thing to test since we try to maintain handle
# data. But users do create their own resources, and those often do not have
# any handle data.
h = resource_variable_ops.var_handle_op(
shape=[], dtype=dtypes.float32, shared_name='abc')
with backprop.GradientTape() as tape:
x = constant_op.constant(1.)
tape.watch(x)
tape.watch(h)
y, h = array_ops.identity_n([x, h])
self.assertAllClose(1., tape.gradient(y, x))
def testGradientInsideLoop(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
def body(_):
_ = v + 1.0 # This reads the variable inside the loop context
with backprop.GradientTape() as t:
result = v * 2
self.assertIsNotNone(t.gradient(result, v))
return 1.0
while_loop.while_loop(lambda i: False, body, [1.0])
def testWhereGradient(self):
# Note: where is special because only some of its arguments are of
# differentiable dtypes.
def f(x):
return array_ops.where(x < 10, x, x * x)
g = backprop.gradients_function(f)
self.assertAllEqual(g(5.)[0], 1.0)
self.assertAllEqual(g(50.)[0], 100.0)
def testTwoTargets(self):
with backprop.GradientTape() as t:
x = constant_op.constant(3.0)
y = constant_op.constant(2.0)
t.watch([x, y])
xx = 2 * x
yy = 3 * y
dx, dy = t.gradient([xx, yy], [x, y])
self.assertAllEqual(dx, 2.0)
self.assertAllEqual(dy, 3.0)
def testCustomGradientEmptyError(self):
@custom_gradient.custom_gradient
def identity(x):
def grad(_):
return [] # This return value is wrong!
return x, grad
x = variables.Variable(1.0)
with backprop.GradientTape() as t:
y = identity(x)
with self.assertRaises(ValueError):
t.gradient(y, [x])
def test_stop_gradient_hides_downstream_ops(self):
@custom_gradient.custom_gradient
def _backward_pass_error(x):
def _grad(_):
raise AssertionError(
'Unexpectedly ran the backward function. This probably means that '
'tf.GradientTape is not properly ignoring tensors downstream of '
'tf.stop_gradient.')
return x, _grad
@def_function.function
def f(x):
return _backward_pass_error(x)
x = constant_op.constant(1.)
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(array_ops.stop_gradient(x))
self.assertIsNone(tape.gradient(y, x))
def testOutputGradUsedInComputation(self):
with backprop.GradientTape() as t:
x = constant_op.constant(3.0)
y = constant_op.constant(2.0)
t.watch([x, y])
loss = x * y
dx, = t.gradient([loss, x], [x], output_gradients=[1.0, 2.0])
self.assertAllEqual(dx, 4.0)
def testDy(self):
def f(x):
return x
grad_fn = backprop.gradients_function(f)
self.assertAllEqual(2., grad_fn(1., dy=2.)[0])
def testGradientInteger(self):
def f(x):
return x + x
int_tensor = constant_op.constant(1)
self.assertEqual(backprop.gradients_function(f)(int_tensor)[0], None)
def testErrors(self):
@custom_gradient.custom_gradient
def f(x):
def grad(_):
raise RuntimeError('x')
return x, grad
# TODO(apassos) raise the right error here
with self.assertRaises(RuntimeError):
backprop.gradients_function(f)(constant_op.constant(1.0))
def testGradientsFunctionInCustomGradient(self):
@custom_gradient.custom_gradient
def f(x):
(y,) = backprop.gradients_function(lambda x: x * x)(x)
def grad(dy):
return [2 * dy]
return y, grad
self.assertAllEqual(f(1.0), 2.0)
def testImplicitGradOverEmbeddingLookup(self):
batch_size = 8
embedding_size = 512
vocab_size = 1000
lrn_rate = 0.1
random_init = random_ops.random_uniform([vocab_size, embedding_size])
x = array_ops.ones((batch_size), dtypes.int64)
embedding = resource_variable_ops.ResourceVariable(
initial_value=random_init, dtype=dtypes.float32, name='embedding')
def f():
embedded_x = embedding_ops.embedding_lookup(embedding, x)
return constant_op.constant(1.0, dtypes.float32) - embedded_x
grad = backprop.implicit_grad(f)()[0][0]
opt = training.GradientDescentOptimizer(lrn_rate)
with ops.Graph().as_default(), self.cached_session():
tf_x = array_ops.ones((batch_size), dtypes.int64)
# TODO(ashankar,apassos): Change to ResourceVariable.
tf_embedding = variables.Variable(
random_init.numpy(), name='tf_embedding')
tf_embedded_x = embedding_ops.embedding_lookup(tf_embedding, tf_x)
tf_y = 1.0 - tf_embedded_x
tf_grad = gradients.gradients(tf_y, [tf_embedding])[0]
tf_opt = training.GradientDescentOptimizer(0.1)
tf_embedding.initializer.run()
self.assertAllClose(tf_grad.indices, grad.indices)
self.assertAllClose(tf_grad.values, grad.values)
tf_opt.apply_gradients([(tf_grad, tf_embedding)]).run()
expected = self.evaluate(tf_embedding)
opt.apply_gradients([(grad, embedding)])
self.assertAllClose(expected, embedding.read_value())
def testImplicitGradOrdering(self):
v0 = resource_variable_ops.ResourceVariable(1.0)
v1 = resource_variable_ops.ResourceVariable(2.0)
def f():
x = v1 * v1
y = v0 * v0
return x + y
grads = backprop.implicit_grad(f)()
ordered_variables = [x[1] for x in grads]
self.assertIs(ordered_variables[0], v0)
self.assertIs(ordered_variables[1], v1)
def testTapeNoOpGradient(self):
x = constant_op.constant(3.0)
with backprop.GradientTape() as t:
t.watch(x)
y = x
self.assertEqual(t.gradient(y, x).numpy(), 1.0)
def testTapeIdentityGradientIsIdentity(self):
x = constant_op.constant(3.0)
with backprop.GradientTape() as t:
t.watch(x)
y = array_ops.identity(x)
self.assertEqual(t.gradient(y, x).numpy(), 1.0)
def testFunctionIndexedSlicesGradient(self):
@def_function.function
def f(x):
return x + 1
with backprop.GradientTape() as t:
x = constant_op.constant([1.0])
t.watch(x)
y = f(x)
y = array_ops.gather(y, [0])
self.assertAllEqual(t.gradient(y, x), [1.0])
def testTapeGradientMultiTargetOneIsSource(self):
x = constant_op.constant(2.0)
with backprop.GradientTape() as t:
t.watch(x)
y = x * x
self.assertEqual(t.gradient([x, y], x).numpy(), 5.0)
def testTapeNoOpGradientWithMultiTargetAllSource(self):
x = constant_op.constant(3.0)
with backprop.GradientTape() as t:
t.watch(x)
y = x
self.assertEqual(t.gradient([y, y], x).numpy(), 2.0)
def testTapeNoOpGradientWithMultiTargetMultiSource(self):
x = constant_op.constant(3.0)
y = constant_op.constant(5.0)
with backprop.GradientTape() as t:
t.watch(x)
t.watch(y)
z = y * y
self.assertAllEqual(t.gradient([x, y, z], [x, y]), [1.0, 11.0])
def testTapeGradientStringTarget(self):
s = constant_op.constant('unknown', dtype=dtypes.string)
x = constant_op.constant(3.0)
with backprop.GradientTape() as t:
t.watch(x)
t.watch(s)
grads = t.gradient(s, x)
self.assertEqual(grads, None)
def testTapeNoOpGradientStringSourceAndTarget(self):
s = constant_op.constant('unknown', dtype=dtypes.string)
with backprop.GradientTape() as t:
t.watch(s)
grads = t.gradient(s, s)
self.assertEqual(grads, None)
def testTapeNoOpGradientWithMultiTargetMultiSourceIncludeString(self):
x = constant_op.constant(3.0)
y = constant_op.constant(5.0)
s = constant_op.constant('unknown', dtype=dtypes.string)
with backprop.GradientTape() as t:
t.watch(x)
t.watch(y)
t.watch(s)
z = y * y
grads = t.gradient([x, y, z, s], [x, y, s])
self.assertAllEqual(grads[:2], [1.0, 11.0])
self.assertEqual(grads[2], None)
def testTapeNoOpOnVariableIsIdentity(self):
v0 = resource_variable_ops.ResourceVariable(1.0)
with backprop.GradientTape() as t:
y = v0.read_value()
self.assertEqual(t.gradient(y, v0).numpy(), 1.0)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testTapeNoOpGradient2By2(self):
a_2_by_2 = constant_op.constant(2.0, shape=[2, 2])
with backprop.GradientTape(persistent=True) as tape:
tape.watch(a_2_by_2)
dy_dy = tape.gradient(a_2_by_2, [a_2_by_2])[0]
self.assertAllEqual(dy_dy.numpy(),
constant_op.constant(1.0, shape=[2, 2]).numpy())
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testTapeNoOpGradientMultiTarget2By2(self):
a_2_by_2 = constant_op.constant(2.0, shape=[2, 2])
with backprop.GradientTape(persistent=True) as tape:
tape.watch(a_2_by_2)
dy_dy = tape.gradient([a_2_by_2, a_2_by_2], [a_2_by_2])[0]
self.assertAllEqual(dy_dy.numpy(),
constant_op.constant(2.0, shape=[2, 2]).numpy())
def testTapeStopRecording(self):
with backprop.GradientTape() as t:
x = resource_variable_ops.ResourceVariable(1.0)
with t.stop_recording():
y = x * x
self.assertEqual(t.gradient(y, x), None)
  def testTapeStopStartRecording(self):
    """Recording resumes after stop_recording(); earlier ops stay recorded."""
    with backprop.GradientTape(persistent=True) as t:
      x = resource_variable_ops.ResourceVariable(1.0)
      x2 = x * 2  # This should be differentiated through.
      with t.stop_recording():
        y = x2 * x2
      z = x2 * x2
    # y was computed while recording was paused, so it is unconnected.
    self.assertEqual(t.gradient(y, x2), None)
    # If the x*2 was not differentiated through, this would be 2.0, not 4.0
    self.assertEqual(t.gradient(z, x2).numpy(), 4.0)
  def testTapeReset(self):
    """reset() discards previously recorded ops but keeps recording after."""
    with backprop.GradientTape() as t:
      v = resource_variable_ops.ResourceVariable(1.0)
      loss = v * v
      t.reset()
      loss += v * v
    # Only the post-reset v*v contributes: d(v*v)/dv = 2v = 2.
    self.assertAllEqual(t.gradient(loss, v), 2.0)
  def testPythonMax(self):
    """Python's max() routes the gradient only to the selected variable."""
    x = [
        resource_variable_ops.ResourceVariable(2.),
        resource_variable_ops.ResourceVariable(3.),
        resource_variable_ops.ResourceVariable(5.)
    ]
    with backprop.GradientTape() as t:
      f = max(x)
    grad = t.gradient(f, x)
    self.assertAllEqual(self.evaluate(f), 5.)
    # Unselected variables are unconnected, hence None.
    self.assertAllEqual(self.evaluate(grad), [None, None, 1.0])
  def testAutomaticWatchedVariables(self):
    """Accessed variables are auto-watched; reset() clears the watch set."""
    with backprop.GradientTape() as t:
      self.assertEqual(0, len(t.watched_variables()))
      v = resource_variable_ops.ResourceVariable(1.0)
      loss = v * v
      self.assertAllEqual([v], t.watched_variables())
      t.reset()
      self.assertEqual(0, len(t.watched_variables()))
      loss += v * v
      self.assertAllEqual([v], t.watched_variables())
  def testExplicitWatchedVariables(self):
    """Explicitly watched variables appear in watched_variables(); reset clears."""
    with backprop.GradientTape() as t:
      self.assertEqual(0, len(t.watched_variables()))
      v = resource_variable_ops.ResourceVariable(1.0)
      t.watch(v)
      self.assertAllEqual([v], t.watched_variables())
      t.reset()
      self.assertEqual(0, len(t.watched_variables()))
      t.watch(v)
      self.assertAllEqual([v], t.watched_variables())
  @test_util.assert_no_new_tensors
  def testGradientNone(self):
    """A None partial gradient inside the graph does not break backprop."""
    def loss(x, l):
      return math_ops.reduce_mean(
          nn_ops.softmax_cross_entropy_with_logits(logits=x, labels=l),
          constant_op.constant([0]))
    logits = constant_op.constant([[0.0, 0.0]])
    labels = constant_op.constant([[1.0, 0.0]])
    # softmax_cross_entropy_with_logits returns two outputs and in this case the
    # gradient wrt the second is None.
    g, = backprop.gradients_function(loss, [0])(logits, labels)
    self.assertAllEqual(g.numpy(), [[-0.5, 0.5]])
  @test_util.run_in_graph_and_eager_modes
  def testGradientWithinTapeBlock(self):
    """gradient() may be called inside the tape's `with` block."""
    v1 = resource_variable_ops.ResourceVariable(1.)
    self.evaluate(v1.initializer)
    with backprop.GradientTape() as t:
      loss = 2 * v1
      # Non-persistent tape: a single in-scope gradient call is allowed.
      grad = t.gradient(loss, v1)
    self.assertAllEqual(self.evaluate(grad), 2.0)
    with backprop.GradientTape(persistent=True) as t:
      loss = 2 * v1
      grad = t.gradient(loss, v1)
    self.assertAllEqual(self.evaluate(grad), 2.0)
  @test_util.run_in_graph_and_eager_modes
  def testNestedSelfContexts(self):
    """Re-entering a tape's own context while active raises ValueError."""
    v1 = resource_variable_ops.ResourceVariable(1.)
    self.evaluate(v1.initializer)
    with backprop.GradientTape() as t:
      with self.assertRaises(ValueError):
        with t:
          pass
  @test_util.assert_no_new_tensors
  def testSecondGrad(self):
    """Second-order gradient through softmax cross entropy is well-defined."""
    def first(x):
      l = constant_op.constant([[0.0]])
      x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=x)
      x = math_ops.reduce_sum(x, constant_op.constant([0]))
      return x
    def second(x):
      grad = backprop.gradients_function(first, [0])(x)[0]
      return math_ops.reduce_sum(grad, constant_op.constant([0]))
    f = constant_op.constant([[0.1]])
    grad = backprop.gradients_function(second, [0])(f)[0]
    self.assertAllEqual([[0.0]], grad)
  @test_util.run_in_graph_and_eager_modes
  def testWatchingIsTapeLocal(self):
    """watch() only affects the tape it was called on, not enclosing tapes."""
    x1 = resource_variable_ops.ResourceVariable(2.0, trainable=False)
    x2 = resource_variable_ops.ResourceVariable(2.0, trainable=False)
    with backprop.GradientTape() as tape1:
      with backprop.GradientTape() as tape2:
        tape1.watch(x1)
        tape2.watch([x1, x2])
        y = x1**3
        z = x2**2
        dy, dz = tape2.gradient([y, z], [x1, x2])
      d2y, d2z = tape1.gradient([dy, dz], [x1, x2])
    self.evaluate([x1.initializer, x2.initializer])
    # d2(x^3)/dx2 = 6x = 12; x2 was never watched by tape1 -> None.
    self.assertEqual(self.evaluate(d2y), 12.0)
    self.assertIsNone(d2z)
  @test_util.assert_no_new_tensors
  def testMakeVJP(self):
    """make_vjp returns the forward value and a vector-Jacobian product fn."""
    def f(x):
      return x * x
    wrapped_fn = backprop.make_vjp(f, persistent=False)
    result, vjp = wrapped_fn(constant_op.constant(3.0))
    self.assertAllEqual(result, 9.0)
    # vjp(2.0) = 2.0 * d(x^2)/dx at x=3 = 2 * 6 = 12.
    self.assertAllEqual(vjp(2.0)[0], 12.0)
def testPersistentMakeVJP(self):
def f(x):
return x * x
wrapped_fn = backprop.make_vjp(f, persistent=True)
_, vjp = wrapped_fn(constant_op.constant(3.0))
vjp_result1 = vjp(2.0)[0]
vjp_result2 = vjp(2.0)[0]
self.assertAllEqual(vjp_result1, vjp_result2, 12.0)
  @test_util.assert_no_new_tensors
  def testGradGrad(self):
    """Second derivative of x^2 via nested gradients_function is 2."""
    def sq(x):
      return x * x
    def grad(x):
      value = backprop.gradients_function(sq, [0])(x)[0]
      return value
    gradgrad = backprop.gradients_function(grad, [0])
    self.assertAllEqual(gradgrad(constant_op.constant(3.0))[0], 2.0)
  @test_util.assert_no_new_tensors
  def testGradGradExp(self):
    """Second derivative of exp at 0 is exp(0) == 1."""
    def grad(x):
      value = backprop.gradients_function(math_ops.exp, [0])(x)[0]
      return value
    gradgrad = backprop.gradients_function(grad, [0])
    self.assertAllEqual(gradgrad(constant_op.constant(0.0))[0], 1.0)
  @test_util.assert_no_new_tensors
  def testStopGradient(self):
    """stop_gradient blocks backprop, producing a None gradient."""
    grad = backprop.gradients_function(
        lambda x: array_ops.stop_gradient(math_ops.argmax(x)))
    self.assertAllEqual(grad([0.0])[0], None)
  @test_util.assert_no_new_tensors
  def testArgmax(self):
    """argmax output wrapped in stop_gradient yields a None gradient."""
    def argmax(x):
      i = math_ops.argmax(x)
      return array_ops.stop_gradient(i)
    grad = backprop.gradients_function(argmax)
    self.assertAllEqual(grad([0.0])[0], None)
  @test_util.run_gpu_only
  @test_util.assert_no_new_tensors
  def testGPU(self):
    """Gradients flow correctly across CPU<->GPU copies in the forward pass."""
    def fn(x):
      with context.device('/gpu:0'):
        b = constant_op.constant(2.0)
        c = math_ops.add(x.gpu(), b)
        # TODO(apassos): remove cpu below by making TensorVSPace aware
        # of devices.
        return math_ops.add(c, constant_op.constant(3.0)).cpu()
    grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]
    self.assertAllEqual(grad, 1.0)
  @test_util.run_gpu_only
  @test_util.assert_no_new_tensors
  def testGPUImplicitGrad(self):
    """implicit_grad works for a variable placed and read on the GPU."""
    with context.device('gpu:0'):
      v = resource_variable_ops.ResourceVariable(
          constant_op.constant(1.0), name='v')
    def f():
      with context.device('gpu:0'):
        return v.read_value()
    self.assertEqual(backprop.implicit_grad(f)()[0][0].cpu().numpy(), 1.0)
  @test_util.assert_no_new_tensors
  def testCPU(self):
    """Basic gradient of a chain of adds on CPU is 1."""
    def fn(x):
      b = constant_op.constant(2.0)
      c = math_ops.add(x, b)
      return math_ops.add(c, constant_op.constant(3.0))
    grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]
    self.assertAllEqual(grad, 1.0)
  @test_util.run_gpu_only
  @test_util.assert_no_new_tensors
  def testTensorCopyGPU2CPU2GPU(self):
    """Backprop handles GPU->CPU copies of GPU-resident inputs."""
    def f(a, b):
      return a.cpu() + b.cpu()
    with context.device('/gpu:0'):
      a = constant_op.constant(1.0)
      b = constant_op.constant(2.0)
    grad = backprop.gradients_function(f, [0])(a, b)[0]
    self.assertAllEqual(grad, 1.0)
  @test_util.assert_no_new_tensors
  def testEmptyParams(self):
    """With no params argument, gradients_function differentiates all args."""
    def fn(a, b):
      return a * b
    x = constant_op.constant(1.0)
    y = constant_op.constant(2.0)
    dx, dy = backprop.gradients_function(fn)(x, y)
    # d(a*b)/da = b, d(a*b)/db = a.
    self.assertAllEqual(dx, y.numpy())
    self.assertAllEqual(dy, x.numpy())
  @test_util.assert_no_new_tensors
  def testUnconnectedNone(self):
    """A variable read that doesn't feed the output gets a None gradient."""
    v = resource_variable_ops.ResourceVariable(1.0, name='testUnconnectedNone')
    def f():
      v.read_value()
      return constant_op.constant(1.0)
    self.assertEqual(backprop.implicit_grad(f)()[0][0], None)
  @test_util.assert_no_new_tensors
  def testGradientTapeReEnterContext(self):
    """A tape may be exited and re-entered; both segments are recorded."""
    g = backprop.GradientTape()
    with g:
      x = constant_op.constant(3.0)
      g.watch(x)
      y = 2 * x
    with g:
      z = 2 * y
    # z = 4x, so dz/dx = 4.
    grad = g.gradient(target=z, sources=[x])
    self.assertEqual(self.evaluate(grad), [4.0])
  @test_util.assert_no_new_tensors
  @test_util.run_in_graph_and_eager_modes
  def testGradientTapeRepeatedSource(self):
    """Listing the same source twice returns the gradient for each entry."""
    with backprop.GradientTape(persistent=False) as g:
      x = constant_op.constant(3.0)
      g.watch(x)
      y = 2 * x
    grad = g.gradient(target=y, sources=[x, x])
    self.assertEqual(self.evaluate(grad), [2.0, 2.0])
  @test_util.assert_no_new_tensors
  @test_util.run_in_graph_and_eager_modes
  def testPersistentGradientTapeRepeatedSource(self):
    """Persistent tape handles repeated and reordered source lists."""
    with backprop.GradientTape(persistent=True) as g:
      x = constant_op.constant(3.0)
      y = constant_op.constant(5.0)
      g.watch(x)
      g.watch(y)
      z = x * x + x * y
    # dz/dx = 2x + y = 11; dz/dy = x = 3.
    grad = g.gradient(target=z, sources=[x, x])
    self.assertEqual(self.evaluate(grad), [11.0, 11.0])
    grad = g.gradient(target=z, sources=[y, x])
    self.assertEqual(self.evaluate(grad), [3.0, 11.0])
  @test_util.assert_no_new_tensors
  @test_util.run_in_graph_and_eager_modes
  def testGradientTapeStructure(self):
    """gradient() mirrors the nested structure (lists/tuples/dicts) of sources."""
    with backprop.GradientTape(persistent=True) as g:
      # Using different constant values because constant tensors are
      # cached, leading to a different gradient then what one might expect.
      x1 = constant_op.constant(3.0)
      x2 = constant_op.constant(3.1)
      x3 = constant_op.constant(3.2)
      g.watch(x1)
      g.watch(x2)
      g.watch(x3)
      y = x1 + 2 * x2 + 3 * x3
    self.assertEqual(self.evaluate(g.gradient(y, x1)), [1.0])
    self.assertEqual(self.evaluate(g.gradient(y, (x1,))), (1.0,))
    self.assertEqual(self.evaluate(g.gradient(y, (x1, x2))), (1.0, 2.0))
    self.assertEqual(
        self.evaluate(g.gradient(y, [(x1, x2), (x2, x3)])), [(1.0, 2.0),
                                                             (2.0, 3.0)])
    self.assertEqual(
        self.evaluate(g.gradient(y, (x1, x2, [x1, x3]))),
        (1.0, 2.0, [1.0, 3.0]))
    self.assertEqual(
        self.evaluate(g.gradient(y, [x1, {
            'x2': x2,
            'x3': x3
        }])), [1.0, {
            'x2': 2.0,
            'x3': 3.0
        }])
  @test_util.assert_no_new_tensors
  @test_util.run_in_graph_and_eager_modes
  def testGradientTape(self):
    """An inner tape's gradient can feed back into the outer tape's target."""
    with backprop.GradientTape() as g:
      x = constant_op.constant(3.0)
      g.watch(x)
      y = x * x
      with backprop.GradientTape() as gg:
        gg.watch(y)
        z = 2 * y
      inner_grad = gg.gradient(z, [y])[0]
      self.assertEqual(self.evaluate(inner_grad), 2.0)
      # inner_grad is a constant w.r.t. x, so dy/dx remains 2x = 6.
      y += inner_grad
    grad = g.gradient(y, [x])[0]
    self.assertEqual(self.evaluate(grad), 6.0)
  @test_util.assert_no_new_tensors
  @test_util.run_in_graph_and_eager_modes
  # NOTE(review): method name has a typo ("Gadient"); kept as-is since the
  # name is the test's discovered identifier.
  def testGadientTapeCalledOnConstantTarget(self):
    """Gradient of one variable w.r.t. an unrelated variable is None."""
    with backprop.GradientTape() as g:
      x = variables.Variable([3.0])
      y = variables.Variable([2.0])
    grad = g.gradient(x, y)
    self.assertAllEqual(grad, None)
  @test_util.run_in_graph_and_eager_modes
  @test_util.run_v1_only('b/120545219')
  def testGradientTapeWithCond(self):
    """tf.cond under a tape: eager differentiates; v1 graph mode raises."""
    x = constant_op.constant(3.0)
    def true_fn():
      return x
    def false_fn():
      return x * x
    with backprop.GradientTape() as g:
      g.watch(x)
      # x < x is False, so the false branch (x*x) is taken.
      y = tf_cond.cond(x < x, true_fn, false_fn)
    if not context.executing_eagerly():
      with self.assertRaisesRegex(NotImplementedError, 'tf.gradients'):
        dy = g.gradient(y, [x])[0]
    else:
      dy = g.gradient(y, [x])[0]
      self.assertEqual(self.evaluate(dy), 6.0)
  @test_util.run_in_graph_and_eager_modes
  @test_util.run_v1_only('b/120545219')
  def testGradientTapeWithWhileLoop(self):
    """while_loop under a tape: eager differentiates; v1 graph mode raises."""
    i = constant_op.constant(1)
    x = constant_op.constant(2.)
    def cond(i, _):
      return i < 3
    def body(i, x):
      return i + 1, x * 2
    with backprop.GradientTape() as g:
      g.watch([x])
      # Loop runs twice: y = x * 2 * 2 = 4x, so dy/dx = 4.
      _, y = while_loop.while_loop(cond, body, [i, x])
    if not context.executing_eagerly():
      with self.assertRaisesRegex(NotImplementedError, 'tf.gradients'):
        dy = g.gradient(y, [x])[0]
    else:
      dy = g.gradient(y, [x])[0]
      self.assertEqual(self.evaluate(dy), 4.0)
  @test_util.assert_no_new_tensors
  def testGradientTapeGradientCalledMultipleTimes(self):
    """A non-persistent tape allows only one gradient() call."""
    with backprop.GradientTape() as g:
      x = constant_op.constant(3.0)
      g.watch(x)
      y = x * x
      z = y * y
    g.gradient(z, [x])
    with self.assertRaisesRegex(
        RuntimeError, 'A non-persistent GradientTape can only'):
      g.gradient(y, [x])
  @test_util.assert_no_new_tensors
  def testGradientTapeJacobianCalledMultipleTimes(self):
    """A non-persistent tape allows only one jacobian() call."""
    with backprop.GradientTape() as g:
      x = constant_op.constant(3.0)
      g.watch(x)
      y = x * x
      z = y * y
    g.jacobian(z, [x])
    with self.assertRaisesRegex(
        RuntimeError, 'A non-persistent GradientTape can only'):
      g.jacobian(y, [x])
  @test_util.assert_no_new_tensors
  def testJacobianInsideGradientTapeScope(self):
    """jacobian() may be called inside the tape's `with` block."""
    with backprop.GradientTape() as g:
      x = constant_op.constant(3.0)
      g.watch(x)
      y = x * x
      z = y * y
      # d(x^4)/dx = 4x^3 = 4 * 27.
      self.assertAllClose(4. * 3. ** 3., g.jacobian(z, x))
  @test_util.assert_no_new_tensors
  def testBatchJacobianInsideGradientTapeScope(self):
    """batch_jacobian() may be called inside the tape's `with` block."""
    with backprop.GradientTape(persistent=True) as g:
      x = constant_op.constant([[3.0]])
      g.watch(x)
      y = x * x
      z = y * y
      self.assertAllClose([[[4. * 3. ** 3.]]], g.batch_jacobian(z, x))
  def testBatchJacobianParallelIterations(self):
    """batch_jacobian with parallel_iterations requires a persistent tape."""
    @def_function.function
    def f(persistent):
      with backprop.GradientTape(persistent=persistent) as t:
        x = constant_op.constant([[3.0]])
        t.watch(x)
        y = x * x
        z = array_ops.tile(y * y, [1, 16])
      return t.batch_jacobian(z, x, parallel_iterations=8)
    with self.assertRaisesRegex(RuntimeError,
                                'persistent=True.*parallel_iterations'):
      f(persistent=False)
    self.assertAllClose([[[4. * 3. ** 3.]] * 16], f(persistent=True))
  @test_util.assert_no_new_tensors
  def testGradientTapeBatchJacobianCalledMultipleTimes(self):
    """A non-persistent tape allows only one batch_jacobian() call."""
    with backprop.GradientTape() as g:
      x = constant_op.constant([[3.0]])
      g.watch(x)
      y = x * x
      z = y * y
    g.batch_jacobian(z, x)
    with self.assertRaisesRegex(
        RuntimeError, 'A non-persistent GradientTape can only'):
      g.batch_jacobian(y, [x])
  @test_util.assert_no_new_tensors
  @test_util.run_in_graph_and_eager_modes
  @test_util.run_v1_only('b/120545219')
  def testPersistentTape(self):
    """A persistent tape supports multiple gradient() calls for distinct targets."""
    with backprop.GradientTape(persistent=True) as g:
      x = constant_op.constant(3.0)
      g.watch(x)
      y = x * x
      z = y * y
    dz_dx = g.gradient(z, [x])[0]
    self.assertEqual(self.evaluate(dz_dx), 4 * 3 * 3 * 3)
    dy_dx = g.gradient(y, [x])[0]
    self.assertEqual(self.evaluate(dy_dx), 2 * 3)
    # Explicitly release the persistent tape's resources.
    del g
  @test_util.assert_no_new_tensors
  @test_util.run_in_graph_and_eager_modes
  def testHigherOrderGradient(self):
    """A single persistent tape can compute first/second/third derivatives."""
    with backprop.GradientTape(persistent=True) as g:
      x = constant_op.constant(3.0)
      g.watch(x)
      y = x**3                      # y       := x^3
      dy_dx = g.gradient(y, x)      # dy/dx   := 3x^2
      d2y_dx2 = g.gradient(dy_dx, x)  # d2y/dx2 := 6x
    d3y_dx3 = g.gradient(d2y_dx2, x)  # d3y/dx3 := 6
    x = 3
    self.assertAllClose(self.evaluate(y), x**3)
    self.assertEqual(self.evaluate(dy_dx), 3 * x**2)
    self.assertEqual(self.evaluate(d2y_dx2), 6 * x)
    self.assertEqual(self.evaluate(d3y_dx3), 6)
    del g
  @test_util.assert_no_new_tensors
  @test_util.run_in_graph_and_eager_modes
  def testPersistentNestedTape(self):
    """Nested persistent tapes: inner gradients feed outer tape's targets."""
    with backprop.GradientTape(persistent=True) as g:
      x = constant_op.constant(3.0)
      g.watch(x)
      y = x * x
      with backprop.GradientTape(persistent=True) as gg:
        gg.watch(y)
        z = 2 * y
      for _ in range(2):
        inner_grad = gg.gradient(z, [y])[0]
        self.assertEqual(self.evaluate(inner_grad), 2.0)
        y += inner_grad
      del gg
    grad = g.gradient(y, [x])[0]
    self.assertEqual(self.evaluate(grad), 6.0)
    # z = 2*x^2, so dz/dx = 4x = 12.
    grad = g.gradient(z, [x])[0]
    self.assertEqual(self.evaluate(grad), 12.0)
    del g
  @test_util.assert_no_new_tensors
  @test_util.run_in_graph_and_eager_modes
  def testGradientTapeVariable(self):
    """Variables are auto-watched: d(v*v)/dv = 2v = 2."""
    v = resource_variable_ops.ResourceVariable(1.0, name='v')
    self.evaluate(v.initializer)
    with backprop.GradientTape() as g:
      y = v * v
    grad = g.gradient(y, [v])[0]
    self.assertAllEqual(self.evaluate(grad), 2.0)
  @test_util.assert_no_new_tensors
  @test_util.run_in_graph_and_eager_modes
  def testNestedGradients(self):
    """Gradients w.r.t. both an input and an intermediate tensor."""
    x = constant_op.constant(3.0)
    with backprop.GradientTape() as g:
      g.watch(x)
      y = x * x
      z = y * y
    # z = x^4: dz/dx = 4x^3 = 108; dz/dy = 2y = 18.
    dz_dx, dz_dy = g.gradient(z, [x, y])
    self.assertEqual(self.evaluate(dz_dx), 108.0)
    self.assertEqual(self.evaluate(dz_dy), 18.0)
  @test_util.assert_no_new_tensors
  @test_util.run_in_graph_and_eager_modes
  def testUnconnectedGradientsDefault(self):
    """By default, gradients for unconnected sources are None."""
    x = constant_op.constant(1.0)
    y = constant_op.constant(3.0)
    with backprop.GradientTape() as g:
      g.watch([x, y])
      z = y * 2
    dz_dx = g.gradient(z, x)
    self.assertEqual(dz_dx, None)
  @test_util.assert_no_new_tensors
  @test_util.run_in_graph_and_eager_modes
  def testUnconnectedGradientsZeros(self):
    """unconnected_gradients='zero' produces zeros of the source's shape."""
    x = constant_op.constant(1.0, shape=[2, 2])
    y = constant_op.constant(3.0)
    with backprop.GradientTape() as g:
      g.watch([x, y])
      z = y * 2
    dz_dx = g.gradient(z, x, unconnected_gradients='zero')
    self.assertAllEqual([[0.0, 0.0], [0.0, 0.0]], self.evaluate(dz_dx))
  @test_util.assert_no_new_tensors
  @test_util.run_in_graph_and_eager_modes
  def testUnconnectedGradientsVariablesZeros(self):
    """unconnected_gradients='zero' also works for variable sources."""
    x = resource_variable_ops.ResourceVariable(
        constant_op.constant(1., shape=[2, 2]))
    self.evaluate(x.initializer)
    y = resource_variable_ops.ResourceVariable(constant_op.constant(3.))
    self.evaluate(y.initializer)
    with backprop.GradientTape() as g:
      g.watch([x, y])
      z = y * 2
    dz_dx = g.gradient(z, x, unconnected_gradients='zero')
    self.assertAllEqual([[0.0, 0.0], [0.0, 0.0]], self.evaluate(dz_dx))
  @test_util.run_in_graph_and_eager_modes
  def testUnknownUnconnectedGradientsValueGiven(self):
    """An invalid unconnected_gradients value raises ValueError."""
    x = constant_op.constant(1.0)
    y = constant_op.constant(1.0)
    with backprop.GradientTape() as g:
      g.watch([x, y])
      z = y * 2
    with self.assertRaisesRegex(
        ValueError, "Unknown value for unconnected_gradients: 'nonsense'"):
      g.gradient(z, x, unconnected_gradients='nonsense')
  @test_util.run_in_graph_and_eager_modes
  def testUnconnectedGradientsNestedDefunZeros(self):
    """'zero' unconnected gradients work when the cut is inside nested tf.functions."""
    @def_function.function
    def f(x):
      return x * x
    @def_function.function
    def h(y):
      z = f(y)
      # stop_gradient severs the path back to x.
      return array_ops.stop_gradient(z)
    x = constant_op.constant(1.0)
    with backprop.GradientTape() as g:
      g.watch(x)
      k = x + 2.
      y = h(k)
    dy_dx = g.gradient(y, x, unconnected_gradients='zero')
    self.assertEqual(0.0, self.evaluate(dy_dx))
  def testInvalidRecordOperationMessage(self):
    """A backward function returning too few gradients raises a clear error."""
    y = constant_op.constant(2.)
    x = constant_op.constant(1.)
    with backprop.GradientTape() as g:
      g.watch(x)
      # Backward fn returns an empty list: fewer grads than inputs.
      record.record_operation('InvalidBackprop', [y], [x], lambda dy: [])
    with self.assertRaisesRegex(errors_impl.InternalError,
                                'InvalidBackprop.*too few gradients'):
      g.gradient(y, x)
  @test_util.assert_no_new_tensors
  def testEmptyParamsForValueAndGradFunction(self):
    """val_and_grad_function with no params differentiates all arguments."""
    def fn(a, b):
      return a * b
    val_and_grads_fn = backprop.val_and_grad_function(fn)
    x = 2.0
    y = 3.0
    val, (dx, dy) = val_and_grads_fn(x, y)
    self.assertAllClose(val, x * y)
    self.assertAllEqual(dx, y)
    self.assertAllEqual(dy, x)
  @test_util.assert_no_new_tensors
  def testNonEmptyParamsForValueAndGradFunction(self):
    """params=[1] restricts differentiation to the second argument only."""
    def fn(a, b):
      return a * b
    val_and_grad_fn = backprop.val_and_grad_function(fn, params=[1])
    x = 2.0
    y = 3.0
    val, grads = val_and_grad_fn(x, y)
    self.assertAllClose(val, x * y)
    self.assertEqual(1, len(grads))
    self.assertAllEqual(grads[0], x)
  @test_util.run_gpu_only
  @test_util.assert_no_new_tensors
  def testTensorCopyCPU2GPU2CPU(self):
    """Backprop traverses CPU->GPU->CPU device copies in reverse."""
    # forward: a (cpu->gpu) -> add (gpu) -> c (gpu->cpu) -> add (cpu) -> e (cpu)
    # back: e (cpu) -> add (cpu) -> c (cpu->gpu) -> add (gpu) -> grad (gpu->cpu)
    def f(a, b):
      with context.device('/gpu:0'):
        c = math_ops.add(a.gpu(0), b.gpu(0))
      return math_ops.add(c.cpu(), constant_op.constant(3.0))
    with context.device('/cpu:0'):
      a = constant_op.constant(1.0)
      b = constant_op.constant(2.0)
    grad = backprop.gradients_function(f, [0])(a, b)[0]
    self.assertAllEqual(grad, 1.0)
  def testGetAttrType(self):
    """op_attr_type resolves a scalar type attr ('Add'/'T') to TF_ATTR_TYPE."""
    typ = backprop.op_attr_type('Add', 'T')
    self.assertEqual(typ, int(pywrap_tfe.TF_ATTR_TYPE))
  def testGetAttrList(self):
    """op_attr_type returns a list-wrapped code for list attrs ('ksize')."""
    typ = backprop.op_attr_type('MaxPool', 'ksize')
    self.assertEqual(typ, [int(pywrap_tfe.TF_ATTR_INT)])
  def testMakeAttrType(self):
    """make_attr converts a DataType enum value (1) into dtypes.float32."""
    self.assertEqual(dtypes.float32,
                     backprop.make_attr(int(pywrap_tfe.TF_ATTR_TYPE), 1))
  def testMakeAttrTypeList(self):
    """make_attr converts a list of DataType enum values element-wise."""
    self.assertEqual([dtypes.float32],
                     backprop.make_attr([int(pywrap_tfe.TF_ATTR_TYPE)], [1]))
  def testMakeAttrString(self):
    """make_attr encodes a str attr value to bytes."""
    self.assertEqual(b'a',
                     backprop.make_attr(int(pywrap_tfe.TF_ATTR_STRING), 'a'))
  def testMakeAttrStringList(self):
    """make_attr encodes a list of str attr values to bytes element-wise."""
    self.assertEqual(
        [b'a'], backprop.make_attr([int(pywrap_tfe.TF_ATTR_STRING)], ['a']))
  def testMulType(self):
    """Gradient flows through the internal _mul_dispatch: d(x*x)/dx = 2x."""
    def mul(x):
      return math_ops._mul_dispatch(x, x)  # pylint: disable=protected-access
    self.assertAllEqual(backprop.gradients_function(mul)(3.0)[0].numpy(), 6.0)
  def testMakeAttrShape(self):
    """make_attr converts shape specs (incl. None/unknown dims) to protos."""
    for s in ([], None, [1, 2, 3], [None, None], [1, None, 3]):
      expected = tensor_shape.TensorShape(s).as_proto()
      actual = backprop.make_attr(int(pywrap_tfe.TF_ATTR_SHAPE), s)
      self.assertEqual(
          expected,
          actual,
          msg=('For shape %r, expected %r != %r actual' %
               (s, expected, actual)))
  def testMakeAttrShapeList(self):
    """make_attr converts a list of shape specs to protos element-wise."""
    shape_list = [[], None, [1, 2, 3], [None, None], [1, None, 3]]
    self.assertEqual(
        [tensor_shape.TensorShape(s).as_proto() for s in shape_list],
        backprop.make_attr([int(pywrap_tfe.TF_ATTR_SHAPE)], shape_list))
  def testArgsGradientFunction(self):
    """gradients_function supports *args-style function signatures."""
    def f(*args):
      return args[0] * args[0]
    grad = backprop.gradients_function(f)
    self.assertAllEqual(grad(1.0)[0], 2.0)
  def testPartial(self):
    """gradients_function composes with functools.partial."""
    def f(x, y):
      return x * y
    part = functools.partial(f, constant_op.constant(2.0))
    self.assertAllEqual(
        backprop.gradients_function(part)(constant_op.constant(1.0))[0], 2.0)
  def testReturnSameThing(self):
    """Gradients of multiple outputs sum: d(x)/dx + d(2x)/dx = 3."""
    def f(x):
      return x, 2 * x
    self.assertAllEqual(backprop.gradients_function(f)(1.0)[0], 3.0)
  @test_util.assert_no_new_tensors
  def testExceptionSafety(self):
    """An exception during tracing leaves backprop machinery usable."""
    def f(unused_x):
      raise ValueError()
    try:
      backprop.gradients_function(f)(1.0)
    except ValueError:
      pass
    def real_f(x):
      return x * x
    self.assertAllEqual(backprop.gradients_function(real_f)(1.0)[0], 2.0)
  @test_util.assert_no_new_tensors
  def testMultiValueConvertToTensor(self):
    """Stacking tensors that trace back to one variable backprops correctly."""
    x = resource_variable_ops.ResourceVariable(
        initial_value=array_ops.constant([1.0]), name='x')
    def fn():
      a = math_ops.add(x.value(), 1.0)
      # Make sure convert_to_tensor works correctly with list of TensorNodes.
      b = array_ops_stack.stack([a, a], axis=0)
      return math_ops.reduce_mean(b)
    grad = backprop.implicit_grad(fn)()[0][0]
    self.assertAllEqual([1.0], grad)
  def testOutput(self):
    """Element-wise gradients of multiple outputs sum per element."""
    def multiout(x):
      return x + 2, x * x
    x = constant_op.constant([0.0, 1.0, 2.0])
    # d(x+2)/dx + d(x^2)/dx = 1 + 2x.
    grad = backprop.gradients_function(multiout)(x)[0]
    self.assertAllEqual([1.0, 3.0, 5.0], grad)
  def testMultiValuePreservesIfNotDiffedAgainst(self):
    """Differentiating conv2d w.r.t. the image only still works."""
    def tfe_conv2d(timage, tkernel, conv2dstrides):
      return nn_ops.conv2d(timage, tkernel, conv2dstrides, 'SAME')
    i = constant_op.constant([[[[1.0]]]])
    k = constant_op.constant([[[[2.0]]]])
    s = [1, 1, 1, 1]
    grad = backprop.gradients_function(tfe_conv2d, params=(0,))(i, k, s)[0]
    self.assertAllEqual([[[[2.0]]]], grad)
  def testSameObjectForMultipleArguments(self):
    """Passing the same object for both args yields independent gradients."""
    def f(x, y):
      return math_ops.multiply(x, y)
    g = backprop.gradients_function(f)
    def np_g(x, y):
      dx, dy = g(x, y)
      return [dx.numpy(), dy.numpy()]
    x = constant_op.constant(1.)
    self.assertAllEqual([1., 1.], np_g(x, x))
    x = 1.
    self.assertAllEqual([1., 1.], np_g(x, x))
    x = constant_op.constant([[1.]])
    self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))
    x = [[1.]]
    self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))
    v = resource_variable_ops.ResourceVariable(
        initial_value=1., name='testSameObjectForMultipleArguments.Variable')
    self.assertAllEqual([1., 1.], np_g(v, v))
  @test_util.assert_no_new_tensors
  def testImplicitGradientsCustomGradientAndCachedVariableValue(self):
    """implicit_grad applies a custom gradient over a variable read."""
    @custom_gradient.custom_gradient
    def my_square(x):
      result = math_ops.square(x)
      def grad(dr):
        # Deliberately nonstandard gradient: 2*dr*x + 1.
        return 2 * dr * x + 1
      return result, grad
    x = resource_variable_ops.ResourceVariable(
        initial_value=3., name='X.' + self.id())
    def f():
      return my_square(x)
    g = backprop.implicit_grad(f)
    grads_and_vars = g()
    self.assertEqual(1, len(grads_and_vars))
    grad, var = grads_and_vars[0]
    # 2*1*3 + 1 = 7 with upstream gradient dr == 1.
    self.assertAllEqual(7, grad)
    self.assertAllEqual(x, var)
  def testJacobianCustomGradient(self):
    """A custom gradient with variables= inside tf.function returns () for inputs."""
    class MyCallable(object):
      def __init__(self):
        self.a = variables.Variable(1.)
        self.b = variables.Variable(2.)
        self.c = variables.Variable(3.)
      def __call__(self, x):
        return self.a * x * x + self.b * x + self.c
    @def_function.function
    def call(c, x):
      @custom_gradient.custom_gradient
      def _call():
        y = c(x)
        def grad(dy, variables=None):  # pylint: disable=redefined-outer-name
          with backprop.GradientTape(persistent=True) as g:
            g.watch(variables)
            y = c(x)
          grad_vars = [
              2 * math_ops.reduce_sum(dy * g.jacobian(y, v)) for v in variables
          ]
          del g
          # No input gradients (empty tuple), only variable gradients.
          return (), grad_vars
        return y, grad
      return _call()
    c = MyCallable()
    x = constant_op.constant([1., 2., 3.])
    with backprop.GradientTape(persistent=True) as g:
      g.watch([c.a, c.b, c.c])
      y = call(c, x)
    # The custom gradient returns no gradient for x, hence None.
    self.assertAllEqual(g.gradient(y, x), None)
  @test_util.assert_no_new_tensors
  def testCustomGradient(self):
    """Custom gradients drive a small SGD loop to decreasing losses."""
    @custom_gradient.custom_gradient
    def my_mul(x, y):
      result = x * y
      def grad(dr):
        return [dr * y, dr * x]
      return result, grad
    lr = 0.25
    x = resource_variable_ops.ResourceVariable(2., name='x')
    def loss(x):
      return my_mul(2., x.read_value())
    loss_grads_fn = backprop.implicit_val_and_grad(loss)
    losses = []
    for _ in range(5):
      loss, grads_and_vars = loss_grads_fn(x)
      losses.append(loss.numpy())
      for (grad, var) in grads_and_vars:
        var.assign_sub(lr * grad)
    # loss = 2x with grad 2; each step subtracts 0.25*2 = 0.5 from x.
    self.assertAllEqual(losses, [4.0, 3., 2., 1., 0.])
  @test_util.assert_no_new_tensors
  def testCustomGradientIdentity(self):
    """A custom gradient overrides identity's gradient (here doubled)."""
    @custom_gradient.custom_gradient
    def my_identity(x):
      def grad(dresult):
        return [2 * dresult]
      return x, grad
    self.assertAllEqual(backprop.gradients_function(my_identity)(1.0)[0], 2.0)
  def testDifferentiatingFunctionThatReturnsNone(self):
    """Differentiating a function with no return value raises ValueError."""
    def fn(x, y):
      result = x * y  # pylint: disable=unused-variable
    x = constant_op.constant(1)
    y = constant_op.constant(2)
    loss_grads_fn = backprop.implicit_val_and_grad(fn)
    with self.assertRaisesRegex(
        ValueError, 'Cannot differentiate a function that returns None; '
        'did you forget to return a value from fn?'):
      loss_grads_fn(x, y)
    val_and_grads_fn = backprop.val_and_grad_function(fn)
    with self.assertRaisesRegex(
        ValueError, 'Cannot differentiate a function that returns None; '
        'did you forget to return a value from fn?'):
      val_and_grads_fn(x, y)
  def testZerosCacheDoesntLeakAcrossGraphs(self):
    """Zeros cached for backprop in one graph must not leak into another."""
    with ops.Graph().as_default():
      def get_grad():
        with ops.Graph().as_default(), self.cached_session():
          t = constant_op.constant(1, dtype=dtypes.float32, shape=(10, 4))
          x = constant_op.constant(2, dtype=dtypes.float32, shape=(10, 4))
          with backprop.GradientTape() as tape:
            tape.watch(x)
            x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
            y1 = x1**2
            y = array_ops.concat([y1, t], axis=1)
          return self.evaluate(tape.gradient(y, x))
      grad1 = get_grad()
      grad2 = get_grad()
      # Identical computations in fresh graphs must yield identical gradients.
      self.assertAllEqual(grad1, grad2)
  @test_util.run_in_graph_and_eager_modes
  def testSelectivelyWatchVariables(self):
    """watch_accessed_variables=False disables auto-watching; watch() still works."""
    x1 = resource_variable_ops.ResourceVariable(1.0)
    x2 = resource_variable_ops.ResourceVariable(1.0)
    with backprop.GradientTape(watch_accessed_variables=False) as tape:
      tape.watch(x2)
      y = x1**2
      z = x2**3
    self.assertTupleEqual(tape.watched_variables(), (x2,))
    dy, dz = tape.gradient([y, z], [x1, x2])
    self.evaluate([x1.initializer, x2.initializer])
    self.assertIsNone(dy)
    self.assertEqual(self.evaluate(dz), 3.0)
  @test_util.run_in_graph_and_eager_modes
  def testDifferentiatingScalarCache(self):
    """Equal-valued scalar constants must be treated as distinct sources."""
    # In the following test, if x2 = x1 (i.e the objects are the exact same),
    # then y is essentially, 2*x1, and dy/dx1 = 2.
    # When we had a pure scalar cache in eager, this would be the case. This
    # test prevents us from going back to that case.
    with backprop.GradientTape(persistent=False) as g:
      x1 = constant_op.constant(3.0)
      x2 = constant_op.constant(3.0)
      g.watch(x1)
      g.watch(x2)
      y = x1 + x2
    grad = g.gradient(target=y, sources=[x1])
    self.assertEqual(self.evaluate(grad), [1.0])
  def testVariablesAndConstantsProduceTheSameGradients(self):
    """Constant and variable sources of equal value give equal gradients."""
    # In the following test, differentiating [y, z] against [a, b] gives:
    # (dy/da + dz/da, dy/db + dz/db).
    # If a and b are the same constant, dz/da will not be 0 (which it should
    # be).
    # This is solved by using variable since doing a read_value on a tensor will
    # produce a new tensor and corresponding TensorHandle, and not reuse the
    # same tensor (which would happen if we are using a cache and reusing
    # EagerTensor objects).
    def get_grads(a, b):
      with backprop.GradientTape() as tape:
        tape.watch([a, b])
        y = a**3
        z = b**2
      return tape.gradient([y, z], [a, b])
    gradients_constants = get_grads(
        constant_op.constant(2.0), constant_op.constant(2.0))
    gradients_variables = get_grads(
        resource_variable_ops.ResourceVariable(2.0),
        resource_variable_ops.ResourceVariable(2.0))
    self.assertAllEqual(gradients_constants, gradients_variables)
  def testUnknownShapes(self):
    """Tape gradients work for placeholders with fully unknown shape."""
    with ops.Graph().as_default():
      with backprop.GradientTape() as tape:
        a = array_ops.placeholder(dtype=dtypes.float32, shape=None)
        tape.watch(a)
        b = a**3
      db_da = tape.gradient(b, a)
      with self.cached_session() as sess:
        # At a=2: b = 8, db/da = 3a^2 = 12.
        self.assertEqual((8.0, 12.0), sess.run((b, db_da), feed_dict={a: 2.0}))
  @test_util.run_in_graph_and_eager_modes
  def testCustomGradientInEagerAndGraph(self):
    """A custom gradient overrides the default in both execution modes."""
    @custom_gradient.custom_gradient
    def f(x):
      y = x * x
      def grad(dy):
        # Deliberately returns 4*dy instead of the true 2x*dy.
        return [4 * dy]
      return y, grad
    with backprop.GradientTape() as t:
      c = constant_op.constant(1.0)
      t.watch(c)
      g = f(c)
    self.assertAllEqual(self.evaluate(t.gradient(g, c)), 4.0)
  def testOverrideSecondOrderWithCustomGradient(self):
    """Nested custom gradients can override both first and second order."""
    @custom_gradient.custom_gradient
    def f(x):
      def first_order_grad(dz):
        @custom_gradient.custom_gradient
        def first_order_custom(unused_x):
          def h(ddz):
            # Custom second-order gradient.
            return -2.1 * ddz
          # Custom first-order gradient value.
          return -1.1, h
        return dz * first_order_custom(x)
      return x + 10., first_order_grad
    c = constant_op.constant(1.)
    with backprop.GradientTape() as outer:
      outer.watch(c)
      with backprop.GradientTape() as inner:
        inner.watch(c)
        d = f(c)**4.
      dd = inner.gradient(d, c)
      self.assertAllClose(4. * f(c)**3. * -1.1, dd)
    # Product rule over d(dd)/dc using the overridden first/second order.
    self.assertAllClose(3. * 4. * f(c)**2. * -1.1 * -1.1 + 4. * f(c)**3. * -2.1,
                        outer.gradient(dd, c))
  @test_util.run_in_graph_and_eager_modes
  def testCustomGradientForwardprop(self):
    """Custom gradients support second order and transposes w.r.t. output grads."""
    @custom_gradient.custom_gradient
    def f(x):
      z = 2. * tensor_util.constant_value(x)
      def g(dz):
        @custom_gradient.custom_gradient
        def first_order(unused_x, unused_dz):
          def second_order_and_transpose(unused_ddz):
            # Returns (d/dx of first-order, d/d(output_grad) of first-order).
            return 2.2, 3.1
          return 2.1, second_order_and_transpose
        return first_order(x, dz)
      return z, g
    with backprop.GradientTape(persistent=True) as t:
      with backprop.GradientTape() as tt:
        c = constant_op.constant(1.)
        t.watch(c)
        tt.watch(c)
        output_grad = array_ops.ones([])
        t.watch(output_grad)
        output = f(c)
        self.assertAllClose(2., output)
      gc = tt.gradient(output, c, output_gradients=output_grad)
      self.assertAllClose(2.1, gc)
    ggc = t.gradient(gc, c)
    self.assertAllClose(2.2, ggc)
    # Note that executed eagerly this kind of transpose is not efficient. But
    # from a tf.function we could prune out the first-order gradient
    # computation.
    transpose = t.gradient(gc, output_grad)
    self.assertAllClose(3.1, transpose)
  @test_util.run_in_graph_and_eager_modes
  def testWatchBadThing(self):
    """watch() rejects non-Tensor inputs such as numpy arrays."""
    g = backprop.GradientTape()
    with self.assertRaisesRegex(ValueError, 'ndarray'):
      g.watch(np.array(1.))
  def testWatchComposite(self):
    """Test that tape.watch expands composites and watches component Tensors."""
    with backprop.GradientTape() as t:
      values = constant_op.constant([1.0, 2.0], dtypes.float32)
      s = sparse_tensor.SparseTensor(
          indices=[[0, 0], [1, 2]], values=values, dense_shape=[3, 4])
      t.watch(s)
      z = sparse_ops.sparse_reduce_sum_v2(s)
    # Gradient is taken w.r.t. the component values tensor directly.
    result = t.gradient(z, values)
    self.assertAllEqual(result, [1.0, 1.0])
  def testWatchedVariablesAfterNonPersistentGradientCall(self):
    """watched_variables() remains queryable after gradient() on a non-persistent tape."""
    with backprop.GradientTape(persistent=False) as tape:
      x = resource_variable_ops.ResourceVariable(1.0)
      tape.watch(x)
    tape.gradient(x, x)
    self.assertEqual((x,), tape.watched_variables())
  def testWatchedVariablesOnlyHasVariablesFromLastTape(self):
    """Rebinding the tape name tracks only the most recent tape's watches."""
    with backprop.GradientTape(persistent=False) as tape:
      x = resource_variable_ops.ResourceVariable(1.0)
      tape.watch(x)
    with backprop.GradientTape(persistent=False) as tape:
      z = resource_variable_ops.ResourceVariable(2.0)
      tape.watch(z)
    tape.gradient(z, z)
    self.assertEqual((z,), tape.watched_variables())
  def testWatchedVariablesRespectReset(self):
    """reset() clears the watched-variable set; new watches repopulate it."""
    with backprop.GradientTape(persistent=False) as tape:
      x = resource_variable_ops.ResourceVariable(1.0)
      tape.watch(x)
      self.assertEqual((x,), tape.watched_variables())
      tape.reset()
      z = resource_variable_ops.ResourceVariable(2.0)
      tape.watch(z)
      self.assertEqual((z,), tape.watched_variables())
    tape.gradient(z, z)
    self.assertEqual((z,), tape.watched_variables())
  def testNameScope(self):
    """Gradient ops are created under 'gradient_tape/<forward scope>/'."""
    def fn(x):
      with ops.name_scope('my_scope'):
        a = math_ops.cos(x)
        b = math_ops.cos(x)
        return math_ops.add(a, b)
    @def_function.function
    def grad_fn(x):
      return backprop.gradients_function(fn)(x)
    grad_ops = grad_fn.get_concrete_function(
        constant_op.constant(1.0)).graph.get_operations()
    num_sin_ops_found = 0
    for op in grad_ops:
      # d(cos)/dx produces Sin ops; both should carry the scoped name.
      if op.type == 'Sin':
        num_sin_ops_found += 1
        self.assertIn('gradient_tape/my_scope/', op.name)
    self.assertEqual(num_sin_ops_found, 2)
  @test_util.assert_no_new_pyobjects_executing_eagerly()
  def testRecomputeGradWithDifferentShape(self):
    """recompute_grad handles list and dict outputs with mixed shapes."""
    @custom_gradient.recompute_grad
    def outer(x):
      return [x[0] + 1, x[1] + 1]
    x = [
        variables.Variable([1.0, 2.0], name='a'),
        variables.Variable(1.0, name='b')
    ]
    with backprop.GradientTape():
      y = outer(x)
      self.assertAllEqual(y[0], [2.0, 3.0])
      self.assertAllEqual(y[1], 2.0)
    @custom_gradient.recompute_grad
    def outer_dict(x):
      for key in x.keys():
        x[key] = x[key] + 1
      return x
    # Variables are not hashable; use .ref() as dict keys.
    x = {x[0].ref(): x[0], x[1].ref(): x[1]}
    with backprop.GradientTape():
      y = outer_dict(x)
      y = list(y.values())
      self.assertAllEqual(y[0], [2.0, 3.0])
      self.assertAllEqual(y[1], 2.0)
# Verifies recompute_grad composed with three levels of nested tf.functions
# and a while_loop neither crashes nor leaks memory across repeated calls,
# for both values of reduce_retracing.
@parameterized.parameters([(True), (False)])
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testRecomputeGradWithNestedFunctionAndWhileLoop(self, reduce_retracing):
@custom_gradient.recompute_grad
@def_function.function(reduce_retracing=reduce_retracing)
def outer(x):
@def_function.function(reduce_retracing=reduce_retracing)
def middle(y):
@def_function.function(reduce_retracing=reduce_retracing)
def inner(z):
return z + 1
# Loop 10 times, incrementing y by 1 each iteration via inner().
i = constant_op.constant(0.0)
c = lambda y, i: i < 10.
b = lambda y, i: (inner(y), i + 1.0)
y, i = while_loop.while_loop(c, b, [y, i])
return y
return middle(x)
# Repeat the forward pass to let MemoryChecker detect growth trends.
with MemoryChecker() as memory_checker:
for _ in range(5):
x = variables.Variable(1.0, name='x')
with backprop.GradientTape():
y = outer(x)
# 1.0 incremented 10 times by the loop.
self.assertAllEqual(y, 11.0)
memory_checker.report()
memory_checker.assert_no_leak_if_all_possibly_except_one()
| BackpropTest |
python | doocs__leetcode | solution/1900-1999/1971.Find if Path Exists in Graph/Solution3.py | {
"start": 563,
"end": 824
} | class ____:
def validPath(
    self, n: int, edges: List[List[int]], source: int, destination: int
) -> bool:
    """Return True iff `source` and `destination` are connected.

    Unions every edge into a disjoint-set structure, then compares the
    roots of the two endpoints.
    """
    dsu = UnionFind(n)
    for a, b in edges:
        dsu.union(a, b)
    return dsu.find(source) == dsu.find(destination)
| Solution |
python | langchain-ai__langchain | libs/core/langchain_core/tracers/root_listeners.py | {
"start": 599,
"end": 2314
} | class ____(BaseTracer):
"""Tracer that calls listeners on run start, end, and error."""
log_missing_parent = False
"""Whether to log a warning if the parent is missing."""
def __init__(
    self,
    *,
    config: RunnableConfig,
    on_start: Listener | None,
    on_end: Listener | None,
    on_error: Listener | None,
) -> None:
    """Create a tracer that forwards root-run events to the given listeners.

    Args:
        config: The runnable config handed to each listener invocation.
        on_start: Called when the root run starts, if provided.
        on_end: Called when the root run ends without error, if provided.
        on_error: Called when the root run ends with an error, if provided.
    """
    super().__init__(_schema_format="original+chat")
    # Set by the first run observed; later events are filtered against it.
    self.root_id: UUID | None = None
    self.config = config
    self._arg_on_start = on_start
    self._arg_on_end = on_end
    self._arg_on_error = on_error
def _persist_run(self, run: Run) -> None:
# This is a legacy method only called once for an entire run tree
# therefore not useful here
# Intentionally a no-op for this tracer.
pass
def _on_run_create(self, run: Run) -> None:
    """Record the root run and fire the on_start listener once."""
    # Only the very first run observed is treated as the root.
    if self.root_id is None:
        self.root_id = run.id
        if self._arg_on_start is not None:
            call_func_with_variable_args(self._arg_on_start, run, self.config)
def _on_run_update(self, run: Run) -> None:
    """Fire on_end or on_error when the root run finishes."""
    # Ignore every run except the root recorded in _on_run_create.
    if run.id != self.root_id:
        return
    # Success fires on_end; failure fires on_error (either may be unset).
    listener = self._arg_on_end if run.error is None else self._arg_on_error
    if listener is not None:
        call_func_with_variable_args(listener, run, self.config)
| RootListenersTracer |
python | getsentry__sentry | src/sentry/relocation/api/endpoints/abort.py | {
"start": 706,
"end": 2429
} | class ____(Endpoint):
owner = ApiOwner.HYBRID_CLOUD
publish_status = {
# TODO(getsentry/team-ospo#214): Stabilize before GA.
"PUT": ApiPublishStatus.EXPERIMENTAL,
}
permission_classes = (SuperuserOrStaffFeatureFlaggedPermission,)
def put(self, request: Request, relocation_uuid: str) -> Response:
    """
    Immediately aborts an in-progress relocation.
    ``````````````````````````````````````````````````

    This operation differs from the superficially similar `/cancel/` endpoint in that it does
    not attempt to do an orderly teardown, and instead fails the relocation immediately. An
    abrupt shutdown like this could leave data in an unpredictable state, so unless you have a
    very good reason, you should prefer `/cancel/` to `/abort/`, and only use the latter when
    the former fails.

    :pparam string relocation_uuid: a UUID identifying the relocation.

    :auth: required
    """
    logger.info("relocations.abort.put.start", extra={"caller": request.user.id})

    try:
        reloc: Relocation = Relocation.objects.get(uuid=relocation_uuid)
    except Relocation.DoesNotExist:
        raise ResourceDoesNotExist

    # Relocations that already reached a terminal state cannot be aborted.
    terminal = {Relocation.Status.FAILURE.value, Relocation.Status.SUCCESS.value}
    if reloc.status in terminal:
        return Response(
            {"detail": ERR_NOT_ABORTABLE_STATUS},
            status=400,
        )

    # Hard-fail the relocation with an operator-visible reason.
    reloc.status = Relocation.Status.FAILURE.value
    reloc.failure_reason = "This relocation was aborted by an administrator."
    reloc.save()

    return self.respond(serialize(reloc))
| RelocationAbortEndpoint |
python | pypa__setuptools | pkg_resources/__init__.py | {
"start": 74565,
"end": 75870
} | class ____(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path: StrPath) -> None:
# Path to the standalone PKG-INFO file this provider serves.
self.path = path
def _get_metadata_path(self, name):
# Every metadata name maps to the single backing file.
return self.path
def has_metadata(self, name: str) -> bool:
    """Only PKG-INFO exists, and only if the backing file is present."""
    if name != 'PKG-INFO':
        return False
    return os.path.isfile(self.path)
def get_metadata(self, name: str) -> str:
    """Return the PKG-INFO contents; any other name raises ``KeyError``."""
    if name != 'PKG-INFO':
        raise KeyError("No metadata except PKG-INFO is available")

    # Undecodable bytes are replaced so reading never fails; a warning is
    # emitted below if any replacement actually occurred.
    with open(self.path, encoding='utf-8', errors="replace") as stream:
        contents = stream.read()
    self._warn_on_replacement(contents)
    return contents
def _warn_on_replacement(self, metadata) -> None:
    """Warn if undecodable bytes were replaced while reading PKG-INFO."""
    replacement_char = '�'
    if replacement_char not in metadata:
        return
    tmpl = "{self.path} could not be properly decoded in UTF-8"
    msg = tmpl.format(**locals())
    warnings.warn(msg)
def get_metadata_lines(self, name: str) -> Iterator[str]:
# Line-wise view of get_metadata(); raises KeyError for non-PKG-INFO names.
return yield_lines(self.get_metadata(name))
| FileMetadata |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_date.py | {
"start": 931,
"end": 1912
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_date"
# This method implements the core logic for the PandasExecutionEngine
# Pandas engine: element-wise validity check over the column.
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
    # Passing the function directly is equivalent to the lambda wrapper.
    return column.apply(is_valid_date)
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidDate |
python | pydantic__pydantic | pydantic/experimental/pipeline.py | {
"start": 1753,
"end": 2245
class ____:
# Container of allowed values; presumably checked with `in`/`not in` by the
# constraint machinery — NOTE(review): exact use is outside this chunk.
values: Container[Any]
# Closed set of annotation types accepted as constraints: the public
# annotated-types markers plus this module's private predicate forms and
# compiled/str regex patterns.
_ConstraintAnnotation = Union[
annotated_types.Le,
annotated_types.Ge,
annotated_types.Lt,
annotated_types.Gt,
annotated_types.Len,
annotated_types.MultipleOf,
annotated_types.Timezone,
annotated_types.Interval,
annotated_types.Predicate,
# common predicates not included in annotated_types
_Eq,
_NotEq,
_In,
_NotIn,
# regular expressions
Pattern[str],
]
@dataclass(**_slots_frozen)
| _NotIn |
python | scrapy__scrapy | scrapy/linkextractors/lxmlhtml.py | {
"start": 5139,
"end": 10118
} | class ____:
_csstranslator = HTMLTranslator()
# Builds the underlying lxml parser-extractor and normalizes every filter
# option: regexes are compiled, domain/extension collections become sets,
# and CSS selectors are translated to XPath.
def __init__(
self,
allow: _RegexOrSeveral = (),
deny: _RegexOrSeveral = (),
allow_domains: str | Iterable[str] = (),
deny_domains: str | Iterable[str] = (),
restrict_xpaths: str | Iterable[str] = (),
tags: str | Iterable[str] = ("a", "area"),
attrs: str | Iterable[str] = ("href",),
canonicalize: bool = False,
unique: bool = True,
process_value: Callable[[Any], Any] | None = None,
deny_extensions: str | Iterable[str] | None = None,
restrict_css: str | Iterable[str] = (),
strip: bool = True,
restrict_text: _RegexOrSeveral | None = None,
):
# Tag/attr membership tests are bound as predicates for the parser.
tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))
self.link_extractor = LxmlParserLinkExtractor(
tag=partial(operator.contains, tags),
attr=partial(operator.contains, attrs),
unique=unique,
process=process_value,
strip=strip,
canonicalized=not canonicalize,
)
# URL allow/deny patterns, compiled once up front.
self.allow_res: list[re.Pattern[str]] = self._compile_regexes(allow)
self.deny_res: list[re.Pattern[str]] = self._compile_regexes(deny)
self.allow_domains: set[str] = set(arg_to_iter(allow_domains))
self.deny_domains: set[str] = set(arg_to_iter(deny_domains))
# restrict_css is folded into restrict_xpaths via CSS->XPath translation.
self.restrict_xpaths: tuple[str, ...] = tuple(arg_to_iter(restrict_xpaths))
self.restrict_xpaths += tuple(
map(self._csstranslator.css_to_xpath, arg_to_iter(restrict_css))
)
if deny_extensions is None:
deny_extensions = IGNORED_EXTENSIONS
self.canonicalize: bool = canonicalize
# Extensions are stored dot-prefixed for suffix matching.
self.deny_extensions: set[str] = {"." + e for e in arg_to_iter(deny_extensions)}
self.restrict_text: list[re.Pattern[str]] = self._compile_regexes(restrict_text)
@staticmethod
def _compile_regexes(value: _RegexOrSeveral | None) -> list[re.Pattern[str]]:
    """Normalize a regex (or iterable of regexes) to compiled patterns."""
    compiled: list[re.Pattern[str]] = []
    for item in arg_to_iter(value):
        # Already-compiled patterns pass through untouched.
        compiled.append(item if isinstance(item, re.Pattern) else re.compile(item))
    return compiled
def _link_allowed(self, link: Link) -> bool:
    """Apply every configured filter to a single extracted link."""
    url = link.url
    if not _is_valid_url(url):
        return False
    if self.allow_res and not _matches(url, self.allow_res):
        return False
    if self.deny_res and _matches(url, self.deny_res):
        return False
    parsed_url = urlparse(url)
    if self.allow_domains and not url_is_from_any_domain(
        parsed_url, self.allow_domains
    ):
        return False
    if self.deny_domains and url_is_from_any_domain(parsed_url, self.deny_domains):
        return False
    if self.deny_extensions and url_has_any_extension(
        parsed_url, self.deny_extensions
    ):
        return False
    # Finally, the anchor text must match restrict_text (when configured).
    if self.restrict_text and not _matches(link.text, self.restrict_text):
        return False
    return True
def matches(self, url: str) -> bool:
    """Return whether a bare URL passes the domain and regex filters."""
    if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):
        return False
    if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):
        return False
    # An empty allow list accepts everything; an empty deny list rejects
    # nothing — identical to the original generator-or-default dance.
    if self.allow_res and not any(p.search(url) for p in self.allow_res):
        return False
    return not any(p.search(url) for p in self.deny_res)
def _process_links(self, links: list[Link]) -> list[Link]:
    """Filter links through every rule, then delegate final processing."""
    allowed = [link for link in links if self._link_allowed(link)]
    if self.canonicalize:
        for link in allowed:
            link.url = canonicalize_url(link.url)
    return self.link_extractor._process_links(allowed)
def _extract_links(self, *args: Any, **kwargs: Any) -> list[Link]:
# Delegate raw extraction to the underlying lxml-based parser.
return self.link_extractor._extract_links(*args, **kwargs)
def extract_links(self, response: TextResponse) -> list[Link]:
    """Returns a list of :class:`~scrapy.link.Link` objects from the
    specified :class:`response <scrapy.http.Response>`.

    Only links that match the settings passed to the ``__init__`` method of
    the link extractor are returned.

    Duplicate links are omitted if the ``unique`` attribute is set to ``True``,
    otherwise they are returned.
    """
    base_url = get_base_url(response)
    # With restrict_xpaths, only the selected subtrees are scanned;
    # otherwise the whole document is.
    if self.restrict_xpaths:
        selectors = [
            node for xpath in self.restrict_xpaths for node in response.xpath(xpath)
        ]
    else:
        selectors = [response.selector]

    collected = []
    for sel in selectors:
        raw = self._extract_links(sel, response.url, response.encoding, base_url)
        collected.extend(self._process_links(raw))
    if self.link_extractor.unique:
        return unique_list(collected, key=self.link_extractor.link_key)
    return collected
python | psf__black | tests/data/cases/class_methods_new_line.py | {
"start": 1534,
"end": 1645
} | class ____:
class Inner:
    """Intentionally empty nested class."""
# NOTE(review): `deco` is defined outside this chunk; its effect on the
# constructor is not visible here.
@deco
def __init__(self):
pass
| ClassWithDecoInitWithInner |
python | kamyu104__LeetCode-Solutions | Python/maximum-xor-with-an-element-from-array.py | {
"start": 847,
"end": 1492
} | class ____(object):
def maximizeXor(self, nums, queries):
    """
    :type nums: List[int]
    :type queries: List[List[int]]
    :rtype: List[int]

    Offline approach: answer queries in increasing order of their bound m,
    inserting eligible nums into a bitwise trie as the bound grows.
    """
    nums.sort()
    # Widest value decides the trie's bit depth.
    highest = max(nums[-1], max(q[0] for q in queries))
    trie = Trie(highest.bit_length())

    # Stable sort of query indices by bound m (same order as the original
    # sorted(enumerate(queries), key=m)).
    order = sorted(range(len(queries)), key=lambda qi: queries[qi][1])
    answers = [-1] * len(queries)
    inserted = 0
    for qi in order:
        x, m = queries[qi]
        # Feed every num <= m into the trie before answering.
        while inserted < len(nums) and nums[inserted] <= m:
            trie.insert(nums[inserted])
            inserted += 1
        answers[qi] = trie.query(x)
    return answers
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.