language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | walkccc__LeetCode | solutions/1654. Minimum Jumps to Reach Home/1654.py | {
"start": 24,
"end": 78
} | class ____(Enum):
FORWARD = 0
BACKWARD = 1
| Direction |
python | ray-project__ray | doc/source/serve/doc_code/monitoring/logging_config.py | {
"start": 1171,
"end": 1415
} | class ____:
def __call__(self) -> int:
return "hello world"
# __logs_dir_end__
# __enable_access_log_start__
import requests
import logging
from ray import serve
@serve.deployment(logging_config={"enable_access_log": False})
| Model |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 9936,
"end": 10026
} | class ____(VyperException):
"""Interface is not fully implemented."""
| InterfaceViolation |
python | huggingface__transformers | src/transformers/models/dab_detr/modeling_dab_detr.py | {
"start": 25413,
"end": 29378
} | class ____(nn.Module):
def __init__(self, config: DabDetrConfig, is_first: bool = False):
super().__init__()
hidden_size = config.hidden_size
self.cross_attn_query_content_proj = nn.Linear(hidden_size, hidden_size)
self.cross_attn_query_pos_proj = nn.Linear(hidden_size, hidden_size)
self.cross_attn_key_content_proj = nn.Linear(hidden_size, hidden_size)
self.cross_attn_key_pos_proj = nn.Linear(hidden_size, hidden_size)
self.cross_attn_value_proj = nn.Linear(hidden_size, hidden_size)
self.cross_attn_query_pos_sine_proj = nn.Linear(hidden_size, hidden_size)
self.decoder_attention_heads = config.decoder_attention_heads
self.cross_attn_layer_norm = nn.LayerNorm(hidden_size)
self.cross_attn = DabDetrAttention(config, is_cross=True)
self.keep_query_pos = config.keep_query_pos
if not self.keep_query_pos and not is_first:
self.cross_attn_query_pos_proj = None
self.is_first = is_first
self.dropout = config.dropout
def forward(
self,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
query_position_embeddings: Optional[torch.Tensor] = None,
object_queries: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
query_sine_embed: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
):
query_content = self.cross_attn_query_content_proj(hidden_states)
key_content = self.cross_attn_key_content_proj(encoder_hidden_states)
value = self.cross_attn_value_proj(encoder_hidden_states)
batch_size, num_queries, n_model = query_content.shape
_, height_width, _ = key_content.shape
key_pos = self.cross_attn_key_pos_proj(object_queries)
# For the first decoder layer, we add the positional embedding predicted from
# the object query (the positional embedding) into the original query (key) in DETR.
if self.is_first or self.keep_query_pos:
query_pos = self.cross_attn_query_pos_proj(query_position_embeddings)
query = query_content + query_pos
key = key_content + key_pos
else:
query = query_content
key = key_content
query = query.view(
batch_size, num_queries, self.decoder_attention_heads, n_model // self.decoder_attention_heads
)
query_sine_embed = self.cross_attn_query_pos_sine_proj(query_sine_embed)
query_sine_embed = query_sine_embed.view(
batch_size, num_queries, self.decoder_attention_heads, n_model // self.decoder_attention_heads
)
query = torch.cat([query, query_sine_embed], dim=3).view(batch_size, num_queries, n_model * 2)
key = key.view(batch_size, height_width, self.decoder_attention_heads, n_model // self.decoder_attention_heads)
key_pos = key_pos.view(
batch_size, height_width, self.decoder_attention_heads, n_model // self.decoder_attention_heads
)
key = torch.cat([key, key_pos], dim=3).view(batch_size, height_width, n_model * 2)
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.cross_attn(
hidden_states=query,
attention_mask=encoder_attention_mask,
key_states=key,
value_states=value,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.cross_attn_layer_norm(hidden_states)
return hidden_states, cross_attn_weights
| DabDetrDecoderLayerCrossAttention |
python | kamyu104__LeetCode-Solutions | Python/maximum-possible-number-by-binary-concatenation.py | {
"start": 54,
"end": 391
} | class ____(object):
def maxGoodNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return int("".join(sorted(map(lambda x: bin(x)[2:], nums), cmp=lambda x, y: (x+y > y+x)-(x+y < y+x), reverse=True)), 2)
# Time: O(n! * nlogr)
# Space: O(nlogr)
import itertools
# brute force
| Solution |
python | sqlalchemy__sqlalchemy | test/ext/test_hybrid.py | {
"start": 60399,
"end": 68261
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
"""tests against hybrids that return a non-ClauseElement.
use cases derived from the example at
https://techspot.zzzeek.org/2011/10/21/hybrids-and-value-agnostic-types/
"""
__dialect__ = "default"
@classmethod
def setup_test_class(cls):
from sqlalchemy import literal
symbols = ("usd", "gbp", "cad", "eur", "aud")
currency_lookup = {
(currency_from, currency_to): Decimal(str(rate))
for currency_to, values in zip(
symbols,
[
(1, 1.59009, 0.988611, 1.37979, 1.02962),
(0.628895, 1, 0.621732, 0.867748, 0.647525),
(1.01152, 1.6084, 1, 1.39569, 1.04148),
(0.724743, 1.1524, 0.716489, 1, 0.746213),
(0.971228, 1.54434, 0.960166, 1.34009, 1),
],
)
for currency_from, rate in zip(symbols, values)
}
class Amount:
def __init__(self, amount, currency):
self.currency = currency
self.amount = amount
def __add__(self, other):
return Amount(
self.amount + other.as_currency(self.currency).amount,
self.currency,
)
def __sub__(self, other):
return Amount(
self.amount - other.as_currency(self.currency).amount,
self.currency,
)
def __lt__(self, other):
return self.amount < other.as_currency(self.currency).amount
def __gt__(self, other):
return self.amount > other.as_currency(self.currency).amount
def __eq__(self, other):
return self.amount == other.as_currency(self.currency).amount
def as_currency(self, other_currency):
return Amount(
currency_lookup[(self.currency, other_currency)]
* self.amount,
other_currency,
)
def __clause_element__(self):
# helper method for SQLAlchemy to interpret
# the Amount object as a SQL element
if isinstance(self.amount, (float, int, Decimal)):
return literal(self.amount)
else:
return self.amount
def __str__(self):
return "%2.4f %s" % (self.amount, self.currency)
def __repr__(self):
return "Amount(%r, %r)" % (self.amount, self.currency)
Base = declarative_base()
class BankAccount(Base):
__tablename__ = "bank_account"
id = Column(Integer, primary_key=True)
_balance = Column("balance", Numeric)
@hybrid.hybrid_property
def balance(self):
"""Return an Amount view of the current balance."""
return Amount(self._balance, "usd")
@balance.setter
def balance(self, value):
self._balance = value.as_currency("usd").amount
cls.Amount = Amount
cls.BankAccount = BankAccount
def test_instance_one(self):
BankAccount, Amount = self.BankAccount, self.Amount
account = BankAccount(balance=Amount(4000, "usd"))
# 3b. print balance in usd
eq_(account.balance.amount, 4000)
def test_instance_two(self):
BankAccount, Amount = self.BankAccount, self.Amount
account = BankAccount(balance=Amount(4000, "usd"))
# 3c. print balance in gbp
eq_(account.balance.as_currency("gbp").amount, Decimal("2515.58"))
def test_instance_three(self):
BankAccount, Amount = self.BankAccount, self.Amount
account = BankAccount(balance=Amount(4000, "usd"))
# 3d. perform currency-agnostic comparisons, math
is_(account.balance > Amount(500, "cad"), True)
def test_instance_four(self):
BankAccount, Amount = self.BankAccount, self.Amount
account = BankAccount(balance=Amount(4000, "usd"))
eq_(
account.balance + Amount(500, "cad") - Amount(50, "eur"),
Amount(Decimal("4425.316"), "usd"),
)
def test_query_one(self):
BankAccount, Amount = self.BankAccount, self.Amount
session = fixture_session()
query = session.query(BankAccount).filter(
BankAccount.balance == Amount(10000, "cad")
)
self.assert_compile(
query,
"SELECT bank_account.id AS bank_account_id, "
"bank_account.balance AS bank_account_balance "
"FROM bank_account "
"WHERE bank_account.balance = :balance_1",
checkparams={"balance_1": Decimal("9886.110000")},
)
def test_query_two(self):
BankAccount, Amount = self.BankAccount, self.Amount
session = fixture_session()
# alternatively we can do the calc on the DB side.
query = (
session.query(BankAccount)
.filter(
BankAccount.balance.as_currency("cad") > Amount(9999, "cad")
)
.filter(
BankAccount.balance.as_currency("cad") < Amount(10001, "cad")
)
)
self.assert_compile(
query,
"SELECT bank_account.id AS bank_account_id, "
"bank_account.balance AS bank_account_balance "
"FROM bank_account "
"WHERE :balance_1 * bank_account.balance > :param_1 "
"AND :balance_2 * bank_account.balance < :param_2",
checkparams={
"balance_1": Decimal("1.01152"),
"balance_2": Decimal("1.01152"),
"param_1": Decimal("9999"),
"param_2": Decimal("10001"),
},
)
def test_query_three(self):
BankAccount = self.BankAccount
session = fixture_session()
query = session.query(BankAccount).filter(
BankAccount.balance.as_currency("cad")
> BankAccount.balance.as_currency("eur")
)
self.assert_compile(
query,
"SELECT bank_account.id AS bank_account_id, "
"bank_account.balance AS bank_account_balance "
"FROM bank_account "
"WHERE :balance_1 * bank_account.balance > "
":param_1 * :balance_2 * bank_account.balance",
checkparams={
"balance_1": Decimal("1.01152"),
"balance_2": Decimal("0.724743"),
"param_1": Decimal("1.39569"),
},
)
def test_query_four(self):
BankAccount = self.BankAccount
session = fixture_session()
# 4c. query all amounts, converting to "CAD" on the DB side
query = session.query(BankAccount.balance.as_currency("cad").amount)
self.assert_compile(
query,
"SELECT :balance_1 * bank_account.balance AS anon_1 "
"FROM bank_account",
checkparams={"balance_1": Decimal("1.01152")},
)
def test_query_five(self):
BankAccount = self.BankAccount
session = fixture_session()
# 4d. average balance in EUR
query = session.query(func.avg(BankAccount.balance.as_currency("eur")))
self.assert_compile(
query,
"SELECT avg(:balance_1 * bank_account.balance) AS avg_1 "
"FROM bank_account",
checkparams={"balance_1": Decimal("0.724743")},
)
def test_docstring(self):
BankAccount = self.BankAccount
eq_(
BankAccount.balance.__doc__,
"Return an Amount view of the current balance.",
)
| SpecialObjectTest |
python | facebook__pyre-check | client/log/log.py | {
"start": 9722,
"end": 12387
} | class ____:
_should_stop_reading_stream = False
_current_section: Optional[str]
_server_log_pattern: Pattern[str] = re.compile(
r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} (\w+)(.*)"
)
def __init__(self, stream: Iterable[str]) -> None:
self._reader = threading.Thread(target=self._read_stream, args=(stream,))
self._reader.daemon = True
self._current_section = None
def join(self) -> None:
self._reader.join()
def _log_server_stderr_message(self, server_message: str) -> None:
line = server_message.rstrip()
match = self._server_log_pattern.match(line)
if match:
section = match.groups()[0]
message = match.groups()[1]
self._current_section = section
else:
section = self._current_section
message = line
if section == "ERROR":
LOG.error(message)
elif section == "INFO":
LOG.info(message)
elif section == "DUMP":
LOG.warning(message)
elif section == "WARNING":
LOG.warning(message)
elif section == "PROGRESS":
LOG.info(message)
elif section == "PARSER":
LOG.error(message)
elif section is not None:
LOG.debug("[%s] %s", section, message)
else:
LOG.debug(line)
def _read_stream(self, stream: Iterable[str]) -> None:
try:
for line in stream:
if self._should_stop_reading_stream:
return
self._log_server_stderr_message(line)
except Exception:
pass
def __enter__(self) -> "StreamLogger":
self._should_stop_reading_stream = False
self._reader.start()
return self
def __exit__(
self,
_type: Optional[BaseException],
_value: Optional[BaseException],
_traceback: Optional[TracebackType],
) -> None:
self._should_stop_reading_stream = True
def get_yes_no_input(prompt: str) -> bool:
choice = get_input(prompt, suffix=" [Y/n] ")
return choice.lower() in ["", "y", "ye", "yes"]
def get_optional_input(prompt: str, default: str) -> str:
result = get_input(prompt, suffix=f" (Default: `{default}`): ")
if result == "":
return default
return result
def get_input(prompt: str, suffix: str = "") -> str:
LOG.log(PROMPT, prompt + suffix)
return input().strip()
def truncate(message: str, size: int) -> str:
if len(message) <= size:
return message
return f"{message[:size]}..[truncated {len(message) - size} characters]"
| StreamLogger |
python | django__django | django/contrib/postgres/search.py | {
"start": 10021,
"end": 11957
} | class ____(Func):
function = "ts_headline"
template = "%(function)s(%(expressions)s%(options)s)"
output_field = TextField()
def __init__(
self,
expression,
query,
*,
config=None,
start_sel=None,
stop_sel=None,
max_words=None,
min_words=None,
short_word=None,
highlight_all=None,
max_fragments=None,
fragment_delimiter=None,
):
if not hasattr(query, "resolve_expression"):
query = SearchQuery(query)
options = {
"StartSel": start_sel,
"StopSel": stop_sel,
"MaxWords": max_words,
"MinWords": min_words,
"ShortWord": short_word,
"HighlightAll": highlight_all,
"MaxFragments": max_fragments,
"FragmentDelimiter": fragment_delimiter,
}
self.options = {
option: value for option, value in options.items() if value is not None
}
expressions = (expression, query)
if config is not None:
config = SearchConfig.from_parameter(config)
expressions = (config, *expressions)
super().__init__(*expressions)
def as_sql(self, compiler, connection, function=None, template=None):
options_sql = ""
options_params = ()
if self.options:
options_params = (
", ".join(
connection.ops.compose_sql(f"{option}=%s", [value])
for option, value in self.options.items()
),
)
options_sql = ", %s"
sql, params = super().as_sql(
compiler,
connection,
function=function,
template=template,
options=options_sql,
)
return sql, params + options_params
SearchVectorField.register_lookup(SearchVectorExact)
| SearchHeadline |
python | gevent__gevent | src/gevent/tests/test__pool.py | {
"start": 15301,
"end": 15561
} | class ____(gevent.testing.timing.AbstractGenericWaitTestCase):
def wait(self, timeout):
p = gevent.pool.Pool()
g = p.spawn(gevent.sleep, 10)
try:
p.join(timeout=timeout)
finally:
g.kill()
| TestJoinSleep |
python | sympy__sympy | sympy/physics/mechanics/inertia.py | {
"start": 2554,
"end": 6172
} | class ____(namedtuple('Inertia', ['dyadic', 'point'])):
"""Inertia object consisting of a Dyadic and a Point of reference.
Explanation
===========
This is a simple class to store the Point and Dyadic, belonging to an
inertia.
Attributes
==========
dyadic : Dyadic
The dyadic of the inertia.
point : Point
The reference point of the inertia.
Examples
========
>>> from sympy.physics.mechanics import ReferenceFrame, Point, Inertia
>>> N = ReferenceFrame('N')
>>> Po = Point('Po')
>>> Inertia(N.x.outer(N.x) + N.y.outer(N.y) + N.z.outer(N.z), Po)
((N.x|N.x) + (N.y|N.y) + (N.z|N.z), Po)
In the example above the Dyadic was created manually, one can however also
use the ``inertia`` function for this or the class method ``from_tensor`` as
shown below.
>>> Inertia.from_inertia_scalars(Po, N, 1, 1, 1)
((N.x|N.x) + (N.y|N.y) + (N.z|N.z), Po)
"""
__slots__ = ()
def __new__(cls, dyadic, point):
# Switch order if given in the wrong order
if isinstance(dyadic, Point) and isinstance(point, Dyadic):
point, dyadic = dyadic, point
if not isinstance(point, Point):
raise TypeError('Reference point should be of type Point')
if not isinstance(dyadic, Dyadic):
raise TypeError('Inertia value should be expressed as a Dyadic')
return super().__new__(cls, dyadic, point)
@classmethod
def from_inertia_scalars(cls, point, frame, ixx, iyy, izz, ixy=0, iyz=0,
izx=0):
"""Simple way to create an Inertia object based on the tensor values.
Explanation
===========
This class method uses the :func`~.inertia` to create the Dyadic based
on the tensor values.
Parameters
==========
point : Point
The reference point of the inertia.
frame : ReferenceFrame
The frame the inertia is defined in.
ixx : Sympifyable
The xx element in the inertia dyadic.
iyy : Sympifyable
The yy element in the inertia dyadic.
izz : Sympifyable
The zz element in the inertia dyadic.
ixy : Sympifyable
The xy element in the inertia dyadic.
iyz : Sympifyable
The yz element in the inertia dyadic.
izx : Sympifyable
The zx element in the inertia dyadic.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.mechanics import ReferenceFrame, Point, Inertia
>>> ixx, iyy, izz, ixy, iyz, izx = symbols('ixx iyy izz ixy iyz izx')
>>> N = ReferenceFrame('N')
>>> P = Point('P')
>>> I = Inertia.from_inertia_scalars(P, N, ixx, iyy, izz, ixy, iyz, izx)
The tensor values can easily be seen when converting the dyadic to a
matrix.
>>> I.dyadic.to_matrix(N)
Matrix([
[ixx, ixy, izx],
[ixy, iyy, iyz],
[izx, iyz, izz]])
"""
return cls(inertia(frame, ixx, iyy, izz, ixy, iyz, izx), point)
def __add__(self, other):
raise TypeError(f"unsupported operand type(s) for +: "
f"'{self.__class__.__name__}' and "
f"'{other.__class__.__name__}'")
def __mul__(self, other):
raise TypeError(f"unsupported operand type(s) for *: "
f"'{self.__class__.__name__}' and "
f"'{other.__class__.__name__}'")
__radd__ = __add__
__rmul__ = __mul__
| Inertia |
python | pexpect__pexpect | tests/test_socket_pexpect.py | {
"start": 1286,
"end": 2539
} | class ____(PexpectTestCase.PexpectTestCase):
def setUp(self):
print(self.id())
PexpectTestCase.PexpectTestCase.setUp(self)
def test_socket (self):
socket = open_file_socket('TESTDATA.txt')
s = socket_pexpect.SocketSpawn(socket)
s.expect(b'This is the end of test data:')
s.expect(pexpect.EOF)
self.assertEqual(s.before, b' END\n')
def test_maxread (self):
socket = open_file_socket('TESTDATA.txt')
s = socket_pexpect.SocketSpawn(socket)
s.maxread = 100
s.expect('2')
s.expect ('This is the end of test data:')
s.expect (pexpect.EOF)
self.assertEqual(s.before, b' END\n')
def test_socket_isalive (self):
socket = open_file_socket('TESTDATA.txt')
s = socket_pexpect.SocketSpawn(socket)
assert s.isalive()
s.close()
assert not s.isalive(), "Should not be alive after close()"
def test_socket_isatty (self):
socket = open_file_socket('TESTDATA.txt')
s = socket_pexpect.SocketSpawn(socket)
assert not s.isatty()
s.close()
if __name__ == '__main__':
unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(ExpectTestCase)
| ExpectTestCase |
python | huggingface__transformers | src/transformers/models/bridgetower/modeling_bridgetower.py | {
"start": 40048,
"end": 41898
} | class ____(PreTrainedModel):
config: BridgeTowerConfig
base_model_prefix = "bridgetower"
input_modalities = ("image", "text")
supports_gradient_checkpointing = False
_no_split_modules = ["BridgeTowerSelfAttention", "BridgeTowerResidualAttention"]
_skip_keys_device_placement = "past_key_values"
@torch.no_grad()
def _init_weights(self, module: nn.Module):
std = self.config.initializer_factor
if isinstance(module, BridgeTowerVisionTransformer):
proj_std = (self.config.hidden_size**-0.5) * ((2 * self.config.num_hidden_layers) ** -0.5)
attn_std = self.config.hidden_size**-0.5
fc_std = (2 * self.config.hidden_size) ** -0.5
for block in module.transformer.resblocks:
init.normal_(block.attn.in_proj_weight, std=attn_std * std)
init.zeros_(block.attn.in_proj_bias)
init.normal_(block.attn.out_proj.weight, std=proj_std * std)
init.normal_(block.mlp.c_fc.weight, std=fc_std * std)
init.normal_(block.mlp.c_proj.weight, std=proj_std * std)
init.normal_(module.embeddings.class_embedding, std=attn_std * std)
init.normal_(module.embeddings.position_embedding.weight, std=attn_std * std)
elif isinstance(module, (nn.Linear, nn.Conv2d, nn.Embedding)):
init.normal_(module.weight, mean=0.0, std=0.05 * std)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, BridgeTowerForContrastiveLearning):
init.constant_(module.logit_scale, self.config.logit_scale_init_value)
if isinstance(module, (nn.Linear, BridgeTowerMLMHead)) and module.bias is not None:
init.zeros_(module.bias)
| BridgeTowerPreTrainedModel |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-vertex/tests/test_embeddings_vertex.py | {
"start": 626,
"end": 4500
} | class ____(unittest.TestCase):
@patch("vertexai.init")
@patch("vertexai.language_models.TextEmbeddingModel.from_pretrained")
def test_init(self, model_mock: Mock, mock_init: Mock):
mock_cred = Mock(return_value="mock_credentials_instance")
embedding = VertexTextEmbedding(
model_name="textembedding-gecko@001",
project="test-project",
location="us-test-location",
credentials=mock_cred,
embed_mode=VertexEmbeddingMode.RETRIEVAL_MODE,
embed_batch_size=100,
num_workers=2,
)
mock_init.assert_called_once_with(
project="test-project",
location="us-test-location",
credentials=mock_cred,
)
self.assertIsInstance(embedding, BaseEmbedding)
self.assertEqual(embedding.model_name, "textembedding-gecko@001")
self.assertEqual(embedding.embed_mode, VertexEmbeddingMode.RETRIEVAL_MODE)
self.assertEqual(embedding.embed_batch_size, 100)
self.assertEqual(embedding.num_workers, 2)
@patch("vertexai.init")
@patch("vertexai.language_models.TextEmbeddingModel.from_pretrained")
def test_get_embedding_retrieval(self, model_mock: Mock, init_mock: Mock):
model = MagicMock()
model_mock.return_value = model
mock_cred = Mock(return_value="mock_credentials_instance")
embedding = VertexTextEmbedding(
project="test-project",
location="us-test-location",
credentials=mock_cred,
embed_mode=VertexEmbeddingMode.RETRIEVAL_MODE,
additional_kwargs={"auto_truncate": True},
)
model.get_embeddings.return_value = [TextEmbedding(values=[0.1, 0.2, 0.3])]
result = embedding.get_text_embedding("some text")
model.get_embeddings.assert_called_once()
positional_args, keyword_args = model.get_embeddings.call_args
model.get_embeddings.reset_mock()
self.assertEqual(len(positional_args[0]), 1)
self.assertEqual(positional_args[0][0].text, "some text")
self.assertEqual(positional_args[0][0].task_type, "RETRIEVAL_DOCUMENT")
self.assertEqual(result, [0.1, 0.2, 0.3])
self.assertTrue(keyword_args["auto_truncate"])
model.get_embeddings.return_value = [TextEmbedding(values=[0.1, 0.2, 0.3])]
result = embedding.get_query_embedding("some query text")
model.get_embeddings.assert_called_once()
positional_args, keyword_args = model.get_embeddings.call_args
self.assertEqual(len(positional_args[0]), 1)
self.assertEqual(positional_args[0][0].text, "some query text")
self.assertEqual(positional_args[0][0].task_type, "RETRIEVAL_QUERY")
self.assertEqual(result, [0.1, 0.2, 0.3])
self.assertTrue(keyword_args["auto_truncate"])
def test_unsupported_task_type_model(self):
texts = ["text1", "text2"]
for model_name in _UNSUPPORTED_TASK_TYPE_MODEL:
with self.subTest(model_name=model_name):
result = _get_embedding_request(
texts, VertexEmbeddingMode.RETRIEVAL_MODE, False, model_name
)
self.assertTrue(
all(isinstance(item, TextEmbeddingInput) for item in result)
)
self.assertTrue(all(item.task_type is None for item in result))
def test_supported_task_type_model(self):
texts = ["text1", "text2"]
model_name = "textembedding-gecko@003"
result = _get_embedding_request(
texts, VertexEmbeddingMode.RETRIEVAL_MODE, False, model_name
)
self.assertTrue(all(isinstance(item, TextEmbeddingInput) for item in result))
self.assertTrue(all(item.task_type == "RETRIEVAL_DOCUMENT" for item in result))
| VertexTextEmbeddingTest |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/ghostwriter/test_ghostwriter.py | {
"start": 8719,
"end": 16621
} | class ____(UnicodeDecodeError):
pass
@pytest.mark.parametrize(
"exceptions,output",
[
# Discard subclasses of other exceptions to catch, including non-builtins,
# and replace OSError aliases with OSError.
((Exception, UnicodeError), "Exception"),
((UnicodeError, MyError), "UnicodeError"),
((IOError,), "OSError"),
((IOError, UnicodeError), "(OSError, UnicodeError)"),
],
)
def test_exception_deduplication(exceptions, output):
_, body = ghostwriter._make_test_body(
lambda: None,
ghost="",
test_body="pass",
except_=exceptions,
style="pytest",
annotate=False,
)
assert f"except {output}:" in body
def test_run_ghostwriter_roundtrip():
# This test covers the whole lifecycle: first, we get the default code.
# The first argument is unknown, so we fail to draw from st.nothing()
source_code = ghostwriter.roundtrip(json.dumps, json.loads)
with pytest.raises(Unsatisfiable):
get_test_function(source_code)()
# Replacing that nothing() with a strategy for JSON allows us to discover
# two possible failures: `nan` is not equal to itself, and if dumps is
# passed allow_nan=False it is a ValueError to pass a non-finite float.
source_code = source_code.replace(
"st.nothing()",
"st.recursive(st.one_of(st.none(), st.booleans(), st.floats(), st.text()), "
"lambda v: st.lists(v, max_size=2) | st.dictionaries(st.text(), v, max_size=2)"
", max_leaves=2)",
)
s = settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
try:
get_test_function(source_code, settings_decorator=s)()
except (AssertionError, ValueError, BaseExceptionGroup):
pass
# Finally, restricting ourselves to finite floats makes the test pass!
source_code = source_code.replace(
"st.floats()", "st.floats(allow_nan=False, allow_infinity=False)"
)
get_test_function(source_code, settings_decorator=s)()
@varied_excepts
@pytest.mark.parametrize("func", [sorted, timsort])
def test_ghostwriter_idempotent(func, ex):
source_code = ghostwriter.idempotent(func, except_=ex)
test = get_test_function(source_code)
if "=st.nothing()" in source_code:
with pytest.raises(Unsatisfiable):
test()
else:
test()
def test_overlapping_args_use_union_of_strategies():
def f(arg: int) -> None:
pass
def g(arg: float) -> None:
pass
source_code = ghostwriter.equivalent(f, g)
assert "arg=st.one_of(st.integers(), st.floats())" in source_code
def test_module_with_mock_does_not_break():
# Before we added an explicit check for unspec'd mocks, they would pass
# through the initial validation and then fail when used in more detailed
# logic in the ghostwriter machinery.
ghostwriter.magic(unittest.mock)
def compose_types(x: type, y: type):
pass
def test_unrepr_identity_elem():
# Works with inferred identity element
source_code = ghostwriter.binary_operation(compose_types)
exec(source_code, {})
# and also works with explicit identity element
source_code = ghostwriter.binary_operation(compose_types, identity=type)
exec(source_code, {})
@pytest.mark.parametrize(
"strategy, imports",
# The specifics don't matter much here; we're just demonstrating that
# we can walk the strategy and collect all the objects to import.
[
# Lazy from_type() is handled without being unwrapped
(LazyStrategy(from_type, (enum.Enum,), {}), {("enum", "Enum")}),
# Mapped, filtered, and flatmapped check both sides of the method
(
builds(enum.Enum).map(Decimal),
{("enum", "Enum"), ("decimal", "Decimal")},
),
(
builds(enum.Enum).flatmap(Decimal),
{("enum", "Enum"), ("decimal", "Decimal")},
),
(
builds(enum.Enum).filter(Decimal).filter(re.compile),
{("enum", "Enum"), ("decimal", "Decimal"), ("re", "compile")},
),
# one_of() strategies recurse into all the branches
(
builds(enum.Enum) | builds(Decimal) | builds(re.compile),
{("enum", "Enum"), ("decimal", "Decimal"), ("re", "compile")},
),
# and builds() checks the arguments as well as the target
(
builds(enum.Enum, builds(Decimal), kw=builds(re.compile)),
{("enum", "Enum"), ("decimal", "Decimal"), ("re", "compile")},
),
# lists recurse on imports
(
lists(builds(Decimal)),
{("decimal", "Decimal")},
),
# find the needed import for from_regex if needed
(
from_regex(re.compile(".+")),
{"re"},
),
# but don't add superfluous imports
(
from_regex(".+"),
set(),
),
],
)
def test_get_imports_for_strategy(strategy, imports):
assert ghostwriter._imports_for_strategy(strategy) == imports
@pytest.fixture
def temp_script_file():
"""Fixture to yield a Path to a temporary file in the local directory. File name will end
in .py and will include an importable function.
"""
p = Path("my_temp_script.py")
if p.exists():
raise FileExistsError(f"Did not expect {p} to exist during testing")
p.write_text(
dedent(
"""
def say_hello():
print("Hello world!")
"""
),
encoding="utf-8",
)
yield p
p.unlink()
@pytest.fixture
def temp_script_file_with_py_function():
"""Fixture to yield a Path to a temporary file in the local directory. File name will end
in .py and will include an importable function named "py"
"""
p = Path("my_temp_script_with_py_function.py")
if p.exists():
raise FileExistsError(f"Did not expect {p} to exist during testing")
p.write_text(
dedent(
"""
def py():
print('A function named "py" has been called')
"""
),
encoding="utf-8",
)
yield p
p.unlink()
def test_obj_name(temp_script_file, temp_script_file_with_py_function):
# Module paths (strings including a "/") should raise a meaningful UsageError
with pytest.raises(click.exceptions.UsageError) as e:
cli.obj_name("mydirectory/myscript.py")
assert e.match(
"Remember that the ghostwriter should be passed the name of a module, not a path."
)
# Windows paths (strings including a "\") should also raise a meaningful UsageError
with pytest.raises(click.exceptions.UsageError) as e:
cli.obj_name(R"mydirectory\myscript.py")
assert e.match(
"Remember that the ghostwriter should be passed the name of a module, not a path."
)
# File names of modules (strings ending in ".py") should raise a meaningful UsageError
with pytest.raises(click.exceptions.UsageError) as e:
cli.obj_name("myscript.py")
assert e.match(
"Remember that the ghostwriter should be passed the name of a module, not a file."
)
# File names of modules (strings ending in ".py") that exist should get a suggestion
with pytest.raises(click.exceptions.UsageError) as e:
cli.obj_name(str(temp_script_file))
assert e.match(
"Remember that the ghostwriter should be passed the name of a module, not a file."
f"\n\tTry: hypothesis write {temp_script_file.stem}"
)
# File names of modules (strings ending in ".py") that define a py function should succeed
assert isinstance(
cli.obj_name(str(temp_script_file_with_py_function)), FunctionType
)
def test_gets_public_location_not_impl_location():
assert ghostwriter._get_module(assume) == "hypothesis" # not "hypothesis.control"
| MyError |
python | mitmproxy__pdoc | test/testdata/typed_dict.py | {
"start": 100,
"end": 283
} | class ____(Foo, total=False):
"""A TypedDict subclass. Before 3.12, TypedDict botches __mro__."""
b: int
"""Second attribute."""
c: str
# undocumented attribute
| Bar |
python | google__pytype | pytype/overlays/special_builtins.py | {
"start": 10950,
"end": 12009
} | class ____(UnaryPredicate):
"""The callable() function."""
_NAME = "callable"
def _call_predicate(self, node, obj):
return self._is_callable(node, obj)
def _is_callable(self, node, obj):
"""Check if the object is callable.
Args:
node: The given node.
obj: A BaseValue, the arg of a callable() call.
Returns:
(node, result) where result = True if the object is callable,
False if it is not, and None if it is ambiguous.
"""
# NOTE: This duplicates logic in the matcher; if this function gets any
# longer consider calling matcher._match_value_against_type(obj,
# convert.callable) instead.
val = obj.data
if isinstance(val, abstract.AMBIGUOUS_OR_EMPTY):
return node, None
# Classes are always callable.
if isinstance(val, abstract.Class):
return node, True
# Otherwise, see if the object has a __call__ method.
node, ret = self.ctx.attribute_handler.get_attribute(
node, val, "__call__", valself=obj
)
return node, ret is not None
| IsCallable |
python | PyCQA__pylint | tests/functional/a/abstract/abstract_method.py | {
"start": 128,
"end": 365
} | class ____:
def aaaa(self):
"""should be overridden in concrete class"""
raise NotImplementedError()
def bbbb(self):
"""should be overridden in concrete class"""
raise NotImplementedError()
| Abstract |
python | sphinx-doc__sphinx | sphinx/util/inventory.py | {
"start": 8281,
"end": 9383
} | class ____:
"""Inventory data in memory."""
__slots__ = ('data',)
data: dict[str, dict[str, _InventoryItem]]
def __init__(self, data: dict[str, dict[str, _InventoryItem]], /) -> None:
# type -> name -> _InventoryItem
self.data: dict[str, dict[str, _InventoryItem]] = data
def __repr__(self) -> str:
return f'_Inventory({self.data!r})'
def __eq__(self, other: object) -> bool:
if not isinstance(other, _Inventory):
return NotImplemented
return self.data == other.data
def __hash__(self) -> int:
return hash(self.data)
def __getitem__(self, item: tuple[str, str]) -> _InventoryItem:
obj_type, name = item
return self.data.setdefault(obj_type, {})[name]
def __setitem__(self, item: tuple[str, str], value: _InventoryItem) -> None:
obj_type, name = item
self.data.setdefault(obj_type, {})[name] = value
def __contains__(self, item: tuple[str, str]) -> bool:
obj_type, name = item
return obj_type in self.data and name in self.data[obj_type]
| _Inventory |
python | marshmallow-code__marshmallow | tests/test_decorators.py | {
"start": 30090,
"end": 34392
} | class ____:
def __init__(self, nested):
self.nested = nested
example = Example(nested=[Nested(x) for x in range(1)])
@pytest.mark.parametrize(
("data", "expected_data", "expected_original_data"),
([example, {"foo": 0}, example.nested[0]],),
)
def test_decorator_post_dump_with_nested_original_and_pass_collection(
data, expected_data, expected_original_data
):
class NestedSchema(Schema):
foo = fields.Int(required=True)
@post_dump(pass_collection=False, pass_original=True)
def check_pass_original_when_pass_collection_false(
self, data, original_data, **kwargs
):
assert data == expected_data
assert original_data == expected_original_data
return data
@post_dump(pass_collection=True, pass_original=True)
def check_pass_original_when_pass_collection_true(
self, data, original_data, many, **kwargs
):
assert many is True
assert data == [expected_data]
assert original_data == [expected_original_data]
return data
class ExampleSchema(Schema):
nested = fields.Nested(NestedSchema, required=True, many=True)
schema = ExampleSchema()
assert schema.dump(data) == {"nested": [{"foo": 0}]}
@pytest.mark.parametrize(
("data", "expected_data", "expected_original_data"),
([{"nested": [{"foo": 0}]}, {"foo": 0}, {"foo": 0}],),
)
def test_decorator_post_load_with_nested_original_and_pass_collection(
data, expected_data, expected_original_data
):
class NestedSchema(Schema):
foo = fields.Int(required=True)
@post_load(pass_collection=False, pass_original=True)
def check_pass_original_when_pass_collection_false(
self, data, original_data, **kwargs
):
assert data == expected_data
assert original_data == expected_original_data
return data
@post_load(pass_collection=True, pass_original=True)
def check_pass_original_when_pass_collection_true(
self, data, original_data, many, **kwargs
):
assert many is True
assert data == [expected_data]
assert original_data == [expected_original_data]
return data
class ExampleSchema(Schema):
nested = fields.Nested(NestedSchema, required=True, many=True)
schema = ExampleSchema()
assert schema.load(data) == data
@pytest.mark.parametrize("usage_location", ["meta", "init", "load"])
@pytest.mark.parametrize("unknown_val", (EXCLUDE, INCLUDE))
def test_load_processors_receive_unknown(usage_location, unknown_val):
class ExampleSchema(Schema):
foo = fields.Int()
@validates_schema
def check_unknown_validates(self, data, unknown, **kwargs):
assert unknown == unknown_val
@pre_load
def check_unknown_pre(self, data, unknown, **kwargs):
assert unknown == unknown_val
return data
@post_load
def check_unknown_post(self, data, unknown, **kwargs):
assert unknown == unknown_val
return data
if usage_location == "meta":
class ExampleSchemaChild(ExampleSchema):
class Meta:
unknown = unknown_val
ExampleSchemaChild().load({"foo": 42})
if usage_location == "init":
ExampleSchema(unknown=unknown_val).load({"foo": 42})
else:
ExampleSchema().load({"foo": 42}, unknown=unknown_val)
# https://github.com/marshmallow-code/marshmallow/issues/1755
def test_post_load_method_that_appends_to_data():
class MySchema(Schema):
foo = fields.Int()
@post_load(pass_collection=True)
def append_to_data(self, data, **kwargs):
data.append({"foo": 42})
return data
@post_load(pass_collection=False, pass_original=True)
def noop(self, data, original_data, **kwargs):
if original_data is None: # added item
assert data == {"foo": 42}
else:
assert original_data == {"foo": 24}
assert data == {"foo": 24}
return data
schema = MySchema(many=True)
assert schema.load([{"foo": 24}]) == [{"foo": 24}, {"foo": 42}]
| Example |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/deep_learning/optimizers.py | {
"start": 3306,
"end": 3982
} | class ____():
def __init__(self, learning_rate=0.01, rho=0.9):
self.learning_rate = learning_rate
self.Eg = None # Running average of the square gradients at w
self.eps = 1e-8
self.rho = rho
def update(self, w, grad_wrt_w):
# If not initialized
if self.Eg is None:
self.Eg = np.zeros(np.shape(grad_wrt_w))
self.Eg = self.rho * self.Eg + (1 - self.rho) * np.power(grad_wrt_w, 2)
# Divide the learning rate for a weight by a running average of the magnitudes of recent
# gradients for that weight
return w - self.learning_rate * grad_wrt_w / np.sqrt(self.Eg + self.eps)
| RMSprop |
python | huggingface__transformers | src/transformers/models/switch_transformers/modeling_switch_transformers.py | {
"start": 9557,
"end": 10979
} | class ____(nn.Module):
r"""
Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module.
Parameters:
config : ([`SwitchTransformersConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
is_sparse (`bool`):
Whether the MLP layer is a `Sparse` layer (contains a Mixture of Experts) or not
"""
def __init__(self, config: SwitchTransformersConfig, is_sparse=False):
super().__init__()
self.is_sparse = is_sparse
# Check if it is a sparse layer, if not then it is a dense layer
if not self.is_sparse:
self.mlp = SwitchTransformersDenseActDense(config)
else:
self.mlp = SwitchTransformersSparseMLP(config)
self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states, **kwargs):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.mlp(forwarded_states)
output = hidden_states + self.dropout(forwarded_states)
return output
| SwitchTransformersLayerFF |
python | kamyu104__LeetCode-Solutions | Python/create-components-with-same-value.py | {
"start": 2543,
"end": 3299
} | class ____(object):
def componentValue(self, nums, edges):
"""
:type nums: List[int]
:type edges: List[List[int]]
:rtype: int
"""
def dfs(u, p, target):
total = nums[u]
for v in adj[u]:
if v == p:
continue
total += dfs(v, u, target)
return total if total != target else 0
result = 0
adj = [[] for _ in xrange(len(nums))]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
total = sum(nums)
for cnt in reversed(xrange(2, len(nums)+1)):
if total%cnt == 0 and dfs(0, -1, total//cnt) == 0:
return cnt-1
return 0
| Solution3 |
python | weaviate__weaviate-python-client | weaviate/cluster/models.py | {
"start": 2704,
"end": 2814
} | class ____(TypedDict):
shardingState: _ReplicationShardingState
@dataclass
| _ReplicationShardingStateResponse |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/declarative_automation/operators/since_operator.py | {
"start": 1076,
"end": 3386
} | class ____:
"""Convenience class for manipulating metadata relevant to historical SinceCondition evaluations.
Tracks the previous evaluation id and timestamp of the last evaluation where the trigger condition
and reset conditions were true.
"""
trigger_evaluation_id: Optional[int]
trigger_timestamp: Optional[float]
reset_evaluation_id: Optional[int]
reset_timestamp: Optional[float]
@staticmethod
def from_metadata(metadata: Optional[MetadataMapping]) -> "SinceConditionData":
def _get_int(key: str) -> Optional[int]:
metadata_val = metadata.get(key, None) if metadata else None
return metadata_val.value if isinstance(metadata_val, IntMetadataValue) else None
def _get_float(key: str) -> Optional[float]:
metadata_val = metadata.get(key, None) if metadata else None
return metadata_val.value if isinstance(metadata_val, FloatMetadataValue) else None
return SinceConditionData(
trigger_evaluation_id=_get_int("trigger_evaluation_id"),
trigger_timestamp=_get_float("trigger_timestamp"),
reset_evaluation_id=_get_int("reset_evaluation_id"),
reset_timestamp=_get_float("reset_timestamp"),
)
def to_metadata(self) -> Mapping[str, Union[IntMetadataValue, FloatMetadataValue]]:
return dict(
trigger_evaluation_id=IntMetadataValue(self.trigger_evaluation_id),
trigger_timestamp=FloatMetadataValue(self.trigger_timestamp),
reset_evaluation_id=IntMetadataValue(self.reset_evaluation_id),
reset_timestamp=FloatMetadataValue(self.reset_timestamp),
)
def update(
self,
evaluation_id: int,
timestamp: float,
trigger_result: AutomationResult,
reset_result: AutomationResult,
) -> "SinceConditionData":
updated = self
if not trigger_result.true_subset.is_empty:
updated = replace(
updated, trigger_evaluation_id=evaluation_id, trigger_timestamp=timestamp
)
if not reset_result.true_subset.is_empty:
updated = replace(updated, reset_evaluation_id=evaluation_id, reset_timestamp=timestamp)
return updated
@whitelist_for_serdes
@record
| SinceConditionData |
python | fabric__fabric | tests/group.py | {
"start": 731,
"end": 4167
} | class ____:
class init:
"__init__"
def may_be_empty(self):
assert len(Group()) == 0
def takes_splat_arg_of_host_strings(self):
g = Group("foo", "bar")
assert g[0].host == "foo"
assert g[1].host == "bar"
def takes_splat_kwargs_and_passes_them_to_Connections(self):
g = Group("foo", "bar", user="admin", forward_agent=True)
assert g[0].host == "foo"
assert g[0].user == "admin"
assert g[0].forward_agent is True
assert g[1].host == "bar"
assert g[1].user == "admin"
assert g[1].forward_agent is True
class from_connections:
def inits_from_iterable_of_Connections(self):
g = Group.from_connections((Connection("foo"), Connection("bar")))
assert len(g) == 2
assert g[1].host == "bar"
def acts_like_an_iterable_of_Connections(self):
g = Group("foo", "bar", "biz")
assert g[0].host == "foo"
assert g[-1].host == "biz"
assert len(g) == 3
for c in g:
assert isinstance(c, Connection)
@mark.parametrize("method", ALL_METHODS)
def abstract_methods_not_implemented(self, method):
group = Group()
with raises(NotImplementedError):
getattr(group, method)()
class close_and_contextmanager_behavior:
def close_closes_all_member_connections(self):
cxns = [Mock(name=x) for x in ("foo", "bar", "biz")]
g = Group.from_connections(cxns)
g.close()
for c in cxns:
c.close.assert_called_once_with()
def contextmanager_behavior_works_like_Connection(self):
cxns = [Mock(name=x) for x in ("foo", "bar", "biz")]
g = Group.from_connections(cxns)
with g as my_g:
assert my_g is g
for c in cxns:
c.close.assert_called_once_with()
class get:
class local_defaults_to_host_interpolated_path:
def when_no_arg_or_kwarg_given(self):
g = Group("host1", "host2")
g._do = Mock()
g.get(remote="whatever")
g._do.assert_called_with(
"get", remote="whatever", local="{host}/"
)
def not_when_arg_given(self):
g = Group("host1", "host2")
g._do = Mock()
g.get("whatever", "lol")
# No local kwarg passed.
g._do.assert_called_with("get", "whatever", "lol")
def not_when_kwarg_given(self):
g = Group("host1", "host2")
g._do = Mock()
g.get(remote="whatever", local="lol")
# Doesn't stomp given local arg
g._do.assert_called_with("get", remote="whatever", local="lol")
def _make_serial_tester(method, cxns, index, args, kwargs):
args = args[:]
kwargs = kwargs.copy()
def tester(*a, **k): # Don't care about doing anything with our own args.
car, cdr = index, index + 1
predecessors = cxns[:car]
successors = cxns[cdr:]
for predecessor in predecessors:
getattr(predecessor, method).assert_called_with(*args, **kwargs)
for successor in successors:
assert not getattr(successor, method).called
return tester
| Group_ |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/advanced_activations.py | {
"start": 1278,
"end": 2810
} | class ____(Layer):
"""Leaky version of a Rectified Linear Unit.
It allows a small gradient when the unit is not active:
```
f(x) = alpha * x if x < 0
f(x) = x if x >= 0
```
Usage:
>>> layer = tf.keras.layers.LeakyReLU()
>>> output = layer([-3.0, -1.0, 0.0, 2.0])
>>> list(output.numpy())
[-0.9, -0.3, 0.0, 2.0]
>>> layer = tf.keras.layers.LeakyReLU(alpha=0.1)
>>> output = layer([-3.0, -1.0, 0.0, 2.0])
>>> list(output.numpy())
[-0.3, -0.1, 0.0, 2.0]
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the batch axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Args:
alpha: Float >= 0. Negative slope coefficient. Default to 0.3.
"""
def __init__(self, alpha=0.3, **kwargs):
super(LeakyReLU, self).__init__(**kwargs)
if alpha is None:
raise ValueError('The alpha value of a Leaky ReLU layer '
'cannot be None, needs a float. '
'Got %s' % alpha)
self.supports_masking = True
self.alpha = backend.cast_to_floatx(alpha)
def call(self, inputs):
return backend.relu(inputs, alpha=self.alpha)
def get_config(self):
config = {'alpha': float(self.alpha)}
base_config = super(LeakyReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
| LeakyReLU |
python | django__django | tests/prefetch_related/models.py | {
"start": 1901,
"end": 2113
} | class ____(models.Model):
author = models.OneToOneField(
Author,
models.CASCADE,
primary_key=True,
to_field="name",
)
books = models.ManyToManyField(Book, blank=True)
| Bio |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/ScatterPlotItem.py | {
"start": 4847,
"end": 10646
} | class ____(object):
"""
Used to efficiently construct a single QPixmap containing all rendered symbols
for a ScatterPlotItem. This is required for fragment rendering.
Use example:
atlas = SymbolAtlas()
sc1 = atlas[[('o', 5, QPen(..), QBrush(..))]]
sc2 = atlas[[('t', 10, QPen(..), QBrush(..))]]
pm = atlas.pixmap
"""
_idGenerator = itertools.count()
def __init__(self):
self._dpr = 1.0
self.clear()
def __getitem__(self, styles):
"""
Given a list of tuples, (symbol, size, pen, brush), return a list of coordinates of
corresponding symbols within the atlas. Note that these coordinates may change if the atlas is rebuilt.
"""
keys = self._keys(styles)
new = {key: style for key, style in zip(keys, styles) if key not in self._coords}
if new:
self._extend(new)
return list(map(self._coords.__getitem__, keys))
def __len__(self):
return len(self._coords)
def devicePixelRatio(self):
return self._dpr
def setDevicePixelRatio(self, dpr):
self._dpr = dpr
@property
def pixmap(self):
if self._pixmap is None:
self._pixmap = self._createPixmap()
return self._pixmap
@property
def maxWidth(self):
# return the max logical width
return self._maxWidth / self._dpr
def rebuild(self, styles=None):
profiler = debug.Profiler() # noqa: profiler prints on GC
if styles is None:
data = []
else:
keys = set(self._keys(styles))
data = list(self._itemData(keys))
self.clear()
if data:
self._extendFromData(data)
def clear(self):
self._data = np.zeros((0, 0, 4), dtype=np.ubyte) # numpy array of atlas image
self._coords = {}
self._pixmap = None
self._maxWidth = 0
self._totalWidth = 0
self._totalArea = 0
self._pos = (0, 0)
self._rowShape = (0, 0)
def diagnostics(self):
n = len(self)
w, h, _ = self._data.shape
a = self._totalArea
return dict(count=n,
width=w,
height=h,
area=w * h,
area_used=1.0 if n == 0 else a / (w * h),
squareness=1.0 if n == 0 else 2 * w * h / (w**2 + h**2))
def _keys(self, styles):
def getId(obj):
try:
return obj._id
except AttributeError:
obj._id = next(SymbolAtlas._idGenerator)
return obj._id
return [
(symbol if isinstance(symbol, (str, int)) else getId(symbol), size, getId(pen), getId(brush))
for symbol, size, pen, brush in styles
]
def _itemData(self, keys):
for key in keys:
y, x, h, w = self._coords[key]
yield key, self._data[x:x + w, y:y + h]
def _extend(self, styles):
profiler = debug.Profiler()
images = []
data = []
for key, style in styles.items():
img = renderSymbol(*style, dpr=self._dpr)
arr = fn.ndarray_from_qimage(img)
images.append(img) # keep these to delay garbage collection
data.append((key, arr))
profiler('render')
self._extendFromData(data)
profiler('insert')
def _extendFromData(self, data):
self._pack(data)
# expand array if necessary
wNew, hNew = self._minDataShape()
wOld, hOld, _ = self._data.shape
if (wNew > wOld) or (hNew > hOld):
arr = np.zeros((wNew, hNew, 4), dtype=np.ubyte)
arr[:wOld, :hOld] = self._data
self._data = arr
# insert data into array
for key, arr in data:
y, x, h, w = self._coords[key]
self._data[x:x+w, y:y+h] = arr
self._pixmap = None
def _pack(self, data):
# pack each item rectangle as efficiently as possible into a larger, expanding, approximate square
n = len(self)
wMax = self._maxWidth
wSum = self._totalWidth
aSum = self._totalArea
x, y = self._pos
wRow, hRow = self._rowShape
# update packing statistics
for _, arr in data:
w, h, _ = arr.shape
wMax = max(w, wMax)
wSum += w
aSum += w * h
n += len(data)
# maybe expand row width for squareness and to accommodate largest width
wRowEst = int(wSum / (n ** 0.5))
if wRowEst > 2 * wRow:
wRow = wRowEst
wRow = max(wMax, wRow)
# set coordinates by packing along rows
# sort by rectangle height first to improve packing density
for key, arr in sorted(data, key=lambda data: data[1].shape[1]):
w, h, _ = arr.shape
if x + w > wRow:
# move up a row
x = 0
y += hRow
hRow = h
hRow = max(h, hRow)
self._coords[key] = (y, x, h, w)
x += w
self._maxWidth = wMax
self._totalWidth = wSum
self._totalArea = aSum
self._pos = (x, y)
self._rowShape = (wRow, hRow)
def _minDataShape(self):
x, y = self._pos
w, h = self._rowShape
return int(w), int(y + h)
def _createPixmap(self):
profiler = debug.Profiler() # noqa: profiler prints on GC
if self._data.size == 0:
pm = QtGui.QPixmap(0, 0)
else:
img = fn.ndarray_to_qimage(self._data,
QtGui.QImage.Format.Format_ARGB32_Premultiplied)
pm = QtGui.QPixmap.fromImage(img)
return pm
| SymbolAtlas |
python | kamyu104__LeetCode-Solutions | Python/decode-xored-array.py | {
"start": 29,
"end": 312
} | class ____(object):
def decode(self, encoded, first):
"""
:type encoded: List[int]
:type first: int
:rtype: List[int]
"""
result = [first]
for x in encoded:
result.append(result[-1]^x)
return result
| Solution |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vectorizers.py | {
"start": 11308,
"end": 12011
} | class ____(_VectorizerConfigCreate):
vectorizer: Union[Vectorizers, _EnumLikeStr] = Field(
default=Vectorizers.TEXT2VEC_OPENAI, frozen=True, exclude=True
)
baseURL: Optional[AnyHttpUrl]
dimensions: Optional[int]
model: Optional[str]
modelVersion: Optional[str]
type_: Optional[OpenAIType]
vectorizeClassName: bool
def _to_dict(self) -> Dict[str, Any]:
ret_dict = super()._to_dict()
if self.type_ is not None:
ret_dict["type"] = ret_dict.pop("type_")
if self.baseURL is not None:
ret_dict["baseURL"] = self.baseURL.unicode_string()
ret_dict["isAzure"] = False
return ret_dict
| _Text2VecOpenAIConfig |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_test.py | {
"start": 140297,
"end": 160320
} | class ____(test.TestCase):
def setUp(self):
super(VocabularyFileCategoricalColumnTest, self).setUp()
# Contains ints, Golden State Warriors jersey numbers: 30, 35, 11, 23, 22
self._warriors_vocabulary_file_name = test.test_src_dir_path(
'python/feature_column/testdata/warriors_vocabulary.txt')
self._warriors_vocabulary_size = 5
# Contains strings, character names from 'The Wire': omar, stringer, marlo
self._wire_vocabulary_file_name = test.test_src_dir_path(
'python/feature_column/testdata/wire_vocabulary.txt')
self._wire_vocabulary_size = 3
def test_defaults(self):
column = fc._categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
self.assertEqual('aaa', column.name)
self.assertEqual('aaa', column._var_scope_name)
self.assertEqual('aaa', column.key)
self.assertEqual(3, column._num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.string)
}, column._parse_example_spec)
def test_key_should_be_string(self):
with self.assertRaisesRegex(ValueError, 'key must be a string.'):
fc._categorical_column_with_vocabulary_file(
key=('aaa',), vocabulary_file='path_to_file', vocabulary_size=3)
def test_all_constructor_args(self):
column = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file='path_to_file',
vocabulary_size=3,
num_oov_buckets=4,
dtype=dtypes.int32)
self.assertEqual(7, column._num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column._parse_example_spec)
def test_deep_copy(self):
original = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file='path_to_file',
vocabulary_size=3,
num_oov_buckets=4,
dtype=dtypes.int32)
for column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', column.name)
self.assertEqual(7, column._num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column._parse_example_spec)
def test_vocabulary_file_none(self):
with self.assertRaisesRegex(ValueError, 'Missing vocabulary_file'):
fc._categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file=None, vocabulary_size=3)
def test_vocabulary_file_empty_string(self):
with self.assertRaisesRegex(ValueError, 'Missing vocabulary_file'):
fc._categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='', vocabulary_size=3)
def test_invalid_vocabulary_file(self):
with ops.Graph().as_default():
column = fc._categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='file_does_not_exist', vocabulary_size=10)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
with self.assertRaisesRegex(errors.OpError, 'file_does_not_exist'):
with self.cached_session():
lookup_ops.tables_initializer().run()
def test_invalid_vocabulary_size(self):
with self.assertRaisesRegex(ValueError, 'Invalid vocabulary_size'):
fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=-1)
with self.assertRaisesRegex(ValueError, 'Invalid vocabulary_size'):
fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=0)
def test_too_large_vocabulary_size(self):
with ops.Graph().as_default():
column = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size + 1)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
with self.assertRaisesRegex(errors.OpError, 'Invalid vocab_size'):
with self.cached_session():
lookup_ops.tables_initializer().run()
def test_invalid_num_oov_buckets(self):
with self.assertRaisesRegex(ValueError, 'Invalid num_oov_buckets'):
fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file='path',
vocabulary_size=3,
num_oov_buckets=-1)
def test_invalid_dtype(self):
with self.assertRaisesRegex(ValueError, 'dtype must be string or integer'):
fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file='path',
vocabulary_size=3,
dtype=dtypes.float64)
def test_invalid_buckets_and_default_value(self):
with self.assertRaisesRegex(ValueError,
'both num_oov_buckets and default_value'):
fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
num_oov_buckets=100,
default_value=2)
def test_invalid_input_dtype_int32(self):
column = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
dtype=dtypes.string)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(12, 24, 36),
dense_shape=(2, 2))
with self.assertRaisesRegex(ValueError, 'dtype must be compatible'):
column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
def test_invalid_input_dtype_string(self):
column = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._warriors_vocabulary_file_name,
vocabulary_size=self._warriors_vocabulary_size,
dtype=dtypes.int32)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegex(ValueError, 'dtype must be compatible'):
column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
def test_parse_example(self):
a = fc._categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer']))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([a]))
self.assertIn('aaa', features)
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]), self.evaluate(features['aaa']))
def test_get_sparse_tensors(self):
with ops.Graph().as_default():
column = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_none_vocabulary_size(self):
with ops.Graph().as_default():
column = fc._categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file=self._wire_vocabulary_file_name)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array(
(2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_transform_feature(self):
with ops.Graph().as_default():
column = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_tensor = _transform_features({'aaa': inputs}, [column])[column]
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape), self.evaluate(id_tensor))
def test_get_sparse_tensors_weight_collections(self):
column = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
inputs = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
column._get_sparse_tensors(
_LazyBuilder({
'aaa': inputs
}), weight_collections=('my_weights',))
self.assertCountEqual([],
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertCountEqual([], ops.get_collection('my_weights'))
def test_get_sparse_tensors_dense_input(self):
with ops.Graph().as_default():
column = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
id_weight_pair = column._get_sparse_tensors(
_LazyBuilder({
'aaa': (('marlo', ''), ('skywalker', 'omar'))
}))
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=(2, 2)),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_default_value_in_vocabulary(self):
with ops.Graph().as_default():
column = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
default_value=2)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 2, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_with_oov_buckets(self):
with ops.Graph().as_default():
column = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
num_oov_buckets=100)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (1, 2)),
values=('marlo', 'skywalker', 'omar', 'heisenberg'),
dense_shape=(2, 3))
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 33, 0, 62), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_small_vocabulary_size(self):
with ops.Graph().as_default():
# 'marlo' is the last entry in our vocabulary file, so be setting
# `vocabulary_size` to 1 less than number of entries in file, we take
# 'marlo' out of the vocabulary.
column = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size - 1)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((-1, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32(self):
with ops.Graph().as_default():
column = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._warriors_vocabulary_file_name,
vocabulary_size=self._warriors_vocabulary_size,
dtype=dtypes.int32)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=(11, 100, 30, 22),
dense_shape=(3, 3))
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0, 4), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32_dense_input(self):
with ops.Graph().as_default():
default_value = -100
column = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._warriors_vocabulary_file_name,
vocabulary_size=self._warriors_vocabulary_size,
dtype=dtypes.int32,
default_value=default_value)
id_weight_pair = column._get_sparse_tensors(
_LazyBuilder({
'aaa': ((11, -1, -1), (100, 30, -1), (-1, -1, 22))
}))
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=np.array((2, default_value, 0, 4), dtype=np.int64),
dense_shape=(3, 3)),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32_with_oov_buckets(self):
with ops.Graph().as_default():
column = fc._categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._warriors_vocabulary_file_name,
vocabulary_size=self._warriors_vocabulary_size,
dtype=dtypes.int32,
num_oov_buckets=100)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=(11, 100, 30, 22),
dense_shape=(3, 3))
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 60, 0, 4), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_linear_model(self):
wire_column = fc._categorical_column_with_vocabulary_file(
key='wire',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
num_oov_buckets=1)
self.assertEqual(4, wire_column._num_buckets)
with ops.Graph().as_default():
predictions = fc.linear_model({
wire_column.name: sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}, (wire_column,))
bias = get_linear_model_bias()
wire_var = get_linear_model_column_var(wire_column)
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
self.evaluate(wire_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
# 'marlo' -> 2: wire_var[2] = 3
# 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))
def test_keras_linear_model(self):
wire_column = fc._categorical_column_with_vocabulary_file(
key='wire',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
num_oov_buckets=1)
self.assertEqual(4, wire_column._num_buckets)
with ops.Graph().as_default():
predictions = get_keras_linear_model_predictions({
wire_column.name:
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}, (wire_column,))
bias = get_linear_model_bias()
wire_var = get_linear_model_column_var(wire_column)
with _initialized_session():
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
self.evaluate(wire_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
# 'marlo' -> 2: wire_var[2] = 3
# 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))
| VocabularyFileCategoricalColumnTest |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/patchelf/package.py | {
"start": 228,
"end": 944
} | class ____(AutotoolsPackage):
"""PatchELF is a small utility to modify the dynamic linker and RPATH of
ELF executables."""
homepage = "https://nixos.org/patchelf.html"
url = "https://nixos.org/releases/patchelf/patchelf-0.10/patchelf-0.10.tar.gz"
list_url = "https://nixos.org/releases/patchelf/"
list_depth = 1
version("0.10", sha256="b2deabce05c34ce98558c0efb965f209de592197b2c88e930298d740ead09019")
version("0.9", sha256="f2aa40a6148cb3b0ca807a1bf836b081793e55ec9e5540a5356d800132be7e0a")
version("0.8", sha256="14af06a2da688d577d64ff8dac065bb8903bbffbe01d30c62df7af9bf4ce72fe")
def install(self, spec, prefix):
install_tree(self.stage.source_path, prefix)
| Patchelf |
python | kevin1024__vcrpy | vcr/stubs/aiohttp_stubs.py | {
"start": 556,
"end": 639
} | class ____(asyncio.StreamReader, streams.AsyncStreamReaderMixin):
pass
| MockStream |
python | pytorch__pytorch | test/functorch/test_aot_joint_with_descriptors.py | {
"start": 24140,
"end": 54202
} | class ____(torch.nn.Module):
def forward(
self,
primals,
tangents,
):
primals_1: "f32[2, 3]" # ParamAOTInput(target='linear1.weight')
primals_2: "f32[2]" # ParamAOTInput(target='linear1.bias')
primals_3: "f32[4, 3]" # ParamAOTInput(target='linear2.weight')
primals_4: "f32[4]" # ParamAOTInput(target='linear2.bias')
primals_5: "f32[4, 3]" # PlainAOTInput(idx=0)
tangents_1: "f32[4, 2]" # TangentAOTInput(output=PlainAOTOutput(idx=0))
tangents_2: "f32[4, 4]" # TangentAOTInput(output=PlainAOTOutput(idx=1))
primals_1, primals_2, primals_3, primals_4, primals_5, tangents_1, tangents_2, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec)
transpose: "f32[3, 2]" = torch.ops.prims.transpose.default(primals_1, [1, 0]); primals_1 = None
mm: "f32[4, 2]" = torch.ops.aten.mm.default(primals_5, transpose); transpose = None
mul: "f32[4, 2]" = torch.ops.prims.mul.default(mm, 1.0); mm = None
mul_1: "f32[2]" = torch.ops.prims.mul.default(primals_2, 1.0); primals_2 = None
broadcast_in_dim: "f32[4, 2]" = torch.ops.prims.broadcast_in_dim.default(mul_1, [4, 2], [1]); mul_1 = None
add: "f32[4, 2]" = torch.ops.prims.add.default(mul, broadcast_in_dim); mul = broadcast_in_dim = None
transpose_1: "f32[3, 4]" = torch.ops.prims.transpose.default(primals_3, [1, 0]); primals_3 = None
mm_1: "f32[4, 4]" = torch.ops.aten.mm.default(primals_5, transpose_1); transpose_1 = None
mul_2: "f32[4, 4]" = torch.ops.prims.mul.default(mm_1, 1.0); mm_1 = None
mul_3: "f32[4]" = torch.ops.prims.mul.default(primals_4, 1.0); primals_4 = None
broadcast_in_dim_1: "f32[4, 4]" = torch.ops.prims.broadcast_in_dim.default(mul_3, [4, 4], [1]); mul_3 = None
add_1: "f32[4, 4]" = torch.ops.prims.add.default(mul_2, broadcast_in_dim_1); mul_2 = broadcast_in_dim_1 = None
transpose_2: "f32[4, 4]" = torch.ops.prims.transpose.default(tangents_2, [1, 0])
mm_2: "f32[4, 3]" = torch.ops.aten.mm.default(transpose_2, primals_5); transpose_2 = None
transpose_3: "f32[3, 4]" = torch.ops.prims.transpose.default(mm_2, [1, 0]); mm_2 = None
sum_1: "f32[4]" = torch.ops.prims.sum.default(tangents_2, [0]); tangents_2 = None
broadcast_in_dim_2: "f32[1, 4]" = torch.ops.prims.broadcast_in_dim.default(sum_1, [1, 4], [1]); sum_1 = None
as_strided: "f32[4]" = torch.ops.aten.as_strided.default(broadcast_in_dim_2, [4], [1]); broadcast_in_dim_2 = None
transpose_4: "f32[4, 3]" = torch.ops.prims.transpose.default(transpose_3, [1, 0]); transpose_3 = None
transpose_5: "f32[2, 4]" = torch.ops.prims.transpose.default(tangents_1, [1, 0])
mm_3: "f32[2, 3]" = torch.ops.aten.mm.default(transpose_5, primals_5); transpose_5 = primals_5 = None
transpose_6: "f32[3, 2]" = torch.ops.prims.transpose.default(mm_3, [1, 0]); mm_3 = None
sum_2: "f32[2]" = torch.ops.prims.sum.default(tangents_1, [0]); tangents_1 = None
broadcast_in_dim_3: "f32[1, 2]" = torch.ops.prims.broadcast_in_dim.default(sum_2, [1, 2], [1]); sum_2 = None
as_strided_1: "f32[2]" = torch.ops.aten.as_strided.default(broadcast_in_dim_3, [2], [1]); broadcast_in_dim_3 = None
transpose_7: "f32[2, 3]" = torch.ops.prims.transpose.default(transpose_6, [1, 0]); transpose_6 = None
return pytree.tree_unflatten([
add, # PlainAOTOutput(idx=0)
add_1, # PlainAOTOutput(idx=1)
transpose_7, # GradAOTOutput(grad_of=ParamAOTInput(target='linear1.weight'))
as_strided_1, # GradAOTOutput(grad_of=ParamAOTInput(target='linear1.bias'))
transpose_4, # GradAOTOutput(grad_of=ParamAOTInput(target='linear2.weight'))
as_strided, # GradAOTOutput(grad_of=ParamAOTInput(target='linear2.bias'))
None, # None
], self._out_spec)
""", # noqa: B950
)
# Compile the result
parallel_model_fn = aot_compile_joint_with_descriptors(
joint_with_descriptors
)
# Test functional correctness
expected_output = model(*inputs)
actual_output = parallel_model_fn(
*dict(model.named_parameters()).values(), *inputs
)
# Check both outputs
self.assertEqual(len(expected_output), len(actual_output))
for exp, act in zip(expected_output, actual_output):
self.assertEqual(exp, act)
def test_in_out_specs(self):
"""Test that in_spec and out_spec are properly set"""
class SimpleModule(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 2)
def forward(self, x):
return self.linear(x)
model = SimpleModule()
inputs = (torch.randn(4, 3),)
with ExitStack() as stack:
# Export joint with descriptors
joint_with_descriptors = aot_export_joint_with_descriptors(
stack, model, inputs, decompositions=decomposition_table
)
# Test that specs are available
self.assertIsNotNone(joint_with_descriptors.in_spec)
self.assertIsNotNone(joint_with_descriptors.out_spec)
self.assertIsNotNone(joint_with_descriptors.params_spec)
self.assertIsNotNone(joint_with_descriptors.buffers_spec)
# Test that they work with pytree operations
flat_inputs, _ = pytree.tree_flatten((inputs, {}))
self.assertTrue(len(flat_inputs) > 0)
# Test parameter and buffer specs contain expected entries
self.assertIn("linear.weight", joint_with_descriptors.params_spec)
self.assertIn("linear.bias", joint_with_descriptors.params_spec)
self.assertEqual(
len(joint_with_descriptors.buffers_spec), 0
) # No buffers in simple linear
# Compile the result to ensure everything works together
parallel_model_fn = aot_compile_joint_with_descriptors(
joint_with_descriptors
)
# Test functional correctness
expected_output = model(*inputs)
actual_output = parallel_model_fn(
*dict(model.named_parameters()).values(), *inputs
)
self.assertEqual(expected_output, actual_output)
def test_fx_utils_simple_linear(self):
"""Test FX utilities on a simple linear module"""
class SimpleLinear(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 2)
def forward(self, x):
return self.linear(x)
model = SimpleLinear()
inputs = (torch.randn(4, 3),)
with ExitStack() as stack:
joint_with_descriptors = aot_export_joint_with_descriptors(
stack, model, inputs, decompositions=decomposition_table
)
graph = joint_with_descriptors.graph_module.graph
# Test get_named_param_nodes
named_params = get_named_param_nodes(graph)
self.assertIn("linear.weight", named_params)
self.assertIn("linear.bias", named_params)
self.assertEqual(len(named_params), 2)
# Test get_param_nodes
param_nodes = get_param_nodes(graph)
self.assertEqual(len(param_nodes), 2)
# Test get_named_buffer_nodes (should be empty for simple linear)
named_buffers = get_named_buffer_nodes(graph)
self.assertEqual(len(named_buffers), 0)
# Test get_buffer_nodes
buffer_nodes = get_buffer_nodes(graph)
self.assertEqual(len(buffer_nodes), 0)
# Test get_all_input_and_grad_nodes
input_grad_nodes = get_all_input_and_grad_nodes(graph)
self.assertEqual(len(input_grad_nodes), 4) # 2 params + 1 input + 1 tangent
# Verify that parameters have gradients
param_grads = get_param_and_grad_nodes(graph)
self.assertEqual(len(param_grads), 2)
for desc, (param_node, grad_node) in param_grads.items():
self.assertIsInstance(desc, ParamAOTInput)
self.assertIsNotNone(param_node)
self.assertIsNotNone(grad_node) # Should have gradients
# Test get_plain_input_and_grad_nodes
plain_input_grads = get_plain_input_and_grad_nodes(graph)
self.assertEqual(len(plain_input_grads), 1) # 1 plain input
for desc, (input_node, grad_node) in plain_input_grads.items():
self.assertIsInstance(desc, PlainAOTInput)
self.assertIsNotNone(input_node)
self.assertIsNone(grad_node) # Plain inputs don't have gradients
# Test get_all_output_and_tangent_nodes
output_tangent_nodes = get_all_output_and_tangent_nodes(graph)
self.assertEqual(len(output_tangent_nodes), 3) # 1 output + 2 grad outputs
# Test get_plain_output_and_tangent_nodes
plain_output_tangents = get_plain_output_and_tangent_nodes(graph)
self.assertEqual(len(plain_output_tangents), 1)
for desc, (output_node, tangent_node) in plain_output_tangents.items():
self.assertIsInstance(desc, PlainAOTOutput)
self.assertIsNotNone(output_node)
self.assertIsNotNone(
tangent_node
) # Should have tangents for backward pass
def test_fx_utils_conv_bn_module(self):
"""Test FX utilities on a conv+batchnorm module with buffers"""
class ConvBN(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 3, 3, padding=1)
self.bn = nn.BatchNorm2d(3)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return torch.relu(x)
model = ConvBN()
model.train() # Important for batch norm
inputs = (torch.randn(2, 1, 4, 4),)
with ExitStack() as stack:
joint_with_descriptors = aot_export_joint_with_descriptors(
stack, model, inputs, decompositions=decomposition_table
)
graph = joint_with_descriptors.graph_module.graph
# Test get_named_param_nodes
named_params = get_named_param_nodes(graph)
expected_params = ["conv.weight", "conv.bias", "bn.weight", "bn.bias"]
for param_name in expected_params:
self.assertIn(param_name, named_params)
self.assertEqual(len(named_params), 4)
# Test get_named_buffer_nodes
named_buffers = get_named_buffer_nodes(graph)
expected_buffers = [
"bn.running_mean",
"bn.running_var",
"bn.num_batches_tracked",
]
for buffer_name in expected_buffers:
self.assertIn(buffer_name, named_buffers)
self.assertEqual(len(named_buffers), 3)
# Test get_buffer_nodes
buffer_nodes = get_buffer_nodes(graph)
self.assertEqual(len(buffer_nodes), 3)
# Test that all inputs include params, buffers, and plain inputs
input_grad_nodes = get_all_input_and_grad_nodes(graph)
self.assertEqual(
len(input_grad_nodes), 9
) # 4 params + 3 buffers + 1 input + 1 tangent
# Verify buffer handling
buffer_count = 0
for desc, (node, _grad_node) in input_grad_nodes.items():
if isinstance(desc, BufferAOTInput):
buffer_count += 1
self.assertIsNotNone(node)
# Buffers typically don't have gradients unless they're trainable
self.assertEqual(buffer_count, 3)
def test_fx_utils_multiple_outputs(self):
"""Test FX utilities on a module with multiple outputs"""
class MultiOutputModule(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(3, 2)
self.linear2 = nn.Linear(3, 4)
def forward(self, x):
out1 = self.linear1(x)
out2 = self.linear2(x)
return out1, out2
model = MultiOutputModule()
inputs = (torch.randn(4, 3),)
with ExitStack() as stack:
joint_with_descriptors = aot_export_joint_with_descriptors(
stack, model, inputs, decompositions=decomposition_table
)
graph = joint_with_descriptors.graph_module.graph
# Test get_all_output_and_tangent_nodes
output_tangent_nodes = get_all_output_and_tangent_nodes(graph)
self.assertEqual(len(output_tangent_nodes), 6) # 2 outputs + 4 grad outputs
# Test get_plain_output_and_tangent_nodes
plain_output_tangents = get_plain_output_and_tangent_nodes(graph)
self.assertEqual(len(plain_output_tangents), 2)
# Verify each output has a tangent
for desc, (output_node, tangent_node) in plain_output_tangents.items():
self.assertIsInstance(desc, PlainAOTOutput)
self.assertIsNotNone(output_node)
self.assertIsNotNone(tangent_node)
# Test parameter handling with multiple outputs
param_grads = get_param_and_grad_nodes(graph)
self.assertEqual(len(param_grads), 4) # 2 weights + 2 biases
# All parameters should have gradients
for desc, (param_node, grad_node) in param_grads.items():
self.assertIsInstance(desc, ParamAOTInput)
self.assertIsNotNone(param_node)
self.assertIsNotNone(grad_node)
def test_fx_utils_node_consistency(self):
"""Test that FX utilities return consistent node references"""
class SimpleModule(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 2)
def forward(self, x):
return self.linear(x)
model = SimpleModule()
inputs = (torch.randn(4, 3),)
with ExitStack() as stack:
joint_with_descriptors = aot_export_joint_with_descriptors(
stack, model, inputs, decompositions=decomposition_table
)
graph = joint_with_descriptors.graph_module.graph
# Get nodes through different APIs and verify consistency
named_params = get_named_param_nodes(graph)
param_nodes = get_param_nodes(graph)
param_grads = get_param_and_grad_nodes(graph)
all_input_grads = get_all_input_and_grad_nodes(graph)
# Check that get_param_nodes returns the same nodes as get_named_param_nodes
self.assertEqual(len(param_nodes), len(named_params))
for node in param_nodes:
self.assertIn(node, named_params.values())
# Check that param_grads contains the same parameter nodes
for desc, (param_node, _grad_node) in param_grads.items():
self.assertIn(param_node, param_nodes)
self.assertEqual(param_node, named_params[desc.target])
# Check that all_input_grads contains the parameter nodes
param_count = 0
for desc, (input_node, _grad_node) in all_input_grads.items():
if isinstance(desc, ParamAOTInput):
param_count += 1
self.assertIn(input_node, param_nodes)
self.assertEqual(input_node, named_params[desc.target])
self.assertEqual(param_count, len(param_nodes))
def test_export_and_compile(self):
class SimpleModule(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 2)
def forward(self, x):
return self.linear(x)
model = SimpleModule()
inputs = (torch.randn(4, 3),)
with ExitStack() as stack:
joint_with_descriptors = aot_export_joint_with_descriptors(
stack, model, inputs
)
model_fn = aot_compile_joint_with_descriptors(joint_with_descriptors)
compiled_fn = torch.compile(fullgraph=True)(model_fn)
compiled_fn(*dict(model.named_parameters()).values(), inputs).sum().backward()
self.assertIsNotNone(model.linear.weight.grad)
def test_preserve_annotate_simple(self):
"""Test basic linear module with aot_export_joint_with_descriptors"""
class SimpleLinear(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 2)
def forward(self, x):
with fx_traceback.annotate({"pp_stage": 0}):
y = self.linear(x)
return y - 1
inputs = (torch.randn(4, 3),)
model = SimpleLinear()
for with_export in [True, False]:
graph_module = graph_capture(model, inputs, with_export)
custom_metadata = fx_traceback._get_custom_metadata(graph_module)
self.assertExpectedInline(
str(custom_metadata),
"""\
('call_function', 't', {'pp_stage': 0})
('call_function', 'addmm', {'pp_stage': 0})
('call_function', 't_1', {'pp_stage': 0})
('call_function', 'mm', {'pp_stage': 0})
('call_function', 't_2', {'pp_stage': 0})
('call_function', 'sum_1', {'pp_stage': 0})
('call_function', 'view', {'pp_stage': 0})
('call_function', 't_3', {'pp_stage': 0})""",
)
@requires_cuda
def test_preserve_annotate_flex_attention(self):
def score_mod(score, b, h, m, n):
return score
def _get_block_causal_mask_mod(seq_idx):
def block_causal_mask(b, h, q_idx, kv_idx):
# must use this more complicated mask_mod so autograd seq_nr increases
return (seq_idx[b, q_idx] == seq_idx[b, kv_idx]) & (q_idx >= kv_idx)
return block_causal_mask
a = 12
b = 24
batch_size = 2
seqlen = a * b
device = "cuda"
# Create seq_idx tensor - maps each position to a document/sequence ID
# Example: Split sequence into 2 documents for each batch
# First half (0:384) belongs to document 0, second half (384:768) to document 1
seq_idx = torch.zeros(batch_size, seqlen, dtype=torch.int32, device=device)
seq_idx[:, seqlen // 2 :] = 1 # Second half belongs to document 1
# Get the mask_mod function with seq_idx captured in closure
mask_mod = _get_block_causal_mask_mod(seq_idx)
# Create block_mask with the mask_mod function (which only takes 4 args)
# Note: We don't compile create_block_mask itself, just flex_attention
block_mask = create_block_mask(mask_mod, None, None, seqlen, seqlen)
class FlexAttentionModule(torch.nn.Module):
"""Flex attention submodule similar to the sdpa in Llama3 Attention"""
def forward(self, xq, xk, xv):
"""
Args:
xq: Query tensor (bs, n_heads, seqlen, head_dim)
xk: Key tensor (bs, n_heads, seqlen, head_dim)
xv: Value tensor (bs, n_heads, seqlen, head_dim)
Returns:
Output tensor (bs, n_heads, seqlen, head_dim)
"""
with fx_traceback.annotate({"compile_with_inductor": "flex_attention"}):
output = flex_attention(
xq, xk, xv, block_mask=block_mask, score_mod=score_mod
)
return output
# Model configuration
n_heads = 4
head_dim = 64
# Create input tensors in the shape expected by FlexAttentionModule
# Shape: (bs, n_heads, seqlen, head_dim)
xq = torch.randn(
batch_size, n_heads, seqlen, head_dim, requires_grad=True, device=device
)
xk = torch.randn(
batch_size, n_heads, seqlen, head_dim, requires_grad=True, device=device
)
xv = torch.randn(
batch_size, n_heads, seqlen, head_dim, requires_grad=True, device=device
)
model = FlexAttentionModule().to(device)
inputs = (xq, xk, xv)
gm = graph_capture(model, inputs, with_export=True)
custom_metadata = fx_traceback._get_custom_metadata(gm)
# not using assertExpectedInline because some CI runs has fewer detach nodes in graph
# than other CI runs, so we can't use a fixed string to compare against
self.assertTrue(
"('get_attr', 'sdpa_score0', {'compile_with_inductor': 'flex_attention'})"
in custom_metadata
)
self.assertTrue(
"('get_attr', 'sdpa_mask0', {'compile_with_inductor': 'flex_attention'})"
in custom_metadata
)
self.assertTrue(
"('call_function', 'flex_attention', {'compile_with_inductor': 'flex_attention'})"
in custom_metadata
)
self.assertTrue(
"('get_attr', 'fw_graph0', {'compile_with_inductor': 'flex_attention'})"
in custom_metadata
)
self.assertTrue(
"('get_attr', 'joint_graph0', {'compile_with_inductor': 'flex_attention'})"
in custom_metadata
)
self.assertTrue(
"('get_attr', 'mask_graph0', {'compile_with_inductor': 'flex_attention'})"
in custom_metadata
)
self.assertTrue(
"('call_function', 'flex_attention_backward', {'compile_with_inductor': 'flex_attention'})"
in custom_metadata
)
def test_preserve_annotate_function(self):
"""Test basic annotate_fn usage"""
@fx_traceback.annotate_fn({"pp_stage": 1})
def example_function(x):
return x * x
class SimpleLinear(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 2)
def forward(self, x):
with fx_traceback.annotate({"pp_stage": 0}):
y = self.linear(x)
y = example_function(y)
return y - 1
inputs = (torch.randn(4, 3),)
model = SimpleLinear()
for with_export in [True, False]:
graph_module = graph_capture(model, inputs, with_export)
custom_metadata = fx_traceback._get_custom_metadata(graph_module)
self.assertExpectedInline(
str(custom_metadata),
"""\
('call_function', 't', {'pp_stage': 0})
('call_function', 'addmm', {'pp_stage': 0})
('call_function', 'mul', {'pp_stage': 1})
('call_function', 'mul_1', {'pp_stage': 1})
('call_function', 'mul_2', {'pp_stage': 1})
('call_function', 't_1', {'pp_stage': 0})
('call_function', 'mm', {'pp_stage': 0})
('call_function', 't_2', {'pp_stage': 0})
('call_function', 'sum_1', {'pp_stage': 0})
('call_function', 'view', {'pp_stage': 0})
('call_function', 't_3', {'pp_stage': 0})""",
)
@skipIfCrossRef
def test_custom_op_stack_trace(self):
@torch.library.custom_op("my_lib::foo", mutates_args={})
def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y
@foo.register_fake
def foo_fake_impl(x, y):
return torch.empty_like(x)
def foo_setup_context(ctx, inputs, output):
pass
def foo_backward(ctx, grad_output):
return grad_output, grad_output
foo.register_autograd(foo_backward, setup_context=foo_setup_context)
class CustomOpModule(torch.nn.Module):
def forward(self, x, y):
return foo(x, y)
model = CustomOpModule()
inputs = (torch.randn(4, 3), torch.randn(4, 3))
gm = graph_capture(model, inputs, with_export=True)
foo_node = None
for node in gm.graph.nodes:
if node.op == "call_function" and node.name == "foo":
foo_node = node
break
self.assertTrue(foo_node is not None)
self.assertTrue("return foo(x, y)" in foo_node.meta.get("stack_trace", None))
self.assertTrue("return foo(x, y)" in gm.print_readable(print_output=False))
self.assertFalse("self._opoverload" in foo_node.meta.get("stack_trace", None))
self.assertFalse("self._opoverload" in gm.print_readable(print_output=False))
def test_preserve_annotate_replay_view(self):
"""Test stack trace and annotation are correct on nodes regenerated in functionalization"""
def _unpermute(out, input_shape, permuted_indices):
"""
Unpermute operation from torchtitan MoE utils.
"""
out_unpermuted = out.new_empty(input_shape)
out_unpermuted[permuted_indices, :] = out
out = out_unpermuted[:-1]
return out
class Module(nn.Module):
def __init__(self):
super().__init__()
self.input_shape = (5, 3)
self.permuted_indices = torch.tensor([2, 0, 3, 1])
def forward(self, x):
with fx_traceback.annotate({"pp_stage": 0}):
routed_output = _unpermute(
x, self.input_shape, self.permuted_indices
)
return routed_output.cos()
inputs = (torch.randn(4, 3, requires_grad=True),)
model = Module()
graph_module = graph_capture(model, inputs, True)
custom_metadata = fx_traceback._get_custom_metadata(graph_module)
slice_nodes = graph_module.graph.find_nodes(
op="call_function", target=torch.ops.aten.slice.Tensor
)
self.assertEqual(len(slice_nodes), 1)
slice_backward_nodes = graph_module.graph.find_nodes(
op="call_function", target=torch.ops.aten.slice_backward.default
)
self.assertEqual(len(slice_backward_nodes), 1)
slice_node = slice_nodes[0]
slice_backward_node = slice_backward_nodes[0]
self.assertEqual(slice_node.meta["seq_nr"], slice_backward_node.meta["seq_nr"])
self.assertTrue("out = out_unpermuted[:-1]" in slice_node.meta["stack_trace"])
self.assertExpectedInline(
str(custom_metadata),
"""\
('call_function', 'new_empty', {'pp_stage': 0})
('get_attr', '_tensor_constant0', {'pp_stage': 0})
('call_function', 'index_put', {'pp_stage': 0})
('call_function', 'slice_2', {'pp_stage': 0})
('call_function', 'slice_backward', {'pp_stage': 0})
('get_attr', '_tensor_constant0_1', {'pp_stage': 0})
('call_function', 'index', {'pp_stage': 0})""",
)
def test_static_input_indices(self):
"""Test basic linear module with aot_export_joint_with_descriptors"""
class SimpleLinear(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 2)
def forward(self, x):
return self.linear(x)
model = SimpleLinear()
inputs = (torch.randn(4, 3),)
gm = dynamo_graph_capture_for_export(model)(*inputs)
fake_mode = gm.meta.get("fake_mode", None)
with tracing(TracingContext(fake_mode)):
with ExitStack() as stack:
joint = aot_export_joint_with_descriptors(
stack,
gm,
inputs,
)
self.assertEqual(joint._aot_state.fw_metadata.static_input_indices, [0, 1])
def test_no_annotation_on_gradient_acc_nodes(self):
"""Test basic linear module with aot_export_joint_with_descriptors"""
class SimpleLinear(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 2)
self.linear2 = nn.Linear(3, 2)
def forward(self, x):
with fx_traceback.annotate({"test": 1}):
return self.linear(x) - self.linear2(x)
model = SimpleLinear()
inputs = (torch.randn(4, 3, requires_grad=True),)
graph_module = graph_capture(model, inputs, True)
add_nodes = graph_module.graph.find_nodes(
op="call_function", target=torch.ops.aten.add.Tensor
)
self.assertEqual(len(add_nodes), 1)
gradient_acc_node = add_nodes[0]
self.assertTrue(gradient_acc_node.meta["is_gradient_acc"])
self.assertEqual(gradient_acc_node.meta.get("custom", {}), {})
custom_metadata = fx_traceback._get_custom_metadata(graph_module)
self.assertExpectedInline(
str(custom_metadata),
"""\
('call_function', 't', {'test': 1})
('call_function', 'addmm', {'test': 1})
('call_function', 't_1', {'test': 1})
('call_function', 'addmm_1', {'test': 1})
('call_function', 'sub', {'test': 1})
('call_function', 'neg', {'test': 1})
('call_function', 't_2', {'test': 1})
('call_function', 'mm', {'test': 1})
('call_function', 't_3', {'test': 1})
('call_function', 'mm_1', {'test': 1})
('call_function', 't_4', {'test': 1})
('call_function', 'sum_1', {'test': 1})
('call_function', 'view', {'test': 1})
('call_function', 't_5', {'test': 1})
('call_function', 't_6', {'test': 1})
('call_function', 'mm_2', {'test': 1})
('call_function', 't_7', {'test': 1})
('call_function', 'mm_3', {'test': 1})
('call_function', 't_8', {'test': 1})
('call_function', 'sum_2', {'test': 1})
('call_function', 'view_1', {'test': 1})
('call_function', 't_9', {'test': 1})""",
)
if __name__ == "__main__":
run_tests()
| inner_f |
python | encode__django-rest-framework | tests/schemas/test_coreapi.py | {
"start": 22629,
"end": 24770
} | class ____(TestCase):
def setUp(self):
self.patterns = [
path('api/v1/example/', views.ExampleListView.as_view()),
path('api/v1/example/<int:pk>/', views.ExampleDetailView.as_view()),
path('api/v1/example/<int:pk>/sub/', views.ExampleDetailView.as_view()),
]
def test_schema_for_regular_views(self):
"""
Ensure that schema generation with an API that is not at the URL
root continues to use correct structure for link keys.
"""
generator = SchemaGenerator(title='Example API', patterns=self.patterns)
schema = generator.get_schema()
expected = coreapi.Document(
url='',
title='Example API',
content={
'example': {
'create': coreapi.Link(
url='/api/v1/example/',
action='post',
fields=[]
),
'list': coreapi.Link(
url='/api/v1/example/',
action='get',
fields=[]
),
'read': coreapi.Link(
url='/api/v1/example/{id}/',
action='get',
fields=[
coreapi.Field('id', required=True, location='path', schema=coreschema.String())
]
),
'sub': {
'list': coreapi.Link(
url='/api/v1/example/{id}/sub/',
action='get',
fields=[
coreapi.Field('id', required=True, location='path', schema=coreschema.String())
]
)
}
}
}
)
assert schema == expected
@unittest.skipUnless(coreapi, 'coreapi is not installed')
@override_settings(REST_FRAMEWORK={'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema'})
| TestSchemaGeneratorNotAtRoot |
python | getsentry__sentry | src/sentry/integrations/slack/message_builder/help.py | {
"start": 2711,
"end": 4839
} | class ____(BlockSlackMessageBuilder):
def __init__(self, command: str | None = None, integration_id: int | None = None) -> None:
super().__init__()
self.command = command
self.integration_id = integration_id
def get_header_blocks(self) -> Sequence[SlackBlock]:
blocks = []
if self.command and self.command != "help":
_logger.info("slack.event.unknown-command", extra={"command": self.command})
blocks.append(
self.get_markdown_block(UNKNOWN_COMMAND_MESSAGE.format(command=self.command))
)
blocks.append(self.get_markdown_block(HEADER_MESSAGE))
return blocks
def get_help_message(self) -> SlackBlock:
return self._build_blocks(
*self.get_header_blocks(),
self.get_markdown_block(DM_COMMAND_HEADER),
self.get_markdown_block(DM_COMMANDS_MESSAGE),
self.get_markdown_block(CHANNEL_COMMANDS_HEADER),
self.get_markdown_block(CHANNEL_COMMANDS_MESSAGE),
self.get_markdown_block(HELP_COMMANDS_HEADER),
self.get_markdown_block(HELP_COMMANDS_HEADER_MESSAGE),
self.get_markdown_block(HELP_COMMANDS_MESSAGE),
self.get_markdown_block(CONTACT_HEADER),
self.get_markdown_block(CONTACT_MESSAGE),
self.get_divider(),
self.get_markdown_block(GENERAL_MESSAGE),
)
def get_support_message(self) -> SlackBlock:
return self._build_blocks(
self.get_markdown_block(SUPPORT_HEADER_MESSAGE),
self.get_markdown_block(SUPPORT_OPTIONS_MESSAGE),
)
def get_docs_message(self) -> SlackBlock:
return self._build_blocks(
self.get_markdown_block(DOCS_HEADER_MESSAGE),
self.get_markdown_block(DOCS_OPTIONS_MESSAGE),
)
def build(self) -> SlackBlock:
if self.command == "support":
return self.get_support_message()
elif self.command == "docs":
return self.get_docs_message()
else:
return self.get_help_message()
| SlackHelpMessageBuilder |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/dropout.py | {
"start": 66,
"end": 806
} | class ____(torch.nn.Dropout):
r"""This is the quantized equivalent of :class:`~torch.nn.Dropout`.
And this is a placeholder to enable models where fp32 tensors
had dropout to work with quantized tensors in train and eval mode.
Args:
p: probability of an element to be zeroed
inplace: can optionally do the operation in-place. Default: ``False``
"""
def forward(self, input):
return input
def _get_name(self):
return "QuantizedDropout"
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
return cls(mod.p, mod.inplace)
@classmethod
def from_reference(cls, mod, scale, zero_point):
return cls(mod.p, mod.inplace)
| Dropout |
python | getsentry__sentry | src/sentry/web/frontend/disabled_member_view.py | {
"start": 338,
"end": 1302
} | class ____(ReactPageView):
def is_member_disabled_from_limit(self, request: object, organization) -> bool:
return False
def handle(self, request: HttpRequest, organization, **kwargs) -> HttpResponse:
user = request.user
# if org member is not restricted, redirect user out of the disabled view
try:
member = organization_service.check_membership_by_id(
user_id=user.id, organization_id=organization.id
)
if member and not member.flags["member-limit:restricted"]:
return self.redirect(
reverse("sentry-organization-issue-list", args=[organization.slug])
)
except OrganizationMember.DoesNotExist:
# this shouldn't happen but we can default to basic handling
pass
# otherwise, just do the basic handling
return super().handle(request, organization, **kwargs)
| DisabledMemberView |
python | pandas-dev__pandas | pandas/tests/indexes/test_old_base.py | {
"start": 720,
"end": 32541
} | class ____:
@pytest.fixture(
params=[
RangeIndex(start=0, stop=20, step=2),
Index(np.arange(5, dtype=np.float64)),
Index(np.arange(5, dtype=np.float32)),
Index(np.arange(5, dtype=np.uint64)),
Index(range(0, 20, 2), dtype=np.int64),
Index(range(0, 20, 2), dtype=np.int32),
Index(range(0, 20, 2), dtype=np.int16),
Index(range(0, 20, 2), dtype=np.int8),
Index(list("abcde")),
Index([0, "a", 1, "b", 2, "c"]),
period_range("20130101", periods=5, freq="D"),
TimedeltaIndex(
[
"0 days 01:00:00",
"1 days 01:00:00",
"2 days 01:00:00",
"3 days 01:00:00",
"4 days 01:00:00",
],
dtype="timedelta64[ns]",
freq="D",
),
DatetimeIndex(
["2013-01-01", "2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05"],
dtype="datetime64[ns]",
freq="D",
),
IntervalIndex.from_breaks(range(11), closed="right"),
]
)
def simple_index(self, request):
return request.param
def test_pickle_compat_construction(self, simple_index):
# need an object to create with
if isinstance(simple_index, RangeIndex):
pytest.skip("RangeIndex() is a valid constructor")
msg = "|".join(
[
r"Index\(\.\.\.\) must be called with a collection of some "
r"kind, None was passed",
r"DatetimeIndex\(\) must be called with a collection of some "
r"kind, None was passed",
r"TimedeltaIndex\(\) must be called with a collection of some "
r"kind, None was passed",
r"__new__\(\) missing 1 required positional argument: 'data'",
r"__new__\(\) takes at least 2 arguments \(1 given\)",
r"'NoneType' object is not iterable",
]
)
with pytest.raises(TypeError, match=msg):
type(simple_index)()
def test_shift(self, simple_index):
# GH8083 test the base class for shift
if isinstance(simple_index, (DatetimeIndex, TimedeltaIndex, PeriodIndex)):
pytest.skip("Tested in test_ops/test_arithmetic")
idx = simple_index
msg = (
f"This method is only implemented for DatetimeIndex, PeriodIndex and "
f"TimedeltaIndex; Got type {type(idx).__name__}"
)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1, 2)
def test_constructor_name_unhashable(self, simple_index):
# GH#29069 check that name is hashable
# See also same-named test in tests.series.test_constructors
idx = simple_index
with pytest.raises(TypeError, match="Index.name must be a hashable type"):
type(idx)(idx, name=[])
def test_create_index_existing_name(self, simple_index):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
expected = simple_index.copy()
if not isinstance(expected, MultiIndex):
expected.name = "foo"
result = Index(expected)
tm.assert_index_equal(result, expected)
result = Index(expected, name="bar")
expected.name = "bar"
tm.assert_index_equal(result, expected)
else:
expected.names = ["foo", "bar"]
result = Index(expected)
tm.assert_index_equal(
result,
Index(
Index(
[
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
],
dtype="object",
),
names=["foo", "bar"],
),
)
result = Index(expected, names=["A", "B"])
tm.assert_index_equal(
result,
Index(
Index(
[
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
],
dtype="object",
),
names=["A", "B"],
),
)
def test_numeric_compat(self, simple_index):
idx = simple_index
# Check that this doesn't cover MultiIndex case, if/when it does,
# we can remove multi.test_compat.test_numeric_compat
assert not isinstance(idx, MultiIndex)
if type(idx) is Index:
pytest.skip("Not applicable for Index")
if is_numeric_dtype(simple_index.dtype) or isinstance(
simple_index, TimedeltaIndex
):
pytest.skip("Tested elsewhere.")
typ = type(idx._data).__name__
cls = type(idx).__name__
lmsg = "|".join(
[
rf"unsupported operand type\(s\) for \*: '{typ}' and 'int'",
"cannot perform (__mul__|__truediv__|__floordiv__) with "
f"this index type: ({cls}|{typ})",
]
)
with pytest.raises(TypeError, match=lmsg):
idx * 1
rmsg = "|".join(
[
rf"unsupported operand type\(s\) for \*: 'int' and '{typ}'",
"cannot perform (__rmul__|__rtruediv__|__rfloordiv__) with "
f"this index type: ({cls}|{typ})",
]
)
with pytest.raises(TypeError, match=rmsg):
1 * idx
div_err = lmsg.replace("*", "/")
with pytest.raises(TypeError, match=div_err):
idx / 1
div_err = rmsg.replace("*", "/")
with pytest.raises(TypeError, match=div_err):
1 / idx
floordiv_err = lmsg.replace("*", "//")
with pytest.raises(TypeError, match=floordiv_err):
idx // 1
floordiv_err = rmsg.replace("*", "//")
with pytest.raises(TypeError, match=floordiv_err):
1 // idx
def test_logical_compat(self, simple_index):
if simple_index.dtype in (object, "string"):
pytest.skip("Tested elsewhere.")
idx = simple_index
if idx.dtype.kind in "iufcbm":
assert idx.all() == idx._values.all()
assert idx.all() == idx.to_series().all()
assert idx.any() == idx._values.any()
assert idx.any() == idx.to_series().any()
else:
msg = "does not support operation '(any|all)'"
with pytest.raises(TypeError, match=msg):
idx.all()
with pytest.raises(TypeError, match=msg):
idx.any()
def test_repr_roundtrip(self, simple_index):
if isinstance(simple_index, IntervalIndex):
pytest.skip(f"Not a valid repr for {type(simple_index).__name__}")
idx = simple_index
tm.assert_index_equal(eval(repr(idx)), idx)
def test_repr_max_seq_item_setting(self, simple_index):
# GH10182
if isinstance(simple_index, IntervalIndex):
pytest.skip(f"Not a valid repr for {type(simple_index).__name__}")
idx = simple_index
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert "..." not in str(idx)
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_ensure_copied_data(self, index):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs["freq"] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
pytest.skip(
"RangeIndex cannot be initialized from data, "
"MultiIndex and CategoricalIndex are tested separately"
)
elif index.dtype == object and index.inferred_type in ["boolean", "string"]:
init_kwargs["dtype"] = index.dtype
index_type = type(index)
result = index_type(index.values, copy=True, **init_kwargs)
if isinstance(index.dtype, DatetimeTZDtype):
result = result.tz_localize("UTC").tz_convert(index.tz)
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
index = index._with_freq(None)
tm.assert_index_equal(index, result)
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type.from_ordinals(ordinals=index.asi8, **init_kwargs)
tm.assert_numpy_array_equal(index.asi8, result.asi8, check_same="same")
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
elif type(index) is Index and not isinstance(index.dtype, np.dtype):
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_index_equal(result, index)
if isinstance(index._values, BaseMaskedArray):
assert np.shares_memory(index._values._data, result._values._data)
tm.assert_numpy_array_equal(
index._values._data, result._values._data, check_same="same"
)
assert np.shares_memory(index._values._mask, result._values._mask)
tm.assert_numpy_array_equal(
index._values._mask, result._values._mask, check_same="same"
)
elif (
isinstance(index.dtype, StringDtype) and index.dtype.storage == "python"
):
assert np.shares_memory(index._values._ndarray, result._values._ndarray)
tm.assert_numpy_array_equal(
index._values._ndarray, result._values._ndarray, check_same="same"
)
elif (
isinstance(index.dtype, StringDtype)
and index.dtype.storage == "pyarrow"
):
assert tm.shares_memory(result._values, index._values)
else:
raise NotImplementedError(index.dtype)
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values, check_same="same")
def test_memory_usage(self, index):
index._engine.clear_mapping()
result = index.memory_usage()
if index.empty:
# we report 0 for no-length
assert result == 0
return
# non-zero length
index.get_loc(index[0])
result2 = index.memory_usage()
result3 = index.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
# Index[EA] has engine but it does not have a Hashtable .mapping
if not isinstance(index, (RangeIndex, IntervalIndex)) and not (
type(index) is Index and not isinstance(index.dtype, np.dtype)
):
assert result2 > result
if index.inferred_type == "object":
assert result3 > result2
def test_memory_usage_doesnt_trigger_engine(self, index):
index._cache.clear()
assert "_engine" not in index._cache
res_without_engine = index.memory_usage()
assert "_engine" not in index._cache
# explicitly load and cache the engine
_ = index._engine
assert "_engine" in index._cache
res_with_engine = index.memory_usage()
# the empty engine doesn't affect the result even when initialized with values,
# because engine.sizeof() doesn't consider the content of engine.values
assert res_with_engine == res_without_engine
if len(index) == 0:
assert res_without_engine == 0
assert res_with_engine == 0
else:
assert res_without_engine > 0
assert res_with_engine > 0
def test_argsort(self, index):
if isinstance(index, CategoricalIndex):
pytest.skip(f"{type(self).__name__} separately tested")
result = index.argsort()
expected = np.array(index).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self, index):
result = np.argsort(index)
expected = index.argsort()
tm.assert_numpy_array_equal(result, expected)
result = np.argsort(index, kind="mergesort")
expected = index.argsort(kind="mergesort")
tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
# validation via their 'values' attribute as
# defined in pandas.core.indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
if isinstance(index, (CategoricalIndex, RangeIndex)):
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, axis=1)
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, order=("a", "b"))
def test_repeat(self, simple_index):
rep = 2
idx = simple_index.copy()
new_index_cls = idx._constructor
expected = new_index_cls(idx.values.repeat(rep), name=idx.name)
tm.assert_index_equal(idx.repeat(rep), expected)
idx = simple_index
rep = np.arange(len(idx))
expected = new_index_cls(idx.values.repeat(rep), name=idx.name)
tm.assert_index_equal(idx.repeat(rep), expected)
def test_numpy_repeat(self, simple_index):
rep = 2
idx = simple_index
expected = idx.repeat(rep)
tm.assert_index_equal(np.repeat(idx, rep), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(idx, rep, axis=0)
def test_where(self, listlike_box, simple_index):
if isinstance(simple_index, (IntervalIndex, PeriodIndex)) or is_numeric_dtype(
simple_index.dtype
):
pytest.skip("Tested elsewhere.")
klass = listlike_box
idx = simple_index
if isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
# where does not preserve freq
idx = idx._with_freq(None)
cond = [True] * len(idx)
result = idx.where(klass(cond))
expected = idx
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = Index([idx._na_value] + idx[1:].tolist(), dtype=idx.dtype)
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_insert_base(self, index):
# GH#51363
trimmed = index[1:4]
if not len(index):
pytest.skip("Not applicable for empty index")
result = trimmed.insert(0, index[0])
assert index[0:4].equals(result)
def test_insert_out_of_bounds(self, index, using_infer_string):
# TypeError/IndexError matches what np.insert raises in these cases
if len(index) > 0:
err = TypeError
else:
err = IndexError
if len(index) == 0:
# 0 vs 0.5 in error message varies with numpy version
msg = "index (0|0.5) is out of bounds for axis 0 with size 0"
else:
msg = "slice indices must be integers or None or have an __index__ method"
if using_infer_string:
if index.dtype == "string" or index.dtype == "category":
msg = "loc must be an integer between"
elif index.dtype == "object" and len(index) == 0:
msg = "loc must be an integer between"
err = TypeError
with pytest.raises(err, match=msg):
index.insert(0.5, "foo")
msg = "|".join(
[
r"index -?\d+ is out of bounds for axis 0 with size \d+",
"loc must be an integer between",
]
)
with pytest.raises(IndexError, match=msg):
index.insert(len(index) + 1, 1)
with pytest.raises(IndexError, match=msg):
index.insert(-len(index) - 1, 1)
def test_delete_base(self, index):
if not len(index):
pytest.skip("Not applicable for empty index")
if isinstance(index, RangeIndex):
# tested in class
pytest.skip(f"{type(self).__name__} tested elsewhere")
expected = index[1:]
result = index.delete(0)
assert result.equals(expected)
assert result.name == expected.name
expected = index[:-1]
result = index.delete(-1)
assert result.equals(expected)
assert result.name == expected.name
length = len(index)
msg = f"index {length} is out of bounds for axis 0 with size {length}"
with pytest.raises(IndexError, match=msg):
index.delete(length)
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_equals(self, index):
if isinstance(index, IntervalIndex):
pytest.skip(f"{type(index).__name__} tested elsewhere")
is_ea_idx = type(index) is Index and not isinstance(index.dtype, np.dtype)
assert index.equals(index)
assert index.equals(index.copy())
if not is_ea_idx:
# doesn't hold for e.g. IntegerDtype
assert index.equals(index.astype(object))
assert not index.equals(list(index))
assert not index.equals(np.array(index))
# Cannot pass in non-int64 dtype to RangeIndex
if not isinstance(index, RangeIndex) and not is_ea_idx:
same_values = Index(index, dtype=object)
assert index.equals(same_values)
assert same_values.equals(index)
if index.nlevels == 1:
# do not test MultiIndex
assert not index.equals(Series(index))
def test_equals_op(self, simple_index):
# GH9947, GH10637
index_a = simple_index
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
msg = "Lengths must match|could not be broadcast"
with pytest.raises(ValueError, match=msg):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with pytest.raises(ValueError, match=msg):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
series_b = Series(array_b)
series_c = Series(array_c)
series_d = Series(array_d)
with pytest.raises(ValueError, match=msg):
index_a == series_b
tm.assert_series_equal(index_a == series_a, Series(expected1))
tm.assert_series_equal(index_a == series_c, Series(expected2))
# cases where length is 1 for one of them
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_d
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
series_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
series_a == array_d
# comparing with a scalar should broadcast; note that we are excluding
# MultiIndex because in this case each item in the index is a tuple of
# length 2, and therefore is considered an array of length 2 in the
# comparison instead of a scalar
if not isinstance(index_a, MultiIndex):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
tm.assert_series_equal(series_a == item, Series(expected3))
def test_fillna(self, index):
# GH 11343
if len(index) == 0:
pytest.skip("Not relevant for empty index")
elif index.dtype == bool:
pytest.skip(f"{index.dtype} cannot hold NAs")
elif isinstance(index, Index) and is_integer_dtype(index.dtype):
pytest.skip(f"Not relevant for Index with {index.dtype}")
elif isinstance(index, MultiIndex):
idx = index.copy(deep=True)
msg = "fillna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.fillna(idx[0])
else:
idx = index.copy(deep=True)
result = idx.fillna(idx[0])
tm.assert_index_equal(result, idx)
assert result is not idx
msg = "'value' must be a scalar, passed: "
with pytest.raises(TypeError, match=msg):
idx.fillna([idx[0]])
idx = index.copy(deep=True)
values = idx._values
values[1] = np.nan
idx = type(index)(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_nulls(self, index):
# this is really a smoke test for the methods
# as these are adequately tested for function elsewhere
if len(index) == 0:
tm.assert_numpy_array_equal(index.isna(), np.array([], dtype=bool))
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.isna()
elif not index.hasnans:
tm.assert_numpy_array_equal(index.isna(), np.zeros(len(index), dtype=bool))
tm.assert_numpy_array_equal(index.notna(), np.ones(len(index), dtype=bool))
else:
result = isna(index)
tm.assert_numpy_array_equal(index.isna(), result)
tm.assert_numpy_array_equal(index.notna(), ~result)
def test_empty(self, simple_index):
# GH 15270
idx = simple_index
assert not idx.empty
assert idx[:0].empty
def test_join_self_unique(self, join_type, simple_index):
idx = simple_index
if idx.is_unique:
joined = idx.join(idx, how=join_type)
expected = simple_index
if join_type == "outer":
expected = algos.safe_sort(expected)
tm.assert_index_equal(joined, expected)
def test_map(self, simple_index):
# callable
if isinstance(simple_index, (TimedeltaIndex, PeriodIndex)):
pytest.skip("Tested elsewhere.")
idx = simple_index
result = idx.map(lambda x: x)
# RangeIndex are equivalent to the similar Index with int64 dtype
tm.assert_index_equal(result, idx, exact="equiv")
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: Series(values, index),
],
)
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_map_dictlike(self, mapper, simple_index, request):
idx = simple_index
if isinstance(idx, (DatetimeIndex, TimedeltaIndex, PeriodIndex)):
pytest.skip("Tested elsewhere.")
identity = mapper(idx.values, idx)
result = idx.map(identity)
# RangeIndex are equivalent to the similar Index with int64 dtype
tm.assert_index_equal(result, idx, exact="equiv")
# empty mappable
dtype = None
if idx.dtype.kind == "f":
dtype = idx.dtype
expected = Index([np.nan] * len(idx), dtype=dtype)
result = idx.map(mapper(expected, idx))
tm.assert_index_equal(result, expected)
def test_map_str(self, simple_index):
# GH 31202
if isinstance(simple_index, CategoricalIndex):
pytest.skip("See test_map.py")
idx = simple_index
result = idx.map(str)
expected = Index([str(x) for x in idx])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize("ordered", [True, False])
def test_astype_category(self, copy, name, ordered, simple_index):
# GH 18630
idx = simple_index
if name:
idx = idx.rename(name)
# standard categories
dtype = CategoricalDtype(ordered=ordered)
result = idx.astype(dtype, copy=copy)
expected = CategoricalIndex(idx, name=name, ordered=ordered)
tm.assert_index_equal(result, expected, exact=True)
# non-standard categories
dtype = CategoricalDtype(idx.unique().tolist()[:-1], ordered)
msg = "Constructing a Categorical with a dtype and values containing"
with tm.assert_produces_warning(Pandas4Warning, match=msg):
result = idx.astype(dtype, copy=copy)
with tm.assert_produces_warning(Pandas4Warning, match=msg):
expected = CategoricalIndex(idx, name=name, dtype=dtype)
tm.assert_index_equal(result, expected, exact=True)
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
result = idx.astype("category", copy=copy)
expected = CategoricalIndex(idx, name=name)
tm.assert_index_equal(result, expected, exact=True)
def test_is_unique(self, simple_index):
# initialize a unique index
index = simple_index.drop_duplicates()
assert index.is_unique is True
# empty index should be unique
index_empty = index[:0]
assert index_empty.is_unique is True
# test basic dupes
index_dup = index.insert(0, index[0])
assert index_dup.is_unique is False
# single NA should be unique
index_na = index.insert(0, np.nan)
assert index_na.is_unique is True
# multiple NA should not be unique
index_na_dup = index_na.insert(0, np.nan)
assert index_na_dup.is_unique is False
@pytest.mark.arm_slow
def test_engine_reference_cycle(self, simple_index):
# GH27585
index = simple_index.copy()
ref = weakref.ref(index)
index._engine
del index
assert ref() is None
def test_getitem_2d_deprecated(self, simple_index):
# GH#30588, GH#31479
if isinstance(simple_index, IntervalIndex):
pytest.skip("Tested elsewhere")
idx = simple_index
msg = "Multi-dimensional indexing|too many|only"
with pytest.raises((ValueError, IndexError), match=msg):
idx[:, None]
if not isinstance(idx, RangeIndex):
# GH#44051 RangeIndex already raised pre-2.0 with a different message
with pytest.raises((ValueError, IndexError), match=msg):
idx[True]
with pytest.raises((ValueError, IndexError), match=msg):
idx[False]
else:
msg = "only integers, slices"
with pytest.raises(IndexError, match=msg):
idx[True]
with pytest.raises(IndexError, match=msg):
idx[False]
def test_copy_shares_cache(self, simple_index):
# GH32898, GH36840
idx = simple_index
idx.get_loc(idx[0]) # populates the _cache.
copy = idx.copy()
assert copy._cache is idx._cache
def test_shallow_copy_shares_cache(self, simple_index):
# GH32669, GH36840
idx = simple_index
idx.get_loc(idx[0]) # populates the _cache.
shallow_copy = idx._view()
assert shallow_copy._cache is idx._cache
shallow_copy = idx._shallow_copy(idx._data)
assert shallow_copy._cache is not idx._cache
assert shallow_copy._cache == {}
def test_index_groupby(self, simple_index):
idx = simple_index[:5]
to_groupby = np.array([1, 2, np.nan, 2, 1])
tm.assert_dict_equal(
idx.groupby(to_groupby), {1.0: idx[[0, 4]], 2.0: idx[[1, 3]]}
)
to_groupby = DatetimeIndex(
[
datetime(2011, 11, 1),
datetime(2011, 12, 1),
pd.NaT,
datetime(2011, 12, 1),
datetime(2011, 11, 1),
],
tz="UTC",
).values
ex_keys = [Timestamp("2011-11-01"), Timestamp("2011-12-01")]
expected = {ex_keys[0]: idx[[0, 4]], ex_keys[1]: idx[[1, 3]]}
tm.assert_dict_equal(idx.groupby(to_groupby), expected)
def test_append_preserves_dtype(self, simple_index):
# In particular Index with dtype float32
index = simple_index
N = len(index)
result = index.append(index)
assert result.dtype == index.dtype
tm.assert_index_equal(result[:N], index, exact=False, check_exact=True)
tm.assert_index_equal(result[N:], index, exact=False, check_exact=True)
alt = index.take(list(range(N)) * 2)
tm.assert_index_equal(result, alt, check_exact=True)
def test_inv(self, simple_index, using_infer_string):
idx = simple_index
if idx.dtype.kind in ["i", "u"]:
res = ~idx
expected = Index(~idx.values, name=idx.name)
tm.assert_index_equal(res, expected)
# check that we are matching Series behavior
res2 = ~Series(idx)
tm.assert_series_equal(res2, Series(expected))
else:
if idx.dtype.kind == "f":
msg = "ufunc 'invert' not supported for the input types"
else:
msg = "bad operand|__invert__ is not supported for string dtype"
with pytest.raises(TypeError, match=msg):
~idx
# check that we get the same behavior with Series
with pytest.raises(TypeError, match=msg):
~Series(idx)
| TestBase |
python | jazzband__prettytable | src/prettytable/prettytable.py | {
"start": 106207,
"end": 111735
} | class ____(HTMLParser):
def __init__(self, **kwargs) -> None:
HTMLParser.__init__(self)
self.kwargs = kwargs
self.tables: list[PrettyTable] = []
self.last_row: list[str] = []
self.rows: list[tuple[list[str], bool]] = []
self.max_row_width = 0
self.active: str | None = None
self.last_content = ""
self.is_last_row_header = False
self.colspan = 0
def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
self.active = tag
if tag == "th":
self.is_last_row_header = True
for key, value in attrs:
if key == "colspan":
self.colspan = int(value) # type: ignore[arg-type]
def handle_endtag(self, tag: str) -> None:
if tag in ["th", "td"]:
stripped_content = self.last_content.strip()
self.last_row.append(stripped_content)
if self.colspan:
for _ in range(1, self.colspan):
self.last_row.append("")
self.colspan = 0
if tag == "tr":
self.rows.append((self.last_row, self.is_last_row_header))
self.max_row_width = max(self.max_row_width, len(self.last_row))
self.last_row = []
self.is_last_row_header = False
if tag == "table":
table = self.generate_table(self.rows)
self.tables.append(table)
self.rows = []
self.last_content = " "
self.active = None
def handle_data(self, data: str) -> None:
self.last_content += data
def generate_table(self, rows: list[tuple[list[str], bool]]) -> PrettyTable:
"""
Generates from a list of rows a PrettyTable object.
"""
table = PrettyTable(**self.kwargs)
for row in self.rows:
if len(row[0]) < self.max_row_width:
appends = self.max_row_width - len(row[0])
for i in range(1, appends):
row[0].append("-")
if row[1]:
self.make_fields_unique(row[0])
table.field_names = row[0]
else:
table.add_row(row[0])
return table
def make_fields_unique(self, fields: list[str]) -> None:
"""
iterates over the row and make each field unique
"""
for i in range(len(fields)):
for j in range(i + 1, len(fields)):
if fields[i] == fields[j]:
fields[j] += "'"
def from_html(html_code: str, **kwargs) -> list[PrettyTable]:
"""
Generates a list of PrettyTables from a string of HTML code. Each <table> in
the HTML becomes one PrettyTable object.
"""
parser = TableHandler(**kwargs)
parser.feed(html_code)
return parser.tables
def from_html_one(html_code: str, **kwargs) -> PrettyTable:
"""
Generates a PrettyTable from a string of HTML code which contains only a
single <table>
"""
tables = from_html(html_code, **kwargs)
try:
assert len(tables) == 1
except AssertionError:
msg = "More than one <table> in provided HTML code. Use from_html instead."
raise ValueError(msg)
return tables[0]
def from_mediawiki(wiki_text: str, **kwargs) -> PrettyTable:
"""
Returns a PrettyTable instance from simple MediaWiki table markup.
Note that the table should have a header row.
Arguments:
wiki_text -- Multiline string containing MediaWiki table markup
(Enter within ''' ''')
"""
lines = wiki_text.strip().split("\n")
table = PrettyTable(**kwargs)
header = None
rows = []
inside_table = False
for line in lines:
line = line.strip()
if line.startswith("{|"):
inside_table = True
continue
if line.startswith("|}"):
break
if not inside_table:
continue
if line.startswith("|-"):
continue
if line.startswith("|+"):
continue
if line.startswith("!"):
header = [cell.strip() for cell in re.split(r"\s*!!\s*", line[1:])]
table.field_names = header
continue
if line.startswith("|"):
row_data = [cell.strip() for cell in re.split(r"\s*\|\|\s*", line[1:])]
rows.append(row_data)
continue
if header:
for row in rows:
if len(row) != len(header):
error_message = "Row length mismatch between header and body."
raise ValueError(error_message)
table.add_row(row)
else:
msg = "No valid header found in the MediaWiki table."
raise ValueError(msg)
return table
def _warn_deprecation(name: str, module_globals: dict[str, Any]) -> Any:
if (val := module_globals.get(f"_DEPRECATED_{name}")) is None:
msg = f"module '{__name__}' has no attribute '{name}'"
raise AttributeError(msg)
module_globals[name] = val
if name in {"FRAME", "ALL", "NONE", "HEADER"}:
msg = (
f"the '{name}' constant is deprecated, "
"use the 'HRuleStyle' and 'VRuleStyle' enums instead"
)
else:
msg = f"the '{name}' constant is deprecated, use the 'TableStyle' enum instead"
import warnings
warnings.warn(msg, DeprecationWarning, stacklevel=3)
return val
def __getattr__(name: str) -> Any:
return _warn_deprecation(name, module_globals=globals())
| TableHandler |
python | pydantic__pydantic | pydantic/v1/networks.py | {
"start": 19127,
"end": 19769
} | class ____(_BaseAddress):
__slots__ = ()
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='ipvanyinterface')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: NetworkType) -> Union[IPv4Interface, IPv6Interface]:
try:
return IPv4Interface(value)
except ValueError:
pass
try:
return IPv6Interface(value)
except ValueError:
raise errors.IPvAnyInterfaceError()
| IPvAnyInterface |
python | tensorflow__tensorflow | tensorflow/python/tpu/profiler/profiler_analysis_pb2_grpc.py | {
"start": 2823,
"end": 5789
} | class ____(object):
"""//////////////////////////////////////////////////////////////////////////////
ProfileAnalysis service provide entry point for profiling TPU and for
serving profiled data to Tensorboard through GRPC
//////////////////////////////////////////////////////////////////////////////
"""
def NewSession(self, request, context):
"""Starts a profiling session, blocks until it completes.
TPUProfileAnalysis service delegate this to TPUProfiler service.
Populate the profiled data in repository, then return status to caller.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def EnumSessions(self, request, context):
"""Enumerate existing sessions and return available profile tools."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSessionToolData(self, request, context):
"""Retrieve specific tool's data for specific session."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ProfileAnalysisServicer_to_server(servicer, server):
rpc_method_handlers = {
'NewSession':
grpc.unary_unary_rpc_method_handler(
servicer.NewSession,
request_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.NewProfileSessionRequest.FromString,
response_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.NewProfileSessionResponse.SerializeToString,
),
'EnumSessions':
grpc.unary_unary_rpc_method_handler(
servicer.EnumSessions,
request_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.EnumProfileSessionsAndToolsRequest.FromString,
response_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.EnumProfileSessionsAndToolsResponse.SerializeToString,
),
'GetSessionToolData':
grpc.unary_unary_rpc_method_handler(
servicer.GetSessionToolData,
request_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.ProfileSessionDataRequest.FromString,
response_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2
.ProfileSessionDataResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'tensorflow.ProfileAnalysis', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| ProfileAnalysisServicer |
python | astropy__astropy | astropy/coordinates/builtin_frames/skyoffset.py | {
"start": 3825,
"end": 8120
} | class ____(BaseCoordinateFrame):
"""
A frame which is relative to some specific position and oriented to match
its frame.
SkyOffsetFrames always have component names for spherical coordinates
of ``lon``/``lat``, *not* the component names for the frame of ``origin``.
This is useful for calculating offsets and dithers in the frame of the sky
relative to an arbitrary position. Coordinates in this frame are both centered on the position specified by the
``origin`` coordinate, *and* they are oriented in the same manner as the
``origin`` frame. E.g., if ``origin`` is `~astropy.coordinates.ICRS`, this
object's ``lat`` will be pointed in the direction of Dec, while ``lon``
will point in the direction of RA.
For more on skyoffset frames, see :ref:`astropy:astropy-skyoffset-frames`.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
origin : coordinate-like
The coordinate which specifies the origin of this frame. Note that this
origin is used purely for on-sky location/rotation. It can have a
``distance`` but it will not be used by this ``SkyOffsetFrame``.
rotation : angle-like
The final rotation of the frame about the ``origin``. The sign of
the rotation is the left-hand rule. That is, an object at a
particular position angle in the un-rotated system will be sent to
the positive latitude (z) direction in the final frame.
Notes
-----
``SkyOffsetFrame`` is a factory class. That is, the objects that it
yields are *not* actually objects of class ``SkyOffsetFrame``. Instead,
distinct classes are created on-the-fly for whatever the frame class is
of ``origin``.
"""
rotation = QuantityAttribute(
default=0, unit=u.deg, doc="The rotation angle for the frame orientation"
)
origin = CoordinateAttribute(
default=None, frame=None, doc="The origin of the offset frame"
)
def __new__(cls, *args, **kwargs):
# We don't want to call this method if we've already set up
# an skyoffset frame for this class.
if not (issubclass(cls, SkyOffsetFrame) and cls is not SkyOffsetFrame):
# We get the origin argument, and handle it here.
try:
origin_frame = kwargs["origin"]
except KeyError:
raise TypeError(
"Can't initialize a SkyOffsetFrame without origin= keyword."
)
if hasattr(origin_frame, "frame"):
origin_frame = origin_frame.frame
newcls = make_skyoffset_cls(origin_frame.__class__)
return newcls.__new__(newcls, *args, **kwargs)
# http://stackoverflow.com/questions/19277399/why-does-object-new-work-differently-in-these-three-cases
# See above for why this is necessary. Basically, because some child
# may override __new__, we must override it here to never pass
# arguments to the object.__new__ method.
if super().__new__ is object.__new__:
return super().__new__(cls)
return super().__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.origin is not None and not self.origin.has_data:
raise ValueError("The origin supplied to SkyOffsetFrame has no data.")
if self.has_data:
self._set_skyoffset_data_lon_wrap_angle(self.data)
@staticmethod
def _set_skyoffset_data_lon_wrap_angle(data):
if hasattr(data, "lon"):
data.lon.wrap_angle = 180.0 * u.deg
return data
def represent_as(self, base, s="base", in_frame_units=False):
"""
Ensure the wrap angle for any spherical
representations.
"""
data = super().represent_as(base, s, in_frame_units=in_frame_units)
self._set_skyoffset_data_lon_wrap_angle(data)
return data
def __reduce__(self):
return (_skyoffset_reducer, (self.origin,), self.__dict__)
def _skyoffset_reducer(origin):
return SkyOffsetFrame.__new__(SkyOffsetFrame, origin=origin)
| SkyOffsetFrame |
python | openai__openai-python | src/openai/types/realtime/realtime_mcp_tool_execution_error_param.py | {
"start": 234,
"end": 380
} | class ____(TypedDict, total=False):
message: Required[str]
type: Required[Literal["tool_execution_error"]]
| RealtimeMcpToolExecutionErrorParam |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/datatable_hot_reloading.py | {
"start": 732,
"end": 1260
} | class ____(App[None]):
CSS_PATH = CSS_PATH
def compose(self) -> ComposeResult:
yield DataTable(zebra_stripes=True, cursor_type="row")
def on_mount(self) -> None:
dt = self.query_one(DataTable)
dt.add_column("A", width=10)
self.c = dt.add_column("B")
dt.fixed_columns = 1
dt.add_row("one", "two")
dt.add_row("three", "four")
dt.add_row("five", "six")
if __name__ == "__main__":
app = DataTableHotReloadingApp()
app.run()
| DataTableHotReloadingApp |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/markers.py | {
"start": 1141,
"end": 5164
} | class ____(object):
"""
This class is used to evaluate marker expressions.
"""
operations = {
'==': lambda x, y: x == y,
'===': lambda x, y: x == y,
'~=': lambda x, y: x == y or x > y,
'!=': lambda x, y: x != y,
'<': lambda x, y: x < y,
'<=': lambda x, y: x == y or x < y,
'>': lambda x, y: x > y,
'>=': lambda x, y: x == y or x > y,
'and': lambda x, y: x and y,
'or': lambda x, y: x or y,
'in': lambda x, y: x in y,
'not in': lambda x, y: x not in y,
}
def evaluate(self, expr, context):
"""
Evaluate a marker expression returned by the :func:`parse_requirement`
function in the specified context.
"""
if isinstance(expr, string_types):
if expr[0] in '\'"':
result = expr[1:-1]
else:
if expr not in context:
raise SyntaxError('unknown variable: %s' % expr)
result = context[expr]
else:
assert isinstance(expr, dict)
op = expr['op']
if op not in self.operations:
raise NotImplementedError('op not implemented: %s' % op)
elhs = expr['lhs']
erhs = expr['rhs']
if _is_literal(expr['lhs']) and _is_literal(expr['rhs']):
raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs))
lhs = self.evaluate(elhs, context)
rhs = self.evaluate(erhs, context)
if ((_is_version_marker(elhs) or _is_version_marker(erhs)) and
op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')):
lhs = LV(lhs)
rhs = LV(rhs)
elif _is_version_marker(elhs) and op in ('in', 'not in'):
lhs = LV(lhs)
rhs = _get_versions(rhs)
result = self.operations[op](lhs, rhs)
return result
_DIGITS = re.compile(r'\d+\.\d+')
def default_context():
def format_full_version(info):
version = '%s.%s.%s' % (info.major, info.minor, info.micro)
kind = info.releaselevel
if kind != 'final':
version += kind[0] + str(info.serial)
return version
if hasattr(sys, 'implementation'):
implementation_version = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
else:
implementation_version = '0'
implementation_name = ''
ppv = platform.python_version()
m = _DIGITS.match(ppv)
pv = m.group(0)
result = {
'implementation_name': implementation_name,
'implementation_version': implementation_version,
'os_name': os.name,
'platform_machine': platform.machine(),
'platform_python_implementation': platform.python_implementation(),
'platform_release': platform.release(),
'platform_system': platform.system(),
'platform_version': platform.version(),
'platform_in_venv': str(in_venv()),
'python_full_version': ppv,
'python_version': pv,
'sys_platform': sys.platform,
}
return result
DEFAULT_CONTEXT = default_context()
del default_context
evaluator = Evaluator()
def interpret(marker, execution_context=None):
"""
Interpret a marker and return a result depending on environment.
:param marker: The marker to interpret.
:type marker: str
:param execution_context: The context used for name lookup.
:type execution_context: mapping
"""
try:
expr, rest = parse_marker(marker)
except Exception as e:
raise SyntaxError('Unable to interpret marker syntax: %s: %s' % (marker, e))
if rest and rest[0] != '#':
raise SyntaxError('unexpected trailing data in marker: %s: %s' % (marker, rest))
context = dict(DEFAULT_CONTEXT)
if execution_context:
context.update(execution_context)
return evaluator.evaluate(expr, context)
| Evaluator |
python | conda__conda | conda/models/match_spec.py | {
"start": 39129,
"end": 39241
} | class ____(GlobStrMatch):
def __init__(self, value):
super().__init__(value.lower())
| GlobLowerStrMatch |
python | huggingface__transformers | examples/modular-transformers/image_processing_new_imgproc_model.py | {
"start": 1265,
"end": 14909
} | class ____(BaseImageProcessor):
r"""
Constructs a IMGPROC_MODEL image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 384, "width": 384}
size = get_size_dict(size, default_to_square=True)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_convert_rgb = do_convert_rgb
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
output_size = (size["height"], size["width"])
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
do_convert_rgb: Optional[bool] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Controls the size of the image after `resize`. The shortest edge of the image is resized to
`size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image
is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest
edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to normalize the image by if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
# PIL RGBA images are converted to RGB
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
return encoded_outputs
def new_image_processing_method(self, pixel_values: torch.FloatTensor):
return pixel_values / 2
| ImgprocModelImageProcessor |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/utils.py | {
"start": 3921,
"end": 5444
} | class ____(ast.NodeVisitor):
"""Check if a name is a local dict."""
def __init__(self, name: str, keys: set[str]) -> None:
"""Initialize the visitor.
Args:
name: The name to check.
keys: The keys to populate.
"""
self.name = name
self.keys = keys
@override
def visit_Subscript(self, node: ast.Subscript) -> None:
"""Visit a subscript node.
Args:
node: The node to visit.
"""
if (
isinstance(node.ctx, ast.Load)
and isinstance(node.value, ast.Name)
and node.value.id == self.name
and isinstance(node.slice, ast.Constant)
and isinstance(node.slice.value, str)
):
# we've found a subscript access on the name we're looking for
self.keys.add(node.slice.value)
@override
def visit_Call(self, node: ast.Call) -> None:
"""Visit a call node.
Args:
node: The node to visit.
"""
if (
isinstance(node.func, ast.Attribute)
and isinstance(node.func.value, ast.Name)
and node.func.value.id == self.name
and node.func.attr == "get"
and len(node.args) in {1, 2}
and isinstance(node.args[0], ast.Constant)
and isinstance(node.args[0].value, str)
):
# we've found a .get() call on the name we're looking for
self.keys.add(node.args[0].value)
| IsLocalDict |
python | pytorch__pytorch | torch/utils/data/sampler.py | {
"start": 7519,
"end": 9995
} | class ____(Sampler[int]):
r"""Samples elements from ``[0,..,len(weights)-1]`` with given probabilities (weights).
Args:
weights (sequence) : a sequence of weights, not necessary summing up to one
num_samples (int): number of samples to draw
replacement (bool): if ``True``, samples are drawn with replacement.
If not, they are drawn without replacement, which means that when a
sample index is drawn for a row, it cannot be drawn again for that row.
generator (Generator): Generator used in sampling.
Example:
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> list(
... WeightedRandomSampler(
... [0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True
... )
... )
[4, 4, 1, 4, 5]
>>> list(
... WeightedRandomSampler(
... [0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False
... )
... )
[0, 1, 4, 3, 2]
"""
weights: torch.Tensor
num_samples: int
replacement: bool
def __init__(
self,
weights: Sequence[float],
num_samples: int,
replacement: bool = True,
generator=None,
) -> None:
if (
not isinstance(num_samples, int)
or isinstance(num_samples, bool)
or num_samples <= 0
):
raise ValueError(
f"num_samples should be a positive integer value, but got num_samples={num_samples}"
)
if not isinstance(replacement, bool):
raise ValueError(
f"replacement should be a boolean value, but got replacement={replacement}"
)
weights_tensor = torch.as_tensor(weights, dtype=torch.double)
if len(weights_tensor.shape) != 1:
raise ValueError(
"weights should be a 1d sequence but given "
f"weights have shape {tuple(weights_tensor.shape)}"
)
self.weights = weights_tensor
self.num_samples = num_samples
self.replacement = replacement
self.generator = generator
def __iter__(self) -> Iterator[int]:
rand_tensor = torch.multinomial(
self.weights, self.num_samples, self.replacement, generator=self.generator
)
yield from iter(rand_tensor.tolist())
def __len__(self) -> int:
return self.num_samples
| WeightedRandomSampler |
python | tensorflow__tensorflow | third_party/xla/xla/backends/cpu/testlib/elemental_kernel_emitter_test.py | {
"start": 8863,
"end": 13004
} | class ____(parameterized.TestCase):
def test_map(self, input_dimensions, dtype):
scalar_shape = xla_extension.Shape.scalar_shape(dtype)
shape = xla_extension.Shape.array_shape(dtype, input_dimensions)
# Please note the double curly braces is to escape the python string
# formatting.
hlo = """
HloModule test_map
double {{
a = {scalar_shape} parameter(0)
b = {scalar_shape} constant(2)
ROOT doubled = {scalar_shape} multiply(a, b)
}}
ENTRY main {{
a = {shape} parameter(0)
ROOT mapped = {shape} map(a), to_apply=double
}}
""".format(scalar_shape=scalar_shape, shape=shape)
hlo_module, buffer_assignment = utilities.parse_hlo_module(hlo)
jit_compiler = testlib_cpu.JitCompiler(hlo_module.get_config())
emitter = testlib_cpu.ElementalKernelEmitter(
hlo_module.get_root_instruction(),
buffer_assignment,
jit_compiler.get_target_machine(),
)
input_np = create_input([0, 10], input_dimensions, dtype, shuffle=True)
input_literal = create_literal(input_np)
output_literal = xla_extension.Literal(shape)
runner = testlib_cpu.KernelRunner.create(
emitter.emit_kernel_definition(), jit_compiler
)
runner.call([input_literal, output_literal])
np.testing.assert_equal(
np.asarray(output_literal),
input_np * 2,
)
def test_reduce(self, input_dimensions, dtype):
# Iterate over all combinations of reduce dimensions.
for reduce_dimensions in itertools.chain.from_iterable(
itertools.combinations(range(len(input_dimensions)), r)
for r in range(1, len(input_dimensions))
):
scalar_shape = xla_extension.Shape.scalar_shape(dtype)
input_shape = xla_extension.Shape.array_shape(dtype, input_dimensions)
output_dimensions = [
dim
for idx, dim in enumerate(input_dimensions)
if idx not in reduce_dimensions
]
# Result can overflow in int8 (which results in undefined behavior),
# so we use int16 instead.
output_dtype = np.dtype(np.int16) if (dtype == np.int8) else dtype
output_shape = xla_extension.Shape.array_shape(
output_dtype, output_dimensions
)
# Please note the double curly braces is to escape the python string
# formatting.
hlo = """
HloModule test_reduce
add_method {{
a = {scalar_shape} parameter(0)
b = {scalar_shape} parameter(1)
ROOT add = {scalar_shape} add(a, b)
}}
ENTRY main {{
array = {input_shape} parameter(0)
initial_value = {scalar_shape} parameter(1)
ROOT reduced = {output_shape} reduce(array, initial_value),
dimensions={{{reduce_dimensions}}}, to_apply=add_method
}}
""".format(
scalar_shape=scalar_shape,
input_shape=input_shape,
reduce_dimensions=",".join(map(str, reduce_dimensions)),
output_shape=output_shape,
)
hlo_module, buffer_assignment = utilities.parse_hlo_module(hlo)
jit_compiler = testlib_cpu.JitCompiler(hlo_module.get_config())
emitter = testlib_cpu.ElementalKernelEmitter(
hlo_module.get_root_instruction(),
buffer_assignment,
jit_compiler.get_target_machine(),
)
input_np = create_input([0, 10], input_dimensions, dtype)
input_literal = create_literal(input_np)
initial_value_np = create_input([0, 10], (), dtype)
initial_value_literal = create_literal(initial_value_np)
output_literal = xla_extension.Literal(output_shape)
runner = testlib_cpu.KernelRunner.create(
emitter.emit_kernel_definition(), jit_compiler
)
runner.call([input_literal, initial_value_literal, output_literal])
np.testing.assert_array_almost_equal_nulp(
np.asarray(output_literal),
np.add.reduce(
input_np, axis=reduce_dimensions, initial=initial_value_np
),
nulp=3,
)
if __name__ == "__main__":
absltest.main()
| HloModuleKernelRunnerTest |
python | apache__airflow | scripts/ci/prek/check_deferrable_default.py | {
"start": 2553,
"end": 5278
} | class ____(cst.CSTTransformer):
def leave_Param(self, original_node: cst.Param, updated_node: cst.Param) -> cst.Param:
if original_node.name.value == "deferrable":
expected_default_cst = cst.parse_expression(
'conf.getboolean("operators", "default_deferrable", fallback=False)'
)
if updated_node.default and updated_node.default.deep_equals(expected_default_cst):
return updated_node
return updated_node.with_changes(default=expected_default_cst)
return updated_node
def _is_valid_deferrable_default(default: ast.AST) -> bool:
"""Check whether default is 'conf.getboolean("operators", "default_deferrable", fallback=False)'"""
return ast.unparse(default) == "conf.getboolean('operators', 'default_deferrable', fallback=False)"
def iter_check_deferrable_default_errors(module_filename: str) -> Iterator[str]:
ast_tree = ast.parse(open(module_filename).read())
visitor = DefaultDeferrableVisitor()
visitor.visit(ast_tree)
# We check the module using the ast once and then fix it through cst if needed.
# The primary reason we don't do it all through cst is performance.
if visitor.error_linenos:
_fix_invalid_deferrable_default_value(module_filename)
yield from (f"{module_filename}:{lineno}" for lineno in visitor.error_linenos)
def _fix_invalid_deferrable_default_value(module_filename: str) -> None:
context = CodemodContext(filename=module_filename)
AddImportsVisitor.add_needed_import(context, "airflow.configuration", "conf")
transformer = DefaultDeferrableTransformer()
source_cst_tree = cst.parse_module(open(module_filename).read())
modified_cst_tree = AddImportsVisitor(context).transform_module(source_cst_tree.visit(transformer))
if not source_cst_tree.deep_equals(modified_cst_tree):
with open(module_filename, "w") as writer:
writer.write(modified_cst_tree.code)
def main() -> int:
modules = itertools.chain(
glob.glob(f"{ROOT_DIR}/**/sensors/**.py", recursive=True),
glob.glob(f"{ROOT_DIR}/**/operators/**.py", recursive=True),
)
errors = [error for module in modules for error in iter_check_deferrable_default_errors(module)]
if errors:
print("Incorrect deferrable default values detected at:")
for error in errors:
print(f" {error}")
print(
"""Please set the default value of deferrable to """
""""conf.getboolean("operators", "default_deferrable", fallback=False)"\n"""
f"See: {DEFERRABLE_DOC}\n"
)
return len(errors)
if __name__ == "__main__":
sys.exit(main())
| DefaultDeferrableTransformer |
python | ansible__ansible | lib/ansible/modules/hostname.py | {
"start": 24847,
"end": 24966
} | class ____(Hostname):
platform = 'Linux'
distribution = 'Linaro'
strategy_class = FileStrategy
| LinaroHostname |
python | sympy__sympy | sympy/physics/quantum/state.py | {
"start": 18796,
"end": 18915
} | class ____(State):
"""General abstract quantum state used as a base class for Ket and Bra."""
pass
| OrthogonalState |
python | mwaskom__seaborn | seaborn/_marks/line.py | {
"start": 8301,
"end": 8845
} | class ____(Paths):
"""
A line mark drawn as an oriented segment for each datapoint.
Examples
--------
.. include:: ../docstrings/objects.Dash.rst
"""
width: MappableFloat = Mappable(.8, grouping=False)
def _setup_segments(self, data, orient):
ori = ["x", "y"].index(orient)
xys = data[["x", "y"]].to_numpy().astype(float)
segments = np.stack([xys, xys], axis=1)
segments[:, 0, ori] -= data["width"] / 2
segments[:, 1, ori] += data["width"] / 2
return segments
| Dash |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/keyfunc_dict.py | {
"start": 638,
"end": 1110
} | class ____(Base):
__tablename__ = "note"
id: Mapped[int] = mapped_column(primary_key=True)
item_id: Mapped[int] = mapped_column(ForeignKey("item.id"))
keyword: Mapped[str]
text: Mapped[Optional[str]]
def __init__(self, keyword: str, text: str):
self.keyword = keyword
self.text = text
item = Item()
item.notes["a"] = Note("a", "atext")
if typing.TYPE_CHECKING:
assert_type(list(item.notes.items()), list[tuple[str, Note]])
| Note |
python | huggingface__transformers | src/transformers/models/olmo3/modeling_olmo3.py | {
"start": 15353,
"end": 15894
} | class ____(PreTrainedModel):
config: Olmo3Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Olmo3DecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Olmo3DecoderLayer,
"attentions": Olmo3Attention,
}
@auto_docstring
| Olmo3PreTrainedModel |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1253207,
"end": 1253476
} | class ____(sgqlc.types.Type, Node, AuditEntry, OauthApplicationAuditEntryData, OrganizationAuditEntryData):
"""Audit log entry for a org.oauth_app_access_requested event."""
__schema__ = github_schema
__field_names__ = ()
| OrgOauthAppAccessRequestedAuditEntry |
python | spack__spack | lib/spack/spack/vendor/attr/exceptions.py | {
"start": 1549,
"end": 1915
} | class ____(TypeError):
"""
A ``attr.ib()`` requiring a callable has been set with a value
that is not callable.
.. versionadded:: 19.2.0
"""
def __init__(self, msg, value):
super(TypeError, self).__init__(msg, value)
self.msg = msg
self.value = value
def __str__(self):
return str(self.msg)
| NotCallableError |
python | huggingface__transformers | src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py | {
"start": 8638,
"end": 8918
} | class ____(PreTrainedModel):
config: MobileNetV2Config
base_model_prefix = "mobilenet_v2"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = False
_no_split_modules = []
@auto_docstring
| MobileNetV2PreTrainedModel |
python | fastapi__sqlmodel | docs_src/tutorial/connect/select/tutorial002.py | {
"start": 254,
"end": 2174
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
team_id: Optional[int] = Field(default=None, foreign_key="team.id")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
session.add(team_preventers)
session.add(team_z_force)
session.commit()
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team_id=team_z_force.id
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
team_id=team_preventers.id,
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
def select_heroes():
with Session(engine) as session:
statement = select(Hero, Team).join(Team)
results = session.exec(statement)
for hero, team in results:
print("Hero:", hero, "Team:", team)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | getsentry__sentry | src/sentry/dynamic_sampling/rules/helpers/latest_releases.py | {
"start": 1173,
"end": 1866
} | class ____:
"""
Class that represents a boosted release fetched from Redis.
"""
id: int
timestamp: float
environment: str | None
# We also store the cache key corresponding to this boosted release entry, in order to remove it efficiently.
cache_key: str
def extend(self, release: Release, project_id: int) -> "ExtendedBoostedRelease":
return ExtendedBoostedRelease(
id=self.id,
timestamp=self.timestamp,
environment=self.environment,
cache_key=self.cache_key,
version=release.version,
platform=_get_project_platform(project_id),
)
@dataclass(frozen=True)
| BoostedRelease |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 331592,
"end": 332240
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdateDiscussionComment"""
__schema__ = github_schema
__field_names__ = ("comment_id", "body", "client_mutation_id")
comment_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="commentId")
"""The Node ID of the discussion comment to update."""
body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body")
"""The new contents of the comment body."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateDiscussionCommentInput |
python | spyder-ide__spyder | spyder/widgets/tests/test_collectioneditor.py | {
"start": 2155,
"end": 45933
} | class ____(QWidget):
def __init__(self):
QWidget.__init__(self)
self.proxy_model = None
# =============================================================================
# Pytest Fixtures
# =============================================================================
@pytest.fixture
def nonsettable_objects_data():
"""Rturn Python objects with immutable attribs to test CollectionEditor."""
test_objs = [pandas.Period("2018-03"), pandas.Categorical([1, 2, 42])]
expected_objs = [pandas.Period("2018-03"), pandas.Categorical([1, 2, 42])]
keys_test = [["_typ", "day", "dayofyear", "hour"],
["_typ", "nbytes", "ndim"]]
return zip(test_objs, expected_objs, keys_test)
# =============================================================================
# Tests
# ============================================================================
def test_rename_variable(qtbot):
"""Test renaming of the correct variable."""
variables = {'a': 1,
'b': 2,
'c': 3,
'd': '4',
'e': 5}
editor = CollectionsEditorTableView(None, variables.copy())
qtbot.addWidget(editor)
editor.setCurrentIndex(editor.model().index(1, 0))
editor.rename_item(new_name='b2')
assert editor.model().rowCount() == 5
assert data(editor.model(), 0, 0) == 'a'
assert data(editor.model(), 1, 0) == 'c'
assert data(editor.model(), 2, 0) == 'd'
assert data(editor.model(), 3, 0) == 'e'
assert data(editor.model(), 4, 0) == 'b2'
# Reset variables and try renaming one again
new_variables = {'a': 1,
'b': 2,
'b2': 2,
'c': 3,
'd': '4',
'e': 5}
editor.set_data(new_variables.copy())
editor.adjust_columns()
editor.setCurrentIndex(editor.model().index(1, 0))
editor.rename_item(new_name='b3')
assert editor.model().rowCount() == 6
assert data(editor.model(), 0, 0) == 'a'
assert data(editor.model(), 1, 0) == 'b2'
assert data(editor.model(), 2, 0) == 'c'
assert data(editor.model(), 3, 0) == 'd'
assert data(editor.model(), 4, 0) == 'e'
assert data(editor.model(), 5, 0) == 'b3'
def test_remove_variable(qtbot):
"""Test removing of the correct variable."""
variables = {'a': 1,
'b': 2,
'c': 3,
'd': '4',
'e': 5}
editor = CollectionsEditorTableView(None, variables.copy())
qtbot.addWidget(editor)
editor.setCurrentIndex(editor.model().index(1, 0))
editor.remove_item(force=True)
assert editor.model().rowCount() == 4
assert data(editor.model(), 0, 0) == 'a'
assert data(editor.model(), 1, 0) == 'c'
assert data(editor.model(), 2, 0) == 'd'
assert data(editor.model(), 3, 0) == 'e'
# Reset variables and try removing one again
editor.set_data(variables.copy())
editor.adjust_columns()
editor.setCurrentIndex(editor.model().index(1, 0))
editor.remove_item(force=True)
assert editor.model().rowCount() == 4
assert data(editor.model(), 0, 0) == 'a'
assert data(editor.model(), 1, 0) == 'c'
assert data(editor.model(), 2, 0) == 'd'
assert data(editor.model(), 3, 0) == 'e'
def test_remove_remote_variable(qtbot, monkeypatch):
"""Test the removing of the correct remote variable."""
variables = {'a': {'type': 'int',
'size': 1,
'view': '1',
'python_type': 'int',
'numpy_type': 'Unknown'},
'b': {'type': 'int',
'size': 1,
'view': '2',
'python_type': 'int',
'numpy_type': 'Unknown'},
'c': {'type': 'int',
'size': 1,
'view': '3',
'python_type': 'int',
'numpy_type': 'Unknown'},
'd': {'type': 'str',
'size': 1,
'view': '4',
'python_type': 'int',
'numpy_type': 'Unknown'},
'e': {'type': 'int',
'size': 1,
'view': '5',
'python_type': 'int',
'numpy_type': 'Unknown'}}
editor = RemoteCollectionsEditorTableView(None, variables.copy())
qtbot.addWidget(editor)
editor.setCurrentIndex(editor.model().index(1, 0))
# Monkey patch remove variables
def remove_values(ins, names):
assert names == ['b']
data = {'a': {'type': 'int',
'size': 1,
'view': '1',
'python_type': 'int',
'numpy_type': 'Unknown'},
'c': {'type': 'int',
'size': 1,
'view': '3',
'python_type': 'int',
'numpy_type': 'Unknown'},
'd': {'type': 'str',
'size': 1,
'view': '4',
'python_type': 'int',
'numpy_type': 'Unknown'},
'e': {'type': 'int',
'size': 1,
'view': '5',
'python_type': 'int',
'numpy_type': 'Unknown'}}
editor.set_data(data)
monkeypatch.setattr(
'spyder.widgets'
'.collectionseditor.RemoteCollectionsEditorTableView.remove_values',
remove_values)
editor.remove_item(force=True)
assert editor.model().rowCount() == 4
assert data(editor.model(), 0, 0) == 'a'
assert data(editor.model(), 1, 0) == 'c'
assert data(editor.model(), 2, 0) == 'd'
assert data(editor.model(), 3, 0) == 'e'
# Reset variables and try removing one again
editor.set_data(variables.copy())
editor.adjust_columns()
editor.setCurrentIndex(editor.model().index(1, 0))
editor.remove_item(force=True)
assert editor.model().rowCount() == 4
assert data(editor.model(), 0, 0) == 'a'
assert data(editor.model(), 1, 0) == 'c'
assert data(editor.model(), 2, 0) == 'd'
assert data(editor.model(), 3, 0) == 'e'
def test_filter_rows(qtbot):
"""Test rows filtering."""
data = (
{'dfa':
{'type': 'DataFrame',
'size': (2, 1),
'view': 'Column names: 0',
'python_type': 'DataFrame',
'numpy_type': 'Unknown'},
'dfb':
{'type': 'DataFrame',
'size': (2, 1),
'view': 'Column names: 0',
'python_type': 'DataFrame',
'numpy_type': 'Unknown'}}
)
editor = RemoteCollectionsEditorTableView(None, data)
qtbot.addWidget(editor)
# Initially two rows
assert editor.model().rowCount() == 2
# Match two rows by name
editor.do_find("df")
assert editor.model().rowCount() == 2
# Match two rows by type
editor.do_find("DataFrame")
assert editor.model().rowCount() == 2
# Only one match
editor.do_find("dfb")
assert editor.model().rowCount() == 1
# No match
editor.do_find("dfbc")
assert editor.model().rowCount() == 0
def test_remote_make_data_function():
"""
Test that the function returned by make_data_function() is the expected
one.
"""
variables = {'a': {'type': 'int',
'size': 1,
'view': '1',
'python_type': 'int',
'numpy_type': 'Unknown'}}
mock_shellwidget = Mock()
editor = RemoteCollectionsEditorTableView(
None, variables, mock_shellwidget
)
index = editor.model().index(0, 0)
data_function = editor.delegate.make_data_function(index)
value = data_function()
mock_shellwidget.get_value.assert_called_once_with('a')
assert value == mock_shellwidget.get_value.return_value
def test_create_dataframeeditor_with_correct_format(qtbot):
df = pandas.DataFrame(['foo', 'bar'])
editor = CollectionsEditorTableView(None, {'df': df})
qtbot.addWidget(editor)
CONF.set('variable_explorer', 'dataframe_format', '10d')
editor.delegate.createEditor(None, None, editor.model().index(0, 3))
dataframe_editor = next(iter(editor.delegate._editors.values()))['editor']
qtbot.addWidget(dataframe_editor)
dataframe_editor.dataModel._format_spec == '10d'
def test_collectionsmodel_with_two_ints():
coll = {'y': 2, 'x': 1}
cm = CollectionsModel(MockParent(), coll)
assert cm.rowCount() == 2
assert cm.columnCount() == 4
# dict is sorted by insertion order
assert data(cm, 0, 0) == 'y'
assert data(cm, 0, 1) == 'int'
assert data(cm, 0, 2) == 1
assert data(cm, 0, 3) == '2'
assert data(cm, 1, 0) == 'x'
assert data(cm, 1, 1) == 'int'
assert data(cm, 1, 2) == 1
assert data(cm, 1, 3) == '1'
def test_collectionsmodel_with_index():
# Regression test for spyder-ide/spyder#3380,
# modified for spyder-ide/spyder#3758.
for rng_name, rng in generate_pandas_indexes().items():
coll = {'rng': rng}
cm = CollectionsModel(MockParent(), coll)
assert data(cm, 0, 0) == 'rng'
assert data(cm, 0, 1) == rng_name
assert data(cm, 0, 2) == '(20,)' or data(cm, 0, 2) == '(20L,)'
try:
assert data(cm, 0, 3) == rng._summary()
except AttributeError:
assert data(cm, 0, 3) == rng.summary()
def test_shows_dataframeeditor_when_editing_index(monkeypatch):
for __, rng in generate_pandas_indexes().items():
MockDataFrameEditor = Mock()
mockDataFrameEditor_instance = MockDataFrameEditor()
attr_to_patch_dfedit = ('spyder.plugins.variableexplorer.widgets.' +
'dataframeeditor.DataFrameEditor')
monkeypatch.setattr(attr_to_patch_dfedit, MockDataFrameEditor)
coll = {'rng': rng}
editor = CollectionsEditorTableView(None, coll)
editor.delegate.createEditor(None, None,
editor.model().index(0, 3))
mockDataFrameEditor_instance.show.assert_called_once_with()
def test_shows_collectioneditor_when_editing_frozenset():
fs = frozenset('Spyder')
editor = CollectionsEditorTableView(None, {'fs': fs})
name_to_patch = 'spyder.widgets.collectionseditor.CollectionsEditor'
with patch(name_to_patch) as MockCollectionsEditor:
editor.delegate.createEditor(
None, None, editor.model().index(0, 3)
)
MockCollectionsEditor.return_value.show.assert_called_once_with()
def test_sort_numpy_numeric_collectionsmodel():
if parse(numpy.__version__) >= parse("2.0.0"):
np20 = True
else:
np20 = False
var_list = [
numpy.float64(1e16),
numpy.float64(10),
numpy.float64(1),
numpy.float64(0.1),
numpy.float64(1e-6),
numpy.float64(0),
numpy.float64(-1e-6),
numpy.float64(-1),
numpy.float64(-10),
numpy.float64(-1e16),
]
cm = CollectionsModel(MockParent(), var_list)
assert cm.rowCount() == 10
assert cm.columnCount() == 4
# Sort by index
cm.sort(0)
assert data_table(cm, 10, 4) == [
list(range(0, 10)),
["float64"] * 10,
[1] * 10,
[
"np.float64(1e+16)" if np20 else "1e+16",
"np.float64(10.0)" if np20 else "10.0",
"np.float64(1.0)" if np20 else "1.0",
"np.float64(0.1)" if np20 else "0.1",
"np.float64(1e-06)" if np20 else "1e-06",
"np.float64(0.0)" if np20 else "0.0",
"np.float64(-1e-06)" if np20 else "-1e-06",
"np.float64(-1.0)" if np20 else "-1.0",
"np.float64(-10.0)" if np20 else "-10.0",
"np.float64(-1e+16)" if np20 else "-1e+16",
],
]
# Sort by value
cm.sort(3)
assert data_table(cm, 10, 4) == [
list(range(9, -1, -1)),
["float64"] * 10,
[1] * 10,
[
"np.float64(-1e+16)" if np20 else "-1e+16",
"np.float64(-10.0)" if np20 else "-10.0",
"np.float64(-1.0)" if np20 else "-1.0",
"np.float64(-1e-06)" if np20 else "-1e-06",
"np.float64(0.0)" if np20 else "0.0",
"np.float64(1e-06)" if np20 else "1e-06",
"np.float64(0.1)" if np20 else "0.1",
"np.float64(1.0)" if np20 else "1.0",
"np.float64(10.0)" if np20 else "10.0",
"np.float64(1e+16)" if np20 else "1e+16",
],
]
def test_sort_float_collectionsmodel():
var_list = [
float(1e16), float(10), float(1), float(0.1), float(1e-6),
float(0), float(-1e-6), float(-1), float(-10), float(-1e16)
]
cm = CollectionsModel(MockParent(), var_list)
assert cm.rowCount() == 10
assert cm.columnCount() == 4
cm.sort(0) # sort by index
assert data_table(cm, 10, 4) == [list(range(0, 10)),
[u'float']*10,
[1]*10,
['1e+16', '10.0', '1.0', '0.1',
'1e-06', '0.0', '-1e-06',
'-1.0', '-10.0', '-1e+16']]
cm.sort(3) # sort by value
assert data_table(cm, 10, 4) == [list(range(9, -1, -1)),
[u'float']*10,
[1]*10,
['-1e+16', '-10.0', '-1.0',
'-1e-06', '0.0', '1e-06',
'0.1', '1.0', '10.0', '1e+16']]
def test_sort_collectionsmodel():
var_list1 = [0, 1, 2]
var_list2 = [3, 4, 5, 6]
var_dataframe1 = pandas.DataFrame([[1, 2, 3], [20, 30, 40], [2, 2, 2]])
var_dataframe2 = pandas.DataFrame([[1, 2, 3], [20, 30, 40]])
var_series1 = pandas.Series(var_list1)
var_series2 = pandas.Series(var_list2)
coll = [1, 3, 2]
cm = CollectionsModel(MockParent(), coll)
assert cm.rowCount() == 3
assert cm.columnCount() == 4
cm.sort(0) # sort by index
assert data_table(cm, 3, 4) == [[0, 1, 2],
['int', 'int', 'int'],
[1, 1, 1],
['1', '3', '2']]
cm.sort(3) # sort by value
assert data_table(cm, 3, 4) == [[0, 2, 1],
['int', 'int', 'int'],
[1, 1, 1],
['1', '2', '3']]
coll = [1, var_list1, var_list2, var_dataframe1, var_dataframe2,
var_series1, var_series2]
cm = CollectionsModel(MockParent(), coll)
assert cm.rowCount() == 7
assert cm.columnCount() == 4
cm.sort(1) # sort by type
assert data_table(cm, 7, 4) == [
[3, 4, 5, 6, 0, 1, 2],
['DataFrame', 'DataFrame', 'Series', 'Series', 'int', 'list', 'list'],
['(3, 3)', '(2, 3)', '(3,)', '(4,)', 1, 3, 4],
['Column names: 0, 1, 2',
'Column names: 0, 1, 2',
'Series object of pandas.core.series module',
'Series object of pandas.core.series module',
'1',
'[0, 1, 2]',
'[3, 4, 5, 6]']]
cm.sort(2) # sort by size
assert data_table(cm, 7, 4) == [
[3, 4, 5, 6, 0, 1, 2],
['DataFrame', 'DataFrame', 'Series', 'Series', 'int', 'list', 'list'],
['(2, 3)', '(3,)', '(3, 3)', '(4,)', 1, 3, 4],
['Column names: 0, 1, 2',
'Column names: 0, 1, 2',
'Series object of pandas.core.series module',
'Series object of pandas.core.series module',
'1',
'[0, 1, 2]',
'[3, 4, 5, 6]']] or data_table(cm, 7, 4) == [
[0, 1, 2, 4, 5, 3, 6],
[u'int', u'list', u'list', u'DataFrame', u'Series', u'DataFrame',
u'Series'],
[1, 3, 4, u'(2, 3)', u'(3,)', u'(3, 3)', u'(4,)'],
['1',
'[0, 1, 2]',
'[3, 4, 5, 6]',
'Column names: 0, 1, 2',
'Series object of pandas.core.series module',
'Column names: 0, 1, 2',
'Series object of pandas.core.series module',
]]
def test_sort_and_fetch_collectionsmodel_with_many_rows():
coll = list(range(2*LARGE_NROWS))
cm = CollectionsModel(MockParent(), coll)
assert cm.rowCount() == cm.rows_loaded == ROWS_TO_LOAD
assert cm.columnCount() == 4
cm.sort(1) # This was causing an issue (#5232)
cm.fetchMore()
assert cm.rowCount() == 2 * ROWS_TO_LOAD
for _ in range(3):
cm.fetchMore()
assert cm.rowCount() == len(coll)
def test_dict_in_tableview_sorting(qtbot):
"""
Test clicking on a column header in an editor showing a dict cycles
through sorting in ascending, descending and insertion order.
"""
my_dict = {2: 3, 3: 1, 1: 2}
editor = CollectionsEditorTableView(None, my_dict)
qtbot.addWidget(editor)
editor.show()
# Test that dict is displayed in insertion order
assert data_col(editor.model(), 0) == [2, 3, 1]
assert data_col(editor.model(), 3) == ['3', '1', '2']
# Click on header of first column
header = editor.horizontalHeader()
x_col0 = header.sectionPosition(0) + header.sectionSize(0) // 2
with qtbot.waitSignal(header.sectionClicked, timeout=200):
qtbot.mouseClick(
header.viewport(), Qt.LeftButton, pos=QPoint(x_col0, 1)
)
# Test that dict is sorted by key
assert data_col(editor.model(), 0) == [1, 2, 3]
assert data_col(editor.model(), 3) == ['2', '3', '1']
# Click on header of first column
with qtbot.waitSignal(header.sectionClicked, timeout=200):
qtbot.mouseClick(
header.viewport(), Qt.LeftButton, pos=QPoint(x_col0, 1)
)
# Test that dict is sorted by key in reverse order
assert data_col(editor.model(), 0) == [3, 2, 1]
assert data_col(editor.model(), 3) == ['1', '3', '2']
# Click on header of first column
with qtbot.waitSignal(header.sectionClicked, timeout=200):
qtbot.mouseClick(
header.viewport(), Qt.LeftButton, pos=QPoint(x_col0, 1)
)
# Test that dict is displayed in insertion order
assert data_col(editor.model(), 0) == [2, 3, 1]
assert data_col(editor.model(), 3) == ['3', '1', '2']
# Click on header of fourth column
x_col3 = header.sectionPosition(3) + header.sectionSize(3) // 2
with qtbot.waitSignal(header.sectionClicked, timeout=2000):
qtbot.mouseClick(
header.viewport(), Qt.LeftButton, pos=QPoint(x_col3, 1)
)
# Test that dict is sorted by value
assert data_col(editor.model(), 0) == [3, 1, 2]
assert data_col(editor.model(), 3) == ['1', '2', '3']
# Click on header of fourth column
with qtbot.waitSignal(header.sectionClicked, timeout=200):
qtbot.mouseClick(
header.viewport(), Qt.LeftButton, pos=QPoint(x_col3, 1)
)
# Test that dict is sorted by value in reverse order
assert data_col(editor.model(), 0) == [2, 1, 3]
assert data_col(editor.model(), 3) == ['3', '2', '1']
# Click on header of first column
header = editor.horizontalHeader()
with qtbot.waitSignal(header.sectionClicked, timeout=200):
qtbot.mouseClick(
header.viewport(), Qt.LeftButton, pos=QPoint(x_col0, 1)
)
# Test that dict is sorted by key
assert data_col(editor.model(), 0) == [1, 2, 3]
assert data_col(editor.model(), 3) == ['2', '3', '1']
def test_rename_and_duplicate_item_in_collection_editor():
collections = {'list': ([1, 2, 3], False, True),
'tuple': ((1, 2, 3), False, False),
'dict': ({'a': 1, 'b': 2}, True, True)}
for coll, rename_enabled, duplicate_enabled in collections.values():
coll_copy = copy.copy(coll)
editor = CollectionsEditorTableView(None, coll)
assert editor.rename_action.isEnabled()
assert editor.duplicate_action.isEnabled()
editor.setCurrentIndex(editor.model().index(0, 0))
editor.refresh_menu()
assert editor.rename_action.isEnabled() == rename_enabled
assert editor.duplicate_action.isEnabled() == duplicate_enabled
if isinstance(coll, list):
editor.duplicate_item()
assert editor.source_model.get_data() == coll_copy + [coll_copy[0]]
def test_collectioneditorwidget_refresh_action_disabled():
"""
Test that the Refresh button is disabled by default.
"""
lst = [1, 2, 3, 4]
widget = CollectionsEditorWidget(None, lst.copy())
assert not widget.refresh_action.isEnabled()
def test_collectioneditor_refresh():
"""
Test that after pressing the refresh button, the value of the editor is
replaced by the return value of the data_function.
"""
old_list = [1, 2, 3, 4]
new_list = [3, 1, 4, 1, 5]
editor = CollectionsEditor(None, data_function=lambda: new_list)
editor.setup(old_list)
assert editor.get_value() == old_list
assert editor.widget.refresh_action.isEnabled()
editor.widget.refresh_action.trigger()
assert editor.get_value() == new_list
@pytest.mark.parametrize('result', [QMessageBox.Yes, QMessageBox.No])
def test_collectioneditor_refresh_after_edit(result):
"""
Test that after changing a value in the collections editor, refreshing the
editor opens a dialog box (which asks for confirmation), and that the
editor is only refreshed if the user clicks Yes.
"""
old_list = [1, 2, 3, 4]
edited_list = [1, 2, 3, 5]
new_list = [3, 1, 4, 1, 5]
editor = CollectionsEditor(None, data_function=lambda: new_list)
editor.setup(old_list)
editor.show()
model = editor.widget.editor.source_model
model.setData(model.index(3, 3), '5')
with patch('spyder.widgets.collectionseditor.QMessageBox.question',
return_value=result) as mock_question:
editor.widget.refresh_action.trigger()
mock_question.assert_called_once()
editor.accept()
if result == QMessageBox.Yes:
assert editor.get_value() == new_list
else:
assert editor.get_value() == edited_list
def test_collectioneditor_refresh_when_variable_deleted(qtbot):
"""
Test that if the variable is deleted and then the editor is refreshed
(resulting in data_function raising a KeyError), a critical dialog box
is displayed and that the editor is closed.
"""
def datafunc():
raise KeyError
lst = [1, 2, 3, 4]
editor = CollectionsEditor(None, data_function=datafunc)
editor.setup(lst)
with patch('spyder.widgets.collectionseditor.QMessageBox'
'.critical') as mock_critical, \
qtbot.waitSignal(editor.rejected, timeout=0):
editor.widget.refresh_action.trigger()
mock_critical.assert_called_once()
def test_collectioneditor_refresh_nested():
"""
Open an editor for a list with a tuple nested inside, and then open another
editor for the nested tuple. Test that refreshing the second editor works.
"""
old_list = [1, 2, 3, (4, 5)]
new_list = [1, 2, 3, (4,)]
editor = CollectionsEditor(None, data_function=lambda: new_list)
editor.setup(old_list)
view = editor.widget.editor
view.edit(view.model().index(3, 3))
nested_editor = list(view.delegate._editors.values())[0]['editor']
assert nested_editor.get_value() == (4, 5)
nested_editor.widget.refresh_action.trigger()
assert nested_editor.get_value() == (4,)
def test_edit_datetime(monkeypatch):
"""
Test datetimes are editable and NaT values are correctly handled.
Regression test for spyder-ide/spyder#13557 and spyder-ide/spyder#8329
"""
variables = [pandas.NaT, datetime.date.today()]
editor_list = CollectionsEditorTableView(None, variables)
# Test that the NaT value cannot be edited on the variable explorer
editor_list_value = editor_list.delegate.createEditor(
None, None, editor_list.model().index(0, 3))
assert editor_list_value is None
# Test that a date can be edited on the variable explorer
editor_list_value = editor_list.delegate.createEditor(
None, None, editor_list.model().index(1, 3))
assert isinstance(editor_list_value, QDateEdit)
def test_edit_mutable_and_immutable_types(monkeypatch):
"""
Test that mutable objs/vals are editable in VarExp; immutable ones aren't.
Regression test for spyder-ide/spyder#5991.
"""
MockQLineEdit = Mock()
attr_to_patch_qlineedit = ('spyder.plugins.variableexplorer.widgets.' +
'collectionsdelegate.QLineEdit')
monkeypatch.setattr(attr_to_patch_qlineedit, MockQLineEdit)
MockTextEditor = Mock()
attr_to_patch_textedit = ('spyder.plugins.variableexplorer.widgets.' +
'collectionsdelegate.TextEditor')
monkeypatch.setattr(attr_to_patch_textedit, MockTextEditor)
MockQDateTimeEdit = Mock()
attr_to_patch_qdatetimeedit = ('spyder.plugins.variableexplorer.widgets.' +
'collectionsdelegate.QDateTimeEdit')
monkeypatch.setattr(attr_to_patch_qdatetimeedit, MockQDateTimeEdit)
MockCollectionsEditor = Mock()
mockCollectionsEditor_instance = MockCollectionsEditor()
attr_to_patch_coledit = ('spyder.widgets.' +
'collectionseditor.CollectionsEditor')
monkeypatch.setattr(attr_to_patch_coledit, MockCollectionsEditor)
list_test = [1, "012345678901234567901234567890123456789012",
datetime.datetime(2017, 12, 24, 7, 9), [1, 2, 3], (2, "eggs")]
tup_test = tuple(list_test)
# Tests for mutable type (list) #
editor_list = CollectionsEditorTableView(None, list_test)
# Directly editable values inside list
editor_list_value = editor_list.delegate.createEditor(
None, None, editor_list.model().index(0, 3))
assert editor_list_value is not None
assert MockQLineEdit.call_count == 1
# Text Editor for long text inside list
editor_list.delegate.createEditor(None, None,
editor_list.model().index(1, 3))
assert MockTextEditor.call_count == 2
assert not MockTextEditor.call_args[1]["readonly"]
# Datetime inside list
editor_list_datetime = editor_list.delegate.createEditor(
None, None, editor_list.model().index(2, 3))
assert editor_list_datetime is not None
assert MockQDateTimeEdit.call_count == 1
# List inside list
editor_list.delegate.createEditor(None, None,
editor_list.model().index(3, 3))
assert mockCollectionsEditor_instance.show.call_count == 1
assert not mockCollectionsEditor_instance.setup.call_args[1]["readonly"]
# Tuple inside list
editor_list.delegate.createEditor(None, None,
editor_list.model().index(4, 3))
assert mockCollectionsEditor_instance.show.call_count == 2
assert mockCollectionsEditor_instance.setup.call_args[1]["readonly"]
# Tests for immutable type (tuple) #
editor_tup = CollectionsEditorTableView(None, tup_test)
# Directly editable values inside tuple
editor_tup_value = editor_tup.delegate.createEditor(
None, None, editor_tup.model().index(0, 3))
assert editor_tup_value is None
assert MockQLineEdit.call_count == 1
# Text Editor for long text inside tuple
editor_tup.delegate.createEditor(None, None,
editor_tup.model().index(1, 3))
assert MockTextEditor.call_count == 4
assert MockTextEditor.call_args[1]["readonly"]
# Datetime inside tuple
editor_tup_datetime = editor_tup.delegate.createEditor(
None, None, editor_tup.model().index(2, 3))
assert editor_tup_datetime is None
assert MockQDateTimeEdit.call_count == 1
# List inside tuple
editor_tup.delegate.createEditor(None, None,
editor_tup.model().index(3, 3))
assert mockCollectionsEditor_instance.show.call_count == 3
assert mockCollectionsEditor_instance.setup.call_args[1]["readonly"]
# Tuple inside tuple
editor_tup.delegate.createEditor(None, None,
editor_tup.model().index(4, 3))
assert mockCollectionsEditor_instance.show.call_count == 4
assert mockCollectionsEditor_instance.setup.call_args[1]["readonly"]
@pytest.mark.parametrize(
'exponent, error_expected',
[(32_766, False), (32_767, True)]
)
def test_edit_large_int(monkeypatch, exponent, error_expected):
"""
Test editing large int values either works or displays an error.
Regression test for spyder-ide/spyder#21751.
"""
num = 10 ** exponent + 1
editor = CollectionsEditorTableView(None, [num])
index = editor.model().index(0, 3)
with patch(
'spyder.plugins.variableexplorer.widgets'
'.collectionsdelegate.QLineEdit'
) as MockQLineEdit:
with patch(
'spyder.plugins.variableexplorer.widgets'
'.collectionsdelegate.QMessageBox'
) as MockQMessageBox:
editor.delegate.createEditor(None, None, index)
if error_expected:
MockQLineEdit.assert_not_called()
MockQMessageBox.assert_called_once()
else:
MockQLineEdit.assert_called_once()
MockQMessageBox.assert_not_called()
line_edit_instance = Mock(spec=QLineEdit)
editor.delegate.setEditorData(line_edit_instance, index)
expected = '1' + (exponent - 1) * '0' + '1'
line_edit_instance.setText.assert_called_once_with(expected)
@flaky(max_runs=3)
def test_view_module_in_coledit():
"""
Test that modules don't produce an error when opening in Variable Explorer.
Also check that they are set as readonly. Regression test for
spyder-ide/spyder#6080.
"""
editor = CollectionsEditor()
editor.setup(os, "module_test", readonly=False)
assert editor.widget.editor.readonly
def test_notimplementederror_multiindex():
"""
Test that the NotImplementedError when scrolling a MultiIndex is handled.
Regression test for spyder-ide/spyder#6284.
"""
time_deltas = [pandas.Timedelta(minutes=minute)
for minute in range(5, 35, 5)]
time_delta_multiindex = pandas.MultiIndex.from_product([[0, 1, 2, 3, 4],
time_deltas])
col_model = CollectionsModel(MockParent(), time_delta_multiindex)
assert col_model.rowCount() == col_model.rows_loaded == ROWS_TO_LOAD
assert col_model.columnCount() == 4
col_model.fetchMore()
assert col_model.rowCount() == 2 * ROWS_TO_LOAD
for _ in range(3):
col_model.fetchMore()
assert col_model.rowCount() == 5 * ROWS_TO_LOAD
def test_editor_parent_set(monkeypatch):
"""
Test that editors have their parent set so they close with Spyder.
Regression test for spyder-ide/spyder#5696.
"""
# Mocking and setup
test_parent = QWidget()
MockCollectionsEditor = Mock()
attr_to_patch_coledit = ('spyder.widgets.' +
'collectionseditor.CollectionsEditor')
monkeypatch.setattr(attr_to_patch_coledit, MockCollectionsEditor)
MockArrayEditor = Mock()
attr_to_patch_arredit = ('spyder.plugins.variableexplorer.widgets.' +
'arrayeditor.ArrayEditor')
monkeypatch.setattr(attr_to_patch_arredit, MockArrayEditor)
MockDataFrameEditor = Mock()
attr_to_patch_dfedit = ('spyder.plugins.variableexplorer.widgets.' +
'dataframeeditor.DataFrameEditor')
monkeypatch.setattr(attr_to_patch_dfedit, MockDataFrameEditor)
MockTextEditor = Mock()
attr_to_patch_textedit = ('spyder.plugins.variableexplorer.widgets.' +
'collectionsdelegate.TextEditor')
monkeypatch.setattr(attr_to_patch_textedit, MockTextEditor)
MockObjectExplorer = Mock()
attr_to_patch_objectexplorer = ('spyder.plugins.variableexplorer.widgets.'
+ 'objectexplorer.ObjectExplorer')
monkeypatch.setattr(attr_to_patch_objectexplorer, MockObjectExplorer)
editor_data = [[0, 1, 2, 3, 4],
numpy.array([1.0, 42.0, 1337.0]),
pandas.DataFrame([[1, 2, 3], [20, 30, 40]]),
os,
"012345678901234567890123456789012345678901234567890123456"]
col_editor = CollectionsEditorTableView(test_parent, editor_data)
assert col_editor.parent() is test_parent
for idx, mock_class in enumerate([MockCollectionsEditor,
MockArrayEditor,
MockDataFrameEditor,
MockObjectExplorer,
MockTextEditor]):
col_editor.delegate.createEditor(col_editor.parent(), None,
col_editor.model().index(idx, 3))
assert mock_class.call_count == 1 + (idx // 4)
assert mock_class.call_args[1]["parent"] is test_parent
def test_xml_dom_element_view():
    """
    Test that XML DOM ``Element``s can be viewed in CollectionsEditor.

    Regression test for spyder-ide/spyder#5642.
    """
    xml_file = path.join(LOCATION, 'dom_element_test.xml')
    with open(xml_file) as fileobj:
        dom = parseString(fileobj.read())
    note_element = dom.getElementsByTagName("note")[0]

    editor = CollectionsEditor(None)
    editor.setup(note_element)
    editor.show()
    assert editor.get_value()
    editor.accept()
def test_pandas_dateoffset_view():
    """
    Test that pandas ``DateOffset`` objects can be viewed in CollectionsEditor.

    Regression test for spyder-ide/spyder#6729.
    """
    offset = pandas.DateOffset()
    editor = CollectionsEditor(None)
    editor.setup(offset)
    editor.show()
    assert editor.get_value()
    editor.accept()
def test_set_nonsettable_objects(nonsettable_objects_data):
    """
    Test that errors trying to set attributes in ColEdit are handled properly.

    Unit regression test for issues spyder-ide/spyder#6727 and
    spyder-ide/spyder#6728.
    """
    for test_obj, expected_obj, keys in nonsettable_objects_data:
        model = CollectionsModel(None, test_obj)
        model.load_all()
        indexes = [model.get_index_from_key(key) for key in keys]
        # Every attempt to overwrite a non-settable attribute must fail...
        for index in indexes:
            assert not model.set_value(index, "2")
        # ...and leave the object untouched.  Attributes are compared one by
        # one because numpy deliberately breaks __eq__ on whole objects; the
        # "_typ" key is exempted from the comparison.
        for key in keys:
            assert (key == "_typ"
                    or getattr(model.get_data().__obj__, key)
                    == getattr(expected_obj, key))
@flaky(max_runs=3)
@pytest.mark.no_xvfb
def test_edit_nonsettable_objects(qtbot, nonsettable_objects_data):
    """
    Test that errors trying to edit attributes in ColEdit are handled properly.

    Integration regression test for issues spyder-ide/spyder#6727 and
    spyder-ide/spyder#6728.
    """
    for test_obj, expected_obj, keys in nonsettable_objects_data:
        col_editor = CollectionsEditor(None)
        col_editor.setup(test_obj)
        with qtbot.waitExposed(col_editor):
            col_editor.show()
        view = col_editor.widget.editor
        indicies = [view.source_model.get_index_from_key(key) for key in keys]
        # Three Right presses — presumably moves focus to the editable value
        # column (other tests edit via column 3); TODO confirm.
        for _ in range(3):
            qtbot.keyClick(view, Qt.Key_Right)
        last_row = -1
        rows_to_test = [index.row() for index in indicies]
        # For each targeted row: walk down to it, open the inline editor with
        # Space, clear and type "2", then commit by moving down.  All of
        # these edits are expected to be rejected without raising.
        for row in rows_to_test:
            for _ in range(row - last_row - 1):
                qtbot.keyClick(view, Qt.Key_Down)
            qtbot.keyClick(view, Qt.Key_Space)
            qtbot.keyClick(view.focusWidget(), Qt.Key_Backspace)
            qtbot.keyClicks(view.focusWidget(), "2")
            qtbot.keyClick(view.focusWidget(), Qt.Key_Down)
            last_row = row
        qtbot.wait(100)
        # Per-attribute comparison due to numpy's deliberate breakage of
        # __eq__ comparison on whole objects.
        assert all([key == "_typ" or (getattr(col_editor.get_value(), key)
                    == getattr(expected_obj, key)) for key in keys])
        col_editor.accept()
        qtbot.wait(200)
        # Same reason as above; values must also survive accepting the dialog.
        assert all([key == "_typ" or (getattr(col_editor.get_value(), key)
                    == getattr(expected_obj, key)) for key in keys])
        # Finally, the original object itself must remain unchanged.
        if getattr(test_obj, "_typ", None) is None:
            keys.remove("_typ")
        assert all([getattr(test_obj, key)
                    == getattr(expected_obj, key) for key in keys])
def test_collectionseditor_with_class_having_buggy_copy(qtbot):
    """
    Test that editor for object whose .copy() returns a different type is
    readonly; cf. spyder-ide/spyder#6936.
    """
    class DictWithInheritedCopy(dict):
        # dict.copy() returns a plain dict, not this subclass.
        pass

    data = DictWithInheritedCopy({1: 2})
    editor = CollectionsEditor()
    editor.setup(data)
    assert editor.widget.editor.readonly
def test_collectionseditor_with_class_having_correct_copy(qtbot):
    """
    Test that editor for object whose .copy() returns the same type is not
    readonly; cf. spyder-ide/spyder#6936.
    """
    class CopyPreservingDict(dict):
        # Override copy() so the subclass type is preserved.
        def copy(self):
            return CopyPreservingDict(self)

    data = CopyPreservingDict({1: 2})
    editor = CollectionsEditor()
    editor.setup(data)
    assert not editor.widget.editor.readonly
def test_collectionseditor_when_clicking_on_header_and_large_rows(qtbot):
    """
    Test that sorting works when clicking in its header and there's a
    large number of rows.
    """
    editor = CollectionsEditor()
    editor.setup([1] * 10000)
    editor.show()

    view = editor.widget.editor
    header = view.horizontalHeader()
    # Trigger the sort by clicking the header.  It should finish quickly
    # because only a small number of rows is in display.
    with qtbot.waitSignal(header.sectionClicked, timeout=200):
        qtbot.mouseClick(header.viewport(), Qt.LeftButton, pos=QPoint(1, 1))

    # After sorting, the first visible cell holds the last index (9999).
    assert data(view.model(), 0, 0) == 9999
    editor.accept()
def test_dicts_with_mixed_types_as_key(qtbot):
    """
    Test that we can show dictionaries with mixed data types as keys.

    This is a regression for spyder-ide/spyder#13481.
    """
    mixed_keys_dict = {1: 'red', 'Y': 'yellow'}
    editor = CollectionsEditor()
    editor.setup(mixed_keys_dict)
    assert editor.widget.editor.source_model.keys == [1, 'Y']
def test_dicts_natural_sorting(qtbot):
    """
    Test that natural sorting actually does what it should do
    """
    import random
    shuffled = list(range(100))
    random.shuffle(shuffled)
    dictionary = {'test{}'.format(i): None for i in shuffled}

    # A human sorts test3 before test100, whereas plain lexicographic order
    # would give test1, test10, test11, ..., test2, test20, ...
    expected = ['test{}'.format(i) for i in range(100)]
    assert sorted(dictionary.keys(), key=natsort) == expected, \
        'Function failed'

    editor = CollectionsEditor()
    editor.setup(dictionary)
    editor.widget.editor.source_model.sort(0)
    assert editor.widget.editor.source_model.keys == expected, \
        'GUI sorting fail'
def test_dicts_natural_sorting_mixed_types():
    """
    Test that natural sorting actually does what it should do.
    testing for issue 13733, as mixed types were sorted incorrectly.

    Sorting for other columns will be tested as well.
    """
    import pandas as pd
    dictionary = {'DSeries': pd.Series(dtype=int), 'aStr': 'algName',
                  'kDict': {2: 'asd', 3: 2}}
    # Kept in a variable, as it might change later to reflect string length.
    str_size = get_size(dictionary['aStr'])

    editor = CollectionsEditor()
    editor.setup(dictionary)
    cm = editor.widget.editor.source_model

    # Initially the model reflects insertion order.
    assert cm.keys == ['DSeries', 'aStr', 'kDict']
    assert cm.types == ['Series', 'str', 'dict']
    assert cm.sizes == [(0,), str_size, 2]
    assert data_table(cm, 3, 3) == [
        ["DSeries", "aStr", "kDict"],
        ["Series", "str", "dict"],
        ["(0,)", str_size, 2],
    ]

    # Inserting an item keeps the ordering consistent.
    editor.widget.editor.new_value('List', [1, 2, 3])
    assert data_table(cm, 4, 3) == [
        ["DSeries", "aStr", "kDict", "List"],
        ["Series", "str", "dict", "list"],
        ["(0,)", str_size, 2, 3],
    ]

    # Sort by key (column 0).
    cm.sort(0)
    assert data_table(cm, 4, 3) == [['aStr', 'DSeries', 'kDict', 'List'],
                                    ['str', 'Series', 'dict', 'list'],
                                    [str_size, '(0,)', 2, 3]]

    # Sort by type (column 1).
    cm.sort(1)
    assert data_table(cm, 4, 3) == [['DSeries', 'kDict', 'List', 'aStr'],
                                    ['Series', 'dict', 'list', 'str'],
                                    ['(0,)', 2, 3, str_size]]

    # Sort by size (column 2).
    cm.sort(2)
    assert data_table(cm, 4, 3) == [['DSeries', 'kDict', 'List', 'aStr'],
                                    ['Series', 'dict', 'list', 'str'],
                                    ['(0,)', 2, 3, str_size]]
def test_collectioneditor_plot(qtbot):
    """
    Test that plotting a list from the collection editor calls the .plot()
    function in the associated namespace browser and that the executing
    `plot_function` plots the list.
    """
    values = [4, 2]
    namespacebrowser_mock = Mock()
    widget = CollectionsEditorWidget(
        None, {'list': values}, namespacebrowser=namespacebrowser_mock)
    qtbot.addWidget(widget)

    # Requesting a plot should delegate to the namespace browser...
    widget.editor.plot('list', 'plot')
    namespacebrowser_mock.plot.assert_called_once()

    # ...handing it a callable that draws the list on a given figure.
    plot_func = namespacebrowser_mock.plot.call_args.args[0]
    figure_mock = Mock()
    plot_func(figure_mock)
    figure_mock.subplots.return_value.plot.assert_called_once_with(values)
def test_collectionseditor_select_row_button(qtbot):
    """Test that the button to select rows is working as expected."""
    data = {"a": 10, "b": "This is a string"}
    editor = CollectionsEditor()
    editor.setup(data)
    editor.show()

    # This is necessary so that Qt paints
    qtbot.wait(300)

    # Coordinates to position the cursor on top of the select row button for
    # the first row
    table_view = editor.widget.editor
    x = (
        # Left x coordinate for the first row
        + table_view.columnViewportPosition(0)
        + table_view.width()
        - SELECT_ROW_BUTTON_SIZE // 2
    )
    y = (
        # Top y coordinate for the first row
        + table_view.rowViewportPosition(0)
        + table_view.rowHeight(0) // 2
    )

    # Move cursor so the select-row button for the first row becomes visible
    qtbot.mouseMove(table_view.viewport(), QPoint(x, y), delay=100)

    # Click on that position and check the first row was selected.
    # Note: We can't use LeftButton here because it edits the row. However, it
    # works as expected in regular usage.
    qtbot.mouseClick(table_view.viewport(), Qt.MiddleButton, pos=QPoint(x, y))
    assert table_view.selected_rows() == {0}

    # Click again and check the row was deselected
    qtbot.mouseClick(table_view.viewport(), Qt.MiddleButton, pos=QPoint(x, y))
    assert table_view.selected_rows() == set()
# Allow running this test file directly with pytest.
if __name__ == "__main__":
    pytest.main()
| MockParent |
python | pytorch__pytorch | test/distributed/checkpoint/e2e/test_fsdp_ep.py | {
"start": 1719,
"end": 4128
} | class ____(DTensorTestBase, VerifyStateDictMixin):
@property
def world_size(self) -> int:
return min(8, torch.accelerator.device_count())
@with_comms
@skip_if_lt_x_gpu(8)
@with_temp_dir
def test_e2e(self):
model = TopModel(self.rank).to(self.device_type)
mesh_fsdp_tp = init_device_mesh(
self.device_type, (2, 4), mesh_dim_names=("dp", "tp")
)
# TODO: we are using an internal API atm. Change to a public API once it is ready.
mesh_fsdp_ep = mesh_fsdp_tp["dp"]
mesh_fsdp_ep._root_mesh = None
mesh_fsdp = init_device_mesh(self.device_type, (8,))
for i, l in enumerate(model.second.ep_layers):
model.second.ep_layers[i] = FSDP(
l, use_orig_params=True, device_mesh=mesh_fsdp_ep
)
model.second = FSDP(model.second, use_orig_params=True, device_mesh=mesh_fsdp)
model = FSDP(model, use_orig_params=True, device_mesh=mesh_fsdp)
optim = torch.optim.Adam(model.parameters(), lr=0.1)
msd, osd = get_state_dict(model, optim)
# FSDP only params
for key in (
"net.0.weight",
"net.0.bias",
"second.net.0.weight",
"second.net.0.bias",
):
msd_v = msd[key]
osd_v = osd["state"][key]["exp_avg"]
for v in (msd_v, osd_v):
self.assertTrue(isinstance(v, DTensor))
self.assertEqual(tuple(v.device_mesh.mesh), tuple(range(8)))
# FSDP/EP params
layer = self.rank % 4
ranks = (layer, layer + 4)
for i in range(4):
for key in (
f"second.ep_layers.{i}.net1.0.weight",
f"second.ep_layers.{i}.net1.0.bias",
f"second.ep_layers.{i}.net2.0.weight",
f"second.ep_layers.{i}.net2.0.bias",
):
if layer != i:
self.assertTrue(key not in msd)
else:
msd_v = msd[key]
osd_v = osd["state"][key]["exp_avg"]
for v in (msd_v, osd_v):
self.assertTrue(isinstance(v, DTensor))
self.assertEqual(tuple(v.device_mesh.mesh), ranks)
self.assertEqual(set(osd["state"].keys()), set(msd.keys()))
if __name__ == "__main__":
run_tests()
| TestFSDPWithEP |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 973296,
"end": 973690
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("SponsorableItem", graphql_name="node")
"""The item at the end of the edge."""
| SponsorableItemEdge |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_aux/test_remote_logging.py | {
"start": 8843,
"end": 14354
} | class ____:
"""Tests opensearch configuration behaviors."""
def test_should_not_generate_secret_document_if_opensearch_disabled(self):
docs = render_chart(
values={"opensearch": {"enabled": False}},
show_only=[OS_SECRET_TEMPLATE],
)
assert len(docs) == 0
def test_should_raise_error_when_connection_not_provided(self):
with pytest.raises(CalledProcessError) as ex_ctx:
render_chart(
values={
"opensearch": {
"enabled": True,
}
},
show_only=[OS_SECRET_TEMPLATE],
)
assert (
"You must set one of the values opensearch.secretName or opensearch.connection "
"when using OpenSearch" in ex_ctx.value.stderr.decode()
)
def test_should_raise_error_when_conflicting_options(self):
with pytest.raises(CalledProcessError) as ex_ctx:
render_chart(
values={
"opensearch": {
"enabled": True,
"secretName": "my-test",
"connection": {
"user": "username!@#$%%^&*()",
"pass": "password!@#$%%^&*()",
"host": "opensearchhostname",
},
},
},
show_only=[OS_SECRET_TEMPLATE],
)
assert (
"You must not set both values opensearch.secretName and opensearch.connection"
in ex_ctx.value.stderr.decode()
)
def test_scheduler_should_add_log_port_when_local_executor_and_opensearch_disabled(self):
docs = render_chart(
values={"executor": "LocalExecutor"},
show_only=[SCHEDULER_DEPLOYMENT_TEMPLATE],
)
assert jmespath.search("spec.template.spec.containers[0].ports", docs[0]) == [
{"name": "worker-logs", "containerPort": 8793}
]
def test_scheduler_should_omit_log_port_when_opensearch_enabled(self):
docs = render_chart(
values={
"executor": "LocalExecutor",
"opensearch": {
"enabled": True,
"secretName": "test-elastic-secret",
},
},
show_only=[SCHEDULER_DEPLOYMENT_TEMPLATE],
)
assert "ports" not in jmespath.search("spec.template.spec.containers[0]", docs[0])
def test_env_should_omit_opensearch_host_var_if_os_disabled(self):
docs = render_chart(
values={},
show_only=[SCHEDULER_DEPLOYMENT_TEMPLATE],
)
scheduler_env_keys = jmespath.search("spec.template.spec.containers[0].env[*].name", docs[0])
assert "AIRFLOW__OPENSEARCH__HOST" not in scheduler_env_keys
def test_env_should_add_opensearch_host_var_if_os_enabled(self):
docs = render_chart(
values={
"opensearch": {
"enabled": True,
"secretName": "test-opensearch-secret",
},
},
show_only=[SCHEDULER_DEPLOYMENT_TEMPLATE],
)
scheduler_env = jmespath.search("spec.template.spec.containers[0].env", docs[0])
assert {
"name": "AIRFLOW__OPENSEARCH__HOST",
"valueFrom": {"secretKeyRef": {"name": "test-opensearch-secret", "key": "connection"}},
} in scheduler_env
def test_airflow_cfg_should_set_remote_logging_false_if_os_disabled(self):
docs = render_chart(
values={},
show_only=[CONFIGMAP_TEMPLATE],
)
airflow_cfg_text = jmespath.search('data."airflow.cfg"', docs[0])
core_lines = CORE_CFG_REGEX.findall(airflow_cfg_text)[0].strip().splitlines()
assert "remote_logging = False" in core_lines
logging_lines = LOGGING_CFG_REGEX.findall(airflow_cfg_text)[0].strip().splitlines()
assert "remote_logging = False" in logging_lines
def test_airflow_cfg_should_set_remote_logging_true_if_os_enabled(self):
docs = render_chart(
values={
"opensearch": {
"enabled": True,
"secretName": "test-elastic-secret",
},
},
show_only=[CONFIGMAP_TEMPLATE],
)
airflow_cfg_text = jmespath.search('data."airflow.cfg"', docs[0])
core_lines = CORE_CFG_REGEX.findall(airflow_cfg_text)[0].strip().splitlines()
assert "remote_logging = True" in core_lines
logging_lines = LOGGING_CFG_REGEX.findall(airflow_cfg_text)[0].strip().splitlines()
assert "remote_logging = True" in logging_lines
def test_should_raise_error_when_both_elasticsearch_and_opensearch_enabled():
with pytest.raises(CalledProcessError) as ex_ctx:
render_chart(
values={
"elasticsearch": {
"enabled": True,
"secretName": "test-elastic-secret",
},
"opensearch": {
"enabled": True,
"secretName": "test-elastic-secret",
},
},
show_only=[SCHEDULER_DEPLOYMENT_TEMPLATE],
)
assert (
"You must not set both values elasticsearch.enabled and opensearch.enabled"
in ex_ctx.value.stderr.decode()
)
| TestOpenSearchConfig |
python | pypa__pip | src/pip/_vendor/pygments/lexer.py | {
"start": 15957,
"end": 16252
} | class ____:
"""
Indicates a state or state action (e.g. #pop) to apply.
For example default('#pop') is equivalent to ('', Token, '#pop')
Note that state tuples may be used as well.
.. versionadded:: 2.0
"""
def __init__(self, state):
self.state = state
| default |
python | scikit-learn__scikit-learn | sklearn/linear_model/tests/test_sgd.py | {
"start": 2173,
"end": 72779
} | class ____(linear_model.SGDOneClassSVM):
def fit(self, X, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDOneClassSVM.fit(self, X, *args, **kw)
def partial_fit(self, X, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDOneClassSVM.partial_fit(self, X, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDOneClassSVM.decision_function(self, X, *args, **kw)
def SGDClassifier(**kwargs):
_update_kwargs(kwargs)
return linear_model.SGDClassifier(**kwargs)
def SGDRegressor(**kwargs):
_update_kwargs(kwargs)
return linear_model.SGDRegressor(**kwargs)
def SGDOneClassSVM(**kwargs):
_update_kwargs(kwargs)
return linear_model.SGDOneClassSVM(**kwargs)
def SparseSGDClassifier(**kwargs):
_update_kwargs(kwargs)
return _SparseSGDClassifier(**kwargs)
def SparseSGDRegressor(**kwargs):
_update_kwargs(kwargs)
return _SparseSGDRegressor(**kwargs)
def SparseSGDOneClassSVM(**kwargs):
_update_kwargs(kwargs)
return _SparseSGDOneClassSVM(**kwargs)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array(
[
[-1, 1],
[-0.75, 0.5],
[-1.5, 1.5],
[1, 1],
[0.75, 0.5],
[1.5, 1.5],
[-1, -1],
[0, -0.5],
[1, -1],
]
)
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array(
[
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
]
)
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array(
[
[1, 0.9, 0.8, 0, 0, 0],
[1, 0.84, 0.98, 0, 0, 0],
[1, 0.96, 0.88, 0, 0, 0],
[1, 0.91, 0.99, 0, 0, 0],
[0, 0, 0, 0.89, 0.91, 1],
[0, 0, 0, 0.79, 0.84, 1],
[0, 0, 0, 0.91, 0.95, 1],
[0, 0, 0, 0.93, 1, 1],
]
)
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
###############################################################################
# Common Test Case to classification and regression
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(klass, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if klass in (SparseSGDClassifier, SparseSGDRegressor):
decay = 0.01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(klass, X, Y, lr):
# Test that explicit warm restart...
clf = klass(alpha=0.01, eta0=0.01, shuffle=False, learning_rate=lr)
clf.fit(X, Y)
clf2 = klass(alpha=0.001, eta0=0.01, shuffle=False, learning_rate=lr)
clf2.fit(X, Y, coef_init=clf.coef_.copy(), intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = klass(
alpha=0.01, eta0=0.01, shuffle=False, warm_start=True, learning_rate=lr
)
clf3.fit(X, Y)
assert clf3.t_ == clf.t_
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert clf3.t_ == clf2.t_
assert_array_almost_equal(clf3.coef_, clf2.coef_)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_warm_start(klass, lr):
_test_warm_start(klass, X, Y, lr)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_input_format(klass):
# Input format tests.
clf = klass(alpha=0.01, shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
with pytest.raises(ValueError):
clf.fit(X, Y_)
@pytest.mark.parametrize("lr", ["pa1", "pa2"])
@pytest.mark.parametrize(
["est", "loss"], [(SGDClassifier, "squared_hinge"), (SGDRegressor, "squared_error")]
)
def test_learning_rate_PA_raises(lr, est, loss):
"""Test that SGD raises with forbidden loss for passive-aggressive algo."""
est = est(loss=loss, learning_rate=lr)
with pytest.raises(ValueError):
est.fit(X, Y)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_clone(klass):
# Test whether clone works ok.
clf = klass(alpha=0.01, penalty="l1")
clf = clone(clf)
clf.set_params(penalty="l2")
clf.fit(X, Y)
clf2 = klass(alpha=0.01, penalty="l2")
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
@pytest.mark.parametrize(
"klass",
[
SGDClassifier,
SparseSGDClassifier,
SGDRegressor,
SparseSGDRegressor,
SGDOneClassSVM,
SparseSGDOneClassSVM,
],
)
def test_plain_has_no_average_attr(klass):
clf = klass(average=True, eta0=0.01)
clf.fit(X, Y)
assert hasattr(clf, "_average_coef")
assert hasattr(clf, "_average_intercept")
assert hasattr(clf, "_standard_intercept")
assert hasattr(clf, "_standard_coef")
clf = klass()
clf.fit(X, Y)
assert not hasattr(clf, "_average_coef")
assert not hasattr(clf, "_average_intercept")
assert not hasattr(clf, "_standard_intercept")
assert not hasattr(clf, "_standard_coef")
@pytest.mark.parametrize(
"klass",
[
SGDClassifier,
SparseSGDClassifier,
SGDRegressor,
SparseSGDRegressor,
SGDOneClassSVM,
SparseSGDOneClassSVM,
],
)
def test_late_onset_averaging_not_reached(klass):
clf1 = klass(average=600)
clf2 = klass()
for _ in range(100):
if is_classifier(clf1):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
if klass in [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]:
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
elif klass in [SGDOneClassSVM, SparseSGDOneClassSVM]:
assert_allclose(clf1.offset_, clf2.offset_)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_late_onset_averaging_reached(klass):
eta0 = 0.001
alpha = 0.0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = klass(
average=7,
learning_rate="constant",
loss="squared_error",
eta0=eta0,
alpha=alpha,
max_iter=2,
shuffle=False,
)
clf2 = klass(
average=False,
learning_rate="constant",
loss="squared_error",
eta0=eta0,
alpha=alpha,
max_iter=1,
shuffle=False,
)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = asgd(
klass,
X,
Y_encode,
eta0,
alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_,
)
assert_array_almost_equal(clf1.coef_.ravel(), average_weights.ravel(), decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_early_stopping(klass):
X = iris.data[iris.target > 0]
Y = iris.target[iris.target > 0]
for early_stopping in [True, False]:
max_iter = 1000
clf = klass(early_stopping=early_stopping, tol=1e-3, max_iter=max_iter).fit(
X, Y
)
assert clf.n_iter_ < max_iter
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_adaptive_longer_than_constant(klass):
clf1 = klass(learning_rate="adaptive", eta0=0.01, tol=1e-3, max_iter=100)
clf1.fit(iris.data, iris.target)
clf2 = klass(learning_rate="constant", eta0=0.01, tol=1e-3, max_iter=100)
clf2.fit(iris.data, iris.target)
assert clf1.n_iter_ > clf2.n_iter_
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_validation_set_not_used_for_training(klass):
X, Y = iris.data, iris.target
validation_fraction = 0.4
seed = 42
shuffle = False
max_iter = 10
clf1 = klass(
early_stopping=True,
random_state=np.random.RandomState(seed),
validation_fraction=validation_fraction,
learning_rate="constant",
eta0=0.01,
tol=None,
max_iter=max_iter,
shuffle=shuffle,
)
clf1.fit(X, Y)
assert clf1.n_iter_ == max_iter
clf2 = klass(
early_stopping=False,
random_state=np.random.RandomState(seed),
learning_rate="constant",
eta0=0.01,
tol=None,
max_iter=max_iter,
shuffle=shuffle,
)
if is_classifier(clf2):
cv = StratifiedShuffleSplit(test_size=validation_fraction, random_state=seed)
else:
cv = ShuffleSplit(test_size=validation_fraction, random_state=seed)
idx_train, idx_val = next(cv.split(X, Y))
idx_train = np.sort(idx_train) # remove shuffling
clf2.fit(X[idx_train], Y[idx_train])
assert clf2.n_iter_ == max_iter
assert_array_equal(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_n_iter_no_change(klass):
X, Y = iris.data, iris.target
# test that n_iter_ increases monotonically with n_iter_no_change
for early_stopping in [True, False]:
n_iter_list = [
klass(
early_stopping=early_stopping,
n_iter_no_change=n_iter_no_change,
tol=1e-4,
max_iter=1000,
)
.fit(X, Y)
.n_iter_
for n_iter_no_change in [2, 3, 10]
]
assert_array_equal(n_iter_list, sorted(n_iter_list))
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_not_enough_sample_for_early_stopping(klass):
# test an error is raised if the training or validation set is empty
clf = klass(early_stopping=True, validation_fraction=0.99)
with pytest.raises(ValueError):
clf.fit(X3, Y3)
@pytest.mark.parametrize("Estimator", [SGDClassifier, SGDRegressor])
@pytest.mark.parametrize("l1_ratio", [0, 0.7, 1])
def test_sgd_l1_ratio_not_used(Estimator, l1_ratio):
"""Check that l1_ratio is not used when penalty is not 'elasticnet'"""
clf1 = Estimator(penalty="l1", l1_ratio=None, random_state=0).fit(X, Y)
clf2 = Estimator(penalty="l1", l1_ratio=l1_ratio, random_state=0).fit(X, Y)
assert_allclose(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize(
"Estimator", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_sgd_failing_penalty_validation(Estimator):
clf = Estimator(penalty="elasticnet", l1_ratio=None)
with pytest.raises(
ValueError, match="l1_ratio must be set when penalty is 'elasticnet'"
):
clf.fit(X, Y)
# TODO(1.10): remove this test
@pytest.mark.parametrize(
"klass",
[
SGDClassifier,
SparseSGDClassifier,
SGDRegressor,
SparseSGDRegressor,
SGDOneClassSVM,
SparseSGDOneClassSVM,
],
)
def test_power_t_limits(klass):
"""Check that a warning is raised when `power_t` is negative."""
# Check that negative values of `power_t` raise a warning
clf = klass(power_t=-1.0)
with pytest.warns(
FutureWarning, match="Negative values for `power_t` are deprecated"
):
clf.fit(X, Y)
# Check that values of 'power_t in range [0, inf) do not raise a warning
with warnings.catch_warnings(record=True) as w:
clf = klass(power_t=0.5)
clf.fit(X, Y)
assert len(w) == 0
###############################################################################
# Classification Test Case
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_clf(klass):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log_loss", "modified_huber"):
clf = klass(
penalty="l2",
alpha=0.01,
fit_intercept=True,
loss=loss,
max_iter=10,
shuffle=True,
)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDOneClassSVM, SparseSGDOneClassSVM]
)
def test_provide_coef(klass):
"""Check that the shape of `coef_init` is validated."""
with pytest.raises(ValueError, match="Provided coef_init does not match dataset"):
klass().fit(X, Y, coef_init=np.zeros((3,)))
@pytest.mark.parametrize(
"klass, fit_params",
[
(SGDClassifier, {"intercept_init": np.zeros((3,))}),
(SparseSGDClassifier, {"intercept_init": np.zeros((3,))}),
(SGDOneClassSVM, {"offset_init": np.zeros((3,))}),
(SparseSGDOneClassSVM, {"offset_init": np.zeros((3,))}),
],
)
def test_set_intercept_offset(klass, fit_params):
"""Check that `intercept_init` or `offset_init` is validated."""
sgd_estimator = klass()
with pytest.raises(ValueError, match="does not match dataset"):
sgd_estimator.fit(X, Y, **fit_params)
@pytest.mark.parametrize(
"klass", [SGDClassifier, SparseSGDClassifier, SGDRegressor, SparseSGDRegressor]
)
def test_sgd_early_stopping_with_partial_fit(klass):
"""Check that we raise an error for `early_stopping` used with
`partial_fit`.
"""
err_msg = "early_stopping should be False with partial_fit"
with pytest.raises(ValueError, match=err_msg):
klass(early_stopping=True).partial_fit(X, Y)
@pytest.mark.parametrize(
"klass, fit_params",
[
(SGDClassifier, {"intercept_init": 0}),
(SparseSGDClassifier, {"intercept_init": 0}),
(SGDOneClassSVM, {"offset_init": 0}),
(SparseSGDOneClassSVM, {"offset_init": 0}),
],
)
def test_set_intercept_offset_binary(klass, fit_params):
"""Check that we can pass a scaler with binary classification to
`intercept_init` or `offset_init`."""
klass().fit(X5, Y5, **fit_params)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_average_binary_computed_correctly(klass):
# Checks the SGDClassifier correctly computes the average weights
eta = 0.1
alpha = 2.0
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = klass(
loss="squared_error",
learning_rate="constant",
eta0=eta,
alpha=alpha,
fit_intercept=True,
max_iter=1,
average=True,
shuffle=False,
)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_, average_weights, decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_set_intercept_to_intercept(klass):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = klass().fit(X5, Y5)
klass().fit(X5, Y5, intercept_init=clf.intercept_)
clf = klass().fit(X, Y)
klass().fit(X, Y, intercept_init=clf.intercept_)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_at_least_two_labels(klass):
# Target must have at least two labels
clf = klass(alpha=0.01, max_iter=20)
with pytest.raises(ValueError):
clf.fit(X2, np.ones(9))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_weight_class_balanced(klass):
# partial_fit with class_weight='balanced' not supported"""
regex = (
r"class_weight 'balanced' is not supported for "
r"partial_fit\. In order to use 'balanced' weights, "
r"use compute_class_weight\('balanced', classes=classes, y=y\). "
r"In place of y you can use a large enough sample "
r"of the full training set target to properly "
r"estimate the class frequency distributions\. "
r"Pass the resulting weights as the class_weight "
r"parameter\."
)
with pytest.raises(ValueError, match=regex):
klass(class_weight="balanced").partial_fit(X, Y, classes=np.unique(Y))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass(klass):
    """Multi-class fit: check coef_/intercept_/decision_function shapes and predictions."""
    # Multi-class test case
    clf = klass(alpha=0.01, max_iter=20).fit(X2, Y2)
    assert clf.coef_.shape == (3, 2)
    assert clf.intercept_.shape == (3,)
    assert clf.decision_function([[0, 0]]).shape == (1, 3)
    pred = clf.predict(T2)
    assert_array_equal(pred, true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_average(klass):
    """Averaged multi-class SGD must match the naive per-class ASGD reference."""
    eta = 0.001
    alpha = 0.01
    # Multi-class average test case
    clf = klass(
        loss="squared_error",
        learning_rate="constant",
        eta0=eta,
        alpha=alpha,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    np_Y2 = np.array(Y2)
    clf.fit(X2, np_Y2)
    classes = np.unique(np_Y2)
    for i, cl in enumerate(classes):
        # Reduce to the one-vs-all binary problem for class `cl` and compare
        # against the reference asgd implementation.
        y_i = np.ones(np_Y2.shape[0])
        y_i[np_Y2 != cl] = -1
        average_coef, average_intercept = asgd(klass, X2, y_i, eta, alpha)
        assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
        assert_almost_equal(average_intercept, clf.intercept_[i], decimal=16)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_with_init_coef(klass):
    """Multi-class fit with explicitly provided coef_init / intercept_init."""
    # Multi-class test case
    clf = klass(alpha=0.01, max_iter=20)
    clf.fit(X2, Y2, coef_init=np.zeros((3, 2)), intercept_init=np.zeros(3))
    assert clf.coef_.shape == (3, 2)
    # Previously written `assert clf.intercept_.shape, (3,)`, which only
    # checked that the shape tuple was truthy ((3,) was the assert message).
    # Compare against the expected shape instead.
    assert clf.intercept_.shape == (3,)
    pred = clf.predict(T2)
    assert_array_equal(pred, true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_njobs(klass):
    """Multi-class fit with multiple jobs behaves like the sequential fit."""
    # Multi-class test case with multi-core support
    clf = klass(alpha=0.01, max_iter=20, n_jobs=2).fit(X2, Y2)
    assert clf.coef_.shape == (3, 2)
    assert clf.intercept_.shape == (3,)
    assert clf.decision_function([[0, 0]]).shape == (1, 3)
    pred = clf.predict(T2)
    assert_array_equal(pred, true_result2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_set_coef_multiclass(klass):
    """coef_init/intercept_init shapes are validated for multi-class problems."""
    # Checks coef_init and intercept_init shape for multi-class
    # problems
    # Provided coef_ does not match dataset
    clf = klass()
    with pytest.raises(ValueError):
        clf.fit(X2, Y2, coef_init=np.zeros((2, 2)))
    # Provided coef_ does match dataset
    clf = klass().fit(X2, Y2, coef_init=np.zeros((3, 2)))
    # Provided intercept_ does not match dataset
    clf = klass()
    with pytest.raises(ValueError):
        clf.fit(X2, Y2, intercept_init=np.zeros((1,)))
    # Provided intercept_ does match dataset.
    clf = klass().fit(X2, Y2, intercept_init=np.zeros((3,)))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_predict_proba_method_access(klass):
    """predict_proba/predict_log_proba exist only for probabilistic losses."""
    # Checks that SGDClassifier predict_proba and predict_log_proba methods
    # can either be accessed or raise an appropriate error message
    # otherwise. See
    # https://github.com/scikit-learn/scikit-learn/issues/10938 for more
    # details.
    for loss in linear_model.SGDClassifier.loss_functions:
        clf = SGDClassifier(loss=loss)
        if loss in ("log_loss", "modified_huber"):
            assert hasattr(clf, "predict_proba")
            assert hasattr(clf, "predict_log_proba")
        else:
            inner_msg = "probability estimates are not available for loss={!r}".format(
                loss
            )
            assert not hasattr(clf, "predict_proba")
            assert not hasattr(clf, "predict_log_proba")
            with pytest.raises(
                AttributeError, match="has no attribute 'predict_proba'"
            ) as exec_info:
                clf.predict_proba
            # The AttributeError must chain a loss-specific cause message.
            assert isinstance(exec_info.value.__cause__, AttributeError)
            assert inner_msg in str(exec_info.value.__cause__)
            with pytest.raises(
                AttributeError, match="has no attribute 'predict_log_proba'"
            ) as exec_info:
                clf.predict_log_proba
            assert isinstance(exec_info.value.__cause__, AttributeError)
            assert inner_msg in str(exec_info.value.__cause__)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_proba(klass):
    """Probability estimates: availability, binary/multiclass consistency."""
    # Check SGD.predict_proba
    # Hinge loss does not allow for conditional prob estimate.
    # We cannot use the factory here, because it defines predict_proba
    # anyway.
    clf = SGDClassifier(loss="hinge", alpha=0.01, max_iter=10, tol=None).fit(X, Y)
    assert not hasattr(clf, "predict_proba")
    assert not hasattr(clf, "predict_log_proba")
    # log and modified_huber losses can output probability estimates
    # binary case
    for loss in ["log_loss", "modified_huber"]:
        clf = klass(loss=loss, alpha=0.01, max_iter=10)
        clf.fit(X, Y)
        p = clf.predict_proba([[3, 2]])
        assert p[0, 1] > 0.5
        p = clf.predict_proba([[-1, -1]])
        assert p[0, 1] < 0.5
        # If predict_proba is 0, we get "RuntimeWarning: divide by zero encountered
        # in log". We avoid it here.
        with np.errstate(divide="ignore"):
            p = clf.predict_log_proba([[3, 2]])
            assert p[0, 1] > p[0, 0]
            p = clf.predict_log_proba([[-1, -1]])
            assert p[0, 1] < p[0, 0]
    # log loss multiclass probability estimates
    clf = klass(loss="log_loss", alpha=0.01, max_iter=10).fit(X2, Y2)
    d = clf.decision_function([[0.1, -0.1], [0.3, 0.2]])
    p = clf.predict_proba([[0.1, -0.1], [0.3, 0.2]])
    # argmax of probabilities must agree with argmax of decision values
    assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
    assert_almost_equal(p[0].sum(), 1)
    assert np.all(p[0] >= 0)
    p = clf.predict_proba([[-1, -1]])
    d = clf.decision_function([[-1, -1]])
    assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
    lp = clf.predict_log_proba([[3, 2]])
    p = clf.predict_proba([[3, 2]])
    assert_array_almost_equal(np.log(p), lp)
    lp = clf.predict_log_proba([[-1, -1]])
    p = clf.predict_proba([[-1, -1]])
    assert_array_almost_equal(np.log(p), lp)
    # Modified Huber multiclass probability estimates; requires a separate
    # test because the hard zero/one probabilities may destroy the
    # ordering present in decision_function output.
    clf = klass(loss="modified_huber", alpha=0.01, max_iter=10)
    clf.fit(X2, Y2)
    d = clf.decision_function([[3, 2]])
    p = clf.predict_proba([[3, 2]])
    if klass != SparseSGDClassifier:
        assert np.argmax(d, axis=1) == np.argmax(p, axis=1)
    else:  # XXX the sparse test gets a different X2 (?)
        assert np.argmin(d, axis=1) == np.argmin(p, axis=1)
    # the following sample produces decision_function values < -1,
    # which would cause naive normalization to fail (see comment
    # in SGDClassifier.predict_proba)
    x = X.mean(axis=0)
    d = clf.decision_function([x])
    if np.all(d < -1):  # XXX not true in sparse test case (why?)
        p = clf.predict_proba([x])
        assert_array_almost_equal(p[0], [1 / 3.0] * 3)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sgd_l1(klass):
    """L1 penalty drives coef_ to sparsity; sparsify() and pickling survive."""
    # Test L1 regularization
    n = len(X4)
    rng = np.random.RandomState(13)
    idx = np.arange(n)
    rng.shuffle(idx)
    X = X4[idx, :]
    Y = Y4[idx]
    clf = klass(
        penalty="l1",
        alpha=0.2,
        fit_intercept=False,
        max_iter=2000,
        tol=None,
        shuffle=False,
    )
    clf.fit(X, Y)
    # The uninformative middle features must be driven exactly to zero.
    assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
    pred = clf.predict(X)
    assert_array_equal(pred, Y)
    # test sparsify with dense inputs
    clf.sparsify()
    assert sp.issparse(clf.coef_)
    pred = clf.predict(X)
    assert_array_equal(pred, Y)
    # pickle and unpickle with sparse coef_
    clf = pickle.loads(pickle.dumps(clf))
    assert sp.issparse(clf.coef_)
    pred = clf.predict(X)
    assert_array_equal(pred, Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_class_weights(klass):
    """Down-weighting a class rotates the decision hyperplane accordingly."""
    # Test class weights.
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]
    clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False, class_weight=None)
    clf.fit(X, y)
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weights to class 1
    clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False, class_weight={1: 0.001})
    clf.fit(X, y)
    # now the hyperplane should rotate clock-wise and
    # the prediction on this point should shift
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_equal_class_weight(klass):
    """Uniform class weights approximately match no class weights at all."""
    # Test if equal class weights approx. equals no class weights.
    X = [[1, 0], [1, 0], [0, 1], [0, 1]]
    y = [0, 0, 1, 1]
    clf = klass(alpha=0.1, max_iter=1000, class_weight=None)
    clf.fit(X, y)
    X = [[1, 0], [0, 1]]
    y = [0, 1]
    clf_weighted = klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5, 1: 0.5})
    clf_weighted.fit(X, y)
    # should be similar up to some epsilon due to learning rate schedule
    assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_wrong_class_weight_label(klass):
    """A class_weight key that is not an existing label raises ValueError."""
    # ValueError due to not existing class label.
    clf = klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5})
    with pytest.raises(ValueError):
        clf.fit(X, Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_weights_multiplied(klass):
    """class_weight and sample_weight combine multiplicatively."""
    # Tests that class_weight and sample_weight are multiplicative
    class_weights = {1: 0.6, 2: 0.3}
    rng = np.random.RandomState(0)
    sample_weights = rng.random_sample(Y4.shape[0])
    multiplied_together = np.copy(sample_weights)
    multiplied_together[Y4 == 1] *= class_weights[1]
    multiplied_together[Y4 == 2] *= class_weights[2]
    clf1 = klass(alpha=0.1, max_iter=20, class_weight=class_weights)
    clf2 = klass(alpha=0.1, max_iter=20)
    clf1.fit(X4, Y4, sample_weight=sample_weights)
    clf2.fit(X4, Y4, sample_weight=multiplied_together)
    assert_almost_equal(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_balanced_weight(klass):
    """class_weight='balanced' restores accuracy on artificially imbalanced data."""
    # Test class weights for imbalanced data
    # compute reference metrics on iris dataset that is quite balanced by
    # default
    X, y = iris.data, iris.target
    X = scale(X)
    idx = np.arange(X.shape[0])
    rng = np.random.RandomState(6)
    rng.shuffle(idx)
    X = X[idx]
    y = y[idx]
    clf = klass(alpha=0.0001, max_iter=1000, class_weight=None, shuffle=False).fit(X, y)
    f1 = metrics.f1_score(y, clf.predict(X), average="weighted")
    assert_almost_equal(f1, 0.96, decimal=1)
    # make the same prediction using balanced class_weight
    clf_balanced = klass(
        alpha=0.0001, max_iter=1000, class_weight="balanced", shuffle=False
    ).fit(X, y)
    f1 = metrics.f1_score(y, clf_balanced.predict(X), average="weighted")
    assert_almost_equal(f1, 0.96, decimal=1)
    # Make sure that in the balanced case it does not change anything
    # to use "balanced"
    assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
    # build an very very imbalanced dataset out of iris data
    X_0 = X[y == 0, :]
    y_0 = y[y == 0]
    X_imbalanced = np.vstack([X] + [X_0] * 10)
    y_imbalanced = np.concatenate([y] + [y_0] * 10)
    # fit a model on the imbalanced data without class weight info
    clf = klass(max_iter=1000, class_weight=None, shuffle=False)
    clf.fit(X_imbalanced, y_imbalanced)
    y_pred = clf.predict(X)
    assert metrics.f1_score(y, y_pred, average="weighted") < 0.96
    # fit a model with balanced class_weight enabled
    clf = klass(max_iter=1000, class_weight="balanced", shuffle=False)
    clf.fit(X_imbalanced, y_imbalanced)
    y_pred = clf.predict(X)
    assert metrics.f1_score(y, y_pred, average="weighted") > 0.96
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_sample_weights(klass):
    """Per-sample weights shift the decision boundary like class weights do."""
    # Test weights on individual samples
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]
    clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False)
    clf.fit(X, y)
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weights to class 1
    clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
    # now the hyperplane should rotate clock-wise and
    # the prediction on this point should shift
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize(
    "klass", [SGDClassifier, SparseSGDClassifier, SGDOneClassSVM, SparseSGDOneClassSVM]
)
def test_wrong_sample_weights(klass):
    """sample_weight with a shape mismatching X raises ValueError."""
    # Test if ValueError is raised if sample_weight has wrong shape
    if klass in [SGDClassifier, SparseSGDClassifier]:
        clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False)
    elif klass in [SGDOneClassSVM, SparseSGDOneClassSVM]:
        clf = klass(nu=0.1, max_iter=1000, fit_intercept=False)
    # provided sample_weight too long
    with pytest.raises(ValueError):
        clf.fit(X, Y, sample_weight=np.arange(7))
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_exception(klass):
    """First partial_fit call without `classes` raises ValueError."""
    clf = klass(alpha=0.01)
    # classes was not specified
    with pytest.raises(ValueError):
        clf.partial_fit(X3, Y3)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_binary(klass):
    """partial_fit on a binary problem: shapes, buffer reuse, predictions."""
    third = X.shape[0] // 3
    clf = klass(alpha=0.01)
    classes = np.unique(Y)
    clf.partial_fit(X[:third], Y[:third], classes=classes)
    assert clf.coef_.shape == (1, X.shape[1])
    assert clf.intercept_.shape == (1,)
    assert clf.decision_function([[0, 0]]).shape == (1,)
    id1 = id(clf.coef_.data)
    clf.partial_fit(X[third:], Y[third:])
    id2 = id(clf.coef_.data)
    # check that coef_ hasn't been re-allocated between partial_fit calls.
    # The original `assert id1, id2` only asserted that id1 was truthy
    # (id2 was the assert message), so it never compared the two ids.
    assert id1 == id2
    y_pred = clf.predict(T)
    assert_array_equal(y_pred, true_result)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_multiclass(klass):
    """partial_fit on a multi-class problem: shapes and coef_ buffer reuse."""
    third = X2.shape[0] // 3
    clf = klass(alpha=0.01)
    classes = np.unique(Y2)
    clf.partial_fit(X2[:third], Y2[:third], classes=classes)
    assert clf.coef_.shape == (3, X2.shape[1])
    assert clf.intercept_.shape == (3,)
    assert clf.decision_function([[0, 0]]).shape == (1, 3)
    id1 = id(clf.coef_.data)
    clf.partial_fit(X2[third:], Y2[third:])
    id2 = id(clf.coef_.data)
    # check that coef_ hasn't been re-allocated between partial_fit calls.
    # The original `assert id1, id2` was a truthiness check with id2 as the
    # message; compare the buffer ids for real.
    assert id1 == id2
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_multiclass_average(klass):
    """Averaged SGD keeps correct shapes across successive partial_fit calls."""
    third = X2.shape[0] // 3
    clf = klass(alpha=0.01, average=X2.shape[0])
    classes = np.unique(Y2)
    clf.partial_fit(X2[:third], Y2[:third], classes=classes)
    assert clf.coef_.shape == (3, X2.shape[1])
    assert clf.intercept_.shape == (3,)
    clf.partial_fit(X2[third:], Y2[third:])
    assert clf.coef_.shape == (3, X2.shape[1])
    assert clf.intercept_.shape == (3,)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_fit_then_partial_fit(klass):
    """partial_fit works after an initial fit in the multi-class case."""
    # Partial_fit should work after initial fit in the multiclass case.
    # Non-regression test for #2496; fit would previously produce a
    # Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
    clf = klass()
    clf.fit(X2, Y2)
    clf.partial_fit(X2, Y2)  # no exception here
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit_classif(klass, lr):
    """fit(max_iter=2) and two partial_fit passes give matching decisions."""
    for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
        clf = klass(alpha=0.01, eta0=0.01, max_iter=2, learning_rate=lr, shuffle=False)
        clf.fit(X_, Y_)
        y_pred = clf.decision_function(T_)
        t = clf.t_
        classes = np.unique(Y_)
        clf = klass(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False)
        for i in range(2):
            clf.partial_fit(X_, Y_, classes=classes)
        y_pred2 = clf.decision_function(T_)
        # same number of weight updates seen -> same decision values
        assert clf.t_ == t
        assert_array_almost_equal(y_pred, y_pred2, decimal=2)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_regression_losses(klass):
    """SGDClassifier also accepts regression losses; each must separate the data."""
    random_state = np.random.RandomState(1)
    clf = klass(
        alpha=0.01,
        learning_rate="constant",
        eta0=0.1,
        loss="epsilon_insensitive",
        random_state=random_state,
    )
    clf.fit(X, Y)
    assert 1.0 == np.mean(clf.predict(X) == Y)
    clf = klass(
        alpha=0.01,
        learning_rate="constant",
        eta0=0.1,
        loss="squared_epsilon_insensitive",
        random_state=random_state,
    )
    clf.fit(X, Y)
    assert 1.0 == np.mean(clf.predict(X) == Y)
    clf = klass(alpha=0.01, loss="huber", random_state=random_state)
    clf.fit(X, Y)
    assert 1.0 == np.mean(clf.predict(X) == Y)
    clf = klass(
        alpha=0.01,
        learning_rate="constant",
        eta0=0.01,
        loss="squared_error",
        random_state=random_state,
    )
    clf.fit(X, Y)
    assert 1.0 == np.mean(clf.predict(X) == Y)
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_warm_start_multiclass(klass):
    """Warm-start equivalence also holds in the multi-class setting."""
    _test_warm_start(klass, X2, Y2, "optimal")
@pytest.mark.parametrize("klass", [SGDClassifier, SparseSGDClassifier])
def test_multiple_fit(klass):
    """Repeated fit calls with differently shaped inputs must not break."""
    # Test multiple calls of fit w/ different shaped inputs.
    clf = klass(alpha=0.01, shuffle=False)
    clf.fit(X, Y)
    assert hasattr(clf, "coef_")
    # Non-regression test: try fitting with a different label set.
    y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
    clf.fit(X[:, :-1], y)
###############################################################################
# Regression Test Case
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_reg(klass):
    """Smoke test: SGD regression on a symmetric problem gives equal weights."""
    # Check that SGD gives any results.
    clf = klass(alpha=0.1, max_iter=2, fit_intercept=False)
    clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
    assert clf.coef_[0] == clf.coef_[1]
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_averaged_computed_correctly(klass):
    """Averaged SGD regression matches the naive ASGD reference exactly."""
    # Tests the average regressor matches the naive implementation
    eta = 0.001
    alpha = 0.01
    n_samples = 20
    n_features = 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    w = rng.normal(size=n_features)
    # simple linear function without noise
    y = np.dot(X, w)
    clf = klass(
        loss="squared_error",
        learning_rate="constant",
        eta0=eta,
        alpha=alpha,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    clf.fit(X, y)
    average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
    assert_array_almost_equal(clf.coef_, average_weights, decimal=16)
    assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_averaged_partial_fit(klass):
    """Two partial_fit halves yield the same average as a single fit pass."""
    # Tests whether the partial fit yields the same average as the fit
    eta = 0.001
    alpha = 0.01
    n_samples = 20
    n_features = 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    w = rng.normal(size=n_features)
    # simple linear function without noise
    y = np.dot(X, w)
    clf = klass(
        loss="squared_error",
        learning_rate="constant",
        eta0=eta,
        alpha=alpha,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    clf.partial_fit(X[: int(n_samples / 2)][:], y[: int(n_samples / 2)])
    clf.partial_fit(X[int(n_samples / 2) :][:], y[int(n_samples / 2) :])
    average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
    assert_array_almost_equal(clf.coef_, average_weights, decimal=16)
    assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_average_sparse(klass):
    """Averaging is correct on data containing zero entries (sparse-friendly)."""
    # Checks the average weights on data with 0s
    eta = 0.001
    alpha = 0.01
    clf = klass(
        loss="squared_error",
        learning_rate="constant",
        eta0=eta,
        alpha=alpha,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    n_samples = Y3.shape[0]
    clf.partial_fit(X3[: int(n_samples / 2)][:], Y3[: int(n_samples / 2)])
    clf.partial_fit(X3[int(n_samples / 2) :][:], Y3[int(n_samples / 2) :])
    average_weights, average_intercept = asgd(klass, X3, Y3, eta, alpha)
    assert_array_almost_equal(clf.coef_, average_weights, decimal=16)
    assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_least_squares_fit(klass):
    """Squared-error SGD fits a noiseless and a noisy linear target."""
    xmin, xmax = -5, 5
    n_samples = 100
    rng = np.random.RandomState(0)
    X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
    # simple linear function without noise
    y = 0.5 * X.ravel()
    clf = klass(loss="squared_error", alpha=0.1, max_iter=20, fit_intercept=False)
    clf.fit(X, y)
    score = clf.score(X, y)
    assert score > 0.99
    # simple linear function with noise
    y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
    clf = klass(loss="squared_error", alpha=0.1, max_iter=20, fit_intercept=False)
    clf.fit(X, y)
    score = clf.score(X, y)
    assert score > 0.5
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_epsilon_insensitive(klass):
    """Epsilon-insensitive loss fits a noiseless and a noisy linear target."""
    xmin, xmax = -5, 5
    n_samples = 100
    rng = np.random.RandomState(0)
    X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
    # simple linear function without noise
    y = 0.5 * X.ravel()
    clf = klass(
        loss="epsilon_insensitive",
        epsilon=0.01,
        alpha=0.1,
        max_iter=20,
        fit_intercept=False,
    )
    clf.fit(X, y)
    score = clf.score(X, y)
    assert score > 0.99
    # simple linear function with noise
    y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
    clf = klass(
        loss="epsilon_insensitive",
        epsilon=0.01,
        alpha=0.1,
        max_iter=20,
        fit_intercept=False,
    )
    clf.fit(X, y)
    score = clf.score(X, y)
    assert score > 0.5
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_sgd_huber_fit(klass):
    """Huber loss fits a noiseless and a noisy linear target."""
    xmin, xmax = -5, 5
    n_samples = 100
    rng = np.random.RandomState(0)
    X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
    # simple linear function without noise
    y = 0.5 * X.ravel()
    clf = klass(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20, fit_intercept=False)
    clf.fit(X, y)
    score = clf.score(X, y)
    assert score > 0.99
    # simple linear function with noise
    y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
    clf = klass(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20, fit_intercept=False)
    clf.fit(X, y)
    score = clf.score(X, y)
    assert score > 0.5
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_elasticnet_convergence(klass):
    """Elastic-net SGD converges to the same solution as coordinate descent."""
    # Check that the SGD output is consistent with coordinate descent
    n_samples, n_features = 1000, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    # ground_truth linear model that generate y from X and to which the
    # models should converge if the regularizer would be set to 0.0
    ground_truth_coef = rng.randn(n_features)
    y = np.dot(X, ground_truth_coef)
    # XXX: alpha = 0.1 seems to cause convergence problems
    for alpha in [0.01, 0.001]:
        for l1_ratio in [0.5, 0.8, 1.0]:
            cd = linear_model.ElasticNet(
                alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False
            )
            cd.fit(X, y)
            sgd = klass(
                penalty="elasticnet",
                max_iter=50,
                alpha=alpha,
                l1_ratio=l1_ratio,
                fit_intercept=False,
            )
            sgd.fit(X, y)
            err_msg = (
                "cd and sgd did not converge to comparable "
                "results for alpha=%f and l1_ratio=%f" % (alpha, l1_ratio)
            )
            assert_almost_equal(cd.coef_, sgd.coef_, decimal=2, err_msg=err_msg)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_partial_fit(klass):
    """Regressor partial_fit: output shapes and coef_ buffer reuse."""
    third = X.shape[0] // 3
    clf = klass(alpha=0.01)
    clf.partial_fit(X[:third], Y[:third])
    assert clf.coef_.shape == (X.shape[1],)
    assert clf.intercept_.shape == (1,)
    assert clf.predict([[0, 0]]).shape == (1,)
    id1 = id(clf.coef_.data)
    clf.partial_fit(X[third:], Y[third:])
    id2 = id(clf.coef_.data)
    # check that coef_ hasn't been re-allocated between partial_fit calls.
    # The original `assert id1, id2` only checked id1's truthiness (id2 was
    # the assert message); compare the two buffer ids explicitly.
    assert id1 == id2
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit(klass, lr):
    """fit(max_iter=2) and two partial_fit passes give matching predictions."""
    clf = klass(alpha=0.01, max_iter=2, eta0=0.01, learning_rate=lr, shuffle=False)
    clf.fit(X, Y)
    y_pred = clf.predict(T)
    t = clf.t_
    clf = klass(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False)
    for i in range(2):
        clf.partial_fit(X, Y)
    y_pred2 = clf.predict(T)
    assert clf.t_ == t
    assert_array_almost_equal(y_pred, y_pred2, decimal=2)
@pytest.mark.parametrize("klass", [SGDRegressor, SparseSGDRegressor])
def test_loss_function_epsilon(klass):
    """set_params(epsilon=...) must propagate to the huber loss function."""
    clf = klass(epsilon=0.9)
    clf.set_params(epsilon=0.1)
    assert clf.loss_functions["huber"][1] == 0.1
###############################################################################
# SGD One Class SVM Test Case
# a simple implementation of ASGD to use for testing SGDOneClassSVM
def asgd_oneclass(klass, X, eta, nu, coef_init=None, offset_init=0.0):
    """Naive averaged-SGD reference for the linear One-Class SVM objective.

    Mirrors the update performed by (Sparse)SGDOneClassSVM with a constant
    learning rate ``eta`` and averaging from the first sample; returns
    ``(average_coef, average_offset)``.
    """
    if coef_init is None:
        coef = np.zeros(X.shape[1])
    else:
        coef = coef_init
    average_coef = np.zeros(X.shape[1])
    # Internally the model optimizes an intercept b = 1 - offset.
    offset = offset_init
    intercept = 1 - offset
    average_intercept = 0.0
    decay = 1.0
    # sparse data has a fixed decay of .01
    if klass == SparseSGDOneClassSVM:
        decay = 0.01
    for i, entry in enumerate(X):
        p = np.dot(entry, coef)
        p += intercept
        # hinge-like subgradient: only samples with decision value <= 1
        # contribute to the coef update
        if p <= 1.0:
            gradient = -1
        else:
            gradient = 0
        coef *= max(0, 1.0 - (eta * nu / 2))
        coef += -(eta * gradient * entry)
        intercept += -(eta * (nu + gradient)) * decay
        # incremental running averages over the i+1 iterates seen so far
        average_coef *= i
        average_coef += coef
        average_coef /= i + 1.0
        average_intercept *= i
        average_intercept += intercept
        average_intercept /= i + 1.0
    return average_coef, 1 - average_intercept
def _test_warm_start_oneclass(klass, X, lr):
    """Shared body for the SGDOneClassSVM warm-start equivalence checks.

    Not a test itself: pytest never collects a ``_``-prefixed function and
    ``klass`` is passed explicitly by the callers, so the previous
    ``@pytest.mark.parametrize("klass", ...)`` decorator was dead weight
    and has been removed.
    """
    # Test that explicit warm restart...
    clf = klass(nu=0.5, eta0=0.01, shuffle=False, learning_rate=lr)
    clf.fit(X)
    clf2 = klass(nu=0.1, eta0=0.01, shuffle=False, learning_rate=lr)
    clf2.fit(X, coef_init=clf.coef_.copy(), offset_init=clf.offset_.copy())
    # ... and implicit warm restart are equivalent.
    clf3 = klass(nu=0.5, eta0=0.01, shuffle=False, warm_start=True, learning_rate=lr)
    clf3.fit(X)
    assert clf3.t_ == clf.t_
    assert_allclose(clf3.coef_, clf.coef_)
    clf3.set_params(nu=0.1)
    clf3.fit(X)
    assert clf3.t_ == clf2.t_
    assert_allclose(clf3.coef_, clf2.coef_)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_warm_start_oneclass(klass, lr):
    """Explicit and implicit warm restarts are equivalent for each schedule."""
    _test_warm_start_oneclass(klass, X, lr)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_clone_oneclass(klass):
    """clone() of a one-class SVM fits identically to a fresh estimator."""
    # Test whether clone works ok.
    clf = klass(nu=0.5)
    clf = clone(clf)
    clf.set_params(nu=0.1)
    clf.fit(X)
    clf2 = klass(nu=0.1)
    clf2.fit(X)
    assert_array_equal(clf.coef_, clf2.coef_)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_partial_fit_oneclass(klass):
    """One-class partial_fit: shapes, coef_ reuse, feature-count validation."""
    third = X.shape[0] // 3
    clf = klass(nu=0.1)
    clf.partial_fit(X[:third])
    assert clf.coef_.shape == (X.shape[1],)
    assert clf.offset_.shape == (1,)
    assert clf.predict([[0, 0]]).shape == (1,)
    previous_coefs = clf.coef_
    clf.partial_fit(X[third:])
    # check that coef_ haven't been re-allocated
    assert clf.coef_ is previous_coefs
    # raises ValueError if number of features does not match previous data
    with pytest.raises(ValueError):
        clf.partial_fit(X[:, 1])
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
@pytest.mark.parametrize("lr", ["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit_oneclass(klass, lr):
    """fit(max_iter=2) and two partial_fit passes produce identical models."""
    clf = klass(nu=0.05, max_iter=2, eta0=0.01, learning_rate=lr, shuffle=False)
    clf.fit(X)
    y_scores = clf.decision_function(T)
    t = clf.t_
    coef = clf.coef_
    offset = clf.offset_
    clf = klass(nu=0.05, eta0=0.01, max_iter=1, learning_rate=lr, shuffle=False)
    for _ in range(2):
        clf.partial_fit(X)
    y_scores2 = clf.decision_function(T)
    assert clf.t_ == t
    assert_allclose(y_scores, y_scores2)
    assert_allclose(clf.coef_, coef)
    assert_allclose(clf.offset_, offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_late_onset_averaging_reached_oneclass(klass):
    """Averaging that starts mid-training matches the ASGD reference."""
    # Test average
    eta0 = 0.001
    nu = 0.05
    # 2 passes over the training set but average only at second pass
    clf1 = klass(
        average=7, learning_rate="constant", eta0=eta0, nu=nu, max_iter=2, shuffle=False
    )
    # 1 pass over the training set with no averaging
    clf2 = klass(
        average=False,
        learning_rate="constant",
        eta0=eta0,
        nu=nu,
        max_iter=1,
        shuffle=False,
    )
    clf1.fit(X)
    clf2.fit(X)
    # Start from clf2 solution, compute averaging using asgd function and
    # compare with clf1 solution
    average_coef, average_offset = asgd_oneclass(
        klass, X, eta0, nu, coef_init=clf2.coef_.ravel(), offset_init=clf2.offset_
    )
    assert_allclose(clf1.coef_.ravel(), average_coef.ravel())
    assert_allclose(clf1.offset_, average_offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_averaged_computed_correctly_oneclass(klass):
    """Averaged one-class SGD matches the naive ASGD reference after fit."""
    # Tests the average SGD One-Class SVM matches the naive implementation
    eta = 0.001
    nu = 0.05
    n_samples = 20
    n_features = 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    clf = klass(
        learning_rate="constant",
        eta0=eta,
        nu=nu,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    clf.fit(X)
    average_coef, average_offset = asgd_oneclass(klass, X, eta, nu)
    assert_allclose(clf.coef_, average_coef)
    assert_allclose(clf.offset_, average_offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_averaged_partial_fit_oneclass(klass):
    """Two partial_fit halves yield the same average as a single fit pass."""
    # Tests whether the partial fit yields the same average as the fit
    eta = 0.001
    nu = 0.05
    n_samples = 20
    n_features = 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    clf = klass(
        learning_rate="constant",
        eta0=eta,
        nu=nu,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    clf.partial_fit(X[: int(n_samples / 2)][:])
    clf.partial_fit(X[int(n_samples / 2) :][:])
    average_coef, average_offset = asgd_oneclass(klass, X, eta, nu)
    assert_allclose(clf.coef_, average_coef)
    assert_allclose(clf.offset_, average_offset)
@pytest.mark.parametrize("klass", [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_average_sparse_oneclass(klass):
    """Averaging is correct on one-class data containing zero entries."""
    # Checks the average coef on data with 0s
    eta = 0.001
    nu = 0.01
    clf = klass(
        learning_rate="constant",
        eta0=eta,
        nu=nu,
        fit_intercept=True,
        max_iter=1,
        average=True,
        shuffle=False,
    )
    n_samples = X3.shape[0]
    clf.partial_fit(X3[: int(n_samples / 2)])
    clf.partial_fit(X3[int(n_samples / 2) :])
    average_coef, average_offset = asgd_oneclass(klass, X3, eta, nu)
    assert_allclose(clf.coef_, average_coef)
    assert_allclose(clf.offset_, average_offset)
def test_sgd_oneclass():
    """Exact values of fit/decision_function/predict/score_samples on a toy set."""
    # Test fit, decision_function, predict and score_samples on a toy
    # dataset
    X_train = np.array([[-2, -1], [-1, -1], [1, 1]])
    X_test = np.array([[0.5, -2], [2, 2]])
    clf = SGDOneClassSVM(
        nu=0.5, eta0=1, learning_rate="constant", shuffle=False, max_iter=1
    )
    clf.fit(X_train)
    assert_allclose(clf.coef_, np.array([-0.125, 0.4375]))
    assert clf.offset_[0] == -0.5
    scores = clf.score_samples(X_test)
    assert_allclose(scores, np.array([-0.9375, 0.625]))
    # decision_function must equal score_samples shifted by the offset
    dec = clf.score_samples(X_test) - clf.offset_
    assert_allclose(clf.decision_function(X_test), dec)
    pred = clf.predict(X_test)
    assert_array_equal(pred, np.array([-1, 1]))
def test_ocsvm_vs_sgdocsvm():
    """SGDOneClassSVM + Nystroem approximates the kernelized OneClassSVM."""
    # Checks SGDOneClass SVM gives a good approximation of kernelized
    # One-Class SVM
    nu = 0.05
    gamma = 2.0
    random_state = 42
    # Generate train and test data
    rng = np.random.RandomState(random_state)
    X = 0.3 * rng.randn(500, 2)
    X_train = np.r_[X + 2, X - 2]
    X = 0.3 * rng.randn(100, 2)
    X_test = np.r_[X + 2, X - 2]
    # One-Class SVM
    clf = OneClassSVM(gamma=gamma, kernel="rbf", nu=nu)
    clf.fit(X_train)
    y_pred_ocsvm = clf.predict(X_test)
    dec_ocsvm = clf.decision_function(X_test).reshape(1, -1)
    # SGDOneClassSVM using kernel approximation
    max_iter = 15
    transform = Nystroem(gamma=gamma, random_state=random_state)
    clf_sgd = SGDOneClassSVM(
        nu=nu,
        shuffle=True,
        fit_intercept=True,
        max_iter=max_iter,
        random_state=random_state,
        tol=None,
    )
    pipe_sgd = make_pipeline(transform, clf_sgd)
    pipe_sgd.fit(X_train)
    y_pred_sgdocsvm = pipe_sgd.predict(X_test)
    dec_sgdocsvm = pipe_sgd.decision_function(X_test).reshape(1, -1)
    # predictions agree on >= 99% of the samples and scores correlate strongly
    assert np.mean(y_pred_sgdocsvm == y_pred_ocsvm) >= 0.99
    corrcoef = np.corrcoef(np.concatenate((dec_ocsvm, dec_sgdocsvm)))[0, 1]
    assert corrcoef >= 0.9
def test_sgd_oneclass_convergence():
    """The tol-based stopping criterion does not end training prematurely."""
    # Check that the optimization does not end early and that the stopping criterion
    # is working. Non-regression test for #30027
    for nu in [0.1, 0.5, 0.9]:
        # no need for large max_iter
        model = SGDOneClassSVM(
            nu=nu, max_iter=100, tol=1e-3, learning_rate="constant", eta0=1e-3
        )
        model.fit(iris.data)
        # 6 is the minimal number of iterations that should be surpassed, after which
        # the optimization can stop
        assert model.n_iter_ > 6
def test_sgd_oneclass_vs_linear_oneclass():
    """SGDOneClassSVM converges to the liblinear OneClassSVM(kernel='linear')."""
    # Test convergence vs. liblinear `OneClassSVM` with kernel="linear"
    for nu in [0.1, 0.5, 0.9]:
        # allow enough iterations, small dataset
        model = SGDOneClassSVM(
            nu=nu, max_iter=20000, tol=None, learning_rate="constant", eta0=1e-3
        )
        model_ref = OneClassSVM(kernel="linear", nu=nu, tol=1e-6)  # reference model
        model.fit(iris.data)
        model_ref.fit(iris.data)
        preds = model.predict(iris.data)
        dec_fn = model.decision_function(iris.data)
        preds_ref = model_ref.predict(iris.data)
        dec_fn_ref = model_ref.decision_function(iris.data)
        dec_fn_corr = np.corrcoef(dec_fn, dec_fn_ref)[0, 1]
        preds_corr = np.corrcoef(preds, preds_ref)[0, 1]
        # check weights and intercept concatenated together for correlation
        coef_corr = np.corrcoef(
            np.concatenate([model.coef_, -model.offset_]),
            np.concatenate([model_ref.coef_.flatten(), model_ref.intercept_]),
        )[0, 1]
        # share of predicted 1's
        share_ones = (preds == 1).sum() / len(preds)
        assert dec_fn_corr > 0.99
        assert preds_corr > 0.95
        assert coef_corr > 0.99
        assert_allclose(1 - share_ones, nu)
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(
n_samples=1000, n_features=100, n_informative=20, random_state=1234
)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(
alpha=0.001,
penalty="elasticnet",
tol=None,
max_iter=6,
l1_ratio=0.9999999999,
random_state=42,
).fit(X, y)
est_l1 = SGDClassifier(
alpha=0.001, penalty="l1", max_iter=6, random_state=42, tol=None
).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(
alpha=0.001,
penalty="elasticnet",
tol=None,
max_iter=6,
l1_ratio=0.0000000001,
random_state=42,
).fit(X, y)
est_l2 = SGDClassifier(
alpha=0.001, penalty="l2", max_iter=6, random_state=42, tol=None
).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all="raise"):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert np.isfinite(X).all()
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert np.isfinite(X_scaled).all()
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.0).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss="squared_hinge", max_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert np.isfinite(model.coef_).all()
# model is numerically unstable on unscaled data
msg_regxp = (
r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help."
)
with pytest.raises(ValueError, match=msg_regxp):
model.fit(X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(
loss="squared_hinge",
max_iter=10,
shuffle=True,
penalty="elasticnet",
l1_ratio=0.3,
alpha=0.01,
eta0=0.001,
random_state=0,
tol=None,
)
with np.errstate(all="raise"):
model.fit(iris.data, iris.target)
assert np.isfinite(model.coef_).all()
@pytest.mark.parametrize("penalty", ["l2", "l1", "elasticnet"])
def test_large_regularization(penalty):
# Non regression tests for numerical stability issues caused by large
# regularization parameters
model = SGDClassifier(
alpha=1e5,
learning_rate="constant",
eta0=0.1,
penalty=penalty,
shuffle=False,
tol=None,
max_iter=6,
)
with np.errstate(all="raise"):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
def test_tol_parameter():
# Test that the tol parameter behaves as expected
X = StandardScaler().fit_transform(iris.data)
y = iris.target == 1
# With tol is None, the number of iteration should be equal to max_iter
max_iter = 42
model_0 = SGDClassifier(tol=None, random_state=0, max_iter=max_iter)
model_0.fit(X, y)
assert max_iter == model_0.n_iter_
# If tol is not None, the number of iteration should be less than max_iter
max_iter = 2000
model_1 = SGDClassifier(tol=0, random_state=0, max_iter=max_iter)
model_1.fit(X, y)
assert max_iter > model_1.n_iter_
assert model_1.n_iter_ > 5
# A larger tol should yield a smaller number of iteration
model_2 = SGDClassifier(tol=0.1, random_state=0, max_iter=max_iter)
model_2.fit(X, y)
assert model_1.n_iter_ > model_2.n_iter_
assert model_2.n_iter_ > 3
# Strict tolerance and small max_iter should trigger a warning
model_3 = SGDClassifier(max_iter=3, tol=1e-3, random_state=0)
warning_message = (
"Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit."
)
with pytest.warns(ConvergenceWarning, match=warning_message):
model_3.fit(X, y)
assert model_3.n_iter_ == 3
def _test_loss_common(loss_function, cases):
# Test the different loss functions
# cases is a list of (p, y, expected)
for p, y, expected_loss, expected_dloss in cases:
assert_almost_equal(loss_function.py_loss(p, y), expected_loss)
assert_almost_equal(loss_function.py_dloss(p, y), expected_dloss)
def test_loss_hinge():
# Test Hinge (hinge / perceptron)
# hinge
loss = sgd_fast.Hinge(1.0)
cases = [
# (p, y, expected_loss, expected_dloss)
(1.1, 1.0, 0.0, 0.0),
(-2.0, -1.0, 0.0, 0.0),
(1.0, 1.0, 0.0, -1.0),
(-1.0, -1.0, 0.0, 1.0),
(0.5, 1.0, 0.5, -1.0),
(2.0, -1.0, 3.0, 1.0),
(-0.5, -1.0, 0.5, 1.0),
(0.0, 1.0, 1, -1.0),
]
_test_loss_common(loss, cases)
# perceptron
loss = sgd_fast.Hinge(0.0)
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, 0.0, 0.0),
(-0.1, -1.0, 0.0, 0.0),
(0.0, 1.0, 0.0, -1.0),
(0.0, -1.0, 0.0, 1.0),
(0.5, -1.0, 0.5, 1.0),
(2.0, -1.0, 2.0, 1.0),
(-0.5, 1.0, 0.5, -1.0),
(-1.0, 1.0, 1.0, -1.0),
]
_test_loss_common(loss, cases)
def test_gradient_squared_hinge():
# Test SquaredHinge
loss = sgd_fast.SquaredHinge(1.0)
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, 0.0, 0.0),
(-2.0, -1.0, 0.0, 0.0),
(1.0, -1.0, 4.0, 4.0),
(-1.0, 1.0, 4.0, -4.0),
(0.5, 1.0, 0.25, -1.0),
(0.5, -1.0, 2.25, 3.0),
]
_test_loss_common(loss, cases)
def test_loss_modified_huber():
# (p, y, expected_loss, expected_dloss)
loss = sgd_fast.ModifiedHuber()
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, 0.0, 0.0),
(-1.0, -1.0, 0.0, 0.0),
(2.0, 1.0, 0.0, 0.0),
(0.0, 1.0, 1.0, -2.0),
(-1.0, 1.0, 4.0, -4.0),
(0.5, -1.0, 2.25, 3.0),
(-2.0, 1.0, 8, -4.0),
(-3.0, 1.0, 12, -4.0),
]
_test_loss_common(loss, cases)
def test_loss_epsilon_insensitive():
# Test EpsilonInsensitive
loss = sgd_fast.EpsilonInsensitive(0.1)
cases = [
# (p, y, expected_loss, expected_dloss)
(0.0, 0.0, 0.0, 0.0),
(0.1, 0.0, 0.0, 0.0),
(-2.05, -2.0, 0.0, 0.0),
(3.05, 3.0, 0.0, 0.0),
(2.2, 2.0, 0.1, 1.0),
(2.0, -1.0, 2.9, 1.0),
(2.0, 2.2, 0.1, -1.0),
(-2.0, 1.0, 2.9, -1.0),
]
_test_loss_common(loss, cases)
def test_loss_squared_epsilon_insensitive():
# Test SquaredEpsilonInsensitive
loss = sgd_fast.SquaredEpsilonInsensitive(0.1)
cases = [
# (p, y, expected_loss, expected_dloss)
(0.0, 0.0, 0.0, 0.0),
(0.1, 0.0, 0.0, 0.0),
(-2.05, -2.0, 0.0, 0.0),
(3.05, 3.0, 0.0, 0.0),
(2.2, 2.0, 0.01, 0.2),
(2.0, -1.0, 8.41, 5.8),
(2.0, 2.2, 0.01, -0.2),
(-2.0, 1.0, 8.41, -5.8),
]
_test_loss_common(loss, cases)
def test_multi_thread_multi_class_and_early_stopping():
# This is a non-regression test for a bad interaction between
# early stopping internal attribute and thread-based parallelism.
clf = SGDClassifier(
alpha=1e-3,
tol=1e-3,
max_iter=1000,
early_stopping=True,
n_iter_no_change=100,
random_state=0,
n_jobs=2,
)
clf.fit(iris.data, iris.target)
assert clf.n_iter_ > clf.n_iter_no_change
assert clf.n_iter_ < clf.n_iter_no_change + 20
assert clf.score(iris.data, iris.target) > 0.8
def test_multi_core_gridsearch_and_early_stopping():
# This is a non-regression test for a bad interaction between
# early stopping internal attribute and process-based multi-core
# parallelism.
param_grid = {
"alpha": np.logspace(-4, 4, 9),
"n_iter_no_change": [5, 10, 50],
}
clf = SGDClassifier(tol=1e-2, max_iter=1000, early_stopping=True, random_state=0)
search = RandomizedSearchCV(clf, param_grid, n_iter=5, n_jobs=2, random_state=0)
search.fit(iris.data, iris.target)
assert search.best_score_ > 0.8
@pytest.mark.parametrize("backend", ["loky", "multiprocessing", "threading"])
def test_SGDClassifier_fit_for_all_backends(backend):
# This is a non-regression smoke test. In the multi-class case,
# SGDClassifier.fit fits each class in a one-versus-all fashion using
# joblib.Parallel. However, each OvA step updates the coef_ attribute of
# the estimator in-place. Internally, SGDClassifier calls Parallel using
# require='sharedmem'. This test makes sure SGDClassifier.fit works
# consistently even when the user asks for a backend that does not provide
# sharedmem semantics.
# We further test a case where memmapping would have been used if
# SGDClassifier.fit was called from a loky or multiprocessing backend. In
# this specific case, in-place modification of clf.coef_ would have caused
# a segmentation fault when trying to write in a readonly memory mapped
# buffer.
random_state = np.random.RandomState(42)
# Create a classification problem with 50000 features and 20 classes. Using
# loky or multiprocessing this make the clf.coef_ exceed the threshold
# above which memmaping is used in joblib and loky (1MB as of 2018/11/1).
X = sp.random(500, 2000, density=0.02, format="csr", random_state=random_state)
y = random_state.choice(20, 500)
# Begin by fitting a SGD classifier sequentially
clf_sequential = SGDClassifier(max_iter=1000, n_jobs=1, random_state=42)
clf_sequential.fit(X, y)
# Fit a SGDClassifier using the specified backend, and make sure the
# coefficients are equal to those obtained using a sequential fit
clf_parallel = SGDClassifier(max_iter=1000, n_jobs=4, random_state=42)
with joblib.parallel_backend(backend=backend):
clf_parallel.fit(X, y)
assert_array_almost_equal(clf_sequential.coef_, clf_parallel.coef_)
@pytest.mark.parametrize(
"Estimator", [linear_model.SGDClassifier, linear_model.SGDRegressor]
)
def test_sgd_random_state(Estimator, global_random_seed):
# Train the same model on the same data without converging and check that we
# get reproducible results by fixing the random seed.
if Estimator == linear_model.SGDRegressor:
X, y = datasets.make_regression(random_state=global_random_seed)
else:
X, y = datasets.make_classification(random_state=global_random_seed)
# Fitting twice a model with the same hyper-parameters on the same training
# set with the same seed leads to the same results deterministically.
est = Estimator(random_state=global_random_seed, max_iter=1)
with pytest.warns(ConvergenceWarning):
coef_same_seed_a = est.fit(X, y).coef_
assert est.n_iter_ == 1
est = Estimator(random_state=global_random_seed, max_iter=1)
with pytest.warns(ConvergenceWarning):
coef_same_seed_b = est.fit(X, y).coef_
assert est.n_iter_ == 1
assert_allclose(coef_same_seed_a, coef_same_seed_b)
# Fitting twice a model with the same hyper-parameters on the same training
# set but with different random seed leads to different results after one
# epoch because of the random shuffling of the dataset.
est = Estimator(random_state=global_random_seed + 1, max_iter=1)
with pytest.warns(ConvergenceWarning):
coef_other_seed = est.fit(X, y).coef_
assert est.n_iter_ == 1
assert np.abs(coef_same_seed_a - coef_other_seed).max() > 1.0
def test_validation_mask_correctly_subsets(monkeypatch):
"""Test that data passed to validation callback correctly subsets.
Non-regression test for #23255.
"""
X, Y = iris.data, iris.target
n_samples = X.shape[0]
validation_fraction = 0.2
clf = linear_model.SGDClassifier(
early_stopping=True,
tol=1e-3,
max_iter=1000,
validation_fraction=validation_fraction,
)
mock = Mock(side_effect=_stochastic_gradient._ValidationScoreCallback)
monkeypatch.setattr(_stochastic_gradient, "_ValidationScoreCallback", mock)
clf.fit(X, Y)
X_val, y_val = mock.call_args[0][1:3]
assert X_val.shape[0] == int(n_samples * validation_fraction)
assert y_val.shape[0] == int(n_samples * validation_fraction)
def test_sgd_error_on_zero_validation_weight():
# Test that SGDClassifier raises error when all the validation samples
# have zero sample_weight. Non-regression test for #17229.
X, Y = iris.data, iris.target
sample_weight = np.zeros_like(Y)
validation_fraction = 0.4
clf = linear_model.SGDClassifier(
early_stopping=True, validation_fraction=validation_fraction, random_state=0
)
error_message = (
"The sample weights for validation set are all zero, consider using a"
" different random state."
)
with pytest.raises(ValueError, match=error_message):
clf.fit(X, Y, sample_weight=sample_weight)
@pytest.mark.parametrize("Estimator", [SGDClassifier, SGDRegressor])
def test_sgd_verbose(Estimator):
"""non-regression test for gh #25249"""
Estimator(verbose=1).fit(X, Y)
@pytest.mark.parametrize(
"SGDEstimator",
[
SGDClassifier,
SparseSGDClassifier,
SGDRegressor,
SparseSGDRegressor,
SGDOneClassSVM,
SparseSGDOneClassSVM,
],
)
@pytest.mark.parametrize("data_type", (np.float32, np.float64))
def test_sgd_dtype_match(SGDEstimator, data_type):
_X = X.astype(data_type)
_Y = np.array(Y, dtype=data_type)
sgd_model = SGDEstimator()
sgd_model.fit(_X, _Y)
assert sgd_model.coef_.dtype == data_type
@pytest.mark.parametrize(
"SGDEstimator",
[
SGDClassifier,
SparseSGDClassifier,
SGDRegressor,
SparseSGDRegressor,
SGDOneClassSVM,
SparseSGDOneClassSVM,
],
)
def test_sgd_numerical_consistency(SGDEstimator):
X_64 = X.astype(dtype=np.float64)
Y_64 = np.array(Y, dtype=np.float64)
X_32 = X.astype(dtype=np.float32)
Y_32 = np.array(Y, dtype=np.float32)
sgd_64 = SGDEstimator(max_iter=20)
sgd_64.fit(X_64, Y_64)
sgd_32 = SGDEstimator(max_iter=20)
sgd_32.fit(X_32, Y_32)
assert_allclose(sgd_64.coef_, sgd_32.coef_)
def test_sgd_one_class_svm_estimator_type():
"""Check that SGDOneClassSVM has the correct estimator type.
Non-regression test for if the mixin was not on the left.
"""
sgd_ocsvm = SGDOneClassSVM()
assert get_tags(sgd_ocsvm).estimator_type == "outlier_detector"
| _SparseSGDOneClassSVM |
python | huggingface__transformers | src/transformers/models/grounding_dino/modeling_grounding_dino.py | {
"start": 119789,
"end": 130009
} | class ____(GroundingDinoPreTrainedModel):
# When using clones, all layers > 0 will be clones, but layer 0 *is* required
# the bbox_embed in the decoder are all clones though
_tied_weights_keys = {
r"bbox_embed.(?![0])\d+": "bbox_embed.0",
"model.decoder.bbox_embed": "bbox_embed",
}
def __init__(self, config: GroundingDinoConfig):
super().__init__(config)
self.model = GroundingDinoModel(config)
if not config.decoder_bbox_embed_share:
del self._tied_weights_keys[r"bbox_embed.(?![0])\d+"]
self.bbox_embed = nn.ModuleList(
[
GroundingDinoMLPPredictionHead(
input_dim=config.d_model,
hidden_dim=config.d_model,
output_dim=4,
num_layers=3,
)
for _ in range(config.decoder_layers)
]
)
self.class_embed = nn.ModuleList(
[GroundingDinoContrastiveEmbedding(config) for _ in range(config.decoder_layers)]
)
# hack for box-refinement
self.model.decoder.class_embed = self.class_embed # class embed has no weights so nothing to tie
self.model.decoder.bbox_embed = self.bbox_embed
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
input_ids: torch.LongTensor,
token_type_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
pixel_mask: Optional[torch.BoolTensor] = None,
encoder_outputs: Optional[Union[GroundingDinoEncoderOutput, tuple]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[list[dict[str, Union[torch.LongTensor, torch.FloatTensor]]]] = None,
):
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`BertTokenizer.__call__`] for details.
token_type_ids (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`: 0 corresponds to a `sentence A` token, 1 corresponds to a `sentence B` token
[What are token type IDs?](../glossary#token-type-ids)
labels (`list[Dict]` of len `(batch_size,)`, *optional*):
Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes
in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.
Examples:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection
>>> model_id = "IDEA-Research/grounding-dino-tiny"
>>> device = "cuda"
>>> processor = AutoProcessor.from_pretrained(model_id)
>>> model = AutoModelForZeroShotObjectDetection.from_pretrained(model_id).to(device)
>>> image_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(image_url, stream=True).raw)
>>> # Check for cats and remote controls
>>> text_labels = [["a cat", "a remote control"]]
>>> inputs = processor(images=image, text=text_labels, return_tensors="pt").to(device)
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> results = processor.post_process_grounded_object_detection(
... outputs,
... threshold=0.4,
... text_threshold=0.3,
... target_sizes=[(image.height, image.width)]
... )
>>> # Retrieve the first image result
>>> result = results[0]
>>> for box, score, text_label in zip(result["boxes"], result["scores"], result["text_labels"]):
... box = [round(x, 2) for x in box.tolist()]
... print(f"Detected {text_label} with confidence {round(score.item(), 3)} at location {box}")
Detected a cat with confidence 0.479 at location [344.7, 23.11, 637.18, 374.28]
Detected a cat with confidence 0.438 at location [12.27, 51.91, 316.86, 472.44]
Detected a remote control with confidence 0.478 at location [38.57, 70.0, 176.78, 118.18]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
# First, sent images through Grounding DINO base model to obtain encoder + decoder outputs
outputs = self.model(
pixel_values=pixel_values,
input_ids=input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
pixel_mask=pixel_mask,
encoder_outputs=encoder_outputs,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
idx = 5 + (1 if output_attentions else 0) + (1 if output_hidden_states else 0)
enc_text_hidden_state = outputs.encoder_last_hidden_state_text if return_dict else outputs[idx]
hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2]
init_reference_points = outputs.init_reference_points if return_dict else outputs[1]
inter_references_points = outputs.intermediate_reference_points if return_dict else outputs[3]
# class logits + predicted bounding boxes
outputs_classes = []
outputs_coords = []
# hidden_states are of shape (batch_size, num_stages, height, width)
# predict class and bounding box deltas for each stage
num_levels = hidden_states.shape[1]
for level in range(num_levels):
if level == 0:
reference = init_reference_points
else:
reference = inter_references_points[:, level - 1]
reference = torch.special.logit(reference, eps=1e-5)
outputs_class = self.class_embed[level](
vision_hidden_state=hidden_states[:, level],
text_hidden_state=enc_text_hidden_state,
text_token_mask=attention_mask.bool(),
)
delta_bbox = self.bbox_embed[level](hidden_states[:, level])
reference_coordinates = reference.shape[-1]
if reference_coordinates == 4:
outputs_coord_logits = delta_bbox + reference
elif reference_coordinates == 2:
delta_bbox[..., :2] += reference
outputs_coord_logits = delta_bbox
else:
raise ValueError(f"reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}")
outputs_coord = outputs_coord_logits.sigmoid()
outputs_classes.append(outputs_class)
outputs_coords.append(outputs_coord)
outputs_class = torch.stack(outputs_classes)
outputs_coord = torch.stack(outputs_coords)
logits = outputs_class[-1]
pred_boxes = outputs_coord[-1]
loss, loss_dict, auxiliary_outputs = None, None, None
if labels is not None:
label_maps = build_label_maps(logits, input_ids)
text_mask = build_text_mask(logits, attention_mask)
loss, loss_dict, auxiliary_outputs = self.loss_function(
logits,
labels,
self.device,
pred_boxes,
self.config,
label_maps,
text_mask,
outputs_class=outputs_class,
outputs_coord=outputs_coord,
encoder_logits=outputs[-2],
encoder_pred_boxes=outputs[-1],
)
if not return_dict:
auxiliary_outputs = auxiliary_outputs if auxiliary_outputs is not None else []
output = [loss, loss_dict, logits, pred_boxes, *auxiliary_outputs, *outputs, input_ids]
output = tuple(out for out in output if out is not None)
return output
dict_outputs = GroundingDinoObjectDetectionOutput(
loss=loss,
loss_dict=loss_dict,
logits=logits,
pred_boxes=pred_boxes,
last_hidden_state=outputs.last_hidden_state,
auxiliary_outputs=auxiliary_outputs,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
encoder_last_hidden_state_vision=outputs.encoder_last_hidden_state_vision,
encoder_last_hidden_state_text=outputs.encoder_last_hidden_state_text,
encoder_vision_hidden_states=outputs.encoder_vision_hidden_states,
encoder_text_hidden_states=outputs.encoder_text_hidden_states,
encoder_attentions=outputs.encoder_attentions,
intermediate_hidden_states=outputs.intermediate_hidden_states,
intermediate_reference_points=outputs.intermediate_reference_points,
init_reference_points=outputs.init_reference_points,
enc_outputs_class=outputs.enc_outputs_class,
enc_outputs_coord_logits=outputs.enc_outputs_coord_logits,
encoder_logits=outputs.encoder_logits,
encoder_pred_boxes=outputs.encoder_pred_boxes,
input_ids=input_ids,
)
return dict_outputs
__all__ = ["GroundingDinoForObjectDetection", "GroundingDinoModel", "GroundingDinoPreTrainedModel"]
| GroundingDinoForObjectDetection |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataplex.py | {
"start": 9151,
"end": 10041
} | class ____:
@mock.patch(HOOK_STR)
def test_execute(self, hook_mock):
op = DataplexDeleteLakeOperator(
project_id=PROJECT_ID,
region=REGION,
lake_id=LAKE_ID,
task_id="delete_dataplex_lake",
api_version=API_VERSION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context=mock.MagicMock())
hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
api_version=API_VERSION,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.delete_lake.assert_called_once_with(
project_id=PROJECT_ID,
region=REGION,
lake_id=LAKE_ID,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestDataplexDeleteLakeOperator |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 819156,
"end": 819348
} | class ____(VegaLiteSchema):
"""Orientation schema wrapper."""
_schema = {"$ref": "#/definitions/Orientation"}
def __init__(self, *args):
super().__init__(*args)
| Orientation |
python | astropy__astropy | astropy/visualization/wcsaxes/frame.py | {
"start": 10754,
"end": 11324
} | class ____(BaseFrame):
"""
A classic rectangular frame.
"""
spine_names = "brtl"
_spine_auto_position_order = "bltr"
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self["b"].data = np.array(([xmin, ymin], [xmax, ymin]))
self["r"].data = np.array(([xmax, ymin], [xmax, ymax]))
self["t"].data = np.array(([xmax, ymax], [xmin, ymax]))
self["l"].data = np.array(([xmin, ymax], [xmin, ymin]))
super().update_spines()
| RectangularFrame |
python | run-llama__llama_index | llama-index-core/llama_index/core/evaluation/retrieval/base.py | {
"start": 968,
"end": 2417
} | class ____(BaseModel):
"""
Retrieval eval result.
NOTE: this abstraction might change in the future.
Attributes:
query (str): Query string
expected_ids (List[str]): Expected ids
retrieved_ids (List[str]): Retrieved ids
metric_dict (Dict[str, BaseRetrievalMetric]): \
Metric dictionary for the evaluation
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
query: str = Field(..., description="Query string")
expected_ids: List[str] = Field(..., description="Expected ids")
expected_texts: Optional[List[str]] = Field(
default=None,
description="Expected texts associated with nodes provided in `expected_ids`",
)
retrieved_ids: List[str] = Field(..., description="Retrieved ids")
retrieved_texts: List[str] = Field(..., description="Retrieved texts")
mode: "RetrievalEvalMode" = Field(
default=RetrievalEvalMode.TEXT, description="text or image"
)
metric_dict: Dict[str, RetrievalMetricResult] = Field(
..., description="Metric dictionary for the evaluation"
)
@property
def metric_vals_dict(self) -> Dict[str, float]:
"""Dictionary of metric values."""
return {k: v.score for k, v in self.metric_dict.items()}
def __str__(self) -> str:
"""String representation."""
return f"Query: {self.query}\nMetrics: {self.metric_vals_dict!s}\n"
| RetrievalEvalResult |
python | Netflix__metaflow | test/test_config/config_corner_cases.py | {
"start": 1649,
"end": 2828
} | class ____(FlowSpec):
trigger_param = Parameter(
"trigger_param",
default="",
external_trigger=True,
external_artifact=trigger_name_func,
)
cfg = Config("cfg", default="config_simple.json")
cfg_default_value = Config(
"cfg_default_value",
default_value=default_config,
)
env_cfg = Config("env_cfg", default_value={"VAR1": "1", "VAR2": "2"})
@environment(
vars={
"TSTVAL": config_expr("str(cfg.some.value)"),
"TSTVAL2": cfg_default_value.a.b,
}
)
@step
def start(self):
self.config_from_env = os.environ.get("TSTVAL")
self.config_from_env_2 = os.environ.get("TSTVAL2")
self.config_val = self.cfg.some.value
self.config_val_2 = self.cfg_default_value.a.b
self.next(self.mid)
# Use config_expr as a top level attribute
@environment(vars=config_expr("env_cfg"))
@step
def mid(self):
self.var1 = os.environ.get("VAR1")
self.var2 = os.environ.get("VAR2")
self.next(self.end)
@step
def end(self):
pass
if __name__ == "__main__":
ConfigSimple()
| ConfigSimple |
python | google__jax | jax/experimental/mosaic/gpu/core.py | {
"start": 9780,
"end": 10277
} | class ____:
addr_ref: ir.Value
num_cols: int
collective: bool
def alloc(self) -> int:
"""Allocates TMEM and returns the number of columns allocated."""
_, cols = tcgen05.tmem_alloc(
self.addr_ref, self.num_cols, collective=self.collective, exact=False
)
return cols
def dealloc(self):
addr = memref.load(self.addr_ref, [])
tcgen05.tmem_dealloc(
addr, self.num_cols, collective=self.collective, exact=False
)
@dataclasses.dataclass()
| _TMEMAlloc |
python | kamyu104__LeetCode-Solutions | Python/minimize-maximum-pair-sum-in-array.py | {
"start": 33,
"end": 252
} | class ____(object):
def minPairSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
return max(nums[i]+nums[-1-i] for i in xrange(len(nums)//2))
| Solution |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 772,
"end": 992
} | class ____(BaseModel, from_attributes=True):
x: float
y: str
kwargs_model = KwargsModel(x=1, y='y')
KwargsModel(x=1, y='y', z='z')
kwargs_model.x = 2
kwargs_model.model_validate(kwargs_model.__dict__)
| KwargsModel |
python | pandas-dev__pandas | pandas/core/indexing.py | {
"start": 52602,
"end": 88270
} | class ____(_LocationIndexer):
_valid_types = (
"integer, integer slice (START point is INCLUDED, END "
"point is EXCLUDED), listlike of integers, boolean array"
)
_takeable = True
# -------------------------------------------------------------------
# Key Checks
def _validate_key(self, key, axis: AxisInt) -> None:
if com.is_bool_indexer(key):
if hasattr(key, "index") and isinstance(key.index, Index):
if key.index.inferred_type == "integer":
return
raise ValueError(
"iLocation based boolean indexing cannot use an indexable as a mask"
)
return
if isinstance(key, slice):
return
elif is_integer(key):
self._validate_integer(key, axis)
elif isinstance(key, tuple):
# a tuple should already have been caught by this point
# so don't treat a tuple as a valid indexer
raise IndexingError("Too many indexers")
elif is_list_like_indexer(key):
if isinstance(key, ABCSeries):
arr = key._values
elif is_array_like(key):
arr = key
else:
arr = np.array(key)
len_axis = len(self.obj._get_axis(axis))
# check that the key has a numeric dtype
if not is_numeric_dtype(arr.dtype):
raise IndexError(f".iloc requires numeric indexers, got {arr}")
if len(arr):
if isinstance(arr.dtype, ExtensionDtype):
arr_max = arr._reduce("max")
arr_min = arr._reduce("min")
else:
arr_max = np.max(arr)
arr_min = np.min(arr)
# check that the key does not exceed the maximum size
if arr_max >= len_axis or arr_min < -len_axis:
raise IndexError("positional indexers are out-of-bounds")
else:
raise ValueError(f"Can only index by location with a [{self._valid_types}]")
def _has_valid_setitem_indexer(self, indexer) -> bool:
"""
Validate that a positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally.
Returns
-------
bool
"""
if isinstance(indexer, dict):
raise IndexError("iloc cannot enlarge its target object")
if isinstance(indexer, ABCDataFrame):
raise TypeError(
"DataFrame indexer for .iloc is not supported. "
"Consider using .loc with a DataFrame indexer for automatic alignment.",
)
if not isinstance(indexer, tuple):
indexer = _tuplify(self.ndim, indexer)
for ax, i in zip(self.obj.axes, indexer, strict=False):
if isinstance(i, slice):
# should check the stop slice?
pass
elif is_list_like_indexer(i):
# should check the elements?
pass
elif is_integer(i):
if i >= len(ax):
raise IndexError("iloc cannot enlarge its target object")
elif isinstance(i, dict):
raise IndexError("iloc cannot enlarge its target object")
return True
def _is_scalar_access(self, key: tuple) -> bool:
"""
Returns
-------
bool
"""
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if len(key) != self.ndim:
return False
return all(is_integer(k) for k in key)
def _validate_integer(self, key: int | np.integer, axis: AxisInt) -> None:
"""
Check that 'key' is a valid position in the desired axis.
Parameters
----------
key : int
Requested position.
axis : int
Desired axis.
Raises
------
IndexError
If 'key' is not a valid position in axis 'axis'.
"""
len_axis = len(self.obj._get_axis(axis))
if key >= len_axis or key < -len_axis:
raise IndexError("single positional indexer is out-of-bounds")
# -------------------------------------------------------------------
def _getitem_tuple(self, tup: tuple):
tup = self._validate_tuple_indexer(tup)
with suppress(IndexingError):
return self._getitem_lowerdim(tup)
return self._getitem_tuple_same_dim(tup)
def _get_list_axis(self, key, axis: AxisInt):
"""
Return Series values by list or array of integers.
Parameters
----------
key : list-like positional indexer
axis : int
Returns
-------
Series object
Notes
-----
`axis` can only be zero.
"""
try:
return self.obj.take(key, axis=axis)
except IndexError as err:
# re-raise with different error message, e.g. test_getitem_ndarray_3d
raise IndexError("positional indexers are out-of-bounds") from err
def _getitem_axis(self, key, axis: AxisInt):
if key is Ellipsis:
key = slice(None)
elif isinstance(key, ABCDataFrame):
raise IndexError(
"DataFrame indexer is not allowed for .iloc\n"
"Consider using .loc for automatic alignment."
)
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
if is_iterator(key):
key = list(key)
if isinstance(key, list):
key = np.asarray(key)
if com.is_bool_indexer(key):
self._validate_key(key, axis)
return self._getbool_axis(key, axis=axis)
# a list of integers
elif is_list_like_indexer(key):
return self._get_list_axis(key, axis=axis)
# a single integer
else:
key = item_from_zerodim(key)
if not is_integer(key):
raise TypeError("Cannot index by location index with a non-integer key")
# validate the location
self._validate_integer(key, axis)
return self.obj._ixs(key, axis=axis)
def _get_slice_axis(self, slice_obj: slice, axis: AxisInt):
# caller is responsible for ensuring non-None axis
obj = self.obj
if not need_slice(slice_obj):
return obj.copy(deep=False)
labels = obj._get_axis(axis)
labels._validate_positional_slice(slice_obj)
return self.obj._slice(slice_obj, axis=axis)
def _convert_to_indexer(self, key: T, axis: AxisInt) -> T:
    """
    Much simpler as we only have to deal with our valid types.

    For iloc the key is already positional, so no conversion is needed
    and the key is returned unchanged.
    """
    return key
def _get_setitem_indexer(self, key):
    """Convert ``key`` into an indexer suitable for positional setitem."""
    # GH#32257 Fall through to let numpy do validation
    if is_iterator(key):
        key = list(key)

    if self.axis is not None:
        # expand to a full tuple indexer with null slices on the other axes
        key = _tupleize_axis_indexer(self.ndim, self.axis, key)
    return key
# -------------------------------------------------------------------
def _decide_split_path(self, indexer, value) -> bool:
    """
    Decide whether we will take a block-by-block path.

    Returns True when the assignment must be performed column-by-column
    (the "split path") rather than on a single consolidated block.
    """
    take_split_path = not self.obj._mgr.is_single_block

    if not take_split_path and isinstance(value, ABCDataFrame):
        # Avoid cast of values
        take_split_path = not value._mgr.is_single_block

    # if there is only one block/type, still have to take split path
    # unless the block is one-dimensional or it can hold the value
    if not take_split_path and len(self.obj._mgr.blocks) and self.ndim > 1:
        # in case of dict, keys are indices
        val = list(value.values()) if isinstance(value, dict) else value
        arr = self.obj._mgr.blocks[0].values
        take_split_path = not can_hold_element(
            arr, extract_array(val, extract_numpy=True)
        )

    # if we have any multi-indexes that have non-trivial slices
    # (not null slices) then we must take the split path, xref
    # GH 10360, GH 27841
    if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
        for i, ax in zip(indexer, self.obj.axes, strict=True):
            if isinstance(ax, MultiIndex) and not (
                is_integer(i) or com.is_null_slice(i)
            ):
                take_split_path = True
                break

    return take_split_path
def _setitem_new_column(self, indexer, key, value, name: str) -> None:
    """
    _setitem_with_indexer cases that can go through DataFrame.__setitem__.

    Handles setting a column that does not yet exist, including the case
    where only a subset of rows is targeted: an all-NA "empty" column is
    created first, then filled at ``indexer[0]``.
    """
    # add the new item, and set the value
    # must have all defined axes if we have a scalar
    # or a list-like on the non-info axes if we have a
    # list-like
    if not len(self.obj):
        if not is_list_like_indexer(value):
            raise ValueError(
                "cannot set a frame with no defined index and a scalar"
            )
        self.obj[key] = value
        return

    # add a new item with the dtype setup
    if com.is_null_slice(indexer[0]):
        # We are setting an entire column
        self.obj[key] = value
        return
    elif is_array_like(value):
        # GH#42099
        arr = extract_array(value, extract_numpy=True)
        taker = -1 * np.ones(len(self.obj), dtype=np.intp)
        empty_value = algos.take_nd(arr, taker)
        if not isinstance(value, ABCSeries):
            # if not Series (in which case we need to align),
            # we can short-circuit
            if isinstance(arr, np.ndarray) and arr.ndim == 1 and len(arr) == 1:
                # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615
                arr = arr[0, ...]
            empty_value[indexer[0]] = arr
            self.obj[key] = empty_value
            return

        self.obj[key] = empty_value
    elif not is_list_like(value):
        self.obj[key] = construct_1d_array_from_inferred_fill_value(
            value, len(self.obj)
        )
    else:
        # FIXME: GH#42099#issuecomment-864326014
        self.obj[key] = infer_fill_value(value)

    # recurse to write the actual values into the freshly-created column
    new_indexer = convert_from_missing_indexer_tuple(indexer, self.obj.axes)
    self._setitem_with_indexer(new_indexer, value, name)
    return
def _setitem_with_indexer(self, indexer, value, name: str = "iloc") -> None:
    """
    _setitem_with_indexer is for setting values on a Series/DataFrame
    using positional indexers.

    If the relevant keys are not present, the Series/DataFrame may be
    expanded.

    Parameters
    ----------
    indexer : scalar, slice, array-like, or tuple of those (one per axis)
    value : the value(s) to assign
    name : str, default "iloc"
        Which indexer flavor invoked us ("iloc" or "loc"); "loc" triggers
        mask handling and alignment of Series/DataFrame values.
    """
    info_axis = self.obj._info_axis_number

    take_split_path = self._decide_split_path(indexer, value)

    if isinstance(indexer, tuple):
        nindexer = []
        for i, idx in enumerate(indexer):
            idx, missing = convert_missing_indexer(idx)
            if missing:
                # reindex the axis to the new value
                # and set inplace
                key = idx

                # if this is the items axes, then take the main missing
                # path first
                # this correctly sets the dtype
                # essentially this separates out the block that is needed
                # to possibly be modified
                if self.ndim > 1 and i == info_axis:
                    self._setitem_new_column(indexer, key, value, name=name)
                    return

                # reindex the axis
                index = self.obj._get_axis(i)
                labels = index.insert(len(index), key)

                # We are expanding the Series/DataFrame values to match
                # the length of the new index `labels`. GH#40096 ensure
                # this is valid even if the index has duplicates.
                taker = np.arange(len(index) + 1, dtype=np.intp)
                taker[-1] = -1
                reindexers = {i: (labels, taker)}
                new_obj = self.obj._reindex_with_indexers(
                    reindexers, allow_dups=True
                )
                self.obj._mgr = new_obj._mgr

                nindexer.append(labels.get_loc(key))
            else:
                nindexer.append(idx)

        indexer = tuple(nindexer)
    else:
        indexer, missing = convert_missing_indexer(indexer)
        if missing:
            self._setitem_with_indexer_missing(indexer, value)
            return

    if name == "loc":
        # must come after setting of missing
        indexer, value = self._maybe_mask_setitem_value(indexer, value)

    # align and set the values
    if take_split_path:
        # We have to operate column-wise
        self._setitem_with_indexer_split_path(indexer, value, name)
    else:
        self._setitem_single_block(indexer, value, name)
def _setitem_with_indexer_split_path(self, indexer, value, name: str):
    """
    Setitem column-wise.

    Dispatches on the shape of ``value`` relative to the selected rows
    and columns; each branch ultimately funnels through
    ``_setitem_single_column``.
    """
    # Above we only set take_split_path to True for 2D cases
    assert self.ndim == 2

    if not isinstance(indexer, tuple):
        indexer = _tuplify(self.ndim, indexer)

    if len(indexer) > self.ndim:
        raise IndexError("too many indices for array")

    if isinstance(indexer[0], np.ndarray) and indexer[0].ndim > 2:
        raise ValueError(r"Cannot set values with ndim > 2")

    if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict):
        from pandas import Series

        value = self._align_series(indexer, Series(value))

    # Ensure we have something we can iterate over
    info_axis = indexer[1]
    ilocs = self._ensure_iterable_column_indexer(info_axis)

    pi = indexer[0]
    lplane_indexer = length_of_indexer(pi, self.obj.index)
    # lplane_indexer gives the expected length of obj[indexer[0]]

    # we need an iterable, with an ndim of at least 1
    # eg. don't pass through np.array(0)
    if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0:
        if isinstance(value, ABCDataFrame):
            self._setitem_with_indexer_frame_value(indexer, value, name)

        elif np.ndim(value) == 2:
            # TODO: avoid np.ndim call in case it isn't an ndarray, since
            # that will construct an ndarray, which will be wasteful
            self._setitem_with_indexer_2d_value(indexer, value)

        elif len(ilocs) == 1 and lplane_indexer == len(value) and not is_scalar(pi):
            # We are setting multiple rows in a single column.
            self._setitem_single_column(ilocs[0], value, pi)

        elif len(ilocs) == 1 and 0 != lplane_indexer != len(value):
            # We are trying to set N values into M entries of a single
            # column, which is invalid for N != M
            # Exclude zero-len for e.g. boolean masking that is all-false
            if len(value) == 1 and not is_integer(info_axis):
                # This is a case like df.iloc[:3, [1]] = [0]
                # where we treat as df.iloc[:3, 1] = 0
                return self._setitem_with_indexer((pi, info_axis[0]), value[0])

            raise ValueError(
                "Must have equal len keys and value when setting with an iterable"
            )

        elif lplane_indexer == 0 and len(value) == len(self.obj.index):
            # We get here in one case via .loc with an all-False mask
            pass

        elif self._is_scalar_access(indexer) and is_object_dtype(
            self.obj.dtypes._values[ilocs[0]]
        ):
            # We are setting nested data, only possible for object dtype data
            self._setitem_single_column(indexer[1], value, pi)

        elif len(ilocs) == len(value):
            # We are setting multiple columns in a single row.
            for loc, v in zip(ilocs, value, strict=True):
                self._setitem_single_column(loc, v, pi)

        elif len(ilocs) == 1 and com.is_null_slice(pi) and len(self.obj) == 0:
            # This is a setitem-with-expansion, see
            # test_loc_setitem_empty_append_expands_rows_mixed_dtype
            # e.g. df = DataFrame(columns=["x", "y"])
            # df["x"] = df["x"].astype(np.int64)
            # df.loc[:, "x"] = [1, 2, 3]
            self._setitem_single_column(ilocs[0], value, pi)

        else:
            raise ValueError(
                "Must have equal len keys and value when setting with an iterable"
            )

    else:
        # scalar value: broadcast into every selected column
        for loc in ilocs:
            self._setitem_single_column(loc, value, pi)
def _setitem_with_indexer_2d_value(self, indexer, value) -> None:
    """Setitem where ``value`` is 2D array-like, assigned column-by-column."""
    # We get here with np.ndim(value) == 2, excluding DataFrame,
    # which goes through _setitem_with_indexer_frame_value
    pi = indexer[0]

    ilocs = self._ensure_iterable_column_indexer(indexer[1])

    if not is_array_like(value):
        # cast lists to array
        value = np.array(value, dtype=object)
    if len(ilocs) != value.shape[1]:
        raise ValueError(
            "Must have equal len keys and value when setting with an ndarray"
        )

    for i, loc in enumerate(ilocs):
        value_col = value[:, i]
        if is_object_dtype(value_col.dtype):
            # casting to list so that we do type inference in setitem_single_column
            value_col = value_col.tolist()
        self._setitem_single_column(loc, value_col, pi)
def _setitem_with_indexer_frame_value(
    self, indexer, value: DataFrame, name: str
) -> None:
    """Setitem where ``value`` is a DataFrame, assigned column-by-column."""
    ilocs = self._ensure_iterable_column_indexer(indexer[1])

    sub_indexer = list(indexer)
    pi = indexer[0]

    multiindex_indexer = isinstance(self.obj.columns, MultiIndex)

    unique_cols = value.columns.is_unique

    # We do not want to align the value in case of iloc GH#37728
    if name == "iloc":
        for i, loc in enumerate(ilocs):
            val = value.iloc[:, i]
            self._setitem_single_column(loc, val, pi)

    elif not unique_cols and value.columns.equals(self.obj.columns):
        # We assume we are already aligned, see
        # test_iloc_setitem_frame_duplicate_columns_multiple_blocks
        for loc in ilocs:
            item = self.obj.columns[loc]
            if item in value:
                sub_indexer[1] = item
                val = self._align_series(
                    tuple(sub_indexer),
                    value.iloc[:, loc],
                    multiindex_indexer,
                )
            else:
                # column missing from value -> fill with NaN
                val = np.nan

            self._setitem_single_column(loc, val, pi)

    elif not unique_cols:
        raise ValueError("Setting with non-unique columns is not allowed.")

    else:
        for loc in ilocs:
            item = self.obj.columns[loc]
            if item in value:
                sub_indexer[1] = item
                val = self._align_series(
                    tuple(sub_indexer),
                    value[item],
                    multiindex_indexer,
                    using_cow=True,
                )
            else:
                # column missing from value -> fill with NaN
                val = np.nan

            self._setitem_single_column(loc, val, pi)
def _setitem_single_column(self, loc: int, value, plane_indexer) -> None:
    """
    Set ``value`` into the single column at position ``loc``.

    Parameters
    ----------
    loc : int
        Indexer for column position
    value : scalar or listlike
        The value(s) to write into the column.
    plane_indexer : int, slice, listlike[int]
        The indexer we use for setitem along axis=0.
    """
    pi = plane_indexer

    is_full_setter = com.is_null_slice(pi) or com.is_full_slice(pi, len(self.obj))
    is_null_setter = com.is_empty_slice(pi) or (is_array_like(pi) and len(pi) == 0)

    if is_null_setter:
        # no-op, don't cast dtype later
        return

    elif is_full_setter:
        try:
            self.obj._mgr.column_setitem(
                loc, plane_indexer, value, inplace_only=True
            )
        except (ValueError, TypeError, LossySetitemError) as exc:
            # If we're setting an entire column and we can't do it inplace,
            # then we can use value's dtype (or inferred dtype)
            # instead of object
            dtype = self.obj.dtypes.iloc[loc]
            if dtype not in (np.void, object) and not self.obj.empty:
                # - Exclude np.void, as that is a special case for expansion.
                # We want to raise for
                # df = pd.DataFrame({'a': [1, 2]})
                # df.loc[:, 'a'] = .3
                # but not for
                # df = pd.DataFrame({'a': [1, 2]})
                # df.loc[:, 'b'] = .3
                # - Exclude `object`, as then no upcasting happens.
                # - Exclude empty initial object with enlargement,
                # as then there's nothing to be inconsistent with.
                raise TypeError(
                    f"Invalid value '{value}' for dtype '{dtype}'"
                ) from exc
            self.obj.isetitem(loc, value)
    else:
        # set value into the column (first attempting to operate inplace, then
        # falling back to casting if necessary)
        dtype = self.obj.dtypes.iloc[loc]
        if dtype == np.void:
            # This means we're expanding, with multiple columns, e.g.
            # df = pd.DataFrame({'A': [1,2,3], 'B': [4,5,6]})
            # df.loc[df.index <= 2, ['F', 'G']] = (1, 'abc')
            # Columns F and G will initially be set to np.void.
            # Here, we replace those temporary `np.void` columns with
            # columns of the appropriate dtype, based on `value`.
            self.obj.iloc[:, loc] = construct_1d_array_from_inferred_fill_value(
                value, len(self.obj)
            )

        self.obj._mgr.column_setitem(loc, plane_indexer, value)
def _setitem_single_block(self, indexer, value, name: str) -> None:
    """
    _setitem_with_indexer for the case when we have a single Block.

    Aligns Series/DataFrame values (for .loc) and delegates the write to
    the BlockManager's setitem.
    """
    from pandas import Series

    if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict):
        # TODO(EA): ExtensionBlock.setitem this causes issues with
        # setting for extensionarrays that store dicts. Need to decide
        # if it's worth supporting that.
        value = self._align_series(indexer, Series(value))

    info_axis = self.obj._info_axis_number
    item_labels = self.obj._get_axis(info_axis)
    if isinstance(indexer, tuple):
        # if we are setting on the info axis ONLY
        # set using those methods to avoid block-splitting
        # logic here
        if (
            self.ndim == len(indexer) == 2
            and is_integer(indexer[1])
            and com.is_null_slice(indexer[0])
        ):
            col = item_labels[indexer[info_axis]]
            if len(item_labels.get_indexer_for([col])) == 1:
                # e.g. test_loc_setitem_empty_append_expands_rows
                loc = item_labels.get_loc(col)
                self._setitem_single_column(loc, value, indexer[0])
                return

        indexer = maybe_convert_ix(*indexer)  # e.g. test_setitem_frame_align

    if isinstance(value, ABCDataFrame) and name != "iloc":
        value = self._align_frame(indexer, value)._values

    # actually do the set
    self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value)
def _setitem_with_indexer_missing(self, indexer, value):
    """
    Insert new row(s) or column(s) into the Series or DataFrame.

    Called when the (loc) key was not found: the object is enlarged by
    appending a new entry labelled ``indexer``.
    """
    from pandas import Series

    # reindex the axis to the new value
    # and set inplace
    if self.ndim == 1:
        index = self.obj.index
        new_index = index.insert(len(index), indexer)

        # we have a coerced indexer, e.g. a float
        # that matches in an int64 Index, so
        # we will not create a duplicate index, rather
        # index to that element
        # e.g. 0.0 -> 0
        # GH#12246
        if index.is_unique:
            # pass new_index[-1:] instead of [new_index[-1]]
            # so that we retain dtype
            new_indexer = index.get_indexer(new_index[-1:])
            if (new_indexer != -1).any():
                # We get only here with loc, so can hard code
                return self._setitem_with_indexer(new_indexer, value, "loc")

        # this preserves dtype of the value and of the object
        if not is_scalar(value):
            new_dtype = None

        elif is_valid_na_for_dtype(value, self.obj.dtype):
            if not is_object_dtype(self.obj.dtype):
                # Every NA value is suitable for object, no conversion needed
                value = na_value_for_dtype(self.obj.dtype, compat=False)

            new_dtype = maybe_promote(self.obj.dtype, value)[0]

        elif isna(value):
            new_dtype = None
        elif not self.obj.empty and not is_object_dtype(self.obj.dtype):
            # We should not cast, if we have object dtype because we can
            # set timedeltas into object series
            curr_dtype = self.obj.dtype
            curr_dtype = getattr(curr_dtype, "numpy_dtype", curr_dtype)
            new_dtype = maybe_promote(curr_dtype, value)[0]
        else:
            new_dtype = None

        new_values = Series([value], dtype=new_dtype)._values

        if len(self.obj._values):
            # GH#22717 handle casting compatibility that np.concatenate
            # does incorrectly
            new_values = concat_compat([self.obj._values, new_values])
        self.obj._mgr = self.obj._constructor(
            new_values, index=new_index, name=self.obj.name
        )._mgr

    elif self.ndim == 2:
        if not len(self.obj.columns):
            # no columns and scalar
            raise ValueError("cannot set a frame with no defined columns")

        has_dtype = hasattr(value, "dtype")
        if isinstance(value, ABCSeries):
            # append a Series
            value = value.reindex(index=self.obj.columns)
            value.name = indexer
        elif isinstance(value, dict):
            value = Series(
                value, index=self.obj.columns, name=indexer, dtype=object
            )
        else:
            # a list-list
            if is_list_like_indexer(value):
                # must have conforming columns
                if len(value) != len(self.obj.columns):
                    raise ValueError("cannot set a row with mismatched columns")

            value = Series(value, index=self.obj.columns, name=indexer)

        if not len(self.obj):
            # We will ignore the existing dtypes instead of using
            # internals.concat logic
            df = value.to_frame().T

            idx = self.obj.index
            if isinstance(idx, MultiIndex):
                name = idx.names
            else:
                name = idx.name

            df.index = Index([indexer], name=name)
            if not has_dtype:
                # i.e. if we already had a Series or ndarray, keep that
                # dtype. But if we had a list or dict, then do inference
                df = df.infer_objects()
            self.obj._mgr = df._mgr
        else:
            self.obj._mgr = self.obj._append_internal(value)._mgr
def _ensure_iterable_column_indexer(self, column_indexer):
    """
    Ensure that our column indexer is something that can be iterated over.

    A scalar integer becomes a one-element list, a slice is resolved to
    a range over the columns, and a boolean ndarray is converted to the
    array of selected positions; anything else is assumed to already be
    an iterable of column positions and is passed through unchanged.
    """
    if is_integer(column_indexer):
        # single column -> wrap so callers can iterate
        return [column_indexer]
    if isinstance(column_indexer, slice):
        # resolve the slice against the actual number of columns
        return range(len(self.obj.columns))[column_indexer]
    if isinstance(column_indexer, np.ndarray) and column_indexer.dtype.kind == "b":
        # boolean mask -> positions of the True entries
        return np.arange(len(column_indexer))[column_indexer]
    return column_indexer
def _align_series(
    self,
    indexer,
    ser: Series,
    multiindex_indexer: bool = False,
    using_cow: bool = False,
):
    """
    Parameters
    ----------
    indexer : tuple, slice, scalar
        Indexer used to get the locations that will be set to `ser`.
    ser : pd.Series
        Values to assign to the locations specified by `indexer`.
    multiindex_indexer : bool, optional
        Defaults to False. Should be set to True if `indexer` was from
        a `pd.MultiIndex`, to avoid unnecessary broadcasting.
    using_cow : bool, optional
        Defaults to False. If True, `ser` itself may be returned
        (instead of a copy of its values) when it is already aligned.

    Returns
    -------
    `np.array` of `ser` broadcast to the appropriate shape for assignment
    to the locations selected by `indexer`

    Raises
    ------
    ValueError
        If the indexer is incompatible with the Series.
    """
    if isinstance(indexer, (slice, np.ndarray, list, Index)):
        indexer = (indexer,)

    if isinstance(indexer, tuple):
        # flatten np.ndarray indexers
        if (
            len(indexer) == 2
            and isinstance(indexer[1], np.ndarray)
            and indexer[1].dtype == np.bool_
        ):
            indexer = (indexer[0], np.where(indexer[1])[0])

        def ravel(i):
            return i.ravel() if isinstance(i, np.ndarray) else i

        indexer = tuple(map(ravel, indexer))

        aligners = [not com.is_null_slice(idx) for idx in indexer]
        sum_aligners = sum(aligners)
        single_aligner = sum_aligners == 1
        is_frame = self.ndim == 2
        obj = self.obj

        # are we a single alignable value on a non-primary
        # dim (e.g. panel: 1,2, or frame: 0) ?
        # hence need to align to a single axis dimension
        # rather than find all valid dims

        # frame
        if is_frame:
            single_aligner = single_aligner and aligners[0]

            # we have a frame, with multiple indexers on both axes; and a
            # series, so need to broadcast (see GH5206)
            if all(is_sequence(_) or isinstance(_, slice) for _ in indexer):
                ser_values = ser.reindex(obj.axes[0][indexer[0]])._values

                # single indexer
                if len(indexer) > 1 and not multiindex_indexer:
                    if isinstance(indexer[1], slice):
                        len_indexer = len(obj.axes[1][indexer[1]])
                    else:
                        len_indexer = len(indexer[1])
                    # broadcast row-aligned values across the selected columns
                    ser_values = (
                        np.tile(ser_values, len_indexer).reshape(len_indexer, -1).T
                    )

                return ser_values

        for i, idx in enumerate(indexer):
            ax = obj.axes[i]

            # multiple aligners (or null slices)
            if is_sequence(idx) or isinstance(idx, slice):
                if single_aligner and com.is_null_slice(idx):
                    continue
                new_ix = ax[idx]
                if not is_list_like_indexer(new_ix):
                    new_ix = Index([new_ix])
                else:
                    new_ix = Index(new_ix)
                if not len(new_ix) or ser.index.equals(new_ix):
                    if using_cow:
                        return ser
                    return ser._values.copy()

                return ser.reindex(new_ix)._values

            # 2 dims
            elif single_aligner:
                # reindex along index
                ax = self.obj.axes[1]
                if ser.index.equals(ax) or not len(ax):
                    return ser._values.copy()
                return ser.reindex(ax)._values

    elif is_integer(indexer) and self.ndim == 1:
        if is_object_dtype(self.obj.dtype):
            return ser
        ax = self.obj._get_axis(0)

        if ser.index.equals(ax):
            return ser._values.copy()

        return ser.reindex(ax)._values[indexer]

    elif is_integer(indexer):
        ax = self.obj._get_axis(1)

        if ser.index.equals(ax):
            return ser._values.copy()

        return ser.reindex(ax)._values

    raise ValueError("Incompatible indexer with Series")
def _align_frame(self, indexer, df: DataFrame) -> DataFrame:
    """
    Align ``df`` to the row/column locations selected by ``indexer``.

    Returns a DataFrame reindexed to the selected labels, suitable for
    assignment into ``self.obj``.  Raises ValueError when the indexer is
    incompatible with a DataFrame value.
    """
    is_frame = self.ndim == 2

    if isinstance(indexer, tuple):
        idx, cols = None, None
        sindexers = []
        for i, ix in enumerate(indexer):
            ax = self.obj.axes[i]
            if is_sequence(ix) or isinstance(ix, slice):
                if isinstance(ix, np.ndarray):
                    ix = ix.reshape(-1)
                if idx is None:
                    idx = ax[ix]
                elif cols is None:
                    cols = ax[ix]
                else:
                    break
            else:
                # scalar component: no labels to align on this axis
                sindexers.append(i)

        if idx is not None and cols is not None:
            if df.index.equals(idx) and df.columns.equals(cols):
                val = df.copy()
            else:
                val = df.reindex(idx, columns=cols)
            return val

    elif (isinstance(indexer, slice) or is_list_like_indexer(indexer)) and is_frame:
        ax = self.obj.index[indexer]

        if df.index.equals(ax):
            val = df.copy()
        else:
            # we have a multi-index and are trying to align
            # with a particular, level GH3738
            if (
                isinstance(ax, MultiIndex)
                and isinstance(df.index, MultiIndex)
                and ax.nlevels != df.index.nlevels
            ):
                raise TypeError(
                    "cannot align on a multi-index with out "
                    "specifying the join levels"
                )

            val = df.reindex(index=ax)
        return val

    raise ValueError("Incompatible indexer with DataFrame")
| _iLocIndexer |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/prefect_dbt/cli/configs/base.py | {
"start": 9107,
"end": 13502
} | class ____(DbtConfigs):
"""
Global configs control things like the visual output
of logs, the manner in which dbt parses your project,
and what to do when dbt finds a version mismatch
or a failing model. Docs can be found [here](
https://docs.getdbt.com/reference/global-configs).
Attributes:
send_anonymous_usage_stats: Whether usage stats are sent to dbt.
use_colors: Colorize the output it prints in your terminal.
partial_parse: When partial parsing is enabled, dbt will use an
stored internal manifest to determine which files have been changed
(if any) since it last parsed the project.
printer_width: Length of characters before starting a new line.
write_json: Determines whether dbt writes JSON artifacts to
the target/ directory.
warn_error: Whether to convert dbt warnings into errors.
log_format: The LOG_FORMAT config specifies how dbt's logs should
be formatted. If the value of this config is json, dbt will
output fully structured logs in JSON format.
debug: Whether to redirect dbt's debug logs to standard out.
version_check: Whether to raise an error if a project's version
is used with an incompatible dbt version.
fail_fast: Make dbt exit immediately if a single resource fails to build.
use_experimental_parser: Opt into the latest experimental version
of the static parser.
static_parser: Whether to use the [static parser](
https://docs.getdbt.com/reference/parsing#static-parser).
Examples:
Load stored GlobalConfigs:
```python
from prefect_dbt.cli.configs import GlobalConfigs
dbt_cli_global_configs = GlobalConfigs.load("BLOCK_NAME")
```
"""
_block_type_name = "dbt CLI Global Configs"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
_documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
send_anonymous_usage_stats: Optional[bool] = Field(
default=None,
description="Whether usage stats are sent to dbt.",
)
use_colors: Optional[bool] = Field(
default=None,
description="Colorize the output it prints in your terminal.",
)
partial_parse: Optional[bool] = Field(
default=None,
description=(
"When partial parsing is enabled, dbt will use an "
"stored internal manifest to determine which files have been changed "
"(if any) since it last parsed the project."
),
)
printer_width: Optional[int] = Field(
default=None,
description="Length of characters before starting a new line.",
)
write_json: Optional[bool] = Field(
default=None,
description=(
"Determines whether dbt writes JSON artifacts to the target/ directory."
),
)
warn_error: Optional[bool] = Field(
default=None,
description="Whether to convert dbt warnings into errors.",
)
log_format: Optional[str] = Field(
default=None,
description=(
"The LOG_FORMAT config specifies how dbt's logs should "
"be formatted. If the value of this config is json, dbt will "
"output fully structured logs in JSON format."
),
)
debug: Optional[bool] = Field(
default=None,
description="Whether to redirect dbt's debug logs to standard out.",
)
version_check: Optional[bool] = Field(
default=None,
description=(
"Whether to raise an error if a project's version "
"is used with an incompatible dbt version."
),
)
fail_fast: Optional[bool] = Field(
default=None,
description=("Make dbt exit immediately if a single resource fails to build."),
)
use_experimental_parser: Optional[bool] = Field(
default=None,
description=("Opt into the latest experimental version of the static parser."),
)
static_parser: Optional[bool] = Field(
default=None,
description=(
"Whether to use the [static parser](https://docs.getdbt.com/reference/parsing#static-parser)." # noqa
),
)
| GlobalConfigs |
python | getsentry__sentry | src/sentry/incidents/grouptype.py | {
"start": 2361,
"end": 2440
} | class ____(EvidenceData[MetricResult]):
alert_id: int
| MetricIssueEvidenceData |
python | huggingface__transformers | src/transformers/models/oneformer/convert_to_hf_oneformer.py | {
"start": 3575,
"end": 7733
} | class ____:
def __call__(self, original_config: object, is_swin: bool) -> OneFormerConfig:
model = original_config.MODEL
dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0])
id2label = dict(enumerate(dataset_catalog.stuff_classes))
label2id = {label: idx for idx, label in id2label.items()}
if is_swin:
if model.SWIN.EMBED_DIM == 96:
backbone_config = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224",
drop_path_rate=model.SWIN.DROP_PATH_RATE,
out_features=["stage1", "stage2", "stage3", "stage4"],
)
elif model.SWIN.EMBED_DIM == 192:
backbone_config = SwinConfig.from_pretrained(
"microsoft/swin-large-patch4-window12-384",
drop_path_rate=model.SWIN.DROP_PATH_RATE,
out_features=["stage1", "stage2", "stage3", "stage4"],
)
else:
raise ValueError(f"embed dim {model.SWIN.EMBED_DIM} not supported for Swin!")
else:
backbone_config = DinatConfig.from_pretrained(
"shi-labs/dinat-large-11x11-in22k-in1k-384",
dilations=model.DiNAT.DILATIONS,
kernel_size=model.DiNAT.KERNEL_SIZE,
out_features=["stage1", "stage2", "stage3", "stage4"],
)
config: OneFormerConfig = OneFormerConfig(
backbone_config=backbone_config,
output_attentions=True,
output_hidden_states=True,
return_dict=True,
ignore_value=model.SEM_SEG_HEAD.IGNORE_VALUE,
num_classes=model.SEM_SEG_HEAD.NUM_CLASSES,
num_queries=model.ONE_FORMER.NUM_OBJECT_QUERIES,
no_object_weight=model.ONE_FORMER.NO_OBJECT_WEIGHT,
class_weight=model.ONE_FORMER.CLASS_WEIGHT,
mask_weight=model.ONE_FORMER.MASK_WEIGHT,
dice_weight=model.ONE_FORMER.DICE_WEIGHT,
contrastive_weight=model.ONE_FORMER.CONTRASTIVE_WEIGHT,
contrastive_temperature=model.ONE_FORMER.CONTRASTIVE_TEMPERATURE,
train_num_points=model.ONE_FORMER.TRAIN_NUM_POINTS,
oversample_ratio=model.ONE_FORMER.OVERSAMPLE_RATIO,
importance_sample_ratio=model.ONE_FORMER.IMPORTANCE_SAMPLE_RATIO,
init_std=0.02,
init_xavier_std=1.0,
layer_norm_eps=1e-05,
is_training=False,
use_auxiliary_loss=model.ONE_FORMER.DEEP_SUPERVISION,
output_auxiliary_logits=True,
strides=[4, 8, 16, 32],
task_seq_len=original_config.INPUT.TASK_SEQ_LEN,
max_seq_len=original_config.INPUT.MAX_SEQ_LEN,
text_encoder_width=model.TEXT_ENCODER.WIDTH,
text_encoder_context_length=model.TEXT_ENCODER.CONTEXT_LENGTH,
text_encoder_num_layers=model.TEXT_ENCODER.NUM_LAYERS,
text_encoder_vocab_size=model.TEXT_ENCODER.VOCAB_SIZE,
text_encoder_proj_layers=model.TEXT_ENCODER.PROJ_NUM_LAYERS,
text_encoder_n_ctx=model.TEXT_ENCODER.N_CTX,
conv_dim=model.SEM_SEG_HEAD.CONVS_DIM,
mask_dim=model.SEM_SEG_HEAD.MASK_DIM,
hidden_dim=model.ONE_FORMER.HIDDEN_DIM,
norm=model.SEM_SEG_HEAD.NORM,
encoder_layers=model.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS,
encoder_feedforward_dim=1024,
decoder_layers=model.ONE_FORMER.DEC_LAYERS,
use_task_norm=model.ONE_FORMER.USE_TASK_NORM,
num_attention_heads=model.ONE_FORMER.NHEADS,
dropout=model.ONE_FORMER.DROPOUT,
dim_feedforward=model.ONE_FORMER.DIM_FEEDFORWARD,
pre_norm=model.ONE_FORMER.PRE_NORM,
enforce_input_proj=model.ONE_FORMER.ENFORCE_INPUT_PROJ,
query_dec_layers=model.ONE_FORMER.CLASS_DEC_LAYERS,
common_stride=model.SEM_SEG_HEAD.COMMON_STRIDE,
id2label=id2label,
label2id=label2id,
)
return config
| OriginalOneFormerConfigToOursConverter |
python | huggingface__transformers | src/transformers/models/video_llama_3/modular_video_llama_3.py | {
"start": 2686,
"end": 5674
} | class ____(SiglipVisionConfig):
"""
This is the configuration class to store the configuration of a [`VideoLlama3VisionModel`]. It is used to instantiate a
VideoLLaMA3 vision encoder model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of
VideoLLaMA3-2B [lkhl/VideoLLaMA3-2B-Image-HF](https://huggingface.co/lkhl/VideoLLaMA3-2B-Image-HF).
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input images.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
"""
model_type = "video_llama_3_vision"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=768,
intermediate_size=3072,
num_hidden_layers=12,
num_attention_heads=12,
num_channels=3,
patch_size=16,
hidden_act="gelu_pytorch_tanh",
layer_norm_eps=1e-6,
attention_dropout=0.0,
initializer_range=0.02,
**kwargs,
):
super().__init__(
hidden_size=hidden_size,
intermediate_size=intermediate_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
num_channels=num_channels,
patch_size=patch_size,
hidden_act=hidden_act,
layer_norm_eps=layer_norm_eps,
attention_dropout=attention_dropout,
**kwargs,
)
self.initializer_range = initializer_range
del self.image_size
| VideoLlama3VisionConfig |
python | huggingface__transformers | src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | {
"start": 3101,
"end": 5087
} | class ____(nn.Module):
"""
This class turns `input_values` into the initial `hidden_states` (patch embeddings) of shape `(batch_size,
seq_length, hidden_size)` to be consumed by a Transformer.
"""
def __init__(self, config: ASTConfig):
super().__init__()
patch_size = config.patch_size
frequency_stride = config.frequency_stride
time_stride = config.time_stride
self.projection = nn.Conv2d(
1, config.hidden_size, kernel_size=(patch_size, patch_size), stride=(frequency_stride, time_stride)
)
def forward(self, input_values: torch.Tensor) -> torch.Tensor:
input_values = input_values.unsqueeze(1)
input_values = input_values.transpose(2, 3)
embeddings = self.projection(input_values).flatten(2).transpose(1, 2)
return embeddings
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->AST
| ASTPatchEmbeddings |
python | walkccc__LeetCode | solutions/1290. Convert Binary Number in a Linked List to Integer/1290.py | {
"start": 0,
"end": 167
} | class ____:
def getDecimalValue(self, head: ListNode) -> int:
ans = 0
while head:
ans = ans * 2 + head.val
head = head.next
return ans
| Solution |
python | ansible__ansible | test/units/plugins/lookup/test_password.py | {
"start": 16204,
"end": 17037
} | class ____(unittest.TestCase):
def setUp(self):
self.fake_loader = DictDataLoader({'/path/to/somewhere': 'sdfsdf'})
self.password_lookup = lookup_loader.get('password')
self.password_lookup._loader = self.fake_loader
self.os_path_exists = password.os.path.exists
self.os_open = password.os.open
password.os.open = self.noop
self.os_close = password.os.close
password.os.close = self.noop
self.makedirs_safe = password.makedirs_safe
password.makedirs_safe = self.noop
def noop(self, *args, **kwargs):
pass
def tearDown(self):
password.os.path.exists = self.os_path_exists
password.os.open = self.os_open
password.os.close = self.os_close
password.makedirs_safe = self.makedirs_safe
| BaseTestLookupModule |
python | openai__openai-python | src/openai/types/responses/response_input_item_param.py | {
"start": 8068,
"end": 8819
} | class ____(TypedDict, total=False):
call_id: Required[str]
"""The unique ID of the function shell tool call generated by the model."""
output: Required[Iterable[ResponseFunctionShellCallOutputContentParam]]
"""
Captured chunks of stdout and stderr output, along with their associated
outcomes.
"""
type: Required[Literal["shell_call_output"]]
"""The type of the item. Always `function_shell_call_output`."""
id: Optional[str]
"""The unique ID of the function shell tool call output.
Populated when this item is returned via API.
"""
max_output_length: Optional[int]
"""
The maximum number of UTF-8 characters captured for this shell call's combined
output.
"""
| ShellCallOutput |
python | kamyu104__LeetCode-Solutions | Python/find-all-people-with-secret.py | {
"start": 54,
"end": 1087
} | class ____(object):
def findAllPeople(self, n, meetings, firstPerson):
"""
:type n: int
:type meetings: List[List[int]]
:type firstPerson: int
:rtype: List[int]
"""
meetings.sort(key=lambda x: x[2])
result = {0, firstPerson}
adj = collections.defaultdict(list)
for i, (x, y, _) in enumerate(meetings):
adj[x].append(y)
adj[y].append(x)
if i+1 != len(meetings) and meetings[i+1][2] == meetings[i][2]:
continue
q = [i for i in adj.iterkeys() if i in result]
while q:
new_q = []
for u in q:
for v in adj[u]:
if v in result:
continue
result.add(v)
new_q.append(v)
q = new_q
adj = collections.defaultdict(list)
return list(result)
# Time: O(nlogn)
# Space: O(n)
import collections
| Solution |
python | astropy__astropy | astropy/constants/codata2022.py | {
"start": 354,
"end": 477
} | class ____(Constant):
default_reference = "CODATA 2022"
_registry = {}
_has_incompatible_units = set()
| CODATA2022 |
python | openai__gym | gym/wrappers/filter_observation.py | {
"start": 146,
"end": 3435
} | class ____(gym.ObservationWrapper):
"""Filter Dict observation space by the keys.
Example:
>>> import gym
>>> env = gym.wrappers.TransformObservation(
... gym.make('CartPole-v1'), lambda obs: {'obs': obs, 'time': 0}
... )
>>> env.observation_space = gym.spaces.Dict(obs=env.observation_space, time=gym.spaces.Discrete(1))
>>> env.reset()
{'obs': array([-0.00067088, -0.01860439, 0.04772898, -0.01911527], dtype=float32), 'time': 0}
>>> env = FilterObservation(env, filter_keys=['time'])
>>> env.reset()
{'obs': array([ 0.04560107, 0.04466959, -0.0328232 , -0.02367178], dtype=float32)}
>>> env.step(0)
({'obs': array([ 0.04649447, -0.14996664, -0.03329664, 0.25847703], dtype=float32)}, 1.0, False, {})
"""
def __init__(self, env: gym.Env, filter_keys: Sequence[str] = None):
"""A wrapper that filters dictionary observations by their keys.
Args:
env: The environment to apply the wrapper
filter_keys: List of keys to be included in the observations. If ``None``, observations will not be filtered and this wrapper has no effect
Raises:
ValueError: If the environment's observation space is not :class:`spaces.Dict`
ValueError: If any of the `filter_keys` are not included in the original `env`'s observation space
"""
super().__init__(env)
wrapped_observation_space = env.observation_space
if not isinstance(wrapped_observation_space, spaces.Dict):
raise ValueError(
f"FilterObservationWrapper is only usable with dict observations, "
f"environment observation space is {type(wrapped_observation_space)}"
)
observation_keys = wrapped_observation_space.spaces.keys()
if filter_keys is None:
filter_keys = tuple(observation_keys)
missing_keys = {key for key in filter_keys if key not in observation_keys}
if missing_keys:
raise ValueError(
"All the filter_keys must be included in the original observation space.\n"
f"Filter keys: {filter_keys}\n"
f"Observation keys: {observation_keys}\n"
f"Missing keys: {missing_keys}"
)
self.observation_space = type(wrapped_observation_space)(
[
(name, copy.deepcopy(space))
for name, space in wrapped_observation_space.spaces.items()
if name in filter_keys
]
)
self._env = env
self._filter_keys = tuple(filter_keys)
def observation(self, observation):
"""Filters the observations.
Args:
observation: The observation to filter
Returns:
The filtered observations
"""
filter_observation = self._filter_observation(observation)
return filter_observation
def _filter_observation(self, observation):
observation = type(observation)(
[
(name, value)
for name, value in observation.items()
if name in self._filter_keys
]
)
return observation
| FilterObservation |
python | pyodide__pyodide | tools/create_lockfile_diff.py | {
"start": 534,
"end": 2747
} | class ____:
name: str
old_version: str | None
new_version: str | None
def is_normal_python_package(pkg: PackageSpec) -> bool:
return pkg.package_type == "package" and pkg.file_name.endswith(".whl")
def calculate_diff(
old_lockfile_path: Path, new_lockfile_path: Path
) -> tuple[list[PackageDiff], list[PackageDiff], list[PackageDiff]]:
"""
Calculate the differences between two Pyodide lockfiles.
Returns a tuple of three lists:
- Added packages
- Removed packages
- Changed packages (with old and new versions)
"""
old_lockfile = PyodideLockSpec.from_json(Path(old_lockfile_path))
new_lockfile = PyodideLockSpec.from_json(Path(new_lockfile_path))
old_packages = {
pkg.name: pkg
for pkg in old_lockfile.packages.values()
if is_normal_python_package(pkg)
}
new_packages = {
pkg.name: pkg
for pkg in new_lockfile.packages.values()
if is_normal_python_package(pkg)
}
added = [
PackageDiff(name=pkg.name, old_version=None, new_version=pkg.version)
for name, pkg in new_packages.items()
if name not in old_packages
]
removed = [
PackageDiff(name=pkg.name, old_version=pkg.version, new_version=None)
for name, pkg in old_packages.items()
if name not in new_packages
]
changed = [
PackageDiff(
name=name,
old_version=old_packages[name].version,
new_version=new_packages[name].version,
)
for name in set(old_packages) & set(new_packages)
if old_packages[name].version != new_packages[name].version
]
return added, removed, changed
def main():
args = parse_args()
added, removed, changed = calculate_diff(args.old_lockfile, args.new_lockfile)
print("Added packages:")
for pkg in added:
print(f" - {pkg.name} ({pkg.new_version})")
print("\nRemoved packages:")
for pkg in removed:
print(f" - {pkg.name} ({pkg.old_version})")
print("\nChanged packages:")
for pkg in changed:
print(f" - {pkg.name}: {pkg.old_version} -> {pkg.new_version}")
if __name__ == "__main__":
main()
| PackageDiff |
python | keras-team__keras | keras/src/ops/numpy_test.py | {
"start": 64584,
"end": 85599
} | class ____(testing.TestCase):
def test_mean(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.mean(x).shape, ())
def test_all(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.all(x).shape, ())
def test_any(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.any(x).shape, ())
def test_trapezoid(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.trapezoid(x).shape, (2,))
def test_var(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.var(x).shape, ())
def test_sum(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.sum(x).shape, ())
def test_amax(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.amax(x).shape, ())
def test_amin(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.amin(x).shape, ())
def test_square(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.square(x).shape, (2, 3))
def test_negative(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.negative(x).shape, (2, 3))
def test_abs(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.abs(x).shape, (2, 3))
def test_absolute(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.absolute(x).shape, (2, 3))
def test_squeeze(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.squeeze(x).shape, (2, 3))
x = KerasTensor((2, 1, 3))
self.assertEqual(knp.squeeze(x).shape, (2, 3))
self.assertEqual(knp.squeeze(x, axis=1).shape, (2, 3))
self.assertEqual(knp.squeeze(x, axis=-2).shape, (2, 3))
with self.assertRaises(ValueError):
knp.squeeze(x, axis=0)
# Multiple axes
x = KerasTensor((2, 1, 1, 1))
self.assertEqual(knp.squeeze(x, (1, 2)).shape, (2, 1))
self.assertEqual(knp.squeeze(x, (-1, -2)).shape, (2, 1))
self.assertEqual(knp.squeeze(x, (1, 2, 3)).shape, (2,))
self.assertEqual(knp.squeeze(x, (-1, 1)).shape, (2, 1))
def test_transpose(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.transpose(x).shape, (3, 2))
def test_arccos(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.arccos(x).shape, (2, 3))
def test_arccosh(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.arccosh(x).shape, (2, 3))
def test_arcsin(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.arcsin(x).shape, (2, 3))
def test_arcsinh(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.arcsinh(x).shape, (2, 3))
def test_arctan(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.arctan(x).shape, (2, 3))
def test_arctanh(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.arctanh(x).shape, (2, 3))
def test_argmax(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.argmax(x).shape, ())
self.assertEqual(knp.argmax(x, keepdims=True).shape, (2, 3))
def test_argmin(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.argmin(x).shape, ())
self.assertEqual(knp.argmin(x, keepdims=True).shape, (2, 3))
def test_argsort(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.argsort(x).shape, (2, 3))
self.assertEqual(knp.argsort(x, axis=None).shape, (6,))
def test_array(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.array(x).shape, (2, 3))
def test_average(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.average(x).shape, ())
def test_bitwise_invert(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.bitwise_invert(x).shape, (2, 3))
# bitwise_not is same as bitwise_invert
def test_broadcast_to(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.broadcast_to(x, (2, 2, 3)).shape, (2, 2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((3, 3))
knp.broadcast_to(x, (2, 2, 3))
def test_cbrt(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.cbrt(x).shape, (2, 3))
def test_ceil(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.ceil(x).shape, (2, 3))
def test_clip(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.clip(x, 1, 2).shape, (2, 3))
def test_concatenate(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.concatenate([x, y]).shape, (4, 3))
self.assertEqual(knp.concatenate([x, y], axis=1).shape, (2, 6))
with self.assertRaises(ValueError):
self.assertEqual(knp.concatenate([x, y], axis=None).shape, (None,))
def test_conjugate(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.conjugate(x).shape, (2, 3))
def test_conj(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.conj(x).shape, (2, 3))
def test_copy(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.copy(x).shape, (2, 3))
def test_cos(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.cos(x).shape, (2, 3))
def test_cosh(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.cosh(x).shape, (2, 3))
def test_count_nonzero(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.count_nonzero(x).shape, ())
def test_cumprod(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.cumprod(x).shape, (6,))
def test_cumsum(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.cumsum(x).shape, (6,))
def test_deg2rad(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.deg2rad(x).shape, (2, 3))
def test_diag(self):
x = KerasTensor((3,))
self.assertEqual(knp.diag(x).shape, (3, 3))
self.assertEqual(knp.diag(x, k=3).shape, (6, 6))
self.assertEqual(knp.diag(x, k=-2).shape, (5, 5))
x = KerasTensor((3, 5))
self.assertEqual(knp.diag(x).shape, (3,))
self.assertEqual(knp.diag(x, k=3).shape, (2,))
self.assertEqual(knp.diag(x, k=-2).shape, (1,))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3, 4))
knp.diag(x)
def test_diagflat(self):
x = KerasTensor((3,))
self.assertEqual(knp.diagflat(x).shape, (3, 3))
self.assertEqual(knp.diagflat(x, k=1).shape, (4, 4))
self.assertEqual(knp.diagflat(x, k=-1).shape, (4, 4))
x = KerasTensor((2, 3))
self.assertEqual(knp.diagflat(x).shape, (6, 6))
self.assertEqual(knp.diagflat(x, k=1).shape, (7, 7))
self.assertEqual(knp.diagflat(x, k=-1).shape, (7, 7))
x = KerasTensor((None, 3))
self.assertEqual(knp.diagflat(x).shape, (None, None))
x = KerasTensor(())
self.assertEqual(knp.diagflat(x).shape, (1, 1))
def test_diagonal(self):
x = KerasTensor((3, 3))
self.assertEqual(knp.diagonal(x).shape, (3,))
self.assertEqual(knp.diagonal(x, offset=1).shape, (2,))
x = KerasTensor((3, 5, 5))
self.assertEqual(knp.diagonal(x).shape, (5, 3))
with self.assertRaises(ValueError):
x = KerasTensor((3,))
knp.diagonal(x)
def test_diff(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.diff(x).shape, (2, 2))
self.assertEqual(knp.diff(x, n=2).shape, (2, 1))
self.assertEqual(knp.diff(x, n=3).shape, (2, 0))
self.assertEqual(knp.diff(x, n=4).shape, (2, 0))
self.assertEqual(knp.diff(x, axis=0).shape, (1, 3))
self.assertEqual(knp.diff(x, n=2, axis=0).shape, (0, 3))
self.assertEqual(knp.diff(x, n=3, axis=0).shape, (0, 3))
def test_dot(self):
x = KerasTensor((2, 3))
y = KerasTensor((3, 2))
z = KerasTensor((4, 3, 2))
self.assertEqual(knp.dot(x, y).shape, (2, 2))
self.assertEqual(knp.dot(x, 2).shape, (2, 3))
self.assertEqual(knp.dot(x, z).shape, (2, 4, 2))
x = KerasTensor((5,))
y = KerasTensor((5,))
self.assertEqual(knp.dot(x, y).shape, ())
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
knp.dot(x, y)
def test_empty_like(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.empty_like(x).shape, (2, 3))
self.assertEqual(knp.empty_like(x).dtype, x.dtype)
def test_exp(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.exp(x).shape, (2, 3))
def test_exp2(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.exp2(x).shape, (2, 3))
def test_expand_dims(self):
x = KerasTensor((2, 3, 4))
self.assertEqual(knp.expand_dims(x, 0).shape, (1, 2, 3, 4))
self.assertEqual(knp.expand_dims(x, 1).shape, (2, 1, 3, 4))
self.assertEqual(knp.expand_dims(x, -2).shape, (2, 3, 1, 4))
# Multiple axes
self.assertEqual(knp.expand_dims(x, (1, 2)).shape, (2, 1, 1, 3, 4))
self.assertEqual(knp.expand_dims(x, (-1, -2)).shape, (2, 3, 4, 1, 1))
self.assertEqual(knp.expand_dims(x, (-1, 1)).shape, (2, 1, 3, 4, 1))
def test_expm1(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.expm1(x).shape, (2, 3))
def test_flip(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.flip(x).shape, (2, 3))
def test_floor(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.floor(x).shape, (2, 3))
def test_get_item(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.get_item(x, 1).shape, (3,))
x = KerasTensor((5, 3, 2))
self.assertEqual(knp.get_item(x, 3).shape, (3, 2))
x = KerasTensor(
[
2,
]
)
self.assertEqual(knp.get_item(x, 0).shape, ())
def test_hstack(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.hstack([x, y]).shape, (2, 6))
def test_imag(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.imag(x).shape, (2, 3))
def test_isfinite(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.isfinite(x).shape, (2, 3))
def test_isinf(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.isinf(x).shape, (2, 3))
def test_isnan(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.isnan(x).shape, (2, 3))
def test_isneginf(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.isneginf(x).shape, (2, 3))
def test_isposinf(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.isposinf(x).shape, (2, 3))
def test_isreal(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.isreal(x).shape, (2, 3))
def test_log(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.log(x).shape, (2, 3))
def test_log10(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.log10(x).shape, (2, 3))
def test_log1p(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.log1p(x).shape, (2, 3))
def test_log2(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.log2(x).shape, (2, 3))
def test_logaddexp(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.logaddexp(x, x).shape, (2, 3))
def test_logaddexp2(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.logaddexp2(x, x).shape, (2, 3))
def test_logical_not(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.logical_not(x).shape, (2, 3))
def test_max(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.max(x).shape, ())
def test_median(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.median(x).shape, ())
x = KerasTensor((2, 3, 3))
self.assertEqual(knp.median(x, axis=1).shape, (2, 3))
self.assertEqual(knp.median(x, axis=1, keepdims=True).shape, (2, 1, 3))
def test_meshgrid(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
z = KerasTensor((2, 3, 4, 5))
self.assertEqual(knp.meshgrid(x, y)[0].shape, (24, 6))
self.assertEqual(knp.meshgrid(x, y)[1].shape, (24, 6))
self.assertEqual(knp.meshgrid(x, y, indexing="ij")[0].shape, (6, 24))
self.assertEqual(
knp.meshgrid(x, y, z, indexing="ij")[0].shape, (6, 24, 120)
)
with self.assertRaises(ValueError):
knp.meshgrid(x, y, indexing="kk")
def test_moveaxis(self):
x = KerasTensor((2, 3, 4, 5))
self.assertEqual(knp.moveaxis(x, 0, -1).shape, (3, 4, 5, 2))
self.assertEqual(knp.moveaxis(x, -1, 0).shape, (5, 2, 3, 4))
self.assertEqual(knp.moveaxis(x, [0, 1], [-1, -2]).shape, (4, 5, 3, 2))
self.assertEqual(knp.moveaxis(x, [0, 1], [1, 0]).shape, (3, 2, 4, 5))
self.assertEqual(knp.moveaxis(x, [0, 1], [-2, -1]).shape, (4, 5, 2, 3))
def test_ndim(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.ndim(x).shape, (2,))
def test_ones_like(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.ones_like(x).shape, (2, 3))
self.assertEqual(knp.ones_like(x).dtype, x.dtype)
def test_zeros_like(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.zeros_like(x).shape, (2, 3))
self.assertEqual(knp.zeros_like(x).dtype, x.dtype)
def test_pad(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.pad(x, 1).shape, (4, 5))
self.assertEqual(knp.pad(x, (1, 2)).shape, (5, 6))
self.assertEqual(knp.pad(x, ((1, 2), (3, 4))).shape, (5, 10))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
knp.pad(x, ((1, 2), (3, 4), (5, 6)))
def test_prod(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.prod(x).shape, ())
self.assertEqual(knp.prod(x, axis=0).shape, (3,))
self.assertEqual(knp.prod(x, axis=1).shape, (2,))
def test_ravel(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.ravel(x).shape, (6,))
def test_unravel_index(self):
x = KerasTensor((6,))
indices = knp.unravel_index(x, (2, 3))
self.assertEqual(len(indices), 2)
self.assertEqual(indices[0].shape, (6,))
self.assertEqual(indices[1].shape, (6,))
x = KerasTensor((2, 3))
indices = knp.unravel_index(x, (3, 4))
self.assertEqual(len(indices), 2)
self.assertEqual(indices[0].shape, (2, 3))
self.assertEqual(indices[1].shape, (2, 3))
def test_real(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.real(x).shape, (2, 3))
def test_reciprocal(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.reciprocal(x).shape, (2, 3))
def test_repeat(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.repeat(x, 2).shape, (12,))
self.assertEqual(knp.repeat(x, [2]).shape, (12,))
self.assertEqual(knp.repeat(x, 3, axis=1).shape, (2, 9))
self.assertEqual(knp.repeat(x, [1, 2], axis=0).shape, (3, 3))
with self.assertRaises(ValueError):
knp.repeat(x, [1, 1])
with self.assertRaises(ValueError):
knp.repeat(x, [1, 1, 1], axis=0)
def test_reshape(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.reshape(x, (3, 2)).shape, (3, 2))
self.assertEqual(knp.reshape(x, (3, -1)).shape, (3, 2))
self.assertEqual(knp.reshape(x, (6,)).shape, (6,))
self.assertEqual(knp.reshape(x, (-1,)).shape, (6,))
def test_roll(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.roll(x, 1).shape, (2, 3))
self.assertEqual(knp.roll(x, 1, axis=1).shape, (2, 3))
self.assertEqual(knp.roll(x, 1, axis=0).shape, (2, 3))
def test_round(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.round(x).shape, (2, 3))
def test_sign(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.sign(x).shape, (2, 3))
def test_signbit(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.signbit(x).shape, (2, 3))
def test_sin(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.sin(x).shape, (2, 3))
def test_sinh(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.sinh(x).shape, (2, 3))
def test_size(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.size(x).shape, ())
def test_sort(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.sort(x).shape, (2, 3))
self.assertEqual(knp.sort(x, axis=1).shape, (2, 3))
self.assertEqual(knp.sort(x, axis=0).shape, (2, 3))
def test_split(self):
x = KerasTensor((2, 3))
self.assertEqual(len(knp.split(x, 2)), 2)
self.assertEqual(knp.split(x, 2)[0].shape, (1, 3))
self.assertEqual(knp.split(x, 3, axis=1)[0].shape, (2, 1))
self.assertEqual(len(knp.split(x, [1, 3], axis=1)), 3)
self.assertEqual(knp.split(x, [1, 3], axis=1)[0].shape, (2, 1))
self.assertEqual(knp.split(x, [1, 3], axis=1)[1].shape, (2, 2))
self.assertEqual(knp.split(x, [1, 3], axis=1)[2].shape, (2, 0))
with self.assertRaises(ValueError):
knp.split(x, 2, axis=1)
def test_sqrt(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.sqrt(x).shape, (2, 3))
def test_stack(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.stack([x, y]).shape, (2, 2, 3))
self.assertEqual(knp.stack([x, y], axis=-1).shape, (2, 3, 2))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((3, 3))
knp.stack([x, y])
def test_std(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.std(x).shape, ())
def test_swapaxes(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.swapaxes(x, 0, 1).shape, (3, 2))
def test_tan(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.tan(x).shape, (2, 3))
def test_tanh(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.tanh(x).shape, (2, 3))
def test_tile(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.tile(x, 2).shape, (2, 6))
self.assertEqual(knp.tile(x, [2]).shape, (2, 6))
self.assertEqual(knp.tile(x, [1, 2]).shape, (2, 6))
self.assertEqual(knp.tile(x, [2, 1, 2]).shape, (2, 2, 6))
def test_trace(self):
x = KerasTensor((2, 3, 4, 5))
self.assertEqual(knp.trace(x).shape, (4, 5))
self.assertEqual(knp.trace(x, axis1=2, axis2=3).shape, (2, 3))
def test_tril(self):
x = KerasTensor((2, 3, 4, 5))
self.assertEqual(knp.tril(x).shape, (2, 3, 4, 5))
self.assertEqual(knp.tril(x, k=1).shape, (2, 3, 4, 5))
self.assertEqual(knp.tril(x, k=-1).shape, (2, 3, 4, 5))
def test_triu(self):
x = KerasTensor((2, 3, 4, 5))
self.assertEqual(knp.triu(x).shape, (2, 3, 4, 5))
self.assertEqual(knp.triu(x, k=1).shape, (2, 3, 4, 5))
self.assertEqual(knp.triu(x, k=-1).shape, (2, 3, 4, 5))
def test_trunc(self):
x = KerasTensor((2, 3, 4, 5))
self.assertEqual(knp.trunc(x).shape, (2, 3, 4, 5))
def test_vstack(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.vstack([x, y]).shape, (4, 3))
def test_argpartition(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.argpartition(x, 3).shape, (2, 3))
self.assertEqual(knp.argpartition(x, 1, axis=1).shape, (2, 3))
with self.assertRaises(ValueError):
knp.argpartition(x, (1, 3))
def test_angle(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.angle(x).shape, (2, 3))
def test_view(self):
x = knp.array(KerasTensor((2, 3)), dtype="int32")
self.assertEqual(knp.view(x, dtype="uint32").shape, (2, 3))
self.assertEqual(knp.view(x, dtype="uint32").dtype, "uint32")
x = knp.array(KerasTensor((2, 3)), dtype="int32")
self.assertEqual(knp.view(x, dtype="int16").shape, (2, 6))
self.assertEqual(knp.view(x, dtype="int16").dtype, "int16")
x = knp.array(KerasTensor((2, 4)), dtype="int16")
self.assertEqual(knp.view(x, dtype="int32").shape, (2, 2))
self.assertEqual(knp.view(x, dtype="int32").dtype, "int32")
def test_array_split(self):
x = KerasTensor((8, 4))
splits = knp.array_split(x, 3, axis=0)
self.assertEqual(len(splits), 3)
self.assertEqual(splits[0].shape, (3, 4))
self.assertEqual(splits[1].shape, (3, 4))
self.assertEqual(splits[2].shape, (2, 4))
| NumpyOneInputOpsStaticShapeTest |
python | django__django | tests/foreign_object/models/article.py | {
"start": 152,
"end": 652
} | class ____(ForwardManyToOneDescriptor):
"""
The set of articletranslation should not set any local fields.
"""
def __set__(self, instance, value):
if instance is None:
raise AttributeError("%s must be accessed via instance" % self.field.name)
self.field.set_cached_value(instance, value)
if value is not None and not self.field.remote_field.multiple:
self.field.remote_field.set_cached_value(value, instance)
| ArticleTranslationDescriptor |
python | encode__django-rest-framework | tests/test_throttling.py | {
"start": 864,
"end": 953
} | class ____(UserRateThrottle):
rate = '6/min'
scope = 'minutes'
| User6MinRateThrottle |
python | wandb__wandb | tests/unit_tests/test_lib/test_fsm.py | {
"start": 203,
"end": 463
} | class ____(TrackCalls):
def __init__(self, calls):
super().__init__(calls)
def on_state(self, inputs) -> None:
self._calls.append("A:on_state")
def to_b(self, inputs) -> bool:
self._calls.append("to_b")
return True
| A |
python | kamyu104__LeetCode-Solutions | Python/missing-ranges.py | {
"start": 29,
"end": 749
} | class ____(object):
def findMissingRanges(self, nums, lower, upper):
"""
:type nums: List[int]
:type lower: int
:type upper: int
:rtype: List[str]
"""
def getRange(lower, upper):
if lower == upper:
return "{}".format(lower)
else:
return "{}->{}".format(lower, upper)
ranges = []
pre = lower - 1
for i in xrange(len(nums) + 1):
if i == len(nums):
cur = upper + 1
else:
cur = nums[i]
if cur - pre >= 2:
ranges.append(getRange(pre + 1, cur - 1))
pre = cur
return ranges
| Solution |
python | facebook__pyre-check | client/language_server/protocol.py | {
"start": 8998,
"end": 9196
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
value_set: List[DiagnosticTag] = dataclasses.field(default_factory=list)
@dataclasses.dataclass(frozen=True)
| PublishDiagnosticsClientTagSupport |
python | pytorch__pytorch | test/distributed/tensor/test_random_ops.py | {
"start": 12154,
"end": 26999
} | class ____(DTensorTestBase):
@with_comms
@skip_unless_torch_gpu
def test_rng_tracker_init(self):
torch.manual_seed(self.rank)
seed_local = (
torch.zeros_like(torch.empty(1), device=self.device_type)
+ torch.initial_seed()
)
torch.distributed.broadcast(seed_local, src=0)
# if local tensor, it should automatically reconcile after the broadcast
# since all virtual ranks should have rank 0's initial_seed()
seed_from_rank_0 = seed_local
device_mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
# seed synchronization now does NOT happen after the first `distribute_tensor`
# call
dt = distribute_tensor(
torch.empty([self.world_size], device=self.device_type),
device_mesh,
[Shard(0)],
)
self.assertTrue(random._rng_tracker is None)
# seed synchronization only happens after `manual_seed` or the first DTensor
# random op call
dt.uniform_(0, 1)
# We do not maintain the copy of the seed in dtensor, but we do mutate the global rng state
# since we now always pull it fresh from the local device generator
self.assertEqual(
seed_from_rank_0, get_generator_seed_for_device_type(self.device_type)
)
@with_comms
@skip_unless_torch_gpu
def test_manual_seed(self):
device_mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
# in the case of calling ``torch.distributed.tensor._random.manual_seed``,
# no seed synchronization should happen since we fully trust the users' input
# and will not override the value.
comm_mode = CommDebugMode()
with comm_mode:
# Test 1: set different seed on different ranks
# RNG tracker should not be initialized until DTensor ``manual_seed``
# is called.
self.assertTrue(random._rng_tracker is None)
manual_seed(self.rank, device_mesh)
# RNG tracker should already be initialized
self.assertTrue(random._rng_tracker is not None)
self.assertEqual(
self.rank, get_generator_seed_for_device_type(self.device_type)
)
# Test 2: set same seed on different ranks
manual_seed(1234, device_mesh)
self.assertEqual(1234, get_generator_seed_for_device_type(self.device_type))
self.assertEqual(comm_mode.get_total_counts(), 0)
@with_comms
@skip_unless_torch_gpu
def test_manual_seed_submesh(self):
@maybe_run_for_local_tensor
def compute_rankwise_if_local_tensor(rank):
# the current rank is not a part of the mesh
single_rank_device_mesh = DeviceMesh(
self.device_type, [(rank + 1) % self.world_size], _rank=rank
)
with self.assertRaisesRegex(
RuntimeError,
"manual_seed requires the current rank to be a part of the device mesh",
):
manual_seed(rank, single_rank_device_mesh)
compute_rankwise_if_local_tensor(self.rank)
@with_comms
@skip_unless_torch_gpu
def test_pipeline_parallel_manual_seed(self):
# This test is to verify the `manual_seed` API works as expected in the
# pipeline parallel setting.
world_mesh = init_device_mesh(
self.device_type,
(self.world_size // 2, 2),
mesh_dim_names=("pp", "spmd"),
)
pp_mesh = world_mesh["pp"]
pp_rank = pp_mesh.get_local_rank() # rank 0,1 = 0; rank 2,3 = 1
spmd_mesh = world_mesh["spmd"]
# set the seed for each pipeline stage to 123 + pp_rank
manual_seed(123 + pp_rank, spmd_mesh)
# dtensor no longer stores a copy of the seed, but it mutates the device's generator so we can check that
self.assertEqual(
123 + pp_rank, get_generator_seed_for_device_type(self.device_type)
)
# mimic initializing a model weight sharded on the SPMD mesh
spmd_dtensor = torch.distributed.tensor.ones(
2 * spmd_mesh.size(), 2, device_mesh=spmd_mesh, placements=[Shard(0)]
)
torch.nn.init.normal_(spmd_dtensor)
# gather all the shards to compare initialization results
WORLD = torch.distributed.group.WORLD
assert WORLD is not None
tensor_gather = funcol.all_gather_tensor(
spmd_dtensor.to_local(),
gather_dim=0,
group=WORLD,
)
# verify the weights are initialized differently on all ranks
for other_rank in range(self.world_size):
if self.rank != other_rank:
self.assertNotEqual(
spmd_dtensor,
tensor_gather[2 * other_rank : 2 * (other_rank + 1), :],
)
@with_comms
@skip_unless_torch_gpu
def test_deterministic_dropout_1d(self):
    """Dropout on a replicated DTensor must drop the same elements on every rank.

    The default torch generator is deliberately seeded differently per rank to
    show that DTensor's own RNG tracker — not the default generator — drives
    the random ops, so the dropout mask still agrees across ranks.
    """
    # test suite sets each rank's seed to the same value but in actual
    # execution the default random seed will be different (a random value).
    # The DTensor random ops will use the same random seed even though the
    # torch random generator keeps different seeds on ranks.
    torch.manual_seed(self.rank)
    # TODO: add test before/after enabling distribute region
    device_mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
    size = [4, 4]
    dtensor = distribute_tensor(
        torch.empty(*size, device=self.device_type), device_mesh, [Shard(1)]
    )
    # a random op call shifts the offset
    dtensor.uniform_(0, 1)
    # the dtensor is now replicate on all ranks
    dtensor = dtensor.redistribute(device_mesh, [Replicate()])
    dropout = torch.nn.Dropout(p=0.2)
    dtensor = dropout(dtensor)
    # allgather the local tensors: gather_dim=0 stacks each rank's 4x4
    # local tensor, so rank r's copy occupies rows [4*r, 4*r+4)
    local_tensor = funcol.all_gather_tensor(
        dtensor.to_local(), gather_dim=0, group=(device_mesh, 0)
    )

    @maybe_run_for_local_tensor
    def compute_rankwise_if_local_tensor(local_tensor, rank):
        # compare with local tensors from other ranks
        self_slice = slice(4 * rank, 4 * rank + 4)
        for other_rank in range(self.world_size):
            if rank != other_rank:
                # other rank should have an identical local tensor, since
                # the dropout mask comes from the shared DTensor RNG state
                other_slice = slice(4 * other_rank, 4 * other_rank + 4)
                self.assertEqual(
                    local_tensor[self_slice, :],
                    local_tensor[other_slice, :],
                )

    compute_rankwise_if_local_tensor(local_tensor, self.rank)
@with_comms
@skip_unless_torch_gpu
def test_deterministic_rand_1d(self):
    """rand/randn factories: sharded placement gives each rank distinct
    values; replicated placement (after identical manual seeds) gives every
    rank identical values.
    """
    device_mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
    size = [4, 4 * self.world_size]
    for fn in [
        torch.distributed.tensor.rand,
        torch.distributed.tensor.randn,
    ]:
        # Sharded placement: each rank owns a different shard of the result.
        dtensor = fn(size, device_mesh=device_mesh, placements=[Shard(1)])
        local_tensor = funcol.all_gather_tensor(
            dtensor.to_local(), gather_dim=0, group=(device_mesh, 0)
        )

        @maybe_run_for_local_tensor
        def compute_rankwise_if_local_tensor(local_tensor, rank):
            # compare with local tensors from other ranks
            self_slice = slice(4 * rank, 4 * rank + 4)
            for other_rank in range(self.world_size):
                if rank != other_rank:
                    # other ranks hold different shards, so their local
                    # tensors must differ from ours
                    other_slice = slice(4 * other_rank, 4 * other_rank + 4)
                    self.assertNotEqual(
                        local_tensor[self_slice, :],
                        local_tensor[other_slice, :],
                    )

        compute_rankwise_if_local_tensor(local_tensor, self.rank)

        # we should set manual seed to the same value on all SPMD ranks
        torch.manual_seed(0)
        dtensor = fn(size, device_mesh=device_mesh, placements=[Replicate()])
        local_tensor = funcol.all_gather_tensor(
            dtensor.to_local(), gather_dim=0, group=(device_mesh, 0)
        )

        @maybe_run_for_local_tensor
        def compute_rankwise_if_local_tensor(local_tensor, rank):
            # compare with local tensors from other ranks
            self_slice = slice(4 * rank, 4 * rank + 4)
            for other_rank in range(self.world_size):
                if rank != other_rank:
                    # other rank should have an identical local tensor for
                    # replicate placement
                    other_slice = slice(4 * other_rank, 4 * other_rank + 4)
                    self.assertEqual(
                        local_tensor[self_slice, :],
                        local_tensor[other_slice, :],
                    )

        compute_rankwise_if_local_tensor(local_tensor, self.rank)
@with_comms
@skip_if_lt_x_gpu(4)
def test_deterministic_uniform_2d(self):
    """uniform_ on a 2-D mesh: distinct shards must draw distinct values and
    replicas of the same shard must draw identical values, across a set of
    Shard/Replicate placement combinations.
    """
    mesh = torch.arange(self.world_size).reshape(2, 2)
    device_mesh = DeviceMesh(self.device_type, mesh)
    dtensor = distribute_tensor(
        torch.empty(
            *[self.world_size for _ in mesh.size()], device=self.device_type
        ),
        device_mesh,
        [Replicate(), Replicate()],
    )

    placements_list = [  # this list of placements should be enough to cover
        # Shard/Replicate mixes across both mesh dimensions
        [Shard(0), Shard(1)],
        [Shard(1), Shard(0)],
        [Shard(0), Replicate()],
        [Replicate(), Shard(0)],
        [Shard(1), Replicate()],
        [Replicate(), Shard(1)],
        [Replicate(), Replicate()],
    ]
    # Expected _calc_shard_linear_idx result per global rank (0..3), one dict
    # for each placements entry above.
    shard_index_list = [
        {0: 0, 1: 1, 2: 2, 3: 3},
        {0: 0, 1: 2, 2: 1, 3: 3},
        {0: 0, 1: 0, 2: 1, 3: 1},
        {0: 0, 1: 1, 2: 0, 3: 1},
        {0: 0, 1: 0, 2: 1, 3: 1},
        {0: 0, 1: 1, 2: 0, 3: 1},
        {0: 0, 1: 0, 2: 0, 3: 0},
    ]
    coordinate = device_mesh.get_coordinate()
    assert coordinate is not None
    for placements, shard_index in zip(placements_list, shard_index_list):
        dtensor = dtensor.redistribute(device_mesh, placements)
        # random op call
        dtensor.uniform_(0, 1)

        # check shard information is correct: map this rank's mesh coordinate
        # to its linear shard index and compare against the expected table
        shard_coord = [
            coordinate[mesh_dim] if mesh_dim >= 0 else 0
            for mesh_dim in dtensor._spec.dim_map
        ]
        shard_size = [
            device_mesh.size(mesh_dim) if mesh_dim >= 0 else 1
            for mesh_dim in dtensor._spec.dim_map
        ]
        shard_linear_idx = random._rng_tracker._calc_shard_linear_idx(
            shard_coord, shard_size
        )

        @maybe_run_for_local_tensor
        def check_shard_index(shard_linear_idx, rank):
            self.assertEqual(shard_linear_idx, shard_index[rank])

        check_shard_index(shard_linear_idx, self.rank)

        # compute local size and offset
        _, local_shard_offset = compute_local_shape_and_global_offset(
            dtensor.shape, device_mesh, placements
        )

        # get the local shard size and local shard offset for each shard
        # local_shard_list_on_dim[i] has the list of all shards on that dim
        # as a tuple (local_shard_offset, local_shard_size)
        dtensor_shape = dtensor.shape
        local_shard_list_on_dim: list[list[tuple[int, int]]] = [
            [(0, l)] for l in dtensor_shape
        ]
        for idx, placement in enumerate(placements):
            if isinstance(placement, Shard):
                mesh_dim_size = device_mesh.size(idx)
                shard_dim = placement.dim
                local_shard_list_on_dim[shard_dim] = []
                for shard_idx_on_dim in range(mesh_dim_size):
                    # NOTE(review): this unpack shadows the earlier
                    # `shard_size` list; consider renaming one of them
                    (
                        shard_size,
                        shard_offset,
                    ) = placement._local_shard_size_and_offset(
                        dtensor_shape[shard_dim],
                        mesh_dim_size,
                        shard_idx_on_dim,
                    )
                    local_shard_list_on_dim[shard_dim].append(
                        (not_none(shard_offset), shard_size)
                    )
        # cartesian product enumerates every (offset, size) block of the
        # global tensor
        local_shard_comb = itertools.product(*local_shard_list_on_dim)

        # the local shard
        local_tensor = dtensor.to_local()
        # allgather the local tensors
        full_tensor = dtensor.full_tensor()
        full_tensor = (
            full_tensor.reconcile()
            if isinstance(full_tensor, LocalTensor)
            else full_tensor
        )

        @maybe_run_for_local_tensor
        def blockwise_iter_if_localtensor(local_tensor, local_shard_offset):
            # compare local tensor with each other shard
            for other_local_shard in local_shard_comb:
                other_local_shard_offset, _ = zip(*other_local_shard)
                slice_idx = [
                    slice(offset, offset + size)
                    for offset, size in other_local_shard
                ]
                # our own block must match what we hold; every other block
                # must have drawn different random values
                if local_shard_offset == other_local_shard_offset:
                    self.assertEqual(full_tensor[tuple(slice_idx)], local_tensor)
                else:
                    self.assertNotEqual(full_tensor[tuple(slice_idx)], local_tensor)

        blockwise_iter_if_localtensor(local_tensor, local_shard_offset)
def test_philox_state_seed_roundtrip(self):
"""
Test that _PhiloxState seed can be read and re-set without error.
This test addresses the issue where reading a seed value from the state
(which uses uint64 view) and then re-setting it would fail with:
OverflowError: can't convert negative int to unsigned
The fix ensures the seed getter uses uint64 view, preventing negative
values from appearing when the high bit is set.
"""
from torch.distributed.tensor._random import _PhiloxState
state = torch.zeros(16, dtype=torch.uint8, device="cpu")
philox = _PhiloxState(state)
test_seed = 2**63 + 42 # This has the sign bit set when viewed as int64
philox.seed = test_seed
philox.seed = philox.seed
| DistTensorRandomOpTest |
python | getsentry__sentry | tests/sentry/integrations/slack/webhooks/commands/test_link_team.py | {
"start": 676,
"end": 1301
} | class ____(SlackCommandsTest):
def setUp(self) -> None:
    """Link the Slack identity and stub out Slack's chat.postMessage endpoint."""
    super().setUp()
    self.link_user()

    # Every Slack command reply goes through chat.postMessage; answer with a
    # minimal OK payload so the integration code sees a successful post.
    responses.add(
        method=responses.POST,
        url="https://slack.com/api/chat.postMessage",
        body='{"ok": true}',
        content_type="application/json",
        status=status.HTTP_200_OK,
    )

    # A user with the org-level "member" role who is nonetheless an admin of
    # self.team — used to exercise team-admin permission paths.
    self.team_admin_user = self.create_user()
    self.create_member(
        user=self.team_admin_user,
        role="member",
        team_roles=[(self.team, "admin")],
        organization=self.organization,
    )
| SlackCommandsLinkTeamTestBase |
python | py-pdf__pypdf | pypdf/constants.py | {
"start": 1131,
"end": 3418
} | class ____(IntFlag):
"""
Table 3.20 User access permissions.
Table 22 of the 2.0 manual.
"""
R1 = 1
R2 = 2
PRINT = 4
MODIFY = 8
EXTRACT = 16
ADD_OR_MODIFY = 32
R7 = 64
R8 = 128
FILL_FORM_FIELDS = 256
EXTRACT_TEXT_AND_GRAPHICS = 512
ASSEMBLE_DOC = 1024
PRINT_TO_REPRESENTATION = 2048
R13 = 2**12
R14 = 2**13
R15 = 2**14
R16 = 2**15
R17 = 2**16
R18 = 2**17
R19 = 2**18
R20 = 2**19
R21 = 2**20
R22 = 2**21
R23 = 2**22
R24 = 2**23
R25 = 2**24
R26 = 2**25
R27 = 2**26
R28 = 2**27
R29 = 2**28
R30 = 2**29
R31 = 2**30
R32 = 2**31
@classmethod
def _is_reserved(cls, name: str) -> bool:
    """Check if the given name corresponds to a reserved flag entry."""
    # Reserved entries are named "R" followed by one or more digits.
    if not name.startswith("R"):
        return False
    return name[1:].isdigit()
@classmethod
def _is_active(cls, name: str) -> bool:
    """Check if the given reserved name defaults to 1 = active."""
    # Only the first two reserved bits default to 0.
    return name != "R1" and name != "R2"
def to_dict(self) -> dict[str, bool]:
    """Convert the given flag value to a corresponding verbose name mapping."""
    # Reserved entries are omitted; each remaining member maps its lowercased
    # name to whether that bit is set in this value.
    return {
        name.lower(): (self & flag) == flag
        for name, flag in UserAccessPermissions.__members__.items()
        if not UserAccessPermissions._is_reserved(name)
    }
@classmethod
def from_dict(cls, value: dict[str, bool]) -> "UserAccessPermissions":
    """Convert the verbose name mapping to the corresponding flag value."""
    remaining = value.copy()
    permissions = cls(0)
    for name, flag in cls.__members__.items():
        if cls._is_reserved(name):
            # Reserved names have a required value; it is not caller-settable.
            if cls._is_active(name):
                permissions |= flag
        elif remaining.pop(name.lower(), False):
            permissions |= flag
    # Anything left over was not a recognized permission name.
    if remaining:
        raise ValueError(f"Unknown dictionary keys: {remaining!r}")
    return permissions
@classmethod
def all(cls) -> "UserAccessPermissions":
    """Return the value with every bit set except the reserved-zero bits R1/R2."""
    every_bit = (1 << 32) - 1
    return cls(every_bit - cls.R1 - cls.R2)
| UserAccessPermissions |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.