language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | walkccc__LeetCode | solutions/2502. Design Memory Allocator/2502.py | {
"start": 0,
"end": 716
} | class ____:
def __init__(self, n: int):
self.memory = [0] * n
self.mIDToIndices = [[] for _ in range(1001)]
def allocate(self, size: int, mID: int) -> int:
consecutiveFree = 0
for i, m in enumerate(self.memory):
consecutiveFree = consecutiveFree + 1 if m == 0 else 0
if consecutiveFree == size:
for j in range(i - consecutiveFree + 1, i + 1):
self.memory[j] = mID
self.mIDToIndices[mID].append(j)
return i - consecutiveFree + 1
return -1
def free(self, mID: int) -> int:
indices = self.mIDToIndices[mID]
freedUnits = len(indices)
for index in indices:
self.memory[index] = 0
indices.clear()
return freedUnits
| Allocator |
python | has2k1__plotnine | plotnine/stats/stat_pointdensity.py | {
"start": 292,
"end": 2368
} | class ____(stat):
"""
Compute density estimation for each point
{usage}
Parameters
----------
{common_parameters}
package : Literal["statsmodels", "scipy", "sklearn"], default="statsmodels"
Package whose kernel density estimation to use.
kde_params : dict, default=None
Keyword arguments to pass on to the kde class.
See Also
--------
plotnine.geom_density_2d : The default `geom` for this `stat`.
statsmodels.nonparametric.kde.KDEMultivariate
scipy.stats.gaussian_kde
sklearn.neighbors.KernelDensity
"""
_aesthetics_doc = """
{aesthetics_table}
**Options for computed aesthetics**
```python
"density" # Computed density at a point
```
"""
REQUIRED_AES = {"x", "y"}
DEFAULT_AES = {"color": after_stat("density")}
DEFAULT_PARAMS = {
"geom": "density_2d",
"position": "identity",
"na_rm": False,
"package": "statsmodels",
"kde_params": None,
}
CREATES = {"density"}
def setup_params(self, data):
params = self.params
if params["kde_params"] is None:
params["kde_params"] = {}
kde_params = params["kde_params"]
if params["package"] == "statsmodels":
params["package"] = "statsmodels-m"
if "var_type" not in kde_params:
x_type = get_var_type(data["x"])
y_type = get_var_type(data["y"])
kde_params["var_type"] = f"{x_type}{y_type}"
def compute_group(self, data, scales):
package = self.params["package"]
kde_params = self.params["kde_params"]
x = cast("FloatArray", data["x"].to_numpy())
y = cast("FloatArray", data["y"].to_numpy())
var_data = np.array([x, y]).T
density = kde(var_data, var_data, package, **kde_params)
data = pd.DataFrame(
{
"x": data["x"],
"y": data["y"],
"density": density.flatten(),
}
)
return data
| stat_pointdensity |
python | ipython__ipython | IPython/core/magics/script.py | {
"start": 2380,
"end": 2443
} | class ____(Exception):
pass
@magics_class
| RaiseAfterInterrupt |
python | matplotlib__matplotlib | lib/matplotlib/backend_tools.py | {
"start": 26783,
"end": 28986
} | class ____(ZoomPanBase):
"""Pan Axes with left mouse, zoom with right."""
default_keymap = property(lambda self: mpl.rcParams['keymap.pan'])
description = 'Pan axes with left mouse, zoom with right'
image = 'mpl-data/images/move'
cursor = cursors.MOVE
radio_group = 'default'
def __init__(self, *args):
super().__init__(*args)
self._id_drag = None
def _cancel_action(self):
self._button_pressed = None
self._xypress = []
self.figure.canvas.mpl_disconnect(self._id_drag)
self.toolmanager.messagelock.release(self)
self.figure.canvas.draw_idle()
def _press(self, event):
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._cancel_action()
return
x, y = event.x, event.y
self._xypress = []
for i, a in enumerate(self.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_pan()):
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.toolmanager.messagelock(self)
self._id_drag = self.figure.canvas.mpl_connect(
'motion_notify_event', self._mouse_move)
def _release(self, event):
if self._button_pressed is None:
self._cancel_action()
return
self.figure.canvas.mpl_disconnect(self._id_drag)
self.toolmanager.messagelock.release(self)
for a, _ind in self._xypress:
a.end_pan()
if not self._xypress:
self._cancel_action()
return
self.toolmanager.get_tool(_views_positions).push_current()
self._cancel_action()
def _mouse_move(self, event):
for a, _ind in self._xypress:
# safer to use the recorded button at the _press than current
# button: # multiple button can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.toolmanager.canvas.draw_idle()
| ToolPan |
python | apache__airflow | providers/oracle/src/airflow/providers/oracle/transfers/oracle_to_oracle.py | {
"start": 1097,
"end": 3631
} | class ____(BaseOperator):
"""
Moves data from Oracle to Oracle.
:param oracle_destination_conn_id: destination Oracle connection.
:param destination_table: destination table to insert rows.
:param oracle_source_conn_id: :ref:`Source Oracle connection <howto/connection:oracle>`.
:param source_sql: SQL query to execute against the source Oracle
database. (templated)
:param source_sql_params: Parameters to use in sql query. (templated)
:param rows_chunk: number of rows per chunk to commit.
"""
template_fields: Sequence[str] = ("source_sql", "source_sql_params")
template_fields_renderers = {"source_sql": "sql", "source_sql_params": "py"}
ui_color = "#e08c8c"
def __init__(
self,
*,
oracle_destination_conn_id: str,
destination_table: str,
oracle_source_conn_id: str,
source_sql: str,
source_sql_params: dict | None = None,
rows_chunk: int = 5000,
**kwargs,
) -> None:
super().__init__(**kwargs)
if source_sql_params is None:
source_sql_params = {}
self.oracle_destination_conn_id = oracle_destination_conn_id
self.destination_table = destination_table
self.oracle_source_conn_id = oracle_source_conn_id
self.source_sql = source_sql
self.source_sql_params = source_sql_params
self.rows_chunk = rows_chunk
def _execute(self, src_hook, dest_hook, context) -> None:
with src_hook.get_conn() as src_conn:
cursor = src_conn.cursor()
self.log.info("Querying data from source: %s", self.oracle_source_conn_id)
cursor.execute(self.source_sql, self.source_sql_params)
target_fields = [field[0] for field in cursor.description]
rows_total = 0
for rows in iter(lambda: cursor.fetchmany(self.rows_chunk), []):
dest_hook.bulk_insert_rows(
self.destination_table, rows, target_fields=target_fields, commit_every=self.rows_chunk
)
rows_total += len(rows)
self.log.info("Total inserted: %s rows", rows_total)
self.log.info("Finished data transfer.")
cursor.close()
def execute(self, context: Context) -> None:
src_hook = OracleHook(oracle_conn_id=self.oracle_source_conn_id)
dest_hook = OracleHook(oracle_conn_id=self.oracle_destination_conn_id)
self._execute(src_hook, dest_hook, context)
| OracleToOracleOperator |
python | huggingface__transformers | src/transformers/models/megatron_bert/modeling_megatron_bert.py | {
"start": 59197,
"end": 62700
} | class ____(MegatronBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = MegatronBertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, QuestionAnsweringModelOutput]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
| MegatronBertForQuestionAnswering |
python | kamyu104__LeetCode-Solutions | Python/largest-magic-square.py | {
"start": 55,
"end": 1487
} | class ____(object):
def largestMagicSquare(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
def get_sum(prefix, a, b):
return prefix[b+1]-prefix[a]
def check(grid, prefix_row, prefix_col, l, i, j):
diag, anti_diag = 0, 0
for d in xrange(l):
diag += grid[i+d][j+d]
anti_diag += grid[i+d][j+l-1-d]
if diag != anti_diag:
return False
for ni in xrange(i, i+l):
if diag != get_sum(prefix_row[ni], j, j+l-1):
return False
for nj in xrange(j, j+l):
if diag != get_sum(prefix_col[nj], i, i+l-1):
return False
return True
prefix_row = [[0]*(len(grid[0])+1) for _ in xrange(len(grid))]
prefix_col = [[0]*(len(grid)+1) for _ in xrange(len(grid[0]))]
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
prefix_row[i][j+1] = prefix_row[i][j] + grid[i][j]
prefix_col[j][i+1] = prefix_col[j][i] + grid[i][j]
for l in reversed(xrange(1, min(len(grid), len(grid[0]))+1)):
for i in xrange(len(grid)-(l-1)):
for j in xrange(len(grid[0])-(l-1)):
if check(grid, prefix_row, prefix_col, l, i, j):
return l
return 1
| Solution |
python | scikit-image__scikit-image | src/skimage/_shared/utils.py | {
"start": 16007,
"end": 16210
} | class ____(AttributeError):
"""Error from use of failed estimation instance
This error arises from attempts to use an instance of
:class:`FailedEstimation`.
"""
| FailedEstimationAccessError |
python | vyperlang__vyper | tests/evm_backends/abi_contract.py | {
"start": 10236,
"end": 12639
} | class ____:
"""A contract that has been deployed to the blockchain and created via an ABI."""
@property
def address(self) -> HexAddress:
assert self._address is not None
return self._address
def __init__(
self,
env: "BaseEnv",
name: str,
abi: dict,
functions: list[ABIFunction],
log_topics: list[ABILogTopic],
bytecode: Optional[bytes],
address: HexAddress,
filename: Optional[str] = None,
):
self.env = env
self._address = address # this can be overridden by subclasses
self.filename = filename
self.abi = abi
self._name = name
self._functions = functions
self.log_topics = log_topics
self.bytecode = bytecode
self._deployed_bytecode = self.env.get_code(address)
if not self._deployed_bytecode:
warn(f"Requested {self} but there is no bytecode at that address!", stacklevel=2)
overloads = defaultdict(list)
for f in functions:
overloads[f.name].append(f)
for name, group in overloads.items():
setattr(self, name, ABIOverload.create(group, self))
self._address = address
def marshal_to_python(self, result: bytes, abi_type: list[str]) -> list[Any]:
"""
Convert the output of a contract call to a Python object.
:param result: the computation result returned by `message_call`
:param abi_type: the ABI type of the return value.
"""
schema = f"({_format_abi_type(abi_type)})"
return abi_decode(schema, result)
def __repr__(self):
file_str = f" (file {self.filename})" if self.filename else ""
warn_str = "" if self._deployed_bytecode else " (WARNING: no bytecode at this address!)"
return f"<{self._name} interface at {self.address}{warn_str}>{file_str}"
def parse_log(self, log: "LogEntry") -> ABILog:
"""
Parse a log entry into an ABILog object.
:param log: the log entry to parse
"""
topic_id_str = log.topics[0]
topic_id = bytes.fromhex(topic_id_str.removeprefix("0x"))
for topic in self.log_topics:
if topic.topic_id == topic_id:
return topic.parse(log)
raise KeyError(f"Could not find event for log {topic_id_str}. Found {self.log_topics}")
| ABIContract |
python | kamyu104__LeetCode-Solutions | Python/palindrome-pairs.py | {
"start": 4074,
"end": 4443
} | class ____(object):
def palindromePairs(self, words):
"""
:type words: List[str]
:rtype: List[List[int]]
"""
res = []
trie = TrieNode()
for i in xrange(len(words)):
trie.insert(words[i], i)
for i in xrange(len(words)):
trie.find(words[i], i, res)
return res
| Solution_MLE |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/runs.py | {
"start": 4749,
"end": 4937
} | class ____(graphene.Union):
class Meta:
types = (GrapheneRunIds, GrapheneInvalidPipelineRunsFilterError, GraphenePythonError)
name = "RunIdsOrError"
| GrapheneRunIdsOrError |
python | django__django | tests/lookup/test_decimalfield.py | {
"start": 107,
"end": 1467
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.p1 = Product.objects.create(name="Product1", qty_target=10)
Stock.objects.create(product=cls.p1, qty_available=5)
Stock.objects.create(product=cls.p1, qty_available=6)
cls.p2 = Product.objects.create(name="Product2", qty_target=10)
Stock.objects.create(product=cls.p2, qty_available=5)
Stock.objects.create(product=cls.p2, qty_available=5)
cls.p3 = Product.objects.create(name="Product3", qty_target=10)
Stock.objects.create(product=cls.p3, qty_available=5)
Stock.objects.create(product=cls.p3, qty_available=4)
cls.queryset = Product.objects.annotate(
qty_available_sum=Sum("stock__qty_available"),
).annotate(qty_needed=F("qty_target") - F("qty_available_sum"))
def test_gt(self):
qs = self.queryset.filter(qty_needed__gt=0)
self.assertCountEqual(qs, [self.p3])
def test_gte(self):
qs = self.queryset.filter(qty_needed__gte=0)
self.assertCountEqual(qs, [self.p2, self.p3])
def test_lt(self):
qs = self.queryset.filter(qty_needed__lt=0)
self.assertCountEqual(qs, [self.p1])
def test_lte(self):
qs = self.queryset.filter(qty_needed__lte=0)
self.assertCountEqual(qs, [self.p1, self.p2])
| DecimalFieldLookupTests |
python | ansible__ansible | lib/ansible/_internal/_ssh/_ssh_agent.py | {
"start": 16395,
"end": 17687
} | class ____(Msg):
keys: list[PublicKeyMsg]
def __iter__(self) -> t.Iterator[PublicKeyMsg]:
yield from self.keys
def __len__(self) -> int:
return len(self.keys)
@classmethod
def from_blob(cls, blob: memoryview | bytes) -> t.Self: ...
@classmethod
def consume_from_blob(cls, blob: memoryview | bytes) -> tuple[t.Self, memoryview | bytes]:
args: list[PublicKeyMsg] = []
while blob:
prev_blob = blob
key_blob, key_blob_length, comment_blob = cls._consume_field(blob)
peek_key_algo, _length, _blob = cls._consume_field(key_blob)
pub_key_msg_cls = PublicKeyMsg.get_dataclass(KeyAlgo(bytes(peek_key_algo).decode('utf-8')))
_fv, comment_blob_length, blob = cls._consume_field(comment_blob)
key_plus_comment = prev_blob[4 : (4 + key_blob_length) + (4 + comment_blob_length)]
args.append(pub_key_msg_cls.from_blob(key_plus_comment))
return cls(args), b""
@staticmethod
def _consume_field(blob: memoryview | bytes) -> tuple[memoryview | bytes, uint32, memoryview | bytes]:
length = uint32.from_blob(blob[:4])
blob = blob[4:]
data, rest = _split_blob(blob, length)
return data, length, rest
| PublicKeyMsgList |
python | scipy__scipy | scipy/optimize/tests/test_minimize_constrained.py | {
"start": 3070,
"end": 4267
} | class ____:
"""Problem 15.4 from Nocedal and Wright
The following optimization problem:
minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0]
Subject to: x[0]**2 + x[1]**2 - 1 = 0
"""
def __init__(self, degrees=60, constr_jac=None, constr_hess=None):
rads = degrees/180*np.pi
self.x0 = [np.cos(rads), np.sin(rads)]
self.x_opt = np.array([1.0, 0.0])
self.constr_jac = constr_jac
self.constr_hess = constr_hess
self.bounds = None
def fun(self, x):
return (2*(x[0]**2 + x[1]**2 - 1) - x[0],
np.array([4*x[0]-1, 4*x[1]]))
@property
def grad(self):
return True
def hess(self, x):
return 4*np.eye(2)
@property
def constr(self):
def fun(x):
return x[0]**2 + x[1]**2
if self.constr_jac is None:
def jac(x):
return [[4*x[0], 4*x[1]]]
else:
jac = self.constr_jac
if self.constr_hess is None:
def hess(x, v):
return 2*v[0]*np.eye(2)
else:
hess = self.constr_hess
return NonlinearConstraint(fun, 1, 1, jac, hess)
| MaratosGradInFunc |
python | pytorch__pytorch | torch/testing/_internal/common_device_type.py | {
"start": 50993,
"end": 51162
} | class ____(skipIf):
def __init__(self, dep, reason):
super().__init__(dep, reason, device_type="hpu")
# Skips a test on XLA if the condition is true.
| skipHPUIf |
python | sqlalchemy__sqlalchemy | test/ext/test_automap.py | {
"start": 1168,
"end": 11653
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
FixtureTest.define_tables(metadata)
def test_relationship_o2m_default(self):
Base = automap_base(metadata=self.tables_test_metadata)
Base.prepare()
User = Base.classes.users
Address = Base.classes.addresses
a1 = Address(email_address="e1")
u1 = User(name="u1", addresses_collection=[a1])
assert a1.users is u1
def test_relationship_explicit_override_o2m(self):
Base = automap_base(metadata=self.tables_test_metadata)
prop = relationship("addresses", collection_class=set)
class User(Base):
__tablename__ = "users"
addresses_collection = prop
Base.prepare()
assert User.addresses_collection.property is prop
Address = Base.classes.addresses
a1 = Address(email_address="e1")
u1 = User(name="u1", addresses_collection={a1})
assert a1.user is u1
def test_prepare_from_subclass(self):
"""test #9367"""
Base = automap_base()
class User(Base):
__tablename__ = "users"
User.prepare(testing.db)
assert not hasattr(Base.classes, "users")
assert hasattr(Base.classes, "addresses")
def test_prepare_w_only(self):
Base = automap_base()
Base.prepare(
testing.db,
reflection_options={"only": ["users"], "resolve_fks": False},
)
assert hasattr(Base.classes, "users")
assert not hasattr(Base.classes, "addresses")
def test_prepare_call_multiple_times(self):
"""newly added in 2.0 as part of #5145"""
Base = automap_base()
Base.prepare(
testing.db,
reflection_options={"only": ["users"], "resolve_fks": False},
)
assert hasattr(Base.classes, "users")
assert not hasattr(Base.classes, "addresses")
um = Base.classes.users.__mapper__
Base.prepare(
testing.db,
reflection_options={"only": ["users"], "resolve_fks": False},
)
assert hasattr(Base.classes, "users")
assert not hasattr(Base.classes, "addresses")
is_(Base.classes.users.__mapper__, um)
Base.prepare(testing.db)
assert hasattr(Base.classes, "users")
assert hasattr(Base.classes, "addresses")
am = Base.classes.addresses.__mapper__
Base.prepare()
Base.prepare()
is_(Base.classes.users.__mapper__, um)
is_(Base.classes.addresses.__mapper__, am)
def test_prepare_call_dont_rely_on_reflected(self):
"""newly added in 2.0 as part of #5145"""
Base = automap_base()
Base.metadata.reflect(testing.db, only=["users"], resolve_fks=False)
Base.prepare(
testing.db,
reflection_options={"only": ["addresses"]},
)
# check that users was prepared also, even though it wasn't in
# the second reflection call
assert hasattr(Base.classes, "users")
assert hasattr(Base.classes, "addresses")
def test_exception_prepare_not_called(self):
Base = automap_base(metadata=self.tables_test_metadata)
class User(Base):
__tablename__ = "users"
s = Session()
assert_raises_message(
orm_exc.UnmappedClassError,
"Class test.ext.test_automap.User is a subclass of AutomapBase. "
r"Mappings are not produced until the .prepare\(\) method is "
"called on the class hierarchy.",
s.query,
User,
)
def test_relationship_explicit_override_m2o(self):
Base = automap_base(metadata=self.tables_test_metadata)
prop = relationship("users")
class Address(Base):
__tablename__ = "addresses"
users = prop
Base.prepare()
User = Base.classes.users
assert Address.users.property is prop
a1 = Address(email_address="e1")
u1 = User(name="u1", address_collection=[a1])
assert a1.users is u1
def test_relationship_self_referential(self):
Base = automap_base(metadata=self.tables_test_metadata)
Base.prepare()
Node = Base.classes.nodes
n1 = Node()
n2 = Node()
n1.nodes_collection.append(n2)
assert n2.nodes is n1
def test_prepare_accepts_optional_schema_arg(self):
"""
The underlying reflect call accepts an optional schema argument.
This is for determining which database schema to load.
This test verifies that prepare can accept an optional schema
argument and pass it to reflect.
"""
Base = automap_base(metadata=self.tables_test_metadata)
engine_mock = Mock()
with patch.object(Base.metadata, "reflect") as reflect_mock:
Base.prepare(autoload_with=engine_mock, schema="some_schema")
reflect_mock.assert_called_once_with(
engine_mock,
schema="some_schema",
extend_existing=True,
autoload_replace=False,
)
def test_prepare_defaults_to_no_schema(self):
"""
The underlying reflect call accepts an optional schema argument.
This is for determining which database schema to load.
This test verifies that prepare passes a default None if no schema is
provided.
"""
Base = automap_base(metadata=self.tables_test_metadata)
engine_mock = Mock()
with patch.object(Base.metadata, "reflect") as reflect_mock:
Base.prepare(autoload_with=engine_mock)
reflect_mock.assert_called_once_with(
engine_mock,
schema=None,
extend_existing=True,
autoload_replace=False,
)
def test_prepare_w_dialect_kwargs(self):
Base = automap_base(metadata=self.tables_test_metadata)
engine_mock = Mock()
with patch.object(Base.metadata, "reflect") as reflect_mock:
Base.prepare(
autoload_with=engine_mock,
reflection_options={"oracle_resolve_synonyms": True},
)
reflect_mock.assert_called_once_with(
engine_mock,
schema=None,
extend_existing=True,
autoload_replace=False,
oracle_resolve_synonyms=True,
)
def test_naming_schemes(self):
Base = automap_base(metadata=self.tables_test_metadata)
def classname_for_table(base, tablename, table):
return str("cls_" + tablename)
def name_for_scalar_relationship(
base, local_cls, referred_cls, constraint
):
return "scalar_" + referred_cls.__name__
def name_for_collection_relationship(
base, local_cls, referred_cls, constraint
):
return "coll_" + referred_cls.__name__
Base.prepare(
classname_for_table=classname_for_table,
name_for_scalar_relationship=name_for_scalar_relationship,
name_for_collection_relationship=name_for_collection_relationship,
)
User = Base.classes.cls_users
Address = Base.classes.cls_addresses
u1 = User()
a1 = Address()
u1.coll_cls_addresses.append(a1)
assert a1.scalar_cls_users is u1
def test_relationship_m2m(self):
Base = automap_base(metadata=self.tables_test_metadata)
Base.prepare()
Order, Item = Base.classes.orders, Base.classes["items"]
o1 = Order()
i1 = Item()
o1.items_collection.append(i1)
assert o1 in i1.orders_collection
def test_relationship_explicit_override_forwards_m2m(self):
Base = automap_base(metadata=self.tables_test_metadata)
class Order(Base):
__tablename__ = "orders"
items_collection = relationship(
"items", secondary="order_items", collection_class=set
)
Base.prepare()
Item = Base.classes["items"]
o1 = Order()
i1 = Item()
o1.items_collection.add(i1)
# it is 'order_collection' because the class name is
# "Order" !
assert isinstance(i1.order_collection, list)
assert o1 in i1.order_collection
def test_m2m_relationship_also_map_the_secondary(self):
"""test #6679"""
Base = automap_base(metadata=self.tables_test_metadata)
# extend the table to have pk cols
Table(
"order_items",
self.tables_test_metadata,
Column("item_id", None, ForeignKey("items.id"), primary_key=True),
Column(
"order_id", None, ForeignKey("orders.id"), primary_key=True
),
extend_existing=True,
)
# then also map to it
class OrderItem(Base):
__tablename__ = "order_items"
Base.prepare()
Order = Base.classes["orders"]
Item = Base.classes["items"]
o1 = Order()
i1 = Item(description="x")
o1.items_collection.append(i1)
s = fixtures.fixture_session()
s.add(o1)
s.flush()
oi = s.execute(select(OrderItem)).scalars().one()
is_(oi.items, i1)
is_(oi.orders, o1)
def test_relationship_pass_params(self):
Base = automap_base(metadata=self.tables_test_metadata)
mock = Mock()
def _gen_relationship(
base, direction, return_fn, attrname, local_cls, referred_cls, **kw
):
mock(base, direction, attrname)
return generate_relationship(
base,
direction,
return_fn,
attrname,
local_cls,
referred_cls,
**kw,
)
Base.prepare(generate_relationship=_gen_relationship)
assert {tuple(c[1]) for c in mock.mock_calls}.issuperset(
[
(Base, interfaces.MANYTOONE, "nodes"),
(Base, interfaces.MANYTOMANY, "keywords_collection"),
(Base, interfaces.MANYTOMANY, "items_collection"),
(Base, interfaces.MANYTOONE, "users"),
(Base, interfaces.ONETOMANY, "addresses_collection"),
]
)
| AutomapTest |
python | bokeh__bokeh | src/bokeh/command/subcommands/file_output.py | {
"start": 1708,
"end": 7297
} | class ____(Subcommand):
''' Abstract subcommand to output applications as some type of file.
'''
# subtype must set this instance attribute to file extension
extension: str
@classmethod
def files_arg(cls, output_type_name: str) -> Arg:
''' Returns a positional arg for ``files`` to specify file inputs to
the command.
Subclasses should include this to their class ``args``.
Example:
.. code-block:: python
class Foo(FileOutputSubcommand):
args = (
FileOutputSubcommand.files_arg("FOO"),
# more args for Foo
) + FileOutputSubcommand.other_args()
'''
return ('files', Argument(
metavar='DIRECTORY-OR-SCRIPT',
nargs='+',
help=(f"The app directories or scripts to generate {output_type_name} for"),
default=None,
))
@classmethod
def other_args(cls) -> Args:
''' Return args for ``-o`` / ``--output`` to specify where output
should be written, and for a ``--args`` to pass on any additional
command line args to the subcommand.
Subclasses should append these to their class ``args``.
Example:
.. code-block:: python
class Foo(FileOutputSubcommand):
args = (
FileOutputSubcommand.files_arg("FOO"),
# more args for Foo
) + FileOutputSubcommand.other_args()
'''
return (
(('-o', '--output'), Argument(
metavar='FILENAME',
action='append',
type=str,
help="Name of the output file or - for standard output.",
)),
('--args', Argument(
metavar='COMMAND-LINE-ARGS',
nargs="...",
help="Any command line arguments remaining are passed on to the application handler",
)),
)
def filename_from_route(self, route: str, ext: str) -> str:
'''
'''
if route == "/":
base = "index"
else:
base = route[1:]
return f"{base}.{ext}"
def invoke(self, args: argparse.Namespace) -> None:
'''
'''
argvs = { f : args.args for f in args.files}
applications = build_single_handler_applications(args.files, argvs)
if args.output is None:
outputs: list[str] = []
else:
outputs = list(args.output) # copy so we can pop from it
if len(outputs) > len(applications):
die(f"--output/-o was given too many times ({len(outputs)} times for {len(applications)} applications)")
for (route, app) in applications.items():
doc = app.create_document()
if len(outputs) > 0:
filename = outputs.pop(0)
else:
filename = self.filename_from_route(route, self.extension)
self.write_file(args, filename, doc)
def write_file(self, args: argparse.Namespace, filename: str, doc: Document) -> None:
'''
'''
def write_str(content: str, filename: str) -> None:
if filename == "-":
print(content)
else:
with open(filename, "w", encoding="utf-8") as file:
file.write(content)
self.after_write_file(args, filename, doc)
def write_bytes(content: bytes, filename: str) -> None:
if filename == "-":
sys.stdout.buffer.write(content)
else:
with open(filename, "wb") as f:
f.write(content)
self.after_write_file(args, filename, doc)
contents = self.file_contents(args, doc)
if isinstance(contents, str):
write_str(contents, filename)
elif isinstance(contents, bytes):
write_bytes(contents, filename)
else:
if filename == "-" or len(contents) <= 1:
def indexed(i: int) -> str:
return filename
else:
def indexed(i: int) -> str:
root, ext = splitext(filename)
return f"{root}_{i}{ext}"
for i, content in enumerate(contents):
if isinstance(content, str):
write_str(content, indexed(i))
elif isinstance(content, bytes):
write_bytes(content, indexed(i))
# can be overridden optionally
def after_write_file(self, args: argparse.Namespace, filename: str, doc: Document) -> None:
'''
'''
pass
@abstractmethod
def file_contents(self, args: argparse.Namespace, doc: Document) -> str | bytes | list[str] | list[bytes]:
''' Subclasses must override this method to return the contents of the output file for the given doc.
subclassed methods return different types:
str: html, json
bytes: SVG, png
Raises:
NotImplementedError
'''
raise NotImplementedError()
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| FileOutputSubcommand |
python | huggingface__transformers | src/transformers/models/exaone4/modular_exaone4.py | {
"start": 14348,
"end": 14483
} | class ____(LlamaPreTrainedModel):
config_class = Exaone4Config
_no_split_modules = ["Exaone4DecoderLayer"]
| Exaone4PreTrainedModel |
python | doocs__leetcode | solution/1900-1999/1991.Find the Middle Index in Array/Solution.py | {
"start": 0,
"end": 240
} | class ____:
def findMiddleIndex(self, nums: List[int]) -> int:
l, r = 0, sum(nums)
for i, x in enumerate(nums):
r -= x
if l == r:
return i
l += x
return -1
| Solution |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query_global_variable.py | {
"start": 377,
"end": 1003
} | class ____:
...
hello = "hello"
world: str = "world"
def g():
foo.append(1)
def h():
_test_sink(Baz)
_test_sink(_test_source())
def returns_any() -> typing.Any:
return {"hello": Baz()}
typed_global_dict: typing.Dict[str, Baz] = returns_any()
untyped_global_dict = returns_any()
typed_global_lambda: typing.Callable[[int, str], int] = lambda x, y: x + int(y)
def fun(x: int, y: str) -> int:
return x + int(y)
typed_global_callable: typing.Callable[[int, str], int] = fun
untyped_global_callable = fun(1, "2")
typed_callable_assignment: int = fun(1, "2")
untyped_callable_assignment = fun
| Baz |
python | kamyu104__LeetCode-Solutions | Python/minimum-add-to-make-parentheses-valid.py | {
"start": 29,
"end": 341
} | class ____(object):
def minAddToMakeValid(self, S):
"""
:type S: str
:rtype: int
"""
add, bal, = 0, 0
for c in S:
bal += 1 if c == '(' else -1
if bal == -1:
add += 1
bal += 1
return add + bal
| Solution |
python | doocs__leetcode | solution/2500-2599/2552.Count Increasing Quadruplets/Solution.py | {
"start": 0,
"end": 811
} | class ____:
def countQuadruplets(self, nums: List[int]) -> int:
n = len(nums)
f = [[0] * n for _ in range(n)]
g = [[0] * n for _ in range(n)]
for j in range(1, n - 2):
cnt = sum(nums[l] > nums[j] for l in range(j + 1, n))
for k in range(j + 1, n - 1):
if nums[j] > nums[k]:
f[j][k] = cnt
else:
cnt -= 1
for k in range(2, n - 1):
cnt = sum(nums[i] < nums[k] for i in range(k))
for j in range(k - 1, 0, -1):
if nums[j] > nums[k]:
g[j][k] = cnt
else:
cnt -= 1
return sum(
f[j][k] * g[j][k] for j in range(1, n - 2) for k in range(j + 1, n - 1)
)
| Solution |
python | pytorch__pytorch | torch/sparse/semi_structured.py | {
"start": 16031,
"end": 22196
} | class ____(SparseSemiStructuredTensor):
"""
This class implements semi-structured sparsity for the CUTLASS backend.
In this implementation, the specified elements and metadata are stored separately,
in packed and meta respectively.
When _FORCE_CUTLASS is set, or when cuSPARSELt is not available, this subclass calls into _sparse_semi_structured_(mm|addmm) and
sparse_semi_structured_from_dense for conversion to the compressed format.
"""
BACKEND = "cutlass"
_DTYPE_SHAPE_CONSTRAINTS = {
torch.int8: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 128, 16, 16),
torch.float16: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 64, 8, 8),
torch.bfloat16: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 64, 8, 8),
torch.float32: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 32, 4, 4),
}
@classmethod
def from_dense(
cls, original_tensor: torch.Tensor
) -> "SparseSemiStructuredTensorCUTLASS":
cls._validate_device_dim_dtype_shape(original_tensor)
(
sparse_tensor_cutlass,
meta_tensor_cutlass,
) = sparse_semi_structured_from_dense_cutlass(original_tensor)
# pyrefly: ignore [no-matching-overload]
return cls(
original_tensor.shape,
packed=sparse_tensor_cutlass,
meta=meta_tensor_cutlass,
packed_t=None,
meta_t=None,
compressed_swizzled_bitmask=None,
requires_grad=original_tensor.requires_grad,
)
def to_dense(self): # type: ignore[override]
assert self.meta is not None and self.packed is not None
return (
sparse_semi_structured_to_dense_cutlass(
self.packed,
self.meta,
)
if self.meta.ndim == 2
else super().to_dense()
)
@classmethod
def prune_dense_static_sort(
cls, original_tensor: torch.Tensor, algorithm=""
) -> "SparseSemiStructuredTensor":
"""
This function takes in a unpruned dense tensor and runs a (branchless) static sort across a 4x4 tile.
It greedily picks the largest values in the tile, upholding the 2:4 sparsity constraint across both rows and columns.
The algorithm used to prune the matrix is implemented in `_sparse_semi_structured_tile`.
Then it creates the packed and meta tensors for the compressed sparse representation of the pruned dense tensor.
It also calculates the packed_t and meta_t tensors for the compressed sparse representation of the transposed
pruned dense tensor.
Since we cannot transpose the compressed representations, we store both for the fw/bw pass respectively.
Finally, this function also computes a compressed swizzled bitmask that encodes the sparsity pattern
This can be used in the backward pass to mask the gradients.
[9 1 7 4] [9 0 7 0]
[1 2 3 0] [0 2 0 0]
[8 3 5 4] -> prune 4x4 tile -> [8 0 0 4] -> pack to CUTLASS semi-structured -> packed
[1 2 6 2] [0 0 6 2] -> metadata
-> pack to transposed CUTLASS -> packed_t
semi-structured representation -> metadata_t
-> compute swizzled bitmask -> compressed_swizzled_bitmask
The equivalent PyTorch code to create the same five outputs from the dense tensor can be found below:
```
from torch.sparse import SparseSemiStructuredTensorCUTLASS
from torch.sparse._semi_structured_conversions import (
_sparse_semi_structured_tile,
_compute_compressed_swizzled_bitmask,
)
pruned = _sparse_semi_structured_tile(dense)
packed_cutlass, meta_cutlass = sparse_semi_structured_from_dense_cutlass(pruned)
packed_t_cutlass, meta_t_cutlass = sparse_semi_structured_from_dense_cutlass(
pruned.t().contiguous()
)
bitmask = _compute_compressed_swizzled_bitmask(pruned)
SparseSemiStructuredTensorCUTLASS(
dense.shape,
packed_cutlass,
meta_cutlass,
packed_t_cutlass,
meta_t_cutlass,
bitmask,
)
```
"""
# We can either pack to the CUTLASS or cuSPARSELt representation, depending on the use_cutlass flag.
(
packed,
meta,
packed_t,
meta_t,
compressed_swizzled_bitmask,
) = torch._sparse_semi_structured_tile(
original_tensor, algorithm=algorithm, use_cutlass=True
)
# pyrefly: ignore [no-matching-overload]
return cls(
original_tensor.shape,
packed=packed,
meta=meta,
packed_t=packed_t,
meta_t=meta_t,
compressed_swizzled_bitmask=compressed_swizzled_bitmask,
requires_grad=False,
)
def _mm(
self, B: torch.Tensor, *, bias: torch.Tensor | None = None, **kwargs
) -> torch.Tensor:
if isinstance(B, SparseSemiStructuredTensor):
raise ValueError(
"`SparseSemiStructuredTensor @ SparseSemiStructuredTensor` is not supported by the hardware"
)
cls_name = self.__class__.__name__
if self.ndim != 2 or B.ndim != 2:
raise NotImplementedError(
f"`{cls_name}` matmul: Broadcasting is not implemented"
)
if self.packed is None or self.meta is None:
raise NotImplementedError(
f"`{cls_name}` matmul: operation is not supported"
)
else:
if bias is None:
res = torch._sparse_semi_structured_mm(self.packed, self.meta, B)
else:
res = torch._sparse_semi_structured_addmm(
bias, self.packed, self.meta, B
)
return res[: self.shape[0]]
| SparseSemiStructuredTensorCUTLASS |
python | spack__spack | lib/spack/spack/install_test.py | {
"start": 42222,
"end": 42471
} | class ____(spack.error.SpackError):
"""Raised when one or more tests in a suite have failed."""
def __init__(self, num_failures):
msg = "%d test(s) in the suite failed.\n" % num_failures
super().__init__(msg)
| TestSuiteFailure |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-people-that-can-be-caught-in-tag.py | {
"start": 631,
"end": 1223
} | class ____(object):
def catchMaximumAmountofPeople(self, team, dist):
"""
:type team: List[int]
:type dist: int
:rtype: int
"""
result = j = 0
for i in xrange(len(team)):
if not team[i]:
continue
while j < i-dist:
j += 1
while j <= min(i+dist, len(team)-1):
if team[j] == 0:
break
j += 1
if j <= min(i+dist, len(team)-1):
result += 1
j += 1
return result
| Solution2 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictReadOnly2.py | {
"start": 4133,
"end": 4165
} | class ____(TD_A1, TD_A2): ...
| TD_A |
python | PrefectHQ__prefect | src/prefect/client/schemas/responses.py | {
"start": 2176,
"end": 2852
} | class ____(PrefectBaseModel):
"""Details associated with a WAIT state transition."""
type: Literal["wait_details"] = Field(
default="wait_details",
description=(
"The type of state transition detail. Used to ensure pydantic does not"
" coerce into a different type."
),
)
delay_seconds: int = Field(
default=...,
description=(
"The length of time in seconds the client should wait before transitioning"
" states."
),
)
reason: Optional[str] = Field(
default=None, description="The reason why the state transition should wait."
)
| StateWaitDetails |
python | sanic-org__sanic | sanic/compat.py | {
"start": 855,
"end": 2741
} | class ____(StrEnum):
"""Base class for string enums that are case insensitive."""
def _generate_next_value_(name, start, count, last_values):
return name.upper()
def __eq__(self, value: object) -> bool:
value = str(value).upper()
return super().__eq__(value)
def __hash__(self) -> int:
return hash(self.value)
def __str__(self) -> str:
return self.value
@contextmanager
def use_context(method: StartMethod):
from sanic import Sanic
orig = Sanic.start_method
Sanic.start_method = method
yield
Sanic.start_method = orig
def enable_windows_color_support():
import ctypes
kernel = ctypes.windll.kernel32
kernel.SetConsoleMode(kernel.GetStdHandle(-11), 7)
def pypy_os_module_patch() -> None:
"""
The PyPy os module is missing the 'readlink' function, which causes issues
withaiofiles. This workaround replaces the missing 'readlink' function
with 'os.path.realpath', which serves the same purpose.
"""
if hasattr(os, "readlink"):
error_logger.warning(
"PyPy: Skipping patching of the os module as it appears the "
"'readlink' function has been added."
)
return
module = sys.modules["os"]
module.readlink = os.path.realpath # type: ignore
def pypy_windows_set_console_cp_patch() -> None:
"""
A patch function for PyPy on Windows that sets the console code page to
UTF-8 encodingto allow for proper handling of non-ASCII characters. This
function uses ctypes to call the Windows API functions SetConsoleCP and
SetConsoleOutputCP to set the code page.
"""
from ctypes import windll # type: ignore
code: int = windll.kernel32.GetConsoleOutputCP()
if code != 65001:
windll.kernel32.SetConsoleCP(65001)
windll.kernel32.SetConsoleOutputCP(65001)
| UpperStrEnum |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/external_data.py | {
"start": 13209,
"end": 18754
} | class ____(IHaveNew):
name: str
job_name: Optional[str]
op_selection: Optional[Sequence[str]]
mode: Optional[str]
min_interval: Optional[int]
description: Optional[str]
target_dict: Mapping[str, TargetSnap]
metadata: Optional[SensorMetadataSnap]
default_status: Optional[DefaultSensorStatus]
sensor_type: Optional[SensorType]
asset_selection: Optional[AssetSelection]
tags: Mapping[str, str]
run_tags: Mapping[str, str]
owners: Optional[Sequence[str]]
def __new__(
cls,
name: str,
job_name: Optional[str] = None,
op_selection: Optional[Sequence[str]] = None,
mode: Optional[str] = None,
min_interval: Optional[int] = None,
description: Optional[str] = None,
target_dict: Optional[Mapping[str, TargetSnap]] = None,
metadata: Optional[SensorMetadataSnap] = None,
default_status: Optional[DefaultSensorStatus] = None,
sensor_type: Optional[SensorType] = None,
asset_selection: Optional[AssetSelection] = None,
tags: Optional[Mapping[str, str]] = None,
run_tags: Optional[Mapping[str, str]] = None,
owners: Optional[Sequence[str]] = None,
):
if job_name and not target_dict:
# handle the legacy case where the ExternalSensorData was constructed from an earlier
# version of dagster
target_dict = {
job_name: TargetSnap(
job_name=check.str_param(job_name, "job_name"),
mode=check.opt_str_param(mode, "mode", DEFAULT_MODE_NAME),
op_selection=check.opt_nullable_sequence_param(
op_selection, "op_selection", str
),
)
}
if asset_selection is not None:
check.invariant(
is_whitelisted_for_serdes_object(asset_selection),
"asset_selection must be serializable",
)
return super().__new__(
cls,
name=name,
job_name=job_name, # keep legacy field populated
op_selection=op_selection, # keep legacy field populated
mode=mode, # keep legacy field populated
min_interval=min_interval,
description=description,
target_dict=target_dict,
metadata=metadata,
# Leave default_status as None if it's STOPPED to maintain stable back-compat IDs
default_status=(
DefaultSensorStatus.RUNNING
if default_status == DefaultSensorStatus.RUNNING
else None
),
sensor_type=sensor_type,
asset_selection=asset_selection,
tags=tags or {},
run_tags=run_tags or {},
owners=owners,
)
@classmethod
def from_def(cls, sensor_def: SensorDefinition, repository_def: RepositoryDefinition) -> Self:
first_target = sensor_def.targets[0] if sensor_def.targets else None
asset_keys = None
if isinstance(sensor_def, AssetSensorDefinition):
asset_keys = [sensor_def.asset_key]
if sensor_def.asset_selection is not None:
target_dict = {
base_asset_job_name: TargetSnap(
job_name=base_asset_job_name, mode=DEFAULT_MODE_NAME, op_selection=None
)
for base_asset_job_name in repository_def.get_implicit_asset_job_names()
}
serializable_asset_selection = (
sensor_def.asset_selection.to_serializable_asset_selection(
repository_def.asset_graph
)
)
else:
target_dict = {
target.job_name: TargetSnap(
job_name=target.job_name,
mode=DEFAULT_MODE_NAME,
op_selection=target.op_selection,
)
for target in sensor_def.targets
}
if sensor_def.has_anonymous_job:
job_def = check.inst(
sensor_def.job,
UnresolvedAssetJobDefinition,
"Anonymous job should be UnresolvedAssetJobDefinition",
)
serializable_asset_selection = job_def.selection.to_serializable_asset_selection(
repository_def.asset_graph
)
else:
serializable_asset_selection = None
return cls(
name=sensor_def.name,
job_name=first_target.job_name if first_target else None,
mode=None,
op_selection=first_target.op_selection if first_target else None,
target_dict=target_dict,
min_interval=sensor_def.minimum_interval_seconds,
description=sensor_def.description,
metadata=SensorMetadataSnap(
asset_keys=asset_keys, standard_metadata=sensor_def.metadata
),
default_status=sensor_def.default_status,
sensor_type=sensor_def.sensor_type,
asset_selection=serializable_asset_selection,
tags=sensor_def.tags,
run_tags=(
sensor_def.run_tags
if isinstance(sensor_def, AutomationConditionSensorDefinition)
else None
),
owners=sensor_def.owners,
)
@whitelist_for_serdes(storage_name="ExternalRepositoryErrorData")
@record
| SensorSnap |
python | mlflow__mlflow | mlflow/metrics/genai/prompts/v1.py | {
"start": 4394,
"end": 7960
} | class ____:
definition = (
"Answer similarity is evaluated on the degree of semantic similarity of the provided "
"output to the provided targets, which is the ground truth. Scores can be assigned based "
"on the gradual similarity in meaning and description to the provided targets, where a "
"higher score indicates greater alignment between the provided output and provided targets."
)
grading_prompt = (
"Answer similarity: Below are the details for different scores:\n"
"- Score 1: The output has little to no semantic similarity to the provided targets.\n"
"- Score 2: The output displays partial semantic similarity to the provided targets on "
"some aspects.\n"
"- Score 3: The output has moderate semantic similarity to the provided targets.\n"
"- Score 4: The output aligns with the provided targets in most aspects and has "
"substantial semantic similarity.\n"
"- Score 5: The output closely aligns with the provided targets in all significant aspects."
)
grading_context_columns = ["targets"]
parameters = default_parameters
default_model = default_model
example_score_2 = EvaluationExample(
input="What is MLflow?",
output="MLflow is an open-source platform.",
score=2,
justification="The provided output is partially similar to the target, as it captures the "
"general idea that MLflow is an open-source platform. However, it lacks the comprehensive "
"details and context provided in the target about MLflow's purpose, development, and "
"challenges it addresses. Therefore, it demonstrates partial, but not complete, "
"semantic similarity.",
grading_context={
"targets": "MLflow is an open-source platform for managing the end-to-end "
"machine learning (ML) lifecycle. It was developed by Databricks, a company "
"that specializes in big data and machine learning solutions. MLflow is "
"designed to address the challenges that data scientists and machine learning "
"engineers face when developing, training, and deploying machine learning "
"models."
},
)
example_score_4 = EvaluationExample(
input="What is MLflow?",
output="MLflow is an open-source platform for managing machine learning workflows, "
"including experiment tracking, model packaging, versioning, and deployment, simplifying "
"the ML lifecycle.",
score=4,
justification="The provided output aligns closely with the target. It covers various key "
"aspects mentioned in the target, including managing machine learning workflows, "
"experiment tracking, model packaging, versioning, and deployment. While it may not include"
" every single detail from the target, it demonstrates substantial semantic similarity.",
grading_context={
"targets": "MLflow is an open-source platform for managing the end-to-end "
"machine learning (ML) lifecycle. It was developed by Databricks, a company "
"that specializes in big data and machine learning solutions. MLflow is "
"designed to address the challenges that data scientists and machine learning "
"engineers face when developing, training, and deploying machine learning "
"models."
},
)
default_examples = [example_score_2, example_score_4]
@dataclass
| AnswerSimilarityMetric |
python | jazzband__django-polymorphic | example/pexp/models.py | {
"start": 1888,
"end": 1969
} | class ____(NormalModelA):
field2 = models.CharField(max_length=10)
| NormalModelB |
python | google__jax | tests/mosaic/gpu_test.py | {
"start": 8374,
"end": 8506
} | class ____(TestCase, jtu.CudaArchSpecificTest):
def setUp(self):
self.skip_unless_sm90a()
super().setUp()
| Sm90ATestCase |
python | Textualize__textual | src/textual/widgets/_tabs.py | {
"start": 5490,
"end": 27054
} | class ____(Widget, can_focus=True):
"""A row of tabs."""
DEFAULT_CSS = """
Tabs {
width: 100%;
height: 2;
&:focus {
.underline--bar {
background: $foreground 30%;
}
& .-active {
text-style: $block-cursor-text-style;
color: $block-cursor-foreground;
background: $block-cursor-background;
}
}
& > #tabs-scroll {
overflow: hidden;
}
#tabs-list {
width: auto;
}
#tabs-list-bar, #tabs-list {
width: auto;
height: auto;
min-width: 100%;
overflow: hidden hidden;
}
&:ansi {
#tabs-list {
text-style: dim;
}
& #tabs-list > .-active {
text-style: not dim;
}
&:focus {
#tabs-list > .-active {
text-style: bold not dim;
}
}
& .underline--bar {
color: ansi_bright_blue;
background: ansi_default;
}
& .-active {
color: transparent;
background: transparent;
}
}
}
"""
BINDINGS: ClassVar[list[BindingType]] = [
Binding("left", "previous_tab", "Previous tab", show=False),
Binding("right", "next_tab", "Next tab", show=False),
]
"""
| Key(s) | Description |
| :- | :- |
| left | Move to the previous tab. |
| right | Move to the next tab. |
"""
class TabError(Exception):
"""Exception raised when there is an error relating to tabs."""
class TabMessage(Message):
"""Parent class for all messages that have to do with a specific tab."""
ALLOW_SELECTOR_MATCH = {"tab"}
"""Additional message attributes that can be used with the [`on` decorator][textual.on]."""
def __init__(self, tabs: Tabs, tab: Tab) -> None:
"""Initialize event.
Args:
tabs: The Tabs widget.
tab: The tab that is the object of this message.
"""
self.tabs: Tabs = tabs
"""The tabs widget containing the tab."""
self.tab: Tab = tab
"""The tab that is the object of this message."""
super().__init__()
@property
def control(self) -> Tabs:
"""The tabs widget containing the tab that is the object of this message.
This is an alias for the attribute `tabs` and is used by the
[`on`][textual.on] decorator.
"""
return self.tabs
def __rich_repr__(self) -> rich.repr.Result:
yield self.tabs
yield self.tab
class TabActivated(TabMessage):
"""Sent when a new tab is activated."""
class TabDisabled(TabMessage):
"""Sent when a tab is disabled."""
class TabEnabled(TabMessage):
"""Sent when a tab is enabled."""
class TabHidden(TabMessage):
"""Sent when a tab is hidden."""
class TabShown(TabMessage):
"""Sent when a tab is shown."""
class Cleared(Message):
"""Sent when there are no active tabs.
This can occur when Tabs are cleared, if all tabs are hidden, or if the
currently active tab is unset.
"""
def __init__(self, tabs: Tabs) -> None:
"""Initialize the event.
Args:
tabs: The tabs widget.
"""
self.tabs: Tabs = tabs
"""The tabs widget which was cleared."""
super().__init__()
@property
def control(self) -> Tabs:
"""The tabs widget which was cleared.
This is an alias for [`Cleared.tabs`][textual.widgets.Tabs.Cleared] which
is used by the [`on`][textual.on] decorator.
"""
return self.tabs
def __rich_repr__(self) -> rich.repr.Result:
yield self.tabs
active: reactive[str] = reactive("", init=False)
"""The ID of the active tab, or empty string if none are active."""
def __init__(
self,
*tabs: Tab | ContentText,
active: str | None = None,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
):
"""Construct a Tabs widget.
Args:
*tabs: Positional argument should be explicit Tab objects, or a str or Text.
active: ID of the tab which should be active on start.
name: Optional name for the tabs widget.
id: Optional ID for the widget.
classes: Optional initial classes for the widget.
disabled: Whether the widget is disabled or not.
"""
self._tabs_counter = 0
add_tabs = [
(
Tab(tab, id=f"tab-{self._new_tab_id}")
if isinstance(tab, (str, Content, Text))
else self._auto_tab_id(tab)
)
for tab in tabs
]
super().__init__(
name=name,
id=id,
classes=classes,
disabled=disabled,
)
self._tabs = add_tabs
self._first_active = active
def _auto_tab_id(self, tab: Tab) -> Tab:
"""Set an automatic ID if not supplied."""
if tab.id is None:
tab.id = f"tab-{self._new_tab_id}"
return tab
@property
def _new_tab_id(self) -> int:
"""Get the next tab id in a sequence."""
self._tabs_counter += 1
return self._tabs_counter
@property
def tab_count(self) -> int:
"""Total number of tabs."""
return len(self.query("#tabs-list > Tab"))
@property
def _potentially_active_tabs(self) -> list[Tab]:
"""List of all tabs that could be active.
This list is comprised of all tabs that are shown and enabled,
plus the active tab in case it is disabled.
"""
return [
tab
for tab in self.query("#tabs-list > Tab").results(Tab)
if ((not tab.disabled or tab is self.active_tab) and tab.display)
]
@property
def _next_active(self) -> Tab | None:
"""Next tab to make active if the active tab is removed."""
tabs = self._potentially_active_tabs
if self.active_tab is None:
return None
try:
active_index = tabs.index(self.active_tab)
except ValueError:
return None
del tabs[active_index]
try:
return tabs[active_index]
except IndexError:
try:
return tabs[active_index - 1]
except IndexError:
pass
return None
def add_tab(
self,
tab: Tab | ContentText,
*,
before: Tab | str | None = None,
after: Tab | str | None = None,
) -> AwaitComplete:
"""Add a new tab to the end of the tab list.
Args:
tab: A new tab object, or a label (str or Text).
before: Optional tab or tab ID to add the tab before.
after: Optional tab or tab ID to add the tab after.
Returns:
An optionally awaitable object that waits for the tab to be mounted and
internal state to be fully updated to reflect the new tab.
Raises:
Tabs.TabError: If there is a problem with the addition request.
Note:
Only one of `before` or `after` can be provided. If both are
provided a `Tabs.TabError` will be raised.
"""
if before and after:
raise self.TabError("Unable to add a tab both before and after a tab")
if isinstance(before, str):
try:
before = self.query_one(f"#tabs-list > #{before}", Tab)
except NoMatches:
raise self.TabError(
f"There is no tab with ID '{before}' to mount before"
)
elif isinstance(before, Tab) and self not in before.ancestors:
raise self.TabError(
"Request to add a tab before a tab that isn't part of this tab collection"
)
if isinstance(after, str):
try:
after = self.query_one(f"#tabs-list > #{after}", Tab)
except NoMatches:
raise self.TabError(f"There is no tab with ID '{after}' to mount after")
elif isinstance(after, Tab) and self not in after.ancestors:
raise self.TabError(
"Request to add a tab after a tab that isn't part of this tab collection"
)
from_empty = self.tab_count == 0
tab_widget = (
Tab(tab, id=f"tab-{self._new_tab_id}")
if isinstance(tab, (str, Content, Text))
else self._auto_tab_id(tab)
)
mount_await = self.query_one("#tabs-list").mount(
tab_widget, before=before, after=after
)
if from_empty:
tab_widget.add_class("-active")
activated_message = self.TabActivated(self, tab_widget)
async def refresh_active() -> None:
"""Wait for things to be mounted before highlighting."""
await mount_await
self.active = tab_widget.id or ""
self._highlight_active(animate=False)
self.post_message(activated_message)
return AwaitComplete(refresh_active())
elif before or after:
async def refresh_active() -> None:
await mount_await
self._highlight_active(animate=False)
return AwaitComplete(refresh_active())
return AwaitComplete(mount_await())
def clear(self) -> AwaitComplete:
"""Clear all the tabs.
Returns:
An awaitable object that waits for the tabs to be removed.
"""
underline = self.query_one(Underline)
underline.highlight_start = 0
underline.highlight_end = 0
self.post_message(self.Cleared(self))
self.active = ""
return AwaitComplete(self.query("#tabs-list > Tab").remove())
def remove_tab(self, tab_or_id: Tab | str | None) -> AwaitComplete:
"""Remove a tab.
Args:
tab_or_id: The Tab to remove or its id.
Returns:
An optionally awaitable object that waits for the tab to be removed.
"""
if not tab_or_id:
return AwaitComplete()
if isinstance(tab_or_id, Tab):
remove_tab = tab_or_id
else:
try:
remove_tab = self.query_one(f"#tabs-list > #{tab_or_id}", Tab)
except NoMatches:
return AwaitComplete()
if remove_tab.has_class("-active"):
next_tab = self._next_active
else:
next_tab = None
async def do_remove() -> None:
"""Perform the remove after refresh so the underline bar gets new positions."""
await remove_tab.remove()
if not self.query("#tabs-list > Tab"):
self.active = ""
elif next_tab is not None:
self.active = next_tab.id or ""
else:
self._highlight_active(animate=False)
return AwaitComplete(do_remove())
def validate_active(self, active: str) -> str:
"""Check id assigned to active attribute is a valid tab."""
if active and not self.query(f"#tabs-list > #{active}"):
raise ValueError(f"No Tab with id {active!r}")
return active
@property
def active_tab(self) -> Tab | None:
"""The currently active tab, or None if there are no active tabs."""
try:
return self.query_one("#tabs-list Tab.-active", Tab)
except NoMatches:
return None
def _on_mount(self, _: Mount) -> None:
"""Make the first tab active."""
if self._first_active is not None:
self.active = self._first_active
if not self.active:
try:
tab = self.query("#tabs-list > Tab").first(Tab)
except NoMatches:
# Tabs are empty!
return
self.active = tab.id or ""
def compose(self) -> ComposeResult:
with Container(id="tabs-scroll"):
with Vertical(id="tabs-list-bar"):
with Horizontal(id="tabs-list"):
yield from self._tabs
yield Underline()
def watch_active(self, previously_active: str, active: str) -> None:
"""Handle a change to the active tab."""
self.query("#tabs-list > Tab.-active").remove_class("-active")
if active:
try:
active_tab = self.query_one(f"#tabs-list > #{active}", Tab)
except NoMatches:
return
active_tab.add_class("-active")
self._highlight_active(animate=previously_active != "")
self._scroll_active_tab()
self.post_message(self.TabActivated(self, active_tab))
else:
underline = self.query_one(Underline)
underline.highlight_start = 0
underline.highlight_end = 0
self.post_message(self.Cleared(self))
def _highlight_active(
self,
animate: bool = True,
) -> None:
"""Move the underline bar to under the active tab.
Args:
animate: Should the bar animate?
"""
underline = self.query_one(Underline)
try:
_active_tab = self.query_one("#tabs-list > Tab.-active")
except NoMatches:
underline.show_highlight = False
underline.highlight_start = 0
underline.highlight_end = 0
else:
underline.show_highlight = True
def move_underline(animate: bool) -> None:
"""Move the tab underline.
Args:
animate: animate the underline to its new position.
"""
try:
active_tab = self.query_one("#tabs-list > Tab.-active")
except NoMatches:
pass
else:
tab_region = active_tab.virtual_region.shrink(
active_tab.styles.gutter
)
start, end = tab_region.column_span
if animate:
underline.animate(
"highlight_start",
start,
duration=0.3,
level="basic",
)
underline.animate(
"highlight_end",
end,
duration=0.3,
level="basic",
)
else:
underline.highlight_start = start
underline.highlight_end = end
if animate and self.app.animation_level != "none":
self.set_timer(
0.02,
lambda: self.call_after_refresh(move_underline, True),
)
else:
self.call_after_refresh(move_underline, False)
async def _on_tab_clicked(self, event: Tab.Clicked) -> None:
"""Activate a tab that was clicked."""
self.focus()
event.stop()
self._activate_tab(event.tab)
def _activate_tab(self, tab: Tab) -> None:
"""Activate a tab.
Args:
tab: The Tab that was clicked.
"""
self.query("#tabs-list Tab.-active").remove_class("-active")
tab.add_class("-active")
self.active = tab.id or ""
def _on_underline_clicked(self, event: Underline.Clicked) -> None:
"""The underline was clicked.
Activate the tab above to make a larger clickable area.
Args:
event: The Underline.Clicked event.
"""
event.stop()
offset = event.offset + (0, -1)
self.focus()
for tab in self.query(Tab):
if offset in tab.region and not tab.disabled:
self._activate_tab(tab)
break
def _scroll_active_tab(self) -> None:
"""Scroll the active tab into view."""
if self.active_tab:
try:
self.query_one("#tabs-scroll").scroll_to_center(
self.active_tab, force=True
)
except NoMatches:
pass
def _on_resize(self):
"""Make the active tab visible on resize."""
self._highlight_active(animate=False)
self._scroll_active_tab()
def action_next_tab(self) -> None:
"""Make the next tab active."""
self._move_tab(+1)
def action_previous_tab(self) -> None:
"""Make the previous tab active."""
self._move_tab(-1)
def _move_tab(self, direction: int) -> None:
"""Activate the next enabled tab in the given direction.
Tab selection wraps around. If no tab is currently active, the "next"
tab is set to be the first and the "previous" tab is the last one.
Args:
direction: +1 for the next tab, -1 for the previous.
"""
active_tab = self.active_tab
tabs = self._potentially_active_tabs
if not tabs:
return
if not active_tab:
self.active = tabs[0 if direction == 1 else -1].id or ""
return
tab_count = len(tabs)
new_tab_index = (tabs.index(active_tab) + direction) % tab_count
self.active = tabs[new_tab_index].id or ""
def _on_tab_disabled(self, event: Tab.Disabled) -> None:
"""Re-post the disabled message."""
event.stop()
self.post_message(self.TabDisabled(self, event.tab))
def _on_tab_enabled(self, event: Tab.Enabled) -> None:
"""Re-post the enabled message."""
event.stop()
self.post_message(self.TabEnabled(self, event.tab))
def _on_tab_relabelled(self, event: Tab.Relabelled) -> None:
"""Redraw the highlight when tab is relabelled."""
event.stop()
self._highlight_active()
def disable(self, tab_id: str) -> Tab:
"""Disable the indicated tab.
Args:
tab_id: The ID of the [`Tab`][textual.widgets.Tab] to disable.
Returns:
The [`Tab`][textual.widgets.Tab] that was targeted.
Raises:
TabError: If there are any issues with the request.
"""
try:
tab_to_disable = self.query_one(f"#tabs-list > Tab#{tab_id}", Tab)
except NoMatches:
raise self.TabError(
f"There is no tab with ID {tab_id!r} to disable."
) from None
tab_to_disable.disabled = True
return tab_to_disable
def enable(self, tab_id: str) -> Tab:
"""Enable the indicated tab.
Args:
tab_id: The ID of the [`Tab`][textual.widgets.Tab] to enable.
Returns:
The [`Tab`][textual.widgets.Tab] that was targeted.
Raises:
TabError: If there are any issues with the request.
"""
try:
tab_to_enable = self.query_one(f"#tabs-list > Tab#{tab_id}", Tab)
except NoMatches:
raise self.TabError(
f"There is no tab with ID {tab_id!r} to enable."
) from None
tab_to_enable.disabled = False
return tab_to_enable
def hide(self, tab_id: str) -> Tab:
"""Hide the indicated tab.
Args:
tab_id: The ID of the [`Tab`][textual.widgets.Tab] to hide.
Returns:
The [`Tab`][textual.widgets.Tab] that was targeted.
Raises:
TabError: If there are any issues with the request.
"""
try:
tab_to_hide = self.query_one(f"#tabs-list > Tab#{tab_id}", Tab)
except NoMatches:
raise self.TabError(f"There is no tab with ID {tab_id!r} to hide.")
if tab_to_hide.has_class("-active"):
next_tab = self._next_active
self.active = next_tab.id or "" if next_tab else ""
tab_to_hide.add_class("-hidden")
self.post_message(self.TabHidden(self, tab_to_hide).set_sender(self))
self.call_after_refresh(self._highlight_active)
return tab_to_hide
def show(self, tab_id: str) -> Tab:
"""Show the indicated tab.
Args:
tab_id: The ID of the [`Tab`][textual.widgets.Tab] to show.
Returns:
The [`Tab`][textual.widgets.Tab] that was targeted.
Raises:
TabError: If there are any issues with the request.
"""
try:
tab_to_show = self.query_one(f"#tabs-list > Tab#{tab_id}", Tab)
except NoMatches:
raise self.TabError(f"There is no tab with ID {tab_id!r} to show.")
tab_to_show.remove_class("-hidden")
self.post_message(self.TabShown(self, tab_to_show).set_sender(self))
if not self.active:
self._activate_tab(tab_to_show)
self.call_after_refresh(self._highlight_active)
return tab_to_show
| Tabs |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zoho-crm/source_zoho_crm/auth.py | {
"start": 213,
"end": 1432
} | class ____(Oauth2Authenticator):
def _prepare_refresh_token_params(self) -> Dict[str, str]:
return {
"refresh_token": self.get_refresh_token(),
"client_id": self.get_client_id(),
"client_secret": self.get_client_secret(),
"grant_type": "refresh_token",
}
def get_auth_header(self) -> Mapping[str, Any]:
token = self.get_access_token()
return {"Authorization": f"Zoho-oauthtoken {token}"}
def refresh_access_token(self) -> Tuple[str, int]:
"""
This method is overridden because token parameters should be passed via URL params, not via the request payload.
Returns a tuple of (access_token, token_lifespan_in_seconds)
"""
try:
response = requests.request(method="POST", url=self.get_token_refresh_endpoint(), params=self._prepare_refresh_token_params())
response.raise_for_status()
response_json = response.json()
return response_json[self.get_access_token_name()], response_json[self.get_expires_in_name()]
except Exception as e:
raise Exception(f"Error while refreshing access token: {e}") from e
| ZohoOauth2Authenticator |
python | charliermarsh__ruff | crates/ruff_python_ast/generate.py | {
"start": 855,
"end": 2219
} | class ____:
name: str
accepts_sequence: bool = False
# Map of AST node types to their corresponding visitor information.
# Only visitors that are different from the default `visit_*` method are included.
# These visitors either have a different name or accept a sequence of items.
type_to_visitor_function: dict[str, VisitorInfo] = {
"TypeParams": VisitorInfo("visit_type_params", True),
"Parameters": VisitorInfo("visit_parameters", True),
"Stmt": VisitorInfo("visit_body", True),
"Arguments": VisitorInfo("visit_arguments", True),
}
def rustfmt(code: str) -> str:
return check_output(["rustfmt", "--emit=stdout"], input=code, text=True)
def to_snake_case(node: str) -> str:
"""Converts CamelCase to snake_case"""
return re.sub("([A-Z])", r"_\1", node).lower().lstrip("_")
def write_rustdoc(out: list[str], doc: str) -> None:
for line in doc.split("\n"):
out.append(f"/// {line}")
# ------------------------------------------------------------------------------
# Read AST description
def load_ast(root: Path) -> Ast:
ast_path = root.joinpath("crates", "ruff_python_ast", "ast.toml")
with ast_path.open("rb") as ast_file:
ast = tomllib.load(ast_file)
return Ast(ast)
# ------------------------------------------------------------------------------
# Preprocess
@dataclass
| VisitorInfo |
python | getsentry__sentry | src/sentry/migrations/0925_backfill_open_periods.py | {
"start": 1168,
"end": 6988
} | class ____:
UNRESOLVED = 0
RESOLVED = 1
# end copy
def get_open_periods_for_group(
apps: StateApps,
group_id: int,
status: int,
project_id: int,
first_seen: datetime,
activities: list[Any],
GroupOpenPeriod: Any,
) -> list[Any]:
# No activities means the group has been open since the first_seen date
if not activities:
return [
GroupOpenPeriod(
group_id=group_id,
project_id=project_id,
date_started=first_seen,
)
]
# Since activities can apparently exist from before the start date, we want to ensure the
# first open period starts at the first_seen date and ends at the first resolution activity after it.
start_index = 0
activities_len = len(activities)
while (
start_index < activities_len and activities[start_index].type not in RESOLVED_ACTIVITY_TYPES
):
start_index += 1
open_periods = []
regression_time: datetime | None = first_seen
for activity in activities[start_index:]:
if activity.type == ActivityType.SET_REGRESSION.value and regression_time is None:
regression_time = activity.datetime
elif activity.type in RESOLVED_ACTIVITY_TYPES and regression_time is not None:
if activity.datetime < regression_time:
logger.warning(
"Open period has invalid start and end dates",
extra={
"group_id": group_id,
"activity_datetime": activity.datetime,
"regression_time": regression_time,
},
)
return []
open_periods.append(
GroupOpenPeriod(
group_id=group_id,
project_id=project_id,
date_started=regression_time,
date_ended=activity.datetime,
resolution_activity=activity,
user_id=activity.user_id,
)
)
regression_time = None
# Handle currently open period if the group is unresolved
if status == GroupStatus.UNRESOLVED and regression_time is not None:
open_periods.append(
GroupOpenPeriod(
group_id=group_id,
project_id=project_id,
date_started=regression_time,
)
)
return open_periods
def _backfill_group_open_periods(
apps: StateApps, group_data: list[tuple[int, datetime, int, int]]
) -> None:
GroupOpenPeriod = apps.get_model("sentry", "GroupOpenPeriod")
Activity = apps.get_model("sentry", "Activity")
group_ids = [group_id for group_id, _, _, _ in group_data]
groups_with_open_periods = set(
GroupOpenPeriod.objects.filter(group_id__in=group_ids)
.values_list("group_id", flat=True)
.distinct()
)
group_ids = [group_id for group_id in group_ids if group_id not in groups_with_open_periods]
# Filter to REGRESSION and SET_RESOLVED_XX activties to find the bounds of each open period.
# The only UNRESOLVED activity we would care about is the first UNRESOLVED activity for the group creation,
# but we don't create an entry for that.
activities = defaultdict(list)
try:
for activity in Activity.objects.filter(
group_id__in=group_ids,
type__in=[ActivityType.SET_REGRESSION.value, *RESOLVED_ACTIVITY_TYPES],
).order_by("datetime"):
# Skip activities before the group's first_seen date
if activity.datetime < activity.group.first_seen:
continue
activities[activity.group_id].append(activity)
except Exception as e:
logger.exception(
"Error getting activities",
extra={"group_ids": group_ids, "error": e},
)
return
open_periods = []
for group_id, first_seen, status, project_id in group_data:
# Skip groups that already have open periods
if group_id in groups_with_open_periods:
continue
open_periods.extend(
get_open_periods_for_group(
apps,
group_id,
status,
project_id,
first_seen,
activities[group_id],
GroupOpenPeriod,
)
)
with transaction.atomic(router.db_for_write(GroupOpenPeriod)):
try:
GroupOpenPeriod.objects.bulk_create(open_periods)
except (IntegrityError, DataError) as e:
logger.exception(
"Error creating open period",
extra={"group_ids": group_ids, "error": e},
)
def backfill_group_open_periods(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
Group = apps.get_model("sentry", "Group")
backfill_key = "backfill_group_open_periods_from_activity_0702_1"
redis_client = redis.redis_clusters.get(settings.SENTRY_MONITORS_REDIS_CLUSTER)
progress_id = int(redis_client.get(backfill_key) or 0)
for group_data in chunked(
RangeQuerySetWrapperWithProgressBarApprox(
Group.objects.filter(id__gt=progress_id).values_list(
"id", "first_seen", "status", "project_id"
),
result_value_getter=lambda item: item[0],
),
CHUNK_SIZE,
):
logger.info(
"Processing batch for group open period backfill",
extra={"last_group_id": group_data[-1][0]},
)
_backfill_group_open_periods(apps, group_data)
# Save progress to redis in case we have to restart
redis_client.set(backfill_key, group_data[-1][0], ex=60 * 60 * 24 * 7)
| GroupStatus |
python | tensorflow__tensorflow | tensorflow/compiler/tests/matrix_triangular_solve_op_test.py | {
"start": 1308,
"end": 7375
} | class ____(xla_test.XLATestCase):
# MatrixTriangularSolve defined for float64, float32, complex64, complex128
# (https://www.tensorflow.org/api_docs/python/tf/matrix_triangular_solve)
@property
def float_types(self):
return set(super(MatrixTriangularSolveOpTest,
self).float_types).intersection(
(np.float64, np.float32, np.complex64, np.complex128))
def _VerifyTriangularSolveBase(self, sess, placeholder_a, placeholder_ca,
placeholder_b, a, clean_a, b, verification,
atol):
feed_dict = {placeholder_a: a, placeholder_ca: clean_a, placeholder_b: b}
verification_np = sess.run(verification, feed_dict)
broadcasted_shape = a.shape[:-2] + (b.shape[-2], b.shape[-1])
broadcasted_b = b + np.zeros(shape=broadcasted_shape, dtype=b.dtype)
self.assertAllClose(broadcasted_b, verification_np, atol=atol)
def _VerifyTriangularSolve(self, a, b, lower, adjoint, atol, dtype=None):
clean_a = np.tril(a) if lower else np.triu(a)
with self.session() as sess:
placeholder_a = MakePlaceholder(a, dtype)
placeholder_ca = MakePlaceholder(clean_a, dtype)
placeholder_b = MakePlaceholder(b, dtype)
with self.test_scope():
x = linalg_ops.matrix_triangular_solve(
placeholder_a, placeholder_b, lower=lower, adjoint=adjoint)
verification = test_util.matmul_without_tf32(
placeholder_ca, x, adjoint_a=adjoint)
self._VerifyTriangularSolveBase(sess, placeholder_a, placeholder_ca,
placeholder_b, a, clean_a, b,
verification, atol)
def _VerifyTriangularSolveCombo(self, a, b, atol=1e-4, dtype=None):
transp = lambda x: np.swapaxes(x, -1, -2)
for lower, adjoint in itertools.product([True, False], repeat=2):
self._VerifyTriangularSolve(
a if lower else transp(a), b, lower, adjoint, atol, dtype=dtype)
def testBasic(self):
rng = np.random.RandomState(0)
a = np.tril(rng.randn(5, 5))
b = rng.randn(5, 7)
for dtype in self.float_types:
self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))
def testBfloat16(self):
rng = np.random.RandomState(0)
a = np.tril(rng.randn(5, 5))
b = rng.randn(5, 7)
self._VerifyTriangularSolveCombo(a, b, atol=5e-2, dtype=dtypes.bfloat16)
def testBasicNotActuallyTriangular(self):
rng = np.random.RandomState(0)
a = rng.randn(5, 5) # the `a` matrix is not lower-triangular
b = rng.randn(5, 7)
for dtype in self.float_types:
self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))
def testBasicComplexDtypes(self):
if xla_test.test.is_built_with_rocm():
# The following subtest invokes the call to "BlasTrsm"
# That operation is currently not supported on the ROCm platform
self.skipTest("BlasTrsm op for complex types is not supported in ROCm")
rng = np.random.RandomState(0)
a = np.tril(rng.randn(5, 5) + rng.randn(5, 5) * 1j)
b = rng.randn(5, 7) + rng.randn(5, 7) * 1j
for dtype in self.complex_types:
self._VerifyTriangularSolveCombo(a.astype(dtype), b.astype(dtype))
def testBatch(self):
rng = np.random.RandomState(0)
shapes = [((4, 3, 3), (4, 3, 5)), ((1, 2, 2), (1, 2, 1)),
((1, 1, 1), (1, 1, 2)), ((2, 3, 4, 4), (2, 3, 4, 1))]
tuples = itertools.product(self.float_types, shapes)
for dtype, (a_shape, b_shape) in tuples:
n = a_shape[-1]
a = np.tril(rng.rand(*a_shape) - 0.5) / (2.0 * n) + np.eye(n)
b = rng.randn(*b_shape)
self._VerifyTriangularSolveCombo(
a.astype(dtype), b.astype(dtype), atol=1e-3)
def testBatchBroadcast(self):
rng = np.random.RandomState(0)
shapes = [((3, 3), (4, 3, 5)), ((1, 2, 2), (3, 2, 1)), ((1, 1), (1, 1, 2)),
((1, 3, 4, 4), (2, 1, 4, 1))]
tuples = itertools.product(self.float_types, shapes)
for dtype, (a_shape, b_shape) in tuples:
n = a_shape[-1]
a = np.tril(rng.rand(*a_shape) - 0.5) / (2.0 * n) + np.eye(n)
b = rng.randn(*b_shape)
self._VerifyTriangularSolveCombo(
a.astype(dtype), b.astype(dtype), atol=1e-3)
def testLarge(self):
n = 1024
rng = np.random.RandomState(0)
a = np.tril(rng.rand(n, n) - 0.5) / (2.0 * n) + np.eye(n)
b = rng.randn(n, n)
self._VerifyTriangularSolve(
a.astype(np.float32), b.astype(np.float32), True, False, 1e-4)
@test_util.disable_mlir_bridge("Error handling")
def testNonSquareCoefficientMatrix(self):
rng = np.random.RandomState(0)
for dtype in self.float_types:
a = rng.randn(3, 4).astype(dtype)
b = rng.randn(4, 4).astype(dtype)
with self.test_scope():
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
linalg_ops.matrix_triangular_solve(a, b)
@test_util.run_v2_only # Different error types
@test_util.disable_mlir_bridge("Error handling")
def testWrongDimensionsV2(self):
randn = np.random.RandomState(0).randn
for dtype in self.float_types:
lhs = constant_op.constant(randn(3, 3), dtype=dtype)
rhs = constant_op.constant(randn(4, 3), dtype=dtype)
with self.assertRaises(errors.InvalidArgumentError):
linalg_ops.matrix_triangular_solve(lhs, rhs)
with self.assertRaises(errors.InvalidArgumentError):
linalg_ops.matrix_triangular_solve(lhs, rhs)
@test_util.run_v1_only("Different error types")
@test_util.disable_mlir_bridge("Error handling")
def testWrongDimensionsV1(self):
randn = np.random.RandomState(0).randn
for dtype in self.float_types:
lhs = constant_op.constant(randn(3, 3), dtype=dtype)
rhs = constant_op.constant(randn(4, 3), dtype=dtype)
with self.assertRaises(ValueError):
linalg_ops.matrix_triangular_solve(lhs, rhs)
with self.assertRaises(ValueError):
linalg_ops.matrix_triangular_solve(lhs, rhs)
if __name__ == "__main__":
test.main()
| MatrixTriangularSolveOpTest |
python | matplotlib__matplotlib | galleries/examples/units/basic_units.py | {
"start": 8170,
"end": 9979
} | class ____:
def addition_rule(self, units):
for unit_1, unit_2 in itertools.pairwise(units):
if unit_1 != unit_2:
return NotImplemented
return units[0]
def multiplication_rule(self, units):
non_null = [u for u in units if u]
if len(non_null) > 1:
return NotImplemented
return non_null[0]
op_dict = {
'__mul__': multiplication_rule,
'__rmul__': multiplication_rule,
'__add__': addition_rule,
'__radd__': addition_rule,
'__sub__': addition_rule,
'__rsub__': addition_rule}
def __call__(self, operation, units):
if operation not in self.op_dict:
return NotImplemented
return self.op_dict[operation](self, units)
unit_resolver = UnitResolver()
cm = BasicUnit('cm', 'centimeters')
inch = BasicUnit('inch', 'inches')
inch.add_conversion_factor(cm, 2.54)
cm.add_conversion_factor(inch, 1/2.54)
radians = BasicUnit('rad', 'radians')
degrees = BasicUnit('deg', 'degrees')
radians.add_conversion_factor(degrees, 180.0/np.pi)
degrees.add_conversion_factor(radians, np.pi/180.0)
secs = BasicUnit('s', 'seconds')
hertz = BasicUnit('Hz', 'Hertz')
minutes = BasicUnit('min', 'minutes')
secs.add_conversion_fn(hertz, lambda x: 1./x)
secs.add_conversion_factor(minutes, 1/60.0)
# radians formatting
def rad_fn(x, pos=None):
if x >= 0:
n = int((x / np.pi) * 2.0 + 0.25)
else:
n = int((x / np.pi) * 2.0 - 0.25)
if n == 0:
return '0'
elif n == 1:
return r'$\pi/2$'
elif n == 2:
return r'$\pi$'
elif n == -1:
return r'$-\pi/2$'
elif n == -2:
return r'$-\pi$'
elif n % 2 == 0:
return fr'${n//2}\pi$'
else:
return fr'${n}\pi/2$'
| UnitResolver |
python | django__django | tests/lookup/models.py | {
"start": 2035,
"end": 2174
} | class ____(models.Model):
name = models.CharField(max_length=100)
games = models.ManyToManyField(Game, related_name="players")
| Player |
python | tensorflow__tensorflow | tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py | {
"start": 2034,
"end": 3025
} | class ____(object):
def __init__(self, tpu_map):
self._tpu_map = tpu_map
def get(self, name):
return MockRequestClass(name, self._tpu_map)
def mock_request_compute_metadata(*args, **kwargs):
del kwargs # Unused.
if args[0] == 'project/project-id':
return 'test-project'
elif args[0] == 'instance/zone':
return 'projects/test-project/locations/us-central1-c'
elif args[0] == 'instance/network-interfaces/0/ip':
return '10.128.1.2'
return ''
def mock_is_running_in_gce():
return True
def mock_is_not_running_in_gce():
return False
def mock_running_in_gce_urlopen(cls, *args, **kwargs):
del cls, args, kwargs # Unused.
mock_response = mock.MagicMock()
mock_response.info.return_value = {'Metadata-Flavor': 'Google'}
return mock_response
def mock_not_running_in_gce_urlopen(cls, *args, **kwargs):
del cls, args, kwargs # Unused.
raise URLError(reason='Host does not exist.')
@test_util.run_all_in_graph_and_eager_modes
| MockNodeClass |
python | great-expectations__great_expectations | tests/integration/test_utils/data_source_config/pandas_filesystem_csv.py | {
"start": 649,
"end": 1898
} | class ____(DataSourceTestConfig):
# see options: https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html
read_options: dict[str, Any] = field(default_factory=dict)
# see options: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_csv.html
write_options: dict[str, Any] = field(default_factory=dict)
@property
@override
def label(self) -> str:
return "pandas-filesystem-csv"
@property
@override
def pytest_mark(self) -> pytest.MarkDecorator:
return pytest.mark.filesystem
@override
def create_batch_setup(
self,
request: pytest.FixtureRequest,
data: pd.DataFrame,
extra_data: Mapping[str, pd.DataFrame],
context: AbstractDataContext,
engine_manager: Optional[SessionSQLEngineManager] = None,
) -> BatchTestSetup:
assert not extra_data, "extra_data is not supported for this data source."
tmp_path = request.getfixturevalue("tmp_path")
assert isinstance(tmp_path, pathlib.Path)
return PandasFilesystemCsvBatchTestSetup(
data=data,
config=self,
base_dir=tmp_path,
context=context,
)
| PandasFilesystemCsvDatasourceTestConfig |
python | networkx__networkx | networkx/algorithms/planarity.py | {
"start": 5159,
"end": 5997
} | class ____:
"""Represents a set of return edges.
All return edges in an interval induce a same constraint on the contained
edges, which means that all edges must either have a left orientation or
all edges must have a right orientation.
"""
def __init__(self, low=None, high=None):
self.low = low
self.high = high
def empty(self):
"""Check if the interval is empty"""
return self.low is None and self.high is None
def copy(self):
"""Returns a copy of this interval"""
return Interval(self.low, self.high)
def conflicting(self, b, planarity_state):
"""Returns True if interval I conflicts with edge b"""
return (
not self.empty()
and planarity_state.lowpt[self.high] > planarity_state.lowpt[b]
)
| Interval |
python | kamyu104__LeetCode-Solutions | Python/sum-of-beautiful-subsequences.py | {
"start": 1171,
"end": 2093
} | class ____(object):
def totalBeauty(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def count(arr):
for i, x in enumerate(sorted(arr)): # coordinate compression
val_to_idx[x] = i
bit = BIT(len(arr))
for x in arr:
bit.add(val_to_idx[x], bit.query(val_to_idx[x]-1)+1)
return bit.query(len(arr)-1)
mx = max(nums)
val_to_idx = [0]*(mx+1)
lookup = [[] for _ in xrange(mx+1)]
for x in nums:
for d in FACTORS[x]:
lookup[d].append(x)
return reduce(lambda accu, x: (accu+x)%MOD, (PHI[g]*count(lookup[g]) for g in reversed(xrange(1, mx+1))), 0)
# Time: precompute: O(rlogr), r = max_nums
# runtime: O(mx * log(mx) + nlogr * (log(nlogr) + logn)), mx = max(nums)
# Space: O(rlogr)
# number theory, bit, fenwick tree
| Solution |
python | facebookresearch__faiss | faiss/gpu/test/test_gpu_index.py | {
"start": 3645,
"end": 7790
} | class ____(unittest.TestCase):
def test_ivfflat_cpu_coarse(self):
res = faiss.StandardGpuResources()
d = 128
nb = 5000
nq = 100
nlist = 10
nprobe = 3
q = faiss.IndexFlatL2(d)
idx_cpu = faiss.IndexIVFFlat(q, d, nlist)
rs = np.random.RandomState(567)
xb = rs.rand(nb, d).astype('float32')
xq = rs.rand(nq, d).astype('float32')
idx_cpu.train(xb)
idx_cpu.add(xb)
# construct a GPU index using the same trained coarse quantizer
# from the CPU index
config = faiss.GpuIndexIVFFlatConfig()
idx_gpu = faiss.GpuIndexIVFFlat(res, q, d, nlist, faiss.METRIC_L2, config)
assert(idx_gpu.is_trained)
idx_gpu.add(xb)
k = 20
idx_cpu.nprobe = nprobe
idx_gpu.nprobe = nprobe
d_g, i_g = idx_gpu.search(xq, k)
d_c, i_c = idx_cpu.search(xq, k)
self.assertGreaterEqual((i_g == i_c).sum(), i_g.size * 0.9)
self.assertTrue(np.allclose(d_g, d_c, rtol=5e-5, atol=5e-5))
def test_ivfsq_pu_coarse(self):
res = faiss.StandardGpuResources()
d = 128
nb = 5000
nq = 100
nlist = 10
nprobe = 3
use_residual = True
qtype = faiss.ScalarQuantizer.QT_8bit
q = faiss.IndexFlatL2(d)
idx_cpu = faiss.IndexIVFScalarQuantizer(
q, d, nlist, qtype, faiss.METRIC_L2, use_residual)
rs = np.random.RandomState(567)
xb = rs.rand(nb, d).astype('float32')
xq = rs.rand(nq, d).astype('float32')
idx_cpu.train(xb)
idx_cpu.add(xb)
# construct a GPU index using the same trained coarse quantizer
# from the CPU index
idx_gpu = faiss.GpuIndexIVFScalarQuantizer(
res, q, d, nlist, qtype, faiss.METRIC_L2, use_residual)
assert(not idx_gpu.is_trained)
idx_gpu.train(xb)
idx_gpu.add(xb)
k = 20
idx_cpu.nprobe = nprobe
idx_gpu.nprobe = nprobe
d_g, i_g = idx_gpu.search(xq, k)
d_c, i_c = idx_cpu.search(xq, k)
self.assertGreaterEqual(knn_intersection_measure(i_c, i_g), 0.9)
self.assertTrue(np.allclose(d_g, d_c, rtol=2e-4, atol=2e-4))
def test_ivfpq_cpu_coarse(self):
res = faiss.StandardGpuResources()
d = 32
nb = 50000
nq = 20
nlist_lvl_1 = 10
nlist_lvl_2 = 1000
nprobe_lvl_1 = 3
nprobe_lvl_2 = 10
rs = np.random.RandomState(567)
coarse_centroids = rs.rand(nlist_lvl_2, d).astype('float32')
# Construct an IVFFlat index for usage as a coarse quantizer
idx_coarse_cpu = faiss.IndexIVFFlat(
faiss.IndexFlatL2(d), d, nlist_lvl_1)
idx_coarse_cpu.set_direct_map_type(faiss.DirectMap.Hashtable)
idx_coarse_cpu.nprobe = nprobe_lvl_1
idx_coarse_cpu.train(coarse_centroids)
idx_coarse_cpu.add(coarse_centroids)
idx_coarse_cpu.make_direct_map()
assert(idx_coarse_cpu.ntotal == nlist_lvl_2)
idx_cpu = faiss.IndexIVFPQ(
idx_coarse_cpu, d, nlist_lvl_2, 4, 8)
xb = rs.rand(nb, d).astype('float32')
idx_cpu.train(xb)
idx_cpu.add(xb)
idx_cpu.nprobe = nprobe_lvl_2
# construct a GPU index using the same trained coarse quantizer
# from the CPU index
config = faiss.GpuIndexIVFPQConfig()
config.use_cuvs = False
idx_gpu = faiss.GpuIndexIVFPQ(
res, idx_coarse_cpu, d, nlist_lvl_2, 4, 8, faiss.METRIC_L2, config)
assert(not idx_gpu.is_trained)
idx_gpu.train(xb)
idx_gpu.add(xb)
idx_gpu.nprobe = nprobe_lvl_2
k = 10
# precomputed codes also utilize the coarse quantizer
for use_precomputed in [False, True]:
idx_gpu.setPrecomputedCodes(use_precomputed)
xq = rs.rand(nq, d).astype('float32')
d_g, i_g = idx_gpu.search(xq, k)
d_c, i_c = idx_cpu.search(xq, k)
self.assertGreaterEqual(knn_intersection_measure(i_c, i_g), 0.9)
| TestIVFPluggableCoarseQuantizer |
python | kamyu104__LeetCode-Solutions | Python/maximum-product-of-three-numbers.py | {
"start": 29,
"end": 741
} | class ____(object):
def maximumProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
min1, min2 = float("inf"), float("inf")
max1, max2, max3 = float("-inf"), float("-inf"), float("-inf")
for n in nums:
if n <= min1:
min2 = min1
min1 = n
elif n <= min2:
min2 = n
if n >= max1:
max3 = max2
max2 = max1
max1 = n
elif n >= max2:
max3 = max2
max2 = n
elif n >= max3:
max3 = n
return max(min1 * min2 * max1, max1 * max2 * max3)
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 197194,
"end": 197556
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("assignable", "client_mutation_id")
assignable = sgqlc.types.Field("Assignable", graphql_name="assignable")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| AddAssigneesToAssignablePayload |
python | pytorch__pytorch | torch/distributed/fsdp/_trace_utils.py | {
"start": 2296,
"end": 3814
} | class ____:
"""
This represents the execution order information from the forward pass.
Attributes:
curr_module (nn.Module): Current module being traced.
module_forward_order (List[nn.Module]): The modules in (pre-)forward
order, i.e. the order in which their ``forward()`` methods are
called. Each call to a module's ``forward()`` corresponds to one
element in the list.
module_to_param_usage_infos (Dict[nn.Module, List[_ParamUsageInfo]]):
Maps a module to a list of module execution infos. See
:class:`_ParamUsageInfo` for details.
param_forward_order (List[nn.Parameter]): The parameters in forward
execution order, where only a parameter's first participation is
included.
visited_params (Set[nn.Parameter]): The parameters visited so far
during the trace. This is only used during tracing for fast
membership check. Invariant: The parameters in
``param_forward_order`` are exactly those in ``visited_params``.
"""
def __init__(self, root_module: nn.Module) -> None:
self.curr_module: nn.Module = root_module
self.module_forward_order: list[nn.Module] = [root_module]
self.module_to_param_usage_infos: dict[nn.Module, list[_ParamUsageInfo]] = {
root_module: []
}
self.param_forward_order: list[nn.Parameter] = []
self.visited_params: set[nn.Parameter] = set()
| _ExecutionInfo |
python | apache__thrift | lib/py/src/transport/TTwisted.py | {
"start": 1186,
"end": 1560
} | class ____(TTransport.TTransportBase):
def __init__(self):
self.__wbuf = BytesIO()
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
msg = self.__wbuf.getvalue()
self.__wbuf = BytesIO()
return self.sendMessage(msg)
def sendMessage(self, message):
raise NotImplementedError
| TMessageSenderTransport |
python | huggingface__transformers | tests/models/internvl/test_modeling_internvl.py | {
"start": 1547,
"end": 6236
} | class ____:
def __init__(
self,
parent,
batch_size=3,
seq_length=7,
image_seq_length=64,
vision_feature_layer=-1,
ignore_index=-100,
image_token_id=1,
num_channels=3,
image_size=64,
model_type="internvl",
is_training=True,
text_config={
"model_type": "qwen2",
"vocab_size": 99,
"hidden_size": 128,
"intermediate_size": 37,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"num_key_value_heads": 2,
"output_channels": 64,
"hidden_act": "silu",
"max_position_embeddings": 512,
"rope_theta": 10000,
"mlp_ratio": 4,
"tie_word_embeddings": True,
"bos_token_id": 3,
"eos_token_id": 4,
"pad_token_id": 5,
},
vision_config={
"hidden_size": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 128,
"image_size": 64,
"patch_size": 4,
"num_channels": 3,
"hidden_act": "quick_gelu",
"use_absolute_position_embeddings": True,
},
):
self.parent = parent
self.ignore_index = ignore_index
self.bos_token_id = text_config["bos_token_id"]
self.eos_token_id = text_config["eos_token_id"]
self.pad_token_id = text_config["pad_token_id"]
self.image_token_id = image_token_id
self.model_type = model_type
self.text_config = text_config
self.vision_config = vision_config
self.batch_size = batch_size
self.vision_feature_layer = vision_feature_layer
self.is_training = is_training
self.image_seq_length = image_seq_length
self.num_channels = num_channels
self.image_size = image_size
self.seq_length = seq_length + image_seq_length
self.num_hidden_layers = text_config["num_hidden_layers"]
self.vocab_size = text_config["vocab_size"]
self.hidden_size = text_config["hidden_size"]
self.num_attention_heads = text_config["num_attention_heads"]
def get_config(self):
return InternVLConfig(
text_config=self.text_config,
vision_config=self.vision_config,
model_type=self.model_type,
image_token_id=self.image_token_id,
image_seq_length=self.image_seq_length,
vision_feature_layer=self.vision_feature_layer,
)
def prepare_config_and_inputs(self):
config = self.get_config()
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
input_ids[input_ids == self.image_token_id] = self.pad_token_id
input_ids[:, : self.image_seq_length] = self.image_token_id
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
def create_and_check_model_fp16_forward(self, config, input_ids, pixel_values, attention_mask):
model = InternVLForConditionalGeneration(config=config)
model.to(torch_device)
model.half()
model.eval()
logits = model(
input_ids=input_ids,
attention_mask=attention_mask,
pixel_values=pixel_values.to(torch.bfloat16),
return_dict=True,
)["logits"]
self.parent.assertFalse(torch.isnan(logits).any().item())
def create_and_check_model_fp16_autocast_forward(self, config, input_ids, pixel_values, attention_mask):
config.dtype = torch.float16
model = InternVLForConditionalGeneration(config=config)
model.to(torch_device)
model.eval()
with torch.autocast(device_type=torch_device, dtype=torch.float16):
logits = model(
input_ids=input_ids,
attention_mask=attention_mask,
pixel_values=pixel_values.to(torch.bfloat16),
return_dict=True,
)["logits"]
self.parent.assertFalse(torch.isnan(logits).any().item())
@require_torch
| InternVLVisionText2TextModelTester |
python | getsentry__sentry | src/sentry/models/files/fileblobindex.py | {
"start": 276,
"end": 637
} | class ____(AbstractFileBlobIndex):
__relocation_scope__ = RelocationScope.Excluded
file = FlexibleForeignKey("sentry.File")
blob = FlexibleForeignKey("sentry.FileBlob", on_delete=models.PROTECT)
class Meta:
app_label = "sentry"
db_table = "sentry_fileblobindex"
unique_together = (("file", "blob", "offset"),)
| FileBlobIndex |
python | instagram__MonkeyType | tests/test_typing.py | {
"start": 20175,
"end": 24855
} | class ____:
@pytest.mark.parametrize(
'value, expected_type',
[
(1, int),
('foo', str),
(Dummy, Type[Dummy]),
(1.1, float),
((), typing_Tuple[()]),
(('a', 1, True), typing_Tuple[str, int, bool]),
(set(), Set[Any]),
({'a', 'b', 'c'}, Set[str]),
({'a', 1}, Set[Union[str, int]]),
([], List[Any]),
([1, 2, 3], List[int]),
([1, True], List[Union[int, bool]]),
(tuple(), typing_Tuple[()]),
(helper, Callable),
(lambda x: x, Callable),
(Dummy().an_instance_method, Callable),
(len, Callable),
(generator(), Iterator[Any]),
],
)
def test_builtin_types(self, value, expected_type):
"""Return the appropriate type for builtins"""
assert get_type(value, max_typed_dict_size=VERY_LARGE_MAX_TYPED_DICT_SIZE) == expected_type
assert get_type(value, max_typed_dict_size=0) == expected_type
@pytest.mark.parametrize(
'value, expected_when_max_size_is_zero, expected_when_max_size_is_none',
[
({}, Dict[Any, Any], Dict[Any, Any]),
({'a': 1, 'b': 2}, Dict[str, int],
make_typed_dict(required_fields={'a': int, 'b': int})),
({'a': 1, 2: 'b'}, Dict[Union[str, int], Union[str, int]], Dict[Union[str, int], Union[str, int]]),
(get_default_dict(key=1, value=1), DefaultDict[int, int], DefaultDict[int, int]),
(get_nested_default_dict(key=1, value=1.0),
DefaultDict[int, DefaultDict[int, float]],
DefaultDict[int, DefaultDict[int, float]]),
({
'foo': {
'a': 1,
'b': "hello"
}
},
Dict[str, Dict[str, Union[str, int]]],
make_typed_dict(required_fields={
'foo': make_typed_dict(required_fields={
'a': int,
'b': str
}),
})),
],
)
def test_dict_type(self, value, expected_when_max_size_is_zero, expected_when_max_size_is_none):
"""Return the appropriate type for dictionaries."""
assert get_type(value, max_typed_dict_size=0) == expected_when_max_size_is_zero
assert get_type(value, max_typed_dict_size=VERY_LARGE_MAX_TYPED_DICT_SIZE) == expected_when_max_size_is_none
@pytest.mark.parametrize(
'value, expected_when_max_size_is_zero, expected_when_max_size_is_none',
[
(get_default_dict_with_dict(key=1, value=3),
DefaultDict[int, Dict[str, int]],
DefaultDict[int, make_typed_dict(required_fields={'a': int, 'b': int})]),
([{'a': 1, 'b': 2}], List[Dict[str, int]], List[make_typed_dict(required_fields={'a': int, 'b': int})]),
([{'a': 1, 'b': 2}, {'a': 1}], List[Dict[str, int]],
List[make_typed_dict(required_fields={'a': int}, optional_fields={'b': int})]),
(({'a': 1, 'b': 2},),
typing_Tuple[Dict[str, int]],
typing_Tuple[make_typed_dict(required_fields={'a': int, 'b': int})]),
],
)
def test_dict_nested_within_generic(self, value, expected_when_max_size_is_zero, expected_when_max_size_is_none):
"""Return the appropriate type for dictionaries."""
actual_when_zero = get_type(value, max_typed_dict_size=0)
actual_when_none = get_type(value, max_typed_dict_size=VERY_LARGE_MAX_TYPED_DICT_SIZE)
assert (types_equal(actual_when_zero, expected_when_max_size_is_zero))
assert (types_equal(actual_when_none, expected_when_max_size_is_none))
@pytest.mark.parametrize(
'value, max_typed_dict_size, expected',
[
({'a': 1, 'b': 2}, 1, Dict[str, int]),
({
'foo': {
'a': 1,
'b': "hello"
}
}, 1,
make_typed_dict(required_fields={'foo': Dict[str, Union[str, int]]})),
]
)
def test_dict_type_with_other_max_sizes(self, value, max_typed_dict_size, expected):
assert get_type(value, max_typed_dict_size) == expected
def test_instance_type(self):
"""Return appropriate type for an instance of a user defined class"""
assert get_type(Dummy(), max_typed_dict_size=VERY_LARGE_MAX_TYPED_DICT_SIZE) == Dummy
def test_class_type(self):
"""Return the correct type for classes"""
assert get_type(Dummy, max_typed_dict_size=VERY_LARGE_MAX_TYPED_DICT_SIZE) == Type[Dummy]
| TestGetType |
python | spyder-ide__spyder | spyder/plugins/appearance/widgets.py | {
"start": 570,
"end": 9150
} | class ____(QDialog):
"""A color scheme editor dialog."""
def __init__(self, parent=None, stack=None):
super().__init__(parent)
self.parent = parent
self.stack = stack
self.order = [] # Uses scheme names
# Needed for self.get_edited_color_scheme()
self.widgets = {}
self.scheme_name_textbox = {}
self.last_edited_color_scheme = None
self.last_used_scheme = None
self.original_scheme = None
# Widgets
bbox = SpyderDialogButtonBox(
QDialogButtonBox.Ok | QDialogButtonBox.Cancel
)
# Layout
layout = QVBoxLayout()
layout.addWidget(self.stack)
layout.addWidget(bbox)
self.setLayout(layout)
# Signals
bbox.accepted.connect(self.validate_colors)
bbox.accepted.connect(self.get_edited_color_scheme)
bbox.rejected.connect(self.reject)
# Helpers
# -------------------------------------------------------------------------
def set_scheme(self, scheme_name):
"""Set the current stack by 'scheme_name'."""
self.stack.setCurrentIndex(self.order.index(scheme_name))
self.last_used_scheme = scheme_name
self.original_scheme = self.get_edited_color_scheme()
def get_scheme_name(self):
"""
Returns the edited scheme name, needed to update the combobox on
scheme creation.
"""
return self.scheme_name_textbox[self.last_used_scheme].text()
def validate_colors(self):
"""
Validate the current color scheme and display a message box listing
any invalid colors.
"""
invalid_colors = {}
scheme_name = self.last_used_scheme
pattern = (
r"^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3}|[A-Fa-f0-9]{8}|[A-Fa-f0-9]{4})$"
)
for key in self.widgets[scheme_name]:
items = self.widgets[scheme_name][key]
if not bool(re.match(pattern, items[0].text())):
invalid_colors[key] = items[0].text()
if invalid_colors:
message = _(
"The following properties have invalid colors:<br><br>"
)
for property_name, color in invalid_colors.items():
name = syntaxhighlighters.COLOR_SCHEME_KEYS[property_name]
clean_name = name[:-1].replace("<br>", "")
message += _(
'The property <b>{}</b> has an invalid color: {}<br>'
).format(clean_name, color)
msgbox = QMessageBox(
QMessageBox.Warning,
_('Error setting colors'),
message,
QMessageBox.Ok,
self
)
msgbox.setTextFormat(Qt.RichText)
msgbox.exec_()
else:
self.accept()
def get_edited_color_scheme(self):
"""
Get the values of the last edited color scheme to be used in an instant
preview in the preview editor, without using `apply`.
"""
color_scheme = {}
scheme_name = self.last_used_scheme
for key in self.widgets[scheme_name]:
items = self.widgets[scheme_name][key]
if len(items) == 1:
# ColorLayout
value = items[0].text()
else:
# ColorLayout + checkboxes
value = (items[0].text(), items[1].isChecked(),
items[2].isChecked())
color_scheme[key] = value
return color_scheme
# Actions
# -------------------------------------------------------------------------
def add_color_scheme_stack(self, scheme_name, custom=False):
"""Add a stack for a given scheme and connects the CONF values."""
color_scheme_groups = [
(_('Text'), ["normal", "comment", "string", "number", "keyword",
"builtin", "definition", "instance", ]),
(_('Highlight'), ["currentcell", "currentline", "occurrence",
"matched_p", "unmatched_p", "ctrlclick"]),
(_('Background'), ["background", "sideareas"])
]
parent = self.parent
self.line_edit = parent.create_lineedit(
_("Theme name:"), '{0}/name'.format(scheme_name)
)
self.widgets[scheme_name] = {}
# Widget setup
self.line_edit.label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.setWindowTitle(_('Syntax highlighting theme editor'))
# Layout
name_layout = QHBoxLayout()
name_layout.addWidget(self.line_edit.label)
name_layout.addWidget(self.line_edit.textbox)
self.scheme_name_textbox[scheme_name] = self.line_edit.textbox
if not custom:
self.line_edit.textbox.setDisabled(True)
if not self.isVisible():
self.line_edit.setVisible(False)
cs_layout = QVBoxLayout()
cs_layout.addLayout(name_layout)
h_layout = QHBoxLayout()
v_layout = QVBoxLayout()
for index, item in enumerate(color_scheme_groups):
group_name, keys = item
group_layout = QGridLayout()
for row, key in enumerate(keys):
option = "{0}/{1}".format(scheme_name, key)
value = self.parent.get_option(option)
name = syntaxhighlighters.COLOR_SCHEME_KEYS[key]
if isinstance(value, str):
label, clayout = parent.create_coloredit(
name,
option,
without_layout=True,
)
label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
group_layout.addWidget(label, row+1, 0)
group_layout.addLayout(clayout, row+1, 1)
# Needed to update temp scheme to obtain instant preview
self.widgets[scheme_name][key] = [clayout]
else:
label, clayout, cb_bold, cb_italic = parent.create_scedit(
name,
option,
without_layout=True,
)
label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
group_layout.addWidget(label, row+1, 0)
group_layout.addLayout(clayout, row+1, 1)
group_layout.addWidget(cb_bold, row+1, 2)
group_layout.addWidget(cb_italic, row+1, 3)
# Needed to update temp scheme to obtain instant preview
self.widgets[scheme_name][key] = [clayout, cb_bold,
cb_italic]
group_box = QGroupBox(group_name)
group_box.setLayout(group_layout)
if index == 0:
h_layout.addWidget(group_box)
else:
v_layout.addWidget(group_box)
h_layout.addLayout(v_layout)
cs_layout.addLayout(h_layout)
stackitem = QWidget()
stackitem.setLayout(cs_layout)
self.stack.addWidget(stackitem)
self.order.append(scheme_name)
def delete_color_scheme_stack(self, scheme_name):
"""Remove stack widget by 'scheme_name'."""
self.set_scheme(scheme_name)
widget = self.stack.currentWidget()
self.stack.removeWidget(widget)
index = self.order.index(scheme_name)
self.order.pop(index)
def restore_original_scheme(self, scheme_name):
"Restores the original values of the scheme being edited."
parent = self.parent
self.line_edit.textbox.setText(
str(parent.get_option('{0}/name'.format(scheme_name)))
)
for key, value in self.original_scheme.items():
if isinstance(value, tuple):
color = QColor()
color.setNamedColor(value[0])
self.widgets[scheme_name][key][0].update_text(color)
self.widgets[scheme_name][key][1].setChecked(value[1])
self.widgets[scheme_name][key][2].setChecked(value[2])
else:
color = QColor()
color.setNamedColor(value)
self.widgets[scheme_name][key][0].update_text(color)
def reject(self):
"""Executes when Cancel is pressed: Restores the edited scheme."""
self.restore_original_scheme(self.last_used_scheme)
super().reject()
| SchemeEditor |
python | apache__airflow | providers/apache/kafka/tests/unit/apache/kafka/triggers/test_await_message.py | {
"start": 1365,
"end": 1490
} | class ____:
def __init__(*args, **kwargs):
pass
def error(*args, **kwargs):
return False
| MockedMessage |
python | agronholm__apscheduler | src/apscheduler/datastores/mongodb.py | {
"start": 2949,
"end": 3710
} | class ____(Generic[T]):
cursor: Cursor[T]
def __aiter__(self) -> AsyncIterator[T]:
return self
async def __aenter__(self) -> Self:
return self
async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
await to_thread.run_sync(self.cursor.close)
def __next__(self) -> T:
try:
return next(self.cursor)
except StopIteration:
raise StopAsyncIteration from None
async def __anext__(self) -> T:
return await to_thread.run_sync(next, self)
@classmethod
async def create(cls, func: Callable[..., Cursor[T]]) -> AsyncCursor[T]:
cursor = await to_thread.run_sync(func)
return AsyncCursor(cursor)
@attrs.define(eq=False, repr=False)
| AsyncCursor |
python | has2k1__plotnine | plotnine/coords/coord_trans.py | {
"start": 630,
"end": 4750
} | class ____(coord):
"""
Transformed cartesian coordinate system
Parameters
----------
x : str | trans
Name of transform or `trans` class to transform the x axis
y : str | trans
Name of transform or `trans` class to transform the y axis
xlim : tuple[float, float]
Limits for x axis. If None, then they are automatically computed.
ylim : tuple[float, float]
Limits for y axis. If None, then they are automatically computed.
expand : bool
If `True`, expand the coordinate axes by some factor. If `False`,
use the limits from the data.
"""
trans_x: trans
trans_y: trans
def __init__(
self,
x: str | trans = "identity",
y: str | trans = "identity",
xlim: Optional[tuple[float, float]] = None,
ylim: Optional[tuple[float, float]] = None,
expand: bool = True,
):
from mizani.transforms import gettrans
self.trans_x = gettrans(x)
self.trans_y = gettrans(y)
self.limits = NS(x=xlim, y=ylim)
self.expand = expand
def transform(
self, data: pd.DataFrame, panel_params: panel_view, munch: bool = False
) -> pd.DataFrame:
from mizani.bounds import squish_infinite
if not self.is_linear and munch:
data = self.munch(data, panel_params)
def trans_x(col: FloatSeries) -> FloatSeries:
result = transform_value(self.trans_x, col)
if any(result.isna()):
warn(
"Coordinate transform of x aesthetic "
"created one or more NaN values.",
PlotnineWarning,
)
return result
def trans_y(col: FloatSeries) -> FloatSeries:
result = transform_value(self.trans_y, col)
if any(result.isna()):
warn(
"Coordinate transform of y aesthetic "
"created one or more NaN values.",
PlotnineWarning,
)
return result
data = transform_position(data, trans_x, trans_y)
return transform_position(data, squish_infinite, squish_infinite)
def backtransform_range(self, panel_params: panel_view) -> panel_ranges:
return panel_ranges(
x=self.trans_x.inverse(panel_params.x.range),
y=self.trans_y.inverse(panel_params.y.range),
)
def setup_panel_params(self, scale_x: scale, scale_y: scale) -> panel_view:
"""
Compute the range and break information for the panel
"""
def get_scale_view(
scale: scale, limits: tuple[float, float], trans: trans
) -> scale_view:
coord_limits = trans.transform(limits) if limits else limits
expansion = scale.default_expansion(expand=self.expand)
ranges = scale.expand_limits(
scale.final_limits, expansion, coord_limits, trans
)
sv = scale.view(
limits=coord_limits,
range=ranges.range,
)
sv.range = tuple(sorted(ranges.range_coord)) # type: ignore
breaks = cast("tuple[float, float]", sv.breaks)
sv.breaks = transform_value(trans, breaks)
sv.minor_breaks = transform_value(trans, sv.minor_breaks)
return sv
out = panel_view(
x=get_scale_view(scale_x, self.limits.x, self.trans_x),
y=get_scale_view(scale_y, self.limits.y, self.trans_y),
)
return out
def distance(
self,
x: FloatSeries,
y: FloatSeries,
panel_params: panel_view,
) -> FloatArray:
max_dist = dist_euclidean(panel_params.x.range, panel_params.y.range)[
0
]
xt = self.trans_x.transform(x)
yt = self.trans_y.transform(y)
return dist_euclidean(xt, yt) / max_dist
def transform_value(trans: trans, value: TFloatArrayLike) -> TFloatArrayLike:
"""
Transform value
"""
return trans.transform(value)
| coord_trans |
python | pytorch__pytorch | torch/_inductor/remote_cache.py | {
"start": 11046,
"end": 11102
} | class ____(RedisRemoteCache):
pass
| RemoteAutotuneCache |
python | ray-project__ray | doc/source/ray-core/doc_code/pattern_async_actor.py | {
"start": 1079,
"end": 2049
} | class ____:
def __init__(self, task_store):
self.task_store = task_store
self.num_executed_tasks = 0
async def run(self):
while True:
# Here we use await instead of ray.get() to
# wait for the next task and it will yield
# the control while waiting.
task = await self.task_store.get_next_task.remote()
self._execute_task(task)
def _execute_task(self, task):
# Executing the task
self.num_executed_tasks = self.num_executed_tasks + 1
def get_num_executed_tasks(self):
return self.num_executed_tasks
async_task_executor = AsyncTaskExecutor.remote(task_store)
async_task_executor.run.remote()
# We are able to run get_num_executed_tasks while run method is running.
num_executed_tasks = ray.get(async_task_executor.get_num_executed_tasks.remote())
print(f"num of executed tasks so far: {num_executed_tasks}")
# __async_actor_end__
| AsyncTaskExecutor |
python | jazzband__django-waffle | waffle/tests/test_management.py | {
"start": 7643,
"end": 9177
} | class ____(TestCase):
def test_create(self):
""" The command should create a new sample. """
name = 'test'
percent = 20
call_command('waffle_sample', name, str(percent), create=True)
sample = get_waffle_sample_model().objects.get(name=name)
self.assertEqual(sample.percent, percent)
def test_not_create(self):
""" The command shouldn't create a new sample if the create flag is
not set.
"""
name = 'test'
with self.assertRaisesRegex(CommandError, 'This sample does not exist'):
call_command('waffle_sample', name, '20')
self.assertFalse(get_waffle_sample_model().objects.filter(name=name).exists())
def test_update(self):
""" The command should update an existing sample. """
name = 'test'
sample = get_waffle_sample_model().objects.create(name=name, percent=0)
self.assertEqual(sample.percent, 0)
percent = 50
call_command('waffle_sample', name, str(percent))
sample.refresh_from_db()
self.assertEqual(sample.percent, percent)
def test_list(self):
""" The command should list all samples."""
stdout = io.StringIO()
get_waffle_sample_model().objects.create(name='test', percent=34)
call_command('waffle_sample', list_samples=True, stdout=stdout)
expected = 'Samples:\ntest: 34.0%'
actual = stdout.getvalue().strip()
self.assertEqual(actual, expected)
| WaffleSampleManagementCommandTests |
python | boto__boto3 | tests/unit/dynamodb/test_transform.py | {
"start": 12722,
"end": 13619
} | class ____(BaseTransformAttributeValueTest):
def test_handler(self):
input_params = {
'Structure': {
'TransformMe': self.python_value,
'LeaveAlone': 'unchanged',
}
}
input_shape = {
'Structure': {
'type': 'structure',
'members': {
'TransformMe': {'shape': self.target_shape},
'LeaveAlone': {'shape': 'String'},
},
}
}
self.add_input_shape(input_shape)
self.injector.inject_attribute_value_input(
params=input_params, model=self.operation_model
)
assert input_params == {
'Structure': {
'TransformMe': self.dynamodb_value,
'LeaveAlone': 'unchanged',
}
}
| TestTransformAttributeValueInput |
python | chardet__chardet | chardet/enums.py | {
"start": 873,
"end": 1028
} | class ____:
"""
This enum represents the different states a state machine can be in.
"""
START = 0
ERROR = 1
ITS_ME = 2
| MachineState |
python | redis__redis-py | redis/commands/core.py | {
"start": 245527,
"end": 249848
} | class ____:
"""
Redis Function commands
"""
def function_load(
self, code: str, replace: Optional[bool] = False
) -> Union[Awaitable[str], str]:
"""
Load a library to Redis.
:param code: the source code (must start with
Shebang statement that provides a metadata about the library)
:param replace: changes the behavior to overwrite the existing library
with the new contents.
Return the library name that was loaded.
For more information, see https://redis.io/commands/function-load
"""
pieces = ["REPLACE"] if replace else []
pieces.append(code)
return self.execute_command("FUNCTION LOAD", *pieces)
def function_delete(self, library: str) -> Union[Awaitable[str], str]:
"""
Delete the library called ``library`` and all its functions.
For more information, see https://redis.io/commands/function-delete
"""
return self.execute_command("FUNCTION DELETE", library)
def function_flush(self, mode: str = "SYNC") -> Union[Awaitable[str], str]:
"""
Deletes all the libraries.
For more information, see https://redis.io/commands/function-flush
"""
return self.execute_command("FUNCTION FLUSH", mode)
def function_list(
self, library: Optional[str] = "*", withcode: Optional[bool] = False
) -> Union[Awaitable[List], List]:
"""
Return information about the functions and libraries.
Args:
library: specify a pattern for matching library names
withcode: cause the server to include the libraries source implementation
in the reply
"""
args = ["LIBRARYNAME", library]
if withcode:
args.append("WITHCODE")
return self.execute_command("FUNCTION LIST", *args)
def _fcall(
self, command: str, function, numkeys: int, *keys_and_args: Any
) -> Union[Awaitable[str], str]:
return self.execute_command(command, function, numkeys, *keys_and_args)
def fcall(
self, function, numkeys: int, *keys_and_args: Any
) -> Union[Awaitable[str], str]:
"""
Invoke a function.
For more information, see https://redis.io/commands/fcall
"""
return self._fcall("FCALL", function, numkeys, *keys_and_args)
def fcall_ro(
self, function, numkeys: int, *keys_and_args: Any
) -> Union[Awaitable[str], str]:
"""
This is a read-only variant of the FCALL command that cannot
execute commands that modify data.
For more information, see https://redis.io/commands/fcall_ro
"""
return self._fcall("FCALL_RO", function, numkeys, *keys_and_args)
def function_dump(self) -> Union[Awaitable[str], str]:
"""
Return the serialized payload of loaded libraries.
For more information, see https://redis.io/commands/function-dump
"""
from redis.client import NEVER_DECODE
options = {}
options[NEVER_DECODE] = []
return self.execute_command("FUNCTION DUMP", **options)
def function_restore(
self, payload: str, policy: Optional[str] = "APPEND"
) -> Union[Awaitable[str], str]:
"""
Restore libraries from the serialized ``payload``.
You can use the optional policy argument to provide a policy
for handling existing libraries.
For more information, see https://redis.io/commands/function-restore
"""
return self.execute_command("FUNCTION RESTORE", payload, policy)
def function_kill(self) -> Union[Awaitable[str], str]:
"""
Kill a function that is currently executing.
For more information, see https://redis.io/commands/function-kill
"""
return self.execute_command("FUNCTION KILL")
def function_stats(self) -> Union[Awaitable[List], List]:
"""
Return information about the function that's currently running
and information about the available execution engines.
For more information, see https://redis.io/commands/function-stats
"""
return self.execute_command("FUNCTION STATS")
AsyncFunctionCommands = FunctionCommands
| FunctionCommands |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1210127,
"end": 1212577
} | class ____(sgqlc.types.Type, Node):
"""A repository's open source license"""
__schema__ = github_schema
__field_names__ = (
"body",
"conditions",
"description",
"featured",
"hidden",
"implementation",
"key",
"limitations",
"name",
"nickname",
"permissions",
"pseudo_license",
"spdx_id",
"url",
)
body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body")
"""The full text of the license"""
conditions = sgqlc.types.Field(sgqlc.types.non_null(sgqlc.types.list_of(LicenseRule)), graphql_name="conditions")
"""The conditions set by the license"""
description = sgqlc.types.Field(String, graphql_name="description")
"""A human-readable description of the license"""
featured = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="featured")
"""Whether the license should be featured"""
hidden = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="hidden")
"""Whether the license should be displayed in license pickers"""
implementation = sgqlc.types.Field(String, graphql_name="implementation")
"""Instructions on how to implement the license"""
key = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="key")
"""The lowercased SPDX ID of the license"""
limitations = sgqlc.types.Field(sgqlc.types.non_null(sgqlc.types.list_of(LicenseRule)), graphql_name="limitations")
"""The limitations set by the license"""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The license full name specified by <https://spdx.org/licenses>"""
nickname = sgqlc.types.Field(String, graphql_name="nickname")
"""Customary short name if applicable (e.g, GPLv3)"""
permissions = sgqlc.types.Field(sgqlc.types.non_null(sgqlc.types.list_of(LicenseRule)), graphql_name="permissions")
"""The permissions set by the license"""
pseudo_license = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="pseudoLicense")
"""Whether the license is a pseudo-license placeholder (e.g., other,
no-license)
"""
spdx_id = sgqlc.types.Field(String, graphql_name="spdxId")
"""Short identifier specified by <https://spdx.org/licenses>"""
url = sgqlc.types.Field(URI, graphql_name="url")
"""URL to the license on <https://choosealicense.com>"""
| License |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 13324,
"end": 13444
} | class ____(OpcodeWithArg): # Stores local variable number
_FLAGS = HAS_LOCAL | HAS_ARGUMENT
__slots__ = ()
| STORE_FAST |
python | pytorch__pytorch | torch/nn/modules/linear.py | {
"start": 8881,
"end": 12326
} | class ____(LazyModuleMixin, Linear):
r"""A :class:`torch.nn.Linear` module where `in_features` is inferred.
In this module, the `weight` and `bias` are of :class:`torch.nn.UninitializedParameter`
class. They will be initialized after the first call to ``forward`` is done and the
module will become a regular :class:`torch.nn.Linear` module. The ``in_features`` argument
of the :class:`Linear` is inferred from the ``input.shape[-1]``.
Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
on lazy modules and their limitations.
Args:
out_features: size of each output sample
bias: If set to ``False``, the layer will not learn an additive bias.
Default: ``True``
Attributes:
weight: the learnable weights of the module of shape
:math:`(\text{out\_features}, \text{in\_features})`. The values are
initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
:math:`k = \frac{1}{\text{in\_features}}`
bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
If :attr:`bias` is ``True``, the values are initialized from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{1}{\text{in\_features}}`
"""
cls_to_become = Linear # type: ignore[assignment]
# pyrefly: ignore [bad-override]
weight: UninitializedParameter
bias: UninitializedParameter # type: ignore[assignment]
def __init__(
self, out_features: int, bias: bool = True, device=None, dtype=None
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
# bias is hardcoded to False to avoid creating tensor
# that will soon be overwritten.
# pyrefly: ignore [bad-argument-type]
super().__init__(0, 0, False)
# pyrefly: ignore [bad-argument-type]
self.weight = UninitializedParameter(**factory_kwargs)
self.out_features = out_features
if bias:
# pyrefly: ignore [bad-argument-type]
self.bias = UninitializedParameter(**factory_kwargs)
def reset_parameters(self) -> None:
"""
Resets parameters based on their initialization used in ``__init__``.
"""
# pyrefly: ignore [bad-argument-type]
if not self.has_uninitialized_params() and self.in_features != 0:
super().reset_parameters()
def initialize_parameters(self, input) -> None: # type: ignore[override]
"""
Infers ``in_features`` based on ``input`` and initializes parameters.
"""
# pyrefly: ignore [bad-argument-type]
if self.has_uninitialized_params():
with torch.no_grad():
self.in_features = input.shape[-1]
self.weight.materialize((self.out_features, self.in_features))
if self.bias is not None:
self.bias.materialize((self.out_features,))
self.reset_parameters()
if self.in_features == 0:
assert input.shape[-1] == self.weight.shape[-1], (
f"The in_features inferred from input: {input.shape[-1]} "
f"is not equal to in_features from self.weight: "
f"{self.weight.shape[-1]}"
)
self.in_features = input.shape[-1]
# TODO: PartialLinear - maybe in sparse?
| LazyLinear |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 8253,
"end": 9462
} | class ____:
"""mixin to easily allow tests of both sparray and spmatrix"""
bsr_container = bsr_matrix
coo_container = coo_matrix
csc_container = csc_matrix
csr_container = csr_matrix
dia_container = dia_matrix
dok_container = dok_matrix
lil_container = lil_matrix
asdense = staticmethod(asmatrix)
def test_getrow(self):
assert_array_equal(self.datsp.getrow(1).toarray(), self.dat[[1], :])
assert_array_equal(self.datsp.getrow(-1).toarray(), self.dat[[-1], :])
def test_getcol(self):
assert_array_equal(self.datsp.getcol(1).toarray(), self.dat[:, [1]])
assert_array_equal(self.datsp.getcol(-1).toarray(), self.dat[:, [-1]])
def test_asfptype(self):
A = self.spcreator(arange(6,dtype='int32').reshape(2,3))
assert_equal(A.asfptype().dtype, np.dtype('float64'))
assert_equal(A.asfptype().format, A.format)
assert_equal(A.astype('int16').asfptype().dtype, np.dtype('float32'))
assert_equal(A.astype('complex128').asfptype().dtype, np.dtype('complex128'))
B = A.asfptype()
C = B.asfptype()
assert_(B is C)
# TODO test prune
# TODO test has_sorted_indices
| _MatrixMixin |
python | matplotlib__matplotlib | lib/matplotlib/tri/_triinterpolate.py | {
"start": 47876,
"end": 50284
} | class ____(_DOF_estimator_geom):
"""
The 'smoothest' approximation, df is computed through global minimization
of the bending energy:
E(f) = integral[(d2z/dx2 + d2z/dy2 + 2 d2z/dxdy)**2 dA]
"""
def __init__(self, Interpolator):
self._eccs = Interpolator._eccs
super().__init__(Interpolator)
def compute_dz(self):
"""
Elliptic solver for bending energy minimization.
Uses a dedicated 'toy' sparse Jacobi PCG solver.
"""
# Initial guess for iterative PCG solver.
dz_init = super().compute_dz()
Uf0 = np.ravel(dz_init)
reference_element = _ReducedHCT_Element()
J = CubicTriInterpolator._get_jacobian(self._tris_pts)
eccs = self._eccs
triangles = self._triangles
Uc = self.z[self._triangles]
# Building stiffness matrix and force vector in COO format
Kff_rows, Kff_cols, Kff_vals, Ff = reference_element.get_Kff_and_Ff(
J, eccs, triangles, Uc)
# Building sparse matrix and solving minimization problem
# We could use scipy.sparse direct solver; however to avoid this
# external dependency an implementation of a simple PCG solver with
# a simple diagonal Jacobi preconditioner is implemented.
tol = 1.e-10
n_dof = Ff.shape[0]
Kff_coo = _Sparse_Matrix_coo(Kff_vals, Kff_rows, Kff_cols,
shape=(n_dof, n_dof))
Kff_coo.compress_csc()
Uf, err = _cg(A=Kff_coo, b=Ff, x0=Uf0, tol=tol)
# If the PCG did not converge, we return the best guess between Uf0
# and Uf.
err0 = np.linalg.norm(Kff_coo.dot(Uf0) - Ff)
if err0 < err:
# Maybe a good occasion to raise a warning here ?
_api.warn_external("In TriCubicInterpolator initialization, "
"PCG sparse solver did not converge after "
"1000 iterations. `geom` approximation is "
"used instead of `min_E`")
Uf = Uf0
# Building dz from Uf
dz = np.empty([self._pts.shape[0], 2], dtype=np.float64)
dz[:, 0] = Uf[::2]
dz[:, 1] = Uf[1::2]
return dz
# The following private :class:_Sparse_Matrix_coo and :func:_cg provide
# a PCG sparse solver for (symmetric) elliptic problems.
| _DOF_estimator_min_E |
python | tensorflow__tensorflow | tensorflow/python/keras/keras_parameterized.py | {
"start": 1226,
"end": 17664
} | class ____(test.TestCase, parameterized.TestCase):
def tearDown(self):
keras.backend.clear_session()
super(TestCase, self).tearDown()
def run_with_all_saved_model_formats(
test_or_class=None,
exclude_formats=None):
"""Execute the decorated test with all Keras saved model formats).
This decorator is intended to be applied either to individual test methods in
a `keras_parameterized.TestCase` class, or directly to a test class that
extends it. Doing so will cause the contents of the individual test
method (or all test methods in the class) to be executed multiple times - once
for each Keras saved model format.
The Keras saved model formats include:
1. HDF5: 'h5'
2. SavedModel: 'tf'
Note: if stacking this decorator with absl.testing's parameterized decorators,
those should be at the bottom of the stack.
Various methods in `testing_utils` to get file path for saved models will
auto-generate a string of the two saved model formats. This allows unittests
to confirm the equivalence between the two Keras saved model formats.
For example, consider the following unittest:
```python
class MyTests(testing_utils.KerasTestCase):
@testing_utils.run_with_all_saved_model_formats
def test_foo(self):
save_format = testing_utils.get_save_format()
saved_model_dir = '/tmp/saved_model/'
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
if __name__ == "__main__":
tf.test.main()
```
This test tries to save the model into the formats of 'hdf5', 'h5', 'keras',
'tensorflow', and 'tf'.
We can also annotate the whole class if we want this to apply to all tests in
the class:
```python
@testing_utils.run_with_all_saved_model_formats
class MyTests(testing_utils.KerasTestCase):
def test_foo(self):
save_format = testing_utils.get_save_format()
saved_model_dir = '/tmp/saved_model/'
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = tf.keras.models.load_model(saved_model_dir)
if __name__ == "__main__":
tf.test.main()
```
Args:
test_or_class: test method or class to be annotated. If None,
this method returns a decorator that can be applied to a test method or
test class. If it is not None this returns the decorator applied to the
test or class.
exclude_formats: A collection of Keras saved model formats to not run.
(May also be a single format not wrapped in a collection).
Defaults to None.
Returns:
Returns a decorator that will run the decorated test method multiple times:
once for each desired Keras saved model format.
Raises:
ImportError: If abseil parameterized is not installed or not included as
a target dependency.
"""
# Exclude h5 save format if H5py isn't available.
if h5py is None:
exclude_formats.append(['h5'])
saved_model_formats = ['h5', 'tf', 'tf_no_traces']
params = [('_%s' % saved_format, saved_format)
for saved_format in saved_model_formats
if saved_format not in nest.flatten(exclude_formats)]
def single_method_decorator(f):
"""Decorator that constructs the test cases."""
# Use named_parameters so it can be individually run from the command line
@parameterized.named_parameters(*params)
@functools.wraps(f)
def decorated(self, saved_format, *args, **kwargs):
"""A run of a single test case w/ the specified model type."""
if saved_format == 'h5':
_test_h5_saved_model_format(f, self, *args, **kwargs)
elif saved_format == 'tf':
_test_tf_saved_model_format(f, self, *args, **kwargs)
elif saved_format == 'tf_no_traces':
_test_tf_saved_model_format_no_traces(f, self, *args, **kwargs)
else:
raise ValueError('Unknown model type: %s' % (saved_format,))
return decorated
return _test_or_class_decorator(test_or_class, single_method_decorator)
def _test_h5_saved_model_format(f, test_or_class, *args, **kwargs):
with testing_utils.saved_model_format_scope('h5'):
f(test_or_class, *args, **kwargs)
def _test_tf_saved_model_format(f, test_or_class, *args, **kwargs):
with testing_utils.saved_model_format_scope('tf'):
f(test_or_class, *args, **kwargs)
def _test_tf_saved_model_format_no_traces(f, test_or_class, *args, **kwargs):
with testing_utils.saved_model_format_scope('tf', save_traces=False):
f(test_or_class, *args, **kwargs)
def run_with_all_weight_formats(test_or_class=None, exclude_formats=None):
"""Runs all tests with the supported formats for saving weights."""
exclude_formats = exclude_formats or []
exclude_formats.append('tf_no_traces') # Only applies to saving models
return run_with_all_saved_model_formats(test_or_class, exclude_formats)
# TODO(kaftan): Possibly enable 'subclass_custom_build' when tests begin to pass
# it. Or perhaps make 'subclass' always use a custom build method.
def run_with_all_model_types(
    test_or_class=None,
    exclude_models=None):
  """Execute the decorated test with all Keras model types.

  This decorator is intended to be applied either to individual test methods in
  a `keras_parameterized.TestCase` class, or directly to a test class that
  extends it. Doing so will cause the contents of the individual test
  method (or all test methods in the class) to be executed multiple times - once
  for each Keras model type.

  The Keras model types are: ['functional', 'subclass', 'sequential']

  Note: if stacking this decorator with absl.testing's parameterized decorators,
  those should be at the bottom of the stack.

  Various methods in `testing_utils` to get models will auto-generate a model
  of the currently active Keras model type. This allows unittests to confirm
  the equivalence between different Keras models.

  For example, consider the following unittest:

  ```python
  class MyTests(testing_utils.KerasTestCase):

    @testing_utils.run_with_all_model_types(
      exclude_models = ['sequential'])
    def test_foo(self):
      model = testing_utils.get_small_mlp(1, 4, input_dim=3)
      optimizer = RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      metrics = ['mae']
      model.compile(optimizer, loss, metrics=metrics)

      inputs = np.zeros((10, 3))
      targets = np.zeros((10, 4))
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)

      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)

  if __name__ == "__main__":
    tf.test.main()
  ```

  This test tries building a small mlp as both a functional model and as a
  subclass model.

  We can also annotate the whole class if we want this to apply to all tests in
  the class:
  ```python
  @testing_utils.run_with_all_model_types(exclude_models = ['sequential'])
  class MyTests(testing_utils.KerasTestCase):

    def test_foo(self):
      model = testing_utils.get_small_mlp(1, 4, input_dim=3)
      optimizer = RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      metrics = ['mae']
      model.compile(optimizer, loss, metrics=metrics)

      inputs = np.zeros((10, 3))
      targets = np.zeros((10, 4))
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)

      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)

  if __name__ == "__main__":
    tf.test.main()
  ```

  Args:
    test_or_class: test method or class to be annotated. If None,
      this method returns a decorator that can be applied to a test method or
      test class. If it is not None this returns the decorator applied to the
      test or class.
    exclude_models: A collection of Keras model types to not run.
      (May also be a single model type not wrapped in a collection).
      Defaults to None.

  Returns:
    Returns a decorator that will run the decorated test method multiple times:
    once for each desired Keras model type.

  Raises:
    ImportError: If abseil parameterized is not installed or not included as
      a target dependency.
  """
  all_model_types = ['functional', 'subclass', 'sequential']
  excluded = nest.flatten(exclude_models)
  params = [('_%s' % model_type, model_type)
            for model_type in all_model_types
            if model_type not in excluded]

  def single_method_decorator(f):
    """Decorator that constructs the test cases."""

    # Use named_parameters so it can be individually run from the command line
    @parameterized.named_parameters(*params)
    @functools.wraps(f)
    def decorated(self, model_type, *args, **kwargs):
      """A run of a single test case w/ the specified model type."""
      # Dispatch to the per-model-type runner; unknown types are an error.
      runners = {
          'functional': _test_functional_model_type,
          'subclass': _test_subclass_model_type,
          'sequential': _test_sequential_model_type,
      }
      runner = runners.get(model_type)
      if runner is None:
        raise ValueError('Unknown model type: %s' % (model_type,))
      runner(f, self, *args, **kwargs)

    return decorated

  return _test_or_class_decorator(test_or_class, single_method_decorator)
def _test_functional_model_type(f, test_or_class, *args, **kwargs):
  """Runs test `f` with the 'functional' Keras model type active."""
  type_scope = testing_utils.model_type_scope('functional')
  with type_scope:
    f(test_or_class, *args, **kwargs)
def _test_subclass_model_type(f, test_or_class, *args, **kwargs):
  """Runs test `f` with the 'subclass' Keras model type active."""
  type_scope = testing_utils.model_type_scope('subclass')
  with type_scope:
    f(test_or_class, *args, **kwargs)
def _test_sequential_model_type(f, test_or_class, *args, **kwargs):
  """Runs test `f` with the 'sequential' Keras model type active."""
  type_scope = testing_utils.model_type_scope('sequential')
  with type_scope:
    f(test_or_class, *args, **kwargs)
def run_all_keras_modes(test_or_class=None,
                        config=None,
                        always_skip_v1=False,
                        always_skip_eager=False,
                        **kwargs):
  """Execute the decorated test with all keras execution modes.

  This decorator is intended to be applied either to individual test methods in
  a `keras_parameterized.TestCase` class, or directly to a test class that
  extends it. Doing so will cause the contents of the individual test
  method (or all test methods in the class) to be executed multiple times -
  once executing in legacy graph mode, once running eagerly and with
  `should_run_eagerly` returning True, and once running eagerly with
  `should_run_eagerly` returning False.

  If Tensorflow v2 behavior is enabled, legacy graph mode will be skipped, and
  the test will only run twice.

  Note: if stacking this decorator with absl.testing's parameterized decorators,
  those should be at the bottom of the stack.

  For example, consider the following unittest:

  ```python
  class MyTests(testing_utils.KerasTestCase):

    @testing_utils.run_all_keras_modes
    def test_foo(self):
      model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
      optimizer = RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      metrics = ['mae']
      model.compile(
          optimizer, loss, metrics=metrics,
          run_eagerly=testing_utils.should_run_eagerly())

      inputs = np.zeros((10, 3))
      targets = np.zeros((10, 4))
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)

      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)

  if __name__ == "__main__":
    tf.test.main()
  ```

  This test will try compiling & fitting the small functional mlp using all
  three Keras execution modes.

  Args:
    test_or_class: test method or class to be annotated. If None,
      this method returns a decorator that can be applied to a test method or
      test class. If it is not None this returns the decorator applied to the
      test or class.
    config: An optional config_pb2.ConfigProto to use to configure the
      session when executing graphs.
    always_skip_v1: If True, does not try running the legacy graph mode even
      when Tensorflow v2 behavior is not enabled.
    always_skip_eager: If True, does not execute the decorated test
      with eager execution modes.
    **kwargs: Additional kwargs for configuring tests for
      in-progress Keras behaviors/ refactorings that we haven't fully
      rolled out yet

  Returns:
    Returns a decorator that will run the decorated test method multiple times.

  Raises:
    ImportError: If abseil parameterized is not installed or not included as
      a target dependency.
  """
  if kwargs:
    raise ValueError('Unrecognized keyword args: {}'.format(kwargs))

  # 'v2_function' always runs; eager and legacy-graph modes are optional.
  params = [('_v2_function', 'v2_function')]
  if not always_skip_eager:
    params.append(('_v2_eager', 'v2_eager'))
  if not (always_skip_v1 or tf2.enabled()):
    params.append(('_v1_session', 'v1_session'))

  def single_method_decorator(f):
    """Decorator that constructs the test cases."""

    # Use named_parameters so it can be individually run from the command line
    @parameterized.named_parameters(*params)
    @functools.wraps(f)
    def decorated(self, run_mode, *args, **kwargs):
      """A run of a single test case w/ specified run mode."""
      if run_mode == 'v1_session':
        _v1_session_test(f, self, config, *args, **kwargs)
      elif run_mode == 'v2_eager':
        _v2_eager_test(f, self, *args, **kwargs)
      elif run_mode == 'v2_function':
        _v2_function_test(f, self, *args, **kwargs)
      else:
        # Bug fix: this previously `return`ed the ValueError instead of
        # raising it, silently swallowing unknown run modes.
        raise ValueError('Unknown run mode %s' % run_mode)

    return decorated

  return _test_or_class_decorator(test_or_class, single_method_decorator)
def _v1_session_test(f, test_or_class, config, *args, **kwargs):
  """Runs test `f` in legacy graph mode, inside a (non-eager) test session."""
  graph = ops.get_default_graph()
  with graph.as_default(), \
      testing_utils.run_eagerly_scope(False), \
      test_or_class.test_session(config=config):
    f(test_or_class, *args, **kwargs)
def _v2_eager_test(f, test_or_class, *args, **kwargs):
  """Runs test `f` eagerly, with `should_run_eagerly()` returning True."""
  with context.eager_mode(), testing_utils.run_eagerly_scope(True):
    f(test_or_class, *args, **kwargs)
def _v2_function_test(f, test_or_class, *args, **kwargs):
  """Runs test `f` eagerly, with `should_run_eagerly()` returning False."""
  with context.eager_mode(), testing_utils.run_eagerly_scope(False):
    f(test_or_class, *args, **kwargs)
def _test_or_class_decorator(test_or_class, single_method_decorator):
"""Decorate a test or class with a decorator intended for one method.
If the test_or_class is a class:
This will apply the decorator to all test methods in the class.
If the test_or_class is an iterable of already-parameterized test cases:
This will apply the decorator to all the cases, and then flatten the
resulting cross-product of test cases. This allows stacking the Keras
parameterized decorators w/ each other, and to apply them to test methods
that have already been marked with an absl parameterized decorator.
Otherwise, treat the obj as a single method and apply the decorator directly.
Args:
test_or_class: A test method (that may have already been decorated with a
parameterized decorator, or a test class that extends
keras_parameterized.TestCase
single_method_decorator:
A parameterized decorator intended for a single test method.
Returns:
The decorated result.
"""
def _decorate_test_or_class(obj):
if isinstance(obj, collections.abc.Iterable):
return itertools.chain.from_iterable(
single_method_decorator(method) for method in obj)
if isinstance(obj, type):
cls = obj
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith(
unittest.TestLoader.testMethodPrefix):
setattr(cls, name, single_method_decorator(value))
cls = type(cls).__new__(type(cls), cls.__name__, cls.__bases__,
cls.__dict__.copy())
return cls
return single_method_decorator(obj)
if test_or_class is not None:
return _decorate_test_or_class(test_or_class)
return _decorate_test_or_class
| TestCase |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/implementation/utils.py | {
"start": 21752,
"end": 23192
} | class ____(
NamedTuple(
"_ExecutionParams",
[
("selector", JobSubsetSelector),
("run_config", Mapping[str, object]),
("mode", Optional[str]),
("execution_metadata", "ExecutionMetadata"),
("step_keys", Optional[Sequence[str]]),
],
)
):
def __new__(
cls,
selector: JobSubsetSelector,
run_config: Optional[Mapping[str, object]],
mode: Optional[str],
execution_metadata: "ExecutionMetadata",
step_keys: Optional[Sequence[str]],
):
check.opt_list_param(step_keys, "step_keys", of_type=str)
return super().__new__(
cls,
selector=check.inst_param(selector, "selector", JobSubsetSelector),
run_config=check.opt_mapping_param(run_config, "run_config", key_type=str),
mode=check.opt_str_param(mode, "mode"),
execution_metadata=check.inst_param(
execution_metadata, "execution_metadata", ExecutionMetadata
),
step_keys=step_keys,
)
def to_graphql_input(self) -> Mapping[str, Any]:
return {
"selector": self.selector.to_graphql_input(),
"runConfigData": self.run_config,
"mode": self.mode,
"executionMetadata": self.execution_metadata.to_graphql_input(),
"stepKeys": self.step_keys,
}
| ExecutionParams |
python | scrapy__scrapy | tests/test_loader.py | {
"start": 6217,
"end": 6314
} | class ____(InitializationTestMixin):
item_class = NameDataClass
| TestInitializationFromDataClass |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 12445,
"end": 12586
} | class ____(_TestIDCTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 5
self.type = 4
| TestIDCTIVFloat |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_meta.py | {
"start": 2446,
"end": 2777
} | class ____(nn.Module):
def __init__(self, device: torch.device):
super().__init__()
self.lin1 = MyLinear(2, 2, bias=False, device=device)
self.lin2 = MyLinear(2, 2, bias=False, device=device)
self.buf_mod = MyBuffer(device)
def forward(self, x):
return self.lin2(self.lin1(x))
| MyModel |
python | Textualize__textual | docs/examples/styles/display.py | {
"start": 65,
"end": 325
} | class ____(App):
CSS_PATH = "display.tcss"
def compose(self):
yield Static("Widget 1")
yield Static("Widget 2", classes="remove")
yield Static("Widget 3")
if __name__ == "__main__":
app = DisplayApp()
app.run()
| DisplayApp |
python | kamyu104__LeetCode-Solutions | Python/design-a-todo-list.py | {
"start": 388,
"end": 2231
} | class ____(object):
def __init__(self):
self.__tasks = []
self.__user_task_ids = collections.defaultdict(SortedList)
def addTask(self, userId, taskDescription, dueDate, tags):
"""
:type userId: int
:type taskDescription: str
:type dueDate: int
:type tags: List[str]
:rtype: int
"""
self.__tasks.append([dueDate, taskDescription, set(tags)])
self.__user_task_ids[userId].add((dueDate, len(self.__tasks)))
return len(self.__tasks)
def getAllTasks(self, userId):
"""
:type userId: int
:rtype: List[str]
"""
if userId not in self.__user_task_ids:
return []
return [self.__tasks[i-1][1] for _, i in self.__user_task_ids[userId]]
def getTasksForTag(self, userId, tag):
"""
:type userId: int
:type tag: str
:rtype: List[str]
"""
if userId not in self.__user_task_ids:
return []
return [self.__tasks[i-1][1] for _, i in self.__user_task_ids[userId] if tag in self.__tasks[i-1][-1]]
def completeTask(self, userId, taskId):
"""
:type userId: int
:type taskId: int
:rtype: None
"""
if not (taskId-1 < len(self.__tasks) and userId in self.__user_task_ids):
return
self.__user_task_ids[userId].discard((self.__tasks[taskId-1][0], taskId))
# Time: ctor: O(1)
# addTask: O(l + t * logn), n is the number of user's tasks, l is the max length of a task, t is the number of tags
# getAllTasks: O(r), r is the length of result
# getTasksForTag: O(r), r is the length of result
# completeTask: O(l + t * logn)
# Space: O(n * (l + t))
from sortedcontainers import SortedList
# sortedlist
| TodoList |
python | astropy__astropy | astropy/wcs/wcs.py | {
"start": 9058,
"end": 141876
} | class ____(FITSWCSAPIMixin, WCSBase):
"""WCS objects perform standard WCS transformations, and correct for
`SIP`_ and `distortion paper`_ table-lookup transformations, based
on the WCS keywords and supplementary data read from a FITS file.
See also: https://docs.astropy.org/en/stable/wcs/
Parameters
----------
header : `~astropy.io.fits.Header`, `~astropy.io.fits.hdu.image.PrimaryHDU`, `~astropy.io.fits.hdu.image.ImageHDU`, str, dict-like, or None, optional
If *header* is not provided or None, the object will be
initialized to default values.
fobj : `~astropy.io.fits.HDUList`, optional
It is needed when header keywords point to a `distortion
paper`_ lookup table stored in a different extension.
key : str, optional
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the
``\"a\"`` part of the ``CTYPEia`` cards. *key* may only be
provided if *header* is also provided.
minerr : float, optional
The minimum value a distortion correction must have in order
to be applied. If the value of ``CQERRja`` is smaller than
*minerr*, the corresponding distortion is not applied.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions
of the WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`astropy:relaxread` for details.
naxis : int or sequence, optional
Extracts specific coordinate axes using
:meth:`~astropy.wcs.Wcsprm.sub`. If a header is provided, and
*naxis* is not ``None``, *naxis* will be passed to
:meth:`~astropy.wcs.Wcsprm.sub` in order to select specific
axes from the header. See :meth:`~astropy.wcs.Wcsprm.sub` for
more details about this parameter.
keysel : sequence of str, optional
A sequence of flags used to select the keyword types
considered by wcslib. When ``None``, only the standard image
header keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following
strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
colsel : sequence of int, optional
A sequence of table column numbers used to restrict the WCS
transformations considered to only those pertaining to the
specified columns. If `None`, there is no restriction.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting object to fix any non-standard uses in the
header. `FITSFixedWarning` Warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
preserve_units : bool, optional
By default, some units are converted to SI, for example spectral
axes in units of nm might be converted to m, and celestial axes
in units of arcsec might be converted to deg. If ``preserve_units``
is set to `True`, the original units will be preserved.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
ValueError
Lookup table distortion present in the header but *fobj* was
not provided.
Notes
-----
1. astropy.wcs supports arbitrary *n* dimensions for the core WCS
(the transformations handled by WCSLIB). However, the
`distortion paper`_ lookup table and `SIP`_ distortions must be
two dimensional. Therefore, if you try to create a WCS object
where the core WCS has a different number of dimensions than 2
and that object also contains a `distortion paper`_ lookup
table or `SIP`_ distortion, a `ValueError`
exception will be raised. To avoid this, consider using the
*naxis* kwarg to select two dimensions from the core WCS.
2. The number of coordinate axes in the transformation is not
determined directly from the ``NAXIS`` keyword but instead from
the highest of:
- ``NAXIS`` keyword
- ``WCSAXESa`` keyword
- The highest axis number in any parameterized WCS keyword.
The keyvalue, as well as the keyword, must be
syntactically valid otherwise it will not be considered.
If none of these keyword types is present, i.e. if the header
only contains auxiliary WCS keywords for a particular
coordinate representation, then no coordinate description is
constructed for it.
The number of axes, which is set as the ``naxis`` member, may
differ for different coordinate representations of the same
image.
3. When the header includes duplicate keywords, in most cases the
last encountered is used.
4. `~astropy.wcs.Wcsprm.set` is called immediately after
construction, so any invalid keywords or transformations will
be raised by the constructor, not when subsequently calling a
transformation method.
"""
def __init__(
self,
header=None,
fobj=None,
key=" ",
minerr=0.0,
relax=True,
naxis=None,
keysel=None,
colsel=None,
fix=True,
translate_units="",
_do_set=True,
preserve_units=False,
):
close_fds = []
self._preserve_units = preserve_units
# these parameters are stored to be used when unpickling a WCS object:
self._init_kwargs = {
"keysel": copy.copy(keysel),
"colsel": copy.copy(colsel),
}
if header is None:
if naxis is None:
naxis = 2
wcsprm = Wcsprm(
header=None,
key=key,
relax=relax,
naxis=naxis,
preserve_units=preserve_units,
)
self.naxis = wcsprm.naxis
# Set some reasonable defaults.
det2im = (None, None)
cpdis = (None, None)
sip = None
else:
keysel_flags = _parse_keysel(keysel)
if isinstance(header, (str, bytes)):
try:
is_path = os.path.exists(header)
except (OSError, ValueError):
is_path = False
if is_path:
if fobj is not None:
raise ValueError(
"Can not provide both a FITS filename to "
"argument 1 and a FITS file object to argument 2"
)
fobj = fits.open(header)
close_fds.append(fobj)
header = fobj[0].header
elif isinstance(header, fits.hdu.image._ImageBaseHDU):
header = header.header
elif not isinstance(header, fits.Header):
try:
# Accept any dict-like object
orig_header = header
header = fits.Header()
for dict_key in orig_header.keys():
header[dict_key] = orig_header[dict_key]
except TypeError:
raise TypeError(
"header must be a string, an astropy.io.fits.Header "
"object, or a dict-like object"
)
if isinstance(header, fits.Header):
header_string = header.tostring().rstrip()
else:
header_string = header
# Importantly, header is a *copy* of the passed-in header
# because we will be modifying it
if isinstance(header_string, str):
header_bytes = header_string.encode("ascii")
else:
header_bytes = header_string
header_string = header_string.decode("ascii")
if not (fobj is None or isinstance(fobj, fits.HDUList)):
raise AssertionError(
"'fobj' must be either None or an astropy.io.fits.HDUList object."
)
est_naxis = 2
try:
tmp_header = fits.Header.fromstring(header_string)
self._remove_sip_kw(tmp_header)
tmp_header_bytes = tmp_header.tostring().rstrip()
if isinstance(tmp_header_bytes, str):
tmp_header_bytes = tmp_header_bytes.encode("ascii")
tmp_wcsprm = Wcsprm(
header=tmp_header_bytes,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
warnings=False,
hdulist=fobj,
preserve_units=preserve_units,
)
if naxis is not None:
try:
tmp_wcsprm = tmp_wcsprm.sub(naxis)
except ValueError:
pass
est_naxis = tmp_wcsprm.naxis if tmp_wcsprm.naxis else 2
except NoWcsKeywordsFoundError:
pass
self.naxis = est_naxis
header = fits.Header.fromstring(header_string)
det2im = self._read_det2im_kw(header, fobj, err=minerr)
cpdis = self._read_distortion_kw(header, fobj, dist="CPDIS", err=minerr)
self._fix_pre2012_scamp_tpv(header)
sip = self._read_sip_kw(header, wcskey=key)
self._remove_sip_kw(header)
header_string = header.tostring()
header_string = header_string.replace("END" + " " * 77, "")
if isinstance(header_string, str):
header_bytes = header_string.encode("ascii")
else:
header_bytes = header_string
header_string = header_string.decode("ascii")
try:
wcsprm = Wcsprm(
header=header_bytes,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
hdulist=fobj,
preserve_units=preserve_units,
)
except NoWcsKeywordsFoundError:
# The header may have SIP or distortions, but no core
# WCS. That isn't an error -- we want a "default"
# (identity) core Wcs transformation in that case.
if colsel is None:
wcsprm = Wcsprm(
header=None,
key=key,
relax=relax,
keysel=keysel_flags,
colsel=colsel,
hdulist=fobj,
preserve_units=preserve_units,
)
else:
raise
if naxis is not None:
wcsprm = wcsprm.sub(naxis)
self.naxis = wcsprm.naxis
if wcsprm.naxis != 2 and (
det2im[0] or det2im[1] or cpdis[0] or cpdis[1] or sip
):
raise ValueError(
f"""
FITS WCS distortion paper lookup tables and SIP distortions only work
in 2 dimensions. However, WCSLIB has detected {wcsprm.naxis} dimensions in the
core WCS keywords. To use core WCS in conjunction with FITS WCS
distortion paper lookup tables or SIP distortion, you must select or
reduce these to 2 dimensions using the naxis kwarg.
"""
)
header_naxis = header.get("NAXIS", None)
if header_naxis is not None and header_naxis < wcsprm.naxis:
warnings.warn(
f"The WCS transformation has more axes ({wcsprm.naxis:d}) than the "
f"image it is associated with ({header_naxis:d})",
FITSFixedWarning,
)
WCSBase.__init__(self, sip, cpdis, wcsprm, det2im)
if fix:
if header is None:
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
self.fix(translate_units=translate_units)
else:
self.fix(translate_units=translate_units)
if _do_set:
self.wcs.set()
for fd in close_fds:
fd.close()
self._get_naxis(header)
self._pixel_bounds = None
def __copy__(self):
new_copy = self.__class__()
WCSBase.__init__(
new_copy,
self.sip,
(self.cpdis1, self.cpdis2),
self.wcs,
(self.det2im1, self.det2im2),
)
new_copy.__dict__.update(self.__dict__)
return new_copy
def __deepcopy__(self, memo):
from copy import deepcopy
new_copy = self.__class__()
new_copy.naxis = deepcopy(self.naxis, memo)
WCSBase.__init__(
new_copy,
deepcopy(self.sip, memo),
(deepcopy(self.cpdis1, memo), deepcopy(self.cpdis2, memo)),
deepcopy(self.wcs, memo),
(deepcopy(self.det2im1, memo), deepcopy(self.det2im2, memo)),
)
for key, val in self.__dict__.items():
new_copy.__dict__[key] = deepcopy(val, memo)
return new_copy
def copy(self):
"""
Return a shallow copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
.. warning::
Use `deepcopy` instead of `copy` unless you know why you need a
shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Return a deep copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
"""
return copy.deepcopy(self)
def sub(self, axes=None):
copy = self.deepcopy()
# We need to know which axes have been dropped, but there is no easy
# way to do this with the .sub function, so instead we assign UUIDs to
# the CNAME parameters in copy.wcs. We can later access the original
# CNAME properties from self.wcs.
cname_uuid = [str(uuid.uuid4()) for i in range(copy.wcs.naxis)]
copy.wcs.cname = cname_uuid
# Subset the WCS
copy.wcs = copy.wcs.sub(axes)
copy.naxis = copy.wcs.naxis
# Construct a list of dimensions from the original WCS in the order
# in which they appear in the final WCS.
keep = [
cname_uuid.index(cname) if cname in cname_uuid else None
for cname in copy.wcs.cname
]
# Restore the original CNAMEs
copy.wcs.cname = ["" if i is None else self.wcs.cname[i] for i in keep]
# Subset pixel_shape and pixel_bounds
if self.pixel_shape:
copy.pixel_shape = tuple(
None if i is None else self.pixel_shape[i] for i in keep
)
if self.pixel_bounds:
copy.pixel_bounds = [
None if i is None else self.pixel_bounds[i] for i in keep
]
return copy
sub.__doc__ = Wcsprm.sub.__doc__
def _fix_scamp(self):
"""
Remove SCAMP's PVi_m distortion parameters if SIP distortion parameters
are also present. Some projects (e.g., Palomar Transient Factory)
convert SCAMP's distortion parameters (which abuse the PVi_m cards) to
SIP. However, wcslib gets confused by the presence of both SCAMP and
SIP distortion parameters.
See https://github.com/astropy/astropy/issues/299.
SCAMP uses TAN projection exclusively. The case of CTYPE ending
in -TAN should have been handled by ``_fix_pre2012_scamp_tpv()`` before
calling this function.
"""
if self.wcs is None:
return
# Delete SIP if CTYPE explicitly has '-TPV' code:
ctype = [ct.strip().upper() for ct in self.wcs.ctype]
if sum(ct.endswith("-TPV") for ct in ctype) == 2:
if self.sip is not None:
self.sip = None
warnings.warn(
"Removed redundant SIP distortion parameters "
"because CTYPE explicitly specifies TPV distortions",
FITSFixedWarning,
)
return
# Nothing to be done if no PV parameters attached since SCAMP
# encodes distortion coefficients using PV keywords
pv = self.wcs.get_pv()
if not pv:
return
# Nothing to be done if axes don't use SIP distortion parameters
if self.sip is None:
return
# Loop over distinct values of `i' index
has_scamp = False
for i in {v[0] for v in pv}:
# Get all values of `j' index for this value of `i' index
js = tuple(v[1] for v in pv if v[0] == i)
if "-TAN" in self.wcs.ctype[i - 1].upper() and js and max(js) >= 5:
# TAN projection *may* use PVi_j with j up to 4 - see
# Sections 2.5, 2.6, and Table 13
# in https://doi.org/10.1051/0004-6361:20021327
has_scamp = True
break
if has_scamp and all(ct.endswith("-SIP") for ct in ctype):
# Prefer SIP - see recommendations in Section 7 in
# http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf
self.wcs.set_pv([])
warnings.warn(
"Removed redundant SCAMP distortion parameters "
"because SIP parameters are also present",
FITSFixedWarning,
)
return
def fix(self, translate_units="", naxis=None):
"""
Perform the fix operations from wcslib, and warn about any
changes it has made.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of
non-standard unit strings to perform. By default,
performs none.
Although ``"S"`` is commonly used to represent seconds,
its translation to ``"s"`` is potentially unsafe since the
standard recognizes ``"S"`` formally as Siemens, however
rarely that may be used. The same applies to ``"H"`` for
hours (Henry), and ``"D"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to
``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to
``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to
``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas
``'shd'`` does all of them.
naxis : int array, optional
Image axis lengths. If this array is set to zero or
``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be
invoked.
"""
if self.wcs is not None:
self._fix_scamp()
fixes = self.wcs.fix(translate_units, naxis)
for key, val in fixes.items():
if val != "No change":
if (
key == "datfix"
and "1858-11-17" in val
and not np.count_nonzero(self.wcs.mjdref)
):
continue
warnings.warn(
f"'{key}' made the change '{val}'.",
FITSFixedWarning,
)
def calc_footprint(self, header=None, undistort=True, axes=None, center=True):
"""
Calculates the footprint of the image on the sky.
A footprint is defined as the positions of the corners of the
image on the sky after all available distortions have been
applied.
Parameters
----------
header : `~astropy.io.fits.Header` object, optional
Used to get ``NAXIS1`` and ``NAXIS2``
header and axes are mutually exclusive, alternative ways
to provide the same information.
undistort : bool, optional
If `True`, take SIP and distortion lookup table into
account
axes : (int, int), optional
If provided, use the given sequence as the shape of the
image. Otherwise, use the ``NAXIS1`` and ``NAXIS2``
keywords from the header that was used to create this
`WCS` object.
center : bool, optional
If `True` use the center of the pixel, otherwise use the corner.
Returns
-------
coord : (4, 2) array of (*x*, *y*) coordinates.
The order is clockwise starting with the bottom left corner.
"""
if axes is not None:
naxis1, naxis2 = axes
else:
if header is None:
try:
# classes that inherit from WCS and define naxis1/2
# do not require a header parameter
naxis1, naxis2 = self.pixel_shape
except (AttributeError, TypeError):
warnings.warn(
"Need a valid header in order to calculate footprint\n",
AstropyUserWarning,
)
return None
else:
naxis1 = header.get("NAXIS1", None)
naxis2 = header.get("NAXIS2", None)
if naxis1 is None or naxis2 is None:
raise ValueError("Image size could not be determined.")
if center:
corners = np.array(
[[1, 1], [1, naxis2], [naxis1, naxis2], [naxis1, 1]], dtype=np.float64
)
else:
corners = np.array(
[
[0.5, 0.5],
[0.5, naxis2 + 0.5],
[naxis1 + 0.5, naxis2 + 0.5],
[naxis1 + 0.5, 0.5],
],
dtype=np.float64,
)
if undistort:
return self.all_pix2world(corners, 1)
else:
return self.wcs_pix2world(corners, 1)
def _read_det2im_kw(self, header, fobj, err=0.0):
"""
Create a `distortion paper`_ type lookup table for detector to
image plane correction.
"""
if fobj is None:
return (None, None)
if not isinstance(fobj, fits.HDUList):
return (None, None)
try:
axiscorr = header["AXISCORR"]
d2imdis = self._read_d2im_old_format(header, fobj, axiscorr)
return d2imdis
except KeyError:
pass
dist = "D2IMDIS"
d_kw = "D2IM"
err_kw = "D2IMERR"
tables = {}
for i in range(1, self.naxis + 1):
d_error = header.get(err_kw + str(i), 0.0)
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
if dis == "lookup":
del header[distortion]
assert isinstance(fobj, fits.HDUList), (
"An astropy.io.fits.HDUList"
"is required for Lookup table distortion."
)
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + ".EXTVER"
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + f".AXIS.{i:d}"
if i == header[dp_axis_key]:
d_data = fobj["D2IMARR", d_extver].data
else:
d_data = (fobj["D2IMARR", d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj["D2IMARR", d_extver].header
d_crpix = (d_header.get("CRPIX1", 0.0), d_header.get("CRPIX2", 0.0))
d_crval = (d_header.get("CRVAL1", 0.0), d_header.get("CRVAL2", 0.0))
d_cdelt = (d_header.get("CDELT1", 1.0), d_header.get("CDELT2", 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
else:
warnings.warn(
"Polynomial distortion is not implemented.\n",
AstropyUserWarning,
)
for key in set(header):
if key.startswith(dp + "."):
del header[key]
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _read_d2im_old_format(self, header, fobj, axiscorr):
warnings.warn(
"The use of ``AXISCORR`` for D2IM correction has been"
" deprecated.`~astropy.wcs` will read in files with ``AXISCORR`` but"
" ``to_fits()`` will write out files without it.",
AstropyDeprecationWarning,
)
cpdis = [None, None]
crpix = [0.0, 0.0]
crval = [0.0, 0.0]
cdelt = [1.0, 1.0]
try:
d2im_data = fobj[("D2IMARR", 1)].data
except KeyError:
return (None, None)
except AttributeError:
return (None, None)
d2im_data = np.array([d2im_data])
d2im_hdr = fobj[("D2IMARR", 1)].header
naxis = d2im_hdr["NAXIS"]
for i in range(1, naxis + 1):
crpix[i - 1] = d2im_hdr.get("CRPIX" + str(i), 0.0)
crval[i - 1] = d2im_hdr.get("CRVAL" + str(i), 0.0)
cdelt[i - 1] = d2im_hdr.get("CDELT" + str(i), 1.0)
cpdis = DistortionLookupTable(d2im_data, crpix, crval, cdelt)
if axiscorr == 1:
return (cpdis, None)
elif axiscorr == 2:
return (None, cpdis)
else:
warnings.warn("Expected AXISCORR to be 1 or 2", AstropyUserWarning)
return (None, None)
def _write_det2im(self, hdulist):
"""
Writes a `distortion paper`_ type lookup table to the given
`~astropy.io.fits.HDUList`.
"""
if self.det2im1 is None and self.det2im2 is None:
return
dist = "D2IMDIS"
d_kw = "D2IM"
def write_d2i(num, det2im):
if det2im is None:
return
hdulist[0].header[f"{dist}{num:d}"] = (
"LOOKUP",
"Detector to image correction type",
)
hdulist[0].header[f"{d_kw}{num:d}.EXTVER"] = (
num,
"Version number of WCSDVARR extension",
)
hdulist[0].header[f"{d_kw}{num:d}.NAXES"] = (
len(det2im.data.shape),
"Number of independent variables in D2IM function",
)
for i in range(det2im.data.ndim):
jth = {1: "1st", 2: "2nd", 3: "3rd"}.get(i + 1, f"{i + 1}th")
hdulist[0].header[f"{d_kw}{num:d}.AXIS.{i + 1:d}"] = (
i + 1,
f"Axis number of the {jth} variable in a D2IM function",
)
image = fits.ImageHDU(det2im.data, name="D2IMARR")
header = image.header
header["CRPIX1"] = (det2im.crpix[0], "Coordinate system reference pixel")
header["CRPIX2"] = (det2im.crpix[1], "Coordinate system reference pixel")
header["CRVAL1"] = (
det2im.crval[0],
"Coordinate system value at reference pixel",
)
header["CRVAL2"] = (
det2im.crval[1],
"Coordinate system value at reference pixel",
)
header["CDELT1"] = (det2im.cdelt[0], "Coordinate increment along axis")
header["CDELT2"] = (det2im.cdelt[1], "Coordinate increment along axis")
image.ver = int(hdulist[0].header[f"{d_kw}{num:d}.EXTVER"])
hdulist.append(image)
write_d2i(1, self.det2im1)
write_d2i(2, self.det2im2)
    def _read_distortion_kw(self, header, fobj, dist="CPDIS", err=0.0):
        """
        Reads `distortion paper`_ table-lookup keywords and data, and
        returns a 2-tuple of `~astropy.wcs.DistortionLookupTable`
        objects.

        If no `distortion paper`_ keywords are found, ``(None, None)``
        is returned.

        Parameters
        ----------
        header : `~astropy.io.fits.Header` or str or bytes
            Header to read from; string headers are not parsed and give
            ``(None, None)``.  Consumed keywords are deleted in place.
        fobj : `~astropy.io.fits.HDUList`
            HDU list containing the ``WCSDVARR`` lookup-table extensions.
        dist : {'CPDIS', 'CQDIS'}, optional
            Which distortion keyword family to read (prior vs. sequent).
        err : float, optional
            Axes whose ``CPERR<i>``/``CQERR<i>`` value is below this
            threshold are skipped.
        """
        if isinstance(header, (str, bytes)):
            return (None, None)
        # Prior ("P") and sequent ("Q") distortions use parallel keyword sets.
        if dist == "CPDIS":
            d_kw = "DP"
            err_kw = "CPERR"
        else:
            d_kw = "DQ"
            err_kw = "CQERR"
        tables = {}
        for i in range(1, self.naxis + 1):
            d_error_key = err_kw + str(i)
            if d_error_key in header:
                d_error = header[d_error_key]
                del header[d_error_key]
            else:
                d_error = 0.0
            # Corrections with too-small error are deliberately ignored.
            if d_error < err:
                tables[i] = None
                continue
            distortion = dist + str(i)
            if distortion in header:
                dis = header[distortion].lower()
                del header[distortion]
                if dis == "lookup":
                    if not isinstance(fobj, fits.HDUList):
                        raise ValueError(
                            "an astropy.io.fits.HDUList is "
                            "required for Lookup table distortion."
                        )
                    dp = (d_kw + str(i)).strip()
                    dp_extver_key = dp + ".EXTVER"
                    if dp_extver_key in header:
                        d_extver = header[dp_extver_key]
                        del header[dp_extver_key]
                    else:
                        d_extver = 1
                    dp_axis_key = dp + f".AXIS.{i:d}"
                    # Transpose when the stored axis number does not match
                    # the axis being read.
                    if i == header[dp_axis_key]:
                        d_data = fobj["WCSDVARR", d_extver].data
                    else:
                        d_data = (fobj["WCSDVARR", d_extver].data).transpose()
                    del header[dp_axis_key]
                    d_header = fobj["WCSDVARR", d_extver].header
                    d_crpix = (d_header.get("CRPIX1", 0.0), d_header.get("CRPIX2", 0.0))
                    d_crval = (d_header.get("CRVAL1", 0.0), d_header.get("CRVAL2", 0.0))
                    d_cdelt = (d_header.get("CDELT1", 1.0), d_header.get("CDELT2", 1.0))
                    d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
                    tables[i] = d_lookup
                    # Remove any remaining record-valued keywords for this axis.
                    for key in set(header):
                        if key.startswith(dp + "."):
                            del header[key]
                else:
                    warnings.warn(
                        "Polynomial distortion is not implemented.\n",
                        AstropyUserWarning,
                    )
            else:
                tables[i] = None
        if not tables:
            return (None, None)
        else:
            return (tables.get(1), tables.get(2))
def _write_distortion_kw(self, hdulist, dist="CPDIS"):
"""
Write out `distortion paper`_ keywords to the given
`~astropy.io.fits.HDUList`.
"""
if self.cpdis1 is None and self.cpdis2 is None:
return
if dist == "CPDIS":
d_kw = "DP"
else:
d_kw = "DQ"
def write_dist(num, cpdis):
if cpdis is None:
return
hdulist[0].header[f"{dist}{num:d}"] = (
"LOOKUP",
"Prior distortion function type",
)
hdulist[0].header[f"{d_kw}{num:d}.EXTVER"] = (
num,
"Version number of WCSDVARR extension",
)
hdulist[0].header[f"{d_kw}{num:d}.NAXES"] = (
len(cpdis.data.shape),
f"Number of independent variables in {dist} function",
)
for i in range(cpdis.data.ndim):
jth = {1: "1st", 2: "2nd", 3: "3rd"}.get(i + 1, f"{i + 1}th")
hdulist[0].header[f"{d_kw}{num:d}.AXIS.{i + 1:d}"] = (
i + 1,
f"Axis number of the {jth} variable in a {dist} function",
)
image = fits.ImageHDU(cpdis.data, name="WCSDVARR")
header = image.header
header["CRPIX1"] = (cpdis.crpix[0], "Coordinate system reference pixel")
header["CRPIX2"] = (cpdis.crpix[1], "Coordinate system reference pixel")
header["CRVAL1"] = (
cpdis.crval[0],
"Coordinate system value at reference pixel",
)
header["CRVAL2"] = (
cpdis.crval[1],
"Coordinate system value at reference pixel",
)
header["CDELT1"] = (cpdis.cdelt[0], "Coordinate increment along axis")
header["CDELT2"] = (cpdis.cdelt[1], "Coordinate increment along axis")
image.ver = int(hdulist[0].header[f"{d_kw}{num:d}.EXTVER"])
hdulist.append(image)
write_dist(1, self.cpdis1)
write_dist(2, self.cpdis2)
    def _fix_pre2012_scamp_tpv(self, header, wcskey=""):
        """
        Replace -TAN with TPV (for pre-2012 SCAMP headers that use -TAN
        in CTYPE). Ignore SIP if present. This follows recommendations in
        Section 7 in
        http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf.

        This is to deal with pre-2012 headers that may contain TPV with a
        CTYPE that ends in '-TAN' (post-2012 they should end in '-TPV' when
        SCAMP has adopted the new TPV convention).

        Parameters
        ----------
        header : `~astropy.io.fits.Header` or str or bytes
            Header to fix in place; string headers are left untouched.
        wcskey : str, optional
            Alternate-WCS key letter appended to the examined keywords.
        """
        if isinstance(header, (str, bytes)):
            return
        wcskey = wcskey.strip().upper()
        # Gather the CTYPE value for every axis of this WCS.
        cntype = [
            (nax, header.get(f"CTYPE{nax}{wcskey}", "").strip())
            for nax in range(1, self.naxis + 1)
        ]
        tan_axes = [ct[0] for ct in cntype if ct[1].endswith("-TAN")]
        if len(tan_axes) == 2:
            # check if PVi_j with j >= 5 is present and if so, do not load SIP
            tan_to_tpv = False
            for nax in tan_axes:
                js = []
                # NOTE(review): the f"PV{nax}_*{wcskey}" lookup relies on
                # astropy Header wildcard matching to enumerate PVi_j cards.
                for p in header[f"PV{nax}_*{wcskey}"].keys():
                    prefix = f"PV{nax}_"
                    if p.startswith(prefix):
                        p = p[len(prefix) :]
                        # Strip the trailing alternate-WCS letter, then parse
                        # the j index; non-numeric leftovers are skipped.
                        p = p.rstrip(wcskey)
                        try:
                            p = int(p)
                        except ValueError:
                            continue
                        js.append(p)
                # PVi_j with j >= 5 only occurs for TPV-style distortions.
                if js and max(js) >= 5:
                    tan_to_tpv = True
                    break
            if tan_to_tpv:
                warnings.warn(
                    "Removed redundant SIP distortion parameters "
                    "because SCAMP' PV distortions are also present",
                    FITSFixedWarning,
                )
                self._remove_sip_kw(header, del_order=True)
                # Rewrite the projection code so wcslib applies TPV.
                for i in tan_axes:
                    kwd = f"CTYPE{i:d}{wcskey}"
                    if kwd in header:
                        header[kwd] = (
                            header[kwd].strip().upper().replace("-TAN", "-TPV")
                        )
@staticmethod
def _remove_sip_kw(header, del_order=False):
"""
Remove SIP information from a header.
"""
# Never pass SIP coefficients to wcslib
# CTYPE must be passed with -SIP to wcslib
for key in {
m.group() for m in map(SIP_KW.match, list(header)) if m is not None
}:
del header[key]
if del_order:
for kwd in ["A_ORDER", "B_ORDER", "AP_ORDER", "BP_ORDER"]:
if kwd in header:
del header[kwd]
    def _read_sip_kw(self, header, wcskey=""):
        """
        Reads `SIP`_ header keywords and returns a `~astropy.wcs.Sip`
        object.

        If no `SIP`_ header keywords are found, ``None`` is returned.

        Parameters
        ----------
        header : `~astropy.io.fits.Header` or str or bytes
            Header to read from; string headers are not parsed.  All SIP
            coefficient and ``*_ORDER`` keywords read are deleted in place.
        wcskey : str, optional
            Alternate-WCS key letter used when checking CTYPE/CRPIX.
        """
        if isinstance(header, (str, bytes)):
            # TODO: Parse SIP from a string without pyfits around
            return None
        # Forward (pixel -> focal plane) polynomials: A and B.
        if "A_ORDER" in header and header["A_ORDER"] > 1:
            if "B_ORDER" not in header:
                raise ValueError(
                    "A_ORDER provided without corresponding B_ORDER "
                    "keyword for SIP distortion"
                )
            m = int(header["A_ORDER"])
            a = np.zeros((m + 1, m + 1), np.double)
            # Only coefficients with i + j <= order are defined by SIP.
            for i in range(m + 1):
                for j in range(m - i + 1):
                    key = f"A_{i}_{j}"
                    if key in header:
                        a[i, j] = header[key]
                        del header[key]
            m = int(header["B_ORDER"])
            if m > 1:
                b = np.zeros((m + 1, m + 1), np.double)
                for i in range(m + 1):
                    for j in range(m - i + 1):
                        key = f"B_{i}_{j}"
                        if key in header:
                            b[i, j] = header[key]
                            del header[key]
            else:
                a = None
                b = None
            del header["A_ORDER"]
            del header["B_ORDER"]
            # Warn (via log) when coefficients exist but CTYPE lacks "-SIP".
            ctype = [header[f"CTYPE{nax}{wcskey}"] for nax in range(1, self.naxis + 1)]
            if any(not ctyp.endswith("-SIP") for ctyp in ctype):
                message = """
                Inconsistent SIP distortion information is present in the FITS header and the WCS object:
                SIP coefficients were detected, but CTYPE is missing a "-SIP" suffix.
                astropy.wcs is using the SIP distortion coefficients,
                therefore the coordinates calculated here might be incorrect.
                If you do not want to apply the SIP distortion coefficients,
                please remove the SIP coefficients from the FITS header or the
                WCS object.  As an example, if the image is already distortion-corrected
                (e.g., drizzled) then distortion components should not apply and the SIP
                coefficients should be removed.
                While the SIP distortion coefficients are being applied here, if that was indeed the intent,
                for consistency please append "-SIP" to the CTYPE in the FITS header or the WCS object.
                """
                log.info(message)
        elif "B_ORDER" in header and header["B_ORDER"] > 1:
            raise ValueError(
                "B_ORDER provided without corresponding A_ORDER "
                "keyword for SIP distortion"
            )
        else:
            a = None
            b = None
        # Inverse (focal plane -> pixel) polynomials: AP and BP.
        if "AP_ORDER" in header and header["AP_ORDER"] > 1:
            if "BP_ORDER" not in header:
                raise ValueError(
                    "AP_ORDER provided without corresponding BP_ORDER "
                    "keyword for SIP distortion"
                )
            m = int(header["AP_ORDER"])
            ap = np.zeros((m + 1, m + 1), np.double)
            for i in range(m + 1):
                for j in range(m - i + 1):
                    key = f"AP_{i}_{j}"
                    if key in header:
                        ap[i, j] = header[key]
                        del header[key]
            m = int(header["BP_ORDER"])
            if m > 1:
                bp = np.zeros((m + 1, m + 1), np.double)
                for i in range(m + 1):
                    for j in range(m - i + 1):
                        key = f"BP_{i}_{j}"
                        if key in header:
                            bp[i, j] = header[key]
                            del header[key]
            else:
                ap = None
                bp = None
            del header["AP_ORDER"]
            del header["BP_ORDER"]
        elif "BP_ORDER" in header and header["BP_ORDER"] > 1:
            raise ValueError(
                "BP_ORDER provided without corresponding AP_ORDER "
                "keyword for SIP distortion"
            )
        else:
            ap = None
            bp = None
        if a is None and b is None and ap is None and bp is None:
            return None
        # SIP is defined relative to CRPIX, so those keywords must exist.
        if f"CRPIX1{wcskey}" not in header or f"CRPIX2{wcskey}" not in header:
            raise ValueError("Header has SIP keywords without CRPIX keywords")
        crpix1 = header.get(f"CRPIX1{wcskey}")
        crpix2 = header.get(f"CRPIX2{wcskey}")
        return Sip(a, b, ap, bp, (crpix1, crpix2))
def _write_sip_kw(self):
"""
Write out SIP keywords. Returns a dictionary of key-value
pairs.
"""
if self.sip is None:
return {}
keywords = {}
def write_array(name, a):
if a is None:
return
size = a.shape[0]
trdir = "sky to detector" if name[-1] == "P" else "detector to sky"
comment = (
f"SIP polynomial order, axis {ord(name[0]) - ord('A'):d}, {trdir:s}"
)
keywords[f"{name}_ORDER"] = size - 1, comment
comment = "SIP distortion coefficient"
for i in range(size):
for j in range(size - i):
if a[i, j] != 0.0:
keywords[f"{name}_{i:d}_{j:d}"] = a[i, j], comment
write_array("A", self.sip.a)
write_array("B", self.sip.b)
write_array("AP", self.sip.ap)
write_array("BP", self.sip.bp)
return keywords
def _denormalize_sky(self, sky):
if self.wcs.lngtyp != "RA":
raise ValueError(
"WCS does not have longitude type of 'RA', therefore "
"(ra, dec) data can not be used as input"
)
if self.wcs.lattyp != "DEC":
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore "
"(ra, dec) data can not be used as input"
)
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be used as input"
)
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude "
"celestial axes, therefore (ra, dec) data can not be "
"used as input"
)
out = np.zeros((sky.shape[0], self.wcs.naxis))
out[:, self.wcs.lng] = sky[:, 0]
out[:, self.wcs.lat] = sky[:, 1]
return out
def _normalize_sky(self, sky):
if self.wcs.lngtyp != "RA":
raise ValueError(
"WCS does not have longitude type of 'RA', therefore "
"(ra, dec) data can not be returned"
)
if self.wcs.lattyp != "DEC":
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore "
"(ra, dec) data can not be returned"
)
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned"
)
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned"
)
out = np.empty((sky.shape[0], 2))
out[:, 0] = sky[:, self.wcs.lng]
out[:, 1] = sky[:, self.wcs.lat]
return out
    def _array_converter(self, func, sky, *args, ra_dec_order=False):
        """
        A helper function to support reading either a pair of arrays
        or a single Nx2 array.

        Parameters
        ----------
        func : callable
            Transform applied as ``func(xy, origin)`` to an
            ``(N, naxis)`` array.
        sky : {'input', 'output'}
            Which side of the transform holds sky coordinates; controls
            where RA/Dec reordering happens when ``ra_dec_order`` is set.
        *args
            Either ``(coords, origin)`` with an ``(N, naxis)`` array, or
            one array per axis followed by ``origin``.
        ra_dec_order : bool, optional
            When True, accept/return (ra, dec) ordering regardless of the
            WCS's native axis order.
        """

        def _return_list_of_arrays(axes, origin):
            # Empty inputs pass straight through untransformed.
            if any(x.size == 0 for x in axes):
                return axes
            try:
                axes = np.broadcast_arrays(*axes)
            except ValueError:
                raise ValueError(
                    "Coordinate arrays are not broadcastable to each other"
                )
            xy = np.hstack([x.reshape((x.size, 1)) for x in axes])
            if ra_dec_order and sky == "input":
                xy = self._denormalize_sky(xy)
            output = func(xy, origin)
            # (ra, dec) output is always a 2-tuple; native-order output is
            # a list with one array per axis.
            if ra_dec_order and sky == "output":
                output = self._normalize_sky(output)
                return (
                    output[:, 0].reshape(axes[0].shape),
                    output[:, 1].reshape(axes[0].shape),
                )
            return [output[:, i].reshape(axes[0].shape) for i in range(output.shape[1])]

        def _return_single_array(xy, origin):
            if xy.shape[-1] != self.naxis:
                raise ValueError(
                    "When providing two arguments, the array must be "
                    f"of shape (N, {self.naxis})"
                )
            # Empty inputs pass straight through untransformed.
            if 0 in xy.shape:
                return xy
            if ra_dec_order and sky == "input":
                xy = self._denormalize_sky(xy)
            result = func(xy, origin)
            if ra_dec_order and sky == "output":
                result = self._normalize_sky(result)
            return result

        if len(args) == 2:
            try:
                xy, origin = args
                xy = np.asarray(xy)
                origin = int(origin)
            except Exception:
                raise TypeError(
                    "When providing two arguments, they must be "
                    f"(coords[N][{self.naxis}], origin)"
                )
            # Scalars and 1-D arrays are treated as a single per-axis array.
            if xy.shape == () or len(xy.shape) == 1:
                return _return_list_of_arrays([xy], origin)
            return _return_single_array(xy, origin)
        elif len(args) == self.naxis + 1:
            axes = args[:-1]
            origin = args[-1]
            try:
                axes = [np.asarray(x) for x in axes]
                origin = int(origin)
            except Exception:
                raise TypeError(
                    "When providing more than two arguments, they must be "
                    "a 1-D array for each axis, followed by an origin."
                )
            return _return_list_of_arrays(axes, origin)
        raise TypeError(
            f"WCS projection has {self.naxis} dimensions, so expected 2 (an Nx{self.naxis} array "
            f"and the origin argument) or {self.naxis + 1} arguments (the position in each "
            f"dimension, and the origin argument). Instead, {len(args)} arguments were "
            "given."
        )
    def all_pix2world(self, *args, **kwargs):
        # Full forward transform: the docstring assigned below documents the
        # chain of corrections applied by self._all_pix2world.
        return self._array_converter(self._all_pix2world, "output", *args, **kwargs)

    # The docstring is assigned after the fact so shared fragments from
    # `docstrings` can be interpolated via f-string formatting.
    all_pix2world.__doc__ = f"""
        Transforms pixel coordinates to world coordinates.
        Performs all of the following in series:
        - Detector to image plane correction (if present in the
        FITS file)
        - `SIP`_ distortion correction (if present in the FITS
        file)
        - `distortion paper`_ table-lookup correction (if present
        in the FITS file)
        - `wcslib`_ "core" WCS transformation
        Parameters
        ----------
        {docstrings.TWO_OR_MORE_ARGS("naxis", 8)}
            For a transformation that is not two-dimensional, the
            two-argument form must be used.
        {docstrings.RA_DEC_ORDER(8)}
        Returns
        -------
        {docstrings.RETURNS("sky coordinates, in degrees", 8)}
        Notes
        -----
        The order of the axes for the result is determined by the
        ``CTYPEia`` keywords in the FITS header, therefore it may not
        always be of the form (*ra*, *dec*).  The
        `~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
        `~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
        members can be used to determine the order of the axes.
        Raises
        ------
        MemoryError
            Memory allocation failed.
        SingularMatrixError
            Linear transformation matrix is singular.
        InconsistentAxisTypesError
            Inconsistent or unrecognized coordinate axis types.
        ValueError
            Invalid parameter value.
        ValueError
            Invalid coordinate transformation parameters.
        ValueError
            x- and y-coordinate arrays are not the same size.
        InvalidTransformError
            Invalid coordinate transformation parameters.
        InvalidTransformError
            Ill-conditioned coordinate transformation parameters.
        """
    def wcs_pix2world(self, *args, **kwargs):
        # Core-only transform: no distortion corrections; see the docstring
        # assigned below.
        if self.wcs is None:
            raise ValueError("No basic WCS settings were created.")
        return self._array_converter(
            lambda xy, o: self.wcs.p2s(xy, o)["world"], "output", *args, **kwargs
        )

    # The docstring is assigned after the fact so shared fragments from
    # `docstrings` can be interpolated via f-string formatting.
    wcs_pix2world.__doc__ = f"""
        Transforms pixel coordinates to world coordinates by doing
        only the basic `wcslib`_ transformation.
        No `SIP`_ or `distortion paper`_ table lookup correction is
        applied.  To perform distortion correction, see
        `~astropy.wcs.WCS.all_pix2world`,
        `~astropy.wcs.WCS.sip_pix2foc`, `~astropy.wcs.WCS.p4_pix2foc`,
        or `~astropy.wcs.WCS.pix2foc`.
        Parameters
        ----------
        {docstrings.TWO_OR_MORE_ARGS("naxis", 8)}
            For a transformation that is not two-dimensional, the
            two-argument form must be used.
        {docstrings.RA_DEC_ORDER(8)}
        Returns
        -------
        {docstrings.RETURNS("world coordinates, in degrees", 8)}
        Raises
        ------
        MemoryError
            Memory allocation failed.
        SingularMatrixError
            Linear transformation matrix is singular.
        InconsistentAxisTypesError
            Inconsistent or unrecognized coordinate axis types.
        ValueError
            Invalid parameter value.
        ValueError
            Invalid coordinate transformation parameters.
        ValueError
            x- and y-coordinate arrays are not the same size.
        InvalidTransformError
            Invalid coordinate transformation parameters.
        InvalidTransformError
            Ill-conditioned coordinate transformation parameters.
        Notes
        -----
        The order of the axes for the result is determined by the
        ``CTYPEia`` keywords in the FITS header, therefore it may not
        always be of the form (*ra*, *dec*).  The
        `~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
        `~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
        members can be used to determine the order of the axes.
        """
    def _all_world2pix(
        self, world, origin, tolerance, maxiter, adaptive, detect_divergence, quiet
    ):
        """
        Iteratively invert the full pixel-to-world transformation.

        Parameters
        ----------
        world : numpy.ndarray
            ``(N, naxis)`` array of world coordinates to invert
            (per the ``np.all(..., axis=1)`` usage below).
        origin : int
            Pixel-origin convention passed to the underlying transforms.
        tolerance : float
            Convergence tolerance on the L2 norm of the per-point
            correction, in pixels.
        maxiter : int
            Maximum number of fixed-point iterations.
        adaptive : bool
            When True, iterate only the points that have not yet converged.
        detect_divergence : bool
            Track per-point divergence and stop iterating divergent points.
        quiet : bool
            When True, return the best solution found instead of raising
            `NoConvergence`.

        Returns
        -------
        numpy.ndarray
            Array of pixel coordinates, same shape as ``world``.

        Raises
        ------
        NoConvergence
            When ``quiet`` is False and some points diverged or failed to
            converge within ``maxiter`` iterations.
        """
        # ############################################################
        # #          DESCRIPTION OF THE NUMERICAL METHOD            ##
        # ############################################################
        # In this section I will outline the method of solving
        # the inverse problem of converting world coordinates to
        # pixel coordinates (*inverse* of the direct transformation
        # `all_pix2world`) and I will summarize some of the aspects
        # of the method proposed here and some of the issues of the
        # original `all_world2pix` (in relation to this method)
        # discussed in https://github.com/astropy/astropy/issues/1977
        # A more detailed discussion can be found here:
        # https://github.com/astropy/astropy/pull/2373
        #
        #
        # ### Background ###
        #
        #
        # I will refer here to the [SIP Paper]
        # (http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf).
        # According to this paper, the effect of distortions as
        # described in *their* equation (1) is:
        #
        # (1)   x = CD*(u+f(u)),
        #
        # where `x` is a *vector* of "intermediate spherical
        # coordinates" (equivalent to (x,y) in the paper) and `u`
        # is a *vector* of "pixel coordinates", and `f` is a vector
        # function describing geometrical distortions
        # (see equations 2 and 3 in SIP Paper.
        # However, I prefer to use `w` for "intermediate world
        # coordinates", `x` for pixel coordinates, and assume that
        # transformation `W` performs the **linear**
        # (CD matrix + projection onto celestial sphere) part of the
        # conversion from pixel coordinates to world coordinates.
        # Then we can re-write (1) as:
        #
        # (2)   w = W*(x+f(x)) = T(x)
        #
        # In `astropy.wcs.WCS` transformation `W` is represented by
        # the `wcs_pix2world` member, while the combined ("total")
        # transformation (linear part + distortions) is performed by
        # `all_pix2world`. Below I summarize the notations and their
        # equivalents in `astropy.wcs.WCS`:
        #
        # | Equation term | astropy.WCS/meaning          |
        # | ------------- | ---------------------------- |
        # | `x`           | pixel coordinates            |
        # | `w`           | world coordinates            |
        # | `W`           | `wcs_pix2world()`            |
        # | `W^{-1}`      | `wcs_world2pix()`            |
        # | `T`           | `all_pix2world()`            |
        # | `x+f(x)`      | `pix2foc()`                  |
        #
        #
        # ### Direct Solving of Equation (2) ###
        #
        #
        # In order to find the pixel coordinates that correspond to
        # given world coordinates `w`, it is necessary to invert
        # equation (2): `x=T^{-1}(w)`, or solve equation `w==T(x)`
        # for `x`. However, this approach has the following
        # disadvantages:
        #    1. It requires unnecessary transformations (see next
        #       section).
        #    2. It is prone to "RA wrapping" issues as described in
        # https://github.com/astropy/astropy/issues/1977
        # (essentially because `all_pix2world` may return points with
        # a different phase than user's input `w`).
        #
        #
        # ### Description of the Method Used here ###
        #
        #
        # By applying inverse linear WCS transformation (`W^{-1}`)
        # to both sides of equation (2) and introducing notation `x'`
        # (prime) for the pixels coordinates obtained from the world
        # coordinates by applying inverse *linear* WCS transformation
        # ("focal plane coordinates"):
        #
        # (3)   x' = W^{-1}(w)
        #
        # we obtain the following equation:
        #
        # (4)   x' = x+f(x),
        #
        # or,
        #
        # (5)   x = x'-f(x)
        #
        # This equation is well suited for solving using the method
        # of fixed-point iterations
        # (http://en.wikipedia.org/wiki/Fixed-point_iteration):
        #
        # (6)   x_{i+1} = x'-f(x_i)
        #
        # As an initial value of the pixel coordinate `x_0` we take
        # "focal plane coordinate" `x'=W^{-1}(w)=wcs_world2pix(w)`.
        # We stop iterations when `|x_{i+1}-x_i|<tolerance`. We also
        # consider the process to be diverging if
        # `|x_{i+1}-x_i|>|x_i-x_{i-1}|`
        # **when** `|x_{i+1}-x_i|>=tolerance` (when current
        # approximation is close to the true solution,
        # `|x_{i+1}-x_i|>|x_i-x_{i-1}|` may be due to rounding errors
        # and we ignore such "divergences" when
        # `|x_{i+1}-x_i|<tolerance`). It may appear that checking for
        # `|x_{i+1}-x_i|<tolerance` in order to ignore divergence is
        # unnecessary since the iterative process should stop anyway,
        # however, the proposed implementation of this iterative
        # process is completely vectorized and, therefore, we may
        # continue iterating over *some* points even though they have
        # converged to within a specified tolerance (while iterating
        # over other points that have not yet converged to
        # a solution).
        #
        # In order to efficiently implement iterative process (6)
        # using available methods in `astropy.wcs.WCS`, we add and
        # subtract `x_i` from the right side of equation (6):
        #
        # (7)   x_{i+1} = x'-(x_i+f(x_i))+x_i = x'-pix2foc(x_i)+x_i,
        #
        # where `x'=wcs_world2pix(w)` and it is computed only *once*
        # before the beginning of the iterative process (and we also
        # set `x_0=x'`). By using `pix2foc` at each iteration instead
        # of `all_pix2world` we get about 25% increase in performance
        # (by not performing the linear `W` transformation at each
        # step) and we also avoid the "RA wrapping" issue described
        # above (by working in focal plane coordinates and avoiding
        # pix->world transformations).
        #
        # As an added benefit, the process converges to the correct
        # solution in just one iteration when distortions are not
        # present (compare to
        # https://github.com/astropy/astropy/issues/1977 and
        # https://github.com/astropy/astropy/pull/2294): in this case
        # `pix2foc` is the identical transformation
        # `x_i=pix2foc(x_i)` and from equation (7) we get:
        #
        # x' = x_0 = wcs_world2pix(w)
        # x_1 = x' - pix2foc(x_0) + x_0 = x' - pix2foc(x') + x' = x'
        #    = wcs_world2pix(w) = x_0
        # =>
        # |x_1-x_0| = 0 < tolerance (with tolerance > 0)
        #
        # However, for performance reasons, it is still better to
        # avoid iterations altogether and return the exact linear
        # solution (`wcs_world2pix`) right-away when non-linear
        # distortions are not present by checking that attributes
        # `sip`, `cpdis1`, `cpdis2`, `det2im1`, and `det2im2` are
        # *all* `None`.
        #
        #
        # ### Outline of the Algorithm ###
        #
        #
        # While the proposed code is relatively long (considering
        # the simplicity of the algorithm), this is due to: 1)
        # checking if iterative solution is necessary at all; 2)
        # checking for divergence; 3) re-implementation of the
        # completely vectorized algorithm as an "adaptive" vectorized
        # algorithm (for cases when some points diverge for which we
        # want to stop iterations). In my tests, the adaptive version
        # of the algorithm is about 50% slower than non-adaptive
        # version for all HST images.
        #
        # The essential part of the vectorized non-adaptive algorithm
        # (without divergence and other checks) can be described
        # as follows:
        #
        #     pix0 = self.wcs_world2pix(world, origin)
        #     pix  = pix0.copy() # 0-order solution
        #
        #     for k in range(maxiter):
        #         # find correction to the previous solution:
        #         dpix = self.pix2foc(pix, origin) - pix0
        #
        #         # compute norm (L2) of the correction:
        #         dn = np.linalg.norm(dpix, axis=1)
        #
        #         # apply correction:
        #         pix -= dpix
        #
        #         # check convergence:
        #         if np.max(dn) < tolerance:
        #             break
        #
        #    return pix
        #
        # Here, the input parameter `world` can be a `MxN` array
        # where `M` is the number of coordinate axes in WCS and `N`
        # is the number of points to be converted simultaneously to
        # image coordinates.
        #
        #
        # ###  IMPORTANT NOTE: ###
        #
        # If, in the future releases of the `~astropy.wcs`,
        # `pix2foc` will not apply all the required distortion
        # corrections then in the code below, calls to `pix2foc` will
        # have to be replaced with
        # wcs_world2pix(all_pix2world(pix_list, origin), origin)
        #
        # ############################################################
        # #            INITIALIZE ITERATIVE PROCESS:                ##
        # ############################################################
        # initial approximation (linear WCS based only)
        pix0 = self.wcs_world2pix(world, origin)
        # Check that an iterative solution is required at all
        # (when any of the non-CD-matrix-based corrections are
        # present). If not required return the initial
        # approximation (pix0).
        if not self.has_distortion:
            # No non-WCS corrections detected so
            # simply return initial approximation:
            return pix0
        pix = pix0.copy()  # 0-order solution
        # initial correction:
        dpix = self.pix2foc(pix, origin) - pix0
        # Update initial solution:
        pix -= dpix
        # Norm (L2) squared of the correction:
        dn = np.sum(dpix * dpix, axis=1)
        dnprev = dn.copy()  # if adaptive else dn
        tol2 = tolerance**2
        # Prepare for iterative process
        k = 1
        ind = None
        inddiv = None
        # Turn off numpy runtime warnings for 'invalid' and 'over':
        old_invalid = np.geterr()["invalid"]
        old_over = np.geterr()["over"]
        np.seterr(invalid="ignore", over="ignore")
        # ############################################################
        # #                NON-ADAPTIVE ITERATIONS:                 ##
        # ############################################################
        if not adaptive:
            # Fixed-point iterations:
            while np.nanmax(dn) >= tol2 and k < maxiter:
                # Find correction to the previous solution:
                dpix = self.pix2foc(pix, origin) - pix0
                # Compute norm (L2) squared of the correction:
                dn = np.sum(dpix * dpix, axis=1)
                # Check for divergence (we do this in two stages
                # to optimize performance for the most common
                # scenario when successive approximations converge):
                if detect_divergence:
                    divergent = dn >= dnprev
                    if np.any(divergent):
                        # Find solutions that have not yet converged:
                        slowconv = dn >= tol2
                        (inddiv,) = np.where(divergent & slowconv)
                        if inddiv.shape[0] > 0:
                            # Update indices of elements that
                            # still need correction:
                            conv = dn < dnprev
                            iconv = np.where(conv)
                            # Apply correction:
                            dpixgood = dpix[iconv]
                            pix[iconv] -= dpixgood
                            dpix[iconv] = dpixgood
                            # For the next iteration choose
                            # non-divergent points that have not yet
                            # converged to the requested accuracy:
                            (ind,) = np.where(slowconv & conv)
                            pix0 = pix0[ind]
                            dnprev[ind] = dn[ind]
                            k += 1
                            # Switch to adaptive iterations:
                            adaptive = True
                            break
                    # Save current correction magnitudes for later:
                    dnprev = dn
                # Apply correction:
                pix -= dpix
                k += 1
        # ############################################################
        # #                  ADAPTIVE ITERATIONS:                   ##
        # ############################################################
        if adaptive:
            if ind is None:
                (ind,) = np.where(np.isfinite(pix).all(axis=1))
                pix0 = pix0[ind]
            # "Adaptive" fixed-point iterations:
            while ind.shape[0] > 0 and k < maxiter:
                # Find correction to the previous solution:
                dpixnew = self.pix2foc(pix[ind], origin) - pix0
                # Compute norm (L2) of the correction:
                dnnew = np.sum(np.square(dpixnew), axis=1)
                # Bookkeeping of corrections:
                dnprev[ind] = dn[ind].copy()
                dn[ind] = dnnew
                if detect_divergence:
                    # Find indices of pixels that are converging:
                    conv = dnnew < dnprev[ind]
                    iconv = np.where(conv)
                    iiconv = ind[iconv]
                    # Apply correction:
                    dpixgood = dpixnew[iconv]
                    pix[iiconv] -= dpixgood
                    dpix[iiconv] = dpixgood
                    # Find indices of solutions that have not yet
                    # converged to the requested accuracy
                    # AND that do not diverge:
                    (subind,) = np.where((dnnew >= tol2) & conv)
                else:
                    # Apply correction:
                    pix[ind] -= dpixnew
                    dpix[ind] = dpixnew
                    # Find indices of solutions that have not yet
                    # converged to the requested accuracy:
                    (subind,) = np.where(dnnew >= tol2)
                # Choose solutions that need more iterations:
                ind = ind[subind]
                pix0 = pix0[subind]
                k += 1
        # ############################################################
        # #         FINAL DETECTION OF INVALID, DIVERGING,          ##
        # #         AND FAILED-TO-CONVERGE POINTS                   ##
        # ############################################################
        # Identify diverging and/or invalid points:
        invalid = (~np.all(np.isfinite(pix), axis=1)) & (
            np.all(np.isfinite(world), axis=1)
        )
        # When detect_divergence==False, dnprev is outdated
        # (it is the norm of the very first correction).
        # Still better than nothing...
        (inddiv,) = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
        if inddiv.shape[0] == 0:
            inddiv = None
        # Identify points that did not converge within 'maxiter'
        # iterations:
        if k >= maxiter:
            (ind,) = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
            if ind.shape[0] == 0:
                ind = None
        else:
            ind = None
        # Restore previous numpy error settings:
        np.seterr(invalid=old_invalid, over=old_over)
        # ############################################################
        # #  RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING  ##
        # #  DATA POINTS HAVE BEEN DETECTED:                        ##
        # ############################################################
        if (ind is not None or inddiv is not None) and not quiet:
            if inddiv is None:
                raise NoConvergence(
                    "'WCS.all_world2pix' failed to "
                    f"converge to the requested accuracy after {k:d} "
                    "iterations.",
                    best_solution=pix,
                    accuracy=np.abs(dpix),
                    niter=k,
                    slow_conv=ind,
                    divergent=None,
                )
            else:
                raise NoConvergence(
                    "'WCS.all_world2pix' failed to "
                    "converge to the requested accuracy.\n"
                    f"After {k:d} iterations, the solution is diverging "
                    "at least for one input point.",
                    best_solution=pix,
                    accuracy=np.abs(dpix),
                    niter=k,
                    slow_conv=ind,
                    divergent=inddiv,
                )
        return pix
def all_world2pix(
    self,
    *args,
    tolerance=1e-4,
    maxiter=20,
    adaptive=False,
    detect_divergence=True,
    quiet=False,
    **kwargs,
):
    # The iterative inversion needs the basic (wcslib) WCS to exist.
    if self.wcs is None:
        raise ValueError("No basic WCS settings were created.")

    def _iterative_inverse(*coords, **kw):
        # Forward the caller's solver options to the private implementation.
        return self._all_world2pix(
            *coords,
            tolerance=tolerance,
            maxiter=maxiter,
            adaptive=adaptive,
            detect_divergence=detect_divergence,
            quiet=quiet,
        )

    return self._array_converter(_iterative_inverse, "input", *args, **kwargs)
all_world2pix.__doc__ = f"""
all_world2pix(*arg, tolerance=1.0e-4, maxiter=20,
adaptive=False, detect_divergence=True, quiet=False)
Transforms world coordinates to pixel coordinates, using
numerical iteration to invert the full forward transformation
`~astropy.wcs.WCS.all_pix2world` with complete
distortion model.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS("naxis", 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
tolerance : float, optional (default = 1.0e-4)
Tolerance of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
within this many pixels current estimate, more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
maxiter : int, optional (default = 20)
Maximum number of iterations allowed to reach a solution.
quiet : bool, optional (default = False)
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within a specified number of maximum
iterations set by ``maxiter`` parameter. Instead,
simply return the found solution.
Other Parameters
----------------
adaptive : bool, optional (default = False)
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default is recommended
for HST as well as most other instruments.
.. note::
The :py:meth:`all_world2pix` uses a vectorized
implementation of the method of consecutive
approximations (see ``Notes`` section below) in which it
iterates over *all* input points *regardless* until
the required accuracy has been reached for *all* input
points. In some cases it may be possible that
*almost all* points have reached the required accuracy
but there are only a few of input data points for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`all_world2pix` will continue
iterating *only* over the points that have not yet
converged to the required accuracy. However, for the
HST's ACS/WFC detector, which has the strongest
distortions of all HST instruments, testing has
shown that enabling this option would lead to a about
50-100% penalty in computational time (depending on
specifics of the image, geometric distortions, and
number of input points to be converted). Therefore,
for HST and possibly instruments, it is recommended
to set ``adaptive`` = `False`. The only danger in
getting this setting wrong will be a performance
penalty.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional (default = True)
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`all_world2pix` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
the method of consecutive approximations used by
:py:meth:`all_world2pix` may not be satisfied, in which
case consecutive approximations to the solution will
diverge regardless of the ``tolerance`` or ``maxiter``
settings.
When ``detect_divergence`` is `False`, these divergent
points will be detected as not having achieved the
required accuracy (without further details). In addition,
if ``adaptive`` is `False` then the algorithm will not
know that the solution (for specific points) is diverging
and will continue iterating and trying to "improve"
diverging solutions. This may result in ``NaN`` or
``Inf`` values in the return results (in addition to a
performance penalties). Even when ``detect_divergence``
is `False`, :py:meth:`all_world2pix`, at the end of the
iterative process, will identify invalid results
(``NaN`` or ``Inf``) as "diverging" solutions and will
raise :py:class:`NoConvergence` unless the ``quiet``
parameter is set to `True`.
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will detect points for which
current correction to the coordinates is larger than
the correction applied during the previous iteration
**if** the requested accuracy **has not yet been
achieved**. In this case, if ``adaptive`` is `True`,
these points will be excluded from further iterations and
if ``adaptive`` is `False`, :py:meth:`all_world2pix` will
automatically switch to the adaptive algorithm. Thus, the
reported divergent solution will be the latest converging
solution computed immediately *before* divergence
has been detected.
.. note::
When accuracy has been achieved, small increases in
current corrections may be possible due to rounding
errors (when ``adaptive`` is `False`) and such
increases will be ignored.
.. note::
Based on our testing using HST ACS/WFC images, setting
``detect_divergence`` to `True` will incur about 5-20%
performance penalty with the larger penalty
corresponding to ``adaptive`` set to `True`.
Because the benefits of enabling this
feature outweigh the small performance penalty,
especially when ``adaptive`` = `False`, it is
recommended to set ``detect_divergence`` to `True`,
unless extensive testing of the distortion models for
images from specific instruments show a good stability
of the numerical method for a wide range of
coordinates (even outside the image itself).
.. note::
Indices of the diverging inverse solutions will be
reported in the ``divergent`` attribute of the
raised :py:class:`NoConvergence` exception object.
Returns
-------
{docstrings.RETURNS("pixel coordinates", 8)}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp`, and
`~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Using the method of fixed-point iterations approximations we
iterate starting with the initial approximation, which is
computed using the non-distortion-aware
:py:meth:`wcs_world2pix` (or equivalent).
The :py:meth:`all_world2pix` function uses a vectorized
implementation of the method of consecutive approximations and
therefore it is highly efficient (>30x) when *all* data points
that need to be converted from sky coordinates to image
coordinates are passed at *once*. Therefore, it is advisable,
whenever possible, to pass as input a long array of all points
that need to be converted to :py:meth:`all_world2pix` instead
of calling :py:meth:`all_world2pix` for each data point. Also
see the note to the ``adaptive`` parameter.
Raises
------
NoConvergence
The method did not converge to a
solution to the required accuracy within a specified
number of maximum iterations set by the ``maxiter``
parameter. To turn off this exception, set ``quiet`` to
`True`. Indices of the points for which the requested
accuracy was not achieved (if any) will be listed in the
``slow_conv`` attribute of the
raised :py:class:`NoConvergence` exception object.
See :py:class:`NoConvergence` documentation for
more details.
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Examples
--------
>>> import astropy.io.fits as fits
>>> import astropy.wcs as wcs
>>> import numpy as np
>>> import os
>>> filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits')
>>> hdulist = fits.open(filename)
>>> w = wcs.WCS(hdulist[('sci',1)].header, hdulist)
>>> hdulist.close()
>>> ra, dec = w.all_pix2world([1,2,3], [1,1,1], 1)
>>> print(ra) # doctest: +FLOAT_CMP
[ 5.52645627 5.52649663 5.52653698]
>>> print(dec) # doctest: +FLOAT_CMP
[-72.05171757 -72.05171276 -72.05170795]
>>> radec = w.all_pix2world([[1,1], [2,1], [3,1]], 1)
>>> print(radec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 5.52649663 -72.05171276]
[ 5.52653698 -72.05170795]]
>>> x, y = w.all_world2pix(ra, dec, 1)
>>> print(x) # doctest: +FLOAT_CMP
[ 1.00000238 2.00000237 3.00000236]
>>> print(y) # doctest: +FLOAT_CMP
[ 0.99999996 0.99999997 0.99999997]
>>> xy = w.all_world2pix(radec, 1)
>>> print(xy) # doctest: +FLOAT_CMP
[[ 1.00000238 0.99999996]
[ 2.00000237 0.99999997]
[ 3.00000236 0.99999997]]
>>> xy = w.all_world2pix(radec, 1, maxiter=3,
... tolerance=1.0e-10, quiet=False)
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 3 iterations, the solution is
diverging at least for one input point.
>>> # Now try to use some diverging data:
>>> divradec = w.all_pix2world([[1.0, 1.0],
... [10000.0, 50000.0],
... [3.0, 1.0]], 1)
>>> print(divradec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 7.15976932 -70.8140779 ]
[ 5.52653698 -72.05170795]]
>>> # First, turn detect_divergence on:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=True,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000238e+00 9.99999965e-01]
[ -1.99441636e+06 1.44309097e+06]
[ 3.00000236e+00 9.99999966e-01]]
Achieved accuracy:
[[ 6.13968380e-05 8.59638593e-07]
[ 8.59526812e+11 6.61713548e+11]
[ 6.09398446e-05 8.38759724e-07]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 5 iterations, the solution is
diverging at least for one input point.
>>> # This time turn detect_divergence off:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=False,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000009 1. ]
[ nan nan]
[ 3.00000009 1. ]]
Achieved accuracy:
[[ 2.29417358e-06 3.21222995e-08]
[ nan nan]
[ 2.27407877e-06 3.13005639e-08]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 6 iterations, the solution is
diverging at least for one input point.
"""
def wcs_world2pix(self, *args, **kwargs):
    # Only the core wcslib transformation is used; fail early without it.
    if self.wcs is None:
        raise ValueError("No basic WCS settings were created.")

    def _sky_to_pix(world, origin):
        # wcslib's s2p returns a dict; callers only need the pixel grid.
        return self.wcs.s2p(world, origin)["pixcrd"]

    return self._array_converter(_sky_to_pix, "input", *args, **kwargs)
# NOTE: docstring is assigned dynamically so shared fragments from
# ``docstrings`` are interpolated at import time.
wcs_world2pix.__doc__ = f"""
Transforms world coordinates to pixel coordinates, using only
the basic `wcslib`_ WCS transformation. No `SIP`_ or
`distortion paper`_ table lookup transformation is applied.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS("naxis", 8)}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{docstrings.RA_DEC_ORDER(8)}
Returns
-------
{docstrings.RETURNS("pixel coordinates", 8)}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
"""
def pix2foc(self, *args):
    # Combined SIP + distortion-paper correction; returns absolute pixels.
    converter = self._pix2foc
    return self._array_converter(converter, None, *args)
# NOTE: docstring assigned dynamically to interpolate shared fragments.
pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention and `distortion
paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS("2", 8)}
Returns
-------
{docstrings.RETURNS("focal coordinates", 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def p4_pix2foc(self, *args):
    # Distortion-paper (table lookup) correction only; absolute pixels.
    converter = self._p4_pix2foc
    return self._array_converter(converter, None, *args)
# NOTE: docstring assigned dynamically to interpolate shared fragments.
p4_pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS("2", 8)}
Returns
-------
{docstrings.RETURNS("focal coordinates", 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def det2im(self, *args):
    # Detector -> image plane via distortion-paper lookup tables.
    converter = self._det2im
    return self._array_converter(converter, None, *args)
# NOTE: docstring assigned dynamically to interpolate shared fragments.
det2im.__doc__ = f"""
Convert detector coordinates to image plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS("2", 8)}
Returns
-------
{docstrings.RETURNS("pixel coordinates", 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def sip_pix2foc(self, *args):
    # Without SIP coefficients, this transformation is the identity:
    # simply hand the coordinate argument(s) back to the caller.
    if self.sip is None:
        nargs = len(args)
        if nargs == 2:
            return args[0]
        if nargs == 3:
            return args[:2]
        raise TypeError("Wrong number of arguments")
    return self._array_converter(self.sip.pix2foc, None, *args)
# NOTE: docstring assigned dynamically to interpolate shared fragments.
sip_pix2foc.__doc__ = f"""
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention.
The output is in pixel coordinates, relative to ``CRPIX``.
FITS WCS `distortion paper`_ table lookup correction is not
applied, even if that information existed in the FITS file
that initialized this :class:`~astropy.wcs.WCS` object. To
correct for that, use `~astropy.wcs.WCS.pix2foc` or
`~astropy.wcs.WCS.p4_pix2foc`.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS("2", 8)}
Returns
-------
{docstrings.RETURNS("focal coordinates", 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def sip_foc2pix(self, *args):
    # No SIP model means the focal plane and pixel frames coincide,
    # so echo the input coordinates unchanged.
    if self.sip is None:
        nargs = len(args)
        if nargs == 2:
            return args[0]
        if nargs == 3:
            return args[:2]
        raise TypeError("Wrong number of arguments")
    return self._array_converter(self.sip.foc2pix, None, *args)
# NOTE: docstring assigned dynamically to interpolate shared fragments.
sip_foc2pix.__doc__ = f"""
Convert focal plane coordinates to pixel coordinates using the
`SIP`_ polynomial distortion convention.
FITS WCS `distortion paper`_ table lookup distortion
correction is not applied, even if that information existed in
the FITS file that initialized this `~astropy.wcs.WCS` object.
Parameters
----------
{docstrings.TWO_OR_MORE_ARGS("2", 8)}
Returns
-------
{docstrings.RETURNS("pixel coordinates", 8)}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
"""
def proj_plane_pixel_scales(self):
    """
    Pixel scales along each axis at ``CRPIX``, measured in the
    "plane of intermediate world coordinates" of
    `Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.

    .. note::
        Only the "image plane" -> "projection plane" step is considered;
        distortions from the non-linear nature of most projections are
        ignored, and the result is only meaningful for a WCS with
        celestial axes (`~astropy.wcs.WCS.celestial`).

    Returns
    -------
    scale : list of `~astropy.units.Quantity`
        Projection-plane increment per pixel for each axis.

    See Also
    --------
    astropy.wcs.utils.proj_plane_pixel_scales
    """
    from astropy.wcs.utils import proj_plane_pixel_scales  # Avoid circular import

    scales = proj_plane_pixel_scales(self)
    # Axes can carry different units, so attach each CUNIT individually.
    return [scale * u.Unit(cunit) for scale, cunit in zip(scales, self.wcs.cunit)]
def proj_plane_pixel_area(self):
    """
    Area of the pixel at ``CRPIX`` in the "plane of intermediate world
    coordinates" of a **celestial** WCS (see `astropy.wcs.WCS.celestial`),
    as defined in `Greisen & Calabretta 2002, A&A, 395, 1061 <https://ui.adsabs.harvard.edu/abs/2002A%26A...395.1061G>`_.

    .. note::
        Only the "image plane" -> "projection plane" step is considered;
        distortions from the non-linear nature of most projections are
        ignored, and the result is only meaningful for a WCS with
        celestial axes.

    Returns
    -------
    area : `~astropy.units.Quantity`
        Projection-plane area of the pixel at the ``CRPIX`` location.

    Raises
    ------
    ValueError
        Pixel area is defined only for 2D pixels. Most likely the
        `~astropy.wcs.Wcsprm.cd` matrix of the `~astropy.wcs.WCS.celestial`
        WCS is not a square matrix of second order.

    Notes
    -----
    The square root of the pixel area can serve as a single pixel scale
    for an equivalent square pixel of the same area.

    See Also
    --------
    astropy.wcs.utils.proj_plane_pixel_area
    """
    from astropy.wcs.utils import proj_plane_pixel_area  # Avoid circular import

    area = proj_plane_pixel_area(self)
    # Pixel area only makes sense for 2D pixels, hence exactly two units.
    unit = u.Unit(self.wcs.cunit[0]) * u.Unit(self.wcs.cunit[1])
    return area * unit
def to_fits(self, relax=False, key=None):
    """
    Generate an `~astropy.io.fits.HDUList` holding all of the
    information stored in this object. The result is logically
    identical to the input FITS file, but normalized in a number of
    ways (see `to_header` for caveats about the output).

    Parameters
    ----------
    relax : bool or int, optional
        Degree of permissiveness:
        - `False` (default): Write all extensions that are
          considered to be safe and recommended.
        - `True`: Write all recognized informal extensions of the
          WCS standard.
        - `int`: a bit field selecting specific extensions to
          write. See :ref:`astropy:relaxwrite` for details.
    key : str
        The name of a particular WCS transform to use. This may be
        either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
        part of the ``CTYPEia`` cards.

    Returns
    -------
    hdulist : `~astropy.io.fits.HDUList`
    """
    primary = fits.PrimaryHDU(header=self.to_header(relax=relax, key=key))
    hdulist = fits.HDUList(primary)
    # Distortion information cannot live in a single header; append the
    # extra HDUs that carry it.
    self._write_det2im(hdulist)
    self._write_distortion_kw(hdulist)
    return hdulist
def to_header(self, relax=None, key=None):
    """Generate an `astropy.io.fits.Header` object with the basic WCS
    and SIP information stored in this object. This should be
    logically identical to the input FITS file, but it will be
    normalized in a number of ways.

    .. warning::
        This function does not write out FITS WCS `distortion
        paper`_ information, since that requires multiple FITS
        header data units. To get a full representation of
        everything in this object, use `to_fits`.

    Parameters
    ----------
    relax : bool or int, optional
        Degree of permissiveness:
        - `False` (default): Write all extensions that are
          considered to be safe and recommended.
        - `True`: Write all recognized informal extensions of the
          WCS standard.
        - `int`: a bit field selecting specific extensions to
          write. See :ref:`astropy:relaxwrite` for details.
        If the ``relax`` keyword argument is not given and any
        keywords were omitted from the output, an
        `~astropy.utils.exceptions.AstropyWarning` is displayed.
        To override this, explicitly pass a value to ``relax``.
    key : str
        The name of a particular WCS transform to use. This may be
        either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
        part of the ``CTYPEia`` cards.

    Returns
    -------
    header : `astropy.io.fits.Header`

    Notes
    -----
    The output header will almost certainly differ from the input in a
    number of respects:
    1. The output header only contains WCS-related keywords. In
       particular, it does not contain syntactically-required
       keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or
       ``END``.
    2. Deprecated (e.g. ``CROTAn``) or non-standard usage will
       be translated to standard (this is partially dependent on
       whether ``fix`` was applied).
    3. Quantities will be converted to the units used internally,
       basically SI with the addition of degrees.
    4. Floating-point quantities may be given to a different decimal
       precision.
    5. Elements of the ``PCi_j`` matrix will be written if and
       only if they differ from the unit matrix. Thus, if the
       matrix is unity then no elements will be written.
    6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
       ``LONPOLEa`` and ``LATPOLEa`` may appear.
    7. The original keycomments will be lost, although
       `to_header` tries hard to write meaningful comments.
    8. Keyword order may be changed.
    """
    # default precision for numerical WCS keywords
    precision = WCSHDO_P14  # Defined by C-ext
    display_warning = False
    if relax is None:
        # relax was not given explicitly: remember to warn about any
        # keywords that end up omitted from the output.
        display_warning = True
        relax = False
    if relax not in (True, False):
        # relax is an integer bit field: split the SIP bit out so the
        # wcslib call does not see it.
        do_sip = relax & WCSHDO_SIP
        relax &= ~WCSHDO_SIP
    else:
        do_sip = relax
        relax = WCSHDO_all if relax is True else WCSHDO_safe  # Defined by C-ext
    # Always request the default numerical precision.
    relax = precision | relax
    if self.wcs is not None:
        if key is not None:
            # Temporarily switch the alternate WCS key; restored below.
            orig_key = self.wcs.alt
            self.wcs.alt = key
        header_string = self.wcs.to_header(relax)
        header = fits.Header.fromstring(header_string)
        # wcslib can emit blank/COMMENT cards that we do not want.
        keys_to_remove = ["", " ", "COMMENT"]
        for kw in keys_to_remove:
            if kw in header:
                del header[kw]
        # Check if we can handle TPD distortion correctly
        if _WCS_TPD_WARN_LT71:
            for kw, val in header.items():
                if kw[:5] in ("CPDIS", "CQDIS") and val == "TPD":
                    warnings.warn(
                        f"WCS contains a TPD distortion model in {kw}. WCSLIB"
                        f" {WCSLIB_VERSION} is writing this in a format"
                        " incompatible with current versions - please update to"
                        " 7.4 or use the bundled WCSLIB.",
                        AstropyWarning,
                    )
        elif _WCS_TPD_WARN_LT74:
            for kw, val in header.items():
                if kw[:5] in ("CPDIS", "CQDIS") and val == "TPD":
                    warnings.warn(
                        f"WCS contains a TPD distortion model in {kw}, which"
                        " requires WCSLIB 7.4 or later to store in a FITS header"
                        f" (having {WCSLIB_VERSION}).",
                        AstropyWarning,
                    )
    else:
        header = fits.Header()
    if do_sip and self.sip is not None:
        # Ensure CTYPE carries the "-SIP" suffix before writing the
        # SIP coefficient keywords.
        if self.wcs is not None and any(
            not ctyp.endswith("-SIP") for ctyp in self.wcs.ctype
        ):
            self._fix_ctype(header, add_sip=True)
        for kw, val in self._write_sip_kw().items():
            header[kw] = val
    if (
        not do_sip
        and self.wcs is not None
        and any(self.wcs.ctype)
        and self.sip is not None
    ):
        # This is called when relax is not False or WCSHDO_SIP
        # The default case of ``relax=None`` is handled further in the code.
        header = self._fix_ctype(header, add_sip=False)
    if display_warning:
        full_header = self.to_header(relax=True, key=key)
        missing_keys = [k for k in full_header if k not in header]
        if missing_keys:
            warnings.warn(
                "Some non-standard WCS keywords were excluded:"
                f" {', '.join(missing_keys)} Use the ``relax`` kwarg to control"
                " this.",
                AstropyWarning,
            )
        # called when ``relax=None``
        # This is different from the case of ``relax=False``.
        if any(self.wcs.ctype) and self.sip is not None:
            header = self._fix_ctype(header, add_sip=False, log_message=False)
    # Finally reset the key. This must be called after ``_fix_ctype``.
    if key is not None:
        self.wcs.alt = orig_key
    return header
def _fix_ctype(self, header, add_sip=True, log_message=True):
"""
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header.
add_sip : bool
Flag indicating whether "-SIP" should be added or removed from CTYPE keywords.
Remove "-SIP" from CTYPE when writing out a header with relax=False.
This needs to be done outside ``to_header`` because ``to_header`` runs
twice when ``relax=False`` and the second time ``relax`` is set to ``True``
to display the missing keywords.
If the user requested SIP distortion to be written out add "-SIP" to
CTYPE if it is missing.
"""
_add_sip_to_ctype = """
Inconsistent SIP distortion information is present in the current WCS:
SIP coefficients were detected, but CTYPE is missing "-SIP" suffix,
therefore the current WCS is internally inconsistent.
Because relax has been set to True, the resulting output WCS will have
"-SIP" appended to CTYPE in order to make the header internally consistent.
However, this may produce incorrect astrometry in the output WCS, if
in fact the current WCS is already distortion-corrected.
Therefore, if current WCS is already distortion-corrected (eg, drizzled)
then SIP distortion components should not apply. In that case, for a WCS
that is already distortion-corrected, please remove the SIP coefficients
from the header.
"""
if log_message:
if add_sip:
log.info(_add_sip_to_ctype)
for i in range(1, self.naxis + 1):
# strip() must be called here to cover the case of alt key= " "
kw = f"CTYPE{i}{self.wcs.alt}".strip()
if kw in header:
if add_sip:
val = header[kw].strip("-SIP") + "-SIP"
else:
val = header[kw].strip("-SIP")
header[kw] = val
else:
continue
return header
def to_header_string(self, relax=None):
    """
    Identical to `to_header`, but returns a string containing the
    header cards.
    """
    header = self.to_header(relax)
    return str(header)
def footprint_to_file(
    self, filename="footprint.reg", color="green", width=2, coordsys=None
):
    """
    Writes out a `ds9`_ style regions file. It can be loaded
    directly by `ds9`_.

    Parameters
    ----------
    filename : str, optional
        Output file name - default is ``'footprint.reg'``
    color : str, optional
        Color to use when plotting the line.
    width : int, optional
        Width of the region line.
    coordsys : str, optional
        Coordinate system. If not specified (default), the ``radesys``
        value is used. For all possible values, see
        http://ds9.si.edu/doc/ref/region.html#RegionFileFormat

    Raises
    ------
    ValueError
        If ``coordsys`` (or the fallback ``radesys``) is not a system
        recognized by ds9.
    """
    comments = (
        "# Region file format: DS9 version 4.0 \n"
        '# global color=green font="helvetica 12 bold '
        "select=1 highlite=1 edit=1 move=1 delete=1 "
        "include=1 fixed=0 source\n"
    )
    coordsys = coordsys or self.wcs.radesys
    valid_systems = {
        "PHYSICAL",
        "IMAGE",
        "FK4",
        "B1950",
        "FK5",
        "J2000",
        "GALACTIC",
        "ECLIPTIC",
        "ICRS",
        "LINEAR",
        "AMPLIFIER",
        "DETECTOR",
    }
    if coordsys not in valid_systems:
        raise ValueError(
            f"Coordinate system '{coordsys}' is not supported. A valid"
            " one can be given with the 'coordsys' argument."
        )
    with open(filename, mode="w") as f:
        f.write(comments)
        f.write(f"{coordsys}\n")
        f.write("polygon(")
        footprint = self.calc_footprint()
        # calc_footprint may return None (e.g. no celestial axes); then the
        # polygon is simply written empty.
        if footprint is not None:
            footprint.tofile(f, sep=",")
        f.write(f") # color={color}, width={width:d} \n")
def _get_naxis(self, header=None):
_naxis = []
if header is not None and not isinstance(header, (str, bytes)):
for naxis in itertools.count(1):
try:
_naxis.append(header[f"NAXIS{naxis}"])
except KeyError:
break
if len(_naxis) == 0:
_naxis = self.naxis * [0]
self._naxis = _naxis
def printwcs(self):
    """Print the short WCS summary produced by ``__repr__``."""
    print(self.__repr__())
def __repr__(self):
    """
    Return a short description. Simply porting the behavior from
    the `printwcs()` method.
    """
    description = ["WCS Keywords", "", f"Number of WCS axes: {self.naxis!r}"]
    # One "{i} " placeholder per axis, filled per-keyword below.
    sfmt = " : " + "".join([f"{{{i}}} " for i in range(self.naxis)])
    keywords = ["CTYPE", "CUNIT", "CRVAL", "CRPIX"]
    values = [
        [repr(v) for v in self.wcs.ctype],
        [repr(str(v)) for v in self.wcs.cunit],
        self.wcs.crval,
        self.wcs.crpix,
    ]
    for keyword, value in zip(keywords, values):
        description.append(keyword + sfmt.format(*value))
    # Show the linear transformation: PC matrix + CDELT if present,
    # otherwise the CD matrix (one row per line).
    if hasattr(self.wcs, "pc"):
        for i in range(self.naxis):
            s = ""
            for j in range(self.naxis):
                s += "".join(["PC", str(i + 1), "_", str(j + 1), " "])
            s += sfmt
            description.append(s.format(*self.wcs.pc[i]))
        s = "CDELT" + sfmt
        description.append(s.format(*self.wcs.cdelt))
    elif hasattr(self.wcs, "cd"):
        for i in range(self.naxis):
            s = ""
            for j in range(self.naxis):
                s += "".join(["CD", str(i + 1), "_", str(j + 1), " "])
            s += sfmt
            description.append(s.format(*self.wcs.cd[i]))
    description.append(f"NAXIS : {' '.join(map(str, self._naxis))}")
    # Strip trailing space in lines
    description = [line.rstrip() for line in description]
    return "\n".join(description)
def get_axis_types(self):
    """
    Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>`
    but provides the information in a more Python-friendly format.

    Each axis type code is a four-digit integer whose decimal digits
    (from most to least significant) encode: coordinate type, scale,
    group number, and axis number.

    Returns
    -------
    result : list of dict
        One dictionary per axis with the keys:

        - 'coordinate_type': `None` (non-specific), ``'stokes'``,
          ``'celestial'`` (including ``CUBEFACE``), or ``'spectral'``.
        - 'scale': ``'linear'``, ``'quantized'`` (``STOKES``,
          ``CUBEFACE``), ``'non-linear celestial'``,
          ``'non-linear spectral'``, ``'logarithmic'``, or ``'tabular'``.
        - 'group': group number, e.g. lookup table number.
        - 'number': for celestial axes, 0 = longitude, 1 = latitude,
          2 = ``CUBEFACE``; for lookup tables, the axis number in a
          multidimensional table.

        ``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
        generate an error.
    """
    if self.wcs is None:
        raise AttributeError("This WCS object does not have a wcsprm object.")
    coordinate_type_map = {0: None, 1: "stokes", 2: "celestial", 3: "spectral"}
    scale_map = {
        0: "linear",
        1: "quantized",
        2: "non-linear celestial",
        3: "non-linear spectral",
        4: "logarithmic",
        5: "tabular",
    }
    result = []
    for axis_type in self.wcs.axis_types:
        # Decode the four decimal digits of the packed axis-type code.
        ctype_digit = (axis_type // 1000) % 10
        scale_digit = (axis_type // 100) % 10
        group_digit = (axis_type // 10) % 10
        result.append(
            {
                "coordinate_type": coordinate_type_map[ctype_digit],
                "scale": scale_map[scale_digit],
                "group": group_digit,
                "number": axis_type % 10,
            }
        )
    return result
def __reduce__(self):
    """
    Support pickling of WCS objects. This is done by serializing
    to an in-memory FITS file and dumping that as a string.
    """
    buffer = io.BytesIO()
    self.to_fits(relax=True).writeto(buffer)
    # Carry the instance dict plus the alternate WCS key so the
    # unpickler can restore both.
    state = dict(self.__dict__, _alt_wcskey=self.wcs.alt)
    return __WCS_unpickle__, (self.__class__, state, buffer.getvalue())
def dropaxis(self, dropax):
    """
    Remove an axis from the WCS.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The WCS with naxis to be chopped to naxis-1
    dropax : int
        The index of the WCS to drop, counting from 0 (i.e., python convention,
        not FITS convention)

    Returns
    -------
    `~astropy.wcs.WCS`
        A new `~astropy.wcs.WCS` instance with one axis fewer
    """
    # ``sub`` counts axes from 1 (FITS convention) and gives axis number 0
    # a special meaning, so select every 1-based axis except the dropped
    # one. E.g. for ctype ['RA','DEC','VLSR'], dropping axis 2 keeps
    # sub([1, 2]) -> 'RA','DEC'.
    keep = [axis + 1 for axis in range(self.wcs.naxis) if axis != dropax]
    return self.sub(keep)
def swapaxes(self, ax0, ax1):
    """
    Swap axes in a WCS.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The WCS to have its axes swapped
    ax0 : int
    ax1 : int
        The indices of the WCS to be swapped, counting from 0 (i.e., python
        convention, not FITS convention)

    Returns
    -------
    `~astropy.wcs.WCS`
        A new `~astropy.wcs.WCS` instance with the same number of axes,
        but two swapped
    """
    # Build the 1-based axis order that ``sub`` expects, then exchange
    # the two requested (0-based) positions.
    order = [axis + 1 for axis in range(self.wcs.naxis)]
    order[ax0], order[ax1] = order[ax1], order[ax0]
    return self.sub(order)
def reorient_celestial_first(self):
    """
    Reorient the WCS such that the celestial axes are first, followed by
    the spectral axis, followed by any others.
    Assumes at least celestial axes are present.
    """
    # The WCSSUB_* constants are axis-type selectors understood by the
    # C-extension ``sub``; listing them in this order both selects and
    # reorders the matching axes.
    return self.sub(
        [WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES, WCSSUB_TIME]
    )  # Defined by C-ext
def slice(self, view, numpy_order=True):
    """
    Slice a WCS instance using a Numpy slice. The order of the slice should
    be reversed (as for the data) compared to the natural WCS order.

    Parameters
    ----------
    view : tuple
        A tuple containing the same number of slices as the WCS system.
        The ``step`` method, the third argument to a slice, is not
        presently supported.
    numpy_order : bool, default: True
        Use numpy order, i.e. slice the WCS so that an identical slice
        applied to a numpy array will slice the array and WCS in the same
        way. If set to `False`, the WCS will be sliced in FITS order,
        meaning the first slice will be applied to the *last* numpy index
        but the *first* WCS axis.

    Returns
    -------
    wcs_new : `~astropy.wcs.WCS`
        A new resampled WCS axis
    """
    if view is Ellipsis:
        return self.deepcopy()
    if hasattr(view, "__len__") and len(view) > self.wcs.naxis:
        raise ValueError("Must have # of slices <= # of WCS axes")
    elif not hasattr(view, "__len__"):  # view MUST be an iterable
        view = [view]
    # Pad with full slices so every WCS axis is addressed below.
    if len(view) < self.wcs.naxis:
        view = list(view) + [slice(None) for i in range(self.wcs.naxis - len(view))]
    if not numpy_order:
        view = view[::-1]
    if not all(isinstance(x, slice) for x in view):
        # We need to drop some dimensions, but this may not always be
        # possible with .sub due to correlated axes, so instead we use the
        # generalized slicing infrastructure from astropy.wcs.wcsapi.
        return SlicedFITSWCS(self, view)
    # NOTE: we could in principle use SlicedFITSWCS as above for all slicing,
    # but in the simple case where there are no axes dropped, we can just
    # create a full WCS object with updated WCS parameters which is faster
    # for this specific case and also backward-compatible.
    wcs_new = self.deepcopy()
    if wcs_new.sip is not None:
        sip_crpix = wcs_new.sip.crpix.tolist()
    # Group the distortion tables by which axis (x or y) they correspond to
    x_tables = [t for t in (wcs_new.cpdis1, wcs_new.det2im1) if t is not None]
    y_tables = [t for t in (wcs_new.cpdis2, wcs_new.det2im2) if t is not None]
    distortion_tables = [*x_tables, *y_tables]
    for i, iview in enumerate(view):
        if iview.step is not None and iview.step < 0:
            raise NotImplementedError("Reversing an axis is not implemented.")
        # ``view`` is in numpy order at this point; map each slice back to
        # its (reversed) WCS axis index.
        wcs_index = self.wcs.naxis - 1 - i
        if wcs_index < 2:
            itables = [x_tables, y_tables][wcs_index]
        else:
            itables = []
        if iview.step is not None and iview.start is None:
            # Slice from "None" is equivalent to slice from 0 (but one
            # might want to downsample, so allow slices with
            # None,None,step or None,stop,step)
            iview = slice(0, iview.stop, iview.step)
        if iview.start is not None:
            if iview.step not in (None, 1):
                # Strided slice: rescale the reference pixel and pixel
                # size; distortion tables are rescaled consistently.
                crpix = self.wcs.crpix[wcs_index]
                cdelt = self.wcs.cdelt[wcs_index]
                # equivalently (keep this comment so you can compare eqns):
                # wcs_new.wcs.crpix[wcs_index] =
                # (crpix - iview.start)*iview.step + 0.5 - iview.step/2.
                scale_pixel = lambda px: (
                    (px - iview.start - 1.0) / iview.step
                    + 0.5
                    + 1.0 / iview.step / 2.0
                )
                crp = scale_pixel(crpix)
                wcs_new.wcs.crpix[wcs_index] = crp
                if wcs_new.sip is not None:
                    sip_crpix[wcs_index] = crp
                for table in distortion_tables:
                    # The table's crval (which is an image pixel location)
                    # should be adjusted to the corresponding location in
                    # the sliced array
                    table.crval[wcs_index] = scale_pixel(table.crval[wcs_index])
                    # And its cdelt (with units image pixels / distortion
                    # table pixel) should reflect the stride
                    table.cdelt[wcs_index] /= iview.step
                for table in itables:
                    # If we stride an x axis, for example, x distortions
                    # should be adjusted in magnitude
                    table.data /= iview.step
                wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step
            else:
                # Plain offset slice: just shift the reference pixel.
                wcs_new.wcs.crpix[wcs_index] -= iview.start
                if wcs_new.sip is not None:
                    sip_crpix[wcs_index] -= iview.start
                for table in distortion_tables:
                    table.crval[wcs_index] -= iview.start
        try:
            # range requires integers but the other attributes can also
            # handle arbitrary values, so this needs to be in a try/except.
            nitems = len(builtins.range(self._naxis[wcs_index])[iview])
        except TypeError as exc:
            if "indices must be integers" not in str(exc):
                raise
            warnings.warn(
                f"NAXIS{wcs_index} attribute is not updated because at "
                f"least one index ('{iview}') is no integer.",
                AstropyUserWarning,
            )
        else:
            wcs_new._naxis[wcs_index] = nitems
    if wcs_new.sip is not None:
        # Rebuild the SIP object so it carries the shifted/rescaled crpix.
        wcs_new.sip = Sip(
            self.sip.a, self.sip.b, self.sip.ap, self.sip.bp, sip_crpix
        )
    return wcs_new
def __getitem__(self, item):
    """Shortcut for :meth:`slice` using numpy-style indexing."""
    # "getitem" is a shortcut for self.slice; it is very limited
    # there is no obvious and unambiguous interpretation of wcs[1,2,3]
    # We COULD allow wcs[1] to link to wcs.sub([2])
    # (wcs[i] -> wcs.sub([i+1])
    return self.slice(item)
def __iter__(self):
    # Having __getitem__ makes Python think WCS is iterable. However,
    # Python first checks whether __iter__ is present, so we can raise an
    # exception here.
    raise TypeError(f"'{self.__class__.__name__}' object is not iterable")
@property
def axis_type_names(self):
    """
    World names for each coordinate axis.

    Returns
    -------
    list of str
        A list of names along each axis.
    """
    # Prefer the explicit CNAMEia value; when it is empty, fall back to
    # the algorithm-free part of CTYPEia (e.g. 'RA---TAN' -> 'RA').
    return [
        cname if cname else ctype.split("-")[0]
        for cname, ctype in zip(self.wcs.cname, self.wcs.ctype)
    ]
@property
def celestial(self):
    """
    A copy of the current WCS with only the celestial axes included.
    """
    return self.sub([WCSSUB_CELESTIAL])  # Defined by C-ext
@property
def is_celestial(self):
    # True only for a *purely* celestial two-axis WCS (e.g. an RA/Dec
    # image); a cube with celestial + spectral axes returns False.
    return self.has_celestial and self.naxis == 2
@property
def has_celestial(self):
    try:
        # lng/lat are wcslib's indices of the longitude/latitude axes;
        # a value >= 0 indicates the axis is present (negative appears
        # to be the "absent" sentinel — per wcslib convention).
        return self.wcs.lng >= 0 and self.wcs.lat >= 0
    except InconsistentAxisTypesError:
        # Inconsistent axis definitions mean no usable celestial pair.
        return False
@property
def spectral(self):
    """
    A copy of the current WCS with only the spectral axes included.
    """
    return self.sub([WCSSUB_SPECTRAL])  # Defined by C-ext
@property
def is_spectral(self):
    # True only for a one-axis, purely spectral WCS.
    return self.has_spectral and self.naxis == 1
@property
def has_spectral(self):
    try:
        # ``spec`` is wcslib's index of the spectral axis; >= 0 means
        # one is present.
        return self.wcs.spec >= 0
    except InconsistentAxisTypesError:
        return False
@property
def temporal(self):
    """
    A copy of the current WCS with only the time axes included.
    """
    # Time-axis sub-selection needs a sufficiently new wcslib; fail
    # loudly rather than silently returning a wrong subset.
    if not _WCSSUB_TIME_SUPPORT:
        raise NotImplementedError(
            "Support for 'temporal' axis requires WCSLIB version 7.8 or "
            f"greater but linked WCSLIB version is {WCSLIB_VERSION}"
        )
    return self.sub([WCSSUB_TIME])  # Defined by C-ext
@property
def is_temporal(self):
    # True only for a one-axis, purely temporal WCS.
    return self.has_temporal and self.naxis == 1
@property
def has_temporal(self):
    # The thousands digit of each wcslib axis-type code encodes the
    # coordinate class; 4 marks a time axis.
    for axis_type in self.wcs.axis_types:
        if axis_type // 1000 == 4:
            return True
    return False
@property
def has_distortion(self):
    """
    Returns `True` if any distortion terms are present.
    """
    # A SIP polynomial or either lookup-table correction counts on its
    # own; the detector-to-image correction needs both of its tables.
    if self.sip is not None or self.cpdis1 is not None or self.cpdis2 is not None:
        return True
    return self.det2im1 is not None and self.det2im2 is not None
@property
def pixel_scale_matrix(self):
    # The pixel scale matrix is the CDELT diagonal combined with the PC
    # rotation matrix (or derived from the CD matrix when that
    # representation is used instead).
    try:
        cdelt = np.diag(self.wcs.get_cdelt())
        pc = self.wcs.get_pc()
    except InconsistentAxisTypesError:
        try:
            # for non-celestial axes, get_cdelt doesn't work
            with warnings.catch_warnings():
                # get_cdelt/get_pc failed, so fall back to the raw CD
                # matrix; suppress the resulting "cdelt ignored" warning.
                warnings.filterwarnings(
                    "ignore",
                    "cdelt will be ignored since cd is present",
                    RuntimeWarning,
                )
                cdelt = np.dot(self.wcs.cd, np.diag(self.wcs.cdelt))
        except AttributeError:
            # No CD matrix either; use the plain CDELT diagonal.
            cdelt = np.diag(self.wcs.cdelt)
        try:
            pc = self.wcs.pc
        except AttributeError:
            # No PC matrix: identity scaling (scalar 1 broadcasts in dot).
            pc = 1
    pccd = np.dot(cdelt, pc)
    return pccd
@property
def preserve_units(self):
    """
    Indicates whether the ``WCS`` class is preserving the original units.

    If `True`, units are always kept as specified, whereas is `False`,
    units will in some cases be converted to SI/degrees - for example units
    for celestial axes are converted to degrees, spectral frequencies to
    Hz, and wavelengths to meters.
    """
    # Read-only view of the private flag (set at construction time —
    # the initializer is outside this chunk; verify there).
    return self._preserve_units
def footprint_contains(self, coord, **kwargs):
    """
    Determines if a given SkyCoord is contained in the wcs footprint.

    Parameters
    ----------
    coord : `~astropy.coordinates.SkyCoord`
        The coordinate to check if it is within the wcs coordinate.
    **kwargs :
        Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`

    Returns
    -------
    response : bool
        True means the WCS footprint contains the coordinate, False means it does not.
    """
    # The actual containment test lives on SkyCoord; this is a thin
    # convenience wrapper around it.
    return coord.contained_by(self, **kwargs)
def __WCS_unpickle__(cls, dct, fits_data):
    """
    Unpickles a WCS object from a serialized FITS string.
    """
    # Bypass __init__ for now; we re-run it below once the header and
    # keyword arguments have been reconstructed.
    self = cls.__new__(cls)
    buffer = io.BytesIO(fits_data)
    hdulist = fits.open(buffer)
    # Restore NAXIS keywords, which are stored separately in the pickled
    # state dict rather than in the serialized header.
    naxis = dct.pop("naxis", None)
    if naxis:
        hdulist[0].header["naxis"] = naxis
    naxes = dct.pop("_naxis", [])
    for k, na in enumerate(naxes):
        hdulist[0].header[f"naxis{k + 1:d}"] = na
    kwargs = dct.pop("_init_kwargs", {})
    self.__dict__.update(dct)
    # NOTE(review): "_alt_wcskey" is popped *after* the __dict__ update, so
    # a stale "_alt_wcskey" entry remains on the instance. Presumably
    # harmless, but worth confirming against __reduce__.
    wcskey = dct.pop("_alt_wcskey", " ")
    WCS.__init__(self, hdulist[0].header, hdulist, key=wcskey, **kwargs)
    self.pixel_bounds = dct.get("_pixel_bounds", None)
    return self
def find_all_wcs(
    header, relax=True, keysel=None, fix=True, translate_units="", _do_set=True
):
    """
    Find all the WCS transformations in the given header.

    Parameters
    ----------
    header : str or `~astropy.io.fits.Header` object.

    relax : bool or int, optional
        Degree of permissiveness:

        - `True` (default): Admit all recognized informal extensions of the
          WCS standard.

        - `False`: Recognize only FITS keywords defined by the
          published WCS standard.

        - `int`: a bit field selecting specific extensions to accept.
          See :ref:`astropy:relaxread` for details.

    keysel : sequence of str, optional
        A list of flags used to select the keyword types considered by
        wcslib. When ``None``, only the standard image header
        keywords are considered (and the underlying wcspih() C
        function is called). To use binary table image array or pixel
        list keywords, *keysel* must be set.

        Each element in the list should be one of the following strings:

        - 'image': Image header keywords

        - 'binary': Binary table image array keywords

        - 'pixel': Pixel list keywords

        Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
        binary table image arrays and pixel lists (including
        ``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
        'pixel'.

    fix : bool, optional
        When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
        the resulting objects to fix any non-standard uses in the
        header. `FITSFixedWarning` warnings will be emitted if any
        changes were made.

    translate_units : str, optional
        Specify which potentially unsafe translations of non-standard
        unit strings to perform. By default, performs none. See
        `WCS.fix` for more information about this parameter. Only
        effective when ``fix`` is `True`.

    Returns
    -------
    wcses : list of `WCS`
    """
    if isinstance(header, (str, bytes)):
        header_string = header
    elif isinstance(header, fits.Header):
        header_string = header.tostring()
    else:
        raise TypeError("header must be a string or astropy.io.fits.Header object")

    keysel_flags = _parse_keysel(keysel)

    # The C parser operates on raw bytes; FITS headers are ASCII by spec.
    if isinstance(header_string, str):
        header_bytes = header_string.encode("ascii")
    else:
        header_bytes = header_string

    wcsprms = find_all_wcs_c(header_bytes, relax, keysel_flags)

    result = []
    for wcsprm in wcsprms:
        # Wrap each low-level wcsprm in a high-level WCS object; fix/set
        # are deferred so they can be applied with the caller's options.
        subresult = WCS(fix=False, _do_set=False)
        subresult.wcs = wcsprm
        result.append(subresult)

        if fix:
            subresult.fix(translate_units)

        if _do_set:
            subresult.wcs.set()

    return result
def validate(source):
    """
    Prints a WCS validation report for the given FITS file.

    Parameters
    ----------
    source : str or file-like or `~astropy.io.fits.HDUList`
        The FITS file to validate.

    Returns
    -------
    results : list subclass instance
        The result is returned as nested lists. The first level
        corresponds to the HDUs in the given file. The next level has
        an entry for each WCS found in that header. The special
        subclass of list will pretty-print the results as a table when
        printed.
    """

    # Messages for a single WCS (identified by its alternate key),
    # pretty-printed as a wrapped, indented bullet list.
    class _WcsValidateWcsResult(list):
        def __init__(self, key):
            self._key = key

        def __repr__(self):
            result = [f" WCS key '{self._key or ' '}':"]
            if len(self):
                for entry in self:
                    for i, line in enumerate(entry.splitlines()):
                        if i == 0:
                            initial_indent = " - "
                        else:
                            initial_indent = " "
                        result.extend(
                            textwrap.wrap(
                                line,
                                initial_indent=initial_indent,
                                subsequent_indent=" ",
                            )
                        )
            else:
                result.append(" No issues.")
            return "\n".join(result)

    # All per-WCS results for one HDU; renders as an empty string when
    # the HDU produced no entries so it is skipped by the top level.
    class _WcsValidateHduResult(list):
        def __init__(self, hdu_index, hdu_name):
            self._hdu_index = hdu_index
            self._hdu_name = hdu_name
            list.__init__(self)

        def __repr__(self):
            if len(self):
                if self._hdu_name:
                    hdu_name = f" ({self._hdu_name})"
                else:
                    hdu_name = ""
                result = [f"HDU {self._hdu_index}{hdu_name}:"]
                for wcs in self:
                    result.append(repr(wcs))
                return "\n".join(result)
            return ""

    # Top-level container: one entry per HDU, blank-line separated.
    class _WcsValidateResults(list):
        def __repr__(self):
            result = []
            for hdu in self:
                content = repr(hdu)
                if content:
                    result.append(content)
            return "\n\n".join(result)

    global __warningregistry__

    if isinstance(source, fits.HDUList):
        hdulist = source
        close_file = False
    else:
        hdulist = fits.open(source)
        close_file = True

    results = _WcsValidateResults()

    for i, hdu in enumerate(hdulist):
        hdu_results = _WcsValidateHduResult(i, hdu.name)
        results.append(hdu_results)

        # Discovery pass: find every WCS in the header, silencing any
        # warnings it raises (they are re-collected per WCS below).
        with warnings.catch_warnings(record=True) as warning_lines:
            wcses = find_all_wcs(
                hdu.header, relax=WCSHDR_reject, fix=False, _do_set=False
            )

        for wcs in wcses:
            wcs_results = _WcsValidateWcsResult(wcs.wcs.alt)
            hdu_results.append(wcs_results)

            # Clearing the module warning registry appears to force
            # already-seen warnings to fire again for each WCS checked.
            try:
                del __warningregistry__
            except NameError:
                pass

            with warnings.catch_warnings(record=True) as warning_lines:
                warnings.resetwarnings()
                warnings.simplefilter("always", FITSFixedWarning, append=True)

                try:
                    # Re-parse this single WCS with fix=True so that all
                    # FITSFixedWarning messages are captured for the report.
                    WCS(
                        hdu.header,
                        hdulist,
                        key=wcs.wcs.alt or " ",
                        relax=WCSHDR_reject,
                        fix=True,
                        _do_set=False,
                    )
                except WcsError as e:
                    wcs_results.append(str(e))

                wcs_results.extend([str(x.message) for x in warning_lines])

    if close_file:
        hdulist.close()

    return results
| WCS |
python | Farama-Foundation__Gymnasium | gymnasium/wrappers/transform_reward.py | {
"start": 1817,
"end": 3540
} | class ____(TransformReward[ObsType, ActType], gym.utils.RecordConstructorArgs):
"""Clips the rewards for an environment between an upper and lower bound.
A vector version of the wrapper exists :class:`gymnasium.wrappers.vector.ClipReward`.
Example:
>>> import gymnasium as gym
>>> from gymnasium.wrappers import ClipReward
>>> env = gym.make("CartPole-v1")
>>> env = ClipReward(env, 0, 0.5)
>>> _ = env.reset()
>>> _, rew, _, _, _ = env.step(1)
>>> rew
np.float64(0.5)
Change logs:
* v1.0.0 - Initially added
"""
def __init__(
self,
env: gym.Env[ObsType, ActType],
min_reward: float | np.ndarray | None = None,
max_reward: float | np.ndarray | None = None,
):
"""Initialize ClipRewards wrapper.
Args:
env (Env): The environment to wrap
min_reward (Union[float, np.ndarray]): lower bound to apply
max_reward (Union[float, np.ndarray]): higher bound to apply
"""
if min_reward is None and max_reward is None:
raise InvalidBound("Both `min_reward` and `max_reward` cannot be None")
elif max_reward is not None and min_reward is not None:
if np.any(max_reward - min_reward < 0):
raise InvalidBound(
f"Min reward ({min_reward}) must be smaller than max reward ({max_reward})"
)
gym.utils.RecordConstructorArgs.__init__(
self, min_reward=min_reward, max_reward=max_reward
)
TransformReward.__init__(
self, env=env, func=lambda x: np.clip(x, a_min=min_reward, a_max=max_reward)
)
| ClipReward |
python | zarr-developers__zarr-python | tests/test_store/test_wrapper.py | {
"start": 759,
"end": 4566
} | class ____(StoreTests[WrapperStore[Any], Buffer]):
store_cls = WrapperStore
buffer_cls = CPUBuffer
async def get(self, store: WrapperStore[LocalStore], key: str) -> Buffer:
return self.buffer_cls.from_bytes((store._store.root / key).read_bytes())
async def set(self, store: WrapperStore[LocalStore], key: str, value: Buffer) -> None:
parent = (store._store.root / key).parent
if not parent.exists():
parent.mkdir(parents=True)
(store._store.root / key).write_bytes(value.to_bytes())
@pytest.fixture
def store_kwargs(self, tmp_path: Path) -> StoreKwargs:
return {"store": LocalStore(str(tmp_path))}
@pytest.fixture
def open_kwargs(self, tmp_path: Path) -> OpenKwargs:
return {"store_cls": LocalStore, "root": str(tmp_path)}
def test_store_supports_writes(self, store: WrapperStore[LocalStore]) -> None:
assert store.supports_writes
def test_store_supports_listing(self, store: WrapperStore[LocalStore]) -> None:
assert store.supports_listing
def test_store_repr(self, store: WrapperStore[LocalStore]) -> None:
assert f"{store!r}" == f"WrapperStore(LocalStore, 'file://{store._store.root.as_posix()}')"
def test_store_str(self, store: WrapperStore[LocalStore]) -> None:
assert str(store) == f"wrapping-file://{store._store.root.as_posix()}"
def test_check_writeable(self, store: WrapperStore[LocalStore]) -> None:
"""
Test _check_writeable() runs without errors.
"""
store._check_writable()
def test_close(self, store: WrapperStore[LocalStore]) -> None:
"Test store can be closed"
store.close()
assert not store._is_open
def test_is_open_setter_raises(self, store: WrapperStore[LocalStore]) -> None:
"""
Test that a user cannot change `_is_open` without opening the underlying store.
"""
with pytest.raises(
NotImplementedError, match="WrapperStore must be opened via the `_open` method"
):
store._is_open = True
# TODO: work out where warning is coming from and fix
@pytest.mark.filterwarnings(
"ignore:coroutine 'ClientCreatorContext.__aexit__' was never awaited:RuntimeWarning"
)
@pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=True)
async def test_wrapped_set(store: Store, capsys: pytest.CaptureFixture[str]) -> None:
# define a class that prints when it sets
class NoisySetter(WrapperStore[Store]):
async def set(self, key: str, value: Buffer) -> None:
print(f"setting {key}")
await super().set(key, value)
key = "foo"
value = CPUBuffer.from_bytes(b"bar")
store_wrapped = NoisySetter(store)
await store_wrapped.set(key, value)
captured = capsys.readouterr()
assert f"setting {key}" in captured.out
assert await store_wrapped.get(key, buffer_prototype) == value
@pytest.mark.filterwarnings("ignore:Unclosed client session:ResourceWarning")
@pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=True)
async def test_wrapped_get(store: Store, capsys: pytest.CaptureFixture[str]) -> None:
# define a class that prints when it sets
class NoisyGetter(WrapperStore[Any]):
async def get(
self, key: str, prototype: BufferPrototype, byte_range: ByteRequest | None = None
) -> None:
print(f"getting {key}")
await super().get(key, prototype=prototype, byte_range=byte_range)
key = "foo"
value = CPUBuffer.from_bytes(b"bar")
store_wrapped = NoisyGetter(store)
await store_wrapped.set(key, value)
await store_wrapped.get(key, buffer_prototype)
captured = capsys.readouterr()
assert f"getting {key}" in captured.out
| TestWrapperStore |
python | kamyu104__LeetCode-Solutions | Python/magic-squares-in-grid.py | {
"start": 34,
"end": 1158
} | class ____(object):
def numMagicSquaresInside(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
def magic(grid, r, c):
expect = k * (k**2+1) // 2
nums = set()
min_num = float("inf")
sum_diag, sum_anti = 0, 0
for i in xrange(k):
sum_diag += grid[r+i][c+i]
sum_anti += grid[r+i][c+k-1-i]
sum_r, sum_c = 0, 0
for j in xrange(k):
min_num = min(min_num, grid[r+i][c+j])
nums.add(grid[r+i][c+j])
sum_r += grid[r+i][c+j]
sum_c += grid[r+j][c+i]
if not (sum_r == sum_c == expect):
return False
return sum_diag == sum_anti == expect and \
len(nums) == k**2 and \
min_num == 1
k = 3
result = 0
for r in xrange(len(grid)-k+1):
for c in xrange(len(grid[r])-k+1):
if magic(grid, r, c):
result += 1
return result
| Solution |
python | ray-project__ray | rllib/policy/tests/test_timesteps.py | {
"start": 199,
"end": 1878
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init()
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_timesteps(self):
"""Test whether PG can be built with both frameworks."""
config = (
ppo.PPOConfig()
.api_stack(
enable_env_runner_and_connector_v2=False,
enable_rl_module_and_learner=False,
)
.experimental(_disable_preprocessor_api=True)
.environment(RandomEnv)
.env_runners(num_env_runners=0)
.training(
model={
"fcnet_hiddens": [1],
"fcnet_activation": None,
}
)
)
obs = np.array(1)
obs_batch = np.array([1])
algo = config.build()
policy = algo.get_policy()
for i in range(1, 21):
algo.compute_single_action(obs)
check(int(policy.global_timestep), i)
for i in range(1, 21):
policy.compute_actions(obs_batch)
check(int(policy.global_timestep), i + 20)
# Artificially set ts to 100Bio, then keep computing actions and
# train.
crazy_timesteps = int(1e11)
policy.on_global_var_update({"timestep": crazy_timesteps})
# Run for 10 more ts.
for i in range(1, 11):
policy.compute_actions(obs_batch)
check(int(policy.global_timestep), i + crazy_timesteps)
algo.train()
algo.stop()
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestTimeSteps |
python | huggingface__transformers | src/transformers/models/diffllama/modeling_diffllama.py | {
"start": 3149,
"end": 8619
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: DiffLlamaConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[DiffLlamaConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def lambda_init_fn(layer_idx):
return 0.8 - 0.6 * math.exp(-0.3 * layer_idx)
| DiffLlamaRotaryEmbedding |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_query.py | {
"start": 42674,
"end": 43736
} | class ____(fixtures.TestBase):
__only_on__ = "postgresql"
__backend__ = True
def test_tuple_containment(self, connection):
for test, exp in [
([("a", "b")], True),
([("a", "c")], False),
([("f", "q"), ("a", "b")], True),
([("f", "q"), ("a", "c")], False),
]:
eq_(
connection.execute(
select(
tuple_(
literal_column("'a'"), literal_column("'b'")
).in_(
[
tuple_(
*[
literal_column("'%s'" % letter)
for letter in elem
]
)
for elem in test
]
)
)
).scalar(),
exp,
)
| TupleTest |
python | pypa__warehouse | tests/unit/utils/test_paginate.py | {
"start": 1314,
"end": 1592
} | class ____:
def __init__(self, fake):
self.fake = fake
self.range = slice(None)
def __getitem__(self, range):
self.range = range
return self
def execute(self):
return FakeResult(self.fake[self.range], len(self.fake))
| FakeQuery |
python | weaviate__weaviate-python-client | weaviate/collections/classes/grpc.py | {
"start": 8087,
"end": 9617
} | class ____:
"""Define how the BM25 query's token matching should be performed."""
def __init__(self) -> None:
raise TypeError("BM25Operator cannot be instantiated. Use the static methods to create.")
@staticmethod
def or_(minimum_match: int) -> BM25OperatorOptions:
"""Use the 'Or' operator for keyword queries, where at least a minimum number of tokens must match.
Note that the query is tokenized using the respective tokenization method of each property.
Args:
minimum_match: The minimum number of keyword tokens (excluding stopwords) that must match for an object to be considered a match.
"""
return BM25OperatorOr(minimum_should_match=minimum_match)
@staticmethod
def and_() -> BM25OperatorOptions:
"""Use the 'And' operator for keyword queries, where all query tokens must match.
Note that the query is tokenized using the respective tokenization method of each property.
"""
return BM25OperatorAnd()
OneDimensionalVectorType = Sequence[NUMBER]
"""Represents a one-dimensional vector, e.g. one produced by the `Configure.Vectors.text2vec_jinaai()` module"""
TwoDimensionalVectorType = Sequence[Sequence[NUMBER]]
"""Represents a two-dimensional vector, e.g. one produced by the `Configure.MultiVectors.text2vec_jinaai()` module"""
PrimitiveVectorType = Union[OneDimensionalVectorType, TwoDimensionalVectorType]
V = TypeVar("V", OneDimensionalVectorType, TwoDimensionalVectorType)
| BM25OperatorFactory |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/base.py | {
"start": 24310,
"end": 25958
} | class ____(_MappedAnnotationBase[_T_co]):
"""Represent the ORM mapped attribute type for a "dynamic" relationship.
The :class:`_orm.DynamicMapped` type annotation may be used in an
:ref:`Annotated Declarative Table <orm_declarative_mapped_column>` mapping
to indicate that the ``lazy="dynamic"`` loader strategy should be used
for a particular :func:`_orm.relationship`.
.. legacy:: The "dynamic" lazy loader strategy is the legacy form of what
is now the "write_only" strategy described in the section
:ref:`write_only_relationship`.
E.g.::
class User(Base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
addresses: DynamicMapped[Address] = relationship(
cascade="all,delete-orphan"
)
See the section :ref:`dynamic_relationship` for background.
.. versionadded:: 2.0
.. seealso::
:ref:`dynamic_relationship` - complete background
:class:`.WriteOnlyMapped` - fully 2.0 style version
"""
__slots__ = ()
if TYPE_CHECKING:
@overload
def __get__(
self, instance: None, owner: Any
) -> InstrumentedAttribute[_T_co]: ...
@overload
def __get__(
self, instance: object, owner: Any
) -> AppenderQuery[_T_co]: ...
def __get__(
self, instance: Optional[object], owner: Any
) -> Union[InstrumentedAttribute[_T_co], AppenderQuery[_T_co]]: ...
def __set__(
self, instance: Any, value: typing.Collection[_T_co]
) -> None: ...
| DynamicMapped |
python | pytorch__pytorch | benchmarks/functional_autograd_benchmark/torchaudio_models.py | {
"start": 4844,
"end": 6003
} | class ____(nn.Module):
def __init__(self, seq_module):
"""
Adds padding to the output of the module based on the given lengths. This is to ensure that the
results of the model do not change when batch sizes change during inference.
Input needs to be in the shape of (BxCxDxT)
:param seq_module: The sequential module containing the conv stack.
"""
super().__init__()
self.seq_module = seq_module
def forward(self, x, lengths):
"""
:param x: The input of size BxCxDxT
:param lengths: The actual length of each sequence in the batch
:return: Masked output from the module
"""
for module in self.seq_module:
x = module(x)
mask = torch.BoolTensor(x.size()).fill_(0)
if x.is_cuda:
mask = mask.cuda()
for i, length in enumerate(lengths):
length = length.item()
if (mask[i].size(2) - length) > 0:
mask[i].narrow(2, length, mask[i].size(2) - length).fill_(1)
x = x.masked_fill(mask, 0)
return x, lengths
| MaskConv |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/general_tests/grpc_tests/state_versions/sample_state_backed_component.py | {
"start": 315,
"end": 1207
} | class ____(StateBackedComponent, dg.Model, dg.Resolvable):
def build_defs_from_state(
self, context: dg.ComponentLoadContext, state_path: Optional[Path]
) -> dg.Definitions:
assert state_path is not None
with open(state_path) as f:
state = f.read()
assert state == "hi"
@dg.asset(name=state)
def the_asset(): ...
return dg.Definitions(assets=[the_asset])
@property
def defs_state_config(self) -> DefsStateConfig:
return DefsStateConfig(
key=self.__class__.__name__,
management_type=DefsStateManagementType.VERSIONED_STATE_STORAGE,
refresh_if_dev=True,
)
async def write_state_to_path(self, state_path: Path) -> None:
# for the tests that use this, we're going to manually do this outside of the component
pass
| SampleStateBackedComponent |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramNames1.py | {
"start": 114,
"end": 1598
} | class ____:
# This should generate an error or warning if the setting
# is enabled because __new__ is expected to take cls.
def __new__(blah):
return super().__new__(blah)
# This should generate an error or warning if the setting
# is enabled because it's missing a "self" parameter.
def foo1():
return 3
# This should generate an error or warning if the setting
# is enabled because "self" is misspelled.
def foo2(seeeelf):
return 4
# This should generate an error or warning if the setting
# is enabled because "self" is misspelled.
def foo3(cls):
return 4
@classmethod
def foo4(cls):
return 4
@classmethod
# This should generate an error or warning if the setting
# is enabled because "cls" is expected.
def foo5(self):
return 4
@overload
# This should generate an error or warning if the setting
# is enabled because "self" is expected.
def foo6(x: "Class1") -> int: ...
@overload
# This should generate an error or warning if the setting
# is enabled because "self" is expected.
def foo6(x: int) -> str: ...
# This should generate an error or warning if the setting
# is enabled because "self" is expected.
def foo6(x) -> int | str: ...
@classmethod
# This should generate an error or warning if the setting
# is enabled because this isn't a metaclass.
def foo7(mcls):
return 4
| Class1 |
python | numba__numba | numba/tests/test_np_randomgen.py | {
"start": 53649,
"end": 54301
} | class ____(TestCase, SerialMixin):
def test_randomgen_caching(self):
nb_rng = np.random.default_rng(1)
np_rng = np.random.default_rng(1)
numba_func = numba.njit(lambda x: x.random(10), cache=True)
self.assertPreciseEqual(np_rng.random(10), numba_func(nb_rng))
# Run the function twice to make sure caching doesn't break anything.
self.assertPreciseEqual(np_rng.random(10), numba_func(nb_rng))
# Check that the function can be retrieved successfully from the cache.
res = run_in_new_process_caching(test_generator_caching)
self.assertEqual(res['exitcode'], 0)
| TestGeneratorCaching |
python | ansible__ansible | test/lib/ansible_test/_internal/host_configs.py | {
"start": 14968,
"end": 15114
} | class ____(InventoryConfig, NetworkConfig):
"""Configuration for network hosts using inventory."""
@dataclasses.dataclass
| NetworkInventoryConfig |
python | encode__django-rest-framework | tests/schemas/test_coreapi.py | {
"start": 44085,
"end": 45960
} | class ____(TestCase):
def setUp(self):
self.patterns = [
path('excluded-cbv/', ExcludedAPIView.as_view()),
path('excluded-fbv/', excluded_fbv),
path('included-fbv/', included_fbv),
]
def test_schema_generator_excludes_correctly(self):
"""Schema should not include excluded views"""
generator = SchemaGenerator(title='Exclusions', patterns=self.patterns)
schema = generator.get_schema()
expected = coreapi.Document(
url='',
title='Exclusions',
content={
'included-fbv': {
'list': coreapi.Link(url='/included-fbv/', action='get')
}
}
)
assert len(schema.data) == 1
assert 'included-fbv' in schema.data
assert schema == expected
def test_endpoint_enumerator_excludes_correctly(self):
"""It is responsibility of EndpointEnumerator to exclude views"""
inspector = EndpointEnumerator(self.patterns)
endpoints = inspector.get_api_endpoints()
assert len(endpoints) == 1
path, method, callback = endpoints[0]
assert path == '/included-fbv/'
def test_should_include_endpoint_excludes_correctly(self):
"""This is the specific method that should handle the exclusion"""
inspector = EndpointEnumerator(self.patterns)
# Not pretty. Mimics internals of EndpointEnumerator to put should_include_endpoint under test
pairs = [(inspector.get_path_from_regex(pattern.pattern.regex.pattern), pattern.callback)
for pattern in self.patterns]
should_include = [
inspector.should_include_endpoint(*pair) for pair in pairs
]
expected = [False, False, True]
assert should_include == expected
| SchemaGenerationExclusionTests |
python | great-expectations__great_expectations | great_expectations/execution_engine/execution_engine.py | {
"start": 2114,
"end": 2298
} | class ____(ValueError):
def __init__(self, condition: Condition):
super().__init__(f"Invalid condition type: {type(condition)}")
@dataclass(frozen=True)
| InvalidConditionError |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/external.py | {
"start": 32230,
"end": 37079
} | class ____:
def __init__(self, schedule_snap: ScheduleSnap, handle: RepositoryHandle):
self._schedule_snap = check.inst_param(schedule_snap, "schedule_snap", ScheduleSnap)
self._handle = InstigatorHandle(
self._schedule_snap.name,
check.inst_param(handle, "handle", RepositoryHandle),
)
@property
def name(self) -> str:
return self._schedule_snap.name
@property
def cron_schedule(self) -> Union[str, Sequence[str]]:
return self._schedule_snap.cron_schedule
@property
def execution_timezone(self) -> Optional[str]:
return self._schedule_snap.execution_timezone
@property
def op_selection(self) -> Optional[Sequence[str]]:
return self._schedule_snap.op_selection
@property
def job_name(self) -> str:
return self._schedule_snap.job_name
@property
def asset_selection(self) -> Optional[AssetSelection]:
return self._schedule_snap.asset_selection
@property
def mode(self) -> Optional[str]:
return self._schedule_snap.mode
@property
def description(self) -> Optional[str]:
return self._schedule_snap.description
@property
def partition_set_name(self) -> Optional[str]:
return self._schedule_snap.partition_set_name
@property
def environment_vars(self) -> Optional[Mapping[str, str]]:
return self._schedule_snap.environment_vars
@property
def handle(self) -> InstigatorHandle:
return self._handle
@property
def tags(self) -> Mapping[str, str]:
return self._schedule_snap.tags
@property
def metadata(self) -> Mapping[str, MetadataValue]:
return self._schedule_snap.metadata
@property
def owners(self) -> Optional[Sequence[str]]:
return getattr(self._schedule_snap, "owners", None)
def get_remote_origin(self) -> RemoteInstigatorOrigin:
return self.handle.get_remote_origin()
def get_remote_origin_id(self) -> str:
return self.get_remote_origin().get_id()
@property
def selector(self) -> InstigatorSelector:
return InstigatorSelector(
location_name=self.handle.location_name,
repository_name=self.handle.repository_name,
name=self._schedule_snap.name,
)
@property
def schedule_selector(self) -> ScheduleSelector:
return ScheduleSelector(
location_name=self.handle.location_name,
repository_name=self.handle.repository_name,
schedule_name=self._schedule_snap.name,
)
@cached_property
def selector_id(self) -> str:
return create_snapshot_id(self.selector)
def get_compound_id(self) -> CompoundID:
return CompoundID(
remote_origin_id=self.get_remote_origin_id(),
selector_id=self.selector_id,
)
@property
def default_status(self) -> DefaultScheduleStatus:
return self._schedule_snap.default_status or DefaultScheduleStatus.STOPPED
def get_current_instigator_state(
self, stored_state: Optional["InstigatorState"]
) -> "InstigatorState":
from dagster._core.scheduler.instigation import (
InstigatorState,
InstigatorStatus,
ScheduleInstigatorData,
)
if self.default_status == DefaultScheduleStatus.RUNNING:
if stored_state:
return stored_state
return InstigatorState(
self.get_remote_origin(),
InstigatorType.SCHEDULE,
InstigatorStatus.DECLARED_IN_CODE,
ScheduleInstigatorData(self.cron_schedule, start_timestamp=None),
)
else:
# Ignore DECLARED_IN_CODE states in the DB if the default status
# isn't DefaultScheduleStatus.RUNNING - this would indicate that the schedule's
# default has been changed in code but there's still a lingering DECLARED_IN_CODE
# row in the database that can be ignored
if stored_state:
return (
stored_state.with_status(InstigatorStatus.STOPPED)
if stored_state.status == InstigatorStatus.DECLARED_IN_CODE
else stored_state
)
return InstigatorState(
self.get_remote_origin(),
InstigatorType.SCHEDULE,
InstigatorStatus.STOPPED,
ScheduleInstigatorData(self.cron_schedule, start_timestamp=None),
)
def execution_time_iterator(
self, start_timestamp: float, ascending: bool = True
) -> Iterator[datetime]:
return schedule_execution_time_iterator(
start_timestamp, self.cron_schedule, self.execution_timezone, ascending
)
| RemoteSchedule |
python | neetcode-gh__leetcode | python/0523-continuous-subarray-sum.py | {
"start": 157,
"end": 581
} | class ____:
def checkSubarraySum(self, nums: List[int], k: int) -> bool:
hashmap = {}
hashmap[0]=-1
summ=0
for i,j in enumerate(nums):
summ+=j
if summ%k in hashmap.keys():
if i-hashmap[summ%k]>=2:
return True
else:
continue
hashmap[summ%k]=i
return False
| Solution |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/util.py | {
"start": 10235,
"end": 11799
} | class ____(str):
"""
A string that compares equal to another string regardless of case,
unless it is quoted.
"""
def __init__(self, string: str):
# TODO: check if string is already a CaseInsensitiveString?
self._original = string
self._folded = (
string.casefold()
) # Using casefold instead of lower for better Unicode handling
self._quote_string = '"'
@override
def __eq__(self, other: CaseInsensitiveString | str | object):
# First check if it's another CaseInsensitiveString to avoid recursion
if isinstance(other, CaseInsensitiveString):
if self.is_quoted() or other.is_quoted():
return self._original == other._original
return self._folded == other._folded
# Handle mock ANY or similar objects that would claim equality with anything
# Only for non-CaseInsensitiveString objects to avoid recursion
if hasattr(other, "__eq__") and not isinstance(other, str) and other.__eq__(self):
return True
if self.is_quoted():
return self._original == str(other)
elif isinstance(other, str):
return self._folded == other.casefold()
else:
return False
def __hash__(self): # type: ignore[explicit-override] # FIXME
return hash(self._folded)
@override
def __str__(self) -> str:
return self._original
def is_quoted(self):
return self._original.startswith(self._quote_string)
| CaseInsensitiveString |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/encoders.py | {
"start": 7468,
"end": 8684
} | class ____(nn.Module):
def __init__(
self, height: int, width: int, initial_channels: int, output_size: int
):
super().__init__()
self.h_size = output_size
conv_1_hw = conv_output_shape((height, width), 8, 4)
conv_2_hw = conv_output_shape(conv_1_hw, 4, 2)
conv_3_hw = conv_output_shape(conv_2_hw, 3, 1)
self.final_flat = conv_3_hw[0] * conv_3_hw[1] * 64
self.conv_layers = nn.Sequential(
nn.Conv2d(initial_channels, 32, [8, 8], [4, 4]),
nn.LeakyReLU(),
nn.Conv2d(32, 64, [4, 4], [2, 2]),
nn.LeakyReLU(),
nn.Conv2d(64, 64, [3, 3], [1, 1]),
nn.LeakyReLU(),
)
self.dense = nn.Sequential(
linear_layer(
self.final_flat,
self.h_size,
kernel_init=Initialization.KaimingHeNormal,
kernel_gain=1.41, # Use ReLU gain
),
nn.LeakyReLU(),
)
def forward(self, visual_obs: torch.Tensor) -> torch.Tensor:
hidden = self.conv_layers(visual_obs)
hidden = hidden.reshape([-1, self.final_flat])
return self.dense(hidden)
| NatureVisualEncoder |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 398389,
"end": 399201
} | class ____(Request):
"""
Gets task information
:param task: Task ID
:type task: str
"""
_service = "tasks"
_action = "get_by_id"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {"task": {"description": "Task ID", "type": "string"}},
"required": ["task"],
"type": "object",
}
def __init__(self, task, **kwargs):
super(GetByIdRequest, self).__init__(**kwargs)
self.task = task
@schema_property("task")
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| GetByIdRequest |
python | pennersr__django-allauth | allauth/socialaccount/providers/yahoo/views.py | {
"start": 181,
"end": 995
} | class ____(OAuth2Adapter):
provider_id = "yahoo"
access_token_url = "https://api.login.yahoo.com/oauth2/get_token" # nosec
authorize_url = "https://api.login.yahoo.com/oauth2/request_auth"
profile_url = "https://api.login.yahoo.com/openid/v1/userinfo"
def complete_login(self, request, app, token, **kwargs):
headers = {"Authorization": "Bearer {0}".format(token.token)}
resp = (
get_adapter().get_requests_session().get(self.profile_url, headers=headers)
)
resp.raise_for_status()
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(YahooOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(YahooOAuth2Adapter)
| YahooOAuth2Adapter |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 83085,
"end": 83412
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = FunctionalConv2d()
def forward(self, x):
x = self.conv1(x)
return x
def get_example_inputs(self) -> tuple[Any, ...]:
return self.conv1.get_example_inputs()
| SingleLayerFunctionalConvModel |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/vertex_ai/test_generative_model.py | {
"start": 15004,
"end": 16970
} | class ____:
@mock.patch(VERTEX_AI_PATH.format("generative_model.GenerativeModelHook"))
def test_execute(self, mock_hook):
model_name = "gemini-1.5-pro-002"
system_instruction = """
You are an expert researcher. You always stick to the facts in the sources provided, and never make up new facts.
Now look at these research papers, and answer the following questions.
"""
contents = [
Part.from_uri(
"gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf",
mime_type="application/pdf",
),
Part.from_uri(
"gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf",
mime_type="application/pdf",
),
]
ttl_hours = 1
display_name = "test-example-cache"
with pytest.warns(AirflowProviderDeprecationWarning):
op = CreateCachedContentOperator(
task_id=TASK_ID,
project_id=GCP_PROJECT,
location=GCP_LOCATION,
model_name=model_name,
system_instruction=system_instruction,
contents=contents,
ttl_hours=ttl_hours,
display_name=display_name,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context={"ti": mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.create_cached_content.assert_called_once_with(
project_id=GCP_PROJECT,
location=GCP_LOCATION,
model_name=model_name,
system_instruction=system_instruction,
contents=contents,
ttl_hours=ttl_hours,
display_name=display_name,
)
| TestVertexAICreateCachedContentOperator |
python | encode__django-rest-framework | tests/test_api_client.py | {
"start": 4041,
"end": 4652
} | class ____(APIView):
def get(self, request):
return Response({
'method': request.method,
'query_params': _get_query_params(request)
})
def post(self, request):
if request.content_type:
content_type = request.content_type.split(';')[0]
else:
content_type = None
return Response({
'method': request.method,
'query_params': _get_query_params(request),
'data': _get_data(request),
'files': _get_files(request),
'content_type': content_type
})
| ListView |
python | pytorch__pytorch | test/test_cpp_extensions_aot.py | {
"start": 15881,
"end": 16596
} | class ____(common.TestCase):
def test_torch_library(self):
import torch_test_cpp_extension.torch_library # noqa: F401
def f(a: bool, b: bool):
return torch.ops.torch_library.logical_and(a, b)
self.assertTrue(f(True, True))
self.assertFalse(f(True, False))
self.assertFalse(f(False, True))
self.assertFalse(f(False, False))
s = torch.jit.script(f)
self.assertTrue(s(True, True))
self.assertFalse(s(True, False))
self.assertFalse(s(False, True))
self.assertFalse(s(False, False))
self.assertIn("torch_library::logical_and", str(s.graph))
if __name__ == "__main__":
common.run_tests()
| TestTorchLibrary |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.