language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/sentry_metrics/querying/visitors/query_condition.py | {
"start": 1737,
"end": 3230
} | class ____(QueryConditionVisitor[QueryCondition]):
"""
Visitor that recursively transforms all conditions to work on tags in the form `tags[x]`.
"""
def __init__(self, check_sentry_tags: bool):
self._check_sentry_tags = check_sentry_tags
def _visit_condition(self, condition: Condition) -> QueryCondition:
if not isinstance(condition.lhs, Column):
return condition
# We assume that all incoming conditions are on tags, since we do not allow filtering by project in the
# query filters.
tag_column = f"tags[{condition.lhs.name}]"
sentry_tag_column = f"sentry_tags[{condition.lhs.name}]"
if self._check_sentry_tags:
tag_column = f"tags[{condition.lhs.name}]"
# We might have tags across multiple nested structures such as `tags` and `sentry_tags` for this reason
# we want to emit a condition that spans both.
return BooleanCondition(
op=BooleanOp.OR,
conditions=[
Condition(lhs=Column(name=tag_column), op=condition.op, rhs=condition.rhs),
Condition(
lhs=Column(name=sentry_tag_column),
op=condition.op,
rhs=condition.rhs,
),
],
)
else:
return Condition(lhs=Column(name=tag_column), op=condition.op, rhs=condition.rhs)
| TagsTransformationVisitor |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/trajectory.py | {
"start": 1078,
"end": 2214
} | class ____:
@staticmethod
def get_name_at(index: int) -> AgentBufferKey:
"""
returns the name of the observation given the index of the observation
"""
return ObservationKeyPrefix.OBSERVATION, index
@staticmethod
def get_name_at_next(index: int) -> AgentBufferKey:
"""
returns the name of the next observation given the index of the observation
"""
return ObservationKeyPrefix.NEXT_OBSERVATION, index
@staticmethod
def from_buffer(batch: AgentBuffer, num_obs: int) -> List[np.array]:
"""
Creates the list of observations from an AgentBuffer
"""
result: List[np.array] = []
for i in range(num_obs):
result.append(batch[ObsUtil.get_name_at(i)])
return result
@staticmethod
def from_buffer_next(batch: AgentBuffer, num_obs: int) -> List[np.array]:
"""
Creates the list of next observations from an AgentBuffer
"""
result = []
for i in range(num_obs):
result.append(batch[ObsUtil.get_name_at_next(i)])
return result
| ObsUtil |
python | pydantic__pydantic | tests/test_pickle.py | {
"start": 5266,
"end": 7205
} | class ____(ImportableDataclass):
pass
def child_dataclass_factory() -> type:
class NonImportableChildDataclass(ImportableDataclass):
pass
return NonImportableChildDataclass
@pytest.mark.parametrize(
'dataclass_type,use_cloudpickle',
[
# Importable Pydantic dataclass can be pickled with either pickle or cloudpickle.
(ImportableDataclass, False),
(ImportableDataclass, True),
(ImportableChildDataclass, False),
(ImportableChildDataclass, True),
# Locally-defined Pydantic dataclass can only be pickled with cloudpickle.
pytest.param(dataclass_factory(), True, marks=cloudpickle_pypy_xfail),
(child_dataclass_factory(), True),
# Pydantic dataclass generated from builtin can only be pickled with cloudpickle.
pytest.param(pydantic.dataclasses.dataclass(ImportableBuiltinDataclass), True, marks=cloudpickle_pypy_xfail),
# Pydantic dataclass generated from locally-defined builtin can only be pickled with cloudpickle.
pytest.param(pydantic.dataclasses.dataclass(builtin_dataclass_factory()), True, marks=cloudpickle_pypy_xfail),
],
)
def test_pickle_dataclass(dataclass_type: type, use_cloudpickle: bool):
if use_cloudpickle:
dataclass_type = cloudpickle.loads(cloudpickle.dumps(dataclass_type))
else:
dataclass_type = pickle.loads(pickle.dumps(dataclass_type))
d = dataclass_type('1', '2.5')
assert d.a == 1
assert d.b == 2.5
if use_cloudpickle:
d = cloudpickle.loads(cloudpickle.dumps(d))
else:
d = pickle.loads(pickle.dumps(d))
assert d.a == 1
assert d.b == 2.5
d = dataclass_type(b=10, a=20)
assert d.a == 20
assert d.b == 10
if use_cloudpickle:
d = cloudpickle.loads(cloudpickle.dumps(d))
else:
d = pickle.loads(pickle.dumps(d))
assert d.a == 20
assert d.b == 10
| ImportableChildDataclass |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_select.py | {
"start": 25476,
"end": 29439
} | class ____(fixtures.TablesTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("y", Integer),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.some_table.insert(),
[
{"id": 1, "x": 1, "y": 2},
{"id": 2, "x": 2, "y": 3},
{"id": 3, "x": 3, "y": 4},
{"id": 4, "x": 4, "y": 5},
],
)
def _assert_result(self, select, result, params=None):
with config.db.connect() as conn:
eq_(conn.execute(select, params).fetchall(), result)
def test_plain_union(self):
table = self.tables.some_table
s1 = select(table).where(table.c.id == 2)
s2 = select(table).where(table.c.id == 3)
u1 = union(s1, s2)
self._assert_result(
u1.order_by(u1.selected_columns.id), [(2, 2, 3), (3, 3, 4)]
)
def test_select_from_plain_union(self):
table = self.tables.some_table
s1 = select(table).where(table.c.id == 2)
s2 = select(table).where(table.c.id == 3)
u1 = union(s1, s2).alias().select()
self._assert_result(
u1.order_by(u1.selected_columns.id), [(2, 2, 3), (3, 3, 4)]
)
@testing.requires.order_by_col_from_union
@testing.requires.parens_in_union_contained_select_w_limit_offset
def test_limit_offset_selectable_in_unions(self):
table = self.tables.some_table
s1 = select(table).where(table.c.id == 2).limit(1).order_by(table.c.id)
s2 = select(table).where(table.c.id == 3).limit(1).order_by(table.c.id)
u1 = union(s1, s2).limit(2)
self._assert_result(
u1.order_by(u1.selected_columns.id), [(2, 2, 3), (3, 3, 4)]
)
@testing.requires.parens_in_union_contained_select_wo_limit_offset
def test_order_by_selectable_in_unions(self):
table = self.tables.some_table
s1 = select(table).where(table.c.id == 2).order_by(table.c.id)
s2 = select(table).where(table.c.id == 3).order_by(table.c.id)
u1 = union(s1, s2).limit(2)
self._assert_result(
u1.order_by(u1.selected_columns.id), [(2, 2, 3), (3, 3, 4)]
)
def test_distinct_selectable_in_unions(self):
table = self.tables.some_table
s1 = select(table).where(table.c.id == 2).distinct()
s2 = select(table).where(table.c.id == 3).distinct()
u1 = union(s1, s2).limit(2)
self._assert_result(
u1.order_by(u1.selected_columns.id), [(2, 2, 3), (3, 3, 4)]
)
@testing.requires.parens_in_union_contained_select_w_limit_offset
def test_limit_offset_in_unions_from_alias(self):
table = self.tables.some_table
s1 = select(table).where(table.c.id == 2).limit(1).order_by(table.c.id)
s2 = select(table).where(table.c.id == 3).limit(1).order_by(table.c.id)
# this necessarily has double parens
u1 = union(s1, s2).alias()
self._assert_result(
u1.select().limit(2).order_by(u1.c.id), [(2, 2, 3), (3, 3, 4)]
)
def test_limit_offset_aliased_selectable_in_unions(self):
table = self.tables.some_table
s1 = (
select(table)
.where(table.c.id == 2)
.limit(1)
.order_by(table.c.id)
.alias()
.select()
)
s2 = (
select(table)
.where(table.c.id == 3)
.limit(1)
.order_by(table.c.id)
.alias()
.select()
)
u1 = union(s1, s2).limit(2)
self._assert_result(
u1.order_by(u1.selected_columns.id), [(2, 2, 3), (3, 3, 4)]
)
| CompoundSelectTest |
python | marshmallow-code__marshmallow | tests/test_context.py | {
"start": 605,
"end": 8390
} | class ____:
def test_context_load_dump(self):
class ContextField(fields.Integer):
def _serialize(self, value, attr, obj, **kwargs):
if (context := Context[dict].get(None)) is not None:
value *= context.get("factor", 1)
return super()._serialize(value, attr, obj, **kwargs)
def _deserialize(self, value, attr, data, **kwargs):
val = super()._deserialize(value, attr, data, **kwargs)
if (context := Context[dict].get(None)) is not None:
val *= context.get("factor", 1)
return val
class ContextSchema(Schema):
ctx_fld = ContextField()
ctx_schema = ContextSchema()
assert ctx_schema.load({"ctx_fld": 1}) == {"ctx_fld": 1}
assert ctx_schema.dump({"ctx_fld": 1}) == {"ctx_fld": 1}
with Context({"factor": 2}):
assert ctx_schema.load({"ctx_fld": 1}) == {"ctx_fld": 2}
assert ctx_schema.dump({"ctx_fld": 1}) == {"ctx_fld": 2}
def test_context_method(self):
owner = User("Joe")
blog = Blog(title="Joe Blog", user=owner)
serializer = UserContextSchema()
with Context({"blog": blog}):
data = serializer.dump(owner)
assert data["is_owner"] is True
nonowner = User("Fred")
data = serializer.dump(nonowner)
assert data["is_owner"] is False
def test_context_function(self):
owner = User("Fred")
blog = Blog("Killer Queen", user=owner)
collab = User("Brian")
blog.collaborators.append(collab)
with Context({"blog": blog}):
serializer = UserContextSchema()
data = serializer.dump(collab)
assert data["is_collab"] is True
noncollab = User("Foo")
data = serializer.dump(noncollab)
assert data["is_collab"] is False
def test_function_field_handles_bound_serializer(self):
class SerializeA:
def __call__(self, value):
return "value"
serialize = SerializeA()
# only has a function field
class UserFunctionContextSchema(Schema):
is_collab = fields.Function(serialize)
owner = User("Joe")
serializer = UserFunctionContextSchema()
data = serializer.dump(owner)
assert data["is_collab"] == "value"
def test_nested_fields_inherit_context(self):
class InnerSchema(Schema):
likes_bikes = fields.Function(lambda obj: "bikes" in Context.get()["info"])
class CSchema(Schema):
inner = fields.Nested(InnerSchema)
ser = CSchema()
with Context[dict]({"info": "i like bikes"}):
obj: dict[str, dict] = {"inner": {}}
result = ser.dump(obj)
assert result["inner"]["likes_bikes"] is True
# Regression test for https://github.com/marshmallow-code/marshmallow/issues/820
def test_nested_list_fields_inherit_context(self):
class InnerSchema(Schema):
foo = fields.Raw()
@validates("foo")
def validate_foo(self, value, **kwargs):
if "foo_context" not in Context[dict].get():
raise ValidationError("Missing context")
class OuterSchema(Schema):
bars = fields.List(fields.Nested(InnerSchema()))
inner = InnerSchema()
with Context({"foo_context": "foo"}):
assert inner.load({"foo": 42})
outer = OuterSchema()
with Context({"foo_context": "foo"}):
assert outer.load({"bars": [{"foo": 42}]})
# Regression test for https://github.com/marshmallow-code/marshmallow/issues/820
def test_nested_dict_fields_inherit_context(self):
class InnerSchema(Schema):
foo = fields.Raw()
@validates("foo")
def validate_foo(self, value, **kwargs):
if "foo_context" not in Context[dict].get():
raise ValidationError("Missing context")
class OuterSchema(Schema):
bars = fields.Dict(values=fields.Nested(InnerSchema()))
inner = InnerSchema()
with Context({"foo_context": "foo"}):
assert inner.load({"foo": 42})
outer = OuterSchema()
with Context({"foo_context": "foo"}):
assert outer.load({"bars": {"test": {"foo": 42}}})
# Regression test for https://github.com/marshmallow-code/marshmallow/issues/1404
def test_nested_field_with_unpicklable_object_in_context(self):
class Unpicklable:
def __deepcopy__(self, _):
raise NotImplementedError
class InnerSchema(Schema):
foo = fields.Raw()
class OuterSchema(Schema):
inner = fields.Nested(InnerSchema())
outer = OuterSchema()
obj = {"inner": {"foo": 42}}
with Context({"unp": Unpicklable()}):
assert outer.dump(obj)
def test_function_field_passed_serialize_with_context(self, user):
class Parent(Schema):
pass
field = fields.Function(
serialize=lambda obj: obj.name.upper() + Context.get()["key"]
)
field.parent = Parent()
with Context({"key": "BAR"}):
assert field.serialize("key", user) == "MONTYBAR"
def test_function_field_deserialization_with_context(self):
class Parent(Schema):
pass
field = fields.Function(
lambda x: None,
deserialize=lambda val: val.upper() + Context.get()["key"],
)
field.parent = Parent()
with Context({"key": "BAR"}):
assert field.deserialize("foo") == "FOOBAR"
def test_decorated_processors_with_context(self):
NumDictContext = Context[dict[int, int]]
class MySchema(Schema):
f_1 = fields.Integer()
f_2 = fields.Integer()
f_3 = fields.Integer()
f_4 = fields.Integer()
@pre_dump
def multiply_f_1(self, item, **kwargs):
item["f_1"] *= NumDictContext.get()[1]
return item
@pre_load
def multiply_f_2(self, data, **kwargs):
data["f_2"] *= NumDictContext.get()[2]
return data
@post_dump
def multiply_f_3(self, item, **kwargs):
item["f_3"] *= NumDictContext.get()[3]
return item
@post_load
def multiply_f_4(self, data, **kwargs):
data["f_4"] *= NumDictContext.get()[4]
return data
schema = MySchema()
with NumDictContext({1: 2, 2: 3, 3: 4, 4: 5}):
assert schema.dump({"f_1": 1, "f_2": 1, "f_3": 1, "f_4": 1}) == {
"f_1": 2,
"f_2": 1,
"f_3": 4,
"f_4": 1,
}
assert schema.load({"f_1": 1, "f_2": 1, "f_3": 1, "f_4": 1}) == {
"f_1": 1,
"f_2": 3,
"f_3": 1,
"f_4": 5,
}
def test_validates_schema_with_context(self):
class MySchema(Schema):
f_1 = fields.Integer()
f_2 = fields.Integer()
@validates_schema
def validate_schema(self, data, **kwargs):
if data["f_2"] != data["f_1"] * Context.get():
raise ValidationError("Fail")
schema = MySchema()
with Context(2):
schema.load({"f_1": 1, "f_2": 2})
with pytest.raises(ValidationError) as excinfo:
schema.load({"f_1": 1, "f_2": 3})
assert excinfo.value.messages["_schema"] == ["Fail"]
| TestContext |
python | pennersr__django-allauth | allauth/socialaccount/providers/twitter/provider.py | {
"start": 230,
"end": 1106
} | class ____(ProviderAccount):
def get_screen_name(self):
"""The screen name is the username of the Twitter account."""
return self.account.extra_data.get("screen_name")
def get_profile_url(self):
ret = None
screen_name = self.get_screen_name()
if screen_name:
ret = "https://x.com/" + screen_name
return ret
def get_avatar_url(self):
ret = None
profile_image_url = self.account.extra_data.get("profile_image_url")
if profile_image_url:
# Hmm, hack to get our hands on the large image. Not
# really documented, but seems to work.
ret = profile_image_url.replace("_normal", "")
return ret
def to_str(self):
screen_name = self.get_screen_name()
return screen_name or super(TwitterAccount, self).to_str()
| TwitterAccount |
python | openai__openai-python | src/openai/cli/_models.py | {
"start": 151,
"end": 491
} | class ____(_models.BaseModel):
if PYDANTIC_V1:
class Config(pydantic.BaseConfig): # type: ignore
extra: Any = pydantic.Extra.ignore # type: ignore
arbitrary_types_allowed: bool = True
else:
model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore", arbitrary_types_allowed=True)
| BaseModel |
python | gevent__gevent | src/greentest/3.9/test_httplib.py | {
"start": 70163,
"end": 74415
} | class ____(TestCase):
"""Test cases where a request includes a message body."""
def setUp(self):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket("")
self.conn.sock = self.sock
def get_headers_and_fp(self):
f = io.BytesIO(self.sock.data)
f.readline() # read the request line
message = client.parse_headers(f)
return message, f
def test_list_body(self):
# Note that no content-length is automatically calculated for
# an iterable. The request will fall back to send chunked
# transfer encoding.
cases = (
([b'foo', b'bar'], b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
((b'foo', b'bar'), b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
)
for body, expected in cases:
with self.subTest(body):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket('')
self.conn.request('PUT', '/url', body)
msg, f = self.get_headers_and_fp()
self.assertNotIn('Content-Type', msg)
self.assertNotIn('Content-Length', msg)
self.assertEqual(msg.get('Transfer-Encoding'), 'chunked')
self.assertEqual(expected, f.read())
def test_manual_content_length(self):
# Set an incorrect content-length so that we can verify that
# it will not be over-ridden by the library.
self.conn.request("PUT", "/url", "body",
{"Content-Length": "42"})
message, f = self.get_headers_and_fp()
self.assertEqual("42", message.get("content-length"))
self.assertEqual(4, len(f.read()))
def test_ascii_body(self):
self.conn.request("PUT", "/url", "body")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_latin1_body(self):
self.conn.request("PUT", "/url", "body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_bytes_body(self):
self.conn.request("PUT", "/url", b"body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_text_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "w") as f:
f.write("body")
with open(support.TESTFN) as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
# No content-length will be determined for files; the body
# will be sent using chunked transfer encoding instead.
self.assertIsNone(message.get("content-length"))
self.assertEqual("chunked", message.get("transfer-encoding"))
self.assertEqual(b'4\r\nbody\r\n0\r\n\r\n', f.read())
def test_binary_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "wb") as f:
f.write(b"body\xc1")
with open(support.TESTFN, "rb") as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("chunked", message.get("Transfer-Encoding"))
self.assertNotIn("Content-Length", message)
self.assertEqual(b'5\r\nbody\xc1\r\n0\r\n\r\n', f.read())
| RequestBodyTest |
python | pytorch__pytorch | torch/utils/tensorboard/writer.py | {
"start": 6443,
"end": 47533
} | class ____:
"""Writes entries directly to event files in the log_dir to be consumed by TensorBoard.
The `SummaryWriter` class provides a high-level API to create an event file
in a given directory and add summaries and events to it. The class updates the
file contents asynchronously. This allows a training program to call methods
to add data to the file directly from the training loop, without slowing down
training.
"""
def __init__(
self,
log_dir=None,
comment="",
purge_step=None,
max_queue=10,
flush_secs=120,
filename_suffix="",
) -> None:
"""Create a `SummaryWriter` that will write out events and summaries to the event file.
Args:
log_dir (str): Save directory location. Default is
runs/**CURRENT_DATETIME_HOSTNAME**, which changes after each run.
Use hierarchical folder structure to compare
between runs easily. e.g. pass in 'runs/exp1', 'runs/exp2', etc.
for each new experiment to compare across them.
comment (str): Comment log_dir suffix appended to the default
``log_dir``. If ``log_dir`` is assigned, this argument has no effect.
purge_step (int):
When logging crashes at step :math:`T+X` and restarts at step :math:`T`,
any events whose global_step larger or equal to :math:`T` will be
purged and hidden from TensorBoard.
Note that crashed and resumed experiments should have the same ``log_dir``.
max_queue (int): Size of the queue for pending events and
summaries before one of the 'add' calls forces a flush to disk.
Default is ten items.
flush_secs (int): How often, in seconds, to flush the
pending events and summaries to disk. Default is every two minutes.
filename_suffix (str): Suffix added to all event filenames in
the log_dir directory. More details on filename construction in
tensorboard.summary.writer.event_file_writer.EventFileWriter.
Examples::
from torch.utils.tensorboard import SummaryWriter
# create a summary writer with automatically generated folder name.
writer = SummaryWriter()
# folder location: runs/May04_22-14-54_s-MacBook-Pro.local/
# create a summary writer using the specified folder name.
writer = SummaryWriter("my_experiment")
# folder location: my_experiment
# create a summary writer with comment appended.
writer = SummaryWriter(comment="LR_0.1_BATCH_16")
# folder location: runs/May04_22-14-54_s-MacBook-Pro.localLR_0.1_BATCH_16/
"""
torch._C._log_api_usage_once("tensorboard.create.summarywriter")
if not log_dir:
import socket
from datetime import datetime
current_time = datetime.now().strftime("%b%d_%H-%M-%S")
log_dir = os.path.join(
"runs", current_time + "_" + socket.gethostname() + comment
)
self.log_dir = log_dir
self.purge_step = purge_step
self.max_queue = max_queue
self.flush_secs = flush_secs
self.filename_suffix = filename_suffix
# Initialize the file writers, but they can be cleared out on close
# and recreated later as needed.
self.file_writer = self.all_writers = None
self._get_file_writer()
# Create default bins for histograms, see generate_testdata.py in tensorflow/tensorboard
v = 1e-12
buckets = []
neg_buckets = []
while v < 1e20:
# pyrefly: ignore [bad-argument-type]
buckets.append(v)
# pyrefly: ignore [bad-argument-type]
neg_buckets.append(-v)
v *= 1.1
self.default_bins = neg_buckets[::-1] + [0] + buckets
def _get_file_writer(self):
"""Return the default FileWriter instance. Recreates it if closed."""
if self.all_writers is None or self.file_writer is None:
# pyrefly: ignore [bad-assignment]
self.file_writer = FileWriter(
self.log_dir, self.max_queue, self.flush_secs, self.filename_suffix
)
# pyrefly: ignore [bad-assignment, missing-attribute]
self.all_writers = {self.file_writer.get_logdir(): self.file_writer}
if self.purge_step is not None:
most_recent_step = self.purge_step
# pyrefly: ignore [missing-attribute]
self.file_writer.add_event(
Event(step=most_recent_step, file_version="brain.Event:2")
)
# pyrefly: ignore [missing-attribute]
self.file_writer.add_event(
Event(
step=most_recent_step,
# pyrefly: ignore [missing-attribute]
session_log=SessionLog(status=SessionLog.START),
)
)
self.purge_step = None
return self.file_writer
def get_logdir(self):
"""Return the directory where event files will be written."""
return self.log_dir
def add_hparams(
self,
hparam_dict,
metric_dict,
hparam_domain_discrete=None,
run_name=None,
global_step=None,
) -> None:
"""Add a set of hyperparameters to be compared in TensorBoard.
Args:
hparam_dict (dict): Each key-value pair in the dictionary is the
name of the hyper parameter and it's corresponding value.
The type of the value can be one of `bool`, `string`, `float`,
`int`, or `None`.
metric_dict (dict): Each key-value pair in the dictionary is the
name of the metric and it's corresponding value. Note that the key used
here should be unique in the tensorboard record. Otherwise the value
you added by ``add_scalar`` will be displayed in hparam plugin. In most
cases, this is unwanted.
hparam_domain_discrete: (Optional[Dict[str, List[Any]]]) A dictionary that
contains names of the hyperparameters and all discrete values they can hold
run_name (str): Name of the run, to be included as part of the logdir.
If unspecified, will use current timestamp.
global_step (int): Global step value to record
Examples::
from torch.utils.tensorboard import SummaryWriter
with SummaryWriter() as w:
for i in range(5):
w.add_hparams({'lr': 0.1*i, 'bsize': i},
{'hparam/accuracy': 10*i, 'hparam/loss': 10*i})
Expected result:
.. image:: _static/img/tensorboard/add_hparam.png
:scale: 50 %
"""
torch._C._log_api_usage_once("tensorboard.logging.add_hparams")
if type(hparam_dict) is not dict or type(metric_dict) is not dict:
raise TypeError("hparam_dict and metric_dict should be dictionary.")
exp, ssi, sei = hparams(hparam_dict, metric_dict, hparam_domain_discrete)
if not run_name:
run_name = str(time.time())
logdir = os.path.join(self._get_file_writer().get_logdir(), run_name)
with SummaryWriter(log_dir=logdir) as w_hp:
w_hp.file_writer.add_summary(exp, global_step)
w_hp.file_writer.add_summary(ssi, global_step)
w_hp.file_writer.add_summary(sei, global_step)
for k, v in metric_dict.items():
w_hp.add_scalar(k, v, global_step)
def add_scalar(
self,
tag,
scalar_value,
global_step=None,
walltime=None,
new_style=False,
double_precision=False,
) -> None:
"""Add scalar data to summary.
Args:
tag (str): Data identifier
scalar_value (float or string/blobname): Value to save
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
with seconds after epoch of event
new_style (boolean): Whether to use new style (tensor field) or old
style (simple_value field). New style could lead to faster data loading.
Examples::
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
x = range(100)
for i in x:
writer.add_scalar('y=2x', i * 2, i)
writer.close()
Expected result:
.. image:: _static/img/tensorboard/add_scalar.png
:scale: 50 %
"""
torch._C._log_api_usage_once("tensorboard.logging.add_scalar")
summary = scalar(
tag, scalar_value, new_style=new_style, double_precision=double_precision
)
self._get_file_writer().add_summary(summary, global_step, walltime)
def add_scalars(self, main_tag, tag_scalar_dict, global_step=None, walltime=None) -> None:
"""Add many scalar data to summary.
Args:
main_tag (str): The parent name for the tags
tag_scalar_dict (dict): Key-value pair storing the tag and corresponding values
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
Examples::
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
r = 5
for i in range(100):
writer.add_scalars('run_14h', {'xsinx':i*np.sin(i/r),
'xcosx':i*np.cos(i/r),
'tanx': np.tan(i/r)}, i)
writer.close()
# This call adds three values to the same scalar plot with the tag
# 'run_14h' in TensorBoard's scalar section.
Expected result:
.. image:: _static/img/tensorboard/add_scalars.png
:scale: 50 %
"""
torch._C._log_api_usage_once("tensorboard.logging.add_scalars")
walltime = time.time() if walltime is None else walltime
fw_logdir = self._get_file_writer().get_logdir()
for tag, scalar_value in tag_scalar_dict.items():
fw_tag = fw_logdir + "/" + main_tag.replace("/", "_") + "_" + tag
if self.all_writers is None:
raise AssertionError("self.all_writers is None")
if fw_tag in self.all_writers:
fw = self.all_writers[fw_tag]
else:
fw = FileWriter(
fw_tag, self.max_queue, self.flush_secs, self.filename_suffix
)
self.all_writers[fw_tag] = fw
fw.add_summary(scalar(main_tag, scalar_value), global_step, walltime)
def add_tensor(
self,
tag,
tensor,
global_step=None,
walltime=None,
) -> None:
"""Add tensor data to summary.
Args:
tag (str): Data identifier
tensor (torch.Tensor): tensor to save
global_step (int): Global step value to record
Examples::
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
x = torch.tensor([1,2,3])
writer.add_scalar('x', x)
writer.close()
Expected result:
Summary::tensor::float_val [1,2,3]
::tensor::shape [3]
::tag 'x'
"""
torch._C._log_api_usage_once("tensorboard.logging.add_tensor")
summary = tensor_proto(tag, tensor)
self._get_file_writer().add_summary(summary, global_step, walltime)
def add_histogram(
self,
tag,
values,
global_step=None,
bins="tensorflow",
walltime=None,
max_bins=None,
) -> None:
"""Add histogram to summary.
Args:
tag (str): Data identifier
values (torch.Tensor, numpy.ndarray, or string/blobname): Values to build histogram
global_step (int): Global step value to record
bins (str): One of {'tensorflow','auto', 'fd', ...}. This determines how the bins are made. You can find
other options in: https://numpy.org/doc/stable/reference/generated/numpy.histogram.html
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
Examples::
from torch.utils.tensorboard import SummaryWriter
import numpy as np
writer = SummaryWriter()
for i in range(10):
x = np.random.random(1000)
writer.add_histogram('distribution centers', x + i, i)
writer.close()
Expected result:
.. image:: _static/img/tensorboard/add_histogram.png
:scale: 50 %
"""
torch._C._log_api_usage_once("tensorboard.logging.add_histogram")
if isinstance(bins, str) and bins == "tensorflow":
bins = self.default_bins
self._get_file_writer().add_summary(
histogram(tag, values, bins, max_bins=max_bins), global_step, walltime
)
def add_histogram_raw(
self,
tag,
min,
max,
num,
sum,
sum_squares,
bucket_limits,
bucket_counts,
global_step=None,
walltime=None,
) -> None:
"""Add histogram with raw data.
Args:
tag (str): Data identifier
min (float or int): Min value
max (float or int): Max value
num (int): Number of values
sum (float or int): Sum of all values
sum_squares (float or int): Sum of squares for all values
bucket_limits (torch.Tensor, numpy.ndarray): Upper value per bucket.
The number of elements of it should be the same as `bucket_counts`.
bucket_counts (torch.Tensor, numpy.ndarray): Number of values per bucket
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/histogram/README.md
Examples::
from torch.utils.tensorboard import SummaryWriter
import numpy as np
writer = SummaryWriter()
dummy_data = []
for idx, value in enumerate(range(50)):
dummy_data += [idx + 0.001] * value
bins = list(range(50+2))
bins = np.array(bins)
values = np.array(dummy_data).astype(float).reshape(-1)
counts, limits = np.histogram(values, bins=bins)
sum_sq = values.dot(values)
writer.add_histogram_raw(
tag='histogram_with_raw_data',
min=values.min(),
max=values.max(),
num=len(values),
sum=values.sum(),
sum_squares=sum_sq,
bucket_limits=limits[1:].tolist(),
bucket_counts=counts.tolist(),
global_step=0)
writer.close()
Expected result:
.. image:: _static/img/tensorboard/add_histogram_raw.png
:scale: 50 %
"""
torch._C._log_api_usage_once("tensorboard.logging.add_histogram_raw")
if len(bucket_limits) != len(bucket_counts):
raise ValueError(
"len(bucket_limits) != len(bucket_counts), see the document."
)
self._get_file_writer().add_summary(
histogram_raw(
tag, min, max, num, sum, sum_squares, bucket_limits, bucket_counts
),
global_step,
walltime,
)
def add_image(
self, tag, img_tensor, global_step=None, walltime=None, dataformats="CHW"
) -> None:
"""Add image data to summary.
Note that this requires the ``pillow`` package.
Args:
tag (str): Data identifier
img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
dataformats (str): Image data format specification of the form
CHW, HWC, HW, WH, etc.
Shape:
img_tensor: Default is :math:`(3, H, W)`. You can use ``torchvision.utils.make_grid()`` to
convert a batch of tensor into 3xHxW format or call ``add_images`` and let us do the job.
Tensor with :math:`(1, H, W)`, :math:`(H, W)`, :math:`(H, W, 3)` is also suitable as long as
corresponding ``dataformats`` argument is passed, e.g. ``CHW``, ``HWC``, ``HW``.
Examples::
from torch.utils.tensorboard import SummaryWriter
import numpy as np
img = np.zeros((3, 100, 100))
img[0] = np.arange(0, 10000).reshape(100, 100) / 10000
img[1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000
img_HWC = np.zeros((100, 100, 3))
img_HWC[:, :, 0] = np.arange(0, 10000).reshape(100, 100) / 10000
img_HWC[:, :, 1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000
writer = SummaryWriter()
writer.add_image('my_image', img, 0)
# If you have non-default dimension setting, set the dataformats argument.
writer.add_image('my_image_HWC', img_HWC, 0, dataformats='HWC')
writer.close()
Expected result:
.. image:: _static/img/tensorboard/add_image.png
:scale: 50 %
"""
torch._C._log_api_usage_once("tensorboard.logging.add_image")
self._get_file_writer().add_summary(
image(tag, img_tensor, dataformats=dataformats), global_step, walltime
)
def add_images(
self, tag, img_tensor, global_step=None, walltime=None, dataformats="NCHW"
) -> None:
"""Add batched image data to summary.
Note that this requires the ``pillow`` package.
Args:
tag (str): Data identifier
img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
dataformats (str): Image data format specification of the form
NCHW, NHWC, CHW, HWC, HW, WH, etc.
Shape:
img_tensor: Default is :math:`(N, 3, H, W)`. If ``dataformats`` is specified, other shape will be
accepted. e.g. NCHW or NHWC.
Examples::
from torch.utils.tensorboard import SummaryWriter
import numpy as np
img_batch = np.zeros((16, 3, 100, 100))
for i in range(16):
img_batch[i, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 / 16 * i
img_batch[i, 1] = (1 - np.arange(0, 10000).reshape(100, 100) / 10000) / 16 * i
writer = SummaryWriter()
writer.add_images('my_image_batch', img_batch, 0)
writer.close()
Expected result:
.. image:: _static/img/tensorboard/add_images.png
:scale: 30 %
"""
torch._C._log_api_usage_once("tensorboard.logging.add_images")
self._get_file_writer().add_summary(
image(tag, img_tensor, dataformats=dataformats), global_step, walltime
)
def add_image_with_boxes(
self,
tag,
img_tensor,
box_tensor,
global_step=None,
walltime=None,
rescale=1,
dataformats="CHW",
labels=None,
) -> None:
"""Add image and draw bounding boxes on the image.
Args:
tag (str): Data identifier
img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data
box_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Box data (for detected objects)
box should be represented as [x1, y1, x2, y2].
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
rescale (float): Optional scale override
dataformats (str): Image data format specification of the form
NCHW, NHWC, CHW, HWC, HW, WH, etc.
labels (list of string): The label to be shown for each bounding box.
Shape:
img_tensor: Default is :math:`(3, H, W)`. It can be specified with ``dataformats`` argument.
e.g. CHW or HWC
box_tensor: (torch.Tensor, numpy.ndarray, or string/blobname): NX4, where N is the number of
boxes and each 4 elements in a row represents (xmin, ymin, xmax, ymax).
"""
torch._C._log_api_usage_once("tensorboard.logging.add_image_with_boxes")
if labels is not None:
if isinstance(labels, str):
labels = [labels]
if len(labels) != box_tensor.shape[0]:
labels = None
self._get_file_writer().add_summary(
image_boxes(
tag,
img_tensor,
box_tensor,
rescale=rescale,
dataformats=dataformats,
labels=labels,
),
global_step,
walltime,
)
def add_figure(
self,
tag: str,
figure: Union["Figure", list["Figure"]],
global_step: int | None = None,
close: bool = True,
walltime: float | None = None,
) -> None:
"""Render matplotlib figure into an image and add it to summary.
Note that this requires the ``matplotlib`` package.
Args:
tag: Data identifier
figure: Figure or a list of figures
global_step: Global step value to record
close: Flag to automatically close the figure
walltime: Optional override default walltime (time.time())
seconds after epoch of event
"""
torch._C._log_api_usage_once("tensorboard.logging.add_figure")
if isinstance(figure, list):
self.add_image(
tag,
figure_to_image(figure, close),
global_step,
walltime,
dataformats="NCHW",
)
else:
self.add_image(
tag,
figure_to_image(figure, close),
global_step,
walltime,
dataformats="CHW",
)
def add_video(self, tag, vid_tensor, global_step=None, fps=4, walltime=None) -> None:
"""Add video data to summary.
Note that this requires the ``moviepy`` package.
Args:
tag (str): Data identifier
vid_tensor (torch.Tensor): Video data
global_step (int): Global step value to record
fps (float or int): Frames per second
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
Shape:
vid_tensor: :math:`(N, T, C, H, W)`. The values should lie in [0, 255] for type `uint8` or [0, 1] for type `float`.
"""
torch._C._log_api_usage_once("tensorboard.logging.add_video")
self._get_file_writer().add_summary(
video(tag, vid_tensor, fps), global_step, walltime
)
def add_audio(
self, tag, snd_tensor, global_step=None, sample_rate=44100, walltime=None
) -> None:
"""Add audio data to summary.
Args:
tag (str): Data identifier
snd_tensor (torch.Tensor): Sound data
global_step (int): Global step value to record
sample_rate (int): sample rate in Hz
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
Shape:
snd_tensor: :math:`(1, L)`. The values should lie between [-1, 1].
"""
torch._C._log_api_usage_once("tensorboard.logging.add_audio")
self._get_file_writer().add_summary(
audio(tag, snd_tensor, sample_rate=sample_rate), global_step, walltime
)
def add_text(self, tag, text_string, global_step=None, walltime=None) -> None:
"""Add text data to summary.
Args:
tag (str): Data identifier
text_string (str): String to save
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
Examples::
writer.add_text('lstm', 'This is an lstm', 0)
writer.add_text('rnn', 'This is an rnn', 10)
"""
torch._C._log_api_usage_once("tensorboard.logging.add_text")
self._get_file_writer().add_summary(
text(tag, text_string), global_step, walltime
)
    def add_onnx_graph(self, prototxt) -> None:
        """Load an ONNX model from *prototxt* and record its graph."""
        torch._C._log_api_usage_once("tensorboard.logging.add_onnx_graph")
        self._get_file_writer().add_onnx_graph(load_onnx_graph(prototxt))
def add_graph(
self, model, input_to_model=None, verbose=False, use_strict_trace=True
) -> None:
"""Add graph data to summary.
Args:
model (torch.nn.Module): Model to draw.
input_to_model (torch.Tensor or list of torch.Tensor): A variable or a tuple of
variables to be fed.
verbose (bool): Whether to print graph structure in console.
use_strict_trace (bool): Whether to pass keyword argument `strict` to
`torch.jit.trace`. Pass False when you want the tracer to
record your mutable container types (list, dict)
"""
torch._C._log_api_usage_once("tensorboard.logging.add_graph")
# A valid PyTorch model should have a 'forward' method
self._get_file_writer().add_graph(
graph(model, input_to_model, verbose, use_strict_trace)
)
@staticmethod
def _encode(rawstr):
# I'd use urllib but, I'm unsure about the differences from python3 to python2, etc.
retval = rawstr
retval = retval.replace("%", f"%{ord('%'):02x}")
retval = retval.replace("/", f"%{ord('/'):02x}")
retval = retval.replace("\\", "%%%02x" % (ord("\\"))) # noqa: UP031
return retval
    def add_embedding(
        self,
        mat,
        metadata=None,
        label_img=None,
        global_step=None,
        tag="default",
        metadata_header=None,
    ) -> None:
        """Add embedding projector data to summary.

        Writes TSV/sprite/checkpoint files for the projector plugin into a
        per-step, per-tag subdirectory of the log dir and rewrites the
        whole projector config file on every call.

        Args:
            mat (torch.Tensor or numpy.ndarray): A matrix which each row is the feature vector of the data point
            metadata (list): A list of labels, each element will be converted to string
            label_img (torch.Tensor): Images correspond to each data point
            global_step (int): Global step value to record
            tag (str): Name for the embedding
            metadata_header (list): A list of headers for multi-column metadata. If given, each metadata must be
                a list with values corresponding to headers.
        Shape:
            mat: :math:`(N, D)`, where N is number of data and D is feature dimension
            label_img: :math:`(N, C, H, W)`

        Raises:
            NotADirectoryError: if the target subdirectory path exists but is a file.
            AssertionError: if metadata/label_img lengths disagree with ``mat``,
                or ``mat`` is not 2D.

        .. note::
            Categorical (i.e. non-numeric) metadata cannot have more than 50 unique values if they are to be used for
            coloring in the embedding projector.
        """
        torch._C._log_api_usage_once("tensorboard.logging.add_embedding")
        # Normalize the input matrix to a numpy array up front.
        mat = make_np(mat)
        if global_step is None:
            # Default to step 0 so the subdirectory name is deterministic.
            global_step = 0
            # clear pbtxt?
        # Maybe we should encode the tag so slashes don't trip us up?
        # I don't think this will mess us up, but better safe than sorry.
        # Zero-pad the step so directories sort lexicographically by step.
        subdir = f"{str(global_step).zfill(5)}/{self._encode(tag)}"
        save_path = os.path.join(self._get_file_writer().get_logdir(), subdir)
        fs = tf.io.gfile
        if fs.exists(save_path):
            if fs.isdir(save_path):
                # Re-using the same (step, tag) pair overwrites silently;
                # warn so the user can disambiguate with global_step.
                print(
                    "warning: Embedding dir exists, did you set global_step for add_embedding()?"
                )
            else:
                raise NotADirectoryError(
                    f"Path: `{save_path}` exists, but is a file. Cannot proceed."
                )
        else:
            fs.makedirs(save_path)
        if metadata is not None:
            # One metadata entry (label) is required per embedding row.
            if mat.shape[0] != len(
                metadata
            ):
                raise AssertionError("#labels should equal with #data points")
            make_tsv(metadata, save_path, metadata_header=metadata_header)
        if label_img is not None:
            # One thumbnail image is required per embedding row.
            if mat.shape[0] != label_img.shape[0]:
                raise AssertionError("#images should equal with #data points")
            make_sprite(label_img, save_path)
        if mat.ndim != 2:
            raise AssertionError("mat should be 2D, where mat.size(0) is the number of data points")
        make_mat(mat, save_path)
        # Filesystem doesn't necessarily have append semantics, so we store an
        # internal buffer to append to and re-write whole file after each
        # embedding is added
        if not hasattr(self, "_projector_config"):
            # Lazily created on first use; accumulates every embedding written
            # through this writer instance.
            self._projector_config = ProjectorConfig()
        embedding_info = get_embedding_info(
            metadata, label_img, subdir, global_step, tag
        )
        self._projector_config.embeddings.extend([embedding_info])
        from google.protobuf import text_format
        config_pbtxt = text_format.MessageToString(self._projector_config)
        write_pbtxt(self._get_file_writer().get_logdir(), config_pbtxt)
def add_pr_curve(
self,
tag,
labels,
predictions,
global_step=None,
num_thresholds=127,
weights=None,
walltime=None,
) -> None:
"""Add precision recall curve.
Plotting a precision-recall curve lets you understand your model's
performance under different threshold settings. With this function,
you provide the ground truth labeling (T/F) and prediction confidence
(usually the output of your model) for each target. The TensorBoard UI
will let you choose the threshold interactively.
Args:
tag (str): Data identifier
labels (torch.Tensor, numpy.ndarray, or string/blobname):
Ground truth data. Binary label for each element.
predictions (torch.Tensor, numpy.ndarray, or string/blobname):
The probability that an element be classified as true.
Value should be in [0, 1]
global_step (int): Global step value to record
num_thresholds (int): Number of thresholds used to draw the curve.
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
Examples::
from torch.utils.tensorboard import SummaryWriter
import numpy as np
labels = np.random.randint(2, size=100) # binary label
predictions = np.random.rand(100)
writer = SummaryWriter()
writer.add_pr_curve('pr_curve', labels, predictions, 0)
writer.close()
"""
torch._C._log_api_usage_once("tensorboard.logging.add_pr_curve")
labels, predictions = make_np(labels), make_np(predictions)
self._get_file_writer().add_summary(
pr_curve(tag, labels, predictions, num_thresholds, weights),
global_step,
walltime,
)
def add_pr_curve_raw(
self,
tag,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
global_step=None,
num_thresholds=127,
weights=None,
walltime=None,
) -> None:
"""Add precision recall curve with raw data.
Args:
tag (str): Data identifier
true_positive_counts (torch.Tensor, numpy.ndarray, or string/blobname): true positive counts
false_positive_counts (torch.Tensor, numpy.ndarray, or string/blobname): false positive counts
true_negative_counts (torch.Tensor, numpy.ndarray, or string/blobname): true negative counts
false_negative_counts (torch.Tensor, numpy.ndarray, or string/blobname): false negative counts
precision (torch.Tensor, numpy.ndarray, or string/blobname): precision
recall (torch.Tensor, numpy.ndarray, or string/blobname): recall
global_step (int): Global step value to record
num_thresholds (int): Number of thresholds used to draw the curve.
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/README.md
"""
torch._C._log_api_usage_once("tensorboard.logging.add_pr_curve_raw")
self._get_file_writer().add_summary(
pr_curve_raw(
tag,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds,
weights,
),
global_step,
walltime,
)
def add_custom_scalars_multilinechart(
self, tags, category="default", title="untitled"
) -> None:
"""Shorthand for creating multilinechart. Similar to ``add_custom_scalars()``, but the only necessary argument is *tags*.
Args:
tags (list): list of tags that have been used in ``add_scalar()``
Examples::
writer.add_custom_scalars_multilinechart(['twse/0050', 'twse/2330'])
"""
torch._C._log_api_usage_once(
"tensorboard.logging.add_custom_scalars_multilinechart"
)
layout = {category: {title: ["Multiline", tags]}}
self._get_file_writer().add_summary(custom_scalars(layout))
def add_custom_scalars_marginchart(
self, tags, category="default", title="untitled"
) -> None:
"""Shorthand for creating marginchart.
Similar to ``add_custom_scalars()``, but the only necessary argument is *tags*,
which should have exactly 3 elements.
Args:
tags (list): list of tags that have been used in ``add_scalar()``
Examples::
writer.add_custom_scalars_marginchart(['twse/0050', 'twse/2330', 'twse/2006'])
"""
torch._C._log_api_usage_once(
"tensorboard.logging.add_custom_scalars_marginchart"
)
if len(tags) != 3:
raise AssertionError(f"Expected 3 tags, got {len(tags)}.")
layout = {category: {title: ["Margin", tags]}}
self._get_file_writer().add_summary(custom_scalars(layout))
def add_custom_scalars(self, layout) -> None:
"""Create special chart by collecting charts tags in 'scalars'.
NOTE: This function can only be called once for each SummaryWriter() object.
Because it only provides metadata to tensorboard, the function can be called before or after the training loop.
Args:
layout (dict): {categoryName: *charts*}, where *charts* is also a dictionary
{chartName: *ListOfProperties*}. The first element in *ListOfProperties* is the chart's type
(one of **Multiline** or **Margin**) and the second element should be a list containing the tags
you have used in add_scalar function, which will be collected into the new chart.
Examples::
layout = {'Taiwan':{'twse':['Multiline',['twse/0050', 'twse/2330']]},
'USA':{ 'dow':['Margin', ['dow/aaa', 'dow/bbb', 'dow/ccc']],
'nasdaq':['Margin', ['nasdaq/aaa', 'nasdaq/bbb', 'nasdaq/ccc']]}}
writer.add_custom_scalars(layout)
"""
torch._C._log_api_usage_once("tensorboard.logging.add_custom_scalars")
self._get_file_writer().add_summary(custom_scalars(layout))
def add_mesh(
self,
tag,
vertices,
colors=None,
faces=None,
config_dict=None,
global_step=None,
walltime=None,
) -> None:
"""Add meshes or 3D point clouds to TensorBoard.
The visualization is based on Three.js,
so it allows users to interact with the rendered object. Besides the basic definitions
such as vertices, faces, users can further provide camera parameter, lighting condition, etc.
Please check https://threejs.org/docs/index.html#manual/en/introduction/Creating-a-scene for
advanced usage.
Args:
tag (str): Data identifier
vertices (torch.Tensor): List of the 3D coordinates of vertices.
colors (torch.Tensor): Colors for each vertex
faces (torch.Tensor): Indices of vertices within each triangle. (Optional)
config_dict: Dictionary with ThreeJS classes names and configuration.
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
Shape:
vertices: :math:`(B, N, 3)`. (batch, number_of_vertices, channels)
colors: :math:`(B, N, 3)`. The values should lie in [0, 255] for type `uint8` or [0, 1] for type `float`.
faces: :math:`(B, N, 3)`. The values should lie in [0, number_of_vertices] for type `uint8`.
Examples::
from torch.utils.tensorboard import SummaryWriter
vertices_tensor = torch.as_tensor([
[1, 1, 1],
[-1, -1, 1],
[1, -1, -1],
[-1, 1, -1],
], dtype=torch.float).unsqueeze(0)
colors_tensor = torch.as_tensor([
[255, 0, 0],
[0, 255, 0],
[0, 0, 255],
[255, 0, 255],
], dtype=torch.int).unsqueeze(0)
faces_tensor = torch.as_tensor([
[0, 2, 3],
[0, 3, 1],
[0, 1, 2],
[1, 3, 2],
], dtype=torch.int).unsqueeze(0)
writer = SummaryWriter()
writer.add_mesh('my_mesh', vertices=vertices_tensor, colors=colors_tensor, faces=faces_tensor)
writer.close()
"""
torch._C._log_api_usage_once("tensorboard.logging.add_mesh")
self._get_file_writer().add_summary(
mesh(tag, vertices, colors, faces, config_dict), global_step, walltime
)
def flush(self) -> None:
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
if self.all_writers is None:
return
for writer in self.all_writers.values():
writer.flush()
def close(self) -> None:
if self.all_writers is None:
return # ignore double close
for writer in self.all_writers.values():
writer.flush()
writer.close()
# pyrefly: ignore [bad-assignment]
self.file_writer = self.all_writers = None
    def __enter__(self):
        """Enter the context manager, yielding the writer itself."""
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Close the writer on context exit; any exception propagates."""
        self.close()
| SummaryWriter |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/operators.py | {
"start": 3641,
"end": 5182
} | class ____(Hashable, Protocol):
"""describe an op() function."""
__slots__ = ()
__name__: str
@overload
def __call__(
self,
left: ColumnExpressionArgument[Any],
right: Optional[Any] = None,
*other: Any,
**kwargs: Any,
) -> ColumnElement[Any]: ...
@overload
def __call__(
self,
left: Operators,
right: Optional[Any] = None,
*other: Any,
**kwargs: Any,
) -> Operators: ...
def __call__(
self,
left: Any,
right: Optional[Any] = None,
*other: Any,
**kwargs: Any,
) -> Operators: ...
add = cast(OperatorType, _uncast_add)
and_ = cast(OperatorType, _uncast_and_)
contains = cast(OperatorType, _uncast_contains)
eq = cast(OperatorType, _uncast_eq)
floordiv = cast(OperatorType, _uncast_floordiv)
ge = cast(OperatorType, _uncast_ge)
getitem = cast(OperatorType, _uncast_getitem)
gt = cast(OperatorType, _uncast_gt)
inv = cast(OperatorType, _uncast_inv)
le = cast(OperatorType, _uncast_le)
lshift = cast(OperatorType, _uncast_lshift)
lt = cast(OperatorType, _uncast_lt)
matmul = cast(OperatorType, _uncast_matmul)
mod = cast(OperatorType, _uncast_mod)
mul = cast(OperatorType, _uncast_mul)
ne = cast(OperatorType, _uncast_ne)
neg = cast(OperatorType, _uncast_neg)
or_ = cast(OperatorType, _uncast_or_)
pow_ = cast(OperatorType, _uncast_pow)
rshift = cast(OperatorType, _uncast_rshift)
sub = cast(OperatorType, _uncast_sub)
truediv = cast(OperatorType, _uncast_truediv)
| OperatorType |
python | pola-rs__polars | py-polars/src/polars/exceptions.py | {
"start": 4920,
"end": 5039
} | class ____(PolarsError):
"""Exception raised when unsuitable SQL is given to a database method."""
| UnsuitableSQLError |
python | huggingface__transformers | tests/models/regnet/test_modeling_regnet.py | {
"start": 3973,
"end": 7761
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as RegNet does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{"image-feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
if is_torch_available()
else {}
)
test_resize_embeddings = False
has_attentions = False
test_torch_exportable = True
def setUp(self):
self.model_tester = RegNetModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=RegNetConfig,
has_text_modality=False,
common_properties=["num_channels", "hidden_sizes"],
)
def test_config(self):
self.config_tester.run_common_tests()
@is_flaky(description="Larger difference with A10. Still flaky after setting larger tolerance")
def test_batching_equivalence(self, atol=3e-5, rtol=3e-5):
super().test_batching_equivalence(atol=atol, rtol=rtol)
@unittest.skip(reason="RegNet does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="RegNet does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states), expected_num_stages + 1)
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.image_size // 2, self.model_tester.image_size // 2],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
layers_type = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
config.layer_type = layer_type
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "facebook/regnet-y-040"
model = RegNetModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
| RegNetModelTest |
python | python-pillow__Pillow | src/PIL/ExifTags.py | {
"start": 9461,
"end": 9931
} | class ____(IntEnum):
Unknown = 0x00
Daylight = 0x01
Fluorescent = 0x02
Tungsten = 0x03
Flash = 0x04
Fine = 0x09
Cloudy = 0x0A
Shade = 0x0B
DaylightFluorescent = 0x0C
DayWhiteFluorescent = 0x0D
CoolWhiteFluorescent = 0x0E
WhiteFluorescent = 0x0F
StandardLightA = 0x11
StandardLightB = 0x12
StandardLightC = 0x13
D55 = 0x14
D65 = 0x15
D75 = 0x16
D50 = 0x17
ISO = 0x18
Other = 0xFF
| LightSource |
python | spack__spack | lib/spack/spack/vendor/jinja2/nativetypes.py | {
"start": 2543,
"end": 2703
} | class ____(Environment):
"""An environment that renders templates to native Python types."""
code_generator_class = NativeCodeGenerator
| NativeEnvironment |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/type_api.py | {
"start": 86057,
"end": 87744
} | class ____(TypeDecorator[_T]):
"""deprecated. symbol is present for backwards-compatibility with
workaround recipes, however this actual type should not be used.
"""
def __init__(self, *arg: Any, **kw: Any):
raise NotImplementedError(
"Variant is no longer used in SQLAlchemy; this is a "
"placeholder symbol for backwards compatibility."
)
@overload
def to_instance(
typeobj: Union[Type[_TE], _TE], *arg: Any, **kw: Any
) -> _TE: ...
@overload
def to_instance(typeobj: None, *arg: Any, **kw: Any) -> TypeEngine[None]: ...
def to_instance(
typeobj: Union[Type[_TE], _TE, None], *arg: Any, **kw: Any
) -> Union[_TE, TypeEngine[None]]:
if typeobj is None:
return NULLTYPE
if callable(typeobj):
return typeobj(*arg, **kw)
else:
return typeobj
def adapt_type(
typeobj: _TypeEngineArgument[Any],
colspecs: Mapping[Type[Any], Type[TypeEngine[Any]]],
) -> TypeEngine[Any]:
typeobj = to_instance(typeobj)
for t in typeobj.__class__.__mro__[0:-1]:
try:
impltype = colspecs[t]
break
except KeyError:
pass
else:
# couldn't adapt - so just return the type itself
# (it may be a user-defined type)
return typeobj
# if we adapted the given generic type to a database-specific type,
# but it turns out the originally given "generic" type
# is actually a subclass of our resulting type, then we were already
# given a more specific type than that required; so use that.
if issubclass(typeobj.__class__, impltype):
return typeobj
return typeobj.adapt(impltype)
| Variant |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/integration_test/public_symbol_test.py | {
"start": 828,
"end": 1058
} | class ____(tf.test.TestCase):
def testSimple(self):
a = 0.1
b = 0.2
self.assertAllClose(onp.add(a, b), np.add(a, b))
if __name__ == "__main__":
tf.compat.v1.enable_eager_execution()
tf.test.main()
| PublicSymbolTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 42746,
"end": 43391
} | class ____(AssetSelection):
"""Used to represent a UI asset selection by table name. This should not be resolved against
an in-process asset graph.
"""
selected_table_name: Optional[str]
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
"""This should not be invoked in user code."""
raise NotImplementedError
def to_selection_str(self) -> str:
if self.selected_table_name is None:
return "table_name:<null>"
return f'table_name:"{self.selected_table_name}"'
@whitelist_for_serdes
@record
| TableNameAssetSelection |
python | django__django | tests/i18n/tests.py | {
"start": 77880,
"end": 78116
} | class ____(ResolutionOrderI18NTests):
def test_django_fallback(self):
self.assertEqual(gettext("Date/time"), "Datum/Zeit")
@override_settings(INSTALLED_APPS=["i18n.territorial_fallback"])
| DjangoFallbackResolutionOrderI18NTests |
python | networkx__networkx | networkx/algorithms/bipartite/tests/test_basic.py | {
"start": 81,
"end": 4291
} | class ____:
def test_is_bipartite(self):
assert bipartite.is_bipartite(nx.path_graph(4))
assert bipartite.is_bipartite(nx.DiGraph([(1, 0)]))
assert not bipartite.is_bipartite(nx.complete_graph(3))
def test_bipartite_color(self):
G = nx.path_graph(4)
c = bipartite.color(G)
assert c == {0: 1, 1: 0, 2: 1, 3: 0}
def test_not_bipartite_color(self):
with pytest.raises(nx.NetworkXError):
c = bipartite.color(nx.complete_graph(4))
def test_bipartite_directed(self):
G = bipartite.random_graph(10, 10, 0.1, directed=True)
assert bipartite.is_bipartite(G)
def test_bipartite_sets(self):
G = nx.path_graph(4)
X, Y = bipartite.sets(G)
assert X == {0, 2}
assert Y == {1, 3}
def test_bipartite_sets_directed(self):
G = nx.path_graph(4)
D = G.to_directed()
X, Y = bipartite.sets(D)
assert X == {0, 2}
assert Y == {1, 3}
def test_bipartite_sets_given_top_nodes(self):
G = nx.path_graph(4)
top_nodes = [0, 2]
X, Y = bipartite.sets(G, top_nodes)
assert X == {0, 2}
assert Y == {1, 3}
def test_bipartite_sets_disconnected(self):
with pytest.raises(nx.AmbiguousSolution):
G = nx.path_graph(4)
G.add_edges_from([(5, 6), (6, 7)])
X, Y = bipartite.sets(G)
def test_is_bipartite_node_set(self):
G = nx.path_graph(4)
with pytest.raises(nx.AmbiguousSolution):
bipartite.is_bipartite_node_set(G, [1, 1, 2, 3])
assert bipartite.is_bipartite_node_set(G, [0, 2])
assert bipartite.is_bipartite_node_set(G, [1, 3])
assert not bipartite.is_bipartite_node_set(G, [1, 2])
G.add_edge(10, 20)
assert bipartite.is_bipartite_node_set(G, [0, 2, 10])
assert bipartite.is_bipartite_node_set(G, [0, 2, 20])
assert bipartite.is_bipartite_node_set(G, [1, 3, 10])
assert bipartite.is_bipartite_node_set(G, [1, 3, 20])
def test_bipartite_density(self):
G = nx.path_graph(5)
X, Y = bipartite.sets(G)
density = len(list(G.edges())) / (len(X) * len(Y))
assert bipartite.density(G, X) == density
D = nx.DiGraph(G.edges())
assert bipartite.density(D, X) == density / 2.0
assert bipartite.density(nx.Graph(), {}) == 0.0
def test_bipartite_degrees(self):
G = nx.path_graph(5)
X = {1, 3}
Y = {0, 2, 4}
u, d = bipartite.degrees(G, Y)
assert dict(u) == {1: 2, 3: 2}
assert dict(d) == {0: 1, 2: 2, 4: 1}
def test_bipartite_weighted_degrees(self):
G = nx.path_graph(5)
G.add_edge(0, 1, weight=0.1, other=0.2)
X = {1, 3}
Y = {0, 2, 4}
u, d = bipartite.degrees(G, Y, weight="weight")
assert dict(u) == {1: 1.1, 3: 2}
assert dict(d) == {0: 0.1, 2: 2, 4: 1}
u, d = bipartite.degrees(G, Y, weight="other")
assert dict(u) == {1: 1.2, 3: 2}
assert dict(d) == {0: 0.2, 2: 2, 4: 1}
def test_biadjacency_matrix_weight(self):
pytest.importorskip("scipy")
G = nx.path_graph(5)
G.add_edge(0, 1, weight=2, other=4)
X = [1, 3]
Y = [0, 2, 4]
M = bipartite.biadjacency_matrix(G, X, weight="weight")
assert M[0, 0] == 2
M = bipartite.biadjacency_matrix(G, X, weight="other")
assert M[0, 0] == 4
def test_biadjacency_matrix(self):
pytest.importorskip("scipy")
tops = [2, 5, 10]
bots = [5, 10, 15]
for i in range(len(tops)):
G = bipartite.random_graph(tops[i], bots[i], 0.2)
top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0]
M = bipartite.biadjacency_matrix(G, top)
assert M.shape[0] == tops[i]
assert M.shape[1] == bots[i]
def test_biadjacency_matrix_order(self):
pytest.importorskip("scipy")
G = nx.path_graph(5)
G.add_edge(0, 1, weight=2)
X = [3, 1]
Y = [4, 2, 0]
M = bipartite.biadjacency_matrix(G, X, Y, weight="weight")
assert M[1, 2] == 2
| TestBipartiteBasic |
python | google__jax | tests/pallas/fusion_test.py | {
"start": 8952,
"end": 9431
} | class ____(jtu.JaxTestCase):
def test_basic_fusion(self):
@jax.jit
@fuser.fuse
@fuser.fusible
def f(x_fn, y_fn):
x = x_fn()
if y_fn is None:
y_fn = lambda x: x
return y_fn(x)
xt = ArrayTuple(x0=jnp.ones((8, 8)), x1=jnp.zeros(4))
ot = f(xt)
np.testing.assert_array_equal(ot.x0, xt.x0)
np.testing.assert_array_equal(ot.x1, xt.x1)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| FusionHijaxTest |
python | tensorflow__tensorflow | tensorflow/python/ops/special_math_ops_test.py | {
"start": 26816,
"end": 39839
} | class ____(test.TestCase):
def _check(self, s, *input_shapes, **kwargs):
dtype = kwargs.pop('dtype', np.float32)
r = np.random.RandomState(0)
inputs = []
for shape in input_shapes:
arr = np.array(r.randn(*shape)).astype(dtype)
if dtype == np.complex64 or dtype == np.complex128:
arr += 1j * np.array(r.randn(*shape)).astype(dtype)
inputs.append(arr)
input_tensors = [constant_op.constant(x, shape=x.shape) for x in inputs]
a = np.einsum(s, *inputs)
b = self.evaluate(special_math_ops.einsum(s, *input_tensors))
self.assertAllClose(a, b, atol=1e-4, rtol=1e-4)
def test_invalid_keyword_arguments(self):
r = np.random.RandomState(0)
a = array_ops.placeholder_with_default(r.randn(2, 3), shape=(2, 3))
b = array_ops.placeholder_with_default(r.randn(3, 4), shape=(3, 4))
with self.assertRaises(TypeError):
_ = special_math_ops.einsum(
'ij,jk->ik', a, b, name='name', invalid1='value1', invalid2='value2')
def test_unary(self):
self._check('a', (3,))
self._check('aa', (3, 3))
self._check('ab->', (3, 3))
self._check('ab->ab', (3, 3))
self._check('abc->b', (3, 4, 5))
self._check('abc->ca', (3, 4, 5))
self._check('abc->cab', (3, 4, 5))
# Empty cases.
self._check('', ())
self._check('->', ())
# Repeated indices cases.
self._check('aa->', (3, 3))
self._check('aa->a', (3, 3))
self._check('aaa->', (3, 3, 3))
self._check('aaa->a', (3, 3, 3))
self._check('aab->a', (3, 3, 4))
self._check('aabcc->a', (3, 3, 5, 4, 4))
self._check('aabcc->ac', (3, 3, 5, 4, 4))
self._check('aabcd->ad', (3, 3, 5, 4, 4))
def test_unary_ellipsis(self):
self._check('...->', ())
self._check('...ijk->...ki', (3, 4, 5))
self._check('...ijk->...ki', (1, 3, 4, 5))
self._check('...ijk->...ki', (2, 2, 3, 4, 5))
self._check('...ij->...ji', (5, 2, 3)) # batch matrix transpose
self._check('...ij->...', (5, 2, 3)) # batch sum
self._check('...->...', ())
self._check('->...', ())
# Repeated indices.
self._check('i...ii->...i', (3, 2, 3, 3))
self._check('i...i->i...', (2, 2))
self._check('i...i->', (2, 2))
self._check('i...i->...', (2, 5, 1, 2))
self._check('i...i->i...', (2, 1, 2))
self._check('i...i->i...', (2, 3, 4, 5, 2))
def test_binary_simple(self):
# Binary cases in XLA mode must have either (a) each index appearing exactly
# once in both the inputs (batch or contraction index), or (b) appearing
# exactly once in an input and in the output (free index).
self._check(',->', (), ())
self._check('a,a->', (3,), (3,))
self._check('a,a->a', (3,), (3,))
self._check('ab,b->a', (3, 4), (4,))
self._check('ab,ab->', (3, 4), (3, 4))
self._check('ab,bc->ac', (3, 4), (4, 5))
self._check('nij,jk->nik', (5, 2, 3), (3, 4))
self._check('abc,bad->abcd', (1, 2, 3), (2, 1, 4))
# Based on https://github.com/google/jax/issues/37#issuecomment-448572187
self._check('sa,shb->shab', (2, 1), (2, 3, 4))
# Infer the output subscripts.
self._check('ab,b', (3, 4), (4,))
self._check('cab,b', (1, 3, 4), (4,))
def test_reduced_indices(self):
self._check('ba,b->', (3, 2), (3,))
self._check('ab,ab->', (3, 4), (3, 4))
def test_repeated_indices(self):
# Repeated indices.
self._check('ijj,k->ik', (2, 3, 3), (4,))
self._check('aba,a->b', (3, 4, 3), (3,))
# From https://github.com/dask/dask/pull/3412#discussion_r182413444
self._check('aab,bc->ac', (2, 2, 3), (3, 4))
self._check('aab,bcc->ac', (2, 2, 3), (3, 4, 4))
def test_binary_ellipsis(self):
# Batch matmul with ellipsis but without broadcasting.
self._check('...mk,...kn->...mn', (5, 1, 2, 3), (5, 1, 3, 4))
# Empty batch dimensions.
self._check('...mk,...kn->...mn', (2, 3), (3, 4))
# Tensor contraction with transpose.
self._check('...ija,aijb...->ba...ij', (1, 2, 2, 3, 1), (1, 2, 3, 4, 1, 2))
# Output subscripts may omit ellipsis when batch shape is empty.
self._check('...mk,...kn->mn', (2, 3), (3, 4))
self._check('...mk,kn->mn', (2, 3), (3, 4))
self._check('mk,...kn->mn', (2, 3), (3, 4))
self._check('...,...->...', (2, 3), (2, 3)) # hadamard product
self._check('...i,...j->...ij', (5, 2), (5, 3)) # outer product
def test_broadcasting(self):
# Batch matmul with broadcasting.
self._check('...ij,...jk->...ik', (1, 2, 3), (3, 5))
self._check('...ij,...jk->...ik', (2, 3), (1, 3, 5))
self._check('...ij,...jk->...ik', (5, 2, 3), (3, 5))
self._check('...ij,...jk->...ik', (2, 3), (5, 3, 5))
self._check('...ij,...jk->...ik', (3, 1, 2, 3), (1, 1, 7, 3, 5))
self._check('i...j,j...k->...ik', (2, 1, 3, 1, 3), (3, 1, 7, 5))
# Broadcasting with repeated indices.
self._check('ij,jk...k->i...', (3, 2), (2, 4, 1, 4))
self._check('ij,jk...k->...i', (3, 2), (2, 4, 5, 4))
self._check('ijj,jk...k->i...', (3, 2, 2), (2, 4, 1, 4))
self._check('i...jj,jk...k->i...', (3, 3, 1, 2, 2), (2, 4, 1, 5, 4))
# Following 2 from https://stackoverflow.com/a/19203475/1611416
self._check('...abc,...abcd->...d', (1, 1, 2, 3, 4), (5, 2, 3, 4, 6))
self._check('ab...,b->ab...', (2, 3, 1, 1, 5), (3,))
def test_dtypes(self):
dtypes = [np.float64, np.float32, np.complex64, np.complex128]
for dtype in dtypes:
self._check('ij,jk->ik', (2, 2), (2, 2), dtype=dtype)
self._check('ji,jk->ik', (2, 2), (2, 2), dtype=dtype)
self._check('ji,kj->ik', (2, 2), (2, 2), dtype=dtype)
self._check('ij,jk->ki', (2, 2), (2, 2), dtype=dtype)
self._check('ji,kj->ki', (2, 2), (2, 2), dtype=dtype)
def test_multiple_inputs(self):
self._check('ijk,ijl,ikl->i', (1, 2, 3), (1, 2, 4), (1, 3, 4))
self._check('i,ijk,j->k', (1,), (1, 2, 4), (2,))
self._check('ij,ij,jk,kl->il', (1, 2), (1, 2), (2, 3), (3, 4))
# Tests from dask.
self._check('a,b,c', (5,), (7,), (9,))
self._check('ab,ab,c->c', (5, 6), (5, 6), (2,))
@test_util.disable_xla('b/131919749')
def test_placeholder(self):
def check(equation, *input_and_placeholder_shapes):
r = np.random.RandomState(0)
inputs = []
input_placeholders = []
for actual_shape, placeholder_shape in input_and_placeholder_shapes:
input_np = np.array(r.randn(*actual_shape))
inputs.append(input_np)
input_placeholders.append(
array_ops.placeholder_with_default(input_np, placeholder_shape))
a = np.einsum(equation, *inputs)
b = self.evaluate(special_math_ops.einsum(equation, *input_placeholders))
self.assertAllClose(a, b, atol=1e-4, rtol=1e-4)
check('bijl,bjkm->bik', ((9, 2, 3, 5), (None, None, None, 5)),
((9, 3, 4, 7), (None, None, 4, None)))
check('...ij,...->...i', ((4, 3, 1, 2), (None, 3, None, 2)),
((4, 3), (None, 3)))
# Ellipsis with unknown rank.
check('bijl,bjkm->bik', ((9, 2, 3, 5), None), ((9, 3, 4, 7), None))
check('...ij,...jk->...ik', ((3, 1, 2, 3), None), ((1, 7, 3, 4), None))
def test_numpy_input(self):
# In addition to Tensors, we also support raw numpy arrays as inputs.
r = np.random.RandomState(0)
s = 'ijk,ijl,ikl->i'
x = r.randn(1, 2, 3)
y = r.randn(1, 2, 4)
z = r.randn(1, 3, 4)
a = np.einsum(s, x, y, z)
b = self.evaluate(special_math_ops.einsum(s, x, y, z))
self.assertAllClose(a, b, atol=1e-4, rtol=1e-4)
def test_long_cases(self):
cases = [
'efc,dbc,acf,fd->abe',
'ea,fb,gc,hd,abcd->efgh',
'abhe,hidj,jgba,hiab,gab->ed',
# Cases with whitespace.
'efc, dbc, acf, fd -> abe',
'abhe, hidj, jgba, hiab, gab',
# Repeated equations for cache hit on the opt_einsum call.
'ea,fb,abcd,gc,hd->efgh',
'ea,fb,abcd,gc,hd->efgh',
]
dimension_map = dict((c, ord(c) - ord('a') + 1) for c in 'abcdefghij')
for equation in cases:
inputs = equation.split('->')[0].replace(' ', '')
input_shapes = []
for input_str in inputs.split(','):
input_shapes.append(tuple([dimension_map[c] for c in input_str]))
self._check(equation, *input_shapes)
def test_opt_einsum_cached(self):
# Checks call_count to opt_einsum which are only reflected in eager mode.
if not context.executing_eagerly():
return
input_1 = ('ijk,ijl,ikl->i', (1, 2, 3), (1, 2, 4), (1, 3, 4))
input_2 = ('ij,ij,jk,kl->il', (1, 2), (1, 2), (2, 3), (3, 4))
with test.mock.patch.object(
opt_einsum, 'contract_path',
wraps=opt_einsum.contract_path) as mock_contract_path:
# explicitly clear the lru_cache contents for the method
# special_math_ops.get_opt_einsum_contract_path
# We need to do this because other tests in this file invoke that method
# with the same input args (as input_1 and input_2 above), and if
# those tests run before this test, then the call_count for the method
# mock_contract_path will not increment.
special_math_ops._get_opt_einsum_contract_path.cache_clear()
self.assertEqual(mock_contract_path.call_count, 0)
self._check(*input_1)
self.assertEqual(mock_contract_path.call_count, 1)
# The same input results in no extra call if we're caching the
# opt_einsum.contract_path call. We only cache in Python3.
self._check(*input_1)
self.assertEqual(mock_contract_path.call_count, 1)
# New input results in another call to opt_einsum.
self._check(*input_2)
self.assertEqual(mock_contract_path.call_count, 2)
# No more extra calls as the inputs should be cached.
self._check(*input_1)
self._check(*input_2)
self._check(*input_1)
self.assertEqual(mock_contract_path.call_count, 2)
@test_util.disable_xla('b/131919749')
def test_long_cases_with_repeated_labels(self):
cases = [
# Tests from dask.
'fdf,cdd,ccd,afe->ae',
'fff,fae,bef,def->abd',
]
dimension_map = dict((c, ord(c) - ord('a') + 1) for c in 'abcdefghij')
for equation in cases:
inputs = equation.split('->')[0].replace(' ', '')
input_shapes = []
for input_str in inputs.split(','):
input_shapes.append(tuple([dimension_map[c] for c in input_str]))
self._check(equation, *input_shapes)
@test_util.disable_xla('b/131919749')
@test_util.run_in_graph_and_eager_modes
def test_invalid_equation(self):
r = np.random.RandomState(0)
cases = [
# invalid equation format.
('a0->a', r.randn(5, 3)),
('a->a,a', r.randn(5)),
('a->a->a', r.randn(5)),
('ijk ijk', r.randn(1, 2, 3), r.randn(1, 2, 3)),
('ij.jk->ik', r.randn(2, 3), r.randn(3, 4)),
# output label not present in input.
('a->b', r.randn(5)),
('ij,jk->im', r.randn(2, 3), r.randn(3, 4)),
# wrong shape.
('ij,jk->ik', r.randn(1, 2, 3), r.randn(3, 4)),
# inconsistent dimensions.
('ij,jk->ik', r.randn(2, 3), r.randn(4, 4)),
# output has repeated subscripts.
('ij,jk->iik', r.randn(2, 3), r.randn(3, 4)),
# too many ellipses
('...ij...,jk...->ik...', r.randn(2, 3), r.randn(3, 4)),
('...ij,jk...->...ik...', r.randn(2, 3), r.randn(3, 4)),
# invalid broadcast dimensions.
('...ij,...jk->...ik', r.randn(5, 2, 3), r.randn(7, 3, 4)),
# output should have ellipsis when broadcasting shape is non-empty.
('...ij,...jk->ik', r.randn(2, 2, 3), r.randn(3, 4)),
]
for args in cases:
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
_ = special_math_ops.einsum(*args)
placeholders = [
array_ops.placeholder_with_default(x, shape=None) for x in args[1:]
]
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
_ = self.evaluate(special_math_ops.einsum(args[0], *placeholders))
@test_util.disable_xla('b/131919749')
def test_empty(self):
def check(equation, input_shapes, output_shape):
# All these cases result in an output filled with zeros, so we don't call
# np.einsum. Also np.einsum doesn't support generalized diagonals which
# are needed for EinsumOp gradients.
r = np.random.RandomState(0)
inputs = [np.array(r.randn(*shape)) for shape in input_shapes]
input_tensors = [constant_op.constant(x, shape=x.shape) for x in inputs]
output = self.evaluate(special_math_ops.einsum(equation, *input_tensors))
self.assertAllClose(output, np.zeros(output_shape), atol=1e-4, rtol=1e-4)
# Contractions along zero-sized dimensions.
check('ab,bc->ac', [(0, 10), (10, 10)], (0, 10))
# From transformer xl.
check('ibnd,ijbn->jnd', [(1, 0, 5, 10), (1, 1, 0, 5)], (1, 5, 10))
# Generalized traces with zero-sized dimensions.
check('aab,bc->ac', [(0, 0, 10), (10, 10)], (0, 10))
check('aaab,bc->c', [(0, 0, 0, 3), (3, 4)], (4,))
@test_util.run_all_in_graph_and_eager_modes
| EinsumTest |
python | pandas-dev__pandas | pandas/tests/indexing/test_coercion.py | {
"start": 482,
"end": 815
} | class ____:
klasses = ["index", "series"]
dtypes = [
"object",
"int64",
"float64",
"complex128",
"bool",
"datetime64",
"datetime64tz",
"timedelta64",
"period",
]
@property
def method(self):
raise NotImplementedError(self)
| CoercionBase |
python | walkccc__LeetCode | solutions/313. Super Ugly Number/313.py | {
"start": 0,
"end": 409
} | class ____:
def nthSuperUglyNumber(self, n: int, primes: list[int]) -> int:
k = len(primes)
nums = [1]
indices = [0] * k
while len(nums) < n:
nexts = [0] * k
for i in range(k):
nexts[i] = nums[indices[i]] * primes[i]
next = min(nexts)
for i in range(k):
if next == nexts[i]:
indices[i] += 1
nums.append(next)
return nums[-1]
| Solution |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/vgg_block_test.py | {
"start": 1176,
"end": 2520
} | class ____(trt_test.TfTrtIntegrationTestBase):
"""Single vgg layer test in TF-TRT conversion."""
def GraphFn(self, x):
dtype = x.dtype
x, _, _ = nn_impl.fused_batch_norm(
x, [1.0, 1.0], [0.0, 0.0],
mean=[0.5, 0.5],
variance=[1.0, 1.0],
is_training=False)
e = constant_op.constant(
np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
conv = nn.conv2d(
input=x, filter=e, strides=[1, 2, 2, 1], padding="SAME", name="conv")
b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
t = nn.bias_add(conv, b, name="biasAdd")
relu = nn.relu(t, "relu")
idty = array_ops.identity(relu, "ID")
v = nn_ops.max_pool(
idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
return array_ops.squeeze(v, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[5, 8, 8, 2]],
[[5, 2, 2, 6]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_000"]
# TODO(b/159459919): remove this routine to disallow native segment execution.
def setUp(self):
super().setUp()
os.environ["TF_TRT_ALLOW_ENGINE_NATIVE_SEGMENT_EXECUTION"] = "True"
if __name__ == "__main__":
test.main()
| VGGBlockTest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 817543,
"end": 818979
} | class ____(VegaLiteSchema):
"""
OrderValueDef schema wrapper.
Parameters
----------
value : dict, float, :class:`ExprRef`
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
condition : dict, :class:`ConditionalValueDefnumber`, :class:`ConditionalParameterValueDefnumber`, :class:`ConditionalPredicateValueDefnumber`, Sequence[dict, :class:`ConditionalValueDefnumber`, :class:`ConditionalParameterValueDefnumber`, :class:`ConditionalPredicateValueDefnumber`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
"""
_schema = {"$ref": "#/definitions/OrderValueDef"}
def __init__(
self,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
**kwds,
):
super().__init__(value=value, condition=condition, **kwds)
| OrderValueDef |
python | django__django | tests/migrations/test_migrations_no_changes/0002_second.py | {
"start": 43,
"end": 666
} | class ____(migrations.Migration):
dependencies = [
("migrations", "0001_initial"),
]
operations = [
migrations.DeleteModel("Tribble"),
migrations.RemoveField("Author", "silly_field"),
migrations.AddField("Author", "rating", models.IntegerField(default=0)),
migrations.CreateModel(
"Book",
[
("id", models.BigAutoField(primary_key=True)),
(
"author",
models.ForeignKey("migrations.Author", models.SET_NULL, null=True),
),
],
),
]
| Migration |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/build_systems/python.py | {
"start": 416,
"end": 705
} | class ____(PythonExtension):
build_system_class = "PythonPackage"
default_buildsystem = "python_pip"
install_time_test_callbacks = ["test_imports"]
build_system("python_pip")
extends("python", when="build_system=python_pip")
@register_builder("python_pip")
| PythonPackage |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict5.py | {
"start": 393,
"end": 1297
} | class ____(TypedDict, total=True):
name: str
year: float
movie1: Movie1 = Movie2(name="hello", year=1971)
# This should generate an error because
# items are required in Movie3 but not Movie2.
movie2: Movie2 = Movie3(name="hello", year=1971)
# This should generate an error because
# items are required in Movie3 but not Movie2.
movie3: Movie3 = Movie2(name="hello", year=1971)
# This should generate an error.
movie4: Movie4 = Movie3(name="hello", year=1971)
movie5: Movie3 = Movie4(name="hello", year=1971, earnings=23)
movie6 = Movie2(name="hello", year=1971)
movie6["name"] = "goodbye"
movie7 = {"name": "hello", "year": 1971}
movie7["name"] = "goodbye"
movie8: Movie2 = {"year": 1981, "name": "test"}
movie8["year"] = 1982
movie9 = Movie3(name="", year=1971)
# This should generate an error because "year" is mutable,
# so its type must match exactly.
movie10: Movie5 = movie9
| Movie5 |
python | django__django | tests/model_options/test_tablespaces.py | {
"start": 816,
"end": 5441
} | class ____(TransactionTestCase):
available_apps = ["model_options"]
def setUp(self):
# The unmanaged models need to be removed after the test in order to
# prevent bad interactions with the flush operation in other tests.
self._old_models = apps.app_configs["model_options"].models.copy()
for model in Article, Authors, Reviewers, Scientist:
model._meta.managed = True
def tearDown(self):
for model in Article, Authors, Reviewers, Scientist:
model._meta.managed = False
apps.app_configs["model_options"].models = self._old_models
apps.all_models["model_options"] = self._old_models
apps.clear_cache()
def assertNumContains(self, haystack, needle, count):
real_count = haystack.count(needle)
self.assertEqual(
real_count,
count,
"Found %d instances of '%s', expected %d" % (real_count, needle, count),
)
@skipUnlessDBFeature("supports_tablespaces")
def test_tablespace_for_model(self):
sql = sql_for_table(Scientist).lower()
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, "tbl_tbsp", 1)
# 1 for the index on the primary key
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
else:
# 1 for the table + 1 for the index on the primary key
self.assertNumContains(sql, "tbl_tbsp", 2)
@skipIfDBFeature("supports_tablespaces")
def test_tablespace_ignored_for_model(self):
# No tablespace-related SQL
self.assertEqual(sql_for_table(Scientist), sql_for_table(ScientistRef))
@skipUnlessDBFeature("supports_tablespaces")
def test_tablespace_for_indexed_field(self):
sql = sql_for_table(Article).lower()
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, "tbl_tbsp", 1)
# 1 for the primary key + 1 for the index on code
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 2)
else:
# 1 for the table + 1 for the primary key + 1 for the index on code
self.assertNumContains(sql, "tbl_tbsp", 3)
# 1 for the index on reference
self.assertNumContains(sql, "idx_tbsp", 1)
@skipIfDBFeature("supports_tablespaces")
def test_tablespace_ignored_for_indexed_field(self):
# No tablespace-related SQL
self.assertEqual(sql_for_table(Article), sql_for_table(ArticleRef))
@skipUnlessDBFeature("supports_tablespaces")
def test_tablespace_for_many_to_many_field(self):
sql = sql_for_table(Authors).lower()
# The join table of the ManyToManyField goes to the model's tablespace,
# and its indexes too, unless DEFAULT_INDEX_TABLESPACE is set.
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, "tbl_tbsp", 1)
# 1 for the primary key
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
else:
# 1 for the table + 1 for the index on the primary key
self.assertNumContains(sql, "tbl_tbsp", 2)
self.assertNumContains(sql, "idx_tbsp", 0)
sql = sql_for_index(Authors).lower()
# The ManyToManyField declares no db_tablespace, its indexes go to
# the model's tablespace, unless DEFAULT_INDEX_TABLESPACE is set.
if settings.DEFAULT_INDEX_TABLESPACE:
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 2)
else:
self.assertNumContains(sql, "tbl_tbsp", 2)
self.assertNumContains(sql, "idx_tbsp", 0)
sql = sql_for_table(Reviewers).lower()
# The join table of the ManyToManyField goes to the model's tablespace,
# and its indexes too, unless DEFAULT_INDEX_TABLESPACE is set.
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, "tbl_tbsp", 1)
# 1 for the primary key
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
else:
# 1 for the table + 1 for the index on the primary key
self.assertNumContains(sql, "tbl_tbsp", 2)
self.assertNumContains(sql, "idx_tbsp", 0)
sql = sql_for_index(Reviewers).lower()
# The ManyToManyField declares db_tablespace, its indexes go there.
self.assertNumContains(sql, "tbl_tbsp", 0)
self.assertNumContains(sql, "idx_tbsp", 2)
| TablespacesTests |
python | Lightning-AI__lightning | examples/pytorch/bug_report/bug_report_model.py | {
"start": 131,
"end": 388
} | class ____(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
| RandomDataset |
python | django__django | tests/template_tests/filter_tests/test_unordered_list.py | {
"start": 2075,
"end": 6180
} | class ____(SimpleTestCase):
def test_list(self):
self.assertEqual(
unordered_list(["item 1", "item 2"]), "\t<li>item 1</li>\n\t<li>item 2</li>"
)
def test_list_gettext(self):
self.assertEqual(
unordered_list(["item 1", gettext_lazy("item 2")]),
"\t<li>item 1</li>\n\t<li>item 2</li>",
)
def test_nested(self):
self.assertEqual(
unordered_list(["item 1", ["item 1.1"]]),
"\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>",
)
def test_nested2(self):
self.assertEqual(
unordered_list(["item 1", ["item 1.1", "item1.2"], "item 2"]),
"\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t\t<li>item1.2"
"</li>\n\t</ul>\n\t</li>\n\t<li>item 2</li>",
)
def test_nested3(self):
self.assertEqual(
unordered_list(["item 1", "item 2", ["item 2.1"]]),
"\t<li>item 1</li>\n\t<li>item 2\n\t<ul>\n\t\t<li>item 2.1"
"</li>\n\t</ul>\n\t</li>",
)
def test_nested_multiple(self):
self.assertEqual(
unordered_list(["item 1", ["item 1.1", ["item 1.1.1", ["item 1.1.1.1"]]]]),
"\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1\n\t\t<ul>\n\t\t\t<li>"
"item 1.1.1\n\t\t\t<ul>\n\t\t\t\t<li>item 1.1.1.1</li>\n\t\t\t"
"</ul>\n\t\t\t</li>\n\t\t</ul>\n\t\t</li>\n\t</ul>\n\t</li>",
)
def test_nested_multiple2(self):
self.assertEqual(
unordered_list(["States", ["Kansas", ["Lawrence", "Topeka"], "Illinois"]]),
"\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>"
"Lawrence</li>\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>"
"\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>",
)
def test_autoescape(self):
self.assertEqual(
unordered_list(["<a>item 1</a>", "item 2"]),
"\t<li><a>item 1</a></li>\n\t<li>item 2</li>",
)
def test_autoescape_off(self):
self.assertEqual(
unordered_list(["<a>item 1</a>", "item 2"], autoescape=False),
"\t<li><a>item 1</a></li>\n\t<li>item 2</li>",
)
def test_ulitem(self):
class ULItem:
def __init__(self, title):
self.title = title
def __str__(self):
return "ulitem-%s" % str(self.title)
a = ULItem("a")
b = ULItem("b")
c = ULItem("<a>c</a>")
self.assertEqual(
unordered_list([a, b, c]),
"\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>\n\t"
"<li>ulitem-<a>c</a></li>",
)
def item_generator():
yield from (a, b, c)
self.assertEqual(
unordered_list(item_generator()),
"\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>\n\t"
"<li>ulitem-<a>c</a></li>",
)
def test_nested_generators(self):
def inner_generator():
yield from ("B", "C")
def item_generator():
yield "A"
yield inner_generator()
yield "D"
self.assertEqual(
unordered_list(item_generator()),
"\t<li>A\n\t<ul>\n\t\t<li>B</li>\n\t\t<li>C</li>\n\t</ul>\n\t</li>\n\t"
"<li>D</li>",
)
def test_ulitem_autoescape_off(self):
class ULItem:
def __init__(self, title):
self.title = title
def __str__(self):
return "ulitem-%s" % str(self.title)
a = ULItem("a")
b = ULItem("b")
c = ULItem("<a>c</a>")
self.assertEqual(
unordered_list([a, b, c], autoescape=False),
"\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>\n\t<li>ulitem-<a>c</a></li>",
)
def item_generator():
yield from (a, b, c)
self.assertEqual(
unordered_list(item_generator(), autoescape=False),
"\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>\n\t<li>ulitem-<a>c</a></li>",
)
| FunctionTests |
python | getsentry__sentry | src/sentry/integrations/github_enterprise/webhook.py | {
"start": 3596,
"end": 3690
} | class ____(GitHubEnterpriseWebhook, PushEventWebhook):
pass
| GitHubEnterprisePushEventWebhook |
python | streamlit__streamlit | lib/tests/streamlit/commands/experimental_query_params_test.py | {
"start": 970,
"end": 3971
} | class ____(DeltaGeneratorTestCase):
"""Test Query params commands APIs."""
def test_set_query_params_sends_protobuf_message(self):
"""Test valid st.set_query_params sends protobuf message."""
st.experimental_set_query_params(x="a")
message = self.get_message_from_queue(0)
assert message.page_info_changed.query_string == "x=a"
def test_set_query_params_exceptions(self):
"""Test invalid st.set_query_params raises exceptions."""
with pytest.raises(StreamlitAPIException):
st.experimental_set_query_params(embed="True")
with pytest.raises(StreamlitAPIException):
st.experimental_set_query_params(embed_options="show_colored_line")
def test_get_query_params_after_set_query_params(self):
"""Test valid st.set_query_params sends protobuf message."""
p_set = dict(x=["a"])
st.experimental_set_query_params(**p_set)
p_get = st.experimental_get_query_params()
assert p_get == p_set
def test_set_query_params_empty_str(self):
empty_str_params = dict(x=[""])
st.experimental_set_query_params(**empty_str_params)
params_get = st.experimental_get_query_params()
assert params_get == empty_str_params
@parameterized.expand(
[
({"x": ["a"]}, ["x"], {}),
({"a": ["a1", "a2"], "b": ["b1", "b2"]}, ["a"], {"b": ["b1", "b2"]}),
({"c": ["c1", "c2"]}, "no_existing_key", {"c": ["c1", "c2"]}),
(
{
"embed": ["true"],
"embed_options": ["show_padding", "show_colored_line"],
},
["embed", "embed_options"],
{},
),
(
{"EMBED": ["TRUE"], "EMBED_OPTIONS": ["DISABLE_SCROLLING"]},
["embed", "embed_options"],
{},
),
]
)
def test_exclude_keys_in_dict(
self,
d: dict[str, list[str]],
keys_to_drop: list[str],
result: dict[str, list[str]],
):
assert _exclude_keys_in_dict(d, keys_to_drop) == result
@parameterized.expand(
[
({"x": ["a"]}, "x", {"a"}),
({"a": ["a1"], "b": ["b1", "b2"]}, "a", {"a1"}),
({"c": ["c1", "c2"]}, "no_existing_key", set()),
(
{
"embed": ["true"],
"embed_options": ["show_padding", "show_colored_line"],
},
"embed",
{"true"},
),
(
{"EMBED": ["TRUE"], "EMBED_OPTIONS": ["DISABLE_SCROLLING"]},
"embed_options",
{"disable_scrolling"},
),
]
)
def test_extract_key_query_params(
self, query_params: dict[str, list[str]], param_key: str, result: set[str]
):
assert _extract_key_query_params(query_params, param_key) == result
| QueryParamsAPITest |
python | TheAlgorithms__Python | data_structures/binary_tree/is_sorted.py | {
"start": 736,
"end": 3044
} | class ____:
data: float
left: Node | None = None
right: Node | None = None
def __iter__(self) -> Iterator[float]:
"""
>>> root = Node(data=2.1)
>>> list(root)
[2.1]
>>> root.left=Node(data=2.0)
>>> list(root)
[2.0, 2.1]
>>> root.right=Node(data=2.2)
>>> list(root)
[2.0, 2.1, 2.2]
"""
if self.left:
yield from self.left
yield self.data
if self.right:
yield from self.right
@property
def is_sorted(self) -> bool:
"""
>>> Node(data='abc').is_sorted
True
>>> Node(data=2,
... left=Node(data=1.999),
... right=Node(data=3)).is_sorted
True
>>> Node(data=0,
... left=Node(data=0),
... right=Node(data=0)).is_sorted
True
>>> Node(data=0,
... left=Node(data=-11),
... right=Node(data=3)).is_sorted
True
>>> Node(data=5,
... left=Node(data=1),
... right=Node(data=4, left=Node(data=3))).is_sorted
False
>>> Node(data='a',
... left=Node(data=1),
... right=Node(data=4, left=Node(data=3))).is_sorted
Traceback (most recent call last):
...
TypeError: '<' not supported between instances of 'str' and 'int'
>>> Node(data=2,
... left=Node([]),
... right=Node(data=4, left=Node(data=3))).is_sorted
Traceback (most recent call last):
...
TypeError: '<' not supported between instances of 'int' and 'list'
"""
if self.left and (self.data < self.left.data or not self.left.is_sorted):
return False
return not (
self.right and (self.data > self.right.data or not self.right.is_sorted)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
tree = Node(data=2.1, left=Node(data=2.0), right=Node(data=2.2))
print(f"Tree {list(tree)} is sorted: {tree.is_sorted = }.")
assert tree.right
tree.right.data = 2.0
print(f"Tree {list(tree)} is sorted: {tree.is_sorted = }.")
tree.right.data = 2.1
print(f"Tree {list(tree)} is sorted: {tree.is_sorted = }.")
| Node |
python | huggingface__transformers | src/transformers/models/edgetam/configuration_edgetam.py | {
"start": 5641,
"end": 7737
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`EdgeTamPromptEncoder`]. The [`EdgeTamPromptEncoder`]
module is used to encode the input 2D points and bounding boxes.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the hidden states.
image_size (`int`, *optional*, defaults to 1024):
The expected output resolution of the image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
mask_input_channels (`int`, *optional*, defaults to 16):
The number of channels to be fed to the `MaskDecoder` module.
num_point_embeddings (`int`, *optional*, defaults to 4):
The number of point embeddings to be used.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the encoder and pooler.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
scale (`float`, *optional*, defaults to 1):
The scale factor for the prompt encoder.
"""
base_config_key = "prompt_encoder_config"
def __init__(
self,
hidden_size=256,
image_size=1024,
patch_size=16,
mask_input_channels=16,
num_point_embeddings=4,
hidden_act="gelu",
layer_norm_eps=1e-6,
scale=1,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.image_size = image_size
self.patch_size = patch_size
self.mask_input_channels = mask_input_channels
self.num_point_embeddings = num_point_embeddings
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.scale = scale
| EdgeTamPromptEncoderConfig |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 223038,
"end": 223107
} | class ____(COONonCanonicalMixin, TestCOO):
pass
| TestCOONonCanonical |
python | pytorch__pytorch | test/distributed/_shard/sharded_tensor/test_sharded_tensor_reshard.py | {
"start": 831,
"end": 3572
} | class ____(ShardedTensorTestBase):
def _run_sharded_tensor_reshard(self, sharding_spec, reshard_spec, input_size):
torch.manual_seed(0)
local_tensor = torch.rand(*input_size).cuda(self.rank)
st = _shard_tensor(local_tensor, sharding_spec)
st_compare = _shard_tensor(local_tensor, reshard_spec)
st.reshard(reshard_spec)
self.assertEqual(1, len(st.local_shards()))
self.assertEqual(1, len(st_compare.local_shards()))
st_compare._metadata.shards_metadata.sort(
key=lambda metadata: metadata.placement.rank()
)
self.assertEqual(st._metadata, st_compare._metadata)
self.assertEqual(st.local_tensor(), st_compare.local_tensor())
self.assertEqual(
st.local_shards()[0].metadata, st_compare.local_shards()[0].metadata
)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_reshard(self):
dims = [0, 1]
for sharding_dim, reshard_dim in product(dims, dims):
specs = _chunk_sharding_specs_list_for_test(
[sharding_dim, reshard_dim], seed=5
)
spec, reshard_spec = specs[0], specs[1]
self._run_sharded_tensor_reshard(spec, reshard_spec, [13, 21])
self._run_sharded_tensor_reshard(spec, reshard_spec, [14, 23])
self._run_sharded_tensor_reshard(spec, reshard_spec, [15, 26])
self._run_sharded_tensor_reshard(spec, reshard_spec, [12, 24])
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_reshard_errors(self):
specs = _chunk_sharding_specs_list_for_test([0, 1], seed=6)
spec, reshard_spec = specs[0], specs[1]
enumerable_sharding_spec = EnumerableShardingSpec(
[
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
]
)
st = sharded_tensor.rand(spec, 24, 12)
with self.assertRaisesRegex(
NotImplementedError, "Only ChunkShardingSpec supported for reshard."
):
st.reshard(enumerable_sharding_spec)
st._local_shards = [st.local_shards()[0], st.local_shards()[0]]
with self.assertRaisesRegex(
NotImplementedError, "Only single local shard supported for reshard."
):
st.reshard(reshard_spec)
if __name__ == "__main__":
run_tests()
| TestReshard |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 97857,
"end": 98112
} | class ____:
xlDisplayPropertyInPivotTable = 1 # from enum XlPropertyDisplayedIn
xlDisplayPropertyInPivotTableAndTooltip = 3 # from enum XlPropertyDisplayedIn
xlDisplayPropertyInTooltip = 2 # from enum XlPropertyDisplayedIn
| PropertyDisplayedIn |
python | dagster-io__dagster | python_modules/libraries/dagster-celery/dagster_celery/launcher.py | {
"start": 1156,
"end": 8754
} | class ____(RunLauncher, ConfigurableClass):
"""Dagster [Run Launcher](https://docs.dagster.io/guides/deploy/execution/run-launchers) which
starts runs as Celery tasks.
"""
_instance: DagsterInstance # pyright: ignore[reportIncompatibleMethodOverride]
celery: Celery
def __init__(
self,
default_queue: str,
broker: Optional[str] = None,
backend: Optional[str] = None,
include: Optional[list[str]] = None,
config_source: Optional[dict] = None,
inst_data: Optional[ConfigurableClassData] = None,
) -> None:
self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
self.broker = check.opt_str_param(broker, "broker", default=broker)
self.backend = check.opt_str_param(backend, "backend", default=backend)
self.include = check.opt_list_param(include, "include", of_type=str)
self.config_source = dict(
DEFAULT_CONFIG, **check.opt_dict_param(config_source, "config_source")
)
self.default_queue = check.str_param(default_queue, "default_queue")
self.celery = make_app(
app_args=self.app_args(),
)
super().__init__()
def app_args(self) -> dict:
return {
"broker": self.broker,
"backend": self.backend,
"include": self.include,
"config_source": self.config_source,
"task_default_queue": self.default_queue,
}
def launch_run(self, context: LaunchRunContext) -> None:
run = context.dagster_run
job_origin = check.not_none(run.job_code_origin)
args = ExecuteRunArgs(
job_origin=job_origin,
run_id=run.run_id,
instance_ref=self._instance.get_ref(),
set_exit_code_on_failure=True,
)
task = create_execute_job_task(self.celery)
task_signature = task.si(
execute_job_args_packed=pack_value(args),
)
self._launch_celery_task_run(
run=run,
task_signature=task_signature,
routing_key=TASK_EXECUTE_JOB_NAME,
)
def terminate(self, run_id: str) -> bool:
run = self._instance.get_run_by_id(run_id)
if run is None:
return False
task_id = run.tags[DAGSTER_CELERY_TASK_ID_TAG]
result: AsyncResult = self.celery.AsyncResult(task_id)
result.revoke(terminate=True)
return True
@property
def supports_resume_run(self) -> bool:
return True
def resume_run(self, context: ResumeRunContext) -> None:
run = context.dagster_run
job_origin = check.not_none(run.job_code_origin)
args = ResumeRunArgs(
job_origin=job_origin,
run_id=run.run_id,
instance_ref=self._instance.get_ref(),
set_exit_code_on_failure=True,
)
task = create_resume_job_task(args)
task_signature = task.si(
execute_job_args_packed=pack_value(args),
)
self._launch_celery_task_run(
run=run,
task_signature=task_signature,
routing_key=TASK_RESUME_JOB_NAME,
)
def _launch_celery_task_run(
self,
run: DagsterRun,
task_signature: Celery.Task,
routing_key: str,
) -> None:
run_priority = _get_run_priority(run)
queue = run.tags.get(DAGSTER_CELERY_QUEUE_TAG, self.default_queue)
self._instance.report_engine_event(
"Creating Celery run worker job task",
run,
cls=self.__class__,
)
result: AsyncResult = task_signature.apply_async(
priority=run_priority,
queue=queue,
routing_key=f"{queue}.{routing_key}",
)
self._instance.add_run_tags(
run.run_id,
{DAGSTER_CELERY_TASK_ID_TAG: result.task_id},
)
self._instance.report_engine_event(
"Celery task has been forwarded to the broker.",
run,
EngineEventData(
{
"Run ID": run.run_id,
"Celery Task ID": result.task_id,
"Celery Queue": queue,
}
),
cls=self.__class__,
)
@property
def supports_check_run_worker_health(self) -> bool:
return True
def check_run_worker_health(self, run: DagsterRun) -> CheckRunHealthResult:
task_id = run.tags[DAGSTER_CELERY_TASK_ID_TAG]
result: AsyncResult = self.celery.AsyncResult(task_id)
task_status = result.state
if task_status == "SUCCESS":
return CheckRunHealthResult(WorkerStatus.SUCCESS)
if task_status == "FAILURE":
return CheckRunHealthResult(WorkerStatus.FAILED, "Celery task failed.")
if task_status == "STARTED":
return CheckRunHealthResult(WorkerStatus.RUNNING)
# Handles the PENDING and RETRYING states.
return CheckRunHealthResult(WorkerStatus.UNKNOWN, f"Unknown task status: {task_status}")
@override
def get_run_worker_debug_info(
self, run: DagsterRun, include_container_logs: Optional[bool] = True
) -> Optional[str]:
task_id = run.tags[DAGSTER_CELERY_TASK_ID_TAG]
result: AsyncResult = self.celery.AsyncResult(task_id)
task_status = result.state
worker = result.worker
return str(
{
"run_id": run.run_id,
"celery_task_id": task_id,
"task_status": task_status,
"worker": worker,
}
)
@property
def inst_data(self) -> Optional[ConfigurableClassData]:
return self._inst_data
@classmethod
def config_type(cls) -> "UserConfigSchema":
return {
"broker": Field(
Noneable(StringSource),
is_required=False,
description=(
"The URL of the Celery broker. Default: "
"'pyamqp://guest@{os.getenv('DAGSTER_CELERY_BROKER_HOST',"
"'localhost')}//'."
),
),
"backend": Field(
Noneable(StringSource),
is_required=False,
default_value="rpc://",
description="The URL of the Celery results backend. Default: 'rpc://'.",
),
"include": Field(
[str],
is_required=False,
description="List of modules every worker should import",
),
"default_queue": Field(
StringSource,
is_required=False,
description="The default queue to use when a run does not specify "
"Celery queue tag.",
default_value=task_default_queue,
),
"config_source": Field(
Noneable(Permissive()),
is_required=False,
description="Additional settings for the Celery app.",
),
}
@classmethod
def from_config_value(
cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]
) -> Self:
return cls(inst_data=inst_data, **config_value)
def _get_run_priority(run: DagsterRun) -> int:
if DAGSTER_CELERY_RUN_PRIORITY_TAG not in run.tags:
return 0
try:
return int(run.tags[DAGSTER_CELERY_RUN_PRIORITY_TAG])
except ValueError:
return 0
| CeleryRunLauncher |
python | google__jax | tests/array_extensibility_test.py | {
"start": 2053,
"end": 2663
} | class ____(NamedTuple):
fun: Callable[..., Any]
args: list[jax.ShapeDtypeStruct]
kwargs: dict[str, Any]
skip_on_devices: list[str] | None
def name(self):
return self.fun.__name__
def make_args(self, rng):
rng = jtu.rand_default(rng)
return jax.tree.map(lambda arg: rng(arg.shape, arg.dtype), self.args)
def with_skip_on_devices(self, disabled_devices: list[str]) -> 'NumPyAPI':
return self._replace(skip_on_devices=disabled_devices)
@classmethod
def sig(cls, fun: Callable[..., Any], *args: Any, **kwargs: Any) -> 'NumPyAPI':
return cls(fun, args, kwargs, None)
| NumPyAPI |
python | kamyu104__LeetCode-Solutions | Python/pairs-of-songs-with-total-durations-divisible-by-60.py | {
"start": 50,
"end": 357
} | class ____(object):
def numPairsDivisibleBy60(self, time):
"""
:type time: List[int]
:rtype: int
"""
result = 0
count = collections.Counter()
for t in time:
result += count[-t%60]
count[t%60] += 1
return result
| Solution |
python | openai__openai-python | src/openai/resources/models.py | {
"start": 10440,
"end": 10823
} | class ____:
def __init__(self, models: Models) -> None:
self._models = models
self.retrieve = to_streamed_response_wrapper(
models.retrieve,
)
self.list = to_streamed_response_wrapper(
models.list,
)
self.delete = to_streamed_response_wrapper(
models.delete,
)
| ModelsWithStreamingResponse |
python | viewflow__viewflow | viewflow/workflow/flow/views/actions.py | {
"start": 1087,
"end": 1701
} | class ____(
mixins.SuccessMessageMixin,
mixins.TaskSuccessUrlMixin,
mixins.TaskViewTemplateNames,
generic.FormView,
):
"""
Default unassign view for flow task.
Get confirmation from user, and unassign task
"""
form_class = forms.Form
template_filename = "task_unassign.html"
success_message = _("Task {task} has been unassigned.")
def form_valid(self, *args, **kwargs):
"""If the form is valid, save the associated model and unassign the task."""
self.request.activation.unassign()
return super().form_valid(*args, **kwargs)
| UnassignTaskView |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/conv.py | {
"start": 12078,
"end": 16624
} | class ____(_ConvNd):
r"""Applies a 1D convolution over a quantized input signal composed of
several quantized input planes.
For details on input arguments, parameters, and implementation see
:class:`~torch.nn.Conv1d`.
.. note::
Only `zeros` is supported for the :attr:`padding_mode` argument.
.. note::
Only `torch.quint8` is supported for the input data type.
Attributes:
weight (Tensor): packed tensor derived from the learnable weight
parameter.
scale (Tensor): scalar for the output scale
zero_point (Tensor): scalar for the output zero point
See :class:`~torch.nn.Conv1d` for other attributes.
Examples::
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
>>> m = nn.quantized.Conv1d(16, 33, 3, stride=2)
>>> input = torch.randn(20, 16, 100)
>>> # quantize input to quint8
>>> # xdoctest: +SKIP
>>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0,
... dtype=torch.quint8)
>>> output = m(q_input)
"""
_FLOAT_MODULE: ClassVar[type[nn.Conv1d]] = nn.Conv1d
_NNIQAT_CONV_BN_MODULE: ClassVar[Optional[type[nn.Module]]] = nniqat.ConvBn1d
_NNI_CONV_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = nni.ConvReLU1d
_NNI_CONV_ADD_MODULE: ClassVar[Optional[type[nn.Module]]] = None
_NNI_CONV_ADD_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = None
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: _size_1_t = 0,
dilation: _size_1_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: Literal["zeros", "reflect", "replicate", "circular"] = "zeros",
device=None,
dtype=None,
):
factory_kwargs = {"device": device, "dtype": dtype}
kernel_size = _single(kernel_size)
stride = _single(stride)
# pyrefly: ignore [bad-assignment]
padding = padding if isinstance(padding, str) else _single(padding)
dilation = _single(dilation)
# Subclasses of _ConvNd needs to call _init rather than __init__. See
# discussion on PR #49702
super()._init(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
False,
_single(0),
groups,
bias,
padding_mode,
**factory_kwargs,
)
def _get_name(self):
return "QuantizedConv1d"
def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
if self.padding_mode == "zeros":
self._packed_params = torch.ops.quantized.conv1d_prepack(
w, b, self.stride, self.padding, self.dilation, self.groups
)
else:
self._packed_params = torch.ops.quantized.conv1d_prepack(
w, b, self.stride, _pair(0), self.dilation, self.groups
)
def _weight_bias(self):
w, b = torch.ops.quantized.conv1d_unpack(self._packed_params)
return w, b
def weight(self):
return self._weight_bias()[0]
def bias(self):
return self._weight_bias()[1]
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 3:
raise ValueError("Input shape must be `(N, C, L)`!")
if self.padding_mode != "zeros":
# Padding in Conv1d is stored as (p, p), need to get (p,)
_reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding[:1])
input = F.pad(
input, _reversed_padding_repeated_twice, mode=self.padding_mode
)
return ops.quantized.conv1d(
input, self._packed_params, self.scale, self.zero_point
)
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False): # type: ignore[override]
r"""Creates a quantized module from a float module or qparams_dict.
Args:
mod (Module): a float module, either produced by torch.ao.quantization
utilities or provided by the user
"""
return _ConvNd.from_float(
cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant
)
| Conv1d |
python | realpython__materials | django-markdown/dmd_app/admin.py | {
"start": 72,
"end": 225
} | class ____(admin.ModelAdmin):
prepopulated_fields = {"slug": ["title"]}
admin.site.register(MarkdownContent, MarkdownContentAdmin)
| MarkdownContentAdmin |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 71479,
"end": 73953
} | class ____(NonStrictDataModel):
"""
:param model_urls:
:type model_urls: Sequence[str]
:param event_urls:
:type event_urls: Sequence[str]
:param artifact_urls:
:type artifact_urls: Sequence[str]
"""
_schema = {
"properties": {
"artifact_urls": {"items": {"type": "string"}, "type": ["array", "null"]},
"event_urls": {"items": {"type": "string"}, "type": ["array", "null"]},
"model_urls": {"items": {"type": "string"}, "type": ["array", "null"]},
},
"type": "object",
}
def __init__(
self,
model_urls: Optional[List[str]] = None,
event_urls: Optional[List[str]] = None,
artifact_urls: Optional[List[str]] = None,
**kwargs: Any
) -> None:
super(TaskUrls, self).__init__(**kwargs)
self.model_urls = model_urls
self.event_urls = event_urls
self.artifact_urls = artifact_urls
@schema_property("model_urls")
def model_urls(self) -> Optional[List[str]]:
return self._property_model_urls
@model_urls.setter
def model_urls(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_model_urls = None
return
self.assert_isinstance(value, "model_urls", (list, tuple))
self.assert_isinstance(value, "model_urls", six.string_types, is_array=True)
self._property_model_urls = value
@schema_property("event_urls")
def event_urls(self) -> Optional[List[str]]:
return self._property_event_urls
@event_urls.setter
def event_urls(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_event_urls = None
return
self.assert_isinstance(value, "event_urls", (list, tuple))
self.assert_isinstance(value, "event_urls", six.string_types, is_array=True)
self._property_event_urls = value
@schema_property("artifact_urls")
def artifact_urls(self) -> Optional[List[str]]:
return self._property_artifact_urls
@artifact_urls.setter
def artifact_urls(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_artifact_urls = None
return
self.assert_isinstance(value, "artifact_urls", (list, tuple))
self.assert_isinstance(value, "artifact_urls", six.string_types, is_array=True)
self._property_artifact_urls = value
| TaskUrls |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_mcp_toolset_param.py | {
"start": 474,
"end": 1040
} | class ____(TypedDict, total=False):
mcp_server_name: Required[str]
"""Name of the MCP server to configure tools for"""
type: Required[Literal["mcp_toolset"]]
cache_control: Optional[BetaCacheControlEphemeralParam]
"""Create a cache control breakpoint at this content block."""
configs: Optional[Dict[str, BetaMCPToolConfigParam]]
"""Configuration overrides for specific tools, keyed by tool name"""
default_config: BetaMCPToolDefaultConfigParam
"""Default configuration applied to all tools from this server"""
| BetaMCPToolsetParam |
python | facebook__pyre-check | tools/generate_taint_models/get_constructor_initialized_attribute_sources.py | {
"start": 661,
"end": 3828
} | class ____(ModelGenerator[AssignmentModel]):
"""
This Generator will taint the attributes initialized by the constructors of
'classes_to_taint' and their descendants. Only descendants that have had
their modules loaded at preprocessing time will be tainted. Models are
generated on a best effort basis by assuming the name of the parameter will
match the name of the attribute it is assigned to. This naive approach means
this model generator will likely generate some invalid models.
"""
def __init__(
self,
classes_to_taint: List[str],
pyre_connection: PyreConnection,
filter_classes_by: Optional[Callable[[Type[T]], bool]] = None,
taint_annotation: str = "TaintSource[UserControlled]",
) -> None:
self.classes_to_taint: List[str] = classes_to_taint
self.pyre_connection = pyre_connection
self.filter_classes_by = filter_classes_by
self.taint_annotation: str = taint_annotation
def gather_functions_to_model(self) -> Iterable[Callable[..., object]]:
return gather_all_constructors_in_hierarchy(
self.classes_to_taint, self.filter_classes_by
)
def compute_models(
self, functions_to_model: Iterable[Callable[..., object]]
) -> Iterable[AssignmentModel]:
constructors = {}
for constructor in functions_to_model:
qualified_name = extract_qualified_name(constructor)
if not qualified_name:
continue
# Strip off __init__ and append the parameter name as an attribute
# name.
class_name = ".".join(qualified_name.split(".")[:-1])
constructors[class_name] = constructor
attributes_map = query.get_attributes(
self.pyre_connection, constructors.keys(), BATCH_SIZE
)
for class_name, constructor in constructors.items():
attributes = {attribute.name for attribute in attributes_map[class_name]}
parameters = extract_parameters(constructor)
for parameter in parameters:
# Skip 'self', and attributes that are callables
if parameter.name == "self" or (
"Callable[" in (parameter.annotation or "")
or "Coroutine[" in (parameter.annotation or "")
):
continue
if parameter.name in attributes:
# If a parameter is a valid attribute, add a taint model.
target = f"{class_name}.{parameter.name}"
yield AssignmentModel(
target=target, annotation=self.taint_annotation
)
if "_" + parameter.name in attributes:
# Same as above, but parameters might be prefixed with an
# underscore to indicate a private attribute.
target = f"{class_name}._{parameter.name}"
yield AssignmentModel(
target=target, annotation=self.taint_annotation
)
| ConstructorInitializedAttributeSourceGenerator |
python | lxml__lxml | src/lxml/html/formfill.py | {
"start": 427,
"end": 5721
} | class ____(LookupError):
"""
Raised when no form can be found
"""
_form_name_xpath = XPath('descendant-or-self::form[name=$name]|descendant-or-self::x:form[name=$name]', namespaces={'x':XHTML_NAMESPACE})
_input_xpath = XPath('|'.join(['descendant-or-self::'+_tag for _tag in ('input','select','textarea','x:input','x:select','x:textarea')]),
namespaces={'x':XHTML_NAMESPACE})
_label_for_xpath = XPath('//label[@for=$for_id]|//x:label[@for=$for_id]',
namespaces={'x':XHTML_NAMESPACE})
_name_xpath = XPath('descendant-or-self::*[@name=$name]')
def fill_form(
el,
values,
form_id=None,
form_index=None,
):
el = _find_form(el, form_id=form_id, form_index=form_index)
_fill_form(el, values)
def fill_form_html(html, values, form_id=None, form_index=None):
result_type = type(html)
if isinstance(html, basestring):
doc = fromstring(html)
else:
doc = copy.deepcopy(html)
fill_form(doc, values, form_id=form_id, form_index=form_index)
return _transform_result(result_type, doc)
def _fill_form(el, values):
counts = {}
if hasattr(values, 'mixed'):
# For Paste request parameters
values = values.mixed()
inputs = _input_xpath(el)
for input in inputs:
name = input.get('name')
if not name:
continue
if _takes_multiple(input):
value = values.get(name, [])
if not isinstance(value, (list, tuple)):
value = [value]
_fill_multiple(input, value)
elif name not in values:
continue
else:
index = counts.get(name, 0)
counts[name] = index + 1
value = values[name]
if isinstance(value, (list, tuple)):
try:
value = value[index]
except IndexError:
continue
elif index > 0:
continue
_fill_single(input, value)
def _takes_multiple(input):
if _nons(input.tag) == 'select' and input.get('multiple'):
# FIXME: multiple="0"?
return True
type = input.get('type', '').lower()
if type in ('radio', 'checkbox'):
return True
return False
def _fill_multiple(input, value):
type = input.get('type', '').lower()
if type == 'checkbox':
v = input.get('value')
if v is None:
if not value:
result = False
else:
result = value[0]
if isinstance(value, basestring):
# The only valid "on" value for an unnamed checkbox is 'on'
result = result == 'on'
_check(input, result)
else:
_check(input, v in value)
elif type == 'radio':
v = input.get('value')
_check(input, v in value)
else:
assert _nons(input.tag) == 'select'
for option in _options_xpath(input):
v = option.get('value')
if v is None:
# This seems to be the default, at least on IE
# FIXME: but I'm not sure
v = option.text_content()
_select(option, v in value)
def _check(el, check):
if check:
el.set('checked', '')
else:
if 'checked' in el.attrib:
del el.attrib['checked']
def _select(el, select):
if select:
el.set('selected', '')
else:
if 'selected' in el.attrib:
del el.attrib['selected']
def _fill_single(input, value):
if _nons(input.tag) == 'textarea':
input.text = value
else:
input.set('value', value)
def _find_form(el, form_id=None, form_index=None):
if form_id is None and form_index is None:
forms = _forms_xpath(el)
for form in forms:
return form
raise FormNotFound(
"No forms in page")
if form_id is not None:
form = el.get_element_by_id(form_id)
if form is not None:
return form
forms = _form_name_xpath(el, name=form_id)
if forms:
return forms[0]
else:
raise FormNotFound(
"No form with the name or id of %r (forms: %s)"
% (id, ', '.join(_find_form_ids(el))))
if form_index is not None:
forms = _forms_xpath(el)
try:
return forms[form_index]
except IndexError:
raise FormNotFound(
"There is no form with the index %r (%i forms found)"
% (form_index, len(forms)))
def _find_form_ids(el):
forms = _forms_xpath(el)
if not forms:
yield '(no forms)'
return
for index, form in enumerate(forms):
if form.get('id'):
if form.get('name'):
yield '%s or %s' % (form.get('id'),
form.get('name'))
else:
yield form.get('id')
elif form.get('name'):
yield form.get('name')
else:
yield '(unnamed form %s)' % index
############################################################
## Error filling
############################################################
| FormNotFound |
python | pytorch__pytorch | torch/fx/graph.py | {
"start": 35503,
"end": 36889
} | class ____(CodeGen):
"""
CodeGen subclass that generates code using the "boxed" calling convention.
The boxed calling convention takes a single list argument and clears it
after extracting the arguments, which allows for early deallocation of
input tensors.
"""
def gen_fn_def(
self, free_vars, maybe_return_annotation, *, expanded_def: bool = False
):
"""
Generate function definition for boxed calling convention.
Instead of taking individual arguments, the generated function takes
a single 'args_list' parameter, extracts placeholder values from it,
and clears the list.
"""
# Generate the function signature with args_list parameter
fn_def = f"def {self._func_name}(self, args_list){maybe_return_annotation}:"
if free_vars:
# This is horribly manual but we don't get the "raw" free vars
# without a bigger refactor.
placeholder_vars = [
v.split(":")[0].split("=")[0].strip() for v in free_vars if v != "self"
]
if placeholder_vars:
fn_def += "\n args_iter = iter(args_list)"
for var in placeholder_vars:
fn_def += f"\n {var} = next(args_iter)"
fn_def += "\n args_list.clear()"
return fn_def
| _BoxedCodeGen |
python | apache__airflow | providers/databricks/tests/unit/databricks/operators/test_databricks_repos.py | {
"start": 5897,
"end": 9037
} | class ____:
@mock.patch("airflow.providers.databricks.operators.databricks_repos.DatabricksHook")
def test_create_plus_checkout(self, db_mock_class):
"""
Test the execute function creating new Repo.
"""
git_url = "https://github.com/test/test"
repo_path = "/Repos/Project1/test-repo"
op = DatabricksReposCreateOperator(
task_id=TASK_ID, git_url=git_url, repo_path=repo_path, branch="releases"
)
db_mock = db_mock_class.return_value
db_mock.update_repo.return_value = {"head_commit_id": "123456"}
db_mock.create_repo.return_value = {"id": "123", "branch": "main"}
db_mock.get_repo_by_path.return_value = None
op.execute(None)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
retry_limit=op.databricks_retry_limit,
retry_delay=op.databricks_retry_delay,
caller="DatabricksReposCreateOperator",
)
db_mock.create_repo.assert_called_once_with({"url": git_url, "provider": "gitHub", "path": repo_path})
db_mock.update_repo.assert_called_once_with("123", {"branch": "releases"})
@mock.patch("airflow.providers.databricks.operators.databricks_repos.DatabricksHook")
def test_create_ignore_existing_plus_checkout(self, db_mock_class):
"""
Test the execute function creating new Repo.
"""
git_url = "https://github.com/test/test"
repo_path = "/Repos/Project1/test-repo"
op = DatabricksReposCreateOperator(
task_id=TASK_ID,
git_url=git_url,
repo_path=repo_path,
branch="releases",
ignore_existing_repo=True,
)
db_mock = db_mock_class.return_value
db_mock.update_repo.return_value = {"head_commit_id": "123456"}
db_mock.get_repo_by_path.return_value = "123"
op.execute(None)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
retry_limit=op.databricks_retry_limit,
retry_delay=op.databricks_retry_delay,
caller="DatabricksReposCreateOperator",
)
db_mock.get_repo_by_path.assert_called_once_with(repo_path)
db_mock.update_repo.assert_called_once_with("123", {"branch": "releases"})
def test_init_exception(self):
"""
Tests handling of incorrect parameters passed to ``__init__``
"""
git_url = "https://github.com/test/test"
repo_path = "/Repos/test-repo"
exception_message = (
f"repo_path should have form of /Repos/{{folder}}/{{repo-name}}, got '{repo_path}'"
)
op = DatabricksReposCreateOperator(task_id=TASK_ID, git_url=git_url, repo_path=repo_path)
with pytest.raises(AirflowException, match=exception_message):
op.execute(None)
with pytest.raises(
AirflowException, match="Only one of branch or tag should be provided, but not both"
):
DatabricksReposCreateOperator(task_id=TASK_ID, git_url=git_url, branch="123", tag="123")
| TestDatabricksReposCreateOperator |
python | celery__celery | celery/app/trace.py | {
"start": 4683,
"end": 30264
} | class ____:
"""Information about task execution."""
__slots__ = ('state', 'retval')
def __init__(self, state, retval=None):
self.state = state
self.retval = retval
def handle_error_state(self, task, req,
eager=False, call_errbacks=True):
if task.ignore_result:
store_errors = task.store_errors_even_if_ignored
elif eager and task.store_eager_result:
store_errors = True
else:
store_errors = not eager
return {
RETRY: self.handle_retry,
FAILURE: self.handle_failure,
}[self.state](task, req,
store_errors=store_errors,
call_errbacks=call_errbacks)
def handle_reject(self, task, req, **kwargs):
self._log_error(task, req, ExceptionInfo())
def handle_ignore(self, task, req, **kwargs):
self._log_error(task, req, ExceptionInfo())
def handle_retry(self, task, req, store_errors=True, **kwargs):
"""Handle retry exception."""
# the exception raised is the Retry semi-predicate,
# and it's exc' attribute is the original exception raised (if any).
type_, _, tb = sys.exc_info()
einfo = None
try:
reason = self.retval
einfo = ExceptionInfo((type_, reason, tb))
if store_errors:
task.backend.mark_as_retry(
req.id, reason.exc, einfo.traceback, request=req,
)
task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
signals.task_retry.send(sender=task, request=req,
reason=reason, einfo=einfo)
info(LOG_RETRY, {
'id': req.id,
'name': get_task_name(req, task.name),
'exc': str(reason),
})
# MEMORY LEAK FIX: Clear traceback frames to prevent memory retention (Issue #8882)
traceback_clear(einfo.exception)
return einfo
finally:
# MEMORY LEAK FIX: Clean up direct traceback reference to prevent
# retention of frame objects and their local variables (Issue #8882)
if tb is not None:
del tb
def handle_failure(self, task, req, store_errors=True, call_errbacks=True):
"""Handle exception."""
orig_exc = self.retval
tb_ref = None
try:
exc = get_pickleable_exception(orig_exc)
if exc.__traceback__ is None:
# `get_pickleable_exception` may have created a new exception without
# a traceback.
_, _, tb_ref = sys.exc_info()
exc.__traceback__ = tb_ref
exc_type = get_pickleable_etype(type(orig_exc))
# make sure we only send pickleable exceptions back to parent.
einfo = ExceptionInfo(exc_info=(exc_type, exc, exc.__traceback__))
task.backend.mark_as_failure(
req.id, exc, einfo.traceback,
request=req, store_result=store_errors,
call_errbacks=call_errbacks,
)
task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
signals.task_failure.send(sender=task, task_id=req.id,
exception=exc, args=req.args,
kwargs=req.kwargs,
traceback=exc.__traceback__,
einfo=einfo)
self._log_error(task, req, einfo)
# MEMORY LEAK FIX: Clear traceback frames to prevent memory retention (Issue #8882)
traceback_clear(exc)
# Note: We return einfo, so we can't clean it up here
# The calling function is responsible for cleanup
return einfo
finally:
# MEMORY LEAK FIX: Clean up any direct traceback references we may have created
# to prevent retention of frame objects and their local variables (Issue #8882)
if tb_ref is not None:
del tb_ref
def _log_error(self, task, req, einfo):
eobj = einfo.exception = get_pickled_exception(einfo.exception)
if isinstance(eobj, ExceptionWithTraceback):
eobj = einfo.exception = eobj.exc
exception, traceback, exc_info, sargs, skwargs = (
safe_repr(eobj),
safe_str(einfo.traceback),
einfo.exc_info,
req.get('argsrepr') or safe_repr(req.args),
req.get('kwargsrepr') or safe_repr(req.kwargs),
)
policy = get_log_policy(task, einfo, eobj)
context = {
'hostname': req.hostname,
'id': req.id,
'name': get_task_name(req, task.name),
'exc': exception,
'traceback': traceback,
'args': sargs,
'kwargs': skwargs,
'description': policy.description,
'internal': einfo.internal,
}
logger.log(policy.severity, policy.format.strip(), context,
exc_info=exc_info if policy.traceback else None,
extra={'data': context})
def traceback_clear(exc=None):
"""Clear traceback frames to prevent memory leaks.
MEMORY LEAK FIX: This function helps break reference cycles between
traceback objects and frame objects that can prevent garbage collection.
Clearing frames releases local variables that may be holding large objects.
"""
# Cleared Tb, but einfo still has a reference to Traceback.
# exc cleans up the Traceback at the last moment that can be revealed.
tb = None
if exc is not None:
if hasattr(exc, '__traceback__'):
tb = exc.__traceback__
else:
_, _, tb = sys.exc_info()
else:
_, _, tb = sys.exc_info()
while tb is not None:
try:
# MEMORY LEAK FIX: tb.tb_frame.clear() clears ALL frame data including
# local variables, which is more efficient than accessing f_locals separately.
# Removed redundant tb.tb_frame.f_locals access that was creating unnecessary references.
tb.tb_frame.clear()
except RuntimeError:
# Ignore the exception raised if the frame is still executing.
pass
tb = tb.tb_next
def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
Info=TraceInfo, eager=False, propagate=False, app=None,
monotonic=time.monotonic, trace_ok_t=trace_ok_t,
IGNORE_STATES=IGNORE_STATES):
"""Return a function that traces task execution.
Catches all exceptions and updates result backend with the
state and result.
If the call was successful, it saves the result to the task result
backend, and sets the task status to `"SUCCESS"`.
If the call raises :exc:`~@Retry`, it extracts
the original exception, uses that as the result and sets the task state
to `"RETRY"`.
If the call results in an exception, it saves the exception as the task
result, and sets the task state to `"FAILURE"`.
Return a function that takes the following arguments:
:param uuid: The id of the task.
:param args: List of positional args to pass on to the function.
:param kwargs: Keyword arguments mapping to pass on to the function.
:keyword request: Request dict.
"""
# pylint: disable=too-many-statements
# If the task doesn't define a custom __call__ method
# we optimize it away by simply calling the run method directly,
# saving the extra method call and a line less in the stack trace.
fun = task if task_has_custom(task, '__call__') else task.run
loader = loader or app.loader
ignore_result = task.ignore_result
track_started = task.track_started
track_started = not eager and (task.track_started and not ignore_result)
# #6476
if eager and not ignore_result and task.store_eager_result:
publish_result = True
else:
publish_result = not eager and not ignore_result
deduplicate_successful_tasks = ((app.conf.task_acks_late or task.acks_late)
and app.conf.worker_deduplicate_successful_tasks
and app.backend.persistent)
hostname = hostname or gethostname()
inherit_parent_priority = app.conf.task_inherit_parent_priority
loader_task_init = loader.on_task_init
loader_cleanup = loader.on_process_cleanup
task_before_start = None
task_on_success = None
task_after_return = None
if task_has_custom(task, 'before_start'):
task_before_start = task.before_start
if task_has_custom(task, 'on_success'):
task_on_success = task.on_success
if task_has_custom(task, 'after_return'):
task_after_return = task.after_return
pid = os.getpid()
request_stack = task.request_stack
push_request = request_stack.push
pop_request = request_stack.pop
push_task = _task_stack.push
pop_task = _task_stack.pop
_does_info = logger.isEnabledFor(logging.INFO)
resultrepr_maxsize = task.resultrepr_maxsize
prerun_receivers = signals.task_prerun.receivers
postrun_receivers = signals.task_postrun.receivers
success_receivers = signals.task_success.receivers
from celery import canvas
signature = canvas.maybe_signature # maybe_ does not clone if already
def on_error(request, exc, state=FAILURE, call_errbacks=True):
if propagate:
raise
I = Info(state, exc)
R = I.handle_error_state(
task, request, eager=eager, call_errbacks=call_errbacks,
)
return I, R, I.state, I.retval
def trace_task(uuid, args, kwargs, request=None):
# R - is the possibly prepared return value.
# I - is the Info object.
# T - runtime
# Rstr - textual representation of return value
# retval - is the always unmodified return value.
# state - is the resulting task state.
# This function is very long because we've unrolled all the calls
# for performance reasons, and because the function is so long
# we want the main variables (I, and R) to stand out visually from the
# the rest of the variables, so breaking PEP8 is worth it ;)
R = I = T = Rstr = retval = state = None
task_request = None
time_start = monotonic()
try:
try:
kwargs.items
except AttributeError:
raise InvalidTaskError(
'Task keyword arguments is not a mapping')
task_request = Context(request or {}, args=args,
called_directly=False, kwargs=kwargs)
redelivered = (task_request.delivery_info
and task_request.delivery_info.get('redelivered', False))
if deduplicate_successful_tasks and redelivered:
if task_request.id in successful_requests:
return trace_ok_t(R, I, T, Rstr)
r = AsyncResult(task_request.id, app=app)
try:
state = r.state
except BackendGetMetaError:
pass
else:
if state == SUCCESS:
info(LOG_IGNORED, {
'id': task_request.id,
'name': get_task_name(task_request, name),
'description': 'Task already completed successfully.'
})
return trace_ok_t(R, I, T, Rstr)
push_task(task)
root_id = task_request.root_id or uuid
task_priority = task_request.delivery_info.get('priority') if \
inherit_parent_priority else None
push_request(task_request)
try:
# -*- PRE -*-
if prerun_receivers:
send_prerun(sender=task, task_id=uuid, task=task,
args=args, kwargs=kwargs)
loader_task_init(uuid, task)
if track_started:
task.backend.store_result(
uuid, {'pid': pid, 'hostname': hostname}, STARTED,
request=task_request,
)
# -*- TRACE -*-
try:
if task_before_start:
task_before_start(uuid, args, kwargs)
R = retval = fun(*args, **kwargs)
state = SUCCESS
except Reject as exc:
I, R = Info(REJECTED, exc), ExceptionInfo(internal=True)
state, retval = I.state, I.retval
I.handle_reject(task, task_request)
# MEMORY LEAK FIX: Clear traceback frames to prevent memory retention (Issue #8882)
traceback_clear(exc)
except Ignore as exc:
I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
state, retval = I.state, I.retval
I.handle_ignore(task, task_request)
# MEMORY LEAK FIX: Clear traceback frames to prevent memory retention (Issue #8882)
traceback_clear(exc)
except Retry as exc:
I, R, state, retval = on_error(
task_request, exc, RETRY, call_errbacks=False)
# MEMORY LEAK FIX: Clear traceback frames to prevent memory retention (Issue #8882)
traceback_clear(exc)
except Exception as exc:
I, R, state, retval = on_error(task_request, exc)
# MEMORY LEAK FIX: Clear traceback frames to prevent memory retention (Issue #8882)
traceback_clear(exc)
except BaseException:
raise
else:
try:
# callback tasks must be applied before the result is
# stored, so that result.children is populated.
# groups are called inline and will store trail
# separately, so need to call them separately
# so that the trail's not added multiple times :(
# (Issue #1936)
callbacks = task.request.callbacks
if callbacks:
if len(task.request.callbacks) > 1:
sigs, groups = [], []
for sig in callbacks:
sig = signature(sig, app=app)
if isinstance(sig, group):
groups.append(sig)
else:
sigs.append(sig)
for group_ in groups:
group_.apply_async(
(retval,),
parent_id=uuid, root_id=root_id,
priority=task_priority
)
if sigs:
group(sigs, app=app).apply_async(
(retval,),
parent_id=uuid, root_id=root_id,
priority=task_priority
)
else:
signature(callbacks[0], app=app).apply_async(
(retval,), parent_id=uuid, root_id=root_id,
priority=task_priority
)
# execute first task in chain
chain = task_request.chain
if chain:
_chsig = signature(chain.pop(), app=app)
_chsig.apply_async(
(retval,), chain=chain,
parent_id=uuid, root_id=root_id,
priority=task_priority
)
task.backend.mark_as_done(
uuid, retval, task_request, publish_result,
)
except EncodeError as exc:
I, R, state, retval = on_error(task_request, exc)
# MEMORY LEAK FIX: Clear traceback frames to prevent memory retention (Issue #8882)
traceback_clear(exc)
else:
Rstr = saferepr(R, resultrepr_maxsize)
T = monotonic() - time_start
if task_on_success:
task_on_success(retval, uuid, args, kwargs)
if success_receivers:
send_success(sender=task, result=retval)
if _does_info:
info(LOG_SUCCESS, {
'id': uuid,
'name': get_task_name(task_request, name),
'return_value': Rstr,
'runtime': T,
'args': task_request.get('argsrepr') or safe_repr(args),
'kwargs': task_request.get('kwargsrepr') or safe_repr(kwargs),
})
# -* POST *-
if state not in IGNORE_STATES:
if task_after_return:
task_after_return(
state, retval, uuid, args, kwargs, None,
)
finally:
try:
if postrun_receivers:
send_postrun(sender=task, task_id=uuid, task=task,
args=args, kwargs=kwargs,
retval=retval, state=state)
finally:
pop_task()
pop_request()
if not eager:
try:
task.backend.process_cleanup()
loader_cleanup()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as exc:
logger.error('Process cleanup failed: %r', exc,
exc_info=True)
except MemoryError:
raise
except Exception as exc:
_signal_internal_error(task, uuid, args, kwargs, request, exc)
if eager:
raise
R = report_internal_error(task, exc)
if task_request is not None:
I, _, _, _ = on_error(task_request, exc)
return trace_ok_t(R, I, T, Rstr)
return trace_task
def trace_task(task, uuid, args, kwargs, request=None, **opts):
"""Trace task execution."""
request = {} if not request else request
try:
if task.__trace__ is None:
task.__trace__ = build_tracer(task.name, task, **opts)
return task.__trace__(uuid, args, kwargs, request)
except Exception as exc:
_signal_internal_error(task, uuid, args, kwargs, request, exc)
return trace_ok_t(report_internal_error(task, exc), TraceInfo(FAILURE, exc), 0.0, None)
def _signal_internal_error(task, uuid, args, kwargs, request, exc):
"""Send a special `internal_error` signal to the app for outside body errors."""
tb = None
einfo = None
try:
_, _, tb = sys.exc_info()
einfo = ExceptionInfo()
einfo.exception = get_pickleable_exception(einfo.exception)
einfo.type = get_pickleable_etype(einfo.type)
signals.task_internal_error.send(
sender=task,
task_id=uuid,
args=args,
kwargs=kwargs,
request=request,
exception=exc,
traceback=tb,
einfo=einfo,
)
finally:
# MEMORY LEAK FIX: Clean up local references to prevent memory leaks (Issue #8882)
# Both 'tb' and 'einfo' can hold references to frame objects and their local variables.
# Explicitly clearing these prevents reference cycles that block garbage collection.
if tb is not None:
del tb
if einfo is not None:
# Clear traceback frames to ensure consistent cleanup
traceback_clear(einfo.exception)
# Break potential reference cycles by deleting the einfo object
del einfo
def trace_task_ret(name, uuid, request, body, content_type,
content_encoding, loads=loads_message, app=None,
**extra_request):
app = app or current_app._get_current_object()
embed = None
if content_type:
accept = prepare_accept_content(app.conf.accept_content)
args, kwargs, embed = loads(
body, content_type, content_encoding, accept=accept,
)
else:
args, kwargs, embed = body
hostname = gethostname()
request.update({
'args': args, 'kwargs': kwargs,
'hostname': hostname, 'is_eager': False,
}, **embed or {})
R, I, T, Rstr = trace_task(app.tasks[name],
uuid, args, kwargs, request, app=app)
return (1, R, T) if I else (0, Rstr, T)
def fast_trace_task(task, uuid, request, body, content_type,
content_encoding, loads=loads_message, _loc=None,
hostname=None, **_):
_loc = _localized if not _loc else _loc
embed = None
tasks, accept, hostname = _loc
if content_type:
args, kwargs, embed = loads(
body, content_type, content_encoding, accept=accept,
)
else:
args, kwargs, embed = body
request.update({
'args': args, 'kwargs': kwargs,
'hostname': hostname, 'is_eager': False,
}, **embed or {})
R, I, T, Rstr = tasks[task].__trace__(
uuid, args, kwargs, request,
)
return (1, R, T) if I else (0, Rstr, T)
def report_internal_error(task, exc):
_type, _value, _tb = sys.exc_info()
try:
_value = task.backend.prepare_exception(exc, 'pickle')
exc_info = ExceptionInfo((_type, _value, _tb), internal=True)
warn(RuntimeWarning(
'Exception raised outside body: {!r}:\n{}'.format(
exc, exc_info.traceback)))
return exc_info
finally:
del _tb
def setup_worker_optimizations(app, hostname=None):
"""Setup worker related optimizations."""
hostname = hostname or gethostname()
# make sure custom Task.__call__ methods that calls super
# won't mess up the request/task stack.
_install_stack_protection()
# all new threads start without a current app, so if an app is not
# passed on to the thread it will fall back to the "default app",
# which then could be the wrong app. So for the worker
# we set this to always return our app. This is a hack,
# and means that only a single app can be used for workers
# running in the same process.
app.set_current()
app.set_default()
# evaluate all task classes by finalizing the app.
app.finalize()
# set fast shortcut to task registry
_localized[:] = [
app._tasks,
prepare_accept_content(app.conf.accept_content),
hostname,
]
app.use_fast_trace_task = True
def reset_worker_optimizations(app=current_app):
"""Reset previously configured optimizations."""
try:
delattr(BaseTask, '_stackprotected')
except AttributeError:
pass
try:
BaseTask.__call__ = _patched.pop('BaseTask.__call__')
except KeyError:
pass
app.use_fast_trace_task = False
def _install_stack_protection():
# Patches BaseTask.__call__ in the worker to handle the edge case
# where people override it and also call super.
#
# - The worker optimizes away BaseTask.__call__ and instead
# calls task.run directly.
# - so with the addition of current_task and the request stack
# BaseTask.__call__ now pushes to those stacks so that
# they work when tasks are called directly.
#
# The worker only optimizes away __call__ in the case
# where it hasn't been overridden, so the request/task stack
# will blow if a custom task class defines __call__ and also
# calls super().
if not getattr(BaseTask, '_stackprotected', False):
_patched['BaseTask.__call__'] = orig = BaseTask.__call__
def __protected_call__(self, *args, **kwargs):
stack = self.request_stack
req = stack.top
if req and not req._protected and \
len(stack) == 1 and not req.called_directly:
req._protected = 1
return self.run(*args, **kwargs)
return orig(self, *args, **kwargs)
BaseTask.__call__ = __protected_call__
BaseTask._stackprotected = True
| TraceInfo |
python | pytorch__pytorch | test/inductor/test_split_cat_fx_aten_passes.py | {
"start": 1991,
"end": 4358
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(
self, x1: torch.Tensor, x2: torch.Tensor, y: torch.Tensor, z: torch.Tensor
):
split_with_sizes_1 = torch.ops.aten.split_with_sizes.default(
x1,
[
96,
96,
96,
96,
96,
96,
96,
96,
96,
96,
96,
96,
96,
96,
96,
96,
96,
96,
96,
96,
96,
],
1,
)
split_with_sizes_2 = torch.ops.aten.split_with_sizes.default(
x2, [96, 96, 96, 96], 1
)
getitem_71 = split_with_sizes_1[0]
getitem_72 = split_with_sizes_1[1]
getitem_73 = split_with_sizes_1[2]
getitem_74 = split_with_sizes_1[3]
getitem_75 = split_with_sizes_1[4]
getitem_76 = split_with_sizes_1[5]
getitem_77 = split_with_sizes_1[10]
getitem_78 = split_with_sizes_1[11]
getitem_79 = split_with_sizes_1[12]
getitem_80 = split_with_sizes_1[13]
getitem_81 = split_with_sizes_1[14]
getitem_82 = split_with_sizes_1[15]
getitem_83 = split_with_sizes_1[16]
getitem_84 = split_with_sizes_1[17]
getitem_85 = split_with_sizes_2[0]
getitem_86 = split_with_sizes_2[1]
getitem_87 = split_with_sizes_2[2]
getitem_88 = split_with_sizes_2[3]
cat = torch.ops.aten.cat.default(
[
z,
getitem_71,
getitem_72,
getitem_73,
getitem_74,
getitem_75,
getitem_76,
getitem_82,
getitem_83,
getitem_84,
y,
getitem_77,
getitem_78,
getitem_79,
getitem_80,
getitem_81,
y,
getitem_85,
getitem_86,
getitem_87,
getitem_88,
z,
],
1,
)
return cat
| TestSplitCatPartial |
python | PyCQA__pylint | doc/data/messages/p/protected-access/bad.py | {
"start": 0,
"end": 103
} | class ____:
def __swallow(self):
pass
jim = Worm()
jim.__swallow() # [protected-access]
| Worm |
python | ray-project__ray | rllib/models/tf/tf_action_dist.py | {
"start": 20550,
"end": 21402
} | class ____(TFActionDistribution):
"""Action distribution that returns the input values directly.
This is similar to DiagGaussian with standard deviation zero (thus only
requiring the "mean" values as NN output).
"""
@override(ActionDistribution)
def deterministic_sample(self) -> TensorType:
return self.inputs
@override(TFActionDistribution)
def logp(self, x: TensorType) -> TensorType:
return tf.zeros_like(self.inputs)
@override(TFActionDistribution)
def _build_sample_op(self) -> TensorType:
return self.inputs
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(
action_space: gym.Space, model_config: ModelConfigDict
) -> Union[int, np.ndarray]:
return np.prod(action_space.shape, dtype=np.int32)
@OldAPIStack
| Deterministic |
python | pytorch__pytorch | test/distributed/elastic/multiprocessing/api_test.py | {
"start": 5824,
"end": 39233
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.test_dir = tempfile.mkdtemp(prefix=f"{self.__class__.__name__}_")
self._start_methods = ["spawn"]
def tearDown(self):
super().tearDown()
shutil.rmtree(self.test_dir)
def log_dir(self):
return tempfile.mkdtemp(dir=self.test_dir)
def assert_in_file(self, expected: list[str], filename: str) -> None:
expected = [f"{line.rstrip()}\n" for line in expected]
with open(filename) as fp:
actual = fp.readlines()
for line in expected:
self.assertIn(line, actual)
def assert_not_in_file(self, lines: list[str], filename: str) -> None:
lines = [f"{line.rstrip()}\n" for line in lines]
with open(filename) as fp:
actual = fp.readlines()
for line in lines:
self.assertNotIn(line, actual)
def assert_pids_noexist(self, pids: dict[int, int]):
for local_rank, pid in pids.items():
with self.assertRaises(
OSError, msg=f"local_rank: {local_rank} pid: {pid} should not exist"
):
os.kill(pid, 0)
def _test_zombie_workflow(
self, entrypoint: Union[str, Callable], signal_to_send: signal.Signals
) -> None:
mp_queue = mp.get_context("spawn").Queue()
child_nproc = 2
mp.spawn(
start_processes_zombie_test,
nprocs=1,
args=(entrypoint, mp_queue, self.log_dir(), child_nproc),
join=False,
)
total_processes = child_nproc + 1
pids = []
for _ in range(total_processes):
pids.append(mp_queue.get(timeout=120))
parent_pid = pids[0]
child_pids = pids[1:]
os.kill(parent_pid, signal.SIGTERM)
# Wait to give time for signal handlers to finish work
time.sleep(5)
for child_pid in child_pids:
# Killing parent should kill all children, we expect that each call to
# os.kill would raise OSError
with self.assertRaises(OSError):
os.kill(child_pid, 0)
# tests incompatible with tsan or asan
if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
class StartProcessesAsFuncTest(_StartProcessesTest):
def test_to_map(self):
local_world_size = 2
self.assertEqual(
{0: Std.OUT, 1: Std.OUT}, to_map(Std.OUT, local_world_size)
)
self.assertEqual(
{0: Std.NONE, 1: Std.OUT}, to_map({1: Std.OUT}, local_world_size)
)
self.assertEqual(
{0: Std.ERR, 1: Std.OUT},
to_map({0: Std.ERR, 1: Std.OUT}, local_world_size),
)
def test_invalid_log_dir(self):
with tempfile.NamedTemporaryFile(dir=self.test_dir) as not_a_dir:
cases = {
not_a_dir.name: NotADirectoryError,
}
for log_dir, expected_error in cases.items():
with self.subTest(log_dir=log_dir, expected_error=expected_error):
with self.assertRaises(expected_error):
pc = None
try:
pc = start_processes(
name="echo",
entrypoint=echo1,
args={0: ("hello",)},
envs={0: {"RANK": "0"}},
logs_specs=DefaultLogsSpecs(log_dir=log_dir),
)
finally:
if pc:
pc.close()
def test_args_env_len_mismatch(self):
cases = [
# 1 x args; 2 x envs
{
"args": {0: ("hello",)},
"envs": {0: {"RANK": "0"}, 1: {"RANK": "1"}},
},
# 2 x args; 1 x envs
{
"args": {0: ("hello",), 1: ("world",)},
"envs": {0: {"RANK": "0"}},
},
]
for kwds in cases:
args = kwds["args"]
envs = kwds["envs"]
with self.subTest(args=args, envs=envs):
with self.assertRaises(RuntimeError):
start_processes(
name="echo",
entrypoint=echo1,
args=args,
envs=envs,
logs_specs=DefaultLogsSpecs(log_dir=self.log_dir()),
)
def test_pcontext_wait(self):
pc = start_processes(
name="sleep",
entrypoint=time.sleep,
args={0: (1,)},
envs={0: {}},
logs_specs=DefaultLogsSpecs(log_dir=self.log_dir()),
start_method="spawn",
)
self.assertIsNone(pc.wait(timeout=0.1, period=0.01))
self.assertIsNotNone(pc.wait(period=0.1))
for tail_log in pc._tail_logs:
self.assertTrue(tail_log.stopped())
def test_pcontext_wait_on_a_child_thread(self):
asyncio.run(asyncio.to_thread(self.test_pcontext_wait))
def test_multiprocess_context_close(self):
pc = start_processes(
name="sleep",
entrypoint=time.sleep,
args={0: (1,)},
envs={0: {}},
logs_specs=DefaultLogsSpecs(log_dir=self.log_dir()),
start_method="spawn",
)
pids = pc.pids()
pc.close()
self.assert_pids_noexist(pids)
for tail_log in pc._tail_logs:
self.assertTrue(tail_log.stopped())
def test_function_with_tensor(self):
for start_method in self._start_methods:
pc = start_processes(
name="dummy_compute",
entrypoint=dummy_compute,
args={0: ()},
envs={0: {}},
logs_specs=DefaultLogsSpecs(log_dir=self.log_dir()),
start_method=start_method,
)
results = pc.wait()
self.assert_pids_noexist(pc.pids())
for return_value in results.return_values.values():
self.assertIsInstance(return_value, torch.Tensor)
self.assertEqual((100, 100), return_value.shape)
def test_void_function(self):
for start_method in self._start_methods:
with self.subTest(start_method=start_method):
pc = start_processes(
name="echo",
entrypoint=echo0,
args={0: ("hello",), 1: ("world",)},
envs={0: {}, 1: {}},
logs_specs=DefaultLogsSpecs(log_dir=self.log_dir()),
start_method=start_method,
)
results = pc.wait(period=0.1)
self.assertEqual({0: None, 1: None}, results.return_values)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "tests incompatible with asan"
)
def test_function_large_ret_val(self):
# python multiprocessing.queue module uses pipes and actually PipedQueues
# This means that if a single object is greater than a pipe size
# the writer process will block until reader process will start
# reading the pipe.
# This test makes a worker fn to return huge output, around ~10 MB
size = 200000
for start_method in self._start_methods:
with self.subTest(start_method=start_method):
pc = start_processes(
logs_specs=DefaultLogsSpecs(log_dir=self.log_dir()),
name="echo",
entrypoint=echo_large,
args={0: (size,), 1: (size,), 2: (size,), 3: (size,)},
envs={0: {}, 1: {}, 2: {}, 3: {}},
start_method=start_method,
)
results = pc.wait(period=0.1)
for i in range(pc.nprocs):
self.assertEqual(size, len(results.return_values[i]))
def test_function_raise(self):
"""
run 2x copies of echo2, raise an exception on the first
"""
RAISE = True
for start_method in self._start_methods:
with self.subTest(start_method=start_method):
log_dir = self.log_dir()
pc = start_processes(
name="echo",
entrypoint=echo2,
args={0: ("hello", RAISE), 1: ("world",)},
envs={
0: {"TORCHELASTIC_RUN_ID": "run_id"},
1: {"TORCHELASTIC_RUN_ID": "run_id"},
},
logs_specs=DefaultLogsSpecs(log_dir=log_dir),
start_method=start_method,
)
results = pc.wait(period=0.1)
self.assert_pids_noexist(pc.pids())
self.assertEqual(1, len(results.failures))
self.assertFalse(results.return_values)
failure = results.failures[0]
error_file = failure.error_file
error_file_data = failure.error_file_data
self.assertEqual(1, failure.exitcode)
self.assertEqual("<N/A>", failure.signal_name())
self.assertEqual(pc.pids()[0], failure.pid)
self.assertTrue(
error_file.startswith(os.path.join(log_dir, "run_id_"))
)
self.assertTrue(error_file.endswith("attempt_0/0/error.json"))
self.assertEqual(
int(error_file_data["message"]["extraInfo"]["timestamp"]),
int(failure.timestamp),
)
for tail_log in pc._tail_logs:
self.assertTrue(tail_log.stopped())
def test_wait_for_all_child_procs_to_exit(self):
"""
Tests that MultiprocessingContext actually waits for
the child process to exit (not just that the entrypoint fn has
finished running).
"""
mpc = MultiprocessContext(
name="echo",
entrypoint=echo0,
args={},
envs={},
start_method="spawn",
logs_specs=DefaultLogsSpecs(log_dir=self.log_dir()),
)
with (
mock.patch.object(mpc, "_is_done", return_value=True),
mock.patch.object(mpc, "_pc"),
mock.patch.object(
mpc._pc, "join", side_effect=[True, False, False, True]
) as mock_join,
):
mpc._poll()
self.assertEqual(4, mock_join.call_count)
def test_multiprocessing_context_poll_raises_exception(self):
mp_context = MultiprocessContext(
name="test_mp",
entrypoint=echo0,
args={0: (0, 1)},
envs={0: {}},
logs_specs=DefaultLogsSpecs(
log_dir=self.log_dir(), redirects=Std.ALL, tee=Std.ALL
),
start_method="spawn",
)
mp_context._pc = mock.Mock()
# Using mock since we cannot just set exitcode on process
mock_process = mock.Mock()
mock_process.exitcode = -1
mp_context._pc.processes = [mock_process]
e = mp.ProcessRaisedException(msg="test msg", error_index=0, error_pid=123)
mp_context._pc.join.side_effect = e
with mock.patch.object(mp_context, "close"):
run_result = mp_context._poll()
self.assertEqual(1, len(run_result.failures))
failure = run_result.failures[0]
self.assertEqual(
"Signal 1 (SIGHUP) received by PID 123", failure.message
)
class StartProcessesAsBinaryTest(_StartProcessesTest):
########################################
# start_processes as binary tests
########################################
def test_subprocess_context_close(self):
pc = start_processes(
name="sleep",
entrypoint=bin("zombie_test.py"),
args={0: (1,)},
envs={0: {}},
logs_specs=DefaultLogsSpecs(log_dir=self.log_dir()),
)
pids = pc.pids()
pc.close()
self.assert_pids_noexist(pids)
def test_binary_exit(self):
FAIL = 138
pc = start_processes(
name="echo",
entrypoint=bin("echo4.py"),
args={0: ("--exitcode", FAIL, "foo"), 1: ("--exitcode", 0, "bar")},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
logs_specs=DefaultLogsSpecs(
log_dir=self.log_dir(),
redirects={0: Std.ALL},
),
)
results = pc.wait(period=0.1)
self.assertTrue(results.is_failed())
self.assertEqual(2, len(results.failures))
failure = results.failures[0]
self.assertEqual(138, failure.exitcode)
self.assertEqual("<N/A>", failure.signal_name())
self.assertEqual("<NONE>", failure.error_file_data["message"])
self.assert_in_file([f"exit {FAIL} from 0"], results.stderrs[0])
self.assert_in_file([], results.stdouts[0])
self.assertFalse(results.stderrs[1])
self.assertFalse(results.stdouts[1])
for tail_log in pc._tail_logs:
self.assertTrue(tail_log.stopped())
failure = results.failures[1]
self.assertEqual(-15, failure.exitcode)
self.assertEqual("SIGTERM", failure.signal_name())
self.assertEqual("<NONE>", failure.error_file_data["message"])
# Assert that the failure message contains expected substrings
self.assertIn("Signal 15 (SIGTERM) received by PID", failure.message)
def test_binary_raises(self):
pc = start_processes(
name="echo",
entrypoint=bin("echo2.py"),
args={0: ("--raises", "true", "foo"), 1: ("bar",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
logs_specs=DefaultLogsSpecs(log_dir=self.log_dir()),
)
results = pc.wait(period=0.1)
self.assert_pids_noexist(pc.pids())
self.assertTrue(results.is_failed())
self.assertEqual(1, len(results.failures))
failure = results.failures[0]
self.assertEqual(1, failure.exitcode)
self.assertEqual("<NONE>", failure.error_file_data["message"])
self.assertEqual("<N/A>", failure.signal_name())
def test_binary_incorrect_entrypoint(self):
with self.assertRaises(FileNotFoundError):
start_processes(
name="echo",
entrypoint="does_not_exist.py",
args={0: ("foo"), 1: ("bar",)},
envs={0: {}, 1: {}},
logs_specs=DefaultLogsSpecs(log_dir=self.log_dir()),
)
def test_validate_full_rank(self):
with self.assertRaises(RuntimeError):
_validate_full_rank({}, 10, "")
# tests incompatible with tsan or asan, the redirect functionality does not work on macos or windows
if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS):
class StartProcessesListAsFuncTest(_StartProcessesTest):
def test_function(self):
for start_method, redirs in product(
self._start_methods, redirects_oss_test()
):
with self.subTest(start_method=start_method, redirs=redirs):
pc = start_processes(
name="echo",
entrypoint=echo1,
args={0: ("hello",), 1: ("hello",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
logs_specs=DefaultLogsSpecs(
log_dir=self.log_dir(),
redirects=redirs,
),
start_method=start_method,
)
results = pc.wait(period=0.1)
nprocs = pc.nprocs
self.assert_pids_noexist(pc.pids())
self.assertEqual(
{i: f"hello_{i}" for i in range(nprocs)}, results.return_values
)
for i in range(nprocs):
if redirs & Std.OUT != Std.OUT:
self.assertFalse(results.stdouts[i])
if redirs & Std.ERR != Std.ERR:
self.assertFalse(results.stderrs[i])
if redirs & Std.OUT == Std.OUT:
self.assert_in_file(
[f"hello stdout from {i}"], results.stdouts[i]
)
if redirs & Std.ERR == Std.ERR:
self.assert_in_file(
[f"hello stderr from {i}"], results.stderrs[i]
)
class StartProcessesListAsBinaryTest(_StartProcessesTest):
########################################
# start_processes as binary tests
########################################
def test_binary(self):
for redirs in redirects_oss_test():
with self.subTest(redirs=redirs):
pc = start_processes(
name="echo",
entrypoint=bin("echo1.py"),
args={0: ("hello",), 1: ("hello",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
logs_specs=DefaultLogsSpecs(
log_dir=self.log_dir(),
redirects=redirs,
),
log_line_prefixes={0: "[rank0]:", 1: "[rank1]:"},
)
results = pc.wait(period=0.1)
self.assert_pids_noexist(pc.pids())
# currently binaries return {rank: None}
self.assertEqual(2, len(results.return_values))
self.assertFalse(results.is_failed())
nprocs = pc.nprocs
for i in range(nprocs):
if redirs & Std.OUT != Std.OUT:
self.assertFalse(results.stdouts[i])
if redirs & Std.ERR != Std.ERR:
self.assertFalse(results.stderrs[i])
if redirs & Std.OUT == Std.OUT:
self.assert_in_file(
[f"hello stdout from {i}"], results.stdouts[i]
)
if redirs & Std.ERR == Std.ERR:
self.assert_in_file(
[f"hello stderr from {i}"], results.stderrs[i]
)
def test_binary_redirect_and_tee(self):
pc = start_processes(
name="trainer",
entrypoint=bin("echo1.py"),
args={0: ("hello",), 1: ("world",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
logs_specs=DefaultLogsSpecs(
log_dir=self.log_dir(),
redirects={0: Std.ERR, 1: Std.NONE},
tee={0: Std.OUT, 1: Std.ERR},
),
log_line_prefixes={0: "[rank0]:", 1: "[rank1]:"},
start_method="spawn",
)
result = pc.wait()
self.assertFalse(result.is_failed())
self.assert_in_file(["hello stdout from 0"], pc.stdouts[0])
self.assert_in_file(["hello stderr from 0"], pc.stderrs[0])
self.assert_in_file(["world stderr from 1"], pc.stderrs[1])
self.assertFalse(pc.stdouts[1])
for tail_log in pc._tail_logs:
self.assertTrue(tail_log.stopped())
def test_binary_duplicate_log_filters(self):
envs = {0: {"RANK": "0"}, 1: {"RANK": "1"}}
logs_specs = DefaultLogsSpecs(
log_dir=self.log_dir(),
redirects={0: Std.ERR, 1: Std.NONE},
tee={0: Std.OUT, 1: Std.ERR},
)
logs_dest = logs_specs.reify(envs)
pc = start_processes(
name="trainer",
entrypoint=bin("echo1.py"),
args={0: ("helloA,helloB",), 1: ("worldA,worldB",)},
envs=envs,
logs_specs=logs_specs,
log_line_prefixes={0: "[rank0]:", 1: "[rank1]:"},
duplicate_stdout_filters=["helloA"],
duplicate_stderr_filters=["worldA", "B"],
start_method="spawn",
)
result = pc.wait()
self.assertFalse(result.is_failed())
self.assert_in_file(
["[rank0]:helloA stdout from 0"], logs_dest.filtered_stdout
)
self.assert_not_in_file(
["[rank0]:helloB stdout from 0"], logs_dest.filtered_stdout
)
self.assert_in_file(
["[rank1]:worldA stderr from 1"], logs_dest.filtered_stderr
)
self.assert_in_file(
["[rank1]:worldB stderr from 1"], logs_dest.filtered_stderr
)
for tail_log in pc._tail_logs:
self.assertTrue(tail_log.stopped())
# tests incompatible with tsan or asan, the redirect functionality does not work on macos or windows
if not (TEST_WITH_DEV_DBG_ASAN or IS_WINDOWS or IS_MACOS or IS_CI):
class StartProcessesNotCIAsFuncTest(_StartProcessesTest):
@skip_if_pytest
def test_wrap_bad(self):
none = ""
stdout_log = os.path.join(self.test_dir, "stdout.log")
stderr_log = os.path.join(self.test_dir, "stderr.log")
redirs = [
(none, none),
(none, stderr_log),
(stdout_log, none),
(stdout_log, stderr_log),
]
for stdout_redir, stderr_redir in redirs:
queue = multiprocessing.SimpleQueue()
worker_finished_event_mock = mock.Mock()
_wrap(
local_rank=0,
fn=echo1,
args={0: ("hello",)},
envs={0: {"RANK": "0"}},
stdout_redirects={0: stdout_redir},
stderr_redirects={0: stderr_redir},
ret_vals={0: queue},
queue_finished_reading_event=worker_finished_event_mock,
numa_options=None,
)
self.assertEqual("hello_0", queue.get())
if stdout_redir:
self.assert_in_file(["hello stdout from 0"], stdout_log)
if stderr_redir:
self.assert_in_file(["hello stderr from 0"], stderr_log)
worker_finished_event_mock.wait.assert_called_once()
def test_function_redirect_and_tee(self):
for start_method in self._start_methods:
with self.subTest(start_method=start_method):
pc = start_processes(
name="trainer",
entrypoint=echo1,
args={0: ("hello",), 1: ("world",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
logs_specs=DefaultLogsSpecs(
log_dir=self.log_dir(),
redirects={0: Std.ERR, 1: Std.NONE},
tee={0: Std.OUT, 1: Std.ERR},
),
start_method="spawn",
)
result = pc.wait()
self.assertFalse(result.is_failed())
self.assert_in_file(["hello stdout from 0"], pc.stdouts[0])
self.assert_in_file(["hello stderr from 0"], pc.stderrs[0])
self.assert_in_file(["world stderr from 1"], pc.stderrs[1])
self.assertFalse(pc.stdouts[1])
for tail_log in pc._tail_logs:
self.assertTrue(tail_log.stopped())
def test_function_duplicate_log_filters(self):
for start_method in self._start_methods:
with self.subTest(start_method=start_method):
envs = {0: {"RANK": "0"}, 1: {"RANK": "1"}}
logs_specs = DefaultLogsSpecs(
log_dir=self.log_dir(),
redirects={0: Std.ERR, 1: Std.NONE},
tee={0: Std.OUT, 1: Std.ERR},
)
logs_dest = logs_specs.reify(envs)
pc = start_processes(
name="trainer",
entrypoint=echo1,
args={0: ("helloA,helloB",), 1: ("worldA,worldB",)},
envs=envs,
logs_specs=logs_specs,
duplicate_stdout_filters=["helloA"],
duplicate_stderr_filters=["worldA", "B"],
start_method="spawn",
)
result = pc.wait()
self.assertFalse(result.is_failed())
self.assert_in_file(
["[trainer0]:helloA stdout from 0"], logs_dest.filtered_stdout
)
self.assert_not_in_file(
["[trainer0]:helloB stdout from 0"], logs_dest.filtered_stdout
)
self.assert_in_file(
["[trainer1]:worldA stderr from 1"], logs_dest.filtered_stderr
)
self.assert_in_file(
["[trainer1]:worldB stderr from 1"], logs_dest.filtered_stderr
)
for tail_log in pc._tail_logs:
self.assertTrue(tail_log.stopped())
def test_function(self):
for start_method, redirs in product(self._start_methods, redirects_all()):
with self.subTest(start_method=start_method, redirs=redirs):
pc = start_processes(
name="echo",
entrypoint=echo1,
args={0: ("hello",), 1: ("hello",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
start_method=start_method,
logs_specs=DefaultLogsSpecs(
log_dir=self.log_dir(),
redirects=redirs,
),
)
results = pc.wait(period=0.1)
nprocs = pc.nprocs
self.assert_pids_noexist(pc.pids())
self.assertEqual(
{i: f"hello_{i}" for i in range(nprocs)}, results.return_values
)
for i in range(nprocs):
if redirs & Std.OUT != Std.OUT:
self.assertFalse(results.stdouts[i])
if redirs & Std.ERR != Std.ERR:
self.assertFalse(results.stderrs[i])
if redirs & Std.OUT == Std.OUT:
self.assert_in_file(
[f"hello stdout from {i}"], results.stdouts[i]
)
if redirs & Std.ERR == Std.ERR:
self.assert_in_file(
[f"hello stderr from {i}"], results.stderrs[i]
)
def test_function_exit(self):
"""
run 2x copies of echo1 fail (exit) the first
functions that exit from python do not generate an error file
(even if they are decorated with @record)
"""
FAIL = 138
for start_method in self._start_methods:
with self.subTest(start_method=start_method):
pc = start_processes(
name="echo",
entrypoint=echo1,
args={0: ("hello", FAIL), 1: ("hello",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
logs_specs=DefaultLogsSpecs(
log_dir=self.log_dir(),
redirects={0: Std.ERR},
),
start_method=start_method,
)
results = pc.wait(period=0.1)
self.assert_pids_noexist(pc.pids())
self.assertTrue(results.is_failed())
self.assertEqual(1, len(results.failures))
self.assertFalse(results.return_values)
failure = results.failures[0]
error_file = failure.error_file
self.assertEqual(FAIL, failure.exitcode)
self.assertEqual("<N/A>", failure.signal_name())
self.assertEqual(pc.pids()[0], failure.pid)
self.assertEqual("<N/A>", error_file)
self.assertEqual(
"To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html",
failure.message,
)
self.assertLessEqual(failure.timestamp, int(time.time()))
self.assert_in_file([f"exit {FAIL} from 0"], results.stderrs[0])
self.assertFalse(results.stdouts[0])
self.assertFalse(results.stderrs[1])
self.assertFalse(results.stdouts[1])
for tail_log in pc._tail_logs:
self.assertTrue(tail_log.stopped())
def test_no_zombie_process_function(self):
signals = [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]
for s in signals:
self._test_zombie_workflow(wait_fn, s)
class StartProcessesNotCIAsBinaryTest(_StartProcessesTest):
def test_binary_signal(self):
pc = start_processes(
name="echo",
entrypoint=bin("echo3.py"),
args={0: ("--segfault", "true", "foo"), 1: ("bar",)},
envs={0: {"RANK": "0"}, 1: {"RANK": "1"}},
logs_specs=DefaultLogsSpecs(
log_dir=self.log_dir(),
),
)
results = pc.wait(period=0.1)
self.assert_pids_noexist(pc.pids())
self.assertTrue(results.is_failed())
self.assertEqual(1, len(results.failures))
failure = results.failures[0]
self.assertNotEqual(signal.SIGSEGV, failure.exitcode)
if TEST_WITH_ASAN or TEST_WITH_TSAN:
# ASAN/TSAN exit code is 1.
self.assertEqual("<N/A>", failure.signal_name())
else:
self.assertEqual("SIGSEGV", failure.signal_name())
self.assertEqual("<NONE>", failure.error_file_data["message"])
def test_no_zombie_process_binary(self):
signals = [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]
for s in signals:
self._test_zombie_workflow(bin("zombie_test.py"), s)
class ForkServerTest(
StartProcessesAsFuncTest,
StartProcessesListAsFuncTest,
StartProcessesNotCIAsFuncTest,
):
def setUp(self):
super().setUp()
self._start_methods = ["forkserver"]
self.orig_paralell_env_val = os.environ.get(mp.ENV_VAR_PARALLEL_START)
os.environ[mp.ENV_VAR_PARALLEL_START] = "1"
def tearDown(self):
super().tearDown()
if self.orig_paralell_env_val is None:
del os.environ[mp.ENV_VAR_PARALLEL_START]
else:
os.environ[mp.ENV_VAR_PARALLEL_START] = self.orig_paralell_env_val
if __name__ == "__main__":
run_tests()
| _StartProcessesTest |
python | pypa__pipenv | benchmarks/benchmark.py | {
"start": 243,
"end": 12847
} | class ____:
def __init__(self, benchmark_dir: Path):
self.benchmark_dir = benchmark_dir
self.timings_dir = benchmark_dir / "timings"
self.timings_dir.mkdir(exist_ok=True)
self.requirements_url = "https://raw.githubusercontent.com/getsentry/sentry/51281a6abd8ff4a93d2cebc04e1d5fc7aa9c4c11/requirements-base.txt"
self.test_package = "goodconf"
def run_timed_command(
self, command: List[str], timing_file: str, cwd: Path = None
) -> Tuple[float, int]:
"""Run a command and measure execution time."""
if cwd is None:
cwd = self.benchmark_dir
print(f" Running: {' '.join(command)}", flush=True)
start_time = time.time()
try:
result = subprocess.run(
command, cwd=cwd, capture_output=True, text=True, check=True
)
elapsed = time.time() - start_time
# Write timing info (simplified format for cross-platform compatibility)
timing_path = self.timings_dir / timing_file
with open(timing_path, "w") as f:
f.write(
f"{elapsed:.3f},0,0,0,0,0,0\n"
) # elapsed,system,user,cpu%,maxrss,inputs,outputs
print(f" ✓ Completed in {elapsed:.3f}s")
if result.stdout.strip():
# Show first few lines of output
output_lines = result.stdout.strip().split("\n")[:3]
for line in output_lines:
print(f" {line[:100]}")
if len(result.stdout.strip().split("\n")) > 3:
print(" ...")
return elapsed, result.returncode
except subprocess.CalledProcessError as e:
elapsed = time.time() - start_time
print(f" ✗ Command failed after {elapsed:.3f}s: {' '.join(command)}")
print(f" Return code: {e.returncode}")
if e.stderr.strip():
print(" Error output:")
for line in e.stderr.strip().split("\n")[:5]:
print(f" {line}")
if e.stdout.strip():
print(" Stdout:")
for line in e.stdout.strip().split("\n")[:3]:
print(f" {line}")
raise
def setup_requirements(self):
"""Download and prepare requirements.txt."""
print("Setting up requirements.txt...")
requirements_path = self.benchmark_dir / "requirements.txt"
try:
with urllib.request.urlopen(self.requirements_url) as response:
content = response.read().decode("utf-8")
# Filter out --index-url lines like the original
filtered_lines = [
line
for line in content.splitlines()
if not line.strip().startswith("--index-url")
]
with open(requirements_path, "w") as f:
f.write("\n".join(filtered_lines))
print(f"Downloaded {len(filtered_lines)} requirements")
except Exception as e:
print(f"Failed to download requirements: {e}")
raise
def clean_cache(self):
"""Clean pipenv and pip caches."""
print("Cleaning caches...")
cache_dirs = [Path.home() / ".cache" / "pip", Path.home() / ".cache" / "pipenv"]
for cache_dir in cache_dirs:
if cache_dir.exists():
shutil.rmtree(cache_dir, ignore_errors=True)
def clean_venv(self):
"""Clean virtual environment."""
print("Cleaning virtual environment...")
try:
# Get venv path
result = subprocess.run(
["pipenv", "--venv"],
cwd=self.benchmark_dir,
capture_output=True,
text=True,
check=False,
)
if result.returncode == 0:
venv_path = Path(result.stdout.strip())
if venv_path.exists():
print(f" Removing venv: {venv_path}")
shutil.rmtree(venv_path, ignore_errors=True)
else:
print(" No virtual environment found")
except Exception as e:
print(f" Warning: Could not clean venv: {e}")
pass # Ignore errors if venv doesn't exist
def clean_lock(self):
"""Remove Pipfile.lock."""
print("Cleaning lock file...")
lock_file = self.benchmark_dir / "Pipfile.lock"
if lock_file.exists():
lock_file.unlink()
def benchmark_tooling(self):
"""Benchmark pipenv installation (using current dev version)."""
print("Benchmarking tooling...")
# Install current development version
parent_dir = self.benchmark_dir.parent
elapsed, _ = self.run_timed_command(
[sys.executable, "-m", "pip", "install", "-e", str(parent_dir)], "tooling.txt"
)
print(f"Tooling completed in {elapsed:.3f}s")
def benchmark_import(self):
"""Benchmark importing requirements.txt to Pipfile."""
print("Benchmarking import...")
elapsed, _ = self.run_timed_command(
["pipenv", "install", "-r", "requirements.txt"], "import.txt"
)
print(f"Import completed in {elapsed:.3f}s")
def benchmark_lock(self, timing_file: str):
"""Benchmark lock file generation."""
print(f"Benchmarking lock ({timing_file})...")
elapsed, _ = self.run_timed_command(["pipenv", "lock"], timing_file)
print(f"Lock completed in {elapsed:.3f}s")
def benchmark_install(self, timing_file: str):
"""Benchmark package installation."""
print(f"Benchmarking install ({timing_file})...")
elapsed, _ = self.run_timed_command(["pipenv", "sync"], timing_file)
print(f"Install completed in {elapsed:.3f}s")
def benchmark_update(self, timing_file: str):
"""Benchmark package updates."""
print(f"Benchmarking update ({timing_file})...")
elapsed, _ = self.run_timed_command(["pipenv", "update"], timing_file)
print(f"Update completed in {elapsed:.3f}s")
def benchmark_add_package(self):
"""Benchmark adding a new package."""
print("Benchmarking add package...")
elapsed, _ = self.run_timed_command(
["pipenv", "install", self.test_package], "add-package.txt"
)
print(f"Add package completed in {elapsed:.3f}s")
def get_pipenv_version(self) -> str:
"""Get pipenv version."""
try:
result = subprocess.run(
["pipenv", "--version"], capture_output=True, text=True, check=True
)
# Extract version from "pipenv, version X.X.X"
return result.stdout.split()[-1]
except Exception:
return "unknown"
def generate_stats(self):
"""Generate CSV stats file."""
print("Generating stats...")
version = self.get_pipenv_version()
timestamp = int(time.time())
stats_file = self.benchmark_dir / "stats.csv"
with open(stats_file, "w", newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(
[
"tool",
"version",
"timestamp",
"stat",
"elapsed time",
"system",
"user",
"cpu percent",
"max rss",
"inputs",
"outputs",
]
)
stats = [
"tooling",
"import",
"lock-cold",
"lock-warm",
"install-cold",
"install-warm",
"update-cold",
"update-warm",
"add-package",
]
for stat in stats:
timing_file = self.timings_dir / f"{stat}.txt"
if timing_file.exists():
with open(timing_file) as f:
timing_data = f.read().strip()
writer.writerow(["pipenv", version, timestamp, stat, timing_data])
print(f"Stats written to {stats_file}")
def run_full_benchmark(self):
"""Run the complete benchmark suite."""
print("=" * 60)
print("Starting pipenv benchmark suite...")
print("=" * 60)
steps = [
("Setup", "setup_requirements"),
("Tooling", "benchmark_tooling"),
("Import", "benchmark_import"),
("Lock (cold)", "lock_cold"),
("Lock (warm)", "lock_warm"),
("Install (cold)", "install_cold"),
("Install (warm)", "install_warm"),
("Update (cold)", "update_cold"),
("Update (warm)", "update_warm"),
("Add package", "benchmark_add_package"),
("Generate stats", "generate_stats"),
]
for i, (step_name, _) in enumerate(steps, 1):
print(f"\n[{i}/{len(steps)}] {step_name}")
print("-" * 40)
# Setup
print(f"\n[1/{len(steps)}] Setup")
print("-" * 40)
self.setup_requirements()
# Tooling
print(f"\n[2/{len(steps)}] Tooling")
print("-" * 40)
self.benchmark_tooling()
# Import
print(f"\n[3/{len(steps)}] Import")
print("-" * 40)
self.benchmark_import()
# Lock cold
print(f"\n[4/{len(steps)}] Lock (cold)")
print("-" * 40)
self.clean_cache()
self.clean_venv()
self.clean_lock()
self.benchmark_lock("lock-cold.txt")
# Lock warm
print(f"\n[5/{len(steps)}] Lock (warm)")
print("-" * 40)
self.clean_lock()
self.benchmark_lock("lock-warm.txt")
# Install cold
print(f"\n[6/{len(steps)}] Install (cold)")
print("-" * 40)
self.clean_cache()
self.clean_venv()
self.benchmark_install("install-cold.txt")
# Install warm
print(f"\n[7/{len(steps)}] Install (warm)")
print("-" * 40)
self.clean_venv()
self.benchmark_install("install-warm.txt")
# Update cold
print(f"\n[8/{len(steps)}] Update (cold)")
print("-" * 40)
self.clean_cache()
self.benchmark_update("update-cold.txt")
# Update warm
print(f"\n[9/{len(steps)}] Update (warm)")
print("-" * 40)
self.benchmark_update("update-warm.txt")
# Add package
print(f"\n[10/{len(steps)}] Add package")
print("-" * 40)
self.benchmark_add_package()
# Generate stats
print(f"\n[11/{len(steps)}] Generate stats")
print("-" * 40)
self.generate_stats()
print("\n" + "=" * 60)
print("Benchmark suite completed!")
print("=" * 60)
def main():
benchmark_dir = Path(__file__).parent
benchmark = PipenvBenchmark(benchmark_dir)
if len(sys.argv) > 1:
operation = sys.argv[1]
if operation == "setup":
benchmark.setup_requirements()
elif operation == "tooling":
benchmark.benchmark_tooling()
elif operation == "import":
benchmark.benchmark_import()
elif operation == "lock-cold":
benchmark.clean_cache()
benchmark.clean_venv()
benchmark.clean_lock()
benchmark.benchmark_lock("lock-cold.txt")
elif operation == "lock-warm":
benchmark.clean_lock()
benchmark.benchmark_lock("lock-warm.txt")
elif operation == "install-cold":
benchmark.clean_cache()
benchmark.clean_venv()
benchmark.benchmark_install("install-cold.txt")
elif operation == "install-warm":
benchmark.clean_venv()
benchmark.benchmark_install("install-warm.txt")
elif operation == "update-cold":
benchmark.clean_cache()
benchmark.benchmark_update("update-cold.txt")
elif operation == "update-warm":
benchmark.benchmark_update("update-warm.txt")
elif operation == "add-package":
benchmark.benchmark_add_package()
elif operation == "stats":
benchmark.generate_stats()
else:
print(f"Unknown operation: {operation}")
sys.exit(1)
else:
benchmark.run_full_benchmark()
if __name__ == "__main__":
main()
| PipenvBenchmark |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/operators/asb.py | {
"start": 28858,
"end": 30498
} | class ____(BaseOperator):
"""
Delete the topic in the Azure Service Bus namespace.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureServiceBusTopicDeleteOperator`
:param topic_name: Name of the topic to be deleted.
:param azure_service_bus_conn_id: Reference to the
:ref:`Azure Service Bus connection <howto/connection:azure_service_bus>`.
"""
template_fields: Sequence[str] = ("topic_name",)
ui_color = "#e4f0e8"
def __init__(
self,
*,
topic_name: str,
azure_service_bus_conn_id: str = "azure_service_bus_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.topic_name = topic_name
self.azure_service_bus_conn_id = azure_service_bus_conn_id
def execute(self, context: Context) -> None:
"""Delete topic in Service Bus namespace, by connecting to Service Bus Admin client."""
if self.topic_name is None:
raise TypeError("Topic name cannot be None.")
hook = AdminClientHook(azure_service_bus_conn_id=self.azure_service_bus_conn_id)
with hook.get_conn() as service_mgmt_conn:
topic_properties = service_mgmt_conn.get_topic(self.topic_name)
if topic_properties and topic_properties.name == self.topic_name:
service_mgmt_conn.delete_topic(self.topic_name)
self.log.info("Topic %s deleted.", self.topic_name)
else:
self.log.info("Topic %s does not exist.", self.topic_name)
| AzureServiceBusTopicDeleteOperator |
python | aio-libs__aiohttp | aiohttp/client_exceptions.py | {
"start": 5757,
"end": 5847
} | class ____(ClientConnectionError):
"""Server connection errors."""
| ServerConnectionError |
python | kamyu104__LeetCode-Solutions | Python/naming-a-company.py | {
"start": 61,
"end": 567
} | class ____(object):
def distinctNames(self, ideas):
"""
:type ideas: List[str]
:rtype: int
"""
lookup = [set() for _ in xrange(26)]
for x in ideas:
lookup[ord(x[0])-ord('a')].add(x[1:])
result = 0
for i in xrange(len(lookup)):
for j in xrange(i+1, len(lookup)):
common = len(lookup[i]&lookup[j])
result += (len(lookup[i])-common)*(len(lookup[j])-common)
return result*2
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/non_slot_assignment.py | {
"start": 1639,
"end": 1886
} | class ____:
names = ("surname",)
__slots__ = (*names, "a")
def __init__(self, name, surname):
self.name = name
self.surname = surname # [assigning-non-slot]
self.setup()
def setup(self):
pass
| StudentG |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 148001,
"end": 148657
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"issue_id",
"repository_id",
"create_labels_if_missing",
"client_mutation_id",
)
issue_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="issueId")
repository_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="repositoryId"
)
create_labels_if_missing = sgqlc.types.Field(
Boolean, graphql_name="createLabelsIfMissing"
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| TransferIssueInput |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/associationproxy.py | {
"start": 47245,
"end": 49196
} | class ____(Generic[_IT]):
getter: _GetterProtocol[_IT]
"""A function. Given an associated object, return the 'value'."""
creator: _CreatorProtocol
"""
A function that creates new target entities. Given one parameter:
value. This assertion is assumed::
obj = creator(somevalue)
assert getter(obj) == somevalue
"""
parent: AssociationProxyInstance[_IT]
setter: _SetterProtocol
"""A function. Given an associated object and a value, store that
value on the object.
"""
lazy_collection: _LazyCollectionProtocol[_IT]
"""A callable returning a list-based collection of entities (usually an
object attribute managed by a SQLAlchemy relationship())"""
def __init__(
self,
lazy_collection: _LazyCollectionProtocol[_IT],
creator: _CreatorProtocol,
getter: _GetterProtocol[_IT],
setter: _SetterProtocol,
parent: AssociationProxyInstance[_IT],
):
"""Constructs an _AssociationCollection.
This will always be a subclass of either _AssociationList,
_AssociationSet, or _AssociationDict.
"""
self.lazy_collection = lazy_collection
self.creator = creator
self.getter = getter
self.setter = setter
self.parent = parent
if typing.TYPE_CHECKING:
col: Collection[_IT]
else:
col = property(lambda self: self.lazy_collection())
def __len__(self) -> int:
return len(self.col)
def __bool__(self) -> bool:
return bool(self.col)
def __getstate__(self) -> Any:
return {"parent": self.parent, "lazy_collection": self.lazy_collection}
def __setstate__(self, state: Any) -> None:
self.parent = state["parent"]
self.lazy_collection = state["lazy_collection"]
self.parent._inflate(self)
def clear(self) -> None:
raise NotImplementedError()
| _AssociationCollection |
python | pypa__warehouse | warehouse/accounts/services.py | {
"start": 36900,
"end": 38488
} | class ____:
def __init__(
self,
*,
session,
api_base="https://haveibeenpwned.com/api/v3/breachedaccount/",
api_key=None,
):
self._http = session
self._api_base = api_base
self.api_key = api_key
@classmethod
def create_service(cls, context, request):
hibp_api_key = request.registry.settings.get("hibp.api_key")
return cls(session=request.http, api_key=hibp_api_key)
def get_email_breach_count(self, email: str) -> int | None:
"""
Check if an email has been breached, return the number of breaches.
See https://haveibeenpwned.com/API/v3#BreachesForAccount
"""
# bail early if no api key is set, so we don't send failing requests
if not self.api_key:
return None
try:
resp = self._http.get(
urllib.parse.urljoin(self._api_base, email),
headers={"User-Agent": "PyPI.org", "hibp-api-key": self.api_key},
timeout=(0.25, 0.25),
)
resp.raise_for_status()
except requests.RequestException as exc:
# 404 is expected if the email has **not** been breached
if (
exc.response is not None
and exc.response.status_code == http.HTTPStatus.NOT_FOUND
):
return 0
logger.warning("Error contacting HaveIBeenPwned: %r", exc)
return -1
return len(resp.json())
@implementer(IEmailBreachedService)
| HaveIBeenPwnedEmailBreachedService |
python | django-haystack__django-haystack | haystack/exceptions.py | {
"start": 203,
"end": 305
} | class ____(HaystackError):
"""Raised when a field encounters an error."""
pass
| SearchFieldError |
python | tensorflow__tensorflow | tensorflow/python/ops/math_ops_test.py | {
"start": 51418,
"end": 52361
} | class ____(test_util.TensorFlowTestCase):
def testConvertToTensorRange(self):
values = range(5)
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((5,), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
def testInputsNearInt64Max(self):
int64_t_max = 2**63 - 1
x = math_ops.range(0, 201, int64_t_max - 200, dtype=dtypes.int64)
self.assertAllEqual((0,), self.evaluate(x)) # just below potential overflow
x = math_ops.range(0, 202, int64_t_max - 200, dtype=dtypes.int64)
self.assertAllEqual(
(0,), self.evaluate(x)) # smallest input with potential overflow
def testInt32Overflow(self):
start = 1136033460
end = -2110457150
step = -1849827689
expected = np.arange(start, end, step)
actual = math_ops.range(start, end, step)
self.assertAllEqual(expected, self.evaluate(actual))
@test_util.run_all_in_graph_and_eager_modes
| RangeTest |
python | vyperlang__vyper | vyper/builtins/functions.py | {
"start": 46496,
"end": 48523
} | class ____(BuiltinFunctionT):
_id = "shift"
_inputs = [("x", (UINT256_T, INT256_T)), ("_shift_bits", IntegerT.any())]
_return_type = UINT256_T
def _try_fold(self, node):
vyper_warn("`shift()` is deprecated! Please use the << or >> operator instead.", node)
validate_call_args(node, 2)
args = [i.get_folded_value() for i in node.args]
if any(not isinstance(i, vy_ast.Int) for i in args):
raise UnfoldableNode
value, shift = [i.value for i in args]
if shift < -256 or shift > 256:
# this validation is performed to prevent the compiler from hanging
# rather than for correctness because the post-folded constant would
# have been validated anyway
raise InvalidLiteral("Shift must be between -256 and 256", node.args[1])
if shift < 0:
value = value >> -shift
else:
value = (value << shift) % (2**256)
return vy_ast.Int.from_node(node, value=value)
def fetch_call_return(self, node):
# return type is the type of the first argument
return self.infer_arg_types(node)[0]
def infer_arg_types(self, node, expected_return_typ=None):
self._validate_arg_types(node)
# return a concrete type instead of SignedIntegerAbstractType
arg_ty = get_possible_types_from_node(node.args[0])[0]
shift_ty = get_possible_types_from_node(node.args[1])[0]
return [arg_ty, shift_ty]
@process_inputs
def build_IR(self, expr, args, kwargs, context):
# "gshr" -- generalized right shift
argty = args[0].typ
GSHR = sar if argty.is_signed else shr
with args[0].cache_when_complex("to_shift") as (b1, arg), args[1].cache_when_complex(
"bits"
) as (b2, bits):
neg_bits = ["sub", 0, bits]
ret = ["if", ["slt", bits, 0], GSHR(neg_bits, arg), shl(bits, arg)]
return b1.resolve(b2.resolve(IRnode.from_list(ret, typ=argty)))
| Shift |
python | spyder-ide__spyder | spyder/plugins/switcher/api.py | {
"start": 184,
"end": 289
} | class ____:
FileSwitcherAction = 'file switcher'
SymbolFinderAction = 'symbol finder'
| SwitcherActions |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 113148,
"end": 118129
} | class ____(ASTBase):
def __init__(self, declSpecs: ASTDeclSpecs, decl: ASTDeclarator) -> None:
assert declSpecs
assert decl
self.declSpecs = declSpecs
self.decl = decl
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTType):
return NotImplemented
return self.declSpecs == other.declSpecs and self.decl == other.decl
def __hash__(self) -> int:
return hash((self.declSpecs, self.decl))
@property
def name(self) -> ASTNestedName:
return self.decl.name
@name.setter
def name(self, name: ASTNestedName) -> None:
self.decl.name = name
@property
def isPack(self) -> bool:
return self.decl.isPack
@property
def function_params(self) -> list[ASTFunctionParameter]:
return self.decl.function_params
@property
def trailingReturn(self) -> ASTType:
return self.decl.trailingReturn
def get_id(
self, version: int, objectType: str | None = None, symbol: Symbol | None = None
) -> str:
if version == 1:
res: list[str] = []
if objectType: # needs the name
if objectType == 'function': # also modifiers
res.extend((
symbol.get_full_nested_name().get_id(version),
self.decl.get_param_id(version),
self.decl.get_modifiers_id(version),
))
if self.declSpecs.leftSpecs.constexpr or (
self.declSpecs.rightSpecs
and self.declSpecs.rightSpecs.constexpr
):
res.append('CE')
elif objectType == 'type': # just the name
res.append(symbol.get_full_nested_name().get_id(version))
else:
raise AssertionError(objectType)
else: # only type encoding
if self.decl.is_function_type():
raise NoOldIdError
res.extend((
self.declSpecs.get_id(version),
self.decl.get_ptr_suffix_id(version),
self.decl.get_param_id(version),
))
return ''.join(res)
# other versions
res = []
if objectType: # needs the name
if objectType == 'function': # also modifiers
modifiers = self.decl.get_modifiers_id(version)
res.append(symbol.get_full_nested_name().get_id(version, modifiers))
if version >= 4:
# with templates we need to mangle the return type in as well
templ = symbol.declaration.templatePrefix
if templ is not None:
type_id = self.decl.get_ptr_suffix_id(version)
if self.trailingReturn:
return_type_id = self.trailingReturn.get_id(version)
else:
return_type_id = self.declSpecs.get_id(version)
res.append(type_id)
res.append(return_type_id)
res.append(self.decl.get_param_id(version))
elif objectType == 'type': # just the name
res.append(symbol.get_full_nested_name().get_id(version))
else:
raise AssertionError(objectType)
else: # only type encoding
# the 'returnType' of a non-function type is simply just the last
# type, i.e., for 'int*' it is 'int'
return_type_id = self.declSpecs.get_id(version)
type_id = self.decl.get_type_id(version, return_type_id)
res.append(type_id)
return ''.join(res)
def _stringify(self, transform: StringifyTransform) -> str:
res = []
decl_specs = transform(self.declSpecs)
res.append(decl_specs)
if self.decl.require_space_after_declSpecs() and len(decl_specs) > 0:
res.append(' ')
res.append(transform(self.decl))
return ''.join(res)
def get_type_declaration_prefix(self) -> str:
if self.declSpecs.trailingTypeSpec:
return 'typedef'
else:
return 'type'
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
self.declSpecs.describe_signature(signode, 'markType', env, symbol)
if self.decl.require_space_after_declSpecs() and len(str(self.declSpecs)) > 0:
signode += addnodes.desc_sig_space()
# for parameters that don't really declare new names we get 'markType',
# this should not be propagated, but be 'noneIsName'.
if mode == 'markType':
mode = 'noneIsName'
self.decl.describe_signature(signode, mode, env, symbol)
| ASTType |
python | Netflix__metaflow | test/unit/inheritance/test_inheritance.py | {
"start": 386,
"end": 2101
} | class ____:
"""Test comprehensive linear inheritance: FlowSpec -> BaseA -> BaseB -> BaseC -> Flow"""
def test_flow_completes(self, comprehensive_linear_run):
"""Test that the flow completes successfully"""
assert comprehensive_linear_run.successful
assert comprehensive_linear_run.finished
def test_all_parameters_accessible(self, comprehensive_linear_run):
"""Test that parameters from all levels are accessible"""
end_task = comprehensive_linear_run["end"].task
# From BaseA
assert end_task["result_alpha"].data == 10
assert end_task["result_beta"].data == 5
# From BaseC
assert end_task["result_gamma"].data == 2.5
# From final class
assert end_task["result_delta"].data == "final"
def test_all_configs_accessible(self, comprehensive_linear_run):
"""Test that configs from all levels are accessible"""
end_task = comprehensive_linear_run["end"].task
# From BaseB
config_b = end_task["result_config_b"].data
assert config_b["multiplier"] == 3
assert config_b["offset"] == 100
# From BaseC
config_c = end_task["result_config_c"].data
assert config_c["mode"] == "production"
assert config_c["debug"] is False
def test_computation_with_configs(self, comprehensive_linear_run):
"""Test computation using inherited parameters and configs"""
end_task = comprehensive_linear_run["end"].task
# start_value = alpha + beta = 10 + 5 = 15
# processed_value = start_value * multiplier + offset = 15 * 3 + 100 = 145
assert end_task["result_final"].data == 145
| TestComprehensiveLinear |
python | apache__airflow | airflow-ctl/src/airflowctl/api/operations.py | {
"start": 14202,
"end": 17079
} | class ____(BaseOperations):
"""Connection operations."""
def get(self, conn_id: str) -> ConnectionResponse | ServerResponseError:
"""Get a connection from the API server."""
try:
self.response = self.client.get(f"connections/{conn_id}")
return ConnectionResponse.model_validate_json(self.response.content)
except ServerResponseError as e:
raise e
def list(self) -> ConnectionCollectionResponse | ServerResponseError:
"""List all connections from the API server."""
return super().execute_list(path="connections", data_model=ConnectionCollectionResponse)
def create(
self,
connection: ConnectionBody,
) -> ConnectionResponse | ServerResponseError:
"""Create a connection."""
try:
self.response = self.client.post("connections", json=connection.model_dump(mode="json"))
return ConnectionResponse.model_validate_json(self.response.content)
except ServerResponseError as e:
raise e
def bulk(self, connections: BulkBodyConnectionBody) -> BulkResponse | ServerResponseError:
"""CRUD multiple connections."""
try:
self.response = self.client.patch("connections", json=connections.model_dump(mode="json"))
return BulkResponse.model_validate_json(self.response.content)
except ServerResponseError as e:
raise e
def create_defaults(self) -> None | ServerResponseError:
"""Create default connections."""
try:
self.response = self.client.post("connections/defaults")
return None
except ServerResponseError as e:
raise e
def delete(self, conn_id: str) -> str | ServerResponseError:
"""Delete a connection."""
try:
self.client.delete(f"connections/{conn_id}")
return conn_id
except ServerResponseError as e:
raise e
def update(
self,
connection: ConnectionBody,
) -> ConnectionResponse | ServerResponseError:
"""Update a connection."""
try:
self.response = self.client.patch(
f"connections/{connection.connection_id}", json=connection.model_dump(mode="json")
)
return ConnectionResponse.model_validate_json(self.response.content)
except ServerResponseError as e:
raise e
def test(
self,
connection: ConnectionBody,
) -> ConnectionTestResponse | ServerResponseError:
"""Test a connection."""
try:
self.response = self.client.post("connections/test", json=connection.model_dump(mode="json"))
return ConnectionTestResponse.model_validate_json(self.response.content)
except ServerResponseError as e:
raise e
| ConnectionsOperations |
python | plotly__plotly.py | plotly/graph_objs/scatter/marker/colorbar/_title.py | {
"start": 233,
"end": 4021
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter.marker.colorbar"
_path_str = "scatter.marker.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.scatter.marker.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` if "v" and
defaults to "right" when `orientation` if "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatter.marker
.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter.marker.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.marker.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | google__pytype | pytype/tests/test_attr2.py | {
"start": 27838,
"end": 32364
} | class ____(test_base.BaseTest):
"""Tests for @attr.s in pyi files."""
def test_basic(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
import attr
@attr.s
class A:
x: int
y: str
""",
)
self.Check(
"""
import foo
x = foo.A(10, 'hello')
""",
pythonpath=[d.path],
)
def test_docstring(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
import attr
from typing import Union
@attr.s
class A:
__doc__: str # should be filtered out
x: int
y: str
""",
)
self.Check(
"""
import foo
x = foo.A(10, 'hello')
""",
pythonpath=[d.path],
)
def test_type_mismatch(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
import attr
@attr.s
class A:
x: int
y: str
""",
)
self.CheckWithErrors(
"""
import foo
x = foo.A(10, 20) # wrong-arg-types
""",
pythonpath=[d.path],
)
def test_subclass(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
import attr
@attr.s
class A:
x: bool
y: int
""",
)
ty = self.Infer(
"""
import attr
import foo
@attr.s(auto_attribs=True)
class Foo(foo.A):
z: str = "hello"
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
import foo
@attr.s
class Foo(foo.A):
z: str = ...
def __init__(self, x: bool, y: int, z: str = ...) -> None: ...
""",
)
def test_subclass_from_same_pyi(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
import attr
@attr.s
class A:
x: bool
y: int
@attr.s
class B(A):
z: str
""",
)
ty = self.Infer(
"""
import attr
import foo
@attr.s(auto_attribs=True)
class Foo(foo.B):
a: str = "hello"
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
import foo
@attr.s
class Foo(foo.B):
a: str = ...
def __init__(self, x: bool, y: int, z: str, a: str = ...) -> None: ...
""",
)
def test_subclass_from_different_pyi(self):
with test_utils.Tempdir() as d:
d.create_file(
"bar.pyi",
"""
import attr
@attr.s
class A:
x: bool
y: int
""",
)
d.create_file(
"foo.pyi",
"""
import attr
import bar
@attr.s
class B(bar.A):
z: str
""",
)
ty = self.Infer(
"""
import attr
import foo
@attr.attrs(auto_attribs=True)
class Foo(foo.B):
a: str = "hello"
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
import foo
@attr.s
class Foo(foo.B):
a: str = ...
def __init__(self, x: bool, y: int, z: str, a: str = ...) -> None: ...
""",
)
def test_subclass_with_kwonly(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
import attr
@attr.s
class A:
x: bool
y: int
def __init__(self, x: bool, *, y: int = ...): ...
""",
)
ty = self.Infer(
"""
import attr
import foo
@attr.s(auto_attribs=True)
class Foo(foo.A):
z: str
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import attr
from typing import Union
import foo
@attr.s
class Foo(foo.A):
z: str
def __init__(self, x: bool, z: str, *, y: int = ...) -> None: ...
""",
)
| TestPyiAttrs |
python | huggingface__transformers | src/transformers/models/qwen3_moe/configuration_qwen3_moe.py | {
"start": 897,
"end": 10213
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Qwen3MoeModel`]. It is used to instantiate a
Qwen3MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of [Qwen/Qwen3-15B-A2B](https://huggingface.co/Qwen/Qwen3-15B-A2B).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 151936):
Vocabulary size of the Qwen3MoE model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Qwen3MoeModel`]
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 6144):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 4):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 32768):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
use_sliding_window (`bool`, *optional*, defaults to `False`):
Whether to use sliding window attention.
sliding_window (`int`, *optional*, defaults to 4096):
Sliding window attention (SWA) window size. If not specified, will default to `4096`.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
decoder_sparse_step (`int`, *optional*, defaults to 1):
The frequency of the MoE layer.
moe_intermediate_size (`int`, *optional*, defaults to 768):
Intermediate size of the routed expert.
num_experts_per_tok (`int`, *optional*, defaults to 8):
Number of selected experts.
num_experts (`int`, *optional*, defaults to 128):
Number of routed experts.
norm_topk_prob (`bool`, *optional*, defaults to `False`):
Whether to normalize the topk probabilities.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
The aux loss factor for the total loss.
mlp_only_layers (`list[int]`, *optional*, defaults to `[]`):
Indicate which layers use Qwen3MoeMLP rather than Qwen3MoeSparseMoeBlock
The list contains layer index, from 0 to num_layers-1 if we have num_layers layers
If `mlp_only_layers` is empty, `decoder_sparse_step` is used to determine the sparsity.
```python
>>> from transformers import Qwen3MoeModel, Qwen3MoeConfig
>>> # Initializing a Qwen3MoE style configuration
>>> configuration = Qwen3MoeConfig()
>>> # Initializing a model from the Qwen3-15B-A2B" style configuration
>>> model = Qwen3MoeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "qwen3_moe"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `Qwen3Moe`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.experts.gate_up_proj": "local_rowwise",
"layers.*.mlp.experts.down_proj": "local_rowwise",
"layers.*.mlp.experts": "gather",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 151936,
hidden_size: Optional[int] = 2048,
intermediate_size: Optional[int] = 6144,
num_hidden_layers: Optional[int] = 24,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = 4,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 32768,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
use_sliding_window: Optional[bool] = False,
sliding_window: Optional[int] = 4096,
attention_dropout: Optional[float] = 0.0,
decoder_sparse_step: Optional[int] = 1,
moe_intermediate_size: Optional[int] = 768,
num_experts_per_tok: Optional[int] = 8,
num_experts: Optional[int] = 128,
norm_topk_prob: Optional[bool] = False,
output_router_logits: Optional[bool] = False,
router_aux_loss_coef: Optional[float] = 0.001,
mlp_only_layers: Optional[bool] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.use_sliding_window = use_sliding_window
self.sliding_window = sliding_window if use_sliding_window else None
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.rope_parameters = rope_parameters
# MoE arguments
self.decoder_sparse_step = decoder_sparse_step
self.moe_intermediate_size = moe_intermediate_size
self.num_experts_per_tok = num_experts_per_tok
self.num_experts = num_experts
self.norm_topk_prob = norm_topk_prob
self.output_router_logits = output_router_logits
self.router_aux_loss_coef = router_aux_loss_coef
self.mlp_only_layers = [] if mlp_only_layers is None else mlp_only_layers
super().__init__(
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["Qwen3MoeConfig"]
| Qwen3MoeConfig |
python | numpy__numpy | numpy/lib/tests/test_recfunctions.py | {
"start": 43278,
"end": 43973
} | class ____:
"""
Test append_fields with arrays containing objects
"""
# https://github.com/numpy/numpy/issues/2346
def test_append_to_objects(self):
"Test append_fields when the base array contains objects"
from datetime import date
obj = date(2000, 1, 1)
x = np.array([(obj, 1.), (obj, 2.)],
dtype=[('A', object), ('B', float)])
y = np.array([10, 20], dtype=int)
test = append_fields(x, 'C', data=y, usemask=False)
control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
dtype=[('A', object), ('B', float), ('C', int)])
assert_equal(test, control)
| TestAppendFieldsObj |
python | python-openxml__python-docx | tests/oxml/test_xmlchemy.py | {
"start": 27031,
"end": 27142
} | class ____(BaseBuilder):
__tag__ = "w:oomChild"
__nspfxs__ = ("w",)
__attrs__ = ()
| CT_OomChildBuilder |
python | sqlalchemy__sqlalchemy | test/engine/test_pool.py | {
"start": 26027,
"end": 28960
} | class ____(PoolTestBase):
"""test for :ticket:`2964`, where the pool would not mutex the
initialization of the dialect.
Unfortunately, as discussed in :ticket:`6337`, this test suite did not
ensure that the ``Engine`` itself actually uses the "first_connect" event,
so when :ticket:`5497` came along, the "first_connect" event was no longer
used and no test detected the re-introduction of the exact same race
condition, which was now worse as the un-initialized dialect would now
pollute the SQL cache causing the application to not work at all.
A new suite has therefore been added in test/engine/test_execute.py->
OnConnectTest::test_initialize_connect_race to ensure that the engine
in total synchronizes the "first_connect" process, which now works
using a new events feature _exec_w_sync_on_first_run.
"""
@testing.requires.timing_intensive
def test_sync(self):
pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
evt = Mock()
@event.listens_for(pool, "first_connect")
def slow_first_connect(dbapi_con, rec):
time.sleep(1)
evt.first_connect()
@event.listens_for(pool, "connect")
def on_connect(dbapi_con, rec):
evt.connect()
def checkout():
barrier.wait()
for j in range(2):
c1 = pool.connect()
time.sleep(0.02)
c1.close()
time.sleep(0.02)
threads = []
# what we're trying to do here is have concurrent use of
# all three pooled connections at once, and the thing we want
# to test is that first_connect() finishes completely before
# any of the connections get returned. so first_connect()
# sleeps for one second, then pings the mock. the threads should
# not have made it to the "checkout() event for that one second.
barrier = threading.Barrier(5)
for i in range(5):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
# there is a very unlikely condition observed in CI on windows
# where even though we have five threads above all calling upon the
# pool, we didn't get concurrent use of all three connections, two
# connections were enough. so here we purposely just check out
# all three at once just to get a consistent test result.
make_sure_all_three_are_connected = [pool.connect() for i in range(3)]
for conn in make_sure_all_three_are_connected:
conn.close()
eq_(
evt.mock_calls,
[
call.first_connect(),
call.connect(),
call.connect(),
call.connect(),
],
)
| PoolFirstConnectSyncTest |
python | wandb__wandb | tests/unit_tests/conftest.py | {
"start": 382,
"end": 2037
} | class ____:
def __init__(self, queue: "Queue") -> None:
self.records = []
while not queue.empty():
self.records.append(queue.get())
def __len__(self) -> int:
return len(self.records)
def __getitem__(self, name: str) -> Generator:
for record in self.records:
yield from self.resolve_item(record, name)
@staticmethod
def resolve_item(obj, attr: str, sep: str = ".") -> List:
for name in attr.split(sep):
if not obj.HasField(name):
return []
obj = getattr(obj, name)
return [obj]
@staticmethod
def dictify(obj, key: str = "key", value: str = "value_json") -> Dict:
return {getattr(item, key): getattr(item, value) for item in obj}
@property
def config(self) -> List:
return [self.dictify(_c.update) for _c in self["config"]]
@property
def history(self) -> List:
return [self.dictify(_h.item) for _h in self["history"]]
@property
def partial_history(self) -> List:
return [self.dictify(_h.item) for _h in self["request.partial_history"]]
@property
def preempting(self) -> List:
return list(self["preempting"])
@property
def summary(self) -> List:
return list(self["summary"])
@property
def files(self) -> List:
return list(self["files"])
@property
def metric(self):
return list(self["metric"])
@pytest.fixture
def parse_records() -> Generator[Callable, None, None]:
def records_parser_fn(q: "Queue") -> RecordsUtil:
return RecordsUtil(q)
yield records_parser_fn
| RecordsUtil |
python | streamlit__streamlit | lib/tests/streamlit/temporary_directory_test.py | {
"start": 777,
"end": 1147
} | class ____(unittest.TestCase):
"""Test temp directory context manager."""
@tempdir()
def test_temp_directory(self, dir):
"""Test that the directory only exists inside the context."""
with TemporaryDirectory(dir=dir.path) as temp_fname:
assert os.path.exists(temp_fname)
assert not os.path.exists(temp_fname)
| TemporaryFileTest |
python | openai__openai-python | src/openai/_exceptions.py | {
"start": 4616,
"end": 4836
} | class ____(OpenAIError):
def __init__(self) -> None:
super().__init__(
f"Could not parse response content as the request was rejected by the content filter",
)
| ContentFilterFinishReasonError |
python | coleifer__peewee | tests/models.py | {
"start": 177168,
"end": 177256
} | class ____(TestModel):
name = CharField()
price = IntegerField(default=0)
| C_Product |
python | walkccc__LeetCode | solutions/3489. Zero Array Transformation IV/3489.py | {
"start": 0,
"end": 483
} | class ____:
def minZeroArray(self, nums: list[int], queries: list[list[int]]) -> int:
if all(num == 0 for num in nums):
return 0
n = len(nums)
subsetSums = [{0} for _ in range(n)]
for k, (l, r, val) in enumerate(queries):
for i in range(l, r + 1):
newSums = {subsetSum + val for subsetSum in subsetSums[i]}
subsetSums[i].update(newSums)
if all(nums[i] in subsetSums[i] for i in range(n)):
return k + 1
return -1
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 493462,
"end": 494196
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for CheckSuite."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("CheckSuiteEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("CheckSuite"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| CheckSuiteConnection |
python | pypa__warehouse | tests/unit/cache/test_http.py | {
"start": 849,
"end": 3376
} | class ____:
def test_cache_public(self):
response_obj = pretend.stub(
cache_control=pretend.stub(public=None, max_age=None)
)
request_obj = pretend.stub(registry=pretend.stub(settings={}))
context_obj = pretend.stub()
@cache_control(12)
def view(context, request):
assert context is context_obj
assert request is request_obj
return response_obj
response = view(context_obj, request_obj)
assert response is response_obj
assert response.cache_control.public
assert response.cache_control.max_age == 12
def test_cache_private(self):
response_obj = pretend.stub(
cache_control=pretend.stub(private=None, max_age=None)
)
request_obj = pretend.stub(registry=pretend.stub(settings={}))
context_obj = pretend.stub()
@cache_control(12, public=False)
def view(context, request):
assert context is context_obj
assert request is request_obj
return response_obj
response = view(context_obj, request_obj)
assert response is response_obj
assert response.cache_control.private
assert response.cache_control.max_age == 12
def test_no_cache(self):
response_obj = pretend.stub(
cache_control=pretend.stub(
no_cache=None, no_store=None, must_revalidate=None
)
)
request_obj = pretend.stub(registry=pretend.stub(settings={}))
context_obj = pretend.stub()
@cache_control(False)
def view(context, request):
assert context is context_obj
assert request is request_obj
return response_obj
response = view(context_obj, request_obj)
assert response is response_obj
assert response.cache_control.no_cache
assert response.cache_control.no_store
assert response.cache_control.must_revalidate
def test_bypass_cache(self):
response_obj = pretend.stub()
request_obj = pretend.stub(
registry=pretend.stub(settings={"pyramid.prevent_http_cache": True})
)
context_obj = pretend.stub()
@cache_control(12)
def view(context, request):
assert context is context_obj
assert request is request_obj
return response_obj
response = view(context_obj, request_obj)
assert response is response_obj
| TestCacheControl |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/overloadCall4.py | {
"start": 595,
"end": 3424
} | class ____(Enum):
x00 = 0
x01 = 0
x02 = 0
x03 = 0
x04 = 0
x05 = 0
x06 = 0
x07 = 0
x08 = 0
x09 = 0
x10 = 0
x11 = 0
x12 = 0
x13 = 0
x14 = 0
x15 = 0
x16 = 0
x17 = 0
x18 = 0
x19 = 0
x20 = 0
x21 = 0
x22 = 0
x23 = 0
x24 = 0
x25 = 0
x26 = 0
x27 = 0
x28 = 0
x29 = 0
x30 = 0
x31 = 0
x32 = 0
x33 = 0
x34 = 0
x35 = 0
x36 = 0
x37 = 0
x38 = 0
x39 = 0
x40 = 0
x41 = 0
x42 = 0
x43 = 0
x44 = 0
x45 = 0
x46 = 0
x47 = 0
x48 = 0
x49 = 0
x50 = 0
x51 = 0
x52 = 0
x53 = 0
x54 = 0
x55 = 0
x56 = 0
x57 = 0
x58 = 0
x59 = 0
x60 = 0
x61 = 0
x62 = 0
x63 = 0
x64 = 0
x65 = 0
x66 = 0
x67 = 0
x68 = 0
x69 = 0
LargeUnion = (
Literal[
"a",
"b",
"c",
"d",
"e",
"f",
"g",
1,
2,
3,
4,
5,
6,
7,
8,
]
| LargeEnum
)
@overload
def overloaded2(a: LargeUnion, b: Literal[2]) -> str: ...
@overload
def overloaded2(a: LargeUnion, b: Literal[3]) -> str: ...
@overload
def overloaded2(a: LargeUnion, b: Literal[4]) -> float: ...
@overload
def overloaded2(a: LargeUnion, b: Literal[9]) -> float: ...
@overload
def overloaded2(a: LargeUnion, b: Literal[10]) -> float: ...
def overloaded2(a: LargeUnion, b: LargeUnion | Literal[9, 10]) -> str | float: ...
def func2(a: LargeUnion, b: Literal[2, 3, 4], c: Literal[2, 3, 4, 9, 10] | LargeEnum):
v1 = overloaded2("a", 2)
reveal_type(v1, expected_text="str")
v2 = overloaded2(a, b)
reveal_type(v2, expected_text="str | float")
# This should generate an error because the expansion of union types
# will exceed the max number of expansions (256).
v3 = overloaded2(a, c)
reveal_type(v2, expected_text="str | float")
_T2 = TypeVar("_T2", str, bytes)
@overload
def overloaded3(x: str) -> str: ...
@overload
def overloaded3(x: bytes) -> bytes: ...
def overloaded3(x: str | bytes) -> str | bytes: ...
def func3(y: _T2):
overloaded3(y)
_T3 = TypeVar("_T3")
def func5(a: _T3) -> _T3:
return a
@overload
def overloaded4(b: str) -> str: ...
@overload
def overloaded4(b: int) -> int: ...
def overloaded4(b: str | int) -> str | int: ...
def func6(x: str | int) -> None:
y: str | int = overloaded4(func5(x))
@overload
def overloaded5(pattern: AnyStr) -> AnyStr: ...
@overload
def overloaded5(pattern: int) -> int: ...
def overloaded5(pattern: AnyStr | int) -> AnyStr | int:
return 0
def func7(a: str | bytes) -> str | bytes:
return overloaded5(a)
def func8(a: AnyStr | str | bytes) -> str | bytes:
return overloaded5(a)
| LargeEnum |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 12048,
"end": 12198
} | class ____(SubclassSelectorAbstractModel):
concrete_field = models.CharField(max_length=30, default="test_cf")
| SubclassSelectorAbstractConcreteModel |
python | scikit-learn__scikit-learn | sklearn/linear_model/_omp.py | {
"start": 21893,
"end": 30243
} | class ____(MultiOutputMixin, RegressorMixin, LinearModel):
"""Orthogonal Matching Pursuit model (OMP).
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
n_nonzero_coefs : int, default=None
Desired number of non-zero entries in the solution. Ignored if `tol` is set.
When `None` and `tol` is also `None`, this value is either set to 10% of
`n_features` or 1, whichever is greater.
tol : float, default=None
Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
precompute : 'auto' or bool, default='auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when :term:`n_targets` or
:term:`n_samples` is very large.
Attributes
----------
coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formula).
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
n_nonzero_coefs_ : int or None
The number of non-zero coefficients in the solution or `None` when `tol` is
set. If `n_nonzero_coefs` is None and `tol` is None this value is either set
to 10% of `n_features` or 1, whichever is greater.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems.
orthogonal_mp_gram : Solves n_targets Orthogonal Matching Pursuit
problems using only the Gram matrix X.T * X and the product X.T * y.
lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm.
Lars : Least Angle Regression model a.k.a. LAR.
LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
sklearn.decomposition.sparse_encode : Generic sparse coding.
Each column of the result is the solution to a Lasso problem.
OrthogonalMatchingPursuitCV : Cross-validated
Orthogonal Matching Pursuit model (OMP).
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
Examples
--------
>>> from sklearn.linear_model import OrthogonalMatchingPursuit
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(noise=4, random_state=0)
>>> reg = OrthogonalMatchingPursuit().fit(X, y)
>>> reg.score(X, y)
0.9991
>>> reg.predict(X[:1,])
array([-78.3854])
"""
_parameter_constraints: dict = {
"n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
"tol": [Interval(Real, 0, None, closed="left"), None],
"fit_intercept": ["boolean"],
"precompute": [StrOptions({"auto"}), "boolean"],
}
def __init__(
self,
*,
n_nonzero_coefs=None,
tol=None,
fit_intercept=True,
precompute="auto",
):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.precompute = precompute
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = validate_data(
self, X, y, multi_output=True, y_numeric=True, dtype=FLOAT_DTYPES
)
n_features = X.shape[1]
X, y, X_offset, y_offset, X_scale, Gram, Xy = _pre_fit(
X,
y,
None,
self.precompute,
self.fit_intercept,
copy=True,
check_gram=False,
)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
elif self.tol is not None:
self.n_nonzero_coefs_ = None
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X,
y,
n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol,
precompute=False,
copy_X=True,
return_n_iter=True,
)
else:
norms_sq = np.sum(y**2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram,
Xy=Xy,
n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol,
norms_squared=norms_sq,
copy_Gram=True,
copy_Xy=True,
return_n_iter=True,
)
self.coef_ = coef_.T
self._set_intercept(X_offset, y_offset, X_scale)
return self
def _omp_path_residues(
X_train,
y_train,
X_test,
y_test,
copy=True,
fit_intercept=True,
max_iter=100,
):
"""Compute the residues on left-out data for a full LARS path.
Parameters
----------
X_train : ndarray of shape (n_samples, n_features)
The data to fit the LARS on.
y_train : ndarray of shape (n_samples)
The target variable to fit LARS on.
X_test : ndarray of shape (n_samples, n_features)
The data to compute the residues on.
y_test : ndarray of shape (n_samples)
The target variable to compute the residues on.
copy : bool, default=True
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
max_iter : int, default=100
Maximum numbers of iterations to perform, therefore maximum features
to include. 100 by default.
Returns
-------
residues : ndarray of shape (n_samples, max_features)
Residues of the prediction on the test data.
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
coefs = orthogonal_mp(
X_train,
y_train,
n_nonzero_coefs=max_iter,
tol=None,
precompute=False,
copy_X=False,
return_path=True,
)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
| OrthogonalMatchingPursuit |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/metadata/pipeline.py | {
"start": 1951,
"end": 3820
} | class ____(SimpleDockerStep):
# When the metadata service exits with this code, it means the metadata is valid but the upload was skipped because the metadata is already uploaded
skipped_exit_code = 5
def __init__(
self,
context: ConnectorContext,
metadata_bucket_name: str,
metadata_service_gcs_credentials: Secret,
docker_hub_username: Secret,
docker_hub_password: Secret,
pre_release: bool = False,
pre_release_tag: Optional[str] = None,
) -> None:
title = f"Upload metadata for {context.connector.technical_name} v{context.connector.version}"
command_to_run = [
"metadata_service",
"upload",
str(context.connector.metadata_file_path),
DOCS_DIRECTORY_ROOT_PATH,
metadata_bucket_name,
]
if pre_release and pre_release_tag:
command_to_run += ["--prerelease", pre_release_tag]
super().__init__(
title=title,
context=context,
paths_to_mount=[
MountPath(GIT_DIRECTORY_ROOT_PATH),
MountPath(DOCS_DIRECTORY_ROOT_PATH),
MountPath(context.connector.code_directory),
],
internal_tools=[
MountPath(INTERNAL_TOOL_PATHS.METADATA_SERVICE.value),
],
secret_env_variables={
"DOCKER_HUB_USERNAME": docker_hub_username,
"DOCKER_HUB_PASSWORD": docker_hub_password,
"GCS_CREDENTIALS": metadata_service_gcs_credentials,
},
env_variables={
# The cache buster ensures we always run the upload command (in case of remote bucket change)
"CACHEBUSTER": str(uuid.uuid4()),
},
command=command_to_run,
)
| MetadataUpload |
python | modin-project__modin | modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/virtual_partition.py | {
"start": 8937,
"end": 9104
} | class ____(PandasOnUnidistDataframeVirtualPartition):
axis = 0
@_inherit_docstrings(PandasOnUnidistDataframeVirtualPartition)
| PandasOnUnidistDataframeColumnPartition |
python | ray-project__ray | python/ray/data/collate_fn.py | {
"start": 4684,
"end": 5354
} | class ____(CollateFn["pyarrow.Table"]):
"""Collate function that takes pyarrow.Table as the input batch type.
Arrow tables with chunked arrays can be efficiently transferred to GPUs without
combining the chunks with the `arrow_batch_to_tensors` utility function.
See `DefaultCollateFn` for example.
"""
def __call__(self, batch: "pyarrow.Table") -> "CollatedData":
"""Convert a batch of pyarrow.Table to collated format.
Args:
batch: The input pyarrow.Table batch to collate.
Returns:
The collated data in the format expected by the model.
"""
...
@DeveloperAPI
| ArrowBatchCollateFn |
python | django__django | tests/prefetch_related/models.py | {
"start": 2947,
"end": 3088
} | class ____(models.Manager):
def get_queryset(self):
return super().get_queryset().prefetch_related("qualifications")
| TeacherManager |
python | ipython__ipython | docs/sphinxext/apigen.py | {
"start": 974,
"end": 2622
} | class ____(ast.NodeVisitor):
"""Scan a module for top-level functions and classes.
Skips objects with an @undoc decorator, or a name starting with '_'.
"""
def __init__(self):
ast.NodeVisitor.__init__(self)
self.classes = []
self.classes_seen = set()
self.functions = []
@staticmethod
def has_undoc_decorator(node):
return any(isinstance(d, ast.Name) and d.id == 'undoc' \
for d in node.decorator_list)
def visit_If(self, node):
if isinstance(node.test, ast.Compare) \
and isinstance(node.test.left, ast.Name) \
and node.test.left.id == '__name__':
return # Ignore classes defined in "if __name__ == '__main__':"
self.generic_visit(node)
def visit_FunctionDef(self, node):
if not (node.name.startswith('_') or self.has_undoc_decorator(node)) \
and node.name not in self.functions:
self.functions.append(node.name)
def visit_ClassDef(self, node):
if (
not (node.name.startswith("_") or self.has_undoc_decorator(node))
and node.name not in self.classes_seen
):
cls = Obj(name=node.name, sphinx_options={})
cls.has_init = any(
isinstance(n, ast.FunctionDef) and n.name == "__init__"
for n in node.body
)
self.classes.append(cls)
self.classes_seen.add(node.name)
def scan(self, mod):
self.visit(mod)
return self.functions, self.classes
# Functions and classes
| FuncClsScanner |
python | huggingface__transformers | src/transformers/models/swin/modeling_swin.py | {
"start": 36111,
"end": 39852
} | class ____(SwinPreTrainedModel):
def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
r"""
add_pooling_layer (`bool`, *optional*, defaults to `True`):
Whether or not to apply pooling layer.
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether or not to create and apply mask tokens in the embedding layer.
"""
super().__init__(config)
self.config = config
self.num_layers = len(config.depths)
self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))
self.embeddings = SwinEmbeddings(config, use_mask_token=use_mask_token)
self.encoder = SwinEncoder(config, self.embeddings.patch_grid)
self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
bool_masked_pos: Optional[torch.BoolTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
) -> Union[tuple, SwinModelOutput]:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output, input_dimensions = self.embeddings(
pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
)
encoder_outputs = self.encoder(
embedding_output,
input_dimensions,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = None
if self.pooler is not None:
pooled_output = self.pooler(sequence_output.transpose(1, 2))
pooled_output = torch.flatten(pooled_output, 1)
if not return_dict:
output = (sequence_output, pooled_output) + encoder_outputs[1:]
return output
return SwinModelOutput(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
)
@auto_docstring(
custom_intro="""
Swin Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://huggingface.co/papers/2111.09886).
<Tip>
Note that we provide a script to pre-train this model on custom data in our [examples
directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).
</Tip>
"""
)
| SwinModel |
python | huggingface__transformers | tests/models/git/test_modeling_git.py | {
"start": 4403,
"end": 7090
} | class ____(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as GIT does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (GitVisionModel,) if is_torch_available() else ()
test_resize_embeddings = False
def setUp(self):
self.model_tester = GitVisionModelTester(self)
self.config_tester = ConfigTester(self, config_class=GitVisionConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="GIT does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip
def test_training(self):
pass
@unittest.skip
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "microsoft/git-base"
model = GitVisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
| GitVisionModelTest |
python | doocs__leetcode | solution/2200-2299/2217.Find Palindrome With Fixed Length/Solution.py | {
"start": 0,
"end": 441
} | class ____:
def kthPalindrome(self, queries: List[int], intLength: int) -> List[int]:
l = (intLength + 1) >> 1
start, end = 10 ** (l - 1), 10**l - 1
ans = []
for q in queries:
v = start + q - 1
if v > end:
ans.append(-1)
continue
s = str(v)
s += s[::-1][intLength % 2 :]
ans.append(int(s))
return ans
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride2.py | {
"start": 788,
"end": 1919
} | class ____(Base1):
def f1(self, arg0: int = 0, *, kwarg0: int, kwarg1: int = 0) -> None: ...
# This should generate an error because of a positional parameter mismatch.
def f2(self, arg0: int, *, kwarg0: int, kwarg1: int = 0) -> None: ...
# This should generate an error because of a missing kwarg1.
def f3(self, arg0: int = 0, *, kwarg0: int, kwarg1: int) -> None: ...
# This should generate an error because kwarg0 is the wrong type.
def f4(self, arg0: int = 0, *kwarg0: str) -> None: ...
def g1(self, xxx: int, /, b: str, *, kwarg0: int) -> None: ...
def g2(self, __a: int, b: str, *, kwarg0: int) -> None: ...
# This should generate an error because of a name mismatch between b and c.
def g3(self, __a: int, c: str, *, kwarg0: int) -> None: ...
# This should generate an error because of a type mismatch for b.
def g4(self, __a: int, b: int, *, kwarg0: int) -> None: ...
def g5(self, __a: int, b: str = "hi", *, kwarg0: int) -> None: ...
def g6(self, __a: int, b: str, c: str = "hi", *, kwarg0: int) -> None: ...
P = ParamSpec("P")
R = TypeVar("R")
| Derived1 |
python | allegroai__clearml | clearml/backend_interface/task/development/worker.py | {
"start": 380,
"end": 6483
} | class ____(object):
property_abort_callback_completed = "_abort_callback_completed"
property_abort_callback_timeout = "_abort_callback_timeout"
property_abort_poll_freq = "_abort_poll_freq"
prefix = attr.ib(type=str, default="MANUAL:")
report_stdout = deferred_config("development.worker.log_stdout", True)
report_period = deferred_config(
"development.worker.report_period_sec",
30.0,
transform=lambda x: float(max(x, 1.0)),
)
ping_period = deferred_config(
"development.worker.ping_period_sec",
30.0,
transform=lambda x: float(max(x, 1.0)),
)
def __init__(self) -> None:
self._dev_stop_signal = None
self._thread = None
self._exit_event = SafeEvent()
self._task = None
self._support_ping = False
self._poll_freq = None
self._abort_cb = None
self._abort_cb_timeout = None
self._cb_completed = None
def ping(self, timestamp: Optional[Any] = None) -> bool:
try:
if self._task:
self._task.send(tasks.PingRequest(self._task.id))
except Exception: # noqa
return False
return True
def register(self, task: Any, stop_signal_support: Optional[bool] = None) -> bool:
if self._thread:
return True
if (stop_signal_support is None and TaskStopSignal.enabled) or stop_signal_support is True:
self._dev_stop_signal = TaskStopSignal(task=task)
self._support_ping = hasattr(tasks, "PingRequest")
# if there is nothing to monitor, leave
if not self._support_ping and not self._dev_stop_signal:
return
self._task = task
self._exit_event.clear()
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
return True
def register_abort_callback(
self,
callback_function: Callable,
execution_timeout: Union[float, int],
poll_freq: Union[float, int, None],
) -> None:
if not self._task:
return
self._poll_freq = float(poll_freq) if poll_freq else None
self._abort_cb = callback_function
self._abort_cb_timeout = float(execution_timeout)
if not callback_function:
# noinspection PyProtectedMember
self._task._set_runtime_properties({DevWorker.property_abort_callback_timeout: float(-1)})
return
# noinspection PyProtectedMember
self._task._set_runtime_properties(
{
self.property_abort_callback_timeout: float(execution_timeout),
self.property_abort_poll_freq: float(poll_freq),
self.property_abort_callback_completed: "",
}
)
def _inner_abort_cb_wrapper(self) -> None:
# store the task object because we might nullify it
task = self._task
# call the user abort callback
try:
if self._abort_cb:
self._abort_cb()
self._cb_completed = True
except SystemError:
# we will get here if we killed the thread externally,
# we should not try to mark as completed, just leave the thread
return
except BaseException as ex: # noqa
if task and task.log:
task.log.warning("### TASK STOPPING - USER ABORTED - CALLBACK EXCEPTION: {} ###".format(ex))
# set runtime property, abort completed for the agent to know we are done
if task:
# noinspection PyProtectedMember
task._set_runtime_properties({self.property_abort_callback_completed: 1})
def _launch_abort_cb(self) -> None:
timeout = self._abort_cb_timeout or 300.0
if self._task and self._task.log:
self._task.log.warning(
"### TASK STOPPING - USER ABORTED - LAUNCHING CALLBACK (timeout {} sec) ###".format(timeout)
)
tic = time()
timed_out = False
try:
callback_thread = Thread(target=self._inner_abort_cb_wrapper)
callback_thread.daemon = True
callback_thread.start()
callback_thread.join(timeout=timeout)
if callback_thread.is_alive():
kill_thread(callback_thread, wait=False)
timed_out = True
except: # noqa
# something went wrong no just leave the process
pass
if self._task and self._task.log:
self._task.log.warning(
"### TASK STOPPING - USER ABORTED - CALLBACK {} ({:.2f} sec) ###".format(
"TIMED OUT" if timed_out else ("COMPLETED" if self._cb_completed else "FAILED"),
time() - tic,
)
)
def _daemon(self) -> None:
last_ping = time()
while self._task is not None:
try:
wait_timeout = min(float(self.ping_period), float(self.report_period))
if self._poll_freq:
wait_timeout = min(self._poll_freq, wait_timeout)
if self._exit_event.wait(wait_timeout):
return
# send ping request
if self._support_ping and (time() - last_ping) >= float(self.ping_period):
self.ping()
last_ping = time()
if self._dev_stop_signal:
stop_reason = self._dev_stop_signal.test()
if stop_reason and self._task:
# call abort callback
if self._abort_cb:
self._launch_abort_cb()
# noinspection PyProtectedMember
self._task._dev_mode_stop_task(stop_reason)
except Exception: # noqa
pass
def unregister(self) -> bool:
self._dev_stop_signal = None
self._task = None
self._thread = None
self._exit_event.set()
return True
| DevWorker |
python | pypa__warehouse | tests/unit/utils/test_sns.py | {
"start": 2342,
"end": 13207
} | class ____:
@pytest.mark.parametrize(
("topics", "data", "error"),
[
([], {}, "Unknown SignatureVersion"),
([], {"SignatureVersion": "1"}, "Unknown SignatureVersion"),
([], {"SignatureVersion": "3"}, "Unknown SignatureVersion"),
(
[],
{
"SignatureVersion": "2",
"SigningCertURL": "http://sns.us-west-2.amazonaws.com/cert.pem",
},
"Invalid scheme for SigningCertURL",
),
(
[],
{
"SignatureVersion": "2",
"SigningCertURL": "https://sns.us-west-2.attacker.com/cert.pem",
},
"Invalid location for SigningCertURL",
),
(
[],
{
"SignatureVersion": "2",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Signature": "SNYwQnC0BxjSo2E4aZFRiA==",
"Type": "Who Knows?",
},
"Invalid Type",
),
(
[],
{
"SignatureVersion": "2",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Signature": "SNYwQnC0BxjSo2E4aZFRiA==",
"Type": "Notification",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": (
datetime.datetime.now(datetime.UTC).strftime(
"%Y-%m-%dT%H:%M:%S.%fZ"
)
),
"TopicArn": "This is My Topic",
},
"Invalid Signature",
),
(
[],
{
"SignatureVersion": "2",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Signature": VALID_SIGNATURE,
"Type": "Notification",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": "Wat?",
"TopicArn": "This is My Topic",
},
"Unknown Timestamp format",
),
(
[],
{
"SignatureVersion": "2",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Signature": VALID_SIGNATURE,
"Type": "Notification",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": (
(
datetime.datetime.now(datetime.UTC)
- datetime.timedelta(days=1)
).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
),
"TopicArn": "This is My Topic",
},
"Message has expired",
),
(
["The topic I expected"],
{
"SignatureVersion": "2",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Signature": VALID_SIGNATURE,
"Type": "Notification",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": (
datetime.datetime.now(datetime.UTC).strftime(
"%Y-%m-%dT%H:%M:%S.%fZ"
)
),
"TopicArn": "This topic I got but didn't expect",
},
"Invalid TopicArn",
),
],
)
def test_invalid(self, sns_certificate, sns_privatekey, topics, data, error):
response = pretend.stub(raise_for_status=lambda: None, content=sns_certificate)
session = pretend.stub(get=lambda url: response)
verifier = MessageVerifier(topics=topics, session=session)
if data.get("Signature") is VALID_SIGNATURE:
private_key = load_pem_private_key(sns_privatekey, password=None)
signature_bytes = private_key.sign(
verifier._get_data_to_sign(data),
PKCS1v15(),
hashes.SHA256(),
)
data["Signature"] = base64.b64encode(signature_bytes)
with pytest.raises(InvalidMessageError, match=error):
verifier.verify(data)
@pytest.mark.parametrize(
("topics", "data"),
[
(
["valid topic"],
{
"SignatureVersion": "2",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Type": "Notification",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": (
datetime.datetime.now(datetime.UTC).strftime(
"%Y-%m-%dT%H:%M:%S.%fZ"
)
),
"TopicArn": "valid topic",
},
),
(
["valid topic", "another valid topic"],
{
"SignatureVersion": "2",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Type": "Notification",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": (
datetime.datetime.now(datetime.UTC).strftime(
"%Y-%m-%dT%H:%M:%S.%fZ"
)
),
"TopicArn": "another valid topic",
},
),
(
["valid topic"],
{
"SignatureVersion": "2",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Type": "Notification",
"Subject": "This is a subject",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": (
datetime.datetime.now(datetime.UTC).strftime(
"%Y-%m-%dT%H:%M:%S.%fZ"
)
),
"TopicArn": "valid topic",
},
),
(
["valid topic"],
{
"SignatureVersion": "2",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/cert.pem",
"Type": "SubscriptionConfirmation",
"Message": "This is My Message",
"MessageId": "1",
"SubscribeURL": "https://example.com/subscribe",
"Token": "1234",
"Timestamp": (
datetime.datetime.now(datetime.UTC).strftime(
"%Y-%m-%dT%H:%M:%S.%fZ"
)
),
"TopicArn": "valid topic",
},
),
],
)
def test_valid(self, sns_certificate, sns_privatekey, topics, data):
response = pretend.stub(raise_for_status=lambda: None, content=sns_certificate)
session = pretend.stub(get=lambda url: response)
verifier = MessageVerifier(topics=topics, session=session)
private_key = load_pem_private_key(sns_privatekey, password=None)
signature_bytes = private_key.sign(
verifier._get_data_to_sign(data),
PKCS1v15(),
hashes.SHA256(),
)
data["Signature"] = base64.b64encode(signature_bytes)
verifier.verify(data)
@pytest.mark.parametrize(
("data", "expected"),
[
(
{
"Type": "SubscriptionConfirmation",
"Message": "This is My Message",
"MessageId": "1",
"SubscribeURL": "https://example.com/subscribe",
"Token": "1234",
"Timestamp": "2018-04-08T17:01:40.114582Z",
"TopicArn": "valid topic",
},
"""
Message
This is My Message
MessageId
1
SubscribeURL
https://example.com/subscribe
Timestamp
2018-04-08T17:01:40.114582Z
Token
1234
TopicArn
valid topic
Type
SubscriptionConfirmation
""",
),
(
{
"Type": "Notification",
"Subject": "This is a subject",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": "2018-04-08T17:01:40.114582Z",
"TopicArn": "valid topic",
},
"""
Message
This is My Message
MessageId
1
Subject
This is a subject
Timestamp
2018-04-08T17:01:40.114582Z
TopicArn
valid topic
Type
Notification
""",
),
(
{
"Type": "Notification",
"Message": "This is My Message",
"MessageId": "1",
"Timestamp": "2018-04-08T17:01:40.114582Z",
"TopicArn": "valid topic",
},
"""
Message
This is My Message
MessageId
1
Timestamp
2018-04-08T17:01:40.114582Z
TopicArn
valid topic
Type
Notification
""",
),
],
)
def test_signature_data(self, data, expected):
# We have this method tested specifically, because the above tests
# don't actually test if this method is functioning correctly,
# since it uses it for the input and the expected.
verifier = MessageVerifier(topics=[], session=pretend.stub())
expected = textwrap.dedent(expected).lstrip().encode("utf8")
assert verifier._get_data_to_sign(data) == expected
| TestMessageVerifier |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType7.py | {
"start": 714,
"end": 1001
} | class ____(Generic[_T2, _T2A]):
def __init__(self, a: _T2, b: _T2A):
self._a1: dict[str, _T2A] = {"a": b}
self._a2: dict[str, _T2] = {"a": a}
self._b: tuple[_T2, ...] = (a, a, a)
self._c: tuple[_T2, _T2] = (a, a)
self._d: list[_T2] = [a]
| Class2A |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 9865,
"end": 11069
} | class ____:
"""
This class represents replica info in orm format api, which is deprecated in milvus client api.
use `ReplicaInfo` instead.
"""
def __init__(
self,
group_id: int,
shards: List[str],
group_nodes: List[tuple],
resource_group: str,
num_outbound_node: dict,
) -> None:
self._id = group_id
self._shards = shards
self._group_nodes = tuple(group_nodes)
self._resource_group = resource_group
self._num_outbound_node = num_outbound_node
def __repr__(self) -> str:
return (
f"Group: <group_id:{self.id}>, <group_nodes:{self.group_nodes}>, "
f"<shards:{self.shards}>, <resource_group: {self.resource_group}>, "
f"<num_outbound_node: {self.num_outbound_node}>"
)
@property
def id(self):
return self._id
@property
def group_nodes(self):
return self._group_nodes
@property
def shards(self):
return self._shards
@property
def resource_group(self):
return self._resource_group
@property
def num_outbound_node(self):
return self._num_outbound_node
| Group |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.