language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | astropy__astropy | astropy/modeling/functional_models.py | {
"start": 19641,
"end": 21330
} | class ____(Fittable1DModel):
"""
Multiply a model by a dimensionless factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
Notes
-----
If ``factor`` is a `~astropy.units.Quantity` then the units will be
stripped before the scaling operation.
"""
factor = Parameter(default=1, description="Factor by which to scale a model")
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.factor.input_unit is None:
return None
return {self.inputs[0]: self.factor.input_unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function."""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box # noqa: B018
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(
self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box()
)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function."""
if isinstance(factor, u.Quantity):
factor = factor.value
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter."""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"factor": outputs_unit[self.outputs[0]]}
| Scale |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_isin.py | {
"start": 151,
"end": 7599
} | class ____:
def test_isin(self):
# GH#4211
df = DataFrame(
{
"vals": [1, 2, 3, 4],
"ids": ["a", "b", "f", "n"],
"ids2": ["a", "n", "c", "n"],
},
index=["foo", "bar", "baz", "qux"],
)
other = ["a", "b", "c"]
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
def test_isin_empty(self, empty):
# GH#16991
df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
d = {"A": ["a"]}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, "A"] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
df.columns = ["A", "A"]
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, "A"] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH#4763
df = DataFrame(
{
"vals": [1, 2, 3, 4],
"ids": ["a", "b", "f", "n"],
"ids2": ["a", "n", "c", "n"],
},
index=["foo", "bar", "baz", "qux"],
)
msg = (
r"only list-like or dict-like objects are allowed "
r"to be passed to DataFrame.isin\(\), you passed a 'str'"
)
with pytest.raises(TypeError, match=msg):
df.isin("a")
with pytest.raises(TypeError, match=msg):
df.isin("aaa")
def test_isin_df(self):
df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
df2 = DataFrame({"A": [0, 2, 12, 4], "B": [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected.loc[[1, 3], "A"] = True
expected.loc[[0, 2], "B"] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ["A", "C"]
result = df1.isin(df2)
expected["B"] = False
tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH#16394
df = DataFrame({"A": [1, 2, 3], "B": ["a", "b", "f"]})
df["C"] = list(zip(df["A"], df["B"]))
result = df["C"].isin([(1, "a")])
tm.assert_series_equal(result, Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]], columns=["B", "B"])
msg = r"cannot compute isin with a duplicate axis\."
with pytest.raises(ValueError, match=msg):
df1.isin(df2)
# just index duped
df2 = DataFrame(
[[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=["A", "B"],
index=[0, 0, 1, 1],
)
with pytest.raises(ValueError, match=msg):
df1.isin(df2)
# cols and index:
df2.columns = ["B", "B"]
with pytest.raises(ValueError, match=msg):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({"A": [1, 0, 1, 0], "B": [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=["A", "A"])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
tm.assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = DataFrame(
{"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]}, index=["a", "b", "c", "d"]
)
s = Series([1, 3, 11, 4], index=["a", "b", "c", "d"])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc["a", "A"] = True
expected.loc["d"] = True
result = df.isin(s)
tm.assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples(
[
(0, "a", "foo"),
(0, "a", "bar"),
(0, "b", "bar"),
(0, "b", "baz"),
(2, "a", "foo"),
(2, "a", "bar"),
(2, "c", "bar"),
(2, "c", "baz"),
(1, "b", "foo"),
(1, "b", "bar"),
(1, "c", "bar"),
(1, "c", "baz"),
]
)
df1 = DataFrame({"A": np.ones(12), "B": np.zeros(12)}, index=idx)
df2 = DataFrame(
{
"A": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
"B": [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1],
}
)
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
df2.index = idx
expected = df2.values.astype(bool)
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=["A", "B"], index=idx)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
def test_isin_empty_datetimelike(self):
# GH#15473
df1_ts = DataFrame({"date": pd.to_datetime(["2014-01-01", "2014-01-02"])})
df1_td = DataFrame({"date": [pd.Timedelta(1, "s"), pd.Timedelta(2, "s")]})
df2 = DataFrame({"date": []})
df3 = DataFrame()
expected = DataFrame({"date": [False, False]})
result = df1_ts.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_ts.isin(df3)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df3)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"values",
[
DataFrame({"a": [1, 2, 3]}, dtype="category"),
Series([1, 2, 3], dtype="category"),
],
)
def test_isin_category_frame(self, values):
# GH#34256
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
expected = DataFrame({"a": [True, True, True], "b": [False, False, False]})
result = df.isin(values)
tm.assert_frame_equal(result, expected)
def test_isin_read_only(self):
# https://github.com/pandas-dev/pandas/issues/37174
arr = np.array([1, 2, 3])
arr.setflags(write=False)
df = DataFrame([1, 2, 3])
result = df.isin(arr)
expected = DataFrame([True, True, True])
tm.assert_frame_equal(result, expected)
def test_isin_not_lossy(self):
# GH 53514
val = 1666880195890293744
df = DataFrame({"a": [val], "b": [1.0]})
result = df.isin([val])
expected = DataFrame({"a": [True], "b": [False]})
tm.assert_frame_equal(result, expected)
| TestDataFrameIsIn |
python | agronholm__apscheduler | src/apscheduler/_enums.py | {
"start": 66,
"end": 475
} | class ____(Enum):
"""
Specifies what the scheduler should be doing when it's running.
.. attribute:: scheduler
processes due schedules, but won't run jobs
.. attribute:: worker
runs due jobs, but won't process schedules
.. attribute:: both
processes schedules and runs due jobs
"""
scheduler = auto()
worker = auto()
both = auto()
| SchedulerRole |
python | apache__avro | lang/py/avro/datafile.py | {
"start": 9919,
"end": 14329
} | class ____(_DataFileMetadata):
"""Read files written by DataFileWriter."""
__slots__ = (
"_datum_decoder",
"_datum_reader",
"_file_length",
"_raw_decoder",
"_reader",
"block_count",
"sync_marker",
)
_datum_decoder: Optional[avro.io.BinaryDecoder]
_datum_reader: avro.io.DatumReader
_file_length: int
_raw_decoder: avro.io.BinaryDecoder
_reader: IO[bytes]
block_count: int
sync_marker: bytes
# TODO(hammer): allow user to specify expected schema?
# TODO(hammer): allow user to specify the encoder
def __init__(self, reader: IO[AnyStr], datum_reader: avro.io.DatumReader) -> None:
if hasattr(reader, "mode") and "b" not in reader.mode:
warnings.warn(avro.errors.AvroWarning(f"Reader binary data from a reader {reader!r} that's opened for text"))
bytes_reader = cast(IO[bytes], getattr(reader, "buffer", reader))
self._reader = bytes_reader
self._raw_decoder = avro.io.BinaryDecoder(bytes_reader)
self._datum_decoder = None # Maybe reset at every block.
self._datum_reader = datum_reader
# read the header: magic, meta, sync
self._read_header()
# get file length
self._file_length = self.determine_file_length()
# get ready to read
self.block_count = 0
self.datum_reader.writers_schema = avro.schema.parse(self.schema)
def __iter__(self) -> "DataFileReader":
return self
@property
def reader(self) -> IO[bytes]:
return self._reader
@property
def raw_decoder(self) -> avro.io.BinaryDecoder:
return self._raw_decoder
@property
def datum_decoder(self) -> Optional[avro.io.BinaryDecoder]:
return self._datum_decoder
@property
def datum_reader(self) -> avro.io.DatumReader:
return self._datum_reader
@property
def file_length(self) -> int:
return self._file_length
def determine_file_length(self) -> int:
"""
Get file length and leave file cursor where we found it.
"""
remember_pos = self.reader.tell()
self.reader.seek(0, 2)
file_length = self.reader.tell()
self.reader.seek(remember_pos)
return file_length
def is_EOF(self) -> bool:
return self.reader.tell() == self.file_length
def _read_header(self) -> None:
# seek to the beginning of the file to get magic block
self.reader.seek(0, 0)
# read header into a dict
header = cast(HeaderType, self.datum_reader.read_data(META_SCHEMA, META_SCHEMA, self.raw_decoder))
if header.get("magic") != MAGIC:
raise avro.errors.AvroException(f"Not an Avro data file: {header.get('magic')!r} doesn't match {MAGIC!r}.")
self._meta = header["meta"]
self.sync_marker = header["sync"]
def _read_block_header(self) -> None:
self.block_count = self.raw_decoder.read_long()
codec = avro.codecs.get_codec(self.codec)
self._datum_decoder = codec.decompress(self.raw_decoder)
def _skip_sync(self) -> bool:
"""
Check if the next bytes match the sync marker.
If not, rewind the read position.
"""
pos = self.reader.tell()
if self.reader.read(SYNC_SIZE) == self.sync_marker:
return True
self.reader.seek(pos) # Reset position if sync doesn't match
return False
def __next__(self) -> object:
"""Return the next datum in the file."""
while self.block_count == 0:
if self.is_EOF() or (self._skip_sync() and self.is_EOF()):
raise StopIteration
self._read_block_header()
if self.datum_decoder is None:
raise avro.errors.DataFileException("DataFile is not ready to read because it has no decoder")
datum = self.datum_reader.read(self.datum_decoder)
self.block_count -= 1
return datum
def close(self) -> None:
"""Close this reader."""
self.reader.close()
def __enter__(self) -> "DataFileReader":
return self
def __exit__(self, type_: Optional[Type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType]) -> None:
"""Perform a close if there's no exception."""
if type_ is None:
self.close()
| DataFileReader |
python | tensorflow__tensorflow | tensorflow/python/ops/conv2d_benchmark.py | {
"start": 3483,
"end": 8215
} | class ____(test.Benchmark):
"""Benchmark conv2d!"""
def _run_graph(self, device, dtype, data_format, input_shape, filter_shape,
strides, padding, num_iters, warmup_iters):
"""runs the graph and print its execution time.
Args:
device: String, the device to run on.
dtype: Data type for the convolution.
data_format: A string from: "NHWC" or "NCHW". Data format for input and
output data.
input_shape: Shape of the input tensor.
filter_shape: Shape of the filter tensor.
strides: A list of ints. 1-D of length 4. The stride of sliding
window for each dimension of input.
padding: A string from: "SAME", "VALID". The type of padding
algorithm to use. num_iters: Number of iterations to run the
benchmark.
num_iters: number of iterations to run conv2d.
warmup_iters: number of iterations for warmup runs.
Returns:
The duration of the run in seconds.
"""
graph = ops.Graph()
with graph.as_default():
warmup_outputs, outputs = build_graph(device, dtype, data_format,
input_shape, filter_shape, strides,
padding, num_iters, warmup_iters)
config = config_pb2.ConfigProto()
config.graph_options.optimizer_options.opt_level = -1
rewrite_options = config.graph_options.rewrite_options
# Disable layout optimizer to not change input data_format.
rewrite_options.layout_optimizer = (
rewriter_config_pb2.RewriterConfig.ON if FLAGS.enable_layout_optimizer
else rewriter_config_pb2.RewriterConfig.OFF)
# Convolution ops are effectively noop in the test graph as we are not
# fetching the convolution outputs. Disable dependency optimizer to not
# remove the conv ops.
rewrite_options.dependency_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
with session_lib.Session(graph=graph, config=config) as session:
# TODO(hinsu): Use run_op_benchmark method from test.Benchmark to run
# benchmark along with warmup.
variables.global_variables_initializer().run()
# warmup runs
session.run(warmup_outputs)
start_time = time.time()
session.run(outputs)
duration = (time.time() - start_time) / num_iters
print("%s %s %s inputshape:%s filtershape:%s strides:%s padding:%s "
"%d iters: %.8f sec" %
(device, str(dtype), data_format, str(input_shape).replace(
" ", ""), str(filter_shape).replace(" ", ""),
str(strides).replace(" ", ""), padding, num_iters, duration))
name_template = (
"conv2d_{device}_{datatype}_{data_format}_input_shape_{inputshape}_"
"filter_shape_{filtershape}_strides_{strides}_padding_{padding}")
self.report_benchmark(
name=name_template.format(
device=device,
datatype=str(dtype),
data_format=str(data_format),
inputshape=str(input_shape).replace(" ", ""),
filtershape=str(filter_shape).replace(" ", ""),
strides=str(strides).replace(" ", ""),
padding=padding).replace(" ", ""),
iters=num_iters,
wall_time=duration)
return duration
def benchmark_conv2d(self):
print("conv2d benchmark:")
data_types = [dtypes.float32, dtypes.float16]
data_formats = ["NHWC", "NCHW"]
in_channels = list(range(1, 10)) + list(range(10, 20, 2)) + list(
range(20, 33, 4))
out_channels = [4, 16, 32]
hw_strides = [[2, 2]]
paddings = ["VALID", "SAME"]
args_lists = [
data_types, data_formats, in_channels, out_channels, hw_strides,
paddings
]
for args in itertools.product(*args_lists):
dtype, data_format, in_channel, out_channel, hw_stride, padding = args
# Keep batch size same as out channels just to reduce the number of
# different configurations to benchmark.
batch_size = out_channel
h, w, fh, fw = 500, 500, 3, 3
if data_format == "NHWC":
ishape = [batch_size, h, w, in_channel]
stride = [1] + hw_stride + [1]
elif data_format == "NCHW":
ishape = [batch_size, in_channel, h, w]
stride = [1, 1] + hw_stride
else:
raise ValueError("Unknown data_format: " + str(data_format))
fshape = [fh, fw, in_channel, out_channel]
num_iters = 80
warmup_iters = 2
self._run_graph("gpu", dtype, data_format, ishape, fshape, stride,
padding, num_iters, warmup_iters)
if __name__ == "__main__":
test.main()
| Conv2DBenchmark |
python | jina-ai__jina | jina/proto/docarray_v1/pb/jina_pb2_grpc.py | {
"start": 17265,
"end": 17788
} | class ____(object):
"""*
jina gRPC service to trigger a snapshot at the Executor Runtime.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.snapshot = channel.unary_unary(
'/jina.JinaExecutorSnapshot/snapshot',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=jina__pb2.SnapshotStatusProto.FromString,
)
| JinaExecutorSnapshotStub |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/event_logs.py | {
"start": 987,
"end": 1620
} | class ____(BaseModel):
"""Event Log Response."""
id: int = Field(alias="event_log_id")
dttm: datetime = Field(alias="when")
dag_id: str | None
task_id: str | None
run_id: str | None
map_index: int | None
try_number: int | None
event: str
logical_date: datetime | None
owner: str | None
extra: str | None
dag_display_name: str | None = Field(
validation_alias=AliasPath("dag_model", "dag_display_name"), default=None
)
task_display_name: str | None = Field(
validation_alias=AliasPath("task_instance", "task_display_name"), default=None
)
| EventLogResponse |
python | kamyu104__LeetCode-Solutions | Python/number-of-ways-to-buy-pens-and-pencils.py | {
"start": 1577,
"end": 1921
} | class ____(object):
def waysToBuyPensPencils(self, total, cost1, cost2):
"""
:type total: int
:type cost1: int
:type cost2: int
:rtype: int
"""
if cost1 < cost2:
cost1, cost2 = cost2, cost1
return sum((total-i*cost1)//cost2+1 for i in xrange(total//cost1+1))
| Solution2 |
python | getsentry__sentry | src/sentry/api/serializers/models/activity.py | {
"start": 724,
"end": 6060
} | class ____(Serializer):
def __init__(self, environment_func=None):
self.environment_func = environment_func
def get_attrs(self, item_list, user, **kwargs):
from sentry.api.serializers.models.group import GroupSerializer
# TODO(dcramer); assert on relations
user_ids = [i.user_id for i in item_list if i.user_id]
user_list = []
if user_ids:
user_list = user_service.serialize_many(
filter={"user_ids": user_ids}, as_user=serialize_generic_user(user)
)
users = {u["id"]: u for u in user_list}
# If an activity is created by the proxy user of a Sentry App, attach it to the payload
sentry_apps_list: list[RpcSentryApp] = []
if user_ids:
sentry_apps_list = app_service.get_sentry_apps_by_proxy_users(proxy_user_ids=user_ids)
# Minimal Sentry App serialization to keep the payload minimal
sentry_apps = {
str(app.proxy_user_id): {
"id": str(app.id),
"name": app.name,
"slug": app.slug,
"avatars": serialize(app.avatars, user, serializer=SentryAppAvatarSerializer()),
}
for app in sentry_apps_list
if app.proxy_user_id
}
commit_ids = {
i.data["commit"]
for i in item_list
if i.type == ActivityType.SET_RESOLVED_IN_COMMIT.value
}
if commit_ids:
commit_list = list(Commit.objects.filter(id__in=commit_ids))
commits_by_id = {
c.id: d
for c, d in zip(
commit_list,
serialize(commit_list, user, serializer=CommitWithReleaseSerializer()),
)
}
commits = {
i: commits_by_id.get(i.data["commit"])
for i in item_list
if i.type == ActivityType.SET_RESOLVED_IN_COMMIT.value
}
else:
commits = {}
pull_request_ids = {
i.data["pull_request"]
for i in item_list
if i.type == ActivityType.SET_RESOLVED_IN_PULL_REQUEST.value
}
if pull_request_ids:
pull_request_list = list(PullRequest.objects.filter(id__in=pull_request_ids))
pull_requests_by_id = {
c.id: d for c, d in zip(pull_request_list, serialize(pull_request_list, user))
}
pull_requests = {
i: pull_requests_by_id.get(i.data["pull_request"])
for i in item_list
if i.type == ActivityType.SET_RESOLVED_IN_PULL_REQUEST.value
}
else:
pull_requests = {}
groups = {
k: serialize(v, user=user, serializer=GroupSerializer(collapse=["stats"]))
for k, v in Group.objects.in_bulk(
{
i.data["source_id"]
for i in item_list
if i.type == ActivityType.UNMERGE_DESTINATION.value
}
| {
i.data["destination_id"]
for i in item_list
if i.type == ActivityType.UNMERGE_SOURCE.value
}
).items()
}
return {
item: {
"user": users.get(str(item.user_id)) if item.user_id else None,
"sentry_app": sentry_apps.get(str(item.user_id)) if item.user_id else None,
"source": (
groups.get(item.data["source_id"])
if item.type == ActivityType.UNMERGE_DESTINATION.value
else None
),
"destination": (
groups.get(item.data["destination_id"])
if item.type == ActivityType.UNMERGE_SOURCE.value
else None
),
"commit": commits.get(item),
"pull_request": pull_requests.get(item),
}
for item in item_list
}
def serialize(self, obj: Activity, attrs, user, **kwargs):
if obj.type == ActivityType.SET_RESOLVED_IN_COMMIT.value:
data = {"commit": attrs["commit"]}
elif obj.type == ActivityType.SET_RESOLVED_IN_PULL_REQUEST.value:
data = {"pullRequest": attrs["pull_request"]}
elif obj.type == ActivityType.UNMERGE_DESTINATION.value:
data = {"fingerprints": obj.data["fingerprints"], "source": attrs["source"]}
elif obj.type == ActivityType.UNMERGE_SOURCE.value:
data = {"fingerprints": obj.data["fingerprints"], "destination": attrs["destination"]}
else:
data = obj.data or {}
# XXX: We had a problem where Users were embedded into the mentions
# attribute of group notes which needs to be removed
# While group_note update has been fixed there are still many skunky comments
# in the database.
data.pop("mentions", None)
return {
"id": str(obj.id),
"user": attrs["user"],
"sentry_app": attrs["sentry_app"],
"type": obj.get_type_display(),
"data": data,
"dateCreated": obj.datetime,
}
| ActivitySerializer |
python | PrefectHQ__prefect | src/prefect/server/orchestration/core_policy.py | {
"start": 2044,
"end": 3547
} | class ____(FlowRunOrchestrationPolicy):
"""
Orchestration rules that run against flow-run-state transitions in priority order.
"""
@staticmethod
def priority() -> list[
Union[
type[BaseUniversalTransform[orm_models.FlowRun, core.FlowRunPolicy]],
type[BaseOrchestrationRule[orm_models.FlowRun, core.FlowRunPolicy]],
]
]:
return cast(
list[
Union[
type[
BaseUniversalTransform[orm_models.FlowRun, core.FlowRunPolicy]
],
type[BaseOrchestrationRule[orm_models.FlowRun, core.FlowRunPolicy]],
]
],
[
PreventDuplicateTransitions,
HandleFlowTerminalStateTransitions,
EnforceCancellingToCancelledTransition,
BypassCancellingFlowRunsWithNoInfra,
PreventPendingTransitions,
CopyDeploymentConcurrencyLeaseID,
SecureFlowConcurrencySlots,
RemoveDeploymentConcurrencyLeaseForOldClientVersions,
EnsureOnlyScheduledFlowsMarkedLate,
HandlePausingFlows,
HandleResumingPausedFlows,
CopyScheduledTime,
WaitForScheduledTime,
RetryFailedFlows,
InstrumentFlowRunStateTransitions,
ReleaseFlowConcurrencySlots,
],
)
| CoreFlowPolicy |
python | openai__openai-python | src/openai/types/responses/response_computer_tool_call.py | {
"start": 1888,
"end": 2223
} | class ____(BaseModel):
keys: List[str]
"""The combination of keys the model is requesting to be pressed.
This is an array of strings, each representing a key.
"""
type: Literal["keypress"]
"""Specifies the event type.
For a keypress action, this property is always set to `keypress`.
"""
| ActionKeypress |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/execution/middleware.py | {
"start": 152,
"end": 1725
} | class ____(object):
def __init__(self, *middlewares, **kwargs):
self.middlewares = middlewares
self.wrap_in_promise = kwargs.get('wrap_in_promise', True)
self._middleware_resolvers = list(get_middleware_resolvers(middlewares))
self._cached_resolvers = {}
def get_field_resolver(self, field_resolver):
if field_resolver not in self._cached_resolvers:
self._cached_resolvers[field_resolver] = middleware_chain(
field_resolver,
self._middleware_resolvers,
wrap_in_promise=self.wrap_in_promise,
)
return self._cached_resolvers[field_resolver]
middlewares = MiddlewareManager
def get_middleware_resolvers(middlewares):
for middleware in middlewares:
# If the middleware is a function instead of a class
if inspect.isfunction(middleware):
yield middleware
if not hasattr(middleware, MIDDLEWARE_RESOLVER_FUNCTION):
continue
yield getattr(middleware, MIDDLEWARE_RESOLVER_FUNCTION)
def middleware_chain(func, middlewares, wrap_in_promise):
if not middlewares:
return func
if wrap_in_promise:
middlewares = chain((func, make_it_promise), middlewares)
else:
middlewares = chain((func,), middlewares)
last_func = None
for middleware in middlewares:
last_func = partial(middleware, last_func) if last_func else middleware
return last_func
def make_it_promise(next, *a, **b):
return Promise.resolve(next(*a, **b))
| MiddlewareManager |
python | pytest-dev__pytest | src/_pytest/python.py | {
"start": 66875,
"end": 67195
} | class ____(Function):
"""This class is a stop gap solution until we evolve to have actual function
definition nodes and manage to get rid of ``metafunc``."""
def runtest(self) -> None:
raise RuntimeError("function definitions are not supposed to be run as tests")
setup = runtest
| FunctionDefinition |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | contents/5.2_Prioritized_Replay_DQN/RL_brain.py | {
"start": 297,
"end": 2873
} | class ____(object):
"""
This SumTree code is a modified version and the original code is from:
https://github.com/jaara/AI-blog/blob/master/SumTree.py
Story data with its priority in the tree.
"""
data_pointer = 0
def __init__(self, capacity):
self.capacity = capacity # for all priority values
self.tree = np.zeros(2 * capacity - 1)
# [--------------Parent nodes-------------][-------leaves to recode priority-------]
# size: capacity - 1 size: capacity
self.data = np.zeros(capacity, dtype=object) # for all transitions
# [--------------data frame-------------]
# size: capacity
def add(self, p, data):
tree_idx = self.data_pointer + self.capacity - 1
self.data[self.data_pointer] = data # update data_frame
self.update(tree_idx, p) # update tree_frame
self.data_pointer += 1
if self.data_pointer >= self.capacity: # replace when exceed the capacity
self.data_pointer = 0
def update(self, tree_idx, p):
change = p - self.tree[tree_idx]
self.tree[tree_idx] = p
# then propagate the change through tree
while tree_idx != 0: # this method is faster than the recursive loop in the reference code
tree_idx = (tree_idx - 1) // 2
self.tree[tree_idx] += change
def get_leaf(self, v):
"""
Tree structure and array storage:
Tree index:
0 -> storing priority sum
/ \
1 2
/ \ / \
3 4 5 6 -> storing priority for transitions
Array type for storing:
[0,1,2,3,4,5,6]
"""
parent_idx = 0
while True: # the while loop is faster than the method in the reference code
cl_idx = 2 * parent_idx + 1 # this leaf's left and right kids
cr_idx = cl_idx + 1
if cl_idx >= len(self.tree): # reach bottom, end search
leaf_idx = parent_idx
break
else: # downward search, always search for a higher priority node
if v <= self.tree[cl_idx]:
parent_idx = cl_idx
else:
v -= self.tree[cl_idx]
parent_idx = cr_idx
data_idx = leaf_idx - self.capacity + 1
return leaf_idx, self.tree[leaf_idx], self.data[data_idx]
@property
def total_p(self):
return self.tree[0] # the root
| SumTree |
python | TheAlgorithms__Python | data_structures/binary_tree/lazy_segment_tree.py | {
"start": 50,
"end": 4847
} | class ____:
def __init__(self, size: int) -> None:
self.size = size
# approximate the overall size of segment tree with given value
self.segment_tree = [0 for i in range(4 * size)]
# create array to store lazy update
self.lazy = [0 for i in range(4 * size)]
self.flag = [0 for i in range(4 * size)] # flag for lazy update
def left(self, idx: int) -> int:
"""
>>> segment_tree = SegmentTree(15)
>>> segment_tree.left(1)
2
>>> segment_tree.left(2)
4
>>> segment_tree.left(12)
24
"""
return idx * 2
def right(self, idx: int) -> int:
"""
>>> segment_tree = SegmentTree(15)
>>> segment_tree.right(1)
3
>>> segment_tree.right(2)
5
>>> segment_tree.right(12)
25
"""
return idx * 2 + 1
def build(
self, idx: int, left_element: int, right_element: int, a: list[int]
) -> None:
if left_element == right_element:
self.segment_tree[idx] = a[left_element - 1]
else:
mid = (left_element + right_element) // 2
self.build(self.left(idx), left_element, mid, a)
self.build(self.right(idx), mid + 1, right_element, a)
self.segment_tree[idx] = max(
self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
)
def update(
self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int
) -> bool:
"""
update with O(lg n) (Normal segment tree without lazy update will take O(nlg n)
for each update)
update(1, 1, size, a, b, v) for update val v to [a,b]
"""
if self.flag[idx] is True:
self.segment_tree[idx] = self.lazy[idx]
self.flag[idx] = False
if left_element != right_element:
self.lazy[self.left(idx)] = self.lazy[idx]
self.lazy[self.right(idx)] = self.lazy[idx]
self.flag[self.left(idx)] = True
self.flag[self.right(idx)] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
self.segment_tree[idx] = val
if left_element != right_element:
self.lazy[self.left(idx)] = val
self.lazy[self.right(idx)] = val
self.flag[self.left(idx)] = True
self.flag[self.right(idx)] = True
return True
mid = (left_element + right_element) // 2
self.update(self.left(idx), left_element, mid, a, b, val)
self.update(self.right(idx), mid + 1, right_element, a, b, val)
self.segment_tree[idx] = max(
self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
)
return True
# query with O(lg n)
def query(
self, idx: int, left_element: int, right_element: int, a: int, b: int
) -> int | float:
"""
query(1, 1, size, a, b) for query max of [a,b]
>>> A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
>>> segment_tree = SegmentTree(15)
>>> segment_tree.build(1, 1, 15, A)
>>> segment_tree.query(1, 1, 15, 4, 6)
7
>>> segment_tree.query(1, 1, 15, 7, 11)
14
>>> segment_tree.query(1, 1, 15, 7, 12)
15
"""
if self.flag[idx] is True:
self.segment_tree[idx] = self.lazy[idx]
self.flag[idx] = False
if left_element != right_element:
self.lazy[self.left(idx)] = self.lazy[idx]
self.lazy[self.right(idx)] = self.lazy[idx]
self.flag[self.left(idx)] = True
self.flag[self.right(idx)] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
mid = (left_element + right_element) // 2
q1 = self.query(self.left(idx), left_element, mid, a, b)
q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
return max(q1, q2)
def __str__(self) -> str:
return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
size = 15
segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| SegmentTree |
python | facebook__pyre-check | tools/generate_taint_models/get_filtered_sources.py | {
"start": 372,
"end": 1188
} | class ____(ModelGenerator[Model]):
def __init__(
self,
superset_generator: ModelGenerator[Model],
subset_generator: ModelGenerator[Model],
) -> None:
self.superset_generator = superset_generator
self.subset_generator = subset_generator
def gather_functions_to_model(self) -> Iterable[Callable[..., object]]:
return []
def compute_models(
self, functions_to_model: Iterable[Callable[..., object]]
) -> Iterable[Model]:
LOG.info("Computing models for the superset...")
superset_models = self.superset_generator.generate_models()
LOG.info("Computing models for the subset...")
subset_models = self.subset_generator.generate_models()
return set(superset_models) - set(subset_models)
| FilteredSourceGenerator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methods1.py | {
"start": 946,
"end": 1046
} | class ____:
def __call__(self) -> None:
print("CallableA.__call__:", f"{self=}")
| CallableA |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_batch.py | {
"start": 4431,
"end": 5335
} | class ____:
@mock.patch(CLOUD_BATCH_HOOK_PATH)
def test_execute(self, hook_mock):
filter = "filter_description"
limit = 2
operator = CloudBatchListJobsOperator(
task_id=TASK_ID, project_id=PROJECT_ID, region=REGION, filter=filter, limit=limit
)
operator.execute(context=mock.MagicMock())
hook_mock.return_value.list_jobs.assert_called_once_with(
region=REGION, project_id=PROJECT_ID, filter=filter, limit=limit
)
@mock.patch(CLOUD_BATCH_HOOK_PATH)
def test_execute_with_invalid_limit(self, hook_mock):
filter = "filter_description"
limit = -1
with pytest.raises(expected_exception=AirflowException):
CloudBatchListJobsOperator(
task_id=TASK_ID, project_id=PROJECT_ID, region=REGION, filter=filter, limit=limit
)
| TestCloudBatchListJobsOperator |
python | sqlalchemy__sqlalchemy | test/orm/dml/test_orm_upd_del_assorted.py | {
"start": 5362,
"end": 13023
} | class ____(fixtures.TestBase):
__sparse_driver_backend__ = True
@testing.variation("populate_existing", [True, False])
@testing.variation(
"use_onupdate",
[
"none",
"server",
"callable",
"clientsql",
("computed", testing.requires.computed_columns),
],
)
@testing.variation(
"use_returning",
[
("returning", testing.requires.update_returning),
("defaults", testing.requires.update_returning),
"none",
],
)
@testing.variation("synchronize", ["auto", "fetch", "evaluate"])
@testing.variation("pk_order", ["first", "middle"])
def test_update_populate_existing(
self,
decl_base,
populate_existing,
use_onupdate,
use_returning,
synchronize,
pk_order,
):
"""test #11912 and #11917"""
class Employee(ComparableEntity, decl_base):
__tablename__ = "employee"
if pk_order.first:
uuid: Mapped[uuid.UUID] = mapped_column(primary_key=True)
user_name: Mapped[str] = mapped_column(String(200), nullable=False)
if pk_order.middle:
uuid: Mapped[uuid.UUID] = mapped_column(primary_key=True)
if use_onupdate.server:
some_server_value: Mapped[str] = mapped_column(
server_onupdate=FetchedValue()
)
elif use_onupdate.callable:
some_server_value: Mapped[str] = mapped_column(
onupdate=lambda: "value 2"
)
elif use_onupdate.clientsql:
some_server_value: Mapped[str] = mapped_column(
onupdate=literal("value 2")
)
elif use_onupdate.computed:
some_server_value: Mapped[str] = mapped_column(
String(255),
Computed(user_name + " computed value"),
nullable=True,
)
else:
some_server_value: Mapped[str]
decl_base.metadata.create_all(testing.db)
s = fixture_session()
uuid1 = uuid.uuid4()
if use_onupdate.computed:
server_old_value, server_new_value = (
"e1 old name computed value",
"e1 new name computed value",
)
e1 = Employee(uuid=uuid1, user_name="e1 old name")
else:
server_old_value, server_new_value = ("value 1", "value 2")
e1 = Employee(
uuid=uuid1,
user_name="e1 old name",
some_server_value="value 1",
)
s.add(e1)
s.flush()
stmt = (
update(Employee)
.values(user_name="e1 new name")
.where(Employee.uuid == uuid1)
)
if use_returning.returning:
stmt = stmt.returning(Employee)
elif use_returning.defaults:
# NOTE: the return_defaults case here has not been analyzed for
# #11912 or #11917. future enhancements may change its behavior
stmt = stmt.return_defaults()
# perform out of band UPDATE on server value to simulate
# a computed col
if use_onupdate.none or use_onupdate.server:
s.connection().execute(
update(Employee.__table__).values(some_server_value="value 2")
)
execution_options = {}
if populate_existing:
execution_options["populate_existing"] = True
if synchronize.evaluate:
execution_options["synchronize_session"] = "evaluate"
if synchronize.fetch:
execution_options["synchronize_session"] = "fetch"
if use_returning.returning:
rows = s.scalars(stmt, execution_options=execution_options)
else:
s.execute(stmt, execution_options=execution_options)
if (
use_onupdate.clientsql
or use_onupdate.server
or use_onupdate.computed
):
if not use_returning.defaults:
# if server-side onupdate was generated, the col should have
# been expired
assert "some_server_value" not in e1.__dict__
# and refreshes when called. this is even if we have RETURNING
# rows we didn't fetch yet.
eq_(e1.some_server_value, server_new_value)
else:
# using return defaults here is not expiring. have not
# researched why, it may be because the explicit
# return_defaults interferes with the ORMs call
assert "some_server_value" in e1.__dict__
eq_(e1.some_server_value, server_old_value)
elif use_onupdate.callable:
if not use_returning.defaults or not synchronize.fetch:
# for python-side onupdate, col is populated with local value
assert "some_server_value" in e1.__dict__
# and is refreshed
eq_(e1.some_server_value, server_new_value)
else:
assert "some_server_value" in e1.__dict__
# and is not refreshed
eq_(e1.some_server_value, server_old_value)
else:
# no onupdate, then the value was not touched yet,
# even if we used RETURNING with populate_existing, because
# we did not fetch the rows yet
assert "some_server_value" in e1.__dict__
eq_(e1.some_server_value, server_old_value)
# now see if we can fetch rows
if use_returning.returning:
if populate_existing or not use_onupdate.none:
eq_(
set(rows),
{
Employee(
uuid=uuid1,
user_name="e1 new name",
some_server_value=server_new_value,
),
},
)
else:
# if no populate existing and no server default, that column
# is not touched at all
eq_(
set(rows),
{
Employee(
uuid=uuid1,
user_name="e1 new name",
some_server_value=server_old_value,
),
},
)
if use_returning.defaults:
# as mentioned above, the return_defaults() case here remains
# unanalyzed.
if synchronize.fetch or (
use_onupdate.clientsql
or use_onupdate.server
or use_onupdate.computed
or use_onupdate.none
):
eq_(e1.some_server_value, server_old_value)
else:
eq_(e1.some_server_value, server_new_value)
elif (
populate_existing and use_returning.returning
) or not use_onupdate.none:
eq_(e1.some_server_value, server_new_value)
else:
# no onupdate specified, and no populate existing with returning,
# the attribute is not refreshed
eq_(e1.some_server_value, server_old_value)
# do a full expire, now the new value is definitely there
s.commit()
s.expire_all()
eq_(e1.some_server_value, server_new_value)
| OnUpdatePopulationTest |
python | ZoranPandovski__al-go-rithms | data_structures/zigzag traversal/zigzagtraversal_iterative.py | {
"start": 0,
"end": 2033
} | class ____:
"""
A Node has data variable and pointers to its left and right nodes.
"""
def __init__(self, data):
self.left = None
self.right = None
self.data = data
def make_tree() -> Node:
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
return root
def zigzag_iterative(root: Node):
"""
ZigZag traverse by iterative method: Print node left to right and right to left, alternatively.
"""
if root == None:
return
# two stacks to store alternate levels
s1 = [] # For levels to be printed from right to left
s2 = [] # For levels to be printed from left to right
# append first level to first stack 's1'
s1.append(root)
# Keep printing while any of the stacks has some nodes
while not len(s1) == 0 or not len(s2) == 0:
# Print nodes of current level from s1 and append nodes of next level to s2
while not len(s1) == 0:
temp = s1[-1]
s1.pop()
print(temp.data, end = " ")
# Note that is left is appended before right
if temp.left:
s2.append(temp.left)
if temp.right:
s2.append(temp.right)
# Print nodes of current level from s2 and append nodes of next level to s1
while not len(s2) == 0:
temp = s2[-1]
s2.pop()
print(temp.data, end = " ")
# Note that is rightt is appended before left
if temp.right:
s1.append(temp.right)
if temp.left:
s1.append(temp.left)
def main(): # Main function for testing.
"""
Create binary tree.
"""
root = make_tree()
print("\nZigzag order traversal(iterative) is: ")
zigzag_iterative(root)
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| Node |
python | huggingface__transformers | src/transformers/models/gemma2/modular_gemma2.py | {
"start": 22231,
"end": 25049
} | class ____(GemmaForCausalLM):
def __init__(self, config):
super().__init__(config)
self.model = Gemma2Model(config)
self.post_init()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, Gemma2ForCausalLM
>>> model = Gemma2ForCausalLM.from_pretrained("google/gemma-2-9b")
>>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")
>>> prompt = "What is your favorite condiment?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"What is your favorite condiment?"
```"""
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
if self.config.final_logit_softcapping is not None:
logits = logits / self.config.final_logit_softcapping
logits = torch.tanh(logits)
logits = logits * self.config.final_logit_softcapping
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| Gemma2ForCausalLM |
python | getsentry__sentry | tests/sentry/snuba/metrics/test_datasource.py | {
"start": 556,
"end": 2215
} | class ____(BaseMetricsLayerTestCase, TestCase):
@property
def now(self) -> datetime:
return BaseMetricsLayerTestCase.MOCK_DATETIME
def test_get_tag_values_with_mri(self) -> None:
releases = ["1.0", "2.0"]
for release in ("1.0", "2.0"):
self.store_performance_metric(
name=TransactionMRI.DURATION.value,
tags={"release": release},
value=1,
)
values = get_tag_values(
[self.project], "release", [TransactionMRI.DURATION.value], UseCaseID.TRANSACTIONS
)
for release in releases:
assert {"key": "release", "value": release} in values
def test_get_tag_values_with_public_name(self) -> None:
satisfactions = ["miserable", "satisfied", "tolerable"]
for satisfaction in satisfactions:
self.store_performance_metric(
name=TransactionMRI.MEASUREMENTS_LCP.value,
tags={"satisfaction": satisfaction},
value=1,
)
# Valid public metric name.
values = get_tag_values(
[self.project],
"satisfaction",
[TransactionMetricKey.MEASUREMENTS_LCP.value],
UseCaseID.TRANSACTIONS,
)
for satisfaction in satisfactions:
assert {"key": "satisfaction", "value": satisfaction} in values
# Invalid public metric name.
values = get_tag_values(
[self.project],
"satisfaction",
["transaction.measurements"],
UseCaseID.TRANSACTIONS,
)
assert values == []
| DatasourceTestCase |
python | pytorch__pytorch | torch/utils/hipify/hipify_python.py | {
"start": 3197,
"end": 3566
} | class ____(Exception):
# Exception raised for errors in the input.
def __init__(self, message) -> None:
super().__init__(message)
self.message = message
def __str__(self) -> str:
return f"Input error: {self.message}"
def openf(filename, mode):
return open(filename, mode, errors='ignore')
# Color coding for printing
| InputError |
python | huggingface__transformers | src/transformers/models/llama/configuration_llama.py | {
"start": 1097,
"end": 9352
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LLaMA-7B.
e.g. [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LlamaModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
Llama 2 up to 4096, CodeLlama up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_attention_heads
```python
>>> from transformers import LlamaModel, LlamaConfig
>>> # Initializing a LLaMA llama-7b style configuration
>>> configuration = LlamaConfig()
>>> # Initializing a model from the llama-7b style configuration
>>> model = LlamaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "llama"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `LlamaModel`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 32000,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 11008,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = None,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 2048,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
pretraining_tp: Optional[int] = 1,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
mlp_bias: Optional[bool] = False,
head_dim: Optional[int] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.mlp_bias = mlp_bias
self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["LlamaConfig"]
| LlamaConfig |
python | geekcomputers__Python | Emoji Dictionary/QT_GUI.py | {
"start": 180,
"end": 4189
} | class ____(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
# Load the UI file
uic.loadUi(os.path.join(os.path.dirname(__file__), "QT_GUI.ui"), self)
self.pushButton_4.clicked.connect(self.close)
self.pushButton_2.clicked.connect(lambda: search_emoji())
self.pushButton_3.clicked.connect(lambda: clear_text())
cells = [
[
"🐒",
"🐕",
"🐎",
"🐪",
"🐁",
"🐘",
"🦘",
"🦈",
"🐓",
"🐝",
"👀",
"🦴",
"👩🏿",
"🤝",
"🧑",
"🏾",
"👱🏽",
"♀",
"🎞",
"🎨",
"⚽",
],
[
"🍕",
"🍗",
"🍜",
"☕",
"🍴",
"🍉",
"🍓",
"🌴",
"🌵",
"🛺",
"🚲",
"🛴",
"🚉",
"🚀",
"✈",
"🛰",
"🚦",
"🏳",
"🌈",
"🌎",
"🧭",
],
[
"🔥",
"❄",
"🌟",
"🌞",
"🌛",
"🌝",
"🌧",
"🧺",
"🧷",
"🪒",
"⛲",
"🗼",
"🕌",
"👁",
"🗨",
"💬",
"™",
"💯",
"🔕",
"💥",
"❤",
],
["😀", "🥰", "😴", "🤓", "🤮", "🤬", "😨", "🤑", "😫", "😎"],
]
def emoji_wight_btn():
if self.emoji_widget.isVisible():
self.emoji_widget.hide()
else:
self.emoji_widget.show()
def search_emoji():
word = self.lineEdit.text()
print(f"Field Text: {word}")
if word == "":
self.textEdit.setText("You have entered no emoji.")
else:
means = demojize(word)
self.textEdit.setText(
"Meaning of Emoji : "
+ str(word)
+ "\n\n"
+ means.replace("::", ":\n: ")
)
def add_input_emoji(emoji):
self.lineEdit.setText(self.lineEdit.text() + emoji)
def clear_text():
self.lineEdit.setText("")
self.textEdit.setText("")
self.emoji_buttons = []
self.emoji_layout = QGridLayout()
self.emoji_widget = QWidget()
self.emoji_widget.setLayout(self.emoji_layout)
self.frame_2.layout().addWidget(self.emoji_widget)
self.emoji_widget.hide()
self.pushButton.clicked.connect(lambda: emoji_wight_btn())
for row_idx, row in enumerate(cells):
for col_idx, emoji in enumerate(row):
button = QPushButton(emoji)
button.setFixedSize(40, 40)
button.setFont(QFont("Arial", 20))
button.setStyleSheet("""
QPushButton {
background-color: #ffffff;
border: 1px solid #e0e0e0;
border-radius: 5px;
}
QPushButton:hover {
background-color: #f0f0f0;
}
""")
button.clicked.connect(lambda checked, e=emoji: add_input_emoji(e))
self.emoji_layout.addWidget(button, row_idx, col_idx)
self.emoji_buttons.append(button)
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
| MainWindow |
python | bokeh__bokeh | src/bokeh/core/property/container.py | {
"start": 8043,
"end": 9675
} | class ____(Dict):
""" Accept a Python dictionary suitable as the ``data`` attribute of a
:class:`~bokeh.models.sources.ColumnDataSource`.
This class is a specialization of ``Dict`` that handles efficiently
encoding columns that are NumPy arrays.
"""
def make_descriptors(self, base_name):
""" Return a list of ``ColumnDataPropertyDescriptor`` instances to
install on a class, in order to delegate attribute access to this
property.
Args:
base_name (str) : the name of the property these descriptors are for
Returns:
list[ColumnDataPropertyDescriptor]
The descriptors returned are collected by the ``MetaHasProps``
metaclass and added to ``HasProps`` subclasses during class creation.
"""
return [ ColumnDataPropertyDescriptor(base_name, self) ]
def _hinted_value(self, value: Any, hint: DocumentPatchedEvent | None) -> Any:
from ...document.events import ColumnDataChangedEvent, ColumnsStreamedEvent
if isinstance(hint, ColumnDataChangedEvent):
return { col: hint.model.data[col] for col in hint.cols }
if isinstance(hint, ColumnsStreamedEvent):
return hint.data
return value
def wrap(self, value):
""" Some property types need to wrap their values in special containers, etc.
"""
if isinstance(value, dict):
if isinstance(value, PropertyValueColumnData):
return value
else:
return PropertyValueColumnData(value)
else:
return value
| ColumnData |
python | aio-libs__aiohttp | aiohttp/client_reqrep.py | {
"start": 3008,
"end": 4622
} | class ____:
HASHFUNC_BY_DIGESTLEN = {
16: md5,
20: sha1,
32: sha256,
}
def __init__(self, fingerprint: bytes) -> None:
digestlen = len(fingerprint)
hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(digestlen)
if not hashfunc:
raise ValueError("fingerprint has invalid length")
elif hashfunc is md5 or hashfunc is sha1:
raise ValueError("md5 and sha1 are insecure and not supported. Use sha256.")
self._hashfunc = hashfunc
self._fingerprint = fingerprint
@property
def fingerprint(self) -> bytes:
return self._fingerprint
def check(self, transport: asyncio.Transport) -> None:
if not transport.get_extra_info("sslcontext"):
return
sslobj = transport.get_extra_info("ssl_object")
cert = sslobj.getpeercert(binary_form=True)
got = self._hashfunc(cert).digest()
if got != self._fingerprint:
host, port, *_ = transport.get_extra_info("peername")
raise ServerFingerprintMismatch(self._fingerprint, got, host, port)
if ssl is not None:
SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint)
else: # pragma: no cover
SSL_ALLOWED_TYPES = (bool,) # type: ignore[unreachable]
_CONNECTION_CLOSED_EXCEPTION = ClientConnectionError("Connection closed")
_SSL_SCHEMES = frozenset(("https", "wss"))
# ConnectionKey is a NamedTuple because it is used as a key in a dict
# and a set in the connector. Since a NamedTuple is a tuple it uses
# the fast native tuple __hash__ and __eq__ implementation in CPython.
| Fingerprint |
python | lazyprogrammer__machine_learning_examples | svm_class/linear_svm_gradient.py | {
"start": 591,
"end": 4508
} | class ____:
def __init__(self, C=1.0):
self.C = C
def _objective(self, margins):
return 0.5 * self.w.dot(self.w) + self.C * np.maximum(0, 1 - margins).sum()
def fit(self, X, Y, lr=1e-5, n_iters=400):
N, D = X.shape
self.N = N
self.w = np.random.randn(D)
self.b = 0
# gradient descent
losses = []
for _ in range(n_iters):
margins = Y * self._decision_function(X)
loss = self._objective(margins)
losses.append(loss)
idx = np.where(margins < 1)[0]
grad_w = self.w - self.C * Y[idx].dot(X[idx])
self.w -= lr * grad_w
grad_b = -self.C * Y[idx].sum()
self.b -= lr * grad_b
self.support_ = np.where((Y * self._decision_function(X)) <= 1)[0]
print("num SVs:", len(self.support_))
print("w:", self.w)
print("b:", self.b)
# hist of margins
# m = Y * self._decision_function(X)
# plt.hist(m, bins=20)
# plt.show()
plt.plot(losses)
plt.title("loss per iteration")
plt.show()
def _decision_function(self, X):
return X.dot(self.w) + self.b
def predict(self, X):
return np.sign(self._decision_function(X))
def score(self, X, Y):
P = self.predict(X)
return np.mean(Y == P)
def plot_decision_boundary(model, X, Y, resolution=100, colors=('b', 'k', 'r')):
np.warnings.filterwarnings('ignore')
fig, ax = plt.subplots()
# Generate coordinate grid of shape [resolution x resolution]
# and evaluate the model over the entire space
x_range = np.linspace(X[:,0].min(), X[:,0].max(), resolution)
y_range = np.linspace(X[:,1].min(), X[:,1].max(), resolution)
grid = [[model._decision_function(np.array([[xr, yr]])) for yr in y_range] for xr in x_range]
grid = np.array(grid).reshape(len(x_range), len(y_range))
# Plot decision contours using grid and
# make a scatter plot of training data
ax.contour(x_range, y_range, grid.T, (-1, 0, 1), linewidths=(1, 1, 1),
linestyles=('--', '-', '--'), colors=colors)
ax.scatter(X[:,0], X[:,1],
c=Y, lw=0, alpha=0.3, cmap='seismic')
# Plot support vectors (non-zero alphas)
# as circled points (linewidth > 0)
mask = model.support_
ax.scatter(X[:,0][mask], X[:,1][mask],
c=Y[mask], cmap='seismic')
# debug
ax.scatter([0], [0], c='black', marker='x')
# debug
# x_axis = np.linspace(X[:,0].min(), X[:,0].max(), 100)
# w = model.w
# b = model.b
# # w[0]*x + w[1]*y + b = 0
# y_axis = -(w[0]*x_axis + b)/w[1]
# plt.plot(x_axis, y_axis, color='purple')
# margin_p = (1 - w[0]*x_axis - b)/w[1]
# plt.plot(x_axis, margin_p, color='orange')
# margin_n = -(1 + w[0]*x_axis + b)/w[1]
# plt.plot(x_axis, margin_n, color='orange')
plt.show()
def clouds():
X, Y = get_clouds()
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33)
return Xtrain, Xtest, Ytrain, Ytest, 1e-3, 200
def medical():
data = load_breast_cancer()
X, Y = data.data, data.target
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33)
return Xtrain, Xtest, Ytrain, Ytest, 1e-3, 200
if __name__ == '__main__':
Xtrain, Xtest, Ytrain, Ytest, lr, n_iters = clouds()
print("Possible labels:", set(Ytrain))
# make sure the targets are (-1, +1)
Ytrain[Ytrain == 0] = -1
Ytest[Ytest == 0] = -1
# scale the data
scaler = StandardScaler()
Xtrain = scaler.fit_transform(Xtrain)
Xtest = scaler.transform(Xtest)
# now we'll use our custom implementation
model = LinearSVM(C=1.0)
t0 = datetime.now()
model.fit(Xtrain, Ytrain, lr=lr, n_iters=n_iters)
print("train duration:", datetime.now() - t0)
t0 = datetime.now()
print("train score:", model.score(Xtrain, Ytrain), "duration:", datetime.now() - t0)
t0 = datetime.now()
print("test score:", model.score(Xtest, Ytest), "duration:", datetime.now() - t0)
if Xtrain.shape[1] == 2:
plot_decision_boundary(model, Xtrain, Ytrain)
| LinearSVM |
python | getsentry__sentry | src/sentry/exceptions.py | {
"start": 466,
"end": 525
} | class ____(SuspiciousOperation):
pass
| RestrictedIPAddress |
python | numba__numba | numba/core/errors.py | {
"start": 23714,
"end": 23761
} | class ____(TypingError):
pass
| NumbaValueError |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-pairs-in-array.py | {
"start": 480,
"end": 763
} | class ____(object):
def numberOfPairs(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
cnt = collections.Counter(nums)
pair_cnt = sum(x//2 for x in cnt.itervalues())
return [pair_cnt, len(nums)-2*pair_cnt]
| Solution2 |
python | matplotlib__matplotlib | lib/mpl_toolkits/axes_grid1/axes_grid.py | {
"start": 277,
"end": 679
} | class ____:
def __init__(self, *args, orientation, **kwargs):
self.orientation = orientation
super().__init__(*args, **kwargs)
def colorbar(self, mappable, **kwargs):
return self.get_figure(root=False).colorbar(
mappable, cax=self, location=self.orientation, **kwargs)
_cbaraxes_class_factory = cbook._make_class_factory(CbarAxesBase, "Cbar{}")
| CbarAxesBase |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 14006,
"end": 14518
} | class ____(AbstractTemplate):
def generic(self, args, kws):
[lhs, rhs] = args
if isinstance(lhs, types.BaseTuple) and isinstance(rhs, types.BaseTuple):
for u, v in zip(lhs, rhs):
# Check element-wise comparability
res = self.context.resolve_function_type(self.key, (u, v), {})
if res is None:
break
else:
return signature(types.boolean, lhs, rhs)
@infer_global(operator.eq)
| TupleCompare |
python | numpy__numpy | numpy/lib/_index_tricks_impl.py | {
"start": 20816,
"end": 22763
} | class ____:
"""
An N-dimensional iterator object to index arrays.
Given the shape of an array, an `ndindex` instance iterates over
the N-dimensional index of the array. At each iteration a tuple
of indices is returned, the last dimension is iterated over first.
Parameters
----------
shape : ints, or a single tuple of ints
The size of each dimension of the array can be passed as
individual parameters or as the elements of a tuple.
See Also
--------
ndenumerate, flatiter
Examples
--------
>>> import numpy as np
Dimensions as individual arguments
>>> for index in np.ndindex(3, 2, 1):
... print(index)
(0, 0, 0)
(0, 1, 0)
(1, 0, 0)
(1, 1, 0)
(2, 0, 0)
(2, 1, 0)
Same dimensions - but in a tuple ``(3, 2, 1)``
>>> for index in np.ndindex((3, 2, 1)):
... print(index)
(0, 0, 0)
(0, 1, 0)
(1, 0, 0)
(1, 1, 0)
(2, 0, 0)
(2, 1, 0)
"""
def __init__(self, *shape):
if len(shape) == 1 and isinstance(shape[0], tuple):
shape = shape[0]
if min(shape, default=0) < 0:
raise ValueError("negative dimensions are not allowed")
self._iter = product(*map(range, shape))
def __iter__(self):
return self
def __next__(self):
"""
Standard iterator method, updates the index and returns the index
tuple.
Returns
-------
val : tuple of ints
Returns a tuple containing the indices of the current
iteration.
"""
return next(self._iter)
# You can do all this with slice() plus a few special objects,
# but there's a lot to remember. This version is simpler because
# it uses the standard array indexing syntax.
#
# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-23
#
# Cosmetic changes by T. Oliphant 2001
#
#
| ndindex |
python | PyCQA__pylint | tests/functional/u/unused/unused_private_member.py | {
"start": 2780,
"end": 3603
} | class ____:
"""False positive tests for 4657"""
__attr_a = None
__attr_b = 'b'
@classmethod
def load_attrs(cls):
"""Load attributes."""
cls.__attr_a = 'a'
@property
def attr_a(self):
"""Get a."""
return self.__attr_a
@property
def attr_b(self):
"""Get b."""
return self.__attr_b
# Test cases where we assign self.attr, but try to
# access cls.attr
def __init__(self):
self.__attr_c = "this is an unused private instance attribute" # [unused-private-member]
@property
def attr_c(self):
"""Get c."""
return cls.__attr_c # [undefined-variable]
# https://github.com/pylint-dev/pylint/issues/4668
# Attributes assigned within __new__() has to be processed as part of the class
| FalsePositive4657 |
python | ray-project__ray | python/ray/exceptions.py | {
"start": 519,
"end": 2033
} | class ____(Exception):
"""Super class of all ray exception types."""
def to_bytes(self):
# Extract exc_info from exception object.
exc_info = (type(self), self, self.__traceback__)
formatted_exception_string = "\n".join(format_exception(*exc_info))
return RayException(
language=PYTHON,
serialized_exception=pickle.dumps(self),
formatted_exception_string=formatted_exception_string,
).SerializeToString()
@staticmethod
def from_bytes(b):
ray_exception = RayException()
ray_exception.ParseFromString(b)
return RayError.from_ray_exception(ray_exception)
@staticmethod
def from_ray_exception(ray_exception):
if ray_exception.language == PYTHON:
try:
return pickle.loads(ray_exception.serialized_exception)
except Exception:
# formatted_exception_string is set in to_bytes() above by calling
# traceback.format_exception() on the original exception. It contains
# the string representation and stack trace of the original error.
original_stacktrace = getattr(
ray_exception,
"formatted_exception_string",
"No formatted exception string available.",
)
return UnserializableException(original_stacktrace)
else:
return CrossLanguageError(ray_exception)
@PublicAPI
| RayError |
python | huggingface__transformers | src/transformers/models/zamba/configuration_zamba.py | {
"start": 819,
"end": 11292
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ZambaModel`]. It is used to instantiate a
Zamba model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Zamba-v0.1 model.
[Zyphra/Zamba-7B-v1](https://huggingface.co/Zyphra/Zamba-7B-v1)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Zamba model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ZambaModel`]
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
model has a output word embedding layer.
hidden_size (`int`, *optional*, defaults to 3712):
Dimension of the hidden representations.
attention_hidden_size (`int`, *optional*):
Dimension of the hidden representations of the inputs to the Attention layer.
intermediate_size (`int`, *optional*, defaults to 14848):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 76):
Number of hidden layers in the model.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
attention_head_dim (`int`, *optional*):
Dimension of the attention head in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 16):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=None`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245).
n_mamba_heads (`int`, *optional*, defaults to 2):
Number of mamba heads for each mamba layer.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the decoder.
hidden_mamba_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the mamba layer.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
integer value, only last `num_logits_to_keep` logits will be calculated. Default is 1 because only the
logits of the last prompt token are needed for generation. For long sequences, the logits for the entire
sequence may use a lot of memory so, setting `num_logits_to_keep=1` will reduce memory footprint
significantly.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
max_position_embeddings (`int`, *optional*, defaults to 4096):
This value doesn't have any real effect. The maximum sequence length that this model is intended to be
used with. It can be used with longer sequences, but performance may degrade.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
attn_layer_period (`int`, *optional*, defaults to 6):
Once in this many layers, we will have a shared attention layer
attn_layer_offset (`int`, *optional*, defaults to 4):
Offset of the shared attention layer
use_mamba_kernels (`bool`, *optional*, defaults to `True`):
Flag indicating whether or not to use the fast mamba kernels. These are available only if `mamba-ssm` and
`causal-conv1d` are installed, and the mamba modules are running on a CUDA device. Raises ValueError if
`True` and kernels are not available
mamba_d_state (`int`, *optional*, defaults to 16):
The dimension the mamba state space latents
mamba_d_conv (`int`, *optional*, defaults to 4):
The size of the mamba convolution kernel
mamba_expand (`int`, *optional*, defaults to 2):
Expanding factor (relative to hidden_size) used to determine the mamba intermediate size
mamba_dt_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
Rank of the mamba discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`
time_step_min (`float`, *optional*, defaults to 0.001):
Minimum `time_step` used to bound `dt_proj_bias`.
time_step_max (`float`, *optional*, defaults to 0.1):
Maximum `time_step` used to bound `dt_proj_bias`.
time_step_floor (`float`, *optional*, defaults to 0.0001):
Minimum clamping value of the `dt_proj.bias` layer initialization.
mamba_conv_bias (`bool`, *optional*, defaults to `True`):
Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block.
mamba_proj_bias (`bool`, *optional*, defaults to `False`):
Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the mamba mixer block
"""
model_type = "zamba"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=32000,
tie_word_embeddings=True,
hidden_size=3712,
attention_hidden_size=None,
intermediate_size=14848,
num_hidden_layers=76,
num_attention_heads=16,
attention_head_dim=None,
num_key_value_heads=16,
n_mamba_heads=2,
hidden_act="gelu",
hidden_mamba_act="silu",
initializer_range=0.02,
rms_norm_eps=1e-5,
use_cache=True,
num_logits_to_keep=1,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
max_position_embeddings=4096,
attention_dropout=0.0,
attn_layer_period=6,
attn_layer_offset=4,
use_mamba_kernels=True,
mamba_d_state=16,
mamba_d_conv=4,
mamba_expand=2,
mamba_dt_rank="auto",
time_step_min=0.001,
time_step_max=0.1,
time_step_floor=1e-4,
mamba_conv_bias=True,
mamba_proj_bias=False,
**kwargs,
):
self.vocab_size = vocab_size
self.tie_word_embeddings = tie_word_embeddings
self.hidden_size = hidden_size
if attention_hidden_size is None:
self.attention_hidden_size = 2 * hidden_size
else:
self.attention_hidden_size = attention_hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
if attention_head_dim is None:
self.attention_head_dim = 2 * self.hidden_size // self.num_attention_heads
else:
self.attention_head_dim = attention_head_dim
self.max_position_embeddings = max_position_embeddings
self.attention_dropout = attention_dropout
self.num_key_value_heads = num_key_value_heads
self.n_mamba_heads = n_mamba_heads
self.hidden_act = hidden_act
self.hidden_mamba_act = hidden_mamba_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.num_logits_to_keep = num_logits_to_keep
self.attn_layer_period = attn_layer_period
self.attn_layer_offset = attn_layer_offset
self.use_mamba_kernels = use_mamba_kernels
self.mamba_d_state = mamba_d_state
self.mamba_d_conv = mamba_d_conv
self.mamba_expand = mamba_expand
self.mamba_dt_rank = math.ceil(self.hidden_size / 16) if mamba_dt_rank == "auto" else mamba_dt_rank
self.time_step_min = time_step_min
self.time_step_max = time_step_max
self.time_step_floor = time_step_floor
self.mamba_conv_bias = mamba_conv_bias
self.mamba_proj_bias = mamba_proj_bias
self.layers_block_type = self._layers_block_type(num_hidden_layers, attn_layer_period, attn_layer_offset)
assert (self.mamba_expand * self.hidden_size) % self.n_mamba_heads == 0, (
"`intermediate_size` should be divisible by `n_mamba_heads`."
)
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
def _layers_block_type(self, num_hidden_layers, attn_layer_period, attn_layer_offset):
layers = [
"mamba",
"mamba",
"hybrid",
] + ["hybrid" if i % attn_layer_period == attn_layer_offset else "mamba" for i in range(num_hidden_layers - 3)]
return layers
__all__ = ["ZambaConfig"]
| ZambaConfig |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 123075,
"end": 123663
} | class ____(sgqlc.types.Enum):
"""The possible states for a workflow.
Enumeration Choices:
* `ACTIVE`: The workflow is active.
* `DELETED`: The workflow was deleted from the git repository.
* `DISABLED_FORK`: The workflow was disabled by default on a fork.
* `DISABLED_INACTIVITY`: The workflow was disabled for inactivity
in the repository.
* `DISABLED_MANUALLY`: The workflow was disabled manually.
"""
__schema__ = github_schema
__choices__ = ("ACTIVE", "DELETED", "DISABLED_FORK", "DISABLED_INACTIVITY", "DISABLED_MANUALLY")
| WorkflowState |
python | huggingface__transformers | tests/models/vitmatte/test_image_processing_vitmatte.py | {
"start": 1374,
"end": 3124
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_rescale=True,
rescale_factor=0.5,
do_pad=True,
size_divisor=10,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
self.size_divisor = size_divisor
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
"size_divisor": self.size_divisor,
}
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
| VitMatteImageProcessingTester |
python | pypa__pip | src/pip/_vendor/urllib3/util/retry.py | {
"start": 2775,
"end": 22050
} | class ____(object):
"""Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is a HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int status:
How many times to retry on bad status codes.
These are retries made on responses, where status code matches
``status_forcelist``.
Set to ``0`` to fail on the first retry of this type.
:param int other:
How many times to retry on other errors.
Other errors are errors that are not connect, read, redirect or status errors.
These errors might be raised after the request was sent to the server, so the
request might have side-effects.
Set to ``0`` to fail on the first retry of this type.
If ``total`` is not set, it's a good idea to set this to 0 to account
for unexpected edge cases and avoid infinite retry loops.
:param iterable allowed_methods:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.
Set to a ``False`` value to retry on any verb.
.. warning::
Previously this parameter was named ``method_whitelist``, that
usage is deprecated in v1.26.0 and will be removed in v2.0.
:param iterable status_forcelist:
A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ``allowed_methods``
and the response status code is in ``status_forcelist``.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ** ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.DEFAULT_BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
:param tuple history: The history of the request encountered during
each call to :meth:`~Retry.increment`. The list is in the order
the requests occurred. Each list item is of class :class:`RequestHistory`.
:param bool respect_retry_after_header:
Whether to respect Retry-After header on status codes defined as
:attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
:param iterable remove_headers_on_redirect:
Sequence of headers to remove from the request when a response
indicating a redirect is returned before firing off the redirected
request.
"""
#: Default methods to be used for ``allowed_methods``
DEFAULT_ALLOWED_METHODS = frozenset(
["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
)
#: Default status codes to be used for ``status_forcelist``
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
#: Default headers to be used for ``remove_headers_on_redirect``
DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(
["Cookie", "Authorization", "Proxy-Authorization"]
)
#: Maximum backoff time.
DEFAULT_BACKOFF_MAX = 120
def __init__(
self,
total=10,
connect=None,
read=None,
redirect=None,
status=None,
other=None,
allowed_methods=_Default,
status_forcelist=None,
backoff_factor=0,
raise_on_redirect=True,
raise_on_status=True,
history=None,
respect_retry_after_header=True,
remove_headers_on_redirect=_Default,
# TODO: Deprecated, remove in v2.0
method_whitelist=_Default,
):
if method_whitelist is not _Default:
if allowed_methods is not _Default:
raise ValueError(
"Using both 'allowed_methods' and "
"'method_whitelist' together is not allowed. "
"Instead only use 'allowed_methods'"
)
warnings.warn(
"Using 'method_whitelist' with Retry is deprecated and "
"will be removed in v2.0. Use 'allowed_methods' instead",
DeprecationWarning,
stacklevel=2,
)
allowed_methods = method_whitelist
if allowed_methods is _Default:
allowed_methods = self.DEFAULT_ALLOWED_METHODS
if remove_headers_on_redirect is _Default:
remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
self.total = total
self.connect = connect
self.read = read
self.status = status
self.other = other
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.allowed_methods = allowed_methods
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self.raise_on_status = raise_on_status
self.history = history or tuple()
self.respect_retry_after_header = respect_retry_after_header
self.remove_headers_on_redirect = frozenset(
[h.lower() for h in remove_headers_on_redirect]
)
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect,
read=self.read,
redirect=self.redirect,
status=self.status,
other=self.other,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
raise_on_status=self.raise_on_status,
history=self.history,
remove_headers_on_redirect=self.remove_headers_on_redirect,
respect_retry_after_header=self.respect_retry_after_header,
)
# TODO: If already given in **kw we use what's given to us
# If not given we need to figure out what to pass. We decide
# based on whether our class has the 'method_whitelist' property
# and if so we pass the deprecated 'method_whitelist' otherwise
# we use 'allowed_methods'. Remove in v2.0
if "method_whitelist" not in kw and "allowed_methods" not in kw:
if "method_whitelist" in self.__dict__:
warnings.warn(
"Using 'method_whitelist' with Retry is deprecated and "
"will be removed in v2.0. Use 'allowed_methods' instead",
DeprecationWarning,
)
params["method_whitelist"] = self.allowed_methods
else:
params["allowed_methods"] = self.allowed_methods
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
"""Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r", retries, new_retries)
return new_retries
def get_backoff_time(self):
"""Formula for computing the current backoff
:rtype: float
"""
# We want to consider only the last consecutive errors sequence (Ignore redirects).
consecutive_errors_len = len(
list(
takewhile(lambda x: x.redirect_location is None, reversed(self.history))
)
)
if consecutive_errors_len <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
return min(self.DEFAULT_BACKOFF_MAX, backoff_value)
def parse_retry_after(self, retry_after):
# Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
if re.match(r"^\s*[0-9]+\s*$", retry_after):
seconds = int(retry_after)
else:
retry_date_tuple = email.utils.parsedate_tz(retry_after)
if retry_date_tuple is None:
raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
if retry_date_tuple[9] is None: # Python 2
# Assume UTC if no timezone was specified
# On Python2.7, parsedate_tz returns None for a timezone offset
# instead of 0 if no timezone is given, where mktime_tz treats
# a None timezone offset as local time.
retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]
retry_date = email.utils.mktime_tz(retry_date_tuple)
seconds = retry_date - time.time()
if seconds < 0:
seconds = 0
return seconds
def get_retry_after(self, response):
"""Get the value of Retry-After in seconds."""
retry_after = response.headers.get("Retry-After")
if retry_after is None:
return None
return self.parse_retry_after(retry_after)
def sleep_for_retry(self, response=None):
retry_after = self.get_retry_after(response)
if retry_after:
time.sleep(retry_after)
return True
return False
def _sleep_backoff(self):
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def sleep(self, response=None):
"""Sleep between retry attempts.
This method will respect a server's ``Retry-After`` response header
and sleep the duration of the time requested. If that is not present, it
will use an exponential backoff. By default, the backoff factor is 0 and
this method will return immediately.
"""
if self.respect_retry_after_header and response:
slept = self.sleep_for_retry(response)
if slept:
return
self._sleep_backoff()
def _is_connection_error(self, err):
"""Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
if isinstance(err, ProxyError):
err = err.original_error
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
"""Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def _is_method_retryable(self, method):
"""Checks if a given HTTP method should be retried upon, depending if
it is included in the allowed_methods
"""
# TODO: For now favor if the Retry implementation sets its own method_whitelist
# property outside of our constructor to avoid breaking custom implementations.
if "method_whitelist" in self.__dict__:
warnings.warn(
"Using 'method_whitelist' with Retry is deprecated and "
"will be removed in v2.0. Use 'allowed_methods' instead",
DeprecationWarning,
)
allowed_methods = self.method_whitelist
else:
allowed_methods = self.allowed_methods
if allowed_methods and method.upper() not in allowed_methods:
return False
return True
def is_retry(self, method, status_code, has_retry_after=False):
"""Is this method/status code retryable? (Based on allowlists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
be retried upon on the presence of the aforementioned header)
"""
if not self._is_method_retryable(method):
return False
if self.status_forcelist and status_code in self.status_forcelist:
return True
return (
self.total
and self.respect_retry_after_header
and has_retry_after
and (status_code in self.RETRY_AFTER_STATUS_CODES)
)
def is_exhausted(self):
"""Are we out of retries?"""
retry_counts = (
self.total,
self.connect,
self.read,
self.redirect,
self.status,
self.other,
)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(
self,
method=None,
url=None,
response=None,
error=None,
_pool=None,
_stacktrace=None,
):
"""Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
connect = self.connect
read = self.read
redirect = self.redirect
status_count = self.status
other = self.other
cause = "unknown"
status = None
redirect_location = None
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
elif error and self._is_read_error(error):
# Read retry?
if read is False or not self._is_method_retryable(method):
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
elif error:
# Other retry?
if other is not None:
other -= 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = "too many redirects"
redirect_location = response.get_redirect_location()
status = response.status
else:
# Incrementing because of a server error like a 500 in
# status_forcelist and the given method is in the allowed_methods
cause = ResponseError.GENERIC_ERROR
if response and response.status:
if status_count is not None:
status_count -= 1
cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
status = response.status
history = self.history + (
RequestHistory(method, url, error, status, redirect_location),
)
new_retry = self.new(
total=total,
connect=connect,
read=read,
redirect=redirect,
status=status_count,
other=other,
history=history,
)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
return new_retry
def __repr__(self):
return (
"{cls.__name__}(total={self.total}, connect={self.connect}, "
"read={self.read}, redirect={self.redirect}, status={self.status})"
).format(cls=type(self), self=self)
def __getattr__(self, item):
if item == "method_whitelist":
# TODO: Remove this deprecated alias in v2.0
warnings.warn(
"Using 'method_whitelist' with Retry is deprecated and "
"will be removed in v2.0. Use 'allowed_methods' instead",
DeprecationWarning,
)
return self.allowed_methods
try:
return getattr(super(Retry, self), item)
except AttributeError:
return getattr(Retry, item)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
| Retry |
python | sympy__sympy | sympy/multipledispatch/conflict.py | {
"start": 39,
"end": 2117
} | class ____(Warning):
pass
def supercedes(a, b):
""" A is consistent and strictly more specific than B """
return len(a) == len(b) and all(map(issubclass, a, b))
def consistent(a, b):
""" It is possible for an argument list to satisfy both A and B """
return (len(a) == len(b) and
all(issubclass(aa, bb) or issubclass(bb, aa)
for aa, bb in zip(a, b)))
def ambiguous(a, b):
""" A is consistent with B but neither is strictly more specific """
return consistent(a, b) and not (supercedes(a, b) or supercedes(b, a))
def ambiguities(signatures):
""" All signature pairs such that A is ambiguous with B """
signatures = list(map(tuple, signatures))
return {(a, b) for a in signatures for b in signatures
if hash(a) < hash(b)
and ambiguous(a, b)
and not any(supercedes(c, a) and supercedes(c, b)
for c in signatures)}
def super_signature(signatures):
""" A signature that would break ambiguities """
n = len(signatures[0])
assert all(len(s) == n for s in signatures)
return [max([type.mro(sig[i]) for sig in signatures], key=len)[0]
for i in range(n)]
def edge(a, b, tie_breaker=hash):
""" A should be checked before B
Tie broken by tie_breaker, defaults to ``hash``
"""
if supercedes(a, b):
if supercedes(b, a):
return tie_breaker(a) > tie_breaker(b)
else:
return True
return False
def ordering(signatures):
""" A sane ordering of signatures to check, first to last
Topoological sort of edges as given by ``edge`` and ``supercedes``
"""
signatures = list(map(tuple, signatures))
edges = [(a, b) for a in signatures for b in signatures if edge(a, b)]
edges = groupby(lambda x: x[0], edges)
for s in signatures:
if s not in edges:
edges[s] = []
edges = {k: [b for a, b in v] for k, v in edges.items()}
return _toposort(edges)
| AmbiguityWarning |
python | pytorch__pytorch | torch/testing/_internal/common_fsdp.py | {
"start": 2620,
"end": 2878
} | class ____(Enum):
# Move model to DEVICE before passing to the FSDP constructor
DEVICE_BEFORE = auto()
# Move model to DEVICE after passing to the FSDP constructor
DEVICE_AFTER = auto()
# Keep on CPU
DEVICE_NEVER = auto()
| DEVICEInitMode |
python | jazzband__django-waffle | waffle/admin.py | {
"start": 497,
"end": 2028
} | class ____(admin.ModelAdmin):
search_fields = ('name', 'note')
def get_actions(self, request: HttpRequest) -> dict[str, Any]:
actions = super().get_actions(request)
if 'delete_selected' in actions:
del actions['delete_selected']
return actions
def _add_log_entry(user, model, description, action_flag):
LogEntry.objects.create(
user=user,
content_type=ContentType.objects.get_for_model(type(model)),
object_id=model.id,
object_repr=model.name + " " + description,
action_flag=action_flag
)
@admin.action(
description=_('Enable selected flags for everyone'),
permissions=('change',),
)
def enable_for_all(ma, request, qs):
# Iterate over all objects to cause cache invalidation.
for f in qs.all():
_add_log_entry(request.user, f, "on", CHANGE)
f.everyone = True
f.save()
@admin.action(
description=_('Disable selected flags for everyone'),
permissions=('change',),
)
def disable_for_all(ma, request, qs):
# Iterate over all objects to cause cache invalidation.
for f in qs.all():
_add_log_entry(request.user, f, "off", CHANGE)
f.everyone = False
f.save()
@admin.action(
description=_('Delete selected'),
permissions=('delete',),
)
def delete_individually(ma, request, qs):
# Iterate over all objects to cause cache invalidation.
for f in qs.all():
_add_log_entry(request.user, f, "deleted", DELETION)
f.delete()
| BaseAdmin |
python | walkccc__LeetCode | solutions/1541. Minimum Insertions to Balance a Parentheses String/1541.py | {
"start": 0,
"end": 619
} | class ____:
def minInsertions(self, s: str) -> int:
neededRight = 0 # Increment by 2 for each '('.
missingLeft = 0 # Increment by 1 for each missing '('.
missingRight = 0 # Increment by 1 for each missing ')'.
for c in s:
if c == '(':
if neededRight % 2 == 1:
# e.g. '()(...'
missingRight += 1
neededRight -= 1
neededRight += 2
else: # c == ')'
neededRight -= 1
if neededRight < 0:
# e.g. '()))...'
missingLeft += 1
neededRight += 2
return neededRight + missingLeft + missingRight
| Solution |
python | python-poetry__poetry | src/poetry/utils/env/env_manager.py | {
"start": 1438,
"end": 2500
} | class ____(TOMLFile):
"""
This file contains one section per project with the project's base env name
as section name. Each section contains the minor and patch version of the
python executable used to create the currently active virtualenv.
Example:
[poetry-QRErDmmj]
minor = "3.9"
patch = "3.9.13"
[poetry-core-m5r7DkRA]
minor = "3.11"
patch = "3.11.6"
"""
def remove_section(self, name: str, minor: str | None = None) -> str | None:
"""
Remove a section from the envs file.
If "minor" is given, the section is only removed if its minor value
matches "minor".
Returns the "minor" value of the removed section.
"""
envs = self.read()
current_env = envs.get(name)
if current_env is not None and (not minor or current_env["minor"] == minor):
del envs[name]
self.write(envs)
minor = current_env["minor"]
assert isinstance(minor, str)
return minor
return None
| EnvsFile |
python | scipy__scipy | scipy/linalg/tests/test_solvers.py | {
"start": 13749,
"end": 23491
} | class ____:
cases = [
# Darex examples taken from (with default parameters):
# [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
# Examples for the Numerical Solution of Algebraic Riccati
# Equations II: Discrete-Time Case', Tech. Report SPC 95_23,
# Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
# [2] T. GUDMUNDSSON, C. KENNEY, A.J. LAUB: 'Scaling of the
# Discrete-Time Algebraic Riccati Equation to Enhance Stability
# of the Schur Solution Method', IEEE Trans.Aut.Cont., vol.37(4)
#
# The format of the data is (a, b, q, r, knownfailure), where
# knownfailure is None if the test passes or a string
# indicating the reason for failure.
#
# TEST CASE 0 : Complex a; real b, q, r
(np.array([[2, 1-2j], [0, -3j]]),
np.array([[0], [1]]),
np.array([[1, 0], [0, 2]]),
np.array([[1]]),
None),
# TEST CASE 1 :Real a, q, r; complex b
(np.array([[2, 1], [0, -1]]),
np.array([[-2j], [1j]]),
np.array([[1, 0], [0, 2]]),
np.array([[1]]),
None),
# TEST CASE 2 : Real a, b; complex q, r
(np.array([[3, 1], [0, -1]]),
np.array([[1, 2], [1, 3]]),
np.array([[1, 1+1j], [1-1j, 2]]),
np.array([[2, -2j], [2j, 3]]),
None),
# TEST CASE 3 : User-reported gh-2251 (Trac #1732)
(np.array([[0.63399379, 0.54906824, 0.76253406],
[0.5404729, 0.53745766, 0.08731853],
[0.27524045, 0.84922129, 0.4681622]]),
np.array([[0.96861695], [0.05532739], [0.78934047]]),
np.eye(3),
np.eye(1),
None),
# TEST CASE 4 : darex #1
(np.array([[4, 3], [-4.5, -3.5]]),
np.array([[1], [-1]]),
np.array([[9, 6], [6, 4]]),
np.array([[1]]),
None),
# TEST CASE 5 : darex #2
(np.array([[0.9512, 0], [0, 0.9048]]),
np.array([[4.877, 4.877], [-1.1895, 3.569]]),
np.array([[0.005, 0], [0, 0.02]]),
np.array([[1/3, 0], [0, 3]]),
None),
# TEST CASE 6 : darex #3
(np.array([[2, -1], [1, 0]]),
np.array([[1], [0]]),
np.array([[0, 0], [0, 1]]),
np.array([[0]]),
None),
# TEST CASE 7 : darex #4 (skipped the gen. Ric. term S)
(np.array([[0, 1], [0, -1]]),
np.array([[1, 0], [2, 1]]),
np.array([[-4, -4], [-4, 7]]) * (1/11),
np.array([[9, 3], [3, 1]]),
None),
# TEST CASE 8 : darex #5
(np.array([[0, 1], [0, 0]]),
np.array([[0], [1]]),
np.array([[1, 2], [2, 4]]),
np.array([[1]]),
None),
# TEST CASE 9 : darex #6
(np.array([[0.998, 0.067, 0, 0],
[-.067, 0.998, 0, 0],
[0, 0, 0.998, 0.153],
[0, 0, -.153, 0.998]]),
np.array([[0.0033, 0.0200],
[0.1000, -.0007],
[0.0400, 0.0073],
[-.0028, 0.1000]]),
np.array([[1.87, 0, 0, -0.244],
[0, 0.744, 0.205, 0],
[0, 0.205, 0.589, 0],
[-0.244, 0, 0, 1.048]]),
np.eye(2),
None),
# TEST CASE 10 : darex #7
(np.array([[0.984750, -.079903, 0.0009054, -.0010765],
[0.041588, 0.998990, -.0358550, 0.0126840],
[-.546620, 0.044916, -.3299100, 0.1931800],
[2.662400, -.100450, -.9245500, -.2632500]]),
np.array([[0.0037112, 0.0007361],
[-.0870510, 9.3411e-6],
[-1.198440, -4.1378e-4],
[-3.192700, 9.2535e-4]]),
np.eye(4)*1e-2,
np.eye(2),
None),
# TEST CASE 11 : darex #8
(np.array([[-0.6000000, -2.2000000, -3.6000000, -5.4000180],
[1.0000000, 0.6000000, 0.8000000, 3.3999820],
[0.0000000, 1.0000000, 1.8000000, 3.7999820],
[0.0000000, 0.0000000, 0.0000000, -0.9999820]]),
np.array([[1.0, -1.0, -1.0, -1.0],
[0.0, 1.0, -1.0, -1.0],
[0.0, 0.0, 1.0, -1.0],
[0.0, 0.0, 0.0, 1.0]]),
np.array([[2, 1, 3, 6],
[1, 2, 2, 5],
[3, 2, 6, 11],
[6, 5, 11, 22]]),
np.eye(4),
None),
# TEST CASE 12 : darex #9
(np.array([[95.4070, 1.9643, 0.3597, 0.0673, 0.0190],
[40.8490, 41.3170, 16.0840, 4.4679, 1.1971],
[12.2170, 26.3260, 36.1490, 15.9300, 12.3830],
[4.1118, 12.8580, 27.2090, 21.4420, 40.9760],
[0.1305, 0.5808, 1.8750, 3.6162, 94.2800]]) * 0.01,
np.array([[0.0434, -0.0122],
[2.6606, -1.0453],
[3.7530, -5.5100],
[3.6076, -6.6000],
[0.4617, -0.9148]]) * 0.01,
np.eye(5),
np.eye(2),
None),
# TEST CASE 13 : darex #10
(np.kron(np.eye(2), np.diag([1, 1], k=1)),
np.kron(np.eye(2), np.array([[0], [0], [1]])),
np.array([[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, -1, 0],
[0, 0, 0, -1, 1, 0],
[0, 0, 0, 0, 0, 0]]),
np.array([[3, 0], [0, 1]]),
None),
# TEST CASE 14 : darex #11
(0.001 * np.array(
[[870.1, 135.0, 11.59, .5014, -37.22, .3484, 0, 4.242, 7.249],
[76.55, 897.4, 12.72, 0.5504, -40.16, .3743, 0, 4.53, 7.499],
[-127.2, 357.5, 817, 1.455, -102.8, .987, 0, 11.85, 18.72],
[-363.5, 633.9, 74.91, 796.6, -273.5, 2.653, 0, 31.72, 48.82],
[-960, 1645.9, -128.9, -5.597, 71.42, 7.108, 0, 84.52, 125.9],
[-664.4, 112.96, -88.89, -3.854, 84.47, 13.6, 0, 144.3, 101.6],
[-410.2, 693, -54.71, -2.371, 66.49, 12.49, .1063, 99.97, 69.67],
[-179.9, 301.7, -23.93, -1.035, 60.59, 22.16, 0, 213.9, 35.54],
[-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, 0, 219.1, 215.2]]),
np.array([[4.7600, -0.5701, -83.6800],
[0.8790, -4.7730, -2.7300],
[1.4820, -13.1200, 8.8760],
[3.8920, -35.1300, 24.8000],
[10.3400, -92.7500, 66.8000],
[7.2030, -61.5900, 38.3400],
[4.4540, -36.8300, 20.2900],
[1.9710, -15.5400, 6.9370],
[3.7730, -30.2800, 14.6900]]) * 0.001,
np.diag([50, 0, 0, 0, 50, 0, 0, 0, 0]),
np.eye(3),
None),
# TEST CASE 15 : darex #12 - numerically least accurate example
(np.array([[0, 1e6], [0, 0]]),
np.array([[0], [1]]),
np.eye(2),
np.array([[1]]),
None),
# TEST CASE 16 : darex #13
(np.array([[16, 10, -2],
[10, 13, -8],
[-2, -8, 7]]) * (1/9),
np.eye(3),
1e6 * np.eye(3),
1e6 * np.eye(3),
None),
# TEST CASE 17 : darex #14
(np.array([[1 - 1/1e8, 0, 0, 0],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]]),
np.array([[1e-08], [0], [0], [0]]),
np.diag([0, 0, 0, 1]),
np.array([[0.25]]),
None),
# TEST CASE 18 : darex #15
(np.eye(100, k=1),
np.flipud(np.eye(100, 1)),
np.eye(100),
np.array([[1]]),
None)
]
# Makes the minimum precision requirements customized to the test.
# Here numbers represent the number of decimals that agrees with zero
# matrix when the solution x is plugged in to the equation.
#
# res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
#
# If the test is failing use "None" for that entry.
#
min_decimal = (12, 14, 13, 14, 13, 16, 18, 14, 14, 13,
14, 13, 13, 14, 12, 2, 4, 6, 10)
max_tol = [1.5 * 10**-ind for ind in min_decimal]
# relaxed tolerance in gh-18012 after bump to OpenBLAS
max_tol[11] = 2.5e-13
# relaxed tolerance in gh-20335 for linux-aarch64 build on Cirrus
# with OpenBLAS from ubuntu jammy
max_tol[15] = 2.0e-2
# relaxed tolerance in gh-20335 for OpenBLAS 3.20 on ubuntu jammy
# bump not needed for OpenBLAS 3.26
max_tol[16] = 2.0e-4
@pytest.mark.parametrize("j, case", enumerate(cases))
def test_solve_discrete_are(self, j, case):
"""Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
a, b, q, r, knownfailure = case
if knownfailure:
pytest.xfail(reason=knownfailure)
atol = self.max_tol[j]
x = solve_discrete_are(a, b, q, r)
bH = b.conj().T
xa, xb = x @ a, x @ b
res = a.conj().T @ xa - x + q
res -= a.conj().T @ xb @ (solve(r + bH @ xb, bH) @ xa)
# changed from
# assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
# in gh-18012 as it's easier to relax a tolerance and allclose is
# preferred
assert_allclose(res, np.zeros_like(res), atol=atol)
def test_infeasible(self):
# An infeasible example taken from https://arxiv.org/abs/1505.04861v1
A = np.triu(np.ones((3, 3)))
A[0, 1] = -1
B = np.array([[1, 1, 0], [0, 0, 1]]).T
Q = np.full_like(A, -2) + np.diag([8, -1, -1.9])
R = np.diag([-10, 0.1])
assert_raises(LinAlgError, solve_continuous_are, A, B, Q, R)
| TestSolveDiscreteAre |
python | joke2k__faker | faker/providers/address/id_ID/__init__.py | {
"start": 45,
"end": 11521
} | class ____(AddressProvider):
building_number_formats = ("###", "##", "#")
city_formats = ("{{city_name}}",)
postcode_formats = ("#####",)
street_name_formats = (
"{{street_prefix_short}} {{street}}",
"{{street_prefix_long}} {{street}}",
)
street_address_formats = ("{{street_name}} No. {{building_number}}",)
address_formats = (
"{{street_address}}\n{{city}}, {{state}} {{postcode}}",
"{{street_address}}\n{{city}}, {{state_abbr}} {{postcode}}",
)
# From
# http://elibrary.dephub.go.id/elibrary/media/catalog/0010-021500000000135/swf/618/Lampiran%20E%20Data%20Bandung.pdf
# https://www.surabaya.go.id/id/info-penting/47601/daftar-nama-jalan-dan-status-ja
# https://www.streetdirectory.com/indonesia/jakarta/asia_travel/street/popular/
streets = (
"Abdul Muis",
"Antapani Lama",
"Asia Afrika",
"Astana Anyar",
"BKR",
"Cihampelas",
"Cikapayang",
"Cikutra Barat",
"Cikutra Timur",
"Ciumbuleuit",
"Ciwastra",
"Dipatiukur",
"Dipenogoro",
"Dr. Djunjunan",
"Gardujati",
"Gedebage Selatan",
"Gegerkalong Hilir",
"HOS. Cokroaminoto",
"Ir. H. Djuanda",
"Jakarta",
"Jamika",
"Jend. A. Yani",
"Jend. Sudirman",
"K.H. Wahid Hasyim",
"Kebonjati",
"Kiaracondong",
"Laswi",
"Lembong",
"Merdeka",
"Moch. Ramdan",
"Moch. Toha",
"Pacuan Kuda",
"Pasir Koja",
"Pasirkoja",
"Pasteur",
"Pelajar Pejuang",
"Peta",
"PHH. Mustofa",
"Rajawali Barat",
"Rajawali Timur",
"Raya Setiabudhi",
"Raya Ujungberung",
"Rumah Sakit",
"Sadang Serang",
"Sentot Alibasa",
"Setiabudhi",
"Siliwangi",
"Soekarno Hatta",
"Sukabumi",
"Sukajadi",
"Suniaraja",
"Surapati",
"Tubagus Ismail",
"Veteran",
"W.R. Supratman",
"Bangka Raya",
"Cempaka",
"Cihampelas",
"Erlangga",
"Rawamangun",
"Waringin",
"Ronggowarsito",
"Rajiman",
"Yos Sudarso",
"S. Parman",
"Monginsidi",
"M.T Haryono",
"Ahmad Dahlan",
"Jayawijaya",
"R.E Martadinata",
"M.H Thamrin",
"Stasiun Wonokromo",
"Ahmad Yani",
"Joyoboyo",
"Indragiri",
"Kutai",
"Kutisari Selatan",
"Rungkut Industri",
"Kendalsari",
"Wonoayu",
"Medokan Ayu",
"KH Amin Jasuta",
"H.J Maemunah",
"Suryakencana",
"Kapten Muslihat",
"Otto Iskandardinata",
"Tebet Barat Dalam",
)
street_prefixes_long = (
"Jalan",
"Gang",
)
street_prefixes_short = (
"Jl.",
"Gg.",
)
# From
# https://id.wikipedia.org/wiki/Daftar_kabupaten_dan_kota_di_Indonesia#Daftar_kota
cities = (
"Ambon",
"Balikpapan",
"Banda Aceh",
"Bandar Lampung",
"Bandung",
"Banjar",
"Banjarbaru",
"Banjarmasin",
"Batam",
"Batu",
"Bau-Bau",
"Bekasi",
"Bengkulu",
"Bima",
"Binjai",
"Bitung",
"Blitar",
"Bogor",
"Bontang",
"Bukittinggi",
"Cilegon",
"Cimahi",
"Cirebon",
"Denpasar",
"Depok",
"Dumai",
"Gorontalo",
"Jambi",
"Jayapura",
"Kediri",
"Kendari",
"Kota Administrasi Jakarta Barat",
"Kota Administrasi Jakarta Pusat",
"Kota Administrasi Jakarta Selatan",
"Kota Administrasi Jakarta Timur",
"Kota Administrasi Jakarta Utara",
"Kotamobagu",
"Kupang",
"Langsa",
"Lhokseumawe",
"Lubuklinggau",
"Madiun",
"Magelang",
"Makassar",
"Malang",
"Manado",
"Mataram",
"Medan",
"Metro",
"Meulaboh",
"Mojokerto",
"Padang",
"Padang Sidempuan",
"Padangpanjang",
"Pagaralam",
"Palangkaraya",
"Palembang",
"Palopo",
"Palu",
"Pangkalpinang",
"Parepare",
"Pariaman",
"Pasuruan",
"Payakumbuh",
"Pekalongan",
"Pekanbaru",
"Pematangsiantar",
"Pontianak",
"Prabumulih",
"Probolinggo",
"Purwokerto",
"Sabang",
"Salatiga",
"Samarinda",
"Sawahlunto",
"Semarang",
"Serang",
"Sibolga",
"Singkawang",
"Solok",
"Sorong",
"Subulussalam",
"Sukabumi",
"Sungai Penuh",
"Surabaya",
"Surakarta",
"Tangerang",
"Tangerang Selatan",
"Tanjungbalai",
"Tanjungpinang",
"Tarakan",
"Tasikmalaya",
"Tebingtinggi",
"Tegal",
"Ternate",
"Tidore Kepulauan",
"Tomohon",
"Tual",
"Yogyakarta",
)
# From https://id.wikipedia.org/wiki/Daftar_provinsi_di_Indonesia
states = (
"Aceh",
"Bali",
"Banten",
"Bengkulu",
"DI Yogyakarta",
"DKI Jakarta",
"Gorontalo",
"Jambi",
"Jawa Barat",
"Jawa Tengah",
"Jawa Timur",
"Kalimantan Barat",
"Kalimantan Selatan",
"Kalimantan Tengah",
"Kalimantan Timur",
"Kalimantan Utara",
"Kepulauan Bangka Belitung",
"Kepulauan Riau",
"Lampung",
"Maluku",
"Maluku Utara",
"Nusa Tenggara Barat",
"Nusa Tenggara Timur",
"Papua",
"Papua Barat",
"Riau",
"Sulawesi Barat",
"Sulawesi Selatan",
"Sulawesi Tengah",
"Sulawesi Tenggara",
"Sulawesi Utara",
"Sumatera Barat",
"Sumatera Selatan",
"Sumatera Utara",
)
# https://id.wikipedia.org/wiki/Daftar_provinsi_di_Indonesia
states_abbr = (
"AC",
"BA",
"BT",
"BE",
"YO",
"JK",
"GO",
"JA",
"JB",
"JT",
"JI",
"KB",
"KS",
"KT",
"KI",
"KU",
"BB",
"KR",
"LA",
"MA",
"MU",
"NB",
"NT",
"PA",
"PB",
"RI",
"SR",
"SN",
"ST",
"SG",
"SU",
"SB",
"SS",
"SU",
)
# From https://id.wikipedia.org/wiki/Daftar_negara-negara_di_dunia
countries = (
"Afganistan",
"Afrika Selatan",
"Afrika Tengah",
"Albania",
"Aljazair",
"Amerika Serikat",
"Andorra",
"Angola",
"Antigua dan Barbuda",
"Arab Saudi",
"Argentina",
"Armenia",
"Australia",
"Austria",
"Azerbaijan",
"Bahama",
"Bahrain",
"Bangladesh",
"Barbados",
"Belanda",
"Belarus",
"Belgia",
"Belize",
"Benin",
"Bhutan",
"Bolivia",
"Bosnia dan Herzegovina",
"Botswana",
"Brasil",
"Britania Raya",
"Brunei",
"Bulgaria",
"Burkina Faso",
"Burundi",
"Ceko",
"Chad",
"Chili",
"Denmark",
"Djibouti",
"Dominika",
"Ekuador",
"El Salvador",
"Eritrea",
"Estonia",
"Ethiopia",
"Federasi Mikronesia",
"Fiji",
"Filipina",
"Finlandia",
"Gabon",
"Gambia",
"Georgia",
"Ghana",
"Grenada",
"Guatemala",
"Guinea",
"Guinea Khatulistiwa",
"Guinea-Bissau",
"Guyana",
"Haiti",
"Honduras",
"Hongaria",
"India",
"Indonesia",
"Irak",
"Iran",
"Islandia",
"Israel",
"Italia",
"Jamaika",
"Jepang",
"Jerman",
"Kamboja",
"Kamerun",
"Kanada",
"Kazakhstan",
"Kenya",
"Kepulauan Marshall",
"Kepulauan Solomon",
"Kirgizstan",
"Kiribati",
"Kolombia",
"Komoro",
"Korea Selatan",
"Korea Utara",
"Kosta Rika",
"Kroasia",
"Kuba",
"Kuwait",
"Laos",
"Latvia",
"Lebanon",
"Lesotho",
"Liberia",
"Libya",
"Liechtenstein",
"Lituania",
"Luksemburg",
"Madagaskar",
"Makedonia Utara",
"Maladewa",
"Malawi",
"Malaysia",
"Mali",
"Malta",
"Maroko",
"Mauritania",
"Mauritius",
"Meksiko",
"Mesir",
"Moldova",
"Monako",
"Mongolia",
"Montenegro",
"Mozambik",
"Myanmar",
"Namibia",
"Nauru",
"Nepal",
"Niger",
"Nigeria",
"Nikaragua",
"Norwegia",
"Oman",
"Pakistan",
"Palau",
"Panama",
"Pantai Gading",
"Papua Nugini",
"Paraguay",
"Perancis",
"Peru",
"Polandia",
"Portugal",
"Qatar",
"Republik Demokratik Kongo",
"Republik Dominika",
"Republik Irlandia",
"Republik Kongo",
"Republik Rakyat Tiongkok",
"Rumania",
"Rusia",
"Rwanda",
"Saint Kitts dan Nevis",
"Saint Lucia",
"Saint Vincent dan Grenadine",
"Samoa",
"San Marino",
"São Tomé dan Príncipe",
"Selandia Baru",
"Senegal",
"Serbia",
"Seychelles",
"Sierra Leone",
"Singapura",
"Siprus",
"Slovenia",
"Slowakia",
"Somalia",
"Spanyol",
"Sri Lanka",
"Sudan",
"Sudan Selatan",
"Suriah",
"Suriname",
"Swaziland",
"Swedia",
"Swiss",
"Tajikistan",
"Tanjung Verde",
"Tanzania",
"Thailand",
"Timor Leste",
"Togo",
"Tonga",
"Trinidad dan Tobago",
"Tunisia",
"Turki",
"Turkmenistan",
"Tuvalu",
"Uganda",
"Ukraina",
"Uni Emirat Arab",
"Uruguay",
"Uzbekistan",
"Vanuatu",
"Vatikan",
"Venezuela",
"Vietnam",
"Yaman",
"Yordania",
"Yunani",
"Zambia",
"Zimbabwe",
)
def street(self) -> str:
return self.random_element(self.streets)
def street_prefix_short(self) -> str:
return self.random_element(self.street_prefixes_short)
def street_prefix_long(self) -> str:
return self.random_element(self.street_prefixes_long)
def city_name(self) -> str:
return self.random_element(self.cities)
def administrative_unit(self) -> str:
return self.random_element(self.states)
state = administrative_unit
def state_abbr(self) -> str:
return self.random_element(self.states_abbr)
def country(self) -> str:
return self.random_element(self.countries)
| Provider |
python | kamyu104__LeetCode-Solutions | Python/maximum-sum-bst-in-binary-tree.py | {
"start": 1301,
"end": 2013
} | class ____(object):
def maxSumBST(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def dfs(node, result):
if not node:
return True, 0, float("inf"), float("-inf")
lvalid, lsum, lmin, lmax = dfs(node.left, result)
rvalid, rsum, rmin, rmax = dfs(node.right, result)
if lvalid and rvalid and lmax < node.val < rmin:
total = lsum + node.val + rsum
result[0] = max(result[0], total)
return True, total, min(lmin, node.val), max(node.val, rmax)
return False, 0, 0, 0
result = [0]
dfs(root, result)
return result[0]
| Solution2 |
python | tiangolo__fastapi | tests/test_pydantic_v1_v2_multifile/modelsv2b.py | {
"start": 277,
"end": 325
} | class ____(BaseModel):
dup_name2: str
| ItemInList |
python | django-crispy-forms__django-crispy-forms | tests/forms.py | {
"start": 414,
"end": 1565
} | class ____(BaseForm):
is_company = forms.CharField(label="company", required=False, widget=forms.CheckboxInput())
email = forms.EmailField(
label="email", max_length=30, required=True, widget=forms.TextInput(), help_text="Insert your email"
)
password1 = forms.CharField(label="password", max_length=30, required=True, widget=forms.PasswordInput())
password2 = forms.CharField(label="re-enter password", max_length=30, required=True, widget=forms.PasswordInput())
first_name = forms.CharField(label="first name", max_length=5, required=True, widget=forms.TextInput())
last_name = forms.CharField(label="last name", max_length=5, required=True, widget=forms.TextInput())
datetime_field = forms.SplitDateTimeField(label="date time", widget=forms.SplitDateTimeWidget())
def clean(self):
super().clean()
password1 = self.cleaned_data.get("password1", None)
password2 = self.cleaned_data.get("password2", None)
if not password1 and not password2 or password1 != password2:
raise forms.ValidationError("Passwords dont match")
return self.cleaned_data
| SampleForm |
python | pytorch__pytorch | test/dynamo/cpython/3_13/seq_tests.py | {
"start": 3668,
"end": 17280
} | class ____(__TestCase):
# The type to be tested
type2test = None
def test_constructors(self):
l0 = []
l1 = [0]
l2 = [0, 1]
u = self.type2test()
u0 = self.type2test(l0)
u1 = self.type2test(l1)
u2 = self.type2test(l2)
uu = self.type2test(u)
uu0 = self.type2test(u0)
uu1 = self.type2test(u1)
uu2 = self.type2test(u2)
v = self.type2test(tuple(u))
with torch._dynamo.error_on_graph_break(False):
class OtherSeq:
def __init__(self, initseq):
self.__data = initseq
def __len__(self):
return len(self.__data)
def __getitem__(self, i):
return self.__data[i]
s = OtherSeq(u0)
v0 = self.type2test(s)
self.assertEqual(len(v0), len(s))
s = "this is also a sequence"
vv = self.type2test(s)
self.assertEqual(len(vv), len(s))
# Create from various iteratables
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (Sequence, IterFunc, IterGen,
itermulti, iterfunc):
self.assertEqual(self.type2test(g(s)), self.type2test(s))
self.assertEqual(self.type2test(IterFuncStop(s)), self.type2test())
self.assertEqual(self.type2test(c for c in "123"), self.type2test("123"))
self.assertRaises(TypeError, self.type2test, IterNextOnly(s))
self.assertRaises(TypeError, self.type2test, IterNoNext(s))
self.assertRaises(ZeroDivisionError, self.type2test, IterGenExc(s))
# Issue #23757
self.assertEqual(self.type2test(LyingTuple((2,))), self.type2test((1,)))
self.assertEqual(self.type2test(LyingList([2])), self.type2test([1]))
with self.assertRaises(TypeError):
self.type2test(unsupported_arg=[])
def test_truth(self):
self.assertFalse(self.type2test())
self.assertTrue(self.type2test([42]))
def test_getitem(self):
u = self.type2test([0, 1, 2, 3, 4])
for i in range(len(u)):
self.assertEqual(u[i], i)
self.assertEqual(u[int(i)], i)
for i in range(-len(u), -1):
self.assertEqual(u[i], len(u)+i)
self.assertEqual(u[int(i)], len(u)+i)
self.assertRaises(IndexError, u.__getitem__, -len(u)-1)
self.assertRaises(IndexError, u.__getitem__, len(u))
self.assertRaises(ValueError, u.__getitem__, slice(0,10,0))
u = self.type2test()
self.assertRaises(IndexError, u.__getitem__, 0)
self.assertRaises(IndexError, u.__getitem__, -1)
self.assertRaises(TypeError, u.__getitem__)
a = self.type2test([10, 11])
self.assertEqual(a[0], 10)
self.assertEqual(a[1], 11)
self.assertEqual(a[-2], 10)
self.assertEqual(a[-1], 11)
self.assertRaises(IndexError, a.__getitem__, -3)
self.assertRaises(IndexError, a.__getitem__, 3)
def test_getslice(self):
l = [0, 1, 2, 3, 4]
u = self.type2test(l)
self.assertEqual(u[0:0], self.type2test())
self.assertEqual(u[1:2], self.type2test([1]))
self.assertEqual(u[-2:-1], self.type2test([3]))
self.assertEqual(u[-1000:1000], u)
self.assertEqual(u[1000:-1000], self.type2test([]))
self.assertEqual(u[:], u)
self.assertEqual(u[1:None], self.type2test([1, 2, 3, 4]))
self.assertEqual(u[None:3], self.type2test([0, 1, 2]))
# Extended slices
self.assertEqual(u[::], u)
self.assertEqual(u[::2], self.type2test([0, 2, 4]))
self.assertEqual(u[1::2], self.type2test([1, 3]))
self.assertEqual(u[::-1], self.type2test([4, 3, 2, 1, 0]))
self.assertEqual(u[::-2], self.type2test([4, 2, 0]))
self.assertEqual(u[3::-2], self.type2test([3, 1]))
self.assertEqual(u[3:3:-2], self.type2test([]))
self.assertEqual(u[3:2:-2], self.type2test([3]))
self.assertEqual(u[3:1:-2], self.type2test([3]))
self.assertEqual(u[3:0:-2], self.type2test([3, 1]))
self.assertEqual(u[::-100], self.type2test([4]))
self.assertEqual(u[100:-100:], self.type2test([]))
self.assertEqual(u[-100:100:], u)
self.assertEqual(u[100:-100:-1], u[::-1])
self.assertEqual(u[-100:100:-1], self.type2test([]))
self.assertEqual(u[-100:100:2], self.type2test([0, 2, 4]))
# Test extreme cases with long ints
a = self.type2test([0,1,2,3,4])
self.assertEqual(a[ -pow(2,128): 3 ], self.type2test([0,1,2]))
self.assertEqual(a[ 3: pow(2,145) ], self.type2test([3,4]))
self.assertEqual(a[3::sys.maxsize], self.type2test([3]))
def test_contains(self):
u = self.type2test([0, 1, 2])
for i in u:
self.assertIn(i, u)
for i in min(u)-1, max(u)+1:
self.assertNotIn(i, u)
self.assertRaises(TypeError, u.__contains__)
def test_contains_fake(self):
# Sequences must use rich comparison against each item
# (unless "is" is true, or an earlier item answered)
# So ALWAYS_EQ must be found in all non-empty sequences.
self.assertNotIn(ALWAYS_EQ, self.type2test([]))
self.assertIn(ALWAYS_EQ, self.type2test([1]))
self.assertIn(1, self.type2test([ALWAYS_EQ]))
self.assertNotIn(NEVER_EQ, self.type2test([]))
self.assertNotIn(ALWAYS_EQ, self.type2test([NEVER_EQ]))
self.assertIn(NEVER_EQ, self.type2test([ALWAYS_EQ]))
def test_contains_order(self):
# Sequences must test in-order. If a rich comparison has side
# effects, these will be visible to tests against later members.
# In this test, the "side effect" is a short-circuiting raise.
with torch._dynamo.error_on_graph_break(False):
class DoNotTestEq(Exception):
pass
class StopCompares:
def __eq__(self, other):
raise DoNotTestEq
checkfirst = self.type2test([1, StopCompares()])
self.assertIn(1, checkfirst)
checklast = self.type2test([StopCompares(), 1])
self.assertRaises(DoNotTestEq, checklast.__contains__, 1)
def test_len(self):
self.assertEqual(len(self.type2test()), 0)
self.assertEqual(len(self.type2test([])), 0)
self.assertEqual(len(self.type2test([0])), 1)
self.assertEqual(len(self.type2test([0, 1, 2])), 3)
def test_minmax(self):
u = self.type2test([0, 1, 2])
self.assertEqual(min(u), 0)
self.assertEqual(max(u), 2)
def test_addmul(self):
u1 = self.type2test([0])
u2 = self.type2test([0, 1])
self.assertEqual(u1, u1 + self.type2test())
self.assertEqual(u1, self.type2test() + u1)
self.assertEqual(u1 + self.type2test([1]), u2)
self.assertEqual(self.type2test([-1]) + u1, self.type2test([-1, 0]))
self.assertEqual(self.type2test(), u2*0)
self.assertEqual(self.type2test(), 0*u2)
self.assertEqual(self.type2test(), u2*0)
self.assertEqual(self.type2test(), 0*u2)
self.assertEqual(u2, u2*1)
self.assertEqual(u2, 1*u2)
self.assertEqual(u2, u2*1)
self.assertEqual(u2, 1*u2)
self.assertEqual(u2+u2, u2*2)
self.assertEqual(u2+u2, 2*u2)
self.assertEqual(u2+u2, u2*2)
self.assertEqual(u2+u2, 2*u2)
self.assertEqual(u2+u2+u2, u2*3)
self.assertEqual(u2+u2+u2, 3*u2)
with torch._dynamo.error_on_graph_break(False):
class subclass(self.type2test):
pass
u3 = subclass([0, 1])
self.assertEqual(u3, u3*1)
self.assertIsNot(u3, u3*1)
def test_iadd(self):
u = self.type2test([0, 1])
u += self.type2test()
self.assertEqual(u, self.type2test([0, 1]))
u += self.type2test([2, 3])
self.assertEqual(u, self.type2test([0, 1, 2, 3]))
u += self.type2test([4, 5])
self.assertEqual(u, self.type2test([0, 1, 2, 3, 4, 5]))
u = self.type2test("spam")
u += self.type2test("eggs")
self.assertEqual(u, self.type2test("spameggs"))
def test_imul(self):
u = self.type2test([0, 1])
u *= 3
self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1]))
u *= 0
self.assertEqual(u, self.type2test([]))
def test_getitemoverwriteiter(self):
# Verify that __getitem__ overrides are not recognized by __iter__
with torch._dynamo.error_on_graph_break(False):
class T(self.type2test):
def __getitem__(self, key):
return str(key) + '!!!'
self.assertEqual(next(iter(T((1,2)))), 1)
def test_repeat(self):
for m in range(4):
s = tuple(range(m))
for n in range(-3, 5):
self.assertEqual(self.type2test(s*n), self.type2test(s)*n)
self.assertEqual(self.type2test(s)*(-4), self.type2test([]))
self.assertEqual(id(s), id(s*1))
def test_bigrepeat(self):
if sys.maxsize <= 2147483647:
x = self.type2test([0])
x *= 2**16
self.assertRaises(MemoryError, x.__mul__, 2**16)
if hasattr(x, '__imul__'):
self.assertRaises(MemoryError, x.__imul__, 2**16)
def test_subscript(self):
a = self.type2test([10, 11])
self.assertEqual(a.__getitem__(0), 10)
self.assertEqual(a.__getitem__(1), 11)
self.assertEqual(a.__getitem__(-2), 10)
self.assertEqual(a.__getitem__(-1), 11)
self.assertRaises(IndexError, a.__getitem__, -3)
self.assertRaises(IndexError, a.__getitem__, 3)
self.assertEqual(a.__getitem__(slice(0,1)), self.type2test([10]))
self.assertEqual(a.__getitem__(slice(1,2)), self.type2test([11]))
self.assertEqual(a.__getitem__(slice(0,2)), self.type2test([10, 11]))
self.assertEqual(a.__getitem__(slice(0,3)), self.type2test([10, 11]))
self.assertEqual(a.__getitem__(slice(3,5)), self.type2test([]))
self.assertRaises(ValueError, a.__getitem__, slice(0, 10, 0))
self.assertRaises(TypeError, a.__getitem__, 'x')
def test_count(self):
a = self.type2test([0, 1, 2])*3
self.assertEqual(a.count(0), 3)
self.assertEqual(a.count(1), 3)
self.assertEqual(a.count(3), 0)
self.assertEqual(a.count(ALWAYS_EQ), 9)
self.assertEqual(self.type2test([ALWAYS_EQ, ALWAYS_EQ]).count(1), 2)
self.assertEqual(self.type2test([ALWAYS_EQ, ALWAYS_EQ]).count(NEVER_EQ), 2)
self.assertEqual(self.type2test([NEVER_EQ, NEVER_EQ]).count(ALWAYS_EQ), 0)
self.assertRaises(TypeError, a.count)
with torch._dynamo.error_on_graph_break(False):
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
self.assertRaises(BadExc, a.count, BadCmp())
def test_index(self):
u = self.type2test([0, 1])
self.assertEqual(u.index(0), 0)
self.assertEqual(u.index(1), 1)
self.assertRaises(ValueError, u.index, 2)
u = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(u.count(0), 2)
self.assertEqual(u.index(0), 2)
self.assertEqual(u.index(0, 2), 2)
self.assertEqual(u.index(-2, -10), 0)
self.assertEqual(u.index(0, 3), 3)
self.assertEqual(u.index(0, 3, 4), 3)
self.assertRaises(ValueError, u.index, 2, 0, -10)
self.assertEqual(u.index(ALWAYS_EQ), 0)
self.assertEqual(self.type2test([ALWAYS_EQ, ALWAYS_EQ]).index(1), 0)
self.assertEqual(self.type2test([ALWAYS_EQ, ALWAYS_EQ]).index(NEVER_EQ), 0)
self.assertRaises(ValueError, self.type2test([NEVER_EQ, NEVER_EQ]).index, ALWAYS_EQ)
self.assertRaises(TypeError, u.index)
with torch._dynamo.error_on_graph_break(False):
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.index, BadCmp())
a = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(a.index(0), 2)
self.assertEqual(a.index(0, 2), 2)
self.assertEqual(a.index(0, -4), 2)
self.assertEqual(a.index(-2, -10), 0)
self.assertEqual(a.index(0, 3), 3)
self.assertEqual(a.index(0, -3), 3)
self.assertEqual(a.index(0, 3, 4), 3)
self.assertEqual(a.index(0, -3, -2), 3)
self.assertEqual(a.index(0, -4*sys.maxsize, 4*sys.maxsize), 2)
self.assertRaises(ValueError, a.index, 0, 4*sys.maxsize,-4*sys.maxsize)
self.assertRaises(ValueError, a.index, 2, 0, -10)
def test_pickle(self):
lst = self.type2test([4, 5, 6, 7])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
lst2 = pickle.loads(pickle.dumps(lst, proto))
self.assertEqual(lst2, lst)
self.assertNotEqual(id(lst2), id(lst))
@support.suppress_immortalization()
def test_free_after_iterating(self):
support.check_free_after_iterating(self, iter, self.type2test)
support.check_free_after_iterating(self, reversed, self.type2test)
| CommonTest |
python | pypa__setuptools | setuptools/config/_validate_pyproject/fastjsonschema_exceptions.py | {
"start": 1480,
"end": 1612
} | class ____(JsonSchemaException):
"""
Exception raised by generator of validation function.
"""
| JsonSchemaDefinitionException |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 36373,
"end": 39454
} | class ____:
def setup_method(self):
self.rng = np.random.default_rng(6473281180)
@pytest.mark.slow
def test_rvs_with_mode_shift(self):
# ratio_unif w/ mode shift
gig = stats.geninvgauss(2.3, 1.5)
_, p = stats.kstest(gig.rvs(size=1500, random_state=self.rng), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_without_mode_shift(self):
# ratio_unif w/o mode shift
gig = stats.geninvgauss(0.9, 0.75)
_, p = stats.kstest(gig.rvs(size=1500, random_state=self.rng), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_new_method(self):
# new algorithm of Hoermann / Leydold
gig = stats.geninvgauss(0.1, 0.2)
_, p = stats.kstest(gig.rvs(size=1500, random_state=self.rng), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_p_zero(self):
def my_ks_check(p, b):
gig = stats.geninvgauss(p, b)
rvs = gig.rvs(size=1500, random_state=self.rng)
return stats.kstest(rvs, gig.cdf)[1] > 0.05
# boundary cases when p = 0
assert_equal(my_ks_check(0, 0.2), True) # new algo
assert_equal(my_ks_check(0, 0.9), True) # ratio_unif w/o shift
assert_equal(my_ks_check(0, 1.5), True) # ratio_unif with shift
def test_rvs_negative_p(self):
# if p negative, return inverse
assert_equal(
stats.geninvgauss(-1.5, 2).rvs(size=10, random_state=1234),
1 / stats.geninvgauss(1.5, 2).rvs(size=10, random_state=1234))
def test_invgauss(self):
# test that invgauss is special case
ig = stats.geninvgauss.rvs(size=1500, p=-0.5, b=1, random_state=1464878613)
assert_equal(stats.kstest(ig, 'invgauss', args=[1])[1] > 0.15, True)
# test pdf and cdf
mu, x = 100, np.linspace(0.01, 1, 10)
pdf_ig = stats.geninvgauss.pdf(x, p=-0.5, b=1 / mu, scale=mu)
assert_allclose(pdf_ig, stats.invgauss(mu).pdf(x))
cdf_ig = stats.geninvgauss.cdf(x, p=-0.5, b=1 / mu, scale=mu)
assert_allclose(cdf_ig, stats.invgauss(mu).cdf(x))
def test_pdf_R(self):
# test against R package GIGrvg
# x <- seq(0.01, 5, length.out = 10)
# GIGrvg::dgig(x, 0.5, 1, 1)
vals_R = np.array([2.081176820e-21, 4.488660034e-01, 3.747774338e-01,
2.693297528e-01, 1.905637275e-01, 1.351476913e-01,
9.636538981e-02, 6.909040154e-02, 4.978006801e-02,
3.602084467e-02])
x = np.linspace(0.01, 5, 10)
assert_allclose(vals_R, stats.geninvgauss.pdf(x, 0.5, 1))
def test_pdf_zero(self):
# pdf at 0 is 0, needs special treatment to avoid 1/x in pdf
assert_equal(stats.geninvgauss.pdf(0, 0.5, 0.5), 0)
# if x is large and p is moderate, make sure that pdf does not
# overflow because of x**(p-1); exp(-b*x) forces pdf to zero
assert_equal(stats.geninvgauss.pdf(2e6, 50, 2), 0)
| TestGenInvGauss |
python | pypa__warehouse | tests/unit/test_sanity.py | {
"start": 228,
"end": 838
} | class ____:
def test_valid(self):
request = Request({"QUERY_STRING": ":action=browse", "PATH_INFO": "/pypi"})
sanity.junk_encoding(request)
def test_invalid_qsl(self):
request = Request({"QUERY_STRING": "%Aaction=browse"})
with pytest.raises(HTTPBadRequest, match="Invalid bytes in query string."):
sanity.junk_encoding(request)
def test_invalid_path(self):
request = Request({"PATH_INFO": "/projects/abouÅt"})
with pytest.raises(HTTPBadRequest, match="Invalid bytes in URL."):
sanity.junk_encoding(request)
| TestJunkEncoding |
python | realpython__materials | gemini-cli/todolist/src/todolist/cli.py | {
"start": 313,
"end": 3133
} | class ____:
add: TaskCallback
remove: TaskCallback
done: TaskCallback
undo: TaskCallback
rename: Callable[[str, str, str], None]
show: ListCallback
clear: ListCallback
lists: PlainCallback
export: PlainCallback
@property
def task_callbacks(self) -> tuple[TaskCallback, ...]:
return (
self.add,
self.remove,
self.done,
self.undo,
)
@property
def list_callbacks(self) -> tuple[ListCallback, ...]:
return (
self.show,
self.clear,
)
@property
def plain_callbacks(self) -> tuple[PlainCallback, ...]:
return (
self.lists,
self.export,
)
def process_cli(callbacks: Callbacks) -> None:
parser = build_parser(callbacks)
args = parser.parse_args()
if args.command:
args.callback(
**{
name: getattr(args, name)
for name in signature(args.callback).parameters
if hasattr(args, name)
}
)
else:
parser.print_help()
def build_parser(callbacks: Callbacks) -> ArgumentParser:
parser = ArgumentParser(description="A command-line task manager")
subparsers = parser.add_subparsers(title="commands", dest="command")
for cb in callbacks.task_callbacks:
subparser = subparsers.add_parser(cb.__name__, help=cb.__doc__)
subparser.set_defaults(callback=cb)
add_tasks_positional(subparser)
add_list_option(subparser)
# Rename
subparser = subparsers.add_parser("rename", help=callbacks.rename.__doc__)
subparser.add_argument("old", type=normalize, help="original task name")
subparser.add_argument("new", type=normalize, help="new task name")
subparser.set_defaults(callback=callbacks.rename)
add_list_option(subparser)
for cb in callbacks.list_callbacks:
subparser = subparsers.add_parser(cb.__name__, help=cb.__doc__)
subparser.set_defaults(callback=cb)
add_list_option(subparser)
for cb in callbacks.plain_callbacks:
subparser = subparsers.add_parser(cb.__name__, help=cb.__doc__)
subparser.set_defaults(callback=cb)
return parser
def add_tasks_positional(parser: ArgumentParser) -> None:
parser.add_argument(
"tasks",
nargs="+",
type=normalize,
help="one or more tasks (e.g., 'eggs', 'bacon')",
)
def add_list_option(parser: ArgumentParser) -> None:
parser.add_argument(
"-l",
"--list",
dest="list_name",
metavar="name",
help="optional name of the task list (e.g., 'shopping')",
default="default",
type=normalize,
)
def normalize(name: str) -> str:
return name.strip().title()
| Callbacks |
python | wandb__wandb | wandb/automations/_generated/fragments.py | {
"start": 526,
"end": 677
} | class ____(GQLResult):
typename__: Typename[Literal["ArtifactSequence"]] = "ArtifactSequence"
id: GQLId
name: str
| ArtifactSequenceScopeFields |
python | pandas-dev__pandas | pandas/core/methods/describe.py | {
"start": 3347,
"end": 10528
} | class ____(NDFrameDescriberAbstract):
"""Class responsible for creating dataobj description.
Parameters
----------
obj : DataFrame
DataFrame to be described.
include : 'all', list-like of dtypes or None
A white list of data types to include in the result.
exclude : list-like of dtypes or None
A black list of data types to omit from the result.
"""
obj: DataFrame
def __init__(
self,
obj: DataFrame,
*,
include: str | Sequence[str] | None,
exclude: str | Sequence[str] | None,
) -> None:
self.include = include
self.exclude = exclude
if obj.ndim == 2 and obj.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
super().__init__(obj)
def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame:
data = self._select_data()
ldesc: list[Series] = []
for _, series in data.items():
describe_func = select_describe_func(series)
ldesc.append(describe_func(series, percentiles))
col_names = reorder_columns(ldesc)
d = concat(
[x.reindex(col_names) for x in ldesc],
axis=1,
ignore_index=True,
sort=False,
)
d.columns = data.columns.copy()
return d
def _select_data(self) -> DataFrame:
"""Select columns to be described."""
if (self.include is None) and (self.exclude is None):
# when some numerics are found, keep only numerics
default_include: list[npt.DTypeLike] = [np.number, "datetime"]
data = self.obj.select_dtypes(include=default_include)
if len(data.columns) == 0:
data = self.obj
elif self.include == "all":
if self.exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self.obj
else:
data = self.obj.select_dtypes(
include=self.include,
exclude=self.exclude,
)
if len(data.columns) == 0:
msg = "No columns match the specified include or exclude data types"
raise ValueError(msg)
return data
def reorder_columns(ldesc: Sequence[Series]) -> list[Hashable]:
"""Set a convenient order for rows for display."""
names: list[Hashable] = []
seen_names: set[Hashable] = set()
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in seen_names:
seen_names.add(name)
names.append(name)
return names
def describe_numeric_1d(series: Series, percentiles: Sequence[float]) -> Series:
"""Describe series containing numerical data.
Parameters
----------
series : Series
Series to be described.
percentiles : list-like of numbers
The percentiles to include in the output.
"""
from pandas import Series
formatted_percentiles = format_percentiles(percentiles)
if len(percentiles) == 0:
quantiles = []
else:
quantiles = series.quantile(percentiles).tolist()
stat_index = ["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
d = (
[series.count(), series.mean(), series.std(), series.min()]
+ quantiles
+ [series.max()]
)
# GH#48340 - always return float on non-complex numeric data
dtype: DtypeObj | None
if isinstance(series.dtype, ExtensionDtype):
if isinstance(series.dtype, ArrowDtype):
if series.dtype.kind == "m":
# GH53001: describe timedeltas with object dtype
dtype = None
else:
import pyarrow as pa
dtype = ArrowDtype(pa.float64())
else:
dtype = Float64Dtype()
elif series.dtype.kind in "iufb":
# i.e. numeric but exclude complex dtype
dtype = np.dtype("float")
else:
dtype = None
return Series(d, index=stat_index, name=series.name, dtype=dtype)
def describe_categorical_1d(
data: Series,
percentiles_ignored: Sequence[float],
) -> Series:
"""Describe series containing categorical data.
Parameters
----------
data : Series
Series to be described.
percentiles_ignored : list-like of numbers
Ignored, but in place to unify interface.
"""
names = ["count", "unique", "top", "freq"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
if count_unique > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
dtype = None
else:
# If the DataFrame is empty, set 'top' and 'freq' to None
# to maintain output shape consistency
top, freq = np.nan, np.nan
dtype = "object"
result = [data.count(), count_unique, top, freq]
from pandas import Series
return Series(result, index=names, name=data.name, dtype=dtype)
def describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series:
"""Describe series containing datetime64 dtype.
Parameters
----------
data : Series
Series to be described.
percentiles : list-like of numbers
The percentiles to include in the output.
"""
# GH-30164
from pandas import Series
formatted_percentiles = format_percentiles(percentiles)
stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
d = (
[data.count(), data.mean(), data.min()]
+ data.quantile(percentiles).tolist()
+ [data.max()]
)
return Series(d, index=stat_index, name=data.name)
def select_describe_func(
data: Series,
) -> Callable:
"""Select proper function for describing series based on data type.
Parameters
----------
data : Series
Series to be described.
"""
if is_bool_dtype(data.dtype):
return describe_categorical_1d
elif is_numeric_dtype(data):
return describe_numeric_1d
elif data.dtype.kind == "M" or isinstance(data.dtype, DatetimeTZDtype):
return describe_timestamp_1d
elif data.dtype.kind == "m":
return describe_numeric_1d
else:
return describe_categorical_1d
def _refine_percentiles(
percentiles: Sequence[float] | np.ndarray | None,
) -> npt.NDArray[np.float64]:
"""
Ensure that percentiles are unique and sorted.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output.
"""
if percentiles is None:
return np.array([0.25, 0.5, 0.75])
percentiles = np.asarray(percentiles)
# get them all to be in [0, 1]
validate_percentile(percentiles)
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
assert percentiles is not None
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
return unique_pcts
| DataFrameDescriber |
python | apache__airflow | airflow-core/tests/unit/always/test_providers_manager.py | {
"start": 1787,
"end": 19496
} | class ____:
@pytest.fixture(autouse=True)
def inject_fixtures(self, caplog, cleanup_providers_manager):
self._caplog = caplog
def test_providers_are_loaded(self):
with self._caplog.at_level(logging.WARNING):
self._caplog.clear()
provider_manager = ProvidersManager()
provider_list = list(provider_manager.providers.keys())
# No need to sort the list - it should be sorted alphabetically !
for provider in provider_list:
package_name = provider_manager.providers[provider].data["package-name"]
version = provider_manager.providers[provider].version
assert re.search(r"[0-9]*\.[0-9]*\.[0-9]*.*", version)
assert package_name == provider
# just a coherence check - no exact number as otherwise we would have to update
# several tests if we add new connections/provider which is not ideal
assert len(provider_list) > 65
assert self._caplog.records == []
def test_hooks_deprecation_warnings_generated(self):
providers_manager = ProvidersManager()
providers_manager._provider_dict["test-package"] = ProviderInfo(
version="0.0.1",
data={"hook-class-names": ["airflow.providers.sftp.hooks.sftp.SFTPHook"]},
)
with pytest.warns(expected_warning=DeprecationWarning, match="hook-class-names") as warning_records:
providers_manager._discover_hooks()
assert warning_records
def test_hooks_deprecation_warnings_not_generated(self):
with warnings.catch_warnings(record=True) as warning_records:
providers_manager = ProvidersManager()
providers_manager._provider_dict["apache-airflow-providers-sftp"] = ProviderInfo(
version="0.0.1",
data={
"hook-class-names": ["airflow.providers.sftp.hooks.sftp.SFTPHook"],
"connection-types": [
{
"hook-class-name": "airflow.providers.sftp.hooks.sftp.SFTPHook",
"connection-type": "sftp",
}
],
},
)
providers_manager._discover_hooks()
assert [w.message for w in warning_records if "hook-class-names" in str(w.message)] == []
def test_warning_logs_generated(self):
providers_manager = ProvidersManager()
providers_manager._hooks_lazy_dict = LazyDictWithCache()
with self._caplog.at_level(logging.WARNING):
providers_manager._provider_dict["apache-airflow-providers-sftp"] = ProviderInfo(
version="0.0.1",
data={
"hook-class-names": ["airflow.providers.sftp.hooks.sftp.SFTPHook"],
"connection-types": [
{
"hook-class-name": "airflow.providers.sftp.hooks.sftp.SFTPHook",
"connection-type": "wrong-connection-type",
}
],
},
)
providers_manager._discover_hooks()
_ = providers_manager._hooks_lazy_dict["wrong-connection-type"]
assert len(self._caplog.entries) == 1
assert "Inconsistency!" in self._caplog[0]["event"]
assert "sftp" not in providers_manager.hooks
def test_warning_logs_not_generated(self):
with self._caplog.at_level(logging.WARNING):
providers_manager = ProvidersManager()
providers_manager._provider_dict["apache-airflow-providers-sftp"] = ProviderInfo(
version="0.0.1",
data={
"hook-class-names": ["airflow.providers.sftp.hooks.sftp.SFTPHook"],
"connection-types": [
{
"hook-class-name": "airflow.providers.sftp.hooks.sftp.SFTPHook",
"connection-type": "sftp",
}
],
},
)
providers_manager._discover_hooks()
_ = providers_manager._hooks_lazy_dict["sftp"]
assert not self._caplog.records
assert "sftp" in providers_manager.hooks
def test_already_registered_conn_type_in_provide(self):
with self._caplog.at_level(logging.WARNING):
providers_manager = ProvidersManager()
providers_manager._provider_dict["apache-airflow-providers-dummy"] = ProviderInfo(
version="0.0.1",
data={
"connection-types": [
{
"hook-class-name": "airflow.providers.dummy.hooks.dummy.DummyHook",
"connection-type": "dummy",
},
{
"hook-class-name": "airflow.providers.dummy.hooks.dummy.DummyHook2",
"connection-type": "dummy",
},
],
},
)
providers_manager._discover_hooks()
_ = providers_manager._hooks_lazy_dict["dummy"]
assert len(self._caplog.records) == 1
msg = self._caplog.messages[0]
assert msg.startswith("The connection type 'dummy' is already registered")
assert (
"different class names: 'airflow.providers.dummy.hooks.dummy.DummyHook'"
" and 'airflow.providers.dummy.hooks.dummy.DummyHook2'."
) in msg
def test_providers_manager_register_plugins(self):
providers_manager = ProvidersManager()
providers_manager._provider_dict = LazyDictWithCache()
providers_manager._provider_dict["apache-airflow-providers-apache-hive"] = ProviderInfo(
version="0.0.1",
data={
"plugins": [
{
"name": "plugin1",
"plugin-class": "airflow.providers.apache.hive.plugins.hive.HivePlugin",
}
]
},
)
providers_manager._discover_plugins()
assert len(providers_manager._plugins_set) == 1
assert providers_manager._plugins_set.pop() == PluginInfo(
name="plugin1",
plugin_class="airflow.providers.apache.hive.plugins.hive.HivePlugin",
provider_name="apache-airflow-providers-apache-hive",
)
def test_providers_manager_register_dialects(self):
providers_manager = ProvidersManager()
providers_manager._provider_dict = LazyDictWithCache()
providers_manager._provider_dict["airflow.providers.common.sql"] = ProviderInfo(
version="1.19.0",
data={
"dialects": [
{
"dialect-type": "default",
"dialect-class-name": "airflow.providers.common.sql.dialects.dialect.Dialect",
}
]
},
)
providers_manager._discover_hooks()
assert len(providers_manager._dialect_provider_dict) == 1
assert providers_manager._dialect_provider_dict.popitem() == (
"default",
DialectInfo(
name="default",
dialect_class_name="airflow.providers.common.sql.dialects.dialect.Dialect",
provider_name="airflow.providers.common.sql",
),
)
def test_hooks(self):
with warnings.catch_warnings(record=True) as warning_records:
with self._caplog.at_level(logging.WARNING):
provider_manager = ProvidersManager()
connections_list = list(provider_manager.hooks.keys())
assert len(connections_list) > 60
if len(self._caplog.records) != 0:
for record in self._caplog.records:
print(record.message, file=sys.stderr)
print(record.exc_info, file=sys.stderr)
raise AssertionError("There are warnings generated during hook imports. Please fix them")
assert [w.message for w in warning_records if "hook-class-names" in str(w.message)] == []
@skip_if_not_on_main
@pytest.mark.execution_timeout(150)
def test_hook_values(self):
provider_dependencies = json.loads(
(AIRFLOW_ROOT_PATH / "generated" / "provider_dependencies.json").read_text()
)
python_version = f"{sys.version_info.major}.{sys.version_info.minor}"
excluded_providers: list[str] = []
for provider_name, provider_info in provider_dependencies.items():
if python_version in provider_info.get("excluded-python-versions", []):
excluded_providers.append(f"apache-airflow-providers-{provider_name.replace('.', '-')}")
with warnings.catch_warnings(record=True) as warning_records:
with self._caplog.at_level(logging.WARNING):
provider_manager = ProvidersManager()
connections_list = list(provider_manager.hooks.values())
assert len(connections_list) > 60
if len(self._caplog.records) != 0:
real_warning_count = 0
for record in self._caplog.entries:
# When there is error importing provider that is excluded the provider name is in the message
if any(excluded_provider in record["event"] for excluded_provider in excluded_providers):
continue
print(record["event"], file=sys.stderr)
print(record.get("exc_info"), file=sys.stderr)
real_warning_count += 1
if real_warning_count:
if PY313:
only_ydb_and_yandexcloud_warnings = True
for record in warning_records:
if "ydb" in str(record.message) or "yandexcloud" in str(record.message):
continue
only_ydb_and_yandexcloud_warnings = False
if only_ydb_and_yandexcloud_warnings:
print(
"Only warnings from ydb and yandexcloud providers are generated, "
"which is expected in Python 3.13+",
file=sys.stderr,
)
return
raise AssertionError("There are warnings generated during hook imports. Please fix them")
assert [w.message for w in warning_records if "hook-class-names" in str(w.message)] == []
def test_connection_form_widgets(self):
provider_manager = ProvidersManager()
connections_form_widgets = list(provider_manager.connection_form_widgets.keys())
# Connection form widgets use flask_appbuilder widgets, so they're only available when it's installed
try:
import flask_appbuilder # noqa: F401
assert len(connections_form_widgets) > 29
except ImportError:
assert len(connections_form_widgets) == 0
def test_field_behaviours(self):
provider_manager = ProvidersManager()
connections_with_field_behaviours = list(provider_manager.field_behaviours.keys())
# Field behaviours are often related to connection forms, only available when flask_appbuilder is installed
try:
import flask_appbuilder # noqa: F401
assert len(connections_with_field_behaviours) > 16
except ImportError:
assert len(connections_with_field_behaviours) == 0
def test_extra_links(self):
provider_manager = ProvidersManager()
extra_link_class_names = list(provider_manager.extra_links_class_names)
assert len(extra_link_class_names) > 6
def test_logging(self):
provider_manager = ProvidersManager()
logging_class_names = list(provider_manager.logging_class_names)
assert len(logging_class_names) > 5
def test_secrets_backends(self):
provider_manager = ProvidersManager()
secrets_backends_class_names = list(provider_manager.secrets_backend_class_names)
assert len(secrets_backends_class_names) > 4
def test_trigger(self):
provider_manager = ProvidersManager()
trigger_class_names = list(provider_manager.trigger)
assert len(trigger_class_names) > 10
def test_notification(self):
provider_manager = ProvidersManager()
notification_class_names = list(provider_manager.notification)
assert len(notification_class_names) > 5
def test_auth_managers(self):
provider_manager = ProvidersManager()
auth_manager_class_names = list(provider_manager.auth_managers)
assert len(auth_manager_class_names) > 0
def test_dialects(self):
provider_manager = ProvidersManager()
dialect_class_names = list(provider_manager.dialects)
assert len(dialect_class_names) == 3
assert dialect_class_names == ["default", "mssql", "postgresql"]
@patch("airflow.providers_manager.import_string")
def test_optional_feature_no_warning(self, mock_importlib_import_string):
with self._caplog.at_level(logging.WARNING):
mock_importlib_import_string.side_effect = AirflowOptionalProviderFeatureException()
providers_manager = ProvidersManager()
providers_manager._hook_provider_dict["test_connection"] = HookClassProvider(
package_name="test_package", hook_class_name="HookClass"
)
providers_manager._import_hook(
hook_class_name=None, provider_info=None, package_name=None, connection_type="test_connection"
)
assert self._caplog.messages == []
@patch("airflow.providers_manager.import_string")
def test_optional_feature_debug(self, mock_importlib_import_string):
with self._caplog.at_level(logging.INFO):
mock_importlib_import_string.side_effect = AirflowOptionalProviderFeatureException()
providers_manager = ProvidersManager()
providers_manager._hook_provider_dict["test_connection"] = HookClassProvider(
package_name="test_package", hook_class_name="HookClass"
)
providers_manager._import_hook(
hook_class_name=None, provider_info=None, package_name=None, connection_type="test_connection"
)
assert self._caplog.messages == [
"Optional provider feature disabled when importing 'HookClass' from 'test_package' package"
]
@pytest.mark.parametrize(
("value", "expected_outputs"),
[
("a", "a"),
(1, 1),
(None, None),
(lambda: 0, 0),
(lambda: None, None),
(lambda: "z", "z"),
],
)
def test_lazy_cache_dict_resolving(value, expected_outputs):
lazy_cache_dict = LazyDictWithCache()
lazy_cache_dict["key"] = value
assert lazy_cache_dict["key"] == expected_outputs
# Retrieve it again to see if it is correctly returned again
assert lazy_cache_dict["key"] == expected_outputs
def test_lazy_cache_dict_raises_error():
def raise_method():
raise RuntimeError("test")
lazy_cache_dict = LazyDictWithCache()
lazy_cache_dict["key"] = raise_method
with pytest.raises(RuntimeError, match="test"):
_ = lazy_cache_dict["key"]
def test_lazy_cache_dict_del_item():
lazy_cache_dict = LazyDictWithCache()
def answer():
return 42
lazy_cache_dict["spam"] = answer
assert "spam" in lazy_cache_dict._raw_dict
assert "spam" not in lazy_cache_dict._resolved # Not resoled yet
assert lazy_cache_dict["spam"] == 42
assert "spam" in lazy_cache_dict._resolved
del lazy_cache_dict["spam"]
assert "spam" not in lazy_cache_dict._raw_dict
assert "spam" not in lazy_cache_dict._resolved
lazy_cache_dict["foo"] = answer
assert lazy_cache_dict["foo"] == 42
assert "foo" in lazy_cache_dict._resolved
# Emulate some mess in data, e.g. value from `_raw_dict` deleted but not from `_resolved`
del lazy_cache_dict._raw_dict["foo"]
assert "foo" in lazy_cache_dict._resolved
with pytest.raises(KeyError):
# Error expected here, but we still expect to remove also record into `resolved`
del lazy_cache_dict["foo"]
assert "foo" not in lazy_cache_dict._resolved
lazy_cache_dict["baz"] = answer
# Key in `_resolved` not created yet
assert "baz" in lazy_cache_dict._raw_dict
assert "baz" not in lazy_cache_dict._resolved
del lazy_cache_dict._raw_dict["baz"]
assert "baz" not in lazy_cache_dict._raw_dict
assert "baz" not in lazy_cache_dict._resolved
def test_lazy_cache_dict_clear():
def answer():
return 42
lazy_cache_dict = LazyDictWithCache()
assert len(lazy_cache_dict) == 0
lazy_cache_dict["spam"] = answer
lazy_cache_dict["foo"] = answer
lazy_cache_dict["baz"] = answer
assert len(lazy_cache_dict) == 3
assert len(lazy_cache_dict._raw_dict) == 3
assert not lazy_cache_dict._resolved
assert lazy_cache_dict["spam"] == 42
assert len(lazy_cache_dict._resolved) == 1
# Emulate some mess in data, contain some data into the `_resolved`
lazy_cache_dict._resolved.add("biz")
assert len(lazy_cache_dict) == 3
assert len(lazy_cache_dict._resolved) == 2
# And finally cleanup everything
lazy_cache_dict.clear()
assert len(lazy_cache_dict) == 0
assert not lazy_cache_dict._raw_dict
assert not lazy_cache_dict._resolved
| TestProviderManager |
python | django__django | django/db/backends/base/base.py | {
"start": 909,
"end": 28589
} | class ____:
"""Represent a database connection."""
# Mapping of Field objects to their column types.
data_types = {}
# Mapping of Field objects to their SQL suffix such as AUTOINCREMENT.
data_types_suffix = {}
# Mapping of Field objects to their SQL for CHECK constraints.
data_type_check_constraints = {}
ops = None
vendor = "unknown"
display_name = "unknown"
SchemaEditorClass = None
# Classes instantiated in __init__().
client_class = None
creation_class = None
features_class = None
introspection_class = None
ops_class = None
validation_class = BaseDatabaseValidation
queries_limit = 9000
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS):
# Connection related attributes.
# The underlying database connection.
self.connection = None
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.settings_dict = settings_dict
self.alias = alias
# Query logging in debug mode or when explicitly enabled.
self.queries_log = deque(maxlen=self.queries_limit)
self.force_debug_cursor = False
# Transaction related attributes.
# Tracks if the connection is in autocommit mode. Per PEP 249, by
# default, it isn't.
self.autocommit = False
# Tracks if the connection is in a transaction managed by 'atomic'.
self.in_atomic_block = False
# Increment to generate unique savepoint ids.
self.savepoint_state = 0
# List of savepoints created by 'atomic'.
self.savepoint_ids = []
# Stack of active 'atomic' blocks.
self.atomic_blocks = []
# Tracks if the outermost 'atomic' block should commit on exit,
# ie. if autocommit was active on entry.
self.commit_on_exit = True
# Tracks if the transaction should be rolled back to the next
# available savepoint because of an exception in an inner block.
self.needs_rollback = False
self.rollback_exc = None
# Connection termination related attributes.
self.close_at = None
self.closed_in_transaction = False
self.errors_occurred = False
self.health_check_enabled = False
self.health_check_done = False
# Thread-safety related attributes.
self._thread_sharing_lock = threading.Lock()
self._thread_sharing_count = 0
self._thread_ident = _thread.get_ident()
# A list of no-argument functions to run when the transaction commits.
# Each entry is an (sids, func, robust) tuple, where sids is a set of
# the active savepoint IDs when this function was registered and robust
# specifies whether it's allowed for the function to fail.
self.run_on_commit = []
# Should we run the on-commit hooks the next time set_autocommit(True)
# is called?
self.run_commit_hooks_on_set_autocommit_on = False
# A stack of wrappers to be invoked around execute()/executemany()
# calls. Each entry is a function taking five arguments: execute, sql,
# params, many, and context. It's the function's responsibility to
# call execute(sql, params, many, context).
self.execute_wrappers = []
self.client = self.client_class(self)
self.creation = self.creation_class(self)
self.features = self.features_class(self)
self.introspection = self.introspection_class(self)
self.ops = self.ops_class(self)
self.validation = self.validation_class(self)
def __repr__(self):
return (
f"<{self.__class__.__qualname__} "
f"vendor={self.vendor!r} alias={self.alias!r}>"
)
def ensure_timezone(self):
"""
Ensure the connection's timezone is set to `self.timezone_name` and
return whether it changed or not.
"""
return False
@cached_property
def timezone(self):
"""
Return a tzinfo of the database connection time zone.
This is only used when time zone support is enabled. When a datetime is
read from the database, it is always returned in this time zone.
When the database backend supports time zones, it doesn't matter which
time zone Django uses, as long as aware datetimes are used everywhere.
Other users connecting to the database can choose their own time zone.
When the database backend doesn't support time zones, the time zone
Django uses may be constrained by the requirements of other users of
the database.
"""
if not settings.USE_TZ:
return None
elif self.settings_dict["TIME_ZONE"] is None:
return datetime.UTC
else:
return zoneinfo.ZoneInfo(self.settings_dict["TIME_ZONE"])
@cached_property
def timezone_name(self):
"""
Name of the time zone of the database connection.
"""
if not settings.USE_TZ:
return settings.TIME_ZONE
elif self.settings_dict["TIME_ZONE"] is None:
return "UTC"
else:
return self.settings_dict["TIME_ZONE"]
@property
def queries_logged(self):
return self.force_debug_cursor or settings.DEBUG
@property
def queries(self):
if len(self.queries_log) == self.queries_log.maxlen:
warnings.warn(
"Limit for query logging exceeded, only the last {} queries "
"will be returned.".format(self.queries_log.maxlen),
stacklevel=2,
)
return list(self.queries_log)
def get_database_version(self):
"""Return a tuple of the database's version."""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a get_database_version() "
"method."
)
def check_database_version_supported(self):
"""
Raise an error if the database version isn't supported by this
version of Django.
"""
if (
self.features.minimum_database_version is not None
and self.get_database_version() < self.features.minimum_database_version
):
db_version = ".".join(map(str, self.get_database_version()))
min_db_version = ".".join(map(str, self.features.minimum_database_version))
raise NotSupportedError(
f"{self.display_name} {min_db_version} or later is required "
f"(found {db_version})."
)
# ##### Backend-specific methods for creating connections and cursors #####
def get_connection_params(self):
"""Return a dict of parameters suitable for get_new_connection."""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a get_connection_params() "
"method"
)
def get_new_connection(self, conn_params):
"""Open a connection to the database."""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a get_new_connection() "
"method"
)
def init_connection_state(self):
"""Initialize the database connection settings."""
if self.alias not in RAN_DB_VERSION_CHECK:
self.check_database_version_supported()
RAN_DB_VERSION_CHECK.add(self.alias)
def create_cursor(self, name=None):
"""Create a cursor. Assume that a connection is established."""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a create_cursor() method"
)
# ##### Backend-specific methods for creating connections #####
@async_unsafe
def connect(self):
"""Connect to the database. Assume that the connection is closed."""
# Check for invalid configurations.
self.check_settings()
# In case the previous connection was closed while in an atomic block
self.in_atomic_block = False
self.savepoint_ids = []
self.atomic_blocks = []
self.needs_rollback = False
# Reset parameters defining when to close/health-check the connection.
self.health_check_enabled = self.settings_dict["CONN_HEALTH_CHECKS"]
max_age = self.settings_dict["CONN_MAX_AGE"]
self.close_at = None if max_age is None else time.monotonic() + max_age
self.closed_in_transaction = False
self.errors_occurred = False
# New connections are healthy.
self.health_check_done = True
# Establish the connection
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.set_autocommit(self.settings_dict["AUTOCOMMIT"])
self.init_connection_state()
connection_created.send(sender=self.__class__, connection=self)
self.run_on_commit = []
def check_settings(self):
if self.settings_dict["TIME_ZONE"] is not None and not settings.USE_TZ:
raise ImproperlyConfigured(
"Connection '%s' cannot set TIME_ZONE because USE_TZ is False."
% self.alias
)
@async_unsafe
def ensure_connection(self):
"""Guarantee that a connection to the database is established."""
if self.connection is None:
if self.in_atomic_block and self.closed_in_transaction:
raise ProgrammingError(
"Cannot open a new connection in an atomic block."
)
with self.wrap_database_errors:
self.connect()
# ##### Backend-specific wrappers for PEP-249 connection methods #####
def _prepare_cursor(self, cursor):
"""
Validate the connection is usable and perform database cursor wrapping.
"""
self.validate_thread_sharing()
if self.queries_logged:
wrapped_cursor = self.make_debug_cursor(cursor)
else:
wrapped_cursor = self.make_cursor(cursor)
return wrapped_cursor
def _cursor(self, name=None):
self.close_if_health_check_failed()
self.ensure_connection()
with self.wrap_database_errors:
return self._prepare_cursor(self.create_cursor(name))
def _commit(self):
if self.connection is not None:
with debug_transaction(self, "COMMIT"), self.wrap_database_errors:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
with debug_transaction(self, "ROLLBACK"), self.wrap_database_errors:
return self.connection.rollback()
def _close(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.close()
# ##### Generic wrappers for PEP-249 connection methods #####
@async_unsafe
def cursor(self):
"""Create a cursor, opening a connection if necessary."""
return self._cursor()
@async_unsafe
def commit(self):
"""Commit a transaction and reset the dirty flag."""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._commit()
# A successful commit means that the database connection works.
self.errors_occurred = False
self.run_commit_hooks_on_set_autocommit_on = True
@async_unsafe
def rollback(self):
"""Roll back a transaction and reset the dirty flag."""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._rollback()
# A successful rollback means that the database connection works.
self.errors_occurred = False
self.needs_rollback = False
self.run_on_commit = []
@async_unsafe
def close(self):
"""Close the connection to the database."""
self.validate_thread_sharing()
self.run_on_commit = []
# Don't call validate_no_atomic_block() to avoid making it difficult
# to get rid of a connection in an invalid state. The next connect()
# will reset the transaction state anyway.
if self.closed_in_transaction or self.connection is None:
return
try:
self._close()
finally:
if self.in_atomic_block:
self.closed_in_transaction = True
self.needs_rollback = True
else:
self.connection = None
# ##### Backend-specific savepoint management methods #####
def _savepoint(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_commit_sql(sid))
def _savepoint_allowed(self):
# Savepoints cannot be created outside a transaction
return self.features.uses_savepoints and not self.get_autocommit()
# ##### Generic savepoint management methods #####
@async_unsafe
def savepoint(self):
"""
Create a savepoint inside the current transaction. Return an
identifier for the savepoint that will be used for the subsequent
rollback or commit. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
thread_ident = _thread.get_ident()
tid = str(thread_ident).replace("-", "")
self.savepoint_state += 1
sid = "s%s_x%d" % (tid, self.savepoint_state)
self.validate_thread_sharing()
self._savepoint(sid)
return sid
@async_unsafe
def savepoint_rollback(self, sid):
"""
Roll back to a savepoint. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_rollback(sid)
# Remove any callbacks registered while this savepoint was active.
self.run_on_commit = [
(sids, func, robust)
for (sids, func, robust) in self.run_on_commit
if sid not in sids
]
@async_unsafe
def savepoint_commit(self, sid):
"""
Release a savepoint. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_commit(sid)
@async_unsafe
def clean_savepoints(self):
"""
Reset the counter used to generate unique savepoint ids in this thread.
"""
self.savepoint_state = 0
# ##### Backend-specific transaction management methods #####
def _set_autocommit(self, autocommit):
"""
Backend-specific implementation to enable or disable autocommit.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a _set_autocommit() method"
)
# ##### Generic transaction management methods #####
def get_autocommit(self):
"""Get the autocommit state."""
self.ensure_connection()
return self.autocommit
def set_autocommit(
self, autocommit, force_begin_transaction_with_broken_autocommit=False
):
"""
Enable or disable autocommit.
The usual way to start a transaction is to turn autocommit off.
SQLite does not properly start a transaction when disabling
autocommit. To avoid this buggy behavior and to actually enter a new
transaction, an explicit BEGIN is required. Using
force_begin_transaction_with_broken_autocommit=True will issue an
explicit BEGIN with SQLite. This option will be ignored for other
backends.
"""
self.validate_no_atomic_block()
self.close_if_health_check_failed()
self.ensure_connection()
start_transaction_under_autocommit = (
force_begin_transaction_with_broken_autocommit
and not autocommit
and hasattr(self, "_start_transaction_under_autocommit")
)
if start_transaction_under_autocommit:
self._start_transaction_under_autocommit()
elif autocommit:
self._set_autocommit(autocommit)
else:
with debug_transaction(self, "BEGIN"):
self._set_autocommit(autocommit)
self.autocommit = autocommit
if autocommit and self.run_commit_hooks_on_set_autocommit_on:
self.run_and_clear_commit_hooks()
self.run_commit_hooks_on_set_autocommit_on = False
def get_rollback(self):
"""Get the "needs rollback" flag -- for *advanced use* only."""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block."
)
return self.needs_rollback
def set_rollback(self, rollback):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block."
)
self.needs_rollback = rollback
def validate_no_atomic_block(self):
"""Raise an error if an atomic block is active."""
if self.in_atomic_block:
raise TransactionManagementError(
"This is forbidden when an 'atomic' block is active."
)
def validate_no_broken_transaction(self):
if self.needs_rollback:
raise TransactionManagementError(
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block."
) from self.rollback_exc
# ##### Foreign key constraints checks handling #####
@contextmanager
def constraint_checks_disabled(self):
"""
Disable foreign key constraint checking.
"""
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key
constraint checking. Should return True if the constraints were
disabled and will need to be reenabled.
"""
return False
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint
checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint
checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
IntegrityError if any invalid foreign key references are encountered.
"""
pass
# ##### Connection termination handling #####
def is_usable(self):
"""
Test if the database connection is usable.
This method may assume that self.connection is not None.
Actual implementations should take care not to raise exceptions
as that may prevent Django from recycling unusable connections.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require an is_usable() method"
)
def close_if_health_check_failed(self):
"""Close existing connection if it fails a health check."""
if (
self.connection is None
or not self.health_check_enabled
or self.health_check_done
):
return
if not self.is_usable():
self.close()
self.health_check_done = True
def close_if_unusable_or_obsolete(self):
"""
Close the current connection if unrecoverable errors have occurred
or if it outlived its maximum age.
"""
if self.connection is not None:
self.health_check_done = False
# If the application didn't restore the original autocommit
# setting, don't take chances, drop the connection.
if self.get_autocommit() != self.settings_dict["AUTOCOMMIT"]:
self.close()
return
# If an exception other than DataError or IntegrityError occurred
# since the last commit / rollback, check if the connection works.
if self.errors_occurred:
if self.is_usable():
self.errors_occurred = False
self.health_check_done = True
else:
self.close()
return
if self.close_at is not None and time.monotonic() >= self.close_at:
self.close()
return
# ##### Thread safety handling #####
@property
def allow_thread_sharing(self):
with self._thread_sharing_lock:
return self._thread_sharing_count > 0
def inc_thread_sharing(self):
with self._thread_sharing_lock:
self._thread_sharing_count += 1
def dec_thread_sharing(self):
with self._thread_sharing_lock:
if self._thread_sharing_count <= 0:
raise RuntimeError(
"Cannot decrement the thread sharing count below zero."
)
self._thread_sharing_count -= 1
def validate_thread_sharing(self):
"""
Validate that the connection isn't accessed by another thread than the
one which originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `inc_thread_sharing()`
method). Raise an exception if the validation fails.
"""
if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()):
raise DatabaseError(
"DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s." % (self.alias, self._thread_ident, _thread.get_ident())
)
# ##### Miscellaneous #####
def prepare_database(self):
"""
Hook to do any database check or preparation, generally called before
migrating a project or an app.
"""
pass
@cached_property
def wrap_database_errors(self):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
return DatabaseErrorWrapper(self)
def chunked_cursor(self):
"""
Return a cursor that tries to avoid caching in the database (if
supported by the database), otherwise return a regular cursor.
"""
return self.cursor()
def make_debug_cursor(self, cursor):
"""Create a cursor that logs all queries in self.queries_log."""
return utils.CursorDebugWrapper(cursor, self)
def make_cursor(self, cursor):
"""Create a cursor without debug logging."""
return utils.CursorWrapper(cursor, self)
@contextmanager
def temporary_connection(self):
"""
Context manager that ensures that a connection is established, and
if it opened one, closes it to avoid leaving a dangling connection.
This is useful for operations outside of the request-response cycle.
Provide a cursor: with self.temporary_connection() as cursor: ...
"""
must_close = self.connection is None
try:
with self.cursor() as cursor:
yield cursor
finally:
if must_close:
self.close()
@contextmanager
def _nodb_cursor(self):
"""
Return a cursor from an alternative connection to be used when there is
no need to access the main database, specifically for test db
creation/deletion. This also prevents the production database from
being exposed to potential child threads while (or after) the test
database is destroyed. Refs #10868, #17786, #16969.
"""
conn = self.__class__({**self.settings_dict, "NAME": None}, alias=NO_DB_ALIAS)
try:
with conn.cursor() as cursor:
yield cursor
finally:
conn.close()
def schema_editor(self, *args, **kwargs):
"""
Return a new instance of this backend's SchemaEditor.
"""
if self.SchemaEditorClass is None:
raise NotImplementedError(
"The SchemaEditorClass attribute of this database wrapper is still None"
)
return self.SchemaEditorClass(self, *args, **kwargs)
def on_commit(self, func, robust=False):
if not callable(func):
raise TypeError("on_commit()'s callback must be a callable.")
if self.in_atomic_block:
# Transaction in progress; save for execution on commit.
self.run_on_commit.append((set(self.savepoint_ids), func, robust))
elif not self.get_autocommit():
raise TransactionManagementError(
"on_commit() cannot be used in manual transaction management"
)
else:
# No transaction in progress and in autocommit mode; execute
# immediately.
if robust:
try:
func()
except Exception as e:
logger.error(
f"Error calling {func.__qualname__} in on_commit() (%s).",
e,
exc_info=True,
)
else:
func()
def run_and_clear_commit_hooks(self):
self.validate_no_atomic_block()
current_run_on_commit = self.run_on_commit
self.run_on_commit = []
while current_run_on_commit:
_, func, robust = current_run_on_commit.pop(0)
if robust:
try:
func()
except Exception as e:
logger.error(
f"Error calling {func.__qualname__} in on_commit() during "
f"transaction (%s).",
e,
exc_info=True,
)
else:
func()
@contextmanager
def execute_wrapper(self, wrapper):
"""
Return a context manager under which the wrapper is applied to suitable
database query executions.
"""
self.execute_wrappers.append(wrapper)
try:
yield
finally:
self.execute_wrappers.pop()
def copy(self, alias=None):
"""
Return a copy of this connection.
For tests that require two connections to the same database.
"""
settings_dict = copy.deepcopy(self.settings_dict)
if alias is None:
alias = self.alias
return type(self)(settings_dict, alias)
| BaseDatabaseWrapper |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/black/cases/composition_no_trailing_comma.py | {
"start": 0,
"end": 5587
} | class ____:
def test(self) -> None:
with patch("black.out", print):
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 1 file failed to reformat."
)
self.assertEqual(
unstyle(str(report)),
"1 file reformatted, 1 file left unchanged, 1 file failed to reformat.",
)
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 1 file left unchanged, 1 file failed to"
" reformat.",
)
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, 2 files failed to"
" reformat.",
)
for i in (a,):
if (
# Rule 1
i % 2 == 0
# Rule 2
and i % 3 == 0
):
while (
# Just a comment
call()
# Another
):
print(i)
xxxxxxxxxxxxxxxx = Yyyy2YyyyyYyyyyy(
push_manager=context.request.resource_manager,
max_items_to_push=num_items,
batch_size=Yyyy2YyyyYyyyyYyyy.FULL_SIZE
).push(
# Only send the first n items.
items=items[:num_items]
)
return (
'Utterly failed doctest test for %s\n File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def omitting_trailers(self) -> None:
get_collection(
hey_this_is_a_very_long_call, it_has_funny_attributes, really=True
)[OneLevelIndex]
get_collection(
hey_this_is_a_very_long_call, it_has_funny_attributes, really=True
)[OneLevelIndex][TwoLevelIndex][ThreeLevelIndex][FourLevelIndex]
d[0][1][2][3][4][5][6][7][8][9][10][11][12][13][14][15][16][17][18][19][20][21][
22
]
assignment = (
some.rather.elaborate.rule() and another.rule.ending_with.index[123]
)
def easy_asserts(self) -> None:
assert {
key1: value1,
key2: value2,
key3: value3,
key4: value4,
key5: value5,
key6: value6,
key7: value7,
key8: value8,
key9: value9
} == expected, "Not what we expected"
assert expected == {
key1: value1,
key2: value2,
key3: value3,
key4: value4,
key5: value5,
key6: value6,
key7: value7,
key8: value8,
key9: value9
}, "Not what we expected"
assert expected == {
key1: value1,
key2: value2,
key3: value3,
key4: value4,
key5: value5,
key6: value6,
key7: value7,
key8: value8,
key9: value9
}
def tricky_asserts(self) -> None:
assert {
key1: value1,
key2: value2,
key3: value3,
key4: value4,
key5: value5,
key6: value6,
key7: value7,
key8: value8,
key9: value9
} == expected(
value, is_going_to_be="too long to fit in a single line", srsly=True
), "Not what we expected"
assert {
key1: value1,
key2: value2,
key3: value3,
key4: value4,
key5: value5,
key6: value6,
key7: value7,
key8: value8,
key9: value9
} == expected, (
"Not what we expected and the message is too long to fit in one line"
)
assert expected(
value, is_going_to_be="too long to fit in a single line", srsly=True
) == {
key1: value1,
key2: value2,
key3: value3,
key4: value4,
key5: value5,
key6: value6,
key7: value7,
key8: value8,
key9: value9
}, "Not what we expected"
assert expected == {
key1: value1,
key2: value2,
key3: value3,
key4: value4,
key5: value5,
key6: value6,
key7: value7,
key8: value8,
key9: value9
}, (
"Not what we expected and the message is too long to fit in one line"
" because it's too long"
)
dis_c_instance_method = """\
%3d 0 LOAD_FAST 1 (x)
2 LOAD_CONST 1 (1)
4 COMPARE_OP 2 (==)
6 LOAD_FAST 0 (self)
8 STORE_ATTR 0 (x)
10 LOAD_CONST 0 (None)
12 RETURN_VALUE
""" % (
_C.__init__.__code__.co_firstlineno + 1,
)
assert (
expectedexpectedexpectedexpectedexpectedexpectedexpectedexpectedexpect
== {
key1: value1,
key2: value2,
key3: value3,
key4: value4,
key5: value5,
key6: value6,
key7: value7,
key8: value8,
key9: value9
}
)
| C |
python | getsentry__sentry | tests/sentry/incidents/test_logic.py | {
"start": 112749,
"end": 135496
} | class ____(BaseAlertRuleTriggerActionTest):
@cached_property
def action(self):
return create_alert_rule_trigger_action(
self.trigger,
AlertRuleTriggerAction.Type.EMAIL,
AlertRuleTriggerAction.TargetType.USER,
target_identifier=str(self.user.id),
)
def test(self) -> None:
type = AlertRuleTriggerAction.Type.EMAIL
target_type = AlertRuleTriggerAction.TargetType.TEAM
target_identifier = str(self.team.id)
update_alert_rule_trigger_action(
self.action, type=type, target_type=target_type, target_identifier=target_identifier
)
assert self.action.type == type.value
assert self.action.target_type == target_type.value
assert self.action.target_identifier == target_identifier
@responses.activate
def test_slack(self) -> None:
integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
external_id="1",
provider="slack",
metadata={
"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
"installation_type": "born_as_bot",
},
)
type = AlertRuleTriggerAction.Type.SLACK
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
channel_name = "#some_channel"
channel_id = "s_c"
with self.patch_msg_schedule_response(channel_id):
with self.patch_msg_delete_scheduled_response(channel_id):
action = update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=channel_name,
integration_id=integration.id,
)
assert action.alert_rule_trigger == self.trigger
assert action.type == type.value
assert action.target_type == target_type.value
assert action.target_identifier == channel_id
assert action.target_display == channel_name
assert action.integration_id == integration.id
def test_slack_not_existing(self) -> None:
integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
external_id="1",
provider="slack",
metadata={"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"},
)
type = AlertRuleTriggerAction.Type.SLACK
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
channel_name = "#some_channel_that_doesnt_exist"
with self.patch_msg_schedule_response("channel_not_found"):
with pytest.raises(InvalidTriggerActionError):
update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=channel_name,
integration_id=integration.id,
)
@patch("slack_sdk.web.client.WebClient._perform_urllib_http_request")
def test_slack_rate_limiting(self, mock_api_call: MagicMock) -> None:
"""Should handle 429 from Slack on existing Metric Alert update"""
integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
external_id="1",
provider="slack",
metadata={
"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
"installation_type": "born_as_bot",
},
)
type = AlertRuleTriggerAction.Type.SLACK
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
channel_name = "#some_channel"
mock_api_call.return_value = {
"body": orjson.dumps({"ok": False, "error": "ratelimited"}).decode(),
"headers": {},
"status": 429,
}
with self.patch_msg_schedule_response("channel_not_found"):
with pytest.raises(ApiRateLimitedError):
update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=channel_name,
integration_id=integration.id,
)
@patch("sentry.integrations.msteams.utils.get_channel_id", return_value="some_id")
def test_msteams(self, mock_get_channel_id: MagicMock) -> None:
integration, _ = self.create_provider_integration_for(
self.organization, self.user, external_id="1", provider="msteams"
)
type = AlertRuleTriggerAction.Type.MSTEAMS
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
channel_name = "some_channel"
channel_id = "some_id"
action = update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=channel_name,
integration_id=integration.id,
)
assert action.alert_rule_trigger == self.trigger
assert action.type == type.value
assert action.target_type == target_type.value
assert action.target_identifier == channel_id
assert action.target_display == channel_name
assert action.integration_id == integration.id
mock_get_channel_id.assert_called_once_with(
self.organization, integration.id, "some_channel"
)
@patch("sentry.integrations.msteams.utils.get_channel_id", return_value=None)
def test_msteams_not_existing(self, mock_get_channel_id: MagicMock) -> None:
integration, _ = self.create_provider_integration_for(
self.organization, self.user, external_id="1", provider="msteams"
)
type = AlertRuleTriggerAction.Type.MSTEAMS
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
channel_name = "some_channel"
with pytest.raises(InvalidTriggerActionError):
update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=channel_name,
integration_id=integration.id,
)
def test_pagerduty(self) -> None:
services = [
{
"type": "service",
"integration_key": "PND4F9",
"service_id": "123",
"service_name": "hellboi",
}
]
integration, org_integration = self.create_provider_integration_for(
self.organization,
self.user,
provider="pagerduty",
name="Example PagerDuty",
external_id="example-pagerduty",
metadata={"services": services},
)
with assume_test_silo_mode(SiloMode.CONTROL):
service = add_service(
org_integration,
service_name=services[0]["service_name"],
integration_key=services[0]["integration_key"],
)
type = AlertRuleTriggerAction.Type.PAGERDUTY
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
target_identifier = service["id"]
action = update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=str(target_identifier),
integration_id=integration.id,
)
assert action.alert_rule_trigger == self.trigger
assert action.type == type.value
assert action.target_type == target_type.value
assert action.target_identifier == target_identifier
assert action.target_display == "hellboi"
assert action.integration_id == integration.id
def test_pagerduty_not_existing(self) -> None:
integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
provider="pagerduty",
name="Example PagerDuty",
external_id="example-pagerduty",
)
type = AlertRuleTriggerAction.Type.PAGERDUTY
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
target_identifier = "1"
with pytest.raises(InvalidTriggerActionError):
update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=target_identifier,
integration_id=integration.id,
)
@responses.activate
def test_opsgenie(self) -> None:
metadata = {
"api_key": "1234-ABCD",
"DISCORD_BASE_URL": "https://api.opsgenie.com/",
"domain_name": "test-app.app.opsgenie.com",
}
team = {"id": "123-id", "team": "cool-team", "integration_key": "1234-5678"}
integration, org_integration = self.create_provider_integration_for(
self.organization,
self.user,
provider="opsgenie",
name="test-app",
external_id="test-app",
metadata=metadata,
)
with assume_test_silo_mode_of(OrganizationIntegration):
org_integration.config = {"team_table": [team]}
org_integration.save()
resp_data = {
"result": "Integration [sentry] is valid",
"took": 1,
"requestId": "hello-world",
}
responses.add(
responses.POST,
url="https://api.opsgenie.com/v2/integrations/authenticate",
json=resp_data,
)
type = AlertRuleTriggerAction.Type.OPSGENIE
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
action = update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=team["id"],
integration_id=integration.id,
)
assert action.alert_rule_trigger == self.trigger
assert action.type == type.value
assert action.target_type == target_type.value
assert action.target_identifier == team["id"]
assert action.target_display == "cool-team"
assert action.integration_id == integration.id
def test_opsgenie_not_existing(self) -> None:
metadata = {
"api_key": "1234-ABCD",
"DISCORD_BASE_URL": "https://api.opsgenie.com/",
"domain_name": "test-app.app.opsgenie.com",
}
integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
provider="opsgenie",
name="test-app",
external_id="test-app",
metadata=metadata,
)
type = AlertRuleTriggerAction.Type.OPSGENIE
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
target_identifier = "fake-team-id-123"
with pytest.raises(InvalidTriggerActionError):
update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=target_identifier,
integration_id=integration.id,
)
@responses.activate
def test_discord(self) -> None:
channel_id = "channel-id"
guild_id = "example-discord-server"
guild_name = "Server Name"
integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
provider="discord",
name="Example Discord",
external_id=f"{guild_id}",
metadata={
"guild_id": f"{guild_id}",
"name": f"{guild_name}",
"type": ChannelType.GUILD_TEXT.value,
},
)
type = AlertRuleTriggerAction.Type.DISCORD
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
responses.add(
method=responses.GET,
url=f"{DISCORD_BASE_URL}/channels/{channel_id}",
json={
"guild_id": f"{guild_id}",
"name": f"{guild_name}",
"type": ChannelType.GUILD_TEXT.value,
},
)
action = update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=channel_id,
integration_id=integration.id,
)
assert action.alert_rule_trigger == self.trigger
assert action.type == type.value
assert action.target_type == target_type.value
assert action.target_identifier == channel_id
assert action.target_display == channel_id
assert action.integration_id == integration.id
@responses.activate
def test_discord_invalid_channel_id(self) -> None:
channel_id = "****bad****"
guild_id = "example-discord-server"
guild_name = "Server Name"
integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
provider="discord",
name="Example Discord",
external_id=f"{guild_id}",
metadata={
"guild_id": f"{guild_id}",
"name": f"{guild_name}",
"type": ChannelType.GUILD_TEXT.value,
},
)
type = AlertRuleTriggerAction.Type.DISCORD
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
responses.add(
method=responses.GET, url=f"{DISCORD_BASE_URL}/channels/{channel_id}", status=404
)
with pytest.raises(InvalidTriggerActionError):
update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=channel_id,
integration_id=integration.id,
)
@responses.activate
def test_discord_bad_response(self) -> None:
channel_id = "channel-id"
guild_id = "example-discord-server"
guild_name = "Server Name"
integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
provider="discord",
name="Example Discord",
external_id=f"{guild_id}",
metadata={
"guild_id": f"{guild_id}",
"name": f"{guild_name}",
"type": ChannelType.GUILD_TEXT.value,
},
)
type = AlertRuleTriggerAction.Type.DISCORD
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
responses.add(
method=responses.GET,
url=f"{DISCORD_BASE_URL}/channels/{channel_id}",
body="Error",
status=500,
)
with pytest.raises(InvalidTriggerActionError):
update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=channel_id,
integration_id=integration.id,
)
@responses.activate
def test_discord_no_integration(self) -> None:
channel_id = "channel-id"
type = AlertRuleTriggerAction.Type.DISCORD
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
with pytest.raises(InvalidTriggerActionError):
update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=channel_id,
integration_id=None,
)
@responses.activate
@mock.patch("sentry.integrations.discord.utils.channel.validate_channel_id")
def test_discord_timeout(self, mock_validate_channel_id: MagicMock) -> None:
mock_validate_channel_id.side_effect = ApiTimeoutError("Discord channel lookup timed out")
channel_id = "channel-id"
guild_id = "example-discord-server"
guild_name = "Server Name"
integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
provider="discord",
name="Example Discord",
external_id=f"{guild_id}",
metadata={
"guild_id": f"{guild_id}",
"name": f"{guild_name}",
"type": ChannelType.GUILD_TEXT.value,
},
)
type = AlertRuleTriggerAction.Type.DISCORD
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
responses.add(
method=responses.GET,
url=f"{DISCORD_BASE_URL}/channels/{channel_id}",
json={
"guild_id": f"{guild_id}",
"name": f"{guild_name}",
},
)
with pytest.raises(ChannelLookupTimeoutError):
update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=channel_id,
integration_id=integration.id,
)
@responses.activate
def test_discord_channel_not_in_guild(self) -> None:
channel_id = "channel-id"
guild_id = "example-discord-server"
guild_name = "Server Name"
integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
provider="discord",
name="Example Discord",
external_id=f"{guild_id}",
metadata={
"guild_id": f"{guild_id}",
"name": f"{guild_name}",
"type": ChannelType.DM.value,
},
)
type = AlertRuleTriggerAction.Type.DISCORD
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
responses.add(
method=responses.GET,
url=f"{DISCORD_BASE_URL}/channels/{channel_id}",
json={
"guild_id": "other-guild",
"name": f"{guild_name}",
"type": ChannelType.DM.value,
},
)
with pytest.raises(InvalidTriggerActionError):
update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=channel_id,
integration_id=integration.id,
)
@responses.activate
def test_discord_unsupported_type(self) -> None:
channel_id = "channel-id"
guild_id = "example-discord-server"
guild_name = "Server Name"
integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
provider="discord",
name="Example Discord",
external_id=f"{guild_id}",
metadata={
"guild_id": f"{guild_id}",
"name": f"{guild_name}",
"type": ChannelType.DM.value,
},
)
type = AlertRuleTriggerAction.Type.DISCORD
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
responses.add(
method=responses.GET,
url=f"{DISCORD_BASE_URL}/channels/{channel_id}",
json={
"guild_id": f"{guild_id}",
"name": f"{guild_name}",
"type": ChannelType.DM.value,
},
)
with pytest.raises(InvalidTriggerActionError):
update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=channel_id,
integration_id=integration.id,
)
@responses.activate
def test_supported_priority(self) -> None:
metadata = {
"api_key": "1234-ABCD",
"DISCORD_BASE_URL": "https://api.opsgenie.com/",
"domain_name": "test-app.app.opsgenie.com",
}
team = {"id": "123-id", "team": "cool-team", "integration_key": "1234-5678"}
integration, org_integration = self.create_provider_integration_for(
self.organization,
self.user,
provider="opsgenie",
name="test-app",
external_id="test-app",
metadata=metadata,
)
with assume_test_silo_mode_of(OrganizationIntegration):
org_integration.config = {"team_table": [team]}
org_integration.save()
resp_data = {
"result": "Integration [sentry] is valid",
"took": 1,
"requestId": "hello-world",
}
responses.add(
responses.POST,
url="https://api.opsgenie.com/v2/integrations/authenticate",
json=resp_data,
)
type = AlertRuleTriggerAction.Type.OPSGENIE
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
priority = "P1"
action = update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=team["id"],
integration_id=integration.id,
priority=priority,
)
assert action.alert_rule_trigger == self.trigger
assert action.type == type.value
assert action.target_type == target_type.value
assert action.target_identifier == team["id"]
assert action.target_display == "cool-team"
assert action.integration_id == integration.id
app_config = action.get_single_sentry_app_config()
assert app_config is not None
assert app_config["priority"] == priority # priority stored in config
@patch("sentry.integrations.msteams.utils.get_channel_id", return_value="some_id")
def test_unsupported_priority(self, mock_get_channel_id: MagicMock) -> None:
integration, _ = self.create_provider_integration_for(
self.organization, self.user, external_id="1", provider="msteams"
)
type = AlertRuleTriggerAction.Type.MSTEAMS
target_type = AlertRuleTriggerAction.TargetType.SPECIFIC
channel_name = "some_channel"
channel_id = "some_id"
action = update_alert_rule_trigger_action(
self.action,
type,
target_type,
target_identifier=channel_name,
integration_id=integration.id,
priority="critical",
)
assert action.alert_rule_trigger == self.trigger
assert action.type == type.value
assert action.target_type == target_type.value
assert action.target_identifier == channel_id
assert action.target_display == channel_name
assert action.integration_id == integration.id
assert action.sentry_app_config is None # priority is not stored inside
| UpdateAlertRuleTriggerAction |
python | altair-viz__altair | altair/expr/__init__.py | {
"start": 606,
"end": 2097
} | class ____(type):
"""
Metaclass for :class:`expr`.
Currently providing read-only class properties, representing JavaScript constants.
"""
@property
def NaN(cls) -> Expression:
"""Not a number (same as JavaScript literal NaN)."""
return ConstExpression("NaN")
@property
def LN10(cls) -> Expression:
"""The natural log of 10 (alias to Math.LN10)."""
return ConstExpression("LN10")
@property
def E(cls) -> Expression:
"""The transcendental number e (alias to Math.E)."""
return ConstExpression("E")
@property
def LOG10E(cls) -> Expression:
"""The base 10 logarithm e (alias to Math.LOG10E)."""
return ConstExpression("LOG10E")
@property
def LOG2E(cls) -> Expression:
"""The base 2 logarithm of e (alias to Math.LOG2E)."""
return ConstExpression("LOG2E")
@property
def SQRT1_2(cls) -> Expression:
"""The square root of 0.5 (alias to Math.SQRT1_2)."""
return ConstExpression("SQRT1_2")
@property
def LN2(cls) -> Expression:
"""The natural log of 2 (alias to Math.LN2)."""
return ConstExpression("LN2")
@property
def SQRT2(cls) -> Expression:
"""The square root of 2 (alias to Math.SQRT1_2)."""
return ConstExpression("SQRT2")
@property
def PI(cls) -> Expression:
"""The transcendental number pi (alias to Math.PI)."""
return ConstExpression("PI")
| _ExprMeta |
python | pytest-dev__pytest | src/_pytest/assertion/__init__.py | {
"start": 2993,
"end": 3077
} | class ____(Protocol):
def mark_rewrite(self, *names: str) -> None: ...
| RewriteHook |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/sns.py | {
"start": 1207,
"end": 4623
} | class ____(AwsBaseOperator[SnsHook]):
"""
Publish a message to Amazon SNS.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SnsPublishOperator`
:param target_arn: either a TopicArn or an EndpointArn
:param message: the default message you want to send (templated)
:param subject: the message subject you want to send (templated)
:param message_attributes: the message attributes you want to send as a flat dict (data type will be
determined automatically)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
:param message_deduplication_id: Every message must have a unique message_deduplication_id.
This parameter applies only to FIFO (first-in-first-out) topics.
:param message_group_id: Tag that specifies that a message belongs to a specific message group.
This parameter applies only to FIFO (first-in-first-out) topics.
"""
aws_hook_class = SnsHook
template_fields: Sequence[str] = aws_template_fields(
"target_arn",
"message",
"subject",
"message_attributes",
"message_deduplication_id",
"message_group_id",
)
template_fields_renderers = {"message_attributes": "json"}
def __init__(
self,
*,
target_arn: str,
message: str,
subject: str | None = None,
message_attributes: dict | None = None,
message_deduplication_id: str | None = None,
message_group_id: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.target_arn = target_arn
self.message = message
self.subject = subject
self.message_attributes = message_attributes
self.message_deduplication_id = message_deduplication_id
self.message_group_id = message_group_id
def execute(self, context: Context):
self.log.info(
"Sending SNS notification to %s using %s:\nsubject=%s\nattributes=%s\nmessage=%s\nmessage_deduplication_id=%s\nmessage_group_id=%s",
self.target_arn,
self.aws_conn_id,
self.subject,
self.message_attributes,
self.message,
self.message_deduplication_id,
self.message_group_id,
)
return self.hook.publish_to_target(
target_arn=self.target_arn,
message=self.message,
subject=self.subject,
message_attributes=self.message_attributes,
message_deduplication_id=self.message_deduplication_id,
message_group_id=self.message_group_id,
)
| SnsPublishOperator |
python | ansible__ansible | test/integration/targets/gathering_facts/cache_plugins/none.py | {
"start": 457,
"end": 1061
} | class ____(BaseCacheModule):
def __init__(self, *args, **kwargs):
self.empty = {}
def get(self, key):
return self.empty.get(key)
def set(self, key, value):
return value
def keys(self):
return self.empty.keys()
def contains(self, key):
return key in self.empty
def delete(self, key):
del self.emtpy[key]
def flush(self):
self.empty = {}
def copy(self):
return self.empty.copy()
def __getstate__(self):
return self.copy()
def __setstate__(self, data):
self.empty = data
| CacheModule |
python | tox-dev__tox | src/tox/config/loader/replacer.py | {
"start": 1228,
"end": 1318
} | class ____(Exception):
"""Couldn't find end terminator in MatchExpression."""
| MatchError |
python | wandb__wandb | wandb/sdk/lib/redirect.py | {
"start": 4216,
"end": 14701
} | class ____:
"""An FSM emulating a terminal.
Characters are stored in a 2D matrix (buffer) indexed by the cursor.
"""
_MAX_LINES = 100
def __init__(self):
self.buffer = defaultdict(lambda: defaultdict(lambda: _defchar))
self.cursor = Cursor()
self._num_lines = None # Cache
# For diffing:
self._prev_num_lines = None
self._prev_last_line = None
def cursor_up(self, n=1):
n = min(n, self.cursor.y)
self.cursor.y -= n
def cursor_down(self, n=1):
self.cursor.y += n
def cursor_left(self, n=1):
n = min(n, self.cursor.x)
self.cursor.x -= n
def cursor_right(self, n=1):
self.cursor.x += n
def carriage_return(self):
self.cursor.x = 0
def cursor_position(self, line, column):
self.cursor.x = min(column, 1) - 1
self.cursor.y = min(line, 1) - 1
def cursor_column(self, column):
self.cursor.x = min(column, 1) - 1
def cursor_line(self, line):
self.cursor.y = min(line, 1) - 1
def linefeed(self):
self.cursor_down()
self.carriage_return()
def _get_line_len(self, n):
if n not in self.buffer:
return 0
line = self.buffer[n]
if not line:
return 0
n = max(line.keys())
for i in range(n, -1, -1):
if line[i] != _defchar:
return i + 1
return 0
@property
def num_lines(self):
if self._num_lines is not None:
return self._num_lines
ret = 0
if self.buffer:
n = max(self.buffer.keys())
for i in range(n, -1, -1):
if self._get_line_len(i):
ret = i + 1
break
self._num_lines = ret
return ret
def display(self):
return [
[self.buffer[i][j].data for j in range(self._get_line_len(i))]
for i in range(self.num_lines)
]
def erase_screen(self, mode=0):
if mode == 0:
for i in range(self.cursor.y + 1, self.num_lines):
if i in self.buffer:
del self.buffer[i]
self.erase_line(mode)
if mode == 1:
for i in range(self.cursor.y):
if i in self.buffer:
del self.buffer[i]
self.erase_line(mode)
elif mode == 2 or mode == 3:
self.buffer.clear()
def erase_line(self, mode=0):
curr_line = self.buffer[self.cursor.y]
if mode == 0:
for i in range(self.cursor.x, self._get_line_len(self.cursor.y)):
if i in curr_line:
del curr_line[i]
elif mode == 1:
for i in range(self.cursor.x + 1):
if i in curr_line:
del curr_line[i]
else:
curr_line.clear()
def insert_lines(self, n=1):
for i in range(self.num_lines - 1, self.cursor.y, -1):
self.buffer[i + n] = self.buffer[i]
for i in range(self.cursor.y + 1, self.cursor.y + 1 + n):
if i in self.buffer:
del self.buffer[i]
def _write_plain_text(self, plain_text):
self.buffer[self.cursor.y].update(
[
(self.cursor.x + i, self.cursor.char.copy(data=c))
for i, c in enumerate(plain_text)
]
)
self.cursor.x += len(plain_text)
def _write_text(self, text):
prev_end = 0
for match in SEP_RE.finditer(text):
start, end = match.span()
self._write_plain_text(text[prev_end:start])
prev_end = end
c = match.group()
if c == "\n":
self.linefeed()
elif c == "\r":
self.carriage_return()
elif c == "\b":
self.cursor_left()
else:
continue
self._write_plain_text(text[prev_end:])
def _remove_osc(self, text):
return re.sub(ANSI_OSC_RE, "", text)
def write(self, data):
self._num_lines = None # invalidate cache
data = self._remove_osc(data)
prev_end = 0
for match in ANSI_CSI_RE.finditer(data):
start, end = match.span()
text = data[prev_end:start]
csi = data[start:end]
prev_end = end
self._write_text(text)
self._handle_csi(csi, *match.groups())
self._write_text(data[prev_end:])
def _handle_csi(self, csi, params, command):
try:
if command == "m":
p = params.split(";")[0]
if not p:
p = "0"
if p in ANSI_FG:
self.cursor.char.fg = p
elif p in ANSI_BG:
self.cursor.char.bg = p
elif p == ANSI_RESET:
self.cursor.char.reset()
elif p in ANSI_STYLES:
style = ANSI_STYLES[p]
off = style.startswith("/")
if off:
style = style[1:]
self.cursor.char[style] = not off
else:
abcd = {
"A": "cursor_up",
"B": "cursor_down",
"C": "cursor_right",
"D": "cursor_left",
}
cursor_fn = abcd.get(command)
if cursor_fn:
getattr(self, cursor_fn)(int(params) if params else 1)
elif command == "J":
p = params.split(";")[0]
p = int(p) if p else 0
self.erase_screen(p)
elif command == "K":
p = params.split(";")[0]
p = int(p) if p else 0
self.erase_line(p)
elif command == "L":
p = int(params) if params else 1
self.insert_lines(p)
elif command in "Hf":
p = params.split(";")
if len(p) == 2:
p = (int(p[0]), int(p[1]))
elif len(p) == 1:
p = (int(p[0]), 1)
else:
p = (1, 1)
self.cursor_position(*p)
except Exception:
pass
def _get_line(self, n):
line = self.buffer[n]
line_len = self._get_line_len(n)
# We have to loop through each character in the line and check if foreground,
# background and other attributes (italics, bold, underline, etc) of the ith
# character are different from those of the (i-1)th character. If different, the
# appropriate ascii character for switching the color/attribute should be
# appended to the output string before appending the actual character. This loop
# and subsequent checks can be expensive, especially because 99% of terminal
# output use default colors and formatting. Even in outputs that do contain
# colors and styles, its unlikely that they will change on a per character
# basis.
# So instead we create a character list without any ascii codes (`out`), and a
# list of all the foregrounds in the line (`fgs`) on which we call np.diff() and
# np.where() to find the indices where the foreground change, and insert the
# ascii characters in the output list (`out`) on those indices. All of this is
# the done only if there are more than 1 foreground color in the line in the
# first place (`if len(set(fgs)) > 1 else None`). Same logic is repeated for
# background colors and other attributes.
out = [line[i].data for i in range(line_len)]
# for dynamic insert using original indices
idxs = np.arange(line_len)
insert = lambda i, c: (out.insert(idxs[i], c), idxs[i:].__iadd__(1)) # noqa
fgs = [int(_defchar.fg)] + [int(line[i].fg) for i in range(line_len)]
[
insert(i, _get_char(line[int(i)].fg)) for i in np.where(np.diff(fgs))[0]
] if len(set(fgs)) > 1 else None
bgs = [int(_defchar.bg)] + [int(line[i].bg) for i in range(line_len)]
[
insert(i, _get_char(line[int(i)].bg)) for i in np.where(np.diff(bgs))[0]
] if len(set(bgs)) > 1 else None
attrs = {
k: [False] + [line[i][k] for i in range(line_len)]
for k in Char.__slots__[3:]
}
[
[
insert(i, _get_char(ANSI_STYLES_REV[k if line[int(i)][k] else "/" + k]))
for i in np.where(np.diff(v))[0]
]
for k, v in attrs.items()
if any(v)
]
return "".join(out)
def read(self):
num_lines = self.num_lines
if self._prev_num_lines is None:
ret = os.linesep.join(map(self._get_line, range(num_lines)))
if ret:
ret += os.linesep
else:
return ret
else:
curr_line = self._get_line(self._prev_num_lines - 1)
if curr_line == self._prev_last_line:
if num_lines == self._prev_num_lines:
return ""
ret = (
os.linesep.join(
map(self._get_line, range(self._prev_num_lines, num_lines))
)
+ os.linesep
)
else:
ret = (
"\r"
+ os.linesep.join(
map(self._get_line, range(self._prev_num_lines - 1, num_lines))
)
+ os.linesep
)
if num_lines > self._MAX_LINES:
shift = num_lines - self._MAX_LINES
for i in range(shift, num_lines):
self.buffer[i - shift] = self.buffer[i]
for i in range(self._MAX_LINES, max(self.buffer.keys())):
if i in self.buffer:
del self.buffer[i]
self.cursor.y -= min(self.cursor.y, shift)
self._num_lines = num_lines = self._MAX_LINES
self._prev_num_lines = num_lines
self._prev_last_line = self._get_line(num_lines - 1)
return ret
_MIN_CALLBACK_INTERVAL = 2 # seconds
| TerminalEmulator |
python | numpy__numpy | numpy/polynomial/tests/test_chebyshev.py | {
"start": 1496,
"end": 3645
} | class ____:
def test_chebadd(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = cheb.chebadd([0] * i + [1], [0] * j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebsub(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = cheb.chebsub([0] * i + [1], [0] * j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebmulx(self):
assert_equal(cheb.chebmulx([0]), [0])
assert_equal(cheb.chebmulx([1]), [0, 1])
for i in range(1, 5):
ser = [0] * i + [1]
tgt = [0] * (i - 1) + [.5, 0, .5]
assert_equal(cheb.chebmulx(ser), tgt)
def test_chebmul(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
tgt = np.zeros(i + j + 1)
tgt[i + j] += .5
tgt[abs(i - j)] += .5
res = cheb.chebmul([0] * i + [1], [0] * j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebdiv(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
ci = [0] * i + [1]
cj = [0] * j + [1]
tgt = cheb.chebadd(ci, cj)
quo, rem = cheb.chebdiv(tgt, ci)
res = cheb.chebadd(cheb.chebmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebpow(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
c = np.arange(i + 1)
tgt = reduce(cheb.chebmul, [c] * j, np.array([1]))
res = cheb.chebpow(c, j)
assert_equal(trim(res), trim(tgt), err_msg=msg)
| TestArithmetic |
python | redis__redis-py | tests/test_multidb/test_healthcheck.py | {
"start": 5860,
"end": 7885
} | class ____:
@pytest.mark.parametrize(
"hc1_side_effect,hc2_side_effect,hc1_call_count,hc2_call_count,expected_result",
[
([False, False, False], [True, True, True], 3, 0, False),
([False, False, True], [False, False, False], 3, 3, False),
([False, True, True], [False, False, True], 2, 3, True),
([True, True, True], [False, True, False], 1, 2, True),
],
ids=[
"HC1 - no successful",
"HC2 - no successful",
"HC1 - successful",
"HC2 - successful",
],
)
def test_policy_returns_true_for_any_successful_probe(
self,
hc1_side_effect,
hc2_side_effect,
hc1_call_count,
hc2_call_count,
expected_result,
):
mock_hc1 = Mock(spec=HealthCheck)
mock_hc2 = Mock(spec=HealthCheck)
mock_hc1.check_health.side_effect = hc1_side_effect
mock_hc2.check_health.side_effect = hc2_side_effect
mock_db = Mock(spec=Database)
policy = HealthyAnyPolicy(3, 0.01)
assert policy.execute([mock_hc1, mock_hc2], mock_db) == expected_result
assert mock_hc1.check_health.call_count == hc1_call_count
assert mock_hc2.check_health.call_count == hc2_call_count
def test_policy_raise_unhealthy_database_exception_if_exception_occurs_on_failed_health_check(
self,
):
mock_hc1 = Mock(spec=HealthCheck)
mock_hc2 = Mock(spec=HealthCheck)
mock_hc1.check_health.side_effect = [False, False, ConnectionError]
mock_hc2.check_health.side_effect = [True, True, True]
mock_db = Mock(spec=Database)
policy = HealthyAnyPolicy(3, 0.01)
with pytest.raises(UnhealthyDatabaseException, match="Unhealthy database"):
policy.execute([mock_hc1, mock_hc2], mock_db)
assert mock_hc1.check_health.call_count == 3
assert mock_hc2.check_health.call_count == 0
@pytest.mark.onlynoncluster
| TestHealthyAnyPolicy |
python | getsentry__sentry | src/sentry/issues/merge.py | {
"start": 558,
"end": 2435
} | class ____(TypedDict):
parent: str
children: list[str]
def handle_merge(
group_list: Sequence[Group],
project_lookup: Mapping[int, Project],
acting_user: RpcUser | User | None,
) -> MergedGroup:
"""
Merge a list of groups into a single group.
Returns a dict with the primary group id and a list of the merged group ids.
"""
if any([group.issue_category != GroupCategory.ERROR for group in group_list]):
raise rest_framework.exceptions.ValidationError(detail="Only error issues can be merged.")
# Sort by:
# 1) Earliest first-seen time.
# 2) On tie: Higher times-seen (# of associated events)
# 3) On double-tie: Lower id.
group_list_sorted = sorted(
group_list,
key=lambda g: (g.first_seen, -g.times_seen, g.id),
)
primary_group, groups_to_merge = group_list_sorted[0], group_list_sorted[1:]
group_ids_to_merge = [g.id for g in groups_to_merge]
eventstream_state = eventstream.backend.start_merge(
primary_group.project_id, group_ids_to_merge, primary_group.id, primary_group.first_seen
)
Group.objects.filter(id__in=group_ids_to_merge).update(
status=GroupStatus.PENDING_MERGE, substatus=None
)
transaction_id = uuid4().hex
merge_groups.delay(
from_object_ids=group_ids_to_merge,
to_object_id=primary_group.id,
transaction_id=transaction_id,
eventstream_state=eventstream_state,
)
Activity.objects.create(
project=project_lookup[primary_group.project_id],
group=primary_group,
type=ActivityType.MERGE.value,
user_id=acting_user.id if acting_user else None,
data={"issues": [{"id": c.id} for c in groups_to_merge]},
)
return MergedGroup(
parent=str(primary_group.id),
children=[str(g.id) for g in groups_to_merge],
)
| MergedGroup |
python | django__django | tests/delete/models.py | {
"start": 5874,
"end": 5973
} | class ____(models.Model):
r_proxy = models.ForeignKey("RProxy", models.CASCADE, null=True)
| Origin |
python | plotly__plotly.py | tests/test_core/test_subplots/test_get_subplot.py | {
"start": 181,
"end": 6180
} | class ____(TestCase):
def test_get_subplot(self):
# Make Figure with subplot types
fig = subplots.make_subplots(
rows=4,
cols=2,
specs=[
[{}, {"secondary_y": True}],
[{"type": "polar"}, {"type": "ternary"}],
[{"type": "scene"}, {"type": "geo"}],
[{"type": "domain", "colspan": 2}, None],
],
)
fig.add_scatter(y=[2, 1, 3], row=1, col=1)
fig.add_scatter(y=[2, 1, 3], row=1, col=2)
fig.add_scatter(y=[1, 3, 2], row=1, col=2, secondary_y=True)
fig.add_trace(go.Scatterpolar(r=[2, 1, 3], theta=[20, 50, 125]), row=2, col=1)
fig.add_traces(
[go.Scatterternary(a=[0.2, 0.1, 0.3], b=[0.4, 0.6, 0.5])],
rows=[2],
cols=[2],
)
fig.add_scatter3d(
x=[2, 0, 1], y=[0, 1, 0], z=[0, 1, 2], mode="lines", row=3, col=1
)
fig.add_scattergeo(lat=[0, 40], lon=[10, 5], mode="lines", row=3, col=2)
fig.add_parcats(
dimensions=[
{"values": ["A", "A", "B", "A", "B"]},
{"values": ["a", "a", "a", "b", "b"]},
],
row=4,
col=1,
)
fig.update_traces(uid=None)
fig.update(layout_height=1200)
# Check
expected = Figure(
{
"data": [
{"type": "scatter", "xaxis": "x", "y": [2, 1, 3], "yaxis": "y"},
{"type": "scatter", "xaxis": "x2", "y": [2, 1, 3], "yaxis": "y2"},
{"type": "scatter", "xaxis": "x2", "y": [1, 3, 2], "yaxis": "y3"},
{
"r": [2, 1, 3],
"subplot": "polar",
"theta": [20, 50, 125],
"type": "scatterpolar",
},
{
"a": [0.2, 0.1, 0.3],
"b": [0.4, 0.6, 0.5],
"subplot": "ternary",
"type": "scatterternary",
},
{
"mode": "lines",
"scene": "scene",
"type": "scatter3d",
"x": [2, 0, 1],
"y": [0, 1, 0],
"z": [0, 1, 2],
},
{
"geo": "geo",
"lat": [0, 40],
"lon": [10, 5],
"mode": "lines",
"type": "scattergeo",
},
{
"dimensions": [
{"values": ["A", "A", "B", "A", "B"]},
{"values": ["a", "a", "a", "b", "b"]},
],
"domain": {"x": [0.0, 0.9400000000000001], "y": [0.0, 0.19375]},
"type": "parcats",
},
],
"layout": {
"geo": {
"domain": {
"x": [0.5700000000000001, 0.9400000000000001],
"y": [0.26875, 0.4625],
}
},
"height": 1200,
"polar": {"domain": {"x": [0.0, 0.37], "y": [0.5375, 0.73125]}},
"scene": {"domain": {"x": [0.0, 0.37], "y": [0.26875, 0.4625]}},
"ternary": {
"domain": {
"x": [0.5700000000000001, 0.9400000000000001],
"y": [0.5375, 0.73125],
}
},
"xaxis": {"anchor": "y", "domain": [0.0, 0.37]},
"xaxis2": {
"anchor": "y2",
"domain": [0.5700000000000001, 0.9400000000000001],
},
"yaxis": {"anchor": "x", "domain": [0.80625, 1.0]},
"yaxis2": {"anchor": "x2", "domain": [0.80625, 1.0]},
"yaxis3": {"anchor": "x2", "overlaying": "y2", "side": "right"},
},
}
)
expected.update_traces(uid=None)
# Make sure we have expected starting figure
self.assertEqual(fig, expected)
# (1, 1)
subplot = fig.get_subplot(1, 1)
self.assertEqual(
subplot, SubplotXY(xaxis=fig.layout.xaxis, yaxis=fig.layout.yaxis)
)
# (1, 2) Primary
subplot = fig.get_subplot(1, 2)
self.assertEqual(
subplot, SubplotXY(xaxis=fig.layout.xaxis2, yaxis=fig.layout.yaxis2)
)
# (1, 2) Primary
subplot = fig.get_subplot(1, 2, secondary_y=True)
self.assertEqual(
subplot, SubplotXY(xaxis=fig.layout.xaxis2, yaxis=fig.layout.yaxis3)
)
# (2, 1)
subplot = fig.get_subplot(2, 1)
self.assertEqual(subplot, fig.layout.polar)
# (2, 2)
subplot = fig.get_subplot(2, 2)
self.assertEqual(subplot, fig.layout.ternary)
# (3, 1)
subplot = fig.get_subplot(3, 1)
self.assertEqual(subplot, fig.layout.scene)
# (3, 2)
subplot = fig.get_subplot(3, 2)
self.assertEqual(subplot, fig.layout.geo)
# (4, 1)
subplot = fig.get_subplot(4, 1)
domain = fig.data[-1].domain
self.assertEqual(subplot, SubplotDomain(x=domain.x, y=domain.y))
def test_get_subplot_out_of_bounds(self):
fig = subplots.make_subplots(rows=4, cols=2)
self.assertRaises(ValueError, lambda: fig.get_subplot(0, 1))
self.assertRaises(ValueError, lambda: fig.get_subplot(5, 1))
self.assertRaises(ValueError, lambda: fig.get_subplot(1, 0))
self.assertRaises(ValueError, lambda: fig.get_subplot(1, 3))
| TestGetSubplot |
python | gevent__gevent | src/greentest/3.14/test_urllib.py | {
"start": 2130,
"end": 2481
} | class ____(object):
def fakehttp(self, fakedata, mock_close=False):
fake_http_class = fakehttp(fakedata, mock_close=mock_close)
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = fake_http_class
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
| FakeHTTPMixin |
python | dask__dask | dask/array/_array_expr/_ufunc.py | {
"start": 8470,
"end": 10032
} | class ____(ArrayExpr):
_parameters = ["array", "index", "meta", "name", "func"]
@functools.cached_property
def _meta(self):
meta = self.operand("meta")
a = np.empty_like(meta, shape=(1,) * meta.ndim, dtype=meta.dtype)
result = self.operand("func")(a)
return result[self.operand("index")]
@functools.cached_property
def chunks(self):
return self.array.chunks
@functools.cached_property
def _name(self):
return self.operand("name") + _tokenize_deterministic(*self.operands)
def _layer(self) -> dict:
return {
(self._name,) + key[1:]: (getitem, key, self.operand("index"))
for key in core.flatten(self.array.__dask_keys__())
}
@derived_from(np)
def frexp(x):
# Not actually object dtype, just need to specify something
tmp = elemwise(np.frexp, x, dtype=object)
left = DoubleOutputs(tmp, 0, getattr(x, "_meta", x), "mantissa-", np.frexp)
right = DoubleOutputs(tmp, 1, getattr(x, "_meta", x), "exponent-", np.frexp)
return new_collection(left), new_collection(right)
@derived_from(np)
def modf(x):
# Not actually object dtype, just need to specify something
tmp = elemwise(np.modf, x, dtype=object)
left = DoubleOutputs(tmp, 0, getattr(x, "_meta", x), "modf1-", np.modf)
right = DoubleOutputs(tmp, 1, getattr(x, "_meta", x), "modf2-", np.modf)
return new_collection(left), new_collection(right)
@derived_from(np)
def divmod(x, y):
res1 = x // y
res2 = x % y
return res1, res2
| DoubleOutputs |
python | huggingface__transformers | src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py | {
"start": 12420,
"end": 13940
} | class ____(nn.Module):
"""Construct the features from raw audio waveform"""
def __init__(self, config):
super().__init__()
if config.feat_extract_norm == "group":
conv_layers = [Wav2Vec2ConformerGroupNormConvLayer(config, layer_id=0)] + [
Wav2Vec2ConformerNoLayerNormConvLayer(config, layer_id=i + 1)
for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [
Wav2Vec2ConformerLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
]
else:
raise ValueError(
f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
)
self.conv_layers = nn.ModuleList(conv_layers)
self.gradient_checkpointing = False
self._requires_grad = True
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def forward(self, input_values):
hidden_states = input_values[:, None]
# make sure hidden_states require grad for gradient_checkpointing
if self._requires_grad and self.training:
hidden_states.requires_grad = True
for conv_layer in self.conv_layers:
hidden_states = conv_layer(hidden_states)
return hidden_states
| Wav2Vec2ConformerFeatureEncoder |
python | pandas-dev__pandas | pandas/tests/indexing/multiindex/test_multiindex.py | {
"start": 275,
"end": 10038
} | class ____:
def test_multiindex_perf_warn(self, performance_warning):
df = DataFrame(
{
"jim": [0, 0, 1, 1],
"joe": ["x", "x", "z", "y"],
"jolie": np.random.default_rng(2).random(4),
}
).set_index(["jim", "joe"])
with tm.assert_produces_warning(performance_warning):
df.loc[(1, "z")]
df = df.iloc[[2, 1, 3, 0]]
with tm.assert_produces_warning(performance_warning):
df.loc[(0,)]
@pytest.mark.parametrize("offset", [-5, 5])
def test_indexing_over_hashtable_size_cutoff(self, monkeypatch, offset):
size_cutoff = 20
n = size_cutoff + offset
with monkeypatch.context():
monkeypatch.setattr(libindex, "_SIZE_CUTOFF", size_cutoff)
s = Series(np.arange(n), MultiIndex.from_arrays((["a"] * n, np.arange(n))))
# hai it works!
assert s[("a", 5)] == 5
assert s[("a", 6)] == 6
assert s[("a", 7)] == 7
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame(
{
"a": ["R1", "R2", np.nan, "R4"],
"b": ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20],
}
)
result = df.set_index(["a", "b"], drop=False)
expected = DataFrame(
{
"a": ["R1", "R2", np.nan, "R4"],
"b": ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20],
},
index=[
Index(["R1", "R2", np.nan, "R4"], name="a"),
Index(["C1", "C2", "C3", "C4"], name="b"),
],
)
tm.assert_frame_equal(result, expected)
def test_exclusive_nat_column_indexing(self):
# GH 38025
# test multi indexing when one column exclusively contains NaT values
df = DataFrame(
{
"a": [pd.NaT, pd.NaT, pd.NaT, pd.NaT],
"b": ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20],
}
)
df = df.set_index(["a", "b"])
expected = DataFrame(
{
"c": [10, 15, np.nan, 20],
},
index=[
Index([pd.NaT, pd.NaT, pd.NaT, pd.NaT], name="a"),
Index(["C1", "C2", "C3", "C4"], name="b"),
],
)
tm.assert_frame_equal(df, expected)
def test_nested_tuples_duplicates(self):
# GH#30892
dti = pd.to_datetime(["20190101", "20190101", "20190102"])
idx = Index(["a", "a", "c"])
mi = MultiIndex.from_arrays([dti, idx], names=["index1", "index2"])
df = DataFrame({"c1": [1, 2, 3], "c2": [np.nan, np.nan, np.nan]}, index=mi)
expected = DataFrame({"c1": df["c1"], "c2": [1.0, 1.0, np.nan]}, index=mi)
df2 = df.copy(deep=True)
df2.loc[(dti[0], "a"), "c2"] = 1.0
tm.assert_frame_equal(df2, expected)
df3 = df.copy(deep=True)
df3.loc[[(dti[0], "a")], "c2"] = 1.0
tm.assert_frame_equal(df3, expected)
def test_multiindex_with_datatime_level_preserves_freq(self):
# https://github.com/pandas-dev/pandas/issues/35563
idx = Index(range(2), name="A")
dti = pd.date_range("2020-01-01", periods=7, freq="D", name="B")
mi = MultiIndex.from_product([idx, dti])
df = DataFrame(np.random.default_rng(2).standard_normal((14, 2)), index=mi)
result = df.loc[0].index
tm.assert_index_equal(result, dti)
assert result.freq == dti.freq
def test_multiindex_complex(self):
# GH#42145
complex_data = [1 + 2j, 4 - 3j, 10 - 1j]
non_complex_data = [3, 4, 5]
result = DataFrame(
{
"x": complex_data,
"y": non_complex_data,
"z": non_complex_data,
}
)
result.set_index(["x", "y"], inplace=True)
expected = DataFrame(
{"z": non_complex_data},
index=MultiIndex.from_arrays(
[complex_data, non_complex_data],
names=("x", "y"),
),
)
tm.assert_frame_equal(result, expected)
def test_rename_multiindex_with_duplicates(self):
# GH 38015
mi = MultiIndex.from_tuples([("A", "cat"), ("B", "cat"), ("B", "cat")])
df = DataFrame(index=mi)
df = df.rename(index={"A": "Apple"}, level=0)
mi2 = MultiIndex.from_tuples([("Apple", "cat"), ("B", "cat"), ("B", "cat")])
expected = DataFrame(index=mi2)
tm.assert_frame_equal(df, expected)
def test_series_align_multiindex_with_nan_overlap_only(self):
# GH 38439
mi1 = MultiIndex.from_arrays([[81.0, np.nan], [np.nan, np.nan]])
mi2 = MultiIndex.from_arrays([[np.nan, 82.0], [np.nan, np.nan]])
ser1 = Series([1, 2], index=mi1)
ser2 = Series([1, 2], index=mi2)
result1, result2 = ser1.align(ser2)
mi = MultiIndex.from_arrays([[81.0, 82.0, np.nan], [np.nan, np.nan, np.nan]])
expected1 = Series([1.0, np.nan, 2.0], index=mi)
expected2 = Series([np.nan, 2.0, 1.0], index=mi)
tm.assert_series_equal(result1, expected1)
tm.assert_series_equal(result2, expected2)
def test_series_align_multiindex_with_nan(self):
# GH 38439
mi1 = MultiIndex.from_arrays([[81.0, np.nan], [np.nan, np.nan]])
mi2 = MultiIndex.from_arrays([[np.nan, 81.0], [np.nan, np.nan]])
ser1 = Series([1, 2], index=mi1)
ser2 = Series([1, 2], index=mi2)
result1, result2 = ser1.align(ser2)
mi = MultiIndex.from_arrays([[81.0, np.nan], [np.nan, np.nan]])
expected1 = Series([1, 2], index=mi)
expected2 = Series([2, 1], index=mi)
tm.assert_series_equal(result1, expected1)
tm.assert_series_equal(result2, expected2)
def test_nunique_smoke(self):
# GH 34019
n = DataFrame([[1, 2], [1, 2]]).set_index([0, 1]).index.nunique()
assert n == 1
def test_multiindex_repeated_keys(self):
# GH19414
tm.assert_series_equal(
Series([1, 2], MultiIndex.from_arrays([["a", "b"]])).loc[
["a", "a", "b", "b"]
],
Series([1, 1, 2, 2], MultiIndex.from_arrays([["a", "a", "b", "b"]])),
)
def test_multiindex_with_na_missing_key(self):
# GH46173
df = DataFrame.from_dict(
{
("foo",): [1, 2, 3],
("bar",): [5, 6, 7],
(None,): [8, 9, 0],
}
)
with pytest.raises(KeyError, match="missing_key"):
df[[("missing_key",)]]
def test_multiindex_dtype_preservation(self):
# GH51261
columns = MultiIndex.from_tuples([("A", "B")], names=["lvl1", "lvl2"])
df = DataFrame(["value"], columns=columns).astype("category")
df_no_multiindex = df["A"]
assert isinstance(df_no_multiindex["B"].dtype, CategoricalDtype)
# geopandas 1763 analogue
df = DataFrame(
[[1, 0], [0, 1]],
columns=[
["foo", "foo"],
["location", "location"],
["x", "y"],
],
).assign(bools=Series([True, False], dtype="boolean"))
assert isinstance(df["bools"].dtype, BooleanDtype)
def test_multiindex_from_tuples_with_nan(self):
# GH#23578
result = MultiIndex.from_tuples([("a", "b", "c"), np.nan, ("d", "", "")])
expected = MultiIndex.from_tuples(
[("a", "b", "c"), (np.nan, np.nan, np.nan), ("d", "", "")]
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("operation", ["div", "mul", "add", "sub"])
def test_groupyby_rename_categories_operation_with_multiindex(self, operation):
# GH#51500
data = DataFrame(
[["C", "B", "B"], ["B", "A", "A"], ["B", "A", "B"]], columns=["0", "1", "2"]
)
data["0"] = data["0"].astype("category")
data["0"] = data["0"].cat.rename_categories({"C": "B", "B": "C"})
a = data.groupby(by=["0", "1"])["2"].value_counts()
b = data.groupby(by=["0", "1"]).size()
result = getattr(a, operation)(b)
expected = getattr(a, operation)(b.sort_index(ascending=False))
tm.assert_series_equal(result, expected)
def test_multiindex_assign_aligns_as_implicit_tuple(self):
# GH 61841
cols = MultiIndex.from_tuples([("A", "B")])
df1 = DataFrame([[i] for i in range(3)], columns=cols)
df2 = df1.copy()
df3 = df1.copy()
s1 = df1["A"].rolling(2).mean()
s2 = s1.copy()
s3 = s1.copy()
df2["C"] = s2
df3[("C", "")] = s3
tm.assert_frame_equal(df2, df3)
df1["C"] = s1
tm.assert_frame_equal(df1, df2)
tm.assert_frame_equal(df1, df3)
df1["C"] = s1
tm.assert_frame_equal(df1, df2)
tm.assert_frame_equal(df1, df3)
def test_multiindex_assign_alignment_with_non_string_dtype(self):
# GH 62518
columns = MultiIndex.from_arrays(
[["a", "a", "z", "z"], pd.Categorical([1, 2, 1, 2])]
)
meta = DataFrame(columns=columns, dtype=object)
meta["z"] = meta["z"].astype("int64")
result = DataFrame(
data={
("a", 1): Series([], dtype=object),
("a", 2): Series([], dtype=object),
("z", 1): Series([], dtype="int64"),
("z", 2): Series([], dtype="int64"),
},
columns=columns,
)
tm.assert_frame_equal(meta, result)
| TestMultiIndexBasic |
python | kamyu104__LeetCode-Solutions | Python/car-fleet.py | {
"start": 33,
"end": 481
} | class ____(object):
def carFleet(self, target, position, speed):
"""
:type target: int
:type position: List[int]
:type speed: List[int]
:rtype: int
"""
times = [float(target-p)/s for p, s in sorted(zip(position, speed))]
result, curr = 0, 0
for t in reversed(times):
if t > curr:
result += 1
curr = t
return result
| Solution |
python | jina-ai__jina | jina/serve/executors/__init__.py | {
"start": 13006,
"end": 65158
} | class ____(JAMLCompatible, metaclass=ExecutorType):
"""
The base class of all Executors, can be used to build encoder, indexer, etc.
:class:`jina.Executor` as an alias for this class.
EXAMPLE USAGE
.. code-block:: python
from jina import Executor, requests, Flow
class MyExecutor(Executor):
@requests
def foo(self, docs, **kwargs):
print(docs) # process docs here
f = Flow().add(uses=Executor) # you can add your Executor to a Flow
Any executor inherited from :class:`BaseExecutor` always has the **meta** defined in :mod:`jina.executors.metas.defaults`.
All arguments in the :func:`__init__` can be specified with a ``with`` map in the YAML config. Example:
.. highlight:: python
.. code-block:: python
class MyAwesomeExecutor(Executor):
def __init__(awesomeness=5):
pass
is equal to
.. highlight:: yaml
.. code-block:: yaml
jtype: MyAwesomeExecutor
with:
awesomeness: 5
"""
    def __init__(
        self,
        metas: Optional[Dict] = None,
        requests: Optional[Dict] = None,
        runtime_args: Optional[Dict] = None,
        workspace: Optional[str] = None,
        dynamic_batching: Optional[Dict] = None,
        **kwargs,
    ):
        """`metas` and `requests` are always auto-filled with values from YAML config.

        :param metas: a dict of metas fields
        :param requests: a dict of endpoint-function mapping
        :param runtime_args: a dict of arguments injected from :class:`Runtime` during runtime
        :param kwargs: additional extra keyword arguments to avoid failing when extra params are passed that are not expected
        :param workspace: the workspace of the executor. Only used if a workspace is not already provided in `metas` or `runtime_args`
        :param dynamic_batching: a dict of endpoint-dynamic_batching config mapping
        """
        # Populate metas/requests/dynamic_batching/runtime_args before anything
        # else: the logger and the sagemaker validation below read them.
        self._add_metas(metas)
        self._add_requests(requests)
        self._add_dynamic_batching(dynamic_batching)
        self._add_runtime_args(runtime_args)
        self.logger = JinaLogger(self.__class__.__name__, **vars(self.runtime_args))
        self._validate_sagemaker()
        self._init_instrumentation(runtime_args)
        self._init_monitoring()
        self._init_workspace = workspace
        # Reserve the dry-run endpoint for gateway/client health checks, unless
        # the Executor already defined it itself (then only warn, since that
        # shadows the health-check contract).
        if __dry_run_endpoint__ not in self.requests:
            self.requests[__dry_run_endpoint__] = (
                _FunctionWithSchema.get_function_with_schema(
                    self.__class__._dry_run_func
                )
            )
        else:
            self.logger.warning(
                f' Endpoint {__dry_run_endpoint__} is defined by the Executor. Be aware that this endpoint is usually reserved to enable health checks from the Client through the gateway.'
                f' So it is recommended not to expose this endpoint. '
            )
        # A bare BaseExecutor (exact type, not subclasses) answers every
        # endpoint with the no-op handler.
        if type(self) == BaseExecutor:
            self.requests[__default_endpoint__] = (
                _FunctionWithSchema.get_function_with_schema(
                    self.__class__._dry_run_func
                )
            )
        # Default "lock" is a no-op async context manager; replaced by a real
        # asyncio.Lock below unless concurrent requests are explicitly allowed.
        self._lock = contextlib.AsyncExitStack()
        try:
            if not getattr(self.runtime_args, 'allow_concurrent', False):
                self._lock = (
                    asyncio.Lock()
                )  # Lock to run in Executor non async methods in a way that does not block the event loop to do health checks without the fear of having race conditions or multithreading issues.
        except RuntimeError:
            # no running event loop available at construction time
            pass
        self._write_lock = (
            threading.Lock()
        )  # watch because this makes it no serializable
    def _get_endpoint_models_dict(self):
        # Build a per-endpoint description of the request/response/parameters
        # schemas registered on this Executor (used to expose the API surface
        # to gateways and clients).
        from jina._docarray import docarray_v2

        if not docarray_v2:
            from docarray.document.pydantic_model import PydanticDocument
        endpoint_models = {}
        for endpoint, function_with_schema in self.requests.items():
            _is_generator = function_with_schema.is_generator
            _is_singleton_doc = function_with_schema.is_singleton_doc
            _is_batch_docs = function_with_schema.is_batch_docs
            _parameters_model = function_with_schema.parameters_model
            if docarray_v2:
                # if the endpoint is not a generator endpoint, then the request schema is a DocumentArray and we need
                # to get the doc_type from the schema
                # otherwise, since generator endpoints only accept a Document as input, the request_schema is the schema
                # of the Document
                if not _is_generator:
                    request_schema = (
                        function_with_schema.request_schema.doc_type
                        if _is_batch_docs
                        else function_with_schema.request_schema
                    )
                    response_schema = (
                        function_with_schema.response_schema.doc_type
                        if _is_batch_docs
                        else function_with_schema.response_schema
                    )
                else:
                    request_schema = function_with_schema.request_schema
                    response_schema = function_with_schema.response_schema
            else:
                # docarray v1: every endpoint exchanges plain pydantic Documents
                request_schema = PydanticDocument
                response_schema = PydanticDocument
            endpoint_models[endpoint] = {
                'input': {
                    'name': request_schema.__name__,
                    'model': request_schema,
                },
                'output': {
                    'name': response_schema.__name__,
                    'model': response_schema,
                },
                'is_generator': _is_generator,
                'is_singleton_doc': _is_singleton_doc,
                'parameters': {
                    'name': (
                        _parameters_model.__name__
                        if _parameters_model is not None
                        else None
                    ),
                    'model': _parameters_model,
                },
            }
        return endpoint_models
    def _dry_run_func(self, *args, **kwargs):
        # No-op handler bound to the dry-run (and, for a bare BaseExecutor,
        # default) endpoint; used for health checks.
        pass
    def _init_monitoring(self):
        # Prometheus Summary for request timing -- only when the runtime
        # injected a metrics registry.
        if (
            hasattr(self.runtime_args, 'metrics_registry')
            and self.runtime_args.metrics_registry
        ):
            with ImportExtensions(
                required=True,
                help_text='You need to install the `prometheus_client` to use the montitoring functionality of jina',
            ):
                from prometheus_client import Summary

            self._summary_method = Summary(
                'process_request_seconds',
                'Time spent when calling the executor request method',
                registry=self.runtime_args.metrics_registry,
                namespace='jina',
                labelnames=('executor', 'executor_endpoint', 'runtime_name'),
            )
            self._metrics_buffer = {'process_request_seconds': self._summary_method}
        else:
            self._summary_method = None
            self._metrics_buffer = None
        # OpenTelemetry histogram mirroring the Prometheus summary above
        # (set up by _init_instrumentation, which must run first).
        if self.meter:
            self._process_request_histogram = self.meter.create_histogram(
                name='jina_process_request_seconds',
                description='Time spent when calling the executor request method',
            )
            self._histogram_buffer = {
                'jina_process_request_seconds': self._process_request_histogram
            }
        else:
            self._process_request_histogram = None
            self._histogram_buffer = None
def _init_instrumentation(self, _runtime_args: Optional[Dict] = None):
if not _runtime_args:
_runtime_args = {}
instrumenting_module_name = _runtime_args.get('name', self.__class__.__name__)
args_tracer_provider = _runtime_args.get('tracer_provider', None)
if args_tracer_provider:
self.tracer_provider = args_tracer_provider
self.tracer = self.tracer_provider.get_tracer(instrumenting_module_name)
else:
self.tracer_provider = None
self.tracer = None
args_meter_provider = _runtime_args.get('meter_provider', None)
if args_meter_provider:
self.meter_provider = args_meter_provider
self.meter = self.meter_provider.get_meter(instrumenting_module_name)
else:
self.meter_provider = None
self.meter = None
@property
def requests(self):
"""
Get the request dictionary corresponding to this specific class
:return: Returns the requests corresponding to the specific Executor instance class
"""
if hasattr(self, '_requests'):
return self._requests
else:
if not hasattr(self, 'requests_by_class'):
self.requests_by_class = {}
if self.__class__.__name__ not in self.requests_by_class:
self.requests_by_class[self.__class__.__name__] = {}
# we need to copy so that different instances with different (requests) in input do not disturb one another
self._requests = copy.copy(self.requests_by_class[self.__class__.__name__])
return self._requests
@property
def write_endpoints(self):
"""
Get the list of endpoints bound to write methods
:return: Returns the list of endpoints bound to write methods
"""
if hasattr(self, '_write_methods'):
endpoints = []
for endpoint, fn in self.requests.items():
if fn.fn.__name__ in self._write_methods:
endpoints.append(endpoint)
return endpoints
else:
return []
    def _add_requests(self, _requests: Optional[Dict]):
        # Merge an endpoint->method-name mapping (from YAML / `uses_requests`)
        # into self.requests, rebinding methods to new endpoints.
        if _requests:
            # reverse map: function name -> endpoint it is currently bound to
            func_names = {f.fn.__name__: e for e, f in self.requests.items()}
            for endpoint, func in _requests.items():
                # the following line must be `getattr(self.__class__, func)` NOT `getattr(self, func)`
                # this to ensure we always have `_func` as unbound method
                if func in func_names:
                    # drop the old binding before re-binding to the new endpoint
                    if func_names[func] in self.requests:
                        del self.requests[func_names[func]]
                _func = getattr(self.__class__, func)
                if callable(_func):
                    # the target function is not decorated with `@requests` yet
                    self.requests[endpoint] = (
                        _FunctionWithSchema.get_function_with_schema(_func)
                    )
                elif typename(_func) == 'jina.executors.decorators.FunctionMapper':
                    # the target function is already decorated with `@requests`, need unwrap with `.fn`
                    self.requests[endpoint] = (
                        _FunctionWithSchema.get_function_with_schema(_func.fn)
                    )
                else:
                    raise TypeError(
                        f'expect {typename(self)}.{func} to be a function, but receiving {typename(_func)}'
                    )
    def _validate_sagemaker(self):
        # sagemaker expects the POST /invocations endpoint to be defined.
        # if it is not defined, we check if there is only one endpoint defined,
        # and if so, we use it as the POST /invocations endpoint, or raise an error
        if (
            not hasattr(self, 'runtime_args')
            or not hasattr(self.runtime_args, 'provider')
            or self.runtime_args.provider != ProviderType.SAGEMAKER.value
        ):
            # not running under sagemaker: nothing to rewrite
            return
        remove_keys = set()
        for k in self.requests.keys():
            if k != '/invocations':
                remove_keys.add(k)
        if '/invocations' in self.requests:
            # '/invocations' already defined: keep only that endpoint
            for k in remove_keys:
                self.requests.pop(k)
            return
        # pick which existing endpoint should serve as '/invocations':
        # an explicit provider_endpoint wins, else the single defined endpoint
        if (
            hasattr(self.runtime_args, 'provider_endpoint')
            and self.runtime_args.provider_endpoint
        ):
            endpoint_to_use = ('/' + self.runtime_args.provider_endpoint).lower()
        elif len(self.requests) == 1:
            endpoint_to_use = list(self.requests.keys())[0]
        else:
            raise ValueError('Cannot identify the endpoint to use for "/invocations"')
        if endpoint_to_use in list(self.requests.keys()):
            self.logger.warning(f'Using "{endpoint_to_use}" as "/invocations" route')
            self.requests['/invocations'] = self.requests[endpoint_to_use]
            if (
                getattr(self, 'dynamic_batching', {}).get(endpoint_to_use, None)
                is not None
            ):
                # carry the endpoint's dynamic-batching config over to the alias
                self.dynamic_batching['/invocations'] = self.dynamic_batching[
                    endpoint_to_use
                ]
                self.dynamic_batching.pop(endpoint_to_use)
            for k in remove_keys:
                self.requests.pop(k)
            return
def _add_dynamic_batching(self, _dynamic_batching: Optional[Dict]):
from collections.abc import Mapping
def deep_update(source, overrides):
for key, value in overrides.items():
if isinstance(value, Mapping) and value:
returned = deep_update(source.get(key, {}), value)
source[key] = returned
else:
source[key] = overrides[key]
return source
if _dynamic_batching:
self.dynamic_batching = getattr(self, 'dynamic_batching', {})
self.dynamic_batching = deep_update(
self.dynamic_batching, _dynamic_batching
)
    def _add_metas(self, _metas: Optional[Dict]):
        # Merge user-provided metas over the defaults and expose the result as
        # ``self.metas`` (a SimpleNamespace).
        from jina.serve.executors.metas import get_default_metas

        tmp = get_default_metas()
        if _metas:
            tmp.update(_metas)
        unresolved_attr = False
        target = SimpleNamespace()
        # set self values filtered by those non-exist, and non-expandable
        for k, v in tmp.items():
            if k == 'workspace' and not (v is None or v == ''):
                warnings.warn(
                    'Setting `workspace` via `metas.workspace` is deprecated. '
                    'Instead, use `f.add(..., workspace=...)` when defining a a Flow in Python; '
                    'the `workspace` parameter when defining a Flow using YAML; '
                    'or `--workspace` when starting an Executor using the CLI.',
                    category=DeprecationWarning,
                )
            if not hasattr(target, k):
                if isinstance(v, str):
                    if not env_var_regex.findall(v):
                        setattr(target, k, v)
                    else:
                        # value contains env-var placeholders; resolve below
                        unresolved_attr = True
                else:
                    setattr(target, k, v)
            elif type(getattr(target, k)) == type(v):
                setattr(target, k, v)
        if unresolved_attr:
            # expand `${{ ... }}`-style placeholders against this instance's vars
            _tmp = vars(self)
            _tmp['metas'] = tmp
            new_metas = JAML.expand_dict(_tmp)['metas']
            for k, v in new_metas.items():
                if not hasattr(target, k):
                    if isinstance(v, str):
                        if not (
                            env_var_regex.findall(v) or internal_var_regex.findall(v)
                        ):
                            setattr(target, k, v)
                        else:
                            raise ValueError(
                                f'{k}={v} is not substitutable or badly referred'
                            )
                    else:
                        setattr(target, k, v)
        # `name` is important as it serves as an identifier of the executor
        # if not given, then set a name by the rule
        if not getattr(target, 'name', None):
            setattr(target, 'name', self.__class__.__name__)
        self.metas = target
    def close(self) -> None:
        """
        Always invoked as executor is destroyed.

        You can write destructor & saving logic here.
        """
        # Intentionally a no-op in the base class; subclasses override this to
        # release resources or persist state.
        pass
def __call__(self, req_endpoint: str, **kwargs):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
if req_endpoint in self.requests:
return self.requests[req_endpoint](
self, **kwargs
) # unbound method, self is required
elif __default_endpoint__ in self.requests:
return self.requests[__default_endpoint__](
self, **kwargs
) # unbound method, self is required
async def __acall__(self, req_endpoint: str, **kwargs):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
if req_endpoint in self.requests:
return await self.__acall_endpoint__(req_endpoint, **kwargs)
elif __default_endpoint__ in self.requests:
return await self.__acall_endpoint__(__default_endpoint__, **kwargs)
async def __acall_endpoint__(
self, req_endpoint, tracing_context: Optional['Context'], **kwargs
):
# Decorator to make sure that `parameters` are passed as PydanticModels if needed
def parameters_as_pydantic_models_decorator(func, parameters_pydantic_model):
@functools.wraps(func) # Step 2: Use functools.wraps to preserve metadata
def wrapper(*args, **kwargs):
parameters = kwargs.get('parameters', None)
if parameters is not None:
parameters = parameters_pydantic_model(**parameters)
kwargs['parameters'] = parameters
result = func(*args, **kwargs)
return result
return wrapper
# Decorator to make sure that `docs` are fed one by one to method using singleton document serving
def loop_docs_decorator(func):
@functools.wraps(func) # Step 2: Use functools.wraps to preserve metadata
def wrapper(*args, **kwargs):
docs = kwargs.pop('docs')
if docarray_v2:
from docarray import DocList
ret = DocList[response_schema]()
else:
ret = DocumentArray()
for doc in docs:
f_ret = func(*args, doc=doc, **kwargs)
if f_ret is None:
ret.append(doc) # this means change in place
else:
ret.append(f_ret)
return ret
return wrapper
def async_loop_docs_decorator(func):
@functools.wraps(func) # Step 2: Use functools.wraps to preserve metadata
async def wrapper(*args, **kwargs):
docs = kwargs.pop('docs')
if docarray_v2:
from docarray import DocList
ret = DocList[response_schema]()
else:
ret = DocumentArray()
for doc in docs:
f_ret = await original_func(*args, doc=doc, **kwargs)
if f_ret is None:
ret.append(doc) # this means change in place
else:
ret.append(f_ret)
return ret
return wrapper
fn_info = self.requests[req_endpoint]
original_func = fn_info.fn
is_generator = fn_info.is_generator
is_batch_docs = fn_info.is_batch_docs
response_schema = fn_info.response_schema
parameters_model = fn_info.parameters_model
is_parameters_pydantic_model = fn_info.parameters_is_pydantic_model
func = original_func
if is_generator or is_batch_docs:
pass
elif kwargs.get('docs', None) is not None:
# This means I need to pass every doc (most likely 1, but potentially more)
if iscoroutinefunction(original_func):
func = async_loop_docs_decorator(original_func)
else:
func = loop_docs_decorator(original_func)
if is_parameters_pydantic_model:
func = parameters_as_pydantic_models_decorator(func, parameters_model)
async def exec_func(
summary, histogram, histogram_metric_labels, tracing_context
):
with MetricsTimer(summary, histogram, histogram_metric_labels):
if iscoroutinefunction(func):
return await func(self, tracing_context=tracing_context, **kwargs)
else:
async with self._lock:
return await get_or_reuse_loop().run_in_executor(
None,
functools.partial(
func, self, tracing_context=tracing_context, **kwargs
),
)
runtime_name = (
self.runtime_args.name if hasattr(self.runtime_args, 'name') else None
)
_summary = (
self._summary_method.labels(
self.__class__.__name__, req_endpoint, runtime_name
)
if self._summary_method
else None
)
_histogram_metric_labels = {
'executor': self.__class__.__name__,
'executor_endpoint': req_endpoint,
'runtime_name': runtime_name,
}
if self.tracer:
with self.tracer.start_as_current_span(
req_endpoint, context=tracing_context
):
from opentelemetry.propagate import extract
from opentelemetry.trace.propagation.tracecontext import (
TraceContextTextMapPropagator,
)
tracing_carrier_context = {}
TraceContextTextMapPropagator().inject(tracing_carrier_context)
return await exec_func(
_summary,
self._process_request_histogram,
_histogram_metric_labels,
extract(tracing_carrier_context),
)
else:
return await exec_func(
_summary,
self._process_request_histogram,
_histogram_metric_labels,
None,
)
@property
def workspace(self) -> Optional[str]:
"""
Get the workspace directory of the Executor.
:return: returns the workspace of the current shard of this Executor.
"""
workspace = (
getattr(self.runtime_args, 'workspace', None)
or getattr(self.metas, 'workspace')
or self._init_workspace
or __cache_path__
)
if workspace:
shard_id = getattr(
self.runtime_args,
'shard_id',
None,
)
return _get_workspace_from_name_and_shards(
workspace=workspace, shard_id=shard_id, name=self.metas.name
)
    def __enter__(self):
        # Support ``with Executor(...) as exec:`` usage; no extra setup needed.
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Release resources on context exit; exceptions are not suppressed.
        self.close()
    @classmethod
    def from_hub(
        cls: Type[T],
        uri: str,
        context: Optional[Dict[str, Any]] = None,
        uses_with: Optional[Dict] = None,
        uses_metas: Optional[Dict] = None,
        uses_requests: Optional[Dict] = None,
        uses_dynamic_batching: Optional[Dict] = None,
        **kwargs,
    ) -> T:
        """Construct an Executor from Hub.

        :param uri: a hub Executor scheme starts with `jinahub://`
        :param context: context replacement variables in a dict, the value of the dict is the replacement.
        :param uses_with: dictionary of parameters to overwrite from the default config's with field
        :param uses_metas: dictionary of parameters to overwrite from the default config's metas field
        :param uses_requests: dictionary of parameters to overwrite from the default config's requests field
        :param uses_dynamic_batching: dictionary of parameters to overwrite from the default config's dynamic_batching field
        :param kwargs: other kwargs accepted by the CLI ``jina hub pull``
        :return: the Hub Executor object.

        .. highlight:: python
        .. code-block:: python

            from jina import Executor
            from docarray import Document, DocumentArray

            executor = Executor.from_hub(
                uri='jinahub://CLIPImageEncoder', install_requirements=True
            )
        """
        from hubble.executor.helper import is_valid_huburi

        _source = None
        if is_valid_huburi(uri):
            from hubble.executor.hubio import HubIO
            from hubble.executor.parsers import set_hub_pull_parser

            _args = ArgNamespace.kwargs2namespace(
                {'no_usage': True, **kwargs},
                set_hub_pull_parser(),
                positional_args=(uri,),
            )
            # pull resolves the hub URI into a local source path (or a
            # docker:// reference, which cannot be loaded natively below)
            _source = HubIO(args=_args).pull()

        if not _source or _source.startswith('docker://'):
            raise ValueError(
                f'Can not construct a native Executor from {uri}. Looks like you want to use it as a '
                f'Docker container, you may want to use it in the Flow via `.add(uses={uri})` instead.'
            )
        return cls.load_config(
            _source,
            context=context,
            uses_with=uses_with,
            uses_metas=uses_metas,
            uses_requests=uses_requests,
            uses_dynamic_batching=uses_dynamic_batching,
        )
# overload_inject_start_executor_serve
@overload
def serve(
self,
*,
allow_concurrent: Optional[bool] = False,
compression: Optional[str] = None,
connection_list: Optional[str] = None,
cors: Optional[bool] = False,
description: Optional[str] = None,
disable_auto_volume: Optional[bool] = False,
docker_kwargs: Optional[dict] = None,
entrypoint: Optional[str] = None,
env: Optional[dict] = None,
exit_on_exceptions: Optional[List] = [],
external: Optional[bool] = False,
floating: Optional[bool] = False,
force_update: Optional[bool] = False,
gpus: Optional[str] = None,
grpc_channel_options: Optional[dict] = None,
grpc_metadata: Optional[dict] = None,
grpc_server_options: Optional[dict] = None,
host: Optional[List] = ['0.0.0.0'],
install_requirements: Optional[bool] = False,
log_config: Optional[str] = None,
metrics: Optional[bool] = False,
metrics_exporter_host: Optional[str] = None,
metrics_exporter_port: Optional[int] = None,
monitoring: Optional[bool] = False,
name: Optional[str] = 'executor',
native: Optional[bool] = False,
no_reduce: Optional[bool] = False,
output_array_type: Optional[str] = None,
polling: Optional[str] = 'ANY',
port: Optional[int] = None,
port_monitoring: Optional[int] = None,
prefer_platform: Optional[str] = None,
protocol: Optional[Union[str, List[str]]] = ['GRPC'],
provider: Optional[str] = ['NONE'],
provider_endpoint: Optional[str] = None,
py_modules: Optional[List] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
raft_configuration: Optional[dict] = None,
reload: Optional[bool] = False,
replicas: Optional[int] = 1,
retries: Optional[int] = -1,
runtime_cls: Optional[str] = 'WorkerRuntime',
shards: Optional[int] = 1,
ssl_certfile: Optional[str] = None,
ssl_keyfile: Optional[str] = None,
stateful: Optional[bool] = False,
timeout_ctrl: Optional[int] = 60,
timeout_ready: Optional[int] = 600000,
timeout_send: Optional[int] = None,
title: Optional[str] = None,
tls: Optional[bool] = False,
traces_exporter_host: Optional[str] = None,
traces_exporter_port: Optional[int] = None,
tracing: Optional[bool] = False,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_after_address: Optional[str] = None,
uses_before: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_before_address: Optional[str] = None,
uses_dynamic_batching: Optional[dict] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
uvicorn_kwargs: Optional[dict] = None,
volumes: Optional[List] = None,
when: Optional[dict] = None,
workspace: Optional[str] = None,
**kwargs,
):
"""Serve this Executor in a temporary Flow. Useful in testing an Executor in remote settings.
:param allow_concurrent: Allow concurrent requests to be processed by the Executor. This is only recommended if the Executor is thread-safe.
:param compression: The compression mechanism used when sending requests from the Head to the WorkerRuntimes. For more details, check https://grpc.github.io/grpc/python/grpc.html#compression.
:param connection_list: dictionary JSON with a list of connections to configure
:param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
:param description: The description of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param disable_auto_volume: Do not automatically mount a volume for dockerized Executors.
:param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
:param entrypoint: The entrypoint command overrides the ENTRYPOINT in Docker image. when not set then the Docker image ENTRYPOINT takes effective.
:param env: The map of environment variables that are available inside runtime
:param exit_on_exceptions: List of exceptions that will cause the Executor to shut down.
:param external: The Deployment will be considered an external Deployment that has been started independently from the Flow.This Deployment will not be context managed by the Flow.
:param floating: If set, the current Pod/Deployment can not be further chained, and the next `.add()` will chain after the last Pod/Deployment not this current one.
:param force_update: If set, always pull the latest Hub Executor bundle even it exists on local
:param gpus: This argument allows dockerized Jina Executors to discover local gpu devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
- To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display
:param grpc_channel_options: Dictionary of kwargs arguments that will be passed to the grpc channel as options when creating a channel, example : {'grpc.max_send_message_length': -1}. When max_attempts > 1, the 'grpc.service_config' option will not be applicable.
:param grpc_metadata: The metadata to be passed to the gRPC request.
:param grpc_server_options: Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}
:param host: The host of the Gateway, which the client should connect to, by default it is 0.0.0.0. In the case of an external Executor (`--external` or `external=True`) this can be a list of hosts. Then, every resulting address will be considered as one replica of the Executor.
:param install_requirements: If set, try to install `requirements.txt` from the local Executor if exists in the Executor folder. If using Hub, install `requirements.txt` in the Hub Executor bundle to local.
:param log_config: The config name or the absolute path to the YAML config file of the logger used in this object.
:param metrics: If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. Otherwise a no-op implementation will be provided.
:param metrics_exporter_host: If tracing is enabled, this hostname will be used to configure the metrics exporter agent.
:param metrics_exporter_port: If tracing is enabled, this port will be used to configure the metrics exporter agent.
:param monitoring: If set, spawn an http server with a prometheus endpoint to expose metrics
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors is allowed, and the Executor is always run inside WorkerRuntime.
:param no_reduce: Disable the built-in reduction mechanism. Set this if the reduction is to be handled by the Executor itself by operating on a `docs_matrix` or `docs_map`
:param output_array_type: The type of array `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
:param polling: The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param port: The port for input data to bind to, default is a random port between [49152, 65535]. In the case of an external Executor (`--external` or `external=True`) this can be a list of ports. Then, every resulting address will be considered as one replica of the Executor.
:param port_monitoring: The port on which the prometheus server is exposed, default is a random port between [49152, 65535]
:param prefer_platform: The preferred target Docker platform. (e.g. "linux/amd64", "linux/arm64")
:param protocol: Communication protocol of the server exposed by the Executor. This can be a single value or a list of protocols, depending on your chosen Gateway. Choose the convenient protocols from: ['GRPC', 'HTTP', 'WEBSOCKET'].
:param provider: If set, Executor is translated to a custom container compatible with the chosen provider. Choose the convenient providers from: ['NONE', 'SAGEMAKER', 'AZURE'].
:param provider_endpoint: If set, Executor endpoint will be explicitly chosen and used in the custom container operated by the provider.
:param py_modules: The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://jina.ai/serve/concepts/executor/executor-files/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param raft_configuration: Dictionary of kwargs arguments that will be passed to the RAFT node as configuration options when starting the RAFT node.
:param reload: If set, the Executor will restart while serving if YAML configuration source or Executor modules are changed. If YAML configuration is changed, the whole deployment is reloaded and new processes will be restarted. If only Python modules of the Executor have changed, they will be reloaded to the interpreter without restarting process.
:param replicas: The number of replicas in the deployment
:param retries: Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)
:param runtime_cls: The runtime class to run inside the Pod
:param shards: The number of shards in the deployment running at the same time. For more details check https://jina.ai/serve/concepts/flow/create-flow/#complex-flow-topologies
:param ssl_certfile: the path to the certificate file
:param ssl_keyfile: the path to the key file
:param stateful: If set, start consensus module to make sure write operations are properly replicated between all the replicas
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting forever
:param timeout_send: The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default
:param title: The title of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param tls: If set, connect to deployment using tls encryption
:param traces_exporter_host: If tracing is enabled, this hostname will be used to configure the trace exporter agent.
:param traces_exporter_port: If tracing is enabled, this port will be used to configure the trace exporter agent.
:param tracing: If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. Otherwise a no-op implementation will be provided.
:param uses: The config of the executor, it could be one of the followings:
* the string literal of an Executor class name
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When use it under Python, one can use the following values additionally:
- a Python dict that represents the config
- a text file stream has `.read()` interface
:param uses_after: The executor attached after the Pods described by --uses, typically used for receiving from all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).
:param uses_after_address: The address of the uses-before runtime
:param uses_before: The executor attached before the Pods described by --uses, typically before sending to all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).
:param uses_before_address: The address of the uses-before runtime
:param uses_dynamic_batching: Dictionary of keyword arguments that will override the `dynamic_batching` configuration in `uses`
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
:param volumes: The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split provided, then the basename of that directory will be mounted into container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
:param when: The condition that the documents need to fulfill before reaching the Executor.The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_executor_serve
@classmethod
def serve(
cls,
uses_with: Optional[Dict] = None,
uses_metas: Optional[Dict] = None,
uses_requests: Optional[Dict] = None,
stop_event: Optional[Union['threading.Event', 'multiprocessing.Event']] = None,
uses_dynamic_batching: Optional[Dict] = None,
reload: bool = False,
**kwargs,
):
"""Serve this Executor in a temporary Flow. Useful in testing an Executor in remote settings.
:param uses_with: dictionary of parameters to overwrite from the default config's with field
:param uses_metas: dictionary of parameters to overwrite from the default config's metas field
:param uses_requests: dictionary of parameters to overwrite from the default config's requests field
:param reload: If set, the Executor reloads the modules as they change
:param stop_event: a threading event or a multiprocessing event that once set will resume the control Flow
to main thread.
:param uses_dynamic_batching: dictionary of parameters to overwrite from the default config's dynamic_batching field
:param reload: a flag indicating if the Executor should watch the Python files of its implementation to reload the code live while serving.
:param kwargs: other kwargs accepted by the Flow, full list can be found `here <https://jina.ai/serve/api/jina.orchestrate.flow.base/>`
"""
warnings.warn(
f'Executor.serve() is no more supported and will be deprecated soon. Use Deployment to serve an Executor instead: '
f'https://jina.ai/serve/concepts/executor/serve/',
DeprecationWarning,
)
from jina.orchestrate.deployments import Deployment
dep = Deployment(
uses=cls,
uses_with=uses_with,
uses_metas=uses_metas,
uses_requests=uses_requests,
uses_dynamic_batching=uses_dynamic_batching,
reload=reload,
**kwargs,
)
with dep:
dep.block(stop_event)
class StandaloneExecutorType(BetterEnum):
"""
Type of standalone Executors
"""
EXTERNAL = 0 # served by a gateway
SHARED = 1 # not served by a gateway, served by head/worker
@staticmethod
def to_kubernetes_yaml(
uses: str,
output_base_path: str,
k8s_namespace: Optional[str] = None,
executor_type: Optional[
StandaloneExecutorType
] = StandaloneExecutorType.EXTERNAL,
uses_with: Optional[Dict] = None,
uses_metas: Optional[Dict] = None,
uses_requests: Optional[Dict] = None,
uses_dynamic_batching: Optional[Dict] = None,
**kwargs,
):
"""
Converts the Executor into a set of yaml deployments to deploy in Kubernetes.
If you don't want to rebuild image on Jina Hub,
you can set `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: the Executor to use. Has to be containerized and accessible from K8s
:param output_base_path: The base path where to dump all the yaml files
:param k8s_namespace: The name of the k8s namespace to set for the configurations. If None, the name of the Flow will be used.
:param executor_type: The type of Executor. Can be external or shared. External Executors include the Gateway. Shared Executors don't. Defaults to External
:param uses_with: dictionary of parameters to overwrite from the default config's with field
:param uses_metas: dictionary of parameters to overwrite from the default config's metas field
:param uses_requests: dictionary of parameters to overwrite from the default config's requests field
:param uses_dynamic_batching: dictionary of parameters to overwrite from the default config's dynamic_batching field
:param kwargs: other kwargs accepted by the Flow, full list can be found `here <https://jina.ai/serve/api/jina.orchestrate.flow.base/>`
"""
warnings.warn(
f'Executor.to_kubernetes_yaml() is no more supported and will be deprecated soon. Use Deployment to export kubernetes YAML files: '
f'https://jina.ai/serve/concepts/executor/serve/#serve-via-kubernetes',
DeprecationWarning,
)
from jina.orchestrate.flow.base import Flow
Flow(**kwargs).add(
uses=uses,
uses_with=uses_with,
uses_metas=uses_metas,
uses_requests=uses_requests,
uses_dynamic_batching=uses_dynamic_batching,
).to_kubernetes_yaml(
output_base_path=output_base_path,
k8s_namespace=k8s_namespace,
include_gateway=executor_type
== BaseExecutor.StandaloneExecutorType.EXTERNAL,
)
to_k8s_yaml = to_kubernetes_yaml
@staticmethod
def to_docker_compose_yaml(
uses: str,
output_path: Optional[str] = None,
network_name: Optional[str] = None,
executor_type: Optional[
StandaloneExecutorType
] = StandaloneExecutorType.EXTERNAL,
uses_with: Optional[Dict] = None,
uses_metas: Optional[Dict] = None,
uses_requests: Optional[Dict] = None,
uses_dynamic_batching: Optional[Dict] = None,
**kwargs,
):
"""
Converts the Executor into a yaml file to run with `docker-compose up`
:param uses: the Executor to use. Has to be containerized
:param output_path: The output path for the yaml file
:param network_name: The name of the network that will be used by the deployment name
:param executor_type: The type of Executor. Can be external or shared. External Executors include the Gateway. Shared Executors don't. Defaults to External
:param uses_with: dictionary of parameters to overwrite from the default config's with field
:param uses_metas: dictionary of parameters to overwrite from the default config's metas field
:param uses_requests: dictionary of parameters to overwrite from the default config's requests field
:param uses_dynamic_batching: dictionary of parameters to overwrite from the default config's requests field
:param kwargs: other kwargs accepted by the Flow, full list can be found `here <https://jina.ai/serve/api/jina.orchestrate.flow.base/>`
"""
warnings.warn(
f'Executor.to_docker_compose_yaml() is no more supported and will be deprecated soon. Use Deployment to export docker compose YAML files: '
f'https://jina.ai/serve/concepts/executor/serve/#serve-via-docker-compose',
DeprecationWarning,
)
from jina.orchestrate.flow.base import Flow
f = Flow(**kwargs).add(
uses=uses,
uses_with=uses_with,
uses_metas=uses_metas,
uses_requests=uses_requests,
uses_dynamic_batching=uses_dynamic_batching,
)
f.to_docker_compose_yaml(
output_path=output_path,
network_name=network_name,
include_gateway=executor_type
== BaseExecutor.StandaloneExecutorType.EXTERNAL,
)
def monitor(
self, name: Optional[str] = None, documentation: Optional[str] = None
) -> Optional[MetricsTimer]:
"""
Get a given prometheus metric, if it does not exist yet, it will create it and store it in a buffer.
:param name: the name of the metrics
:param documentation: the description of the metrics
:return: the given prometheus metrics or None if monitoring is not enable.
"""
_summary = (
self._metrics_buffer.get(name, None) if self._metrics_buffer else None
)
_histogram = (
self._histogram_buffer.get(name, None) if self._histogram_buffer else None
)
if self._metrics_buffer and not _summary:
from prometheus_client import Summary
_summary = Summary(
name,
documentation,
registry=self.runtime_args.metrics_registry,
namespace='jina',
labelnames=('runtime_name',),
).labels(self.runtime_args.name)
self._metrics_buffer[name] = _summary
if self._histogram_buffer and not _histogram:
_histogram = self.meter.create_histogram(
name=f'jina_{name}', description=documentation
)
self._histogram_buffer[name] = _histogram
if _summary or _histogram:
return MetricsTimer(
_summary,
_histogram,
histogram_metric_labels={'runtime_name': self.runtime_args.name},
)
return contextlib.nullcontext()
def snapshot(self, snapshot_file: str):
"""
Interface to take a snapshot from the Executor. Implement it to enable periodic snapshots
:param snapshot_file: The file path where to store the binary representation of the Executor snapshot
"""
raise Exception('Raising an Exception. Snapshot is not enabled by default')
def restore(self, snapshot_file: str):
"""
Interface to restore the state of the Executor from a snapshot that has been taken by the snapshot method.
:param snapshot_file: The file path from where to reconstruct the Executor
"""
pass
def _run_snapshot(self, snapshot_file: str, did_raise_exception):
try:
from pathlib import Path
p = Path(snapshot_file)
p.parent.mkdir(parents=True, exist_ok=True)
p.touch()
with self._write_lock:
self.snapshot(snapshot_file)
except:
did_raise_exception.set()
raise
def _run_restore(self, snapshot_file: str, did_raise_exception):
try:
with self._write_lock:
self.restore(snapshot_file)
except:
did_raise_exception.set()
raise
finally:
os.remove(snapshot_file)
| BaseExecutor |
python | huggingface__transformers | tests/models/funnel/test_modeling_funnel.py | {
"start": 12586,
"end": 15687
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
FunnelModel,
FunnelForMaskedLM,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": (FunnelBaseModel, FunnelModel),
"fill-mask": FunnelForMaskedLM,
"question-answering": FunnelForQuestionAnswering,
"text-classification": FunnelForSequenceClassification,
"token-classification": FunnelForTokenClassification,
"zero-shot": FunnelForSequenceClassification,
}
if is_torch_available()
else {}
)
# special case for ForPreTraining model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = FunnelModelTester(self)
self.config_tester = ConfigTester(self, config_class=FunnelConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.fill_(3)
for param in ["r_w_bias", "r_r_bias", "r_kernel", "r_s_bias", "seg_embed"]:
if hasattr(module, param) and getattr(module, param) is not None:
weight = getattr(module, param)
weight.data.fill_(3)
@require_torch
| FunnelModelTest |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_wisconsin_zip.py | {
"start": 1759,
"end": 4110
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid Wisconsin zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_wisconsin_zip": ["53001", "53541", "54466", "54990"],
"invalid_wisconsin_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_wisconsin_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_wisconsin_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_wisconsin_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidWisconsinZip().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidWisconsinZip |
python | plotly__plotly.py | plotly/graph_objs/scatterpolar/_selected.py | {
"start": 233,
"end": 3387
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolar"
_path_str = "scatterpolar.selected"
_valid_props = {"marker", "textfont"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolar.selected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.scatterpolar.selected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolar.selected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.scatterpolar.selected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.scatterpolar.selected.Mark
er` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatterpolar.selected.Text
font` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Selected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterpolar.Selected`
marker
:class:`plotly.graph_objects.scatterpolar.selected.Mark
er` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatterpolar.selected.Text
font` instance or dict with compatible properties
Returns
-------
Selected
"""
super().__init__("selected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolar.Selected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.Selected`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._set_property("textfont", arg, textfont)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Selected |
python | getsentry__sentry | tests/sentry/uptime/autodetect/test_ranking.py | {
"start": 6949,
"end": 7419
} | class ____(UptimeTestCase):
def test(self) -> None:
bucket = datetime(2024, 7, 18, 0, 47)
assert get_organization_bucket(bucket) == set()
dummy_org_id = 47
self.project.organization = Organization(id=dummy_org_id)
self.project.organization_id = dummy_org_id
add_base_url_to_rank(self.project, "https://sentry.io")
assert get_organization_bucket(bucket) == {self.project.organization_id}
| GetOrganizationBucketTest |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_partition_sets.py | {
"start": 19611,
"end": 20296
} | class ____(ReadonlyGraphQLContextTestMatrix):
def test_unauthorized_error_on_add_dynamic_partitions(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
ADD_DYNAMIC_PARTITION_MUTATION,
variables={
"partitionsDefName": "foo",
"partitionKey": "bar",
"repositorySelector": repository_selector,
},
)
assert not result.errors
assert result.data
assert result.data["addDynamicPartition"]["__typename"] == "UnauthorizedError"
| TestDynamicPartitionReadonlyFailure |
python | scipy__scipy | scipy/_build_utils/tempita/_tempita.py | {
"start": 17573,
"end": 37611
} | class ____:
def __call__(self, *args, **kw):
return self
def __str__(self):
return ''
def __repr__(self):
return 'Empty'
def __unicode__(self):
return ''
def __iter__(self):
return iter(())
def __bool__(self):
return False
Empty = _Empty()
del _Empty
############################################################
## Lexing and Parsing
############################################################
def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None):
"""
Lex a string into chunks:
>>> lex('hey')
['hey']
>>> lex('hey {{you}}')
['hey ', ('you', (1, 7))]
>>> lex('hey {{')
Traceback (most recent call last):
...
TemplateError: No }} to finish last expression at line 1 column 7
>>> lex('hey }}')
Traceback (most recent call last):
...
TemplateError: }} outside expression at line 1 column 7
>>> lex('hey {{ {{')
Traceback (most recent call last):
...
TemplateError: {{ inside expression at line 1 column 10
"""
if delimiters is None:
delimiters = ( Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'] )
in_expr = False
chunks = []
last = 0
last_pos = (line_offset + 1, 1)
token_re = re.compile(r'%s|%s' % (re.escape(delimiters[0]),
re.escape(delimiters[1])))
for match in token_re.finditer(s):
expr = match.group(0)
pos = find_position(s, match.end(), last, last_pos)
if expr == delimiters[0] and in_expr:
raise TemplateError('%s inside expression' % delimiters[0],
position=pos,
name=name)
elif expr == delimiters[1] and not in_expr:
raise TemplateError('%s outside expression' % delimiters[1],
position=pos,
name=name)
if expr == delimiters[0]:
part = s[last:match.start()]
if part:
chunks.append(part)
in_expr = True
else:
chunks.append((s[last:match.start()], last_pos))
in_expr = False
last = match.end()
last_pos = pos
if in_expr:
raise TemplateError('No %s to finish last expression' % delimiters[1],
name=name, position=last_pos)
part = s[last:]
if part:
chunks.append(part)
if trim_whitespace:
chunks = trim_lex(chunks)
return chunks
statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)')
single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break']
trail_whitespace_re = re.compile(r'\n\r?[\t ]*$')
lead_whitespace_re = re.compile(r'^[\t ]*\n')
def trim_lex(tokens):
r"""
Takes a lexed set of tokens, and removes whitespace when there is
a directive on a line by itself:
>>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
>>> tokens
[('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
>>> trim_lex(tokens)
[('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
"""
last_trim = None
for i, current in enumerate(tokens):
if isinstance(current, basestring_):
# we don't trim this
continue
item = current[0]
if not statement_re.search(item) and item not in single_statements:
continue
if not i:
prev = ''
else:
prev = tokens[i - 1]
if i + 1 >= len(tokens):
next_chunk = ''
else:
next_chunk = tokens[i + 1]
if (not isinstance(next_chunk, basestring_)
or not isinstance(prev, basestring_)):
continue
prev_ok = not prev or trail_whitespace_re.search(prev)
if i == 1 and not prev.strip():
prev_ok = True
if last_trim is not None and last_trim + 2 == i and not prev.strip():
prev_ok = 'last'
if (prev_ok
and (not next_chunk or lead_whitespace_re.search(next_chunk)
or (i == len(tokens) - 2 and not next_chunk.strip()))):
if prev:
if ((i == 1 and not prev.strip())
or prev_ok == 'last'):
tokens[i - 1] = ''
else:
m = trail_whitespace_re.search(prev)
# +1 to leave the leading \n on:
prev = prev[:m.start() + 1]
tokens[i - 1] = prev
if next_chunk:
last_trim = i
if i == len(tokens) - 2 and not next_chunk.strip():
tokens[i + 1] = ''
else:
m = lead_whitespace_re.search(next_chunk)
next_chunk = next_chunk[m.end():]
tokens[i + 1] = next_chunk
return tokens
def find_position(string, index, last_index, last_pos):
"""Given a string and index, return (line, column)"""
lines = string.count('\n', last_index, index)
if lines > 0:
column = index - string.rfind('\n', last_index, index)
else:
column = last_pos[1] + (index - last_index)
return (last_pos[0] + lines, column)
def parse(s, name=None, line_offset=0, delimiters=None):
r"""
Parses a string into a kind of AST
>>> parse('{{x}}')
[('expr', (1, 3), 'x')]
>>> parse('foo')
['foo']
>>> parse('{{if x}}test{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
>>> parse('series->{{for x in y}}x={{x}}{{endfor}}')
['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
>>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
[('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
>>> parse('{{py:x=1}}')
[('py', (1, 3), 'x=1')]
>>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
Some exceptions::
>>> parse('{{continue}}')
Traceback (most recent call last):
...
TemplateError: continue outside of for loop at line 1 column 3
>>> parse('{{if x}}foo')
Traceback (most recent call last):
...
TemplateError: No {{endif}} at line 1 column 3
>>> parse('{{else}}')
Traceback (most recent call last):
...
TemplateError: else outside of an if block at line 1 column 3
>>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Unexpected endif at line 1 column 25
>>> parse('{{if}}{{endif}}')
Traceback (most recent call last):
...
TemplateError: if with no expression at line 1 column 3
>>> parse('{{for x y}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
>>> parse('{{py:x=1\ny=2}}')
Traceback (most recent call last):
...
TemplateError: Multi-line py blocks must start with a newline at line 1 column 3
"""
if delimiters is None:
delimiters = ( Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'] )
tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters)
result = []
while tokens:
next_chunk, tokens = parse_expr(tokens, name)
result.append(next_chunk)
return result
def parse_expr(tokens, name, context=()):
if isinstance(tokens[0], basestring_):
return tokens[0], tokens[1:]
expr, pos = tokens[0]
expr = expr.strip()
if expr.startswith('py:'):
expr = expr[3:].lstrip(' \t')
if expr.startswith('\n') or expr.startswith('\r'):
expr = expr.lstrip('\r\n')
if '\r' in expr:
expr = expr.replace('\r\n', '\n')
expr = expr.replace('\r', '')
expr += '\n'
else:
if '\n' in expr:
raise TemplateError(
'Multi-line py blocks must start with a newline',
position=pos, name=name)
return ('py', pos, expr), tokens[1:]
elif expr in ('continue', 'break'):
if 'for' not in context:
raise TemplateError(
'continue outside of for loop',
position=pos, name=name)
return (expr, pos), tokens[1:]
elif expr.startswith('if '):
return parse_cond(tokens, name, context)
elif (expr.startswith('elif ')
or expr == 'else'):
raise TemplateError(
'%s outside of an if block' % expr.split()[0],
position=pos, name=name)
elif expr in ('if', 'elif', 'for'):
raise TemplateError(
'%s with no expression' % expr,
position=pos, name=name)
elif expr in ('endif', 'endfor', 'enddef'):
raise TemplateError(
'Unexpected %s' % expr,
position=pos, name=name)
elif expr.startswith('for '):
return parse_for(tokens, name, context)
elif expr.startswith('default '):
return parse_default(tokens, name, context)
elif expr.startswith('inherit '):
return parse_inherit(tokens, name, context)
elif expr.startswith('def '):
return parse_def(tokens, name, context)
elif expr.startswith('#'):
return ('comment', pos, tokens[0][0]), tokens[1:]
return ('expr', pos, tokens[0][0]), tokens[1:]
def parse_cond(tokens, name, context):
start = tokens[0][1]
pieces = []
context = context + ('if',)
while 1:
if not tokens:
raise TemplateError(
'Missing {{endif}}',
position=start, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'endif'):
return ('cond', start) + tuple(pieces), tokens[1:]
next_chunk, tokens = parse_one_cond(tokens, name, context)
pieces.append(next_chunk)
def parse_one_cond(tokens, name, context):
(first, pos), tokens = tokens[0], tokens[1:]
content = []
if first.endswith(':'):
first = first[:-1]
if first.startswith('if '):
part = ('if', pos, first[3:].lstrip(), content)
elif first.startswith('elif '):
part = ('elif', pos, first[5:].lstrip(), content)
elif first == 'else':
part = ('else', pos, None, content)
else:
assert 0, "Unexpected token %r at %s" % (first, pos)
while 1:
if not tokens:
raise TemplateError(
'No {{endif}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple)
and (tokens[0][0] == 'endif'
or tokens[0][0].startswith('elif ')
or tokens[0][0] == 'else')):
return part, tokens
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_for(tokens, name, context):
first, pos = tokens[0]
tokens = tokens[1:]
context = ('for',) + context
content = []
assert first.startswith('for '), first
if first.endswith(':'):
first = first[:-1]
first = first[3:].strip()
match = in_re.search(first)
if not match:
raise TemplateError(
'Bad for (no "in") in %r' % first,
position=pos, name=name)
vars = first[:match.start()]
if '(' in vars:
raise TemplateError(
'You cannot have () in the variable section of a for loop (%r)'
% vars, position=pos, name=name)
vars = tuple([
v.strip() for v in first[:match.start()].split(',')
if v.strip()])
expr = first[match.end():]
while 1:
if not tokens:
raise TemplateError(
'No {{endfor}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'endfor'):
return ('for', pos, vars, expr, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_default(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('default ')
first = first.split(None, 1)[1]
parts = first.split('=', 1)
if len(parts) == 1:
raise TemplateError(
"Expression must be {{default var=value}}; no = found in %r" % first,
position=pos, name=name)
var = parts[0].strip()
if ',' in var:
raise TemplateError(
"{{default x, y = ...}} is not supported",
position=pos, name=name)
if not var_re.search(var):
raise TemplateError(
"Not a valid variable name for {{default}}: %r"
% var, position=pos, name=name)
expr = parts[1].strip()
return ('default', pos, var, expr), tokens[1:]
def parse_inherit(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('inherit ')
expr = first.split(None, 1)[1]
return ('inherit', pos, expr), tokens[1:]
def parse_def(tokens, name, context):
first, start = tokens[0]
tokens = tokens[1:]
assert first.startswith('def ')
first = first.split(None, 1)[1]
if first.endswith(':'):
first = first[:-1]
if '(' not in first:
func_name = first
sig = ((), None, None, {})
elif not first.endswith(')'):
raise TemplateError("Function definition doesn't end with ): %s" % first,
position=start, name=name)
else:
first = first[:-1]
func_name, sig_text = first.split('(', 1)
sig = parse_signature(sig_text, name, start)
context = context + ('def',)
content = []
while 1:
if not tokens:
raise TemplateError(
'Missing {{enddef}}',
position=start, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'enddef'):
return ('def', start, func_name, sig, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_signature(sig_text, name, pos):
tokens = tokenize.generate_tokens(StringIO(sig_text).readline)
sig_args = []
var_arg = None
var_kw = None
defaults = {}
def get_token(pos=False):
try:
tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens)
except StopIteration:
return tokenize.ENDMARKER, ''
if pos:
return tok_type, tok_string, (srow, scol), (erow, ecol)
else:
return tok_type, tok_string
while 1:
var_arg_type = None
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER:
break
if tok_type == tokenize.OP and (tok_string == '*' or tok_string == '**'):
var_arg_type = tok_string
tok_type, tok_string = get_token()
if tok_type != tokenize.NAME:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
var_name = tok_string
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','):
if var_arg_type == '*':
var_arg = var_name
elif var_arg_type == '**':
var_kw = var_name
else:
sig_args.append(var_name)
if tok_type == tokenize.ENDMARKER:
break
continue
if var_arg_type is not None:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
if tok_type == tokenize.OP and tok_string == '=':
nest_type = None
unnest_type = None
nest_count = 0
start_pos = end_pos = None
parts = []
while 1:
tok_type, tok_string, s, e = get_token(True)
if start_pos is None:
start_pos = s
end_pos = e
if tok_type == tokenize.ENDMARKER and nest_count:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
if (not nest_count and
(tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','))):
default_expr = isolate_expression(sig_text, start_pos, end_pos)
defaults[var_name] = default_expr
sig_args.append(var_name)
break
parts.append((tok_type, tok_string))
if nest_count and tok_type == tokenize.OP and tok_string == nest_type:
nest_count += 1
elif nest_count and tok_type == tokenize.OP and tok_string == unnest_type:
nest_count -= 1
if not nest_count:
nest_type = unnest_type = None
elif not nest_count and tok_type == tokenize.OP and tok_string in ('(', '[', '{'):
nest_type = tok_string
nest_count = 1
unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type]
return sig_args, var_arg, var_kw, defaults
def isolate_expression(string, start_pos, end_pos):
srow, scol = start_pos
srow -= 1
erow, ecol = end_pos
erow -= 1
lines = string.splitlines(True)
if srow == erow:
return lines[srow][scol:ecol]
parts = [lines[srow][scol:]]
parts.extend(lines[srow+1:erow])
if erow < len(lines):
# It'll sometimes give (end_row_past_finish, 0)
parts.append(lines[erow][:ecol])
return ''.join(parts)
_fill_command_usage = """\
%prog [OPTIONS] TEMPLATE arg=value
Use py:arg=value to set a Python value; otherwise all values are
strings.
"""
def fill_command(args=None):
import sys
import optparse
import pkg_resources
import os
if args is None:
args = sys.argv[1:]
dist = pkg_resources.get_distribution('Paste')
parser = optparse.OptionParser(
version=coerce_text(dist),
usage=_fill_command_usage)
parser.add_option(
'-o', '--output',
dest='output',
metavar="FILENAME",
help="File to write output to (default stdout)")
parser.add_option(
'--env',
dest='use_env',
action='store_true',
help="Put the environment in as top-level variables")
options, args = parser.parse_args(args)
if len(args) < 1:
print('You must give a template filename')
sys.exit(2)
template_name = args[0]
args = args[1:]
vars = {}
if options.use_env:
vars.update(os.environ)
for value in args:
if '=' not in value:
print('Bad argument: %r' % value)
sys.exit(2)
name, value = value.split('=', 1)
if name.startswith('py:'):
name = name[:3]
value = eval(value)
vars[name] = value
if template_name == '-':
template_content = sys.stdin.read()
template_name = '<stdin>'
else:
with open(template_name, 'rb') as f:
template_content = f.read()
template = Template(template_content, name=template_name)
result = template.substitute(vars)
if options.output:
with open(options.output, 'wb') as f:
f.write(result)
else:
sys.stdout.write(result)
if __name__ == '__main__':
fill_command()
| _Empty |
python | gevent__gevent | src/gevent/tests/test__greenlet.py | {
"start": 31618,
"end": 32010
} | class ____(greentest.TestCase):
def test_pure(self):
self.assertEqual(greenlet.Greenlet.__module__,
'gevent.greenlet')
self.assertEqual(greenlet.SpawnedLink.__module__,
'gevent.greenlet')
X = object()
del AbstractGenericGetTestCase
del AbstractGenericWaitTestCase
if __name__ == '__main__':
greentest.main()
| TestPure |
python | dask__distributed | distributed/tests/test_worker.py | {
"start": 119344,
"end": 122569
} | class ____:
def __init__(self, main_thread_id):
self.main_thread_id = main_thread_id
self.data = random.randbytes(OFFLOAD_THRESHOLD + 1)
def __sizeof__(self):
return len(self.data)
def __getstate__(self):
assert self.main_thread_id
assert self.main_thread_id != threading.get_ident()
return (self.data, self.main_thread_id, threading.get_ident())
def __setstate__(self, state):
_, main_thread, serialize_thread = state
assert main_thread != threading.get_ident()
return EnsureOffloaded(main_thread)
@gen_cluster(client=True)
async def test_offload_getdata(c, s, a, b):
"""Test that functions wrapped by offload() are metered"""
x = c.submit(EnsureOffloaded, threading.get_ident(), key="x", workers=[a.address])
y = c.submit(lambda x: None, x, key="y", workers=[b.address])
await y
@gen_cluster(client=True)
async def test_startstops(c, s, a, b):
t0 = time()
x = c.submit(inc, 1, key="x", workers=[a.address])
y = c.submit(inc, x, key="y", workers=[b.address])
await wait(y)
t1 = time()
ss = b.state.tasks["y"].startstops
assert len(ss) == 2
assert ss[0]["action"] == "transfer"
assert ss[0]["source"] == a.address
assert ss[1]["action"] == "compute"
assert (
t0 + b.scheduler_delay
< ss[0]["start"]
< ss[0]["stop"]
< ss[1]["start"]
< ss[1]["stop"]
< t1 + b.scheduler_delay
)
@gen_cluster(client=True, nthreads=[("", 1)])
@pytest.mark.parametrize("state", ["cancelled", "resumed"])
async def test_suppress_keyerror_for_cancelled_tasks(c, s, a, state):
async with BlockedExecute(s.address) as b:
with captured_logger("distributed.worker", level=logging.ERROR) as log:
x = (await c.scatter({"x": 1}))["x"]
y = c.submit(inc, x, key="y", workers=[b.address])
await b.in_execute.wait()
del x, y
await async_poll_for(lambda: "x" not in b.data, timeout=5)
if state == "resumed":
y = c.submit(inc, 1, key="y", workers=[a.address])
z = c.submit(inc, y, key="z", workers=[b.address])
await wait_for_state("y", "resumed", b)
b.block_execute.set()
b.block_execute_exit.set()
if state == "resumed":
assert await z == 3
del y, z
await async_poll_for(lambda: not b.state.tasks, timeout=5)
assert not log.getvalue()
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_suppress_compute_failure_for_cancelled_tasks(c, s, a):
with captured_logger("distributed.worker", level=logging.WARNING) as log:
in_event = Event()
block_event = Event()
def block_and_raise(in_event, block_event):
in_event.set()
block_event.wait()
return 1 / 0
x = c.submit(block_and_raise, in_event, block_event, key="x")
await in_event.wait()
del x
await wait_for_state("x", "cancelled", a)
await block_event.set()
await async_poll_for(lambda: not a.state.tasks, timeout=5)
assert not log.getvalue()
| EnsureOffloaded |
python | pydantic__pydantic | tests/benchmarks/test_attribute_access.py | {
"start": 260,
"end": 2311
} | class ____(BaseModel):
field1: str
field2: int
field3: float
inner1: InnerValidateAssignment
inner2: InnerValidateAssignment
_private_field1: str
_private_field2: int
_private_field3: float
@cached_property
def prop_cached1(self) -> str:
return self.field1 + self._private_field1
@cached_property
def prop_cached2(self) -> int:
return self.field2 + self._private_field2
@cached_property
def prop_cached3(self) -> float:
return self.field3 + self._private_field3
def test_setattr(benchmark):
def set_attrs(m):
m.field1 = 'test1'
m.field2 = 43
m.field3 = 4.0
m.inner1.inner_field1 = 'test inner1'
m.inner1.inner_field2 = 421
m.inner2.inner_field1 = 'test inner2'
m.inner2.inner_field2 = 422
m._private_field1 = 'test2'
m._private_field2 = 44
m._private_field3 = 5.1
m.prop_cached1 = 'cache override'
m.prop_cached2 = 10
m.prop_cached3 = 10.1
inner = {'inner_field1': 'test inner', 'inner_field2': 420}
model = Model(field1='test', field2=42, field3=3.14, inner1=inner, inner2=inner)
benchmark(set_attrs, model)
model.field2 = 'bad' # check benchmark setup
with pytest.raises(ValidationError):
model.inner1.field2 = 'bad'
def test_getattr(benchmark):
def get_attrs(m):
_ = m.field1
_ = m.field2
_ = m.field3
_ = m.inner1.inner_field1
_ = m.inner1.inner_field2
_ = m.inner2.inner_field1
_ = m.inner2.inner_field2
_ = m._private_field1
_ = m._private_field2
_ = m._private_field3
_ = m.prop_cached1
_ = m.prop_cached2
_ = m.prop_cached3
inner = {'inner_field1': 'test inner', 'inner_field2': 420}
model = Model(field1='test1', field2=42, field3=3.14, inner1=inner, inner2=inner)
model._private_field1 = 'test2'
model._private_field2 = 43
model._private_field3 = 4.14
benchmark(get_attrs, model)
| Model |
python | doocs__leetcode | solution/0400-0499/0422.Valid Word Square/Solution.py | {
"start": 0,
"end": 294
} | class ____:
def validWordSquare(self, words: List[str]) -> bool:
m = len(words)
for i, w in enumerate(words):
for j, c in enumerate(w):
if j >= m or i >= len(words[j]) or c != words[j][i]:
return False
return True
| Solution |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Filters.py | {
"start": 10827,
"end": 12496
} | class ____(CtrlNode):
nodeName = 'RemovePeriodic'
uiTemplate = [
#('windowSize', 'intSpin', {'value': 500, 'min': 10, 'max': 1000000, 'suffix': 'pts'}),
#('numBins', 'intSpin', {'value': 50, 'min': 3, 'max': 1000000})
('f0', 'spin', {'value': 60, 'suffix': 'Hz', 'siPrefix': True, 'min': 0, 'max': None}),
('harmonics', 'intSpin', {'value': 30, 'min': 0}),
('samples', 'intSpin', {'value': 1, 'min': 1}),
]
def processData(self, data):
times = data.xvals('Time')
dt = times[1]-times[0]
data1 = data.asarray()
ft = np.fft.fft(data1)
## determine frequencies in fft data
df = 1.0 / (len(data1) * dt)
## flatten spikes at f0 and harmonics
f0 = self.ctrls['f0'].value()
for i in range(1, self.ctrls['harmonics'].value()+2):
f = f0 * i # target frequency
## determine index range to check for this frequency
ind1 = int(np.floor(f / df))
ind2 = int(np.ceil(f / df)) + (self.ctrls['samples'].value()-1)
if ind1 > len(ft)/2.:
break
mag = (abs(ft[ind1-1]) + abs(ft[ind2+1])) * 0.5
for j in range(ind1, ind2+1):
phase = np.angle(ft[j]) ## Must preserve the phase of each point, otherwise any transients in the trace might lead to large artifacts.
re = mag * np.cos(phase)
im = mag * np.sin(phase)
ft[j] = re + im*1j
ft[len(ft)-j] = re - im*1j
data2 = np.fft.ifft(ft).real
return data2
| RemovePeriodic |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 258205,
"end": 258533
} | class ____(VegaLiteSchema):
"""ConditionalValueDefGradientstringnullExprRef schema wrapper."""
_schema = {
"$ref": "#/definitions/ConditionalValueDef<(Gradient|string|null|ExprRef)>"
}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ConditionalValueDefGradientstringnullExprRef |
python | dagster-io__dagster | python_modules/dagster/dagster/components/lib/executable_component/function_component.py | {
"start": 2596,
"end": 3654
} | class ____:
def __init__(self, execute_fn: Callable):
self.execute_fn = execute_fn
found_args = {"context"} | self.resource_keys | ({"config"} if self.config_cls else set())
extra_args = self.function_params_names - found_args
if extra_args:
check.failed(
f"Found extra arguments in execute_fn: {extra_args}. "
"Arguments must be valid resource params or annotated with Upstream"
)
@cached_property
def resource_keys(self) -> set[str]:
return {arg.name for arg in get_resource_args(self.execute_fn)}
@cached_property
def function_params_names(self) -> set[str]:
return {arg.name for arg in get_function_params(self.execute_fn)}
@cached_property
def config_cls(self) -> Union[type, None]:
return get_config_param_type(self.execute_fn)
@cached_property
def config_fields(self) -> Optional[dict[str, Field]]:
return self.config_cls.to_fields_dict() if self.config_cls else None
@public
| ExecuteFnMetadata |
python | tensorflow__tensorflow | tensorflow/python/ops/nccl_ops_test.py | {
"start": 5538,
"end": 6381
} | class ____(NcclTestCase):
def testBroadcast(self):
self._Test(_NcclBroadcast, lambda x, y: x)
def testBroadcastSingleDevice(self):
# Broadcasts on a single device are removed completely during rewrite.
self._Test(_NcclBroadcast, lambda x, y: x,
(['/device:GPU:0', '/device:GPU:0'],))
def testBroadcastToCpuError(self):
try:
# Broadcasts to CPU is not supported.
self._Test(_NcclBroadcast, lambda x, y: x,
(['/device:GPU:0', '/device:CPU:0'],))
except errors.NotFoundError as e:
self.assertRegex(
str(e), "No registered '_NcclBroadcastRecv' OpKernel for CPU devices")
else:
# Session isn't executed when no GPU is available.
if test.is_gpu_available():
self.fail("Didn't raise NotFoundError trying to broadcast to CPU")
| BroadcastTest |
python | openai__openai-python | src/openai/resources/fine_tuning/checkpoints/permissions.py | {
"start": 1115,
"end": 8145
} | class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> PermissionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return PermissionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> PermissionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return PermissionsWithStreamingResponse(self)
def create(
self,
fine_tuned_model_checkpoint: str,
*,
project_ids: SequenceNotStr[str],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncPage[PermissionCreateResponse]:
"""
**NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys).
This enables organization owners to share fine-tuned models with other projects
in their organization.
Args:
project_ids: The project identifiers to grant access to.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuned_model_checkpoint:
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
return self._get_api_list(
f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
page=SyncPage[PermissionCreateResponse],
body=maybe_transform({"project_ids": project_ids}, permission_create_params.PermissionCreateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
model=PermissionCreateResponse,
method="post",
)
def retrieve(
self,
fine_tuned_model_checkpoint: str,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["ascending", "descending"] | Omit = omit,
project_id: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> PermissionRetrieveResponse:
"""
**NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
Organization owners can use this endpoint to view all permissions for a
fine-tuned model checkpoint.
Args:
after: Identifier for the last permission ID from the previous pagination request.
limit: Number of permissions to retrieve.
order: The order in which to retrieve permissions.
project_id: The ID of the project to get permissions for.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuned_model_checkpoint:
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
return self._get(
f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"project_id": project_id,
},
permission_retrieve_params.PermissionRetrieveParams,
),
),
cast_to=PermissionRetrieveResponse,
)
def delete(
self,
permission_id: str,
*,
fine_tuned_model_checkpoint: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> PermissionDeleteResponse:
"""
**NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
Organization owners can use this endpoint to delete a permission for a
fine-tuned model checkpoint.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuned_model_checkpoint:
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
if not permission_id:
raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
return self._delete(
f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=PermissionDeleteResponse,
)
| Permissions |
python | kubernetes-client__python | kubernetes/client/models/v1_pod_template_spec.py | {
"start": 383,
"end": 4030
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'metadata': 'V1ObjectMeta',
'spec': 'V1PodSpec'
}
attribute_map = {
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1PodTemplateSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._metadata = None
self._spec = None
self.discriminator = None
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
@property
def metadata(self):
"""Gets the metadata of this V1PodTemplateSpec. # noqa: E501
:return: The metadata of this V1PodTemplateSpec. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1PodTemplateSpec.
:param metadata: The metadata of this V1PodTemplateSpec. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1PodTemplateSpec. # noqa: E501
:return: The spec of this V1PodTemplateSpec. # noqa: E501
:rtype: V1PodSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1PodTemplateSpec.
:param spec: The spec of this V1PodTemplateSpec. # noqa: E501
:type: V1PodSpec
"""
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PodTemplateSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PodTemplateSpec):
return True
return self.to_dict() != other.to_dict()
| V1PodTemplateSpec |
python | django__django | tests/postgres_tests/test_array.py | {
"start": 47890,
"end": 54552
} | class ____(PostgreSQLSimpleTestCase):
def test_valid(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {"array_0": "a", "array_1": "b", "array_2": "c"}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {"array": ["a", "b", "c"]})
def test_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), required=True, size=3)
data = {"array_0": "", "array_1": "", "array_2": ""}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {"array": ["This field is required."]})
def test_remove_trailing_nulls(self):
class SplitForm(forms.Form):
array = SplitArrayField(
forms.CharField(required=False), size=5, remove_trailing_nulls=True
)
data = {
"array_0": "a",
"array_1": "",
"array_2": "b",
"array_3": "",
"array_4": "",
}
form = SplitForm(data)
self.assertTrue(form.is_valid(), form.errors)
self.assertEqual(form.cleaned_data, {"array": ["a", "", "b"]})
def test_remove_trailing_nulls_not_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(
forms.CharField(required=False),
size=2,
remove_trailing_nulls=True,
required=False,
)
data = {"array_0": "", "array_1": ""}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {"array": []})
def test_required_field(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {"array_0": "a", "array_1": "b", "array_2": ""}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
"array": [
"Item 3 in the array did not validate: This field is required."
]
},
)
def test_invalid_integer(self):
msg = (
"Item 2 in the array did not validate: Ensure this value is less than or "
"equal to 100."
)
with self.assertRaisesMessage(exceptions.ValidationError, msg):
SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101])
def test_rendering(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
self.assertHTMLEqual(
str(SplitForm()),
"""
<div>
<label for="id_array_0">Array:</label>
<input id="id_array_0" name="array_0" type="text" required>
<input id="id_array_1" name="array_1" type="text" required>
<input id="id_array_2" name="array_2" type="text" required>
</div>
""",
)
def test_invalid_char_length(self):
field = SplitArrayField(forms.CharField(max_length=2), size=3)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(["abc", "c", "defg"])
self.assertEqual(
cm.exception.messages,
[
"Item 1 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 3).",
"Item 3 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 4).",
],
)
def test_invalid_char_length_with_remove_trailing_nulls(self):
field = SplitArrayField(
forms.CharField(max_length=2, required=False),
size=3,
remove_trailing_nulls=True,
)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(["abc", "", ""])
self.assertEqual(
cm.exception.messages,
[
"Item 1 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 3).",
],
)
def test_splitarraywidget_value_omitted_from_data(self):
class Form(forms.ModelForm):
field = SplitArrayField(forms.IntegerField(), required=False, size=2)
class Meta:
model = IntegerArrayModel
fields = ("field",)
form = Form({"field_0": "1", "field_1": "2"})
self.assertEqual(form.errors, {})
obj = form.save(commit=False)
self.assertEqual(obj.field, [1, 2])
def test_splitarrayfield_has_changed(self):
class Form(forms.ModelForm):
field = SplitArrayField(forms.IntegerField(), required=False, size=2)
class Meta:
model = IntegerArrayModel
fields = ("field",)
tests = [
({}, {"field_0": "", "field_1": ""}, True),
({"field": None}, {"field_0": "", "field_1": ""}, True),
({"field": [1]}, {"field_0": "", "field_1": ""}, True),
({"field": [1]}, {"field_0": "1", "field_1": "0"}, True),
({"field": [1, 2]}, {"field_0": "1", "field_1": "2"}, False),
({"field": [1, 2]}, {"field_0": "a", "field_1": "b"}, True),
]
for initial, data, expected_result in tests:
with self.subTest(initial=initial, data=data):
obj = IntegerArrayModel(**initial)
form = Form(data, instance=obj)
self.assertIs(form.has_changed(), expected_result)
def test_splitarrayfield_remove_trailing_nulls_has_changed(self):
class Form(forms.ModelForm):
field = SplitArrayField(
forms.IntegerField(), required=False, size=2, remove_trailing_nulls=True
)
class Meta:
model = IntegerArrayModel
fields = ("field",)
tests = [
({}, {"field_0": "", "field_1": ""}, False),
({"field": None}, {"field_0": "", "field_1": ""}, False),
({"field": []}, {"field_0": "", "field_1": ""}, False),
({"field": [1]}, {"field_0": "1", "field_1": ""}, False),
]
for initial, data, expected_result in tests:
with self.subTest(initial=initial, data=data):
obj = IntegerArrayModel(**initial)
form = Form(data, instance=obj)
self.assertIs(form.has_changed(), expected_result)
| TestSplitFormField |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeParams1.py | {
"start": 295,
"end": 568
} | class ____:
class ClassE: ...
class ClassF:
class A[T]: ...
int_alias = int
class B(A[int_alias]):
pass
# This should generate an error because ClassE is out of scope.
class C(A[ClassE]):
pass
| ClassD |
python | fluentpython__example-code | 20-descriptor/bulkfood/bulkfood_v5_check.py | {
"start": 1892,
"end": 2230
} | class ____:
description = model.Check(non_blank)
weight = model.Check(gt_zero)
price = model.Check(gt_zero)
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
| LineItem |
python | getsentry__sentry | tools/flake8_plugin.py | {
"start": 6285,
"end": 6639
} | class ____:
def __init__(self, tree: ast.AST, filename: str) -> None:
self.tree = tree
self.filename = filename
def run(self) -> Generator[tuple[int, int, str, type[Any]]]:
visitor = SentryVisitor(self.filename)
visitor.visit(self.tree)
for e in visitor.errors:
yield (*e, type(self))
| SentryCheck |
python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {
"start": 2031,
"end": 2809
} | class ____(nn.Module):
"""
Compute batch normalization over the sequence length (time) dimension.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.batchnorm = nn.BatchNorm1d(config.d_model, eps=config.norm_eps)
def forward(self, inputs: torch.Tensor):
"""
Parameters:
inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`):
input for Batch norm calculation
Returns:
`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`
"""
output = inputs.transpose(1, 2) # output: (batch_size, d_model, sequence_length)
output = self.batchnorm(output)
return output.transpose(1, 2)
| PatchTSMixerBatchNorm |
python | Textualize__textual | src/textual/_types.py | {
"start": 620,
"end": 1935
} | class ____:
"""Helper type for a parameter that isn't specified in a method call."""
SegmentLines = List[List["Segment"]]
CallbackType = Union[Callable[[], Awaitable[None]], Callable[[], None]]
"""Type used for arbitrary callables used in callbacks."""
IgnoreReturnCallbackType = Union[Callable[[], Awaitable[Any]], Callable[[], Any]]
"""A callback which ignores the return type."""
WatchCallbackBothValuesType = Union[
Callable[[Any, Any], Awaitable[None]],
Callable[[Any, Any], None],
]
"""Type for watch methods that accept the old and new values of reactive objects."""
WatchCallbackNewValueType = Union[
Callable[[Any], Awaitable[None]],
Callable[[Any], None],
]
"""Type for watch methods that accept only the new value of reactive objects."""
WatchCallbackNoArgsType = Union[
Callable[[], Awaitable[None]],
Callable[[], None],
]
"""Type for watch methods that do not require the explicit value of the reactive."""
WatchCallbackType = Union[
WatchCallbackBothValuesType,
WatchCallbackNewValueType,
WatchCallbackNoArgsType,
]
"""Type used for callbacks passed to the `watch` method of widgets."""
AnimationLevel = Literal["none", "basic", "full"]
"""The levels that the [`TEXTUAL_ANIMATIONS`][textual.constants.TEXTUAL_ANIMATIONS] env var can be set to."""
| UnusedParameter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.