language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | test/jit/test_backends.py | {
"start": 3605,
"end": 5202
} | class ____(JitBackendTestCase):
"""
Tests for BasicModule.
"""
def setUp(self):
super().setUp()
# Create Python, JIT and backend versions of BasicModule.
self.module = BasicModule()
self.scripted_module = torch.jit.script(BasicModule())
self.lowered_module = to_test_backend_multi(
self.scripted_module,
{"accum": {"": ""}, "sub_accum": {"": ""}, "forward": {"": ""}},
)
def test_execution(self):
# Test execution with backend against Python and JIT.
input = torch.randn(5)
# Test all three module methods.
self.check_function("accum", (input, input))
self.check_function("sub_accum", (input, input))
self.check_function("forward", (input, input))
def test_save_load(self):
# Lowered module should produce the same outputs.
self.test_execution()
# Save the compile spec to compare against the version retrieved after loading.
pre_compile_spec = self.lowered_module.__getattr__(
"__loweredModule__"
).__getattr__("__method_compile_spec")
# Save and load the lowered module.
self.save_load()
# Get the compile spec after loading.
post_compile_spec = self.lowered_module.__getattr__(
"__loweredModule__"
).__getattr__("__method_compile_spec")
# Compile specs should match.
self.assertEqual(pre_compile_spec, post_compile_spec)
# Loaded module should produce the same outputs.
self.test_execution()
| BasicModuleTest |
python | ray-project__ray | doc/source/ray-more-libs/doc_code/dask_on_ray_callbacks.py | {
"start": 1920,
"end": 2800
} | class ____(RayDaskCallback):
def __init__(self, cache_actor_handle, put_threshold=10):
self.cache_actor = cache_actor_handle
self.put_threshold = put_threshold
def _ray_presubmit(self, task, key, deps):
try:
return ray.get(self.cache_actor.get.remote(str(key)))
except KeyError:
return None
def _ray_pretask(self, key, object_refs):
start_time = timer()
return start_time
def _ray_posttask(self, key, result, pre_state):
execution_time = timer() - pre_state
if execution_time > self.put_threshold:
self.cache_actor.put.remote(str(key), result)
cache_actor = SimpleCacheActor.remote()
cache_callback = SimpleCacheCallback(cache_actor, put_threshold=2)
with cache_callback:
z.compute(scheduler=ray_dask_get)
# __caching_actor_end__
# fmt: on
| SimpleCacheCallback |
python | neetcode-gh__leetcode | python/2235-add-two-integers.py | {
"start": 0,
"end": 91
} | class ____:
def sum(self, num1: int, num2: int) -> int:
return num1 + num2
| Solution |
python | Netflix__metaflow | metaflow/plugins/argo/argo_workflows.py | {
"start": 205171,
"end": 205732
} | class ____(object):
# https://argoproj.github.io/argo-workflows/fields/#inputs
def __init__(self):
tree = lambda: defaultdict(tree)
self.payload = tree()
def parameters(self, parameters):
if "parameters" not in self.payload:
self.payload["parameters"] = []
for parameter in parameters:
self.payload["parameters"].append(parameter.to_json())
return self
def to_json(self):
return self.payload
def __str__(self):
return json.dumps(self.payload, indent=4)
| Inputs |
python | apache__airflow | providers/apache/hive/src/airflow/providers/apache/hive/sensors/named_hive_partition.py | {
"start": 1047,
"end": 4196
} | class ____(BaseSensorOperator):
"""
Waits for a set of partitions to show up in Hive.
:param partition_names: List of fully qualified names of the
partitions to wait for. A fully qualified name is of the
form ``schema.table/pk1=pv1/pk2=pv2``, for example,
default.users/ds=2016-01-01. This is passed as is to the metastore
Thrift client ``get_partitions_by_name`` method. Note that
you cannot use logical or comparison operators as in
HivePartitionSensor.
:param metastore_conn_id: Reference to the
:ref:`metastore thrift service connection id <howto/connection:hive_metastore>`.
"""
template_fields: Sequence[str] = ("partition_names",)
ui_color = "#8d99ae"
def __init__(
self,
*,
partition_names: list[str],
metastore_conn_id: str = "metastore_default",
poke_interval: int = 60 * 3,
hook: Any = None,
**kwargs: Any,
):
super().__init__(poke_interval=poke_interval, **kwargs)
self.next_index_to_poke = 0
if isinstance(partition_names, str):
raise TypeError("partition_names must be an array of strings")
self.metastore_conn_id = metastore_conn_id
self.partition_names = partition_names
self.hook = hook
if self.hook and metastore_conn_id != "metastore_default":
self.log.warning(
"A hook was passed but a non default metastore_conn_id=%s was used", metastore_conn_id
)
@staticmethod
def parse_partition_name(partition: str) -> tuple[Any, ...]:
"""Get schema, table, and partition info."""
first_split = partition.split(".", 1)
if len(first_split) == 1:
schema = "default"
table_partition = max(first_split) # poor man first
else:
schema, table_partition = first_split
second_split = table_partition.split("/", 1)
if len(second_split) == 1:
raise ValueError(f"Could not parse {partition}into table, partition")
table, partition = second_split
return schema, table, partition
def poke_partition(self, partition: str) -> Any:
"""Check for a named partition."""
if not self.hook:
from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook
self.hook = HiveMetastoreHook(metastore_conn_id=self.metastore_conn_id)
schema, table, partition = self.parse_partition_name(partition)
self.log.info("Poking for %s.%s/%s", schema, table, partition)
return self.hook.check_for_named_partition(schema, table, partition)
def poke(self, context: Context) -> bool:
number_of_partitions = len(self.partition_names)
poke_index_start = self.next_index_to_poke
for i in range(number_of_partitions):
self.next_index_to_poke = (poke_index_start + i) % number_of_partitions
if not self.poke_partition(self.partition_names[self.next_index_to_poke]):
return False
self.next_index_to_poke = 0
return True
| NamedHivePartitionSensor |
python | neetcode-gh__leetcode | python/0199-binary-tree-right-side-view.py | {
"start": 192,
"end": 694
} | class ____:
def rightSideView(self, root: TreeNode) -> List[int]:
res = []
q = collections.deque([root])
while q:
rightSide = None
qLen = len(q)
for i in range(qLen):
node = q.popleft()
if node:
rightSide = node
q.append(node.left)
q.append(node.right)
if rightSide:
res.append(rightSide.val)
return res
| Solution |
python | ray-project__ray | doc/source/serve/doc_code/http_guide/http_guide.py | {
"start": 3128,
"end": 3420
} | class ____:
def __init__(self, child):
self.child = child
serve.run(ParentDeployment.bind(ChildDeployment.bind()))
resp = requests.get("http://localhost:8000/")
assert resp.json() == {"message": "Hello from the child deployment!"}
# __end_fastapi_factory_pattern__
| ParentDeployment |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride3.py | {
"start": 1160,
"end": 1233
} | class ____(Generic[_T_F]):
def do_stuff(self) -> Iterable[_T_F]: ...
| F1 |
python | tensorflow__tensorflow | tensorflow/python/grappler/cluster_test.py | {
"start": 1159,
"end": 7610
} | class ____(test.TestCase):
def testBasic(self):
with ops.Graph().as_default() as g:
a = random_ops.random_uniform(shape=())
b = random_ops.random_uniform(shape=())
c = a + b
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(c)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
grappler_cluster = cluster.Cluster(
disable_detailed_stats=False, disable_timeline=False)
op_perfs, run_time, step_stats = grappler_cluster.MeasureCosts(
grappler_item)
self.assertTrue(run_time > 0)
self.assertEqual(len(op_perfs), 4)
self.assertTrue(step_stats.dev_stats)
def testNoDetailedStats(self):
with ops.Graph().as_default() as g:
a = random_ops.random_uniform(shape=())
b = random_ops.random_uniform(shape=())
c = a + b
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(c)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
grappler_cluster = cluster.Cluster(disable_detailed_stats=True)
op_perfs, run_time, step_stats = grappler_cluster.MeasureCosts(
grappler_item)
self.assertTrue(run_time > 0)
self.assertEqual(len(op_perfs), 0)
self.assertEqual(len(step_stats.dev_stats), 0)
def testMemoryEstimates(self):
with ops.Graph().as_default() as g:
with ops.device('/job:localhost/replica:0/task:0/device:CPU:0'):
a = random_ops.random_uniform(shape=())
b = random_ops.random_uniform(shape=())
c = a + b
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(c)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
grappler_cluster = cluster.Cluster(
disable_detailed_stats=True, disable_timeline=True)
peak_mem = grappler_cluster.DeterminePeakMemoryUsage(grappler_item)
self.assertLessEqual(1, len(peak_mem))
snapshot = peak_mem['/job:localhost/replica:0/task:0/device:CPU:0']
peak_usage = snapshot[0]
self.assertEqual(12, peak_usage)
live_tensors = snapshot[1]
self.assertEqual(5, len(live_tensors))
def testVirtualCluster(self):
with ops.Graph().as_default() as g:
with ops.device('/device:GPU:0'):
a = random_ops.random_uniform(shape=[1024, 1024])
b = random_ops.random_uniform(shape=[1024, 1024])
c = a + b
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(c)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
device_properties = device_properties_pb2.DeviceProperties(
type='GPU',
frequency=1000,
num_cores=60,
environment={'architecture': '7'})
named_device = device_properties_pb2.NamedDevice(
properties=device_properties, name='/device:GPU:0')
grappler_cluster = cluster.Cluster(
disable_detailed_stats=False,
disable_timeline=False,
devices=[named_device])
op_perfs, run_time, _ = grappler_cluster.MeasureCosts(grappler_item)
self.assertEqual(run_time, 0.000209)
self.assertEqual(len(op_perfs), 5)
estimated_perf = grappler_cluster.EstimatePerformance(named_device)
self.assertEqual(7680.0, estimated_perf)
def testContext(self):
with ops.Graph().as_default() as g:
a = random_ops.random_uniform(shape=())
b = random_ops.random_uniform(shape=())
c = a + b
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(c)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
with cluster.Provision(
disable_detailed_stats=False, disable_timeline=False) as gcluster:
op_perfs, run_time, step_stats = gcluster.MeasureCosts(grappler_item)
self.assertTrue(run_time > 0)
self.assertEqual(len(op_perfs), 4)
self.assertTrue(step_stats.dev_stats)
def testAvailableOps(self):
with cluster.Provision() as gcluster:
op_names = gcluster.ListAvailableOps()
self.assertTrue('Add' in op_names)
self.assertTrue('MatMul' in op_names)
self.assertEqual(op_names, sorted(op_names))
def testSupportDevices(self):
with ops.Graph().as_default() as g:
a = random_ops.random_uniform(shape=(2, 3))
b = random_ops.random_uniform(shape=(2, 3))
c = a + b
dims = math_ops.range(0, array_ops.rank(c), 1)
d = math_ops.reduce_sum(a, axis=dims)
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(d)
mg = meta_graph.create_meta_graph_def(graph=g)
grappler_item = item.Item(mg)
device_properties = device_properties_pb2.DeviceProperties(
type='GPU', frequency=1000, num_cores=60)
named_gpu = device_properties_pb2.NamedDevice(
properties=device_properties, name='/GPU:0')
device_properties = device_properties_pb2.DeviceProperties(
type='CPU', frequency=3000, num_cores=6)
named_cpu = device_properties_pb2.NamedDevice(
properties=device_properties, name='/CPU:0')
virtual_cluster = cluster.Cluster(devices=[named_cpu, named_gpu])
supported_dev = virtual_cluster.GetSupportedDevices(grappler_item)
self.assertEqual(supported_dev['add'], ['/CPU:0', '/GPU:0'])
self.assertEqual(supported_dev['Sum'], ['/CPU:0', '/GPU:0'])
self.assertEqual(supported_dev['range'], ['/CPU:0', '/GPU:0'])
real_cluster = cluster.Cluster()
supported_dev = real_cluster.GetSupportedDevices(grappler_item)
if test.is_gpu_available():
self.assertEqual(supported_dev['add'], [
'/job:localhost/replica:0/task:0/device:CPU:0',
'/job:localhost/replica:0/task:0/device:GPU:0'
])
self.assertEqual(supported_dev['Sum'], [
'/job:localhost/replica:0/task:0/device:CPU:0',
'/job:localhost/replica:0/task:0/device:GPU:0'
])
# The axis tensor must reside on the host
self.assertEqual(supported_dev['range'],
['/job:localhost/replica:0/task:0/device:CPU:0'])
else:
self.assertEqual(supported_dev['add'],
['/job:localhost/replica:0/task:0/device:CPU:0'])
if __name__ == '__main__':
test.main()
| ClusterTest |
python | getsentry__sentry | tests/sentry/incidents/endpoints/serializers/test_incident.py | {
"start": 434,
"end": 1381
} | class ____(TestCase):
@freeze_time()
def test_simple(self) -> None:
incident = self.create_incident(date_started=timezone.now() - timedelta(minutes=5))
result = serialize(incident)
assert result["id"] == str(incident.id)
assert result["identifier"] == str(incident.identifier)
assert result["organizationId"] == str(incident.organization_id)
assert result["projects"] == [p.slug for p in incident.projects.all()]
assert result["status"] == incident.status
assert result["statusMethod"] == incident.status_method
assert result["type"] == incident.type
assert result["title"] == incident.title
assert result["dateStarted"] == incident.date_started
assert result["dateDetected"] == incident.date_detected
assert result["dateCreated"] == incident.date_added
assert result["dateClosed"] == incident.date_closed
| IncidentSerializerTest |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_lambda_function.py | {
"start": 1702,
"end": 6222
} | class ____:
def test_init(self):
op = LambdaCreateFunctionOperator(
task_id="task_test",
function_name=FUNCTION_NAME,
role=ROLE_ARN,
code={
"ImageUri": IMAGE_URI,
},
aws_conn_id="aws_conn_test",
region_name="foo-bar-1",
verify="/spam/egg.pem",
botocore_config={"baz": "qux"},
)
assert op.function_name == FUNCTION_NAME
assert op.role == ROLE_ARN
assert op.code == {"ImageUri": IMAGE_URI}
assert op.aws_conn_id == "aws_conn_test"
assert op.region_name == "foo-bar-1"
assert op.verify == "/spam/egg.pem"
assert op.botocore_config == {"baz": "qux"}
@mock.patch.object(LambdaHook, "create_lambda")
@mock.patch.object(LambdaHook, "conn")
def test_create_lambda_without_wait_for_completion(self, mock_hook_conn, mock_hook_create_lambda):
operator = LambdaCreateFunctionOperator(
task_id="task_test",
function_name=FUNCTION_NAME,
role=ROLE_ARN,
code={
"ImageUri": IMAGE_URI,
},
)
operator.execute(None)
mock_hook_create_lambda.assert_called_once()
mock_hook_conn.get_waiter.assert_not_called()
@mock.patch.object(LambdaHook, "create_lambda")
@mock.patch.object(LambdaHook, "conn")
@pytest.mark.parametrize(
"op_kwargs",
[
pytest.param({}, id="no-additional-parameters"),
pytest.param(
{"region_name": "eu-west-1", "verify": True, "botocore_config": {}},
id="additional-parameters",
),
],
)
def test_create_lambda_with_wait_for_completion(self, mock_hook_conn, mock_hook_create_lambda, op_kwargs):
operator = LambdaCreateFunctionOperator(
task_id="task_test",
function_name=FUNCTION_NAME,
role=ROLE_ARN,
code={
"ImageUri": IMAGE_URI,
},
wait_for_completion=True,
aws_conn_id="aws_conn_test",
**op_kwargs,
)
operator.execute(None)
mock_hook_create_lambda.assert_called_once()
mock_hook_conn.get_waiter.assert_called_once_with("function_active_v2")
@mock.patch.object(LambdaHook, "create_lambda")
def test_create_lambda_deferrable(self, _):
operator = LambdaCreateFunctionOperator(
task_id="task_test",
function_name=FUNCTION_NAME,
role=ROLE_ARN,
code={
"ImageUri": IMAGE_URI,
},
deferrable=True,
)
with pytest.raises(TaskDeferred):
operator.execute(None)
@mock.patch.object(LambdaHook, "create_lambda")
@mock.patch.object(LambdaHook, "conn")
@pytest.mark.parametrize(
"config",
[
pytest.param(
{
"architectures": ["arm64"],
"logging_config": {"LogFormat": "Text", "LogGroup": "/custom/log-group/"},
"snap_start": {"ApplyOn": "PublishedVersions"},
"ephemeral_storage": {"Size": 1024},
},
id="with-config-argument",
),
],
)
def test_create_lambda_using_config_argument(self, mock_hook_conn, mock_hook_create_lambda, config):
operator = LambdaCreateFunctionOperator(
task_id="task_test",
function_name=FUNCTION_NAME,
role=ROLE_ARN,
code={
"ImageUri": IMAGE_URI,
},
config=config,
)
operator.execute(None)
mock_hook_create_lambda.assert_called_once()
mock_hook_conn.get_waiter.assert_not_called()
assert operator.config.get("logging_config") == config.get("logging_config")
assert operator.config.get("architectures") == config.get("architectures")
assert operator.config.get("snap_start") == config.get("snap_start")
assert operator.config.get("ephemeral_storage") == config.get("ephemeral_storage")
def test_template_fields(self):
operator = LambdaCreateFunctionOperator(
task_id="task_test",
function_name=FUNCTION_NAME,
role=ROLE_ARN,
code={
"ImageUri": IMAGE_URI,
},
)
validate_template_fields(operator)
| TestLambdaCreateFunctionOperator |
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_set_output.py | {
"start": 14024,
"end": 16131
} | class ____(_SetOutputMixin):
def fit(self, X, y=None):
assert isinstance(X, list)
self.n_features_in_ = len(X[0])
return self
def transform(self, X, y=None):
return X
def get_feature_names_out(self, input_features=None):
return np.asarray([f"X{i}" for i in range(self.n_features_in_)], dtype=object)
@pytest.mark.parametrize("dataframe_lib", ["pandas", "polars"])
def test_set_output_list_input(dataframe_lib):
"""Check set_output for list input.
Non-regression test for #27037.
"""
lib = pytest.importorskip(dataframe_lib)
X = [[0, 1, 2, 3], [4, 5, 6, 7]]
est = EstimatorWithListInput()
est.set_output(transform=dataframe_lib)
X_out = est.fit(X).transform(X)
assert isinstance(X_out, lib.DataFrame)
assert_array_equal(X_out.columns, ["X0", "X1", "X2", "X3"])
@pytest.mark.parametrize("name", sorted(ADAPTERS_MANAGER.adapters))
def test_adapter_class_has_interface(name):
"""Check adapters have the correct interface."""
assert isinstance(ADAPTERS_MANAGER.adapters[name], ContainerAdapterProtocol)
def test_check_library_installed(monkeypatch):
"""Check import error changed."""
orig_import_module = importlib.import_module
def patched_import_module(name):
if name == "pandas":
raise ImportError()
orig_import_module(name, package=None)
monkeypatch.setattr(importlib, "import_module", patched_import_module)
msg = "Setting output container to 'pandas' requires"
with pytest.raises(ImportError, match=msg):
check_library_installed("pandas")
def test_get_adapter_from_container():
"""Check the behavior fo `_get_adapter_from_container`."""
pd = pytest.importorskip("pandas")
X = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 100]})
adapter = _get_adapter_from_container(X)
assert adapter.container_lib == "pandas"
err_msg = "The container does not have a registered adapter in scikit-learn."
with pytest.raises(ValueError, match=err_msg):
_get_adapter_from_container(X.to_numpy())
| EstimatorWithListInput |
python | python-pillow__Pillow | src/PIL/XpmImagePlugin.py | {
"start": 3170,
"end": 4400
} | class ____(ImageFile.PyDecoder):
_pulls_fd = True
def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
assert self.fd is not None
data = bytearray()
bpp, palette = self.args
dest_length = self.state.xsize * self.state.ysize
if self.mode == "RGB":
dest_length *= 3
pixel_header = False
while len(data) < dest_length:
line = self.fd.readline()
if not line:
break
if line.rstrip() == b"/* pixels */" and not pixel_header:
pixel_header = True
continue
line = b'"'.join(line.split(b'"')[1:-1])
for i in range(0, len(line), bpp):
key = line[i : i + bpp]
if self.mode == "RGB":
data += palette[key]
else:
data += o8(palette.index(key))
self.set_as_raw(bytes(data))
return -1, 0
#
# Registry
Image.register_open(XpmImageFile.format, XpmImageFile, _accept)
Image.register_decoder("xpm", XpmDecoder)
Image.register_extension(XpmImageFile.format, ".xpm")
Image.register_mime(XpmImageFile.format, "image/xpm")
| XpmDecoder |
python | SmileyChris__easy-thumbnails | easy_thumbnails/tests/test_templatetags.py | {
"start": 13125,
"end": 14187
} | class ____(ThumbnailerBase):
def assertCorrectOutput(self, src, alias_name, **overrides):
options = settings.THUMBNAIL_ALIASES[''][alias_name]
options.update(overrides)
output = self.render_template(src)
expected = self.verify_thumbnail(options['size'], options)
expected_url = ''.join((settings.MEDIA_URL, expected))
self.assertEqual(output, expected_url)
def test_invalid_alias_name(self):
self.assertEqual(
self.render_template('{% thumbnail filename "notanalias" %}'),
''
)
def test_correct_alias(self):
self.assertCorrectOutput('{% thumbnail filename "small" %}', 'small')
def test_alias_overrides(self):
self.assertCorrectOutput(
'{% thumbnail filename "small" upscale %}',
'small',
upscale=True,
)
self.assertCorrectOutput(
'{% thumbnail filename "small" upscale bw %}',
'small',
bw=True,
upscale=True,
)
| ThumbnailTagAliasTest |
python | django-guardian__django-guardian | guardian/testapp/tests/test_orphans.py | {
"start": 670,
"end": 14046
} | class ____(TestCase):
def setUp(self):
# Create objects for which we would assign obj perms
self.target_user1 = User.objects.create(username="user1")
self.target_group1 = Group.objects.create(name="group1")
self.target_obj1 = ContentType.objects.create(model="foo", app_label="fake-for-guardian-tests")
self.target_obj2 = ContentType.objects.create(model="bar", app_label="fake-for-guardian-tests")
# Required if MySQL backend is used :/
create_permissions(auth_app, 1)
self.user = User.objects.create(username="user")
self.group = Group.objects.create(name="group")
def _create_orphan_permissions(self, count=10):
"""Helper method to create a specific number of orphan permissions"""
target_objs = []
for i in range(count):
target_obj = ContentType.objects.create(
model=f"test_model_{self.id()}_{i}", app_label="fake-for-guardian-tests"
)
target_objs.append(target_obj)
# Assign permissions to both user and group
assign_perm("change_contenttype", self.user, target_obj)
assign_perm("delete_contenttype", self.group, target_obj)
# Delete the target objects to create orphans
for target_obj in target_objs:
target_obj.delete()
return count * 2 # Each object had 2 permissions (user + group)
def _get_current_orphan_count(self):
"""Get current number of orphan permissions"""
from guardian.models import GroupObjectPermission, UserObjectPermission
user_orphans = sum(1 for obj in UserObjectPermission.objects.all() if obj.content_object is None)
group_orphans = sum(1 for obj in GroupObjectPermission.objects.all() if obj.content_object is None)
return user_orphans + group_orphans
def test_clean_perms(self):
# assign obj perms
target_perms = {
self.target_user1: ["change_%s" % user_module_name],
self.target_group1: ["delete_group"],
self.target_obj1: ["change_contenttype", "delete_contenttype"],
self.target_obj2: ["change_contenttype"],
}
obj_perms_count = sum(len(val) for key, val in target_perms.items())
for target, perms in target_perms.items():
target.__old_pk = target.pk # Store pkeys
for perm in perms:
assign_perm(perm, self.user, target)
# Remove targets
for target, perms in target_perms.items():
target.delete()
# Clean orphans
removed = clean_orphan_obj_perms()
self.assertEqual(removed, obj_perms_count)
# Recreate targets and check if user has no permissions
for target, perms in target_perms.items():
target.pk = target.__old_pk
target.save()
for perm in perms:
self.assertFalse(self.user.has_perm(perm, target))
def test_clean_perms_with_batch_size(self):
"""Test cleaning orphan permissions with batch_size parameter"""
# Test that batch_size parameter works without exact count verification
self._create_orphan_permissions(5)
# Clean with batch size - just verify it runs and removes something
removed = clean_orphan_obj_perms(batch_size=3)
# The main goal is to test that batch_size parameter is accepted and works
self.assertGreaterEqual(removed, 0) # Should remove at least 0 orphans
# Clean any remaining to ensure clean state for next tests
clean_orphan_obj_perms()
def test_clean_perms_with_max_batches(self):
"""Test cleaning orphan permissions with max_batches parameter"""
# Get current orphan count and create new ones
self._create_orphan_permissions(10)
total_orphans = self._get_current_orphan_count()
# Clean with batch size and max batches - should process limited batches
removed = clean_orphan_obj_perms(batch_size=3, max_batches=2)
self.assertLessEqual(removed, total_orphans)
self.assertGreater(removed, 0)
# Clean the rest
remaining = clean_orphan_obj_perms()
self.assertEqual(removed + remaining, total_orphans)
def test_clean_perms_with_skip_batches(self):
"""Test cleaning orphan permissions with skip_batches parameter"""
# Get current orphan count and create new ones
self._create_orphan_permissions(10)
total_orphans = self._get_current_orphan_count()
# Skip some batches and clean the rest
removed = clean_orphan_obj_perms(batch_size=3, skip_batches=2)
self.assertLessEqual(removed, total_orphans)
# Clean the skipped ones
remaining = clean_orphan_obj_perms()
self.assertEqual(removed + remaining, total_orphans)
def test_clean_perms_with_max_duration_secs(self):
"""Test cleaning orphan permissions with max_duration_secs parameter"""
self._create_orphan_permissions(5)
# Test with very short duration (should stop early)
with patch("guardian.utils.time.monotonic") as mock_time:
# Mock time to simulate duration limit being reached
mock_time.side_effect = [0, 0.5, 2.0] + [3.0] * 20
removed = clean_orphan_obj_perms(batch_size=2, max_duration_secs=1)
# Should stop early due to time limit
self.assertGreaterEqual(removed, 0)
def test_clean_perms_combined_parameters(self):
"""Test cleaning orphan permissions with multiple parameters combined"""
expected_orphans = self._create_orphan_permissions(15)
# Use batch_size, skip_batches, max_batches
removed = clean_orphan_obj_perms(batch_size=3, skip_batches=1, max_batches=3)
self.assertGreaterEqual(removed, 0)
self.assertLessEqual(removed, expected_orphans)
# Clean remaining
remaining = clean_orphan_obj_perms()
self.assertEqual(removed + remaining, expected_orphans)
def test_clean_perms_no_orphans(self):
"""Test cleaning when there are no orphan permissions"""
# Don't create any orphans
removed = clean_orphan_obj_perms(batch_size=5)
self.assertEqual(removed, 0)
def test_clean_perms_edge_cases(self):
"""Test edge cases for parameter combinations"""
# Test that parameters are accepted and work without exact count verification
self._create_orphan_permissions(5)
# Test with batch_size larger than total records
removed = clean_orphan_obj_perms(batch_size=100)
self.assertGreaterEqual(removed, 0) # Should work without errors
# Clean any remaining
clean_orphan_obj_perms()
# Create more orphans for next test
self._create_orphan_permissions(3)
# Test with skip_batches larger than available batches
removed = clean_orphan_obj_perms(batch_size=2, skip_batches=10)
# With skip_batches > available batches, should remove 0
self.assertEqual(removed, 0)
# Clean the remaining orphans to verify function still works
remaining = clean_orphan_obj_perms()
self.assertGreaterEqual(remaining, 0) # Should clean remaining orphans
def test_clean_perms_return_value_consistency(self):
"""Test that return value is consistent across different parameter combinations"""
expected_orphans = self._create_orphan_permissions(8)
# Clean in batches and verify total
total_removed = 0
while True:
removed = clean_orphan_obj_perms(batch_size=3, max_batches=1)
if removed == 0:
break
total_removed += removed
self.assertEqual(total_removed, expected_orphans)
def test_clean_perms_command(self):
"""
Same test as the one above but rather function directly, we call
management command instead.
"""
# assign obj perms
target_perms = {
self.target_user1: ["change_%s" % user_module_name],
self.target_group1: ["delete_group"],
self.target_obj1: ["change_contenttype", "delete_contenttype"],
self.target_obj2: ["change_contenttype"],
}
for target, perms in target_perms.items():
target.__old_pk = target.pk # Store pkeys
for perm in perms:
assign_perm(perm, self.user, target)
# Remove targets
for target, perms in target_perms.items():
target.delete()
# Clean orphans
call_command("clean_orphan_obj_perms", verbosity=0)
# Recreate targets and check if user has no permissions
for target, perms in target_perms.items():
target.pk = target.__old_pk
target.save()
for perm in perms:
self.assertFalse(self.user.has_perm(perm, target))
def test_clean_perms_command_with_batch_size(self):
"""Test management command with batch-size parameter"""
# Create orphans
self._create_orphan_permissions(5)
# Don't rely on _get_current_orphan_count, just run the command and verify it works
out = StringIO()
call_command("clean_orphan_obj_perms", batch_size=3, verbosity=1, stdout=out)
output = out.getvalue()
# Just verify that some permissions were removed and the format is correct
self.assertIn("Removed", output)
self.assertIn("object permission entries with no targets", output)
# Extract the actual number and verify it's reasonable
import re
match = re.search(r"Removed (\d+) object permission entries", output)
if match:
removed = int(match.group(1))
self.assertGreater(removed, 0) # Should have removed something
self.assertLessEqual(removed, 20) # Reasonable upper bound
def test_clean_perms_command_with_max_batches(self):
"""Test management command with max-batches parameter"""
expected_orphans = self._create_orphan_permissions(8)
out = StringIO()
call_command("clean_orphan_obj_perms", batch_size=3, max_batches=2, verbosity=1, stdout=out)
output = out.getvalue()
# Should have removed some permissions
self.assertIn("Removed", output)
self.assertIn("object permission entries", output)
# Extract the number from output and verify it's reasonable
import re
match = re.search(r"Removed (\d+) object permission entries", output)
if match:
removed = int(match.group(1))
self.assertGreater(removed, 0)
self.assertLessEqual(removed, expected_orphans)
def test_clean_perms_command_with_skip_batches(self):
"""Test management command with skip-batches parameter"""
self._create_orphan_permissions(8)
out = StringIO()
call_command("clean_orphan_obj_perms", batch_size=3, skip_batches=1, verbosity=1, stdout=out)
output = out.getvalue()
self.assertIn("Removed", output)
self.assertIn("object permission entries", output)
def test_clean_perms_command_with_max_duration_secs(self):
"""Test management command with max-duration-secs parameter"""
self._create_orphan_permissions(5)
with patch("guardian.utils.time.monotonic") as mock_time:
mock_time.side_effect = [0, 0.5, 2.0] + [3.0] * 20
out = StringIO()
call_command("clean_orphan_obj_perms", batch_size=2, max_duration_secs=1, verbosity=1, stdout=out)
output = out.getvalue()
# Should have some output indicating removal
self.assertIn("Removed", output)
def test_clean_perms_command_combined_parameters(self):
"""Test management command with multiple parameters"""
self._create_orphan_permissions(10)
out = StringIO()
call_command("clean_orphan_obj_perms", batch_size=3, skip_batches=1, max_batches=2, verbosity=1, stdout=out)
output = out.getvalue()
self.assertIn("Removed", output)
self.assertIn("object permission entries", output)
def test_clean_perms_command_verbosity_levels(self):
"""Test management command with different verbosity levels"""
self._create_orphan_permissions(3)
# Test verbosity=0 (should produce no output)
out = StringIO()
call_command("clean_orphan_obj_perms", verbosity=0, stdout=out)
self.assertEqual(out.getvalue().strip(), "")
# Create more orphans and test verbosity=1
self._create_orphan_permissions(2)
out = StringIO()
call_command("clean_orphan_obj_perms", verbosity=1, stdout=out)
output = out.getvalue()
self.assertIn("Removed", output)
self.assertIn("object permission entries", output)
def test_clean_perms_command_no_orphans(self):
"""Test management command when there are no orphan permissions"""
# First clean any existing orphans
clean_orphan_obj_perms()
out = StringIO()
call_command("clean_orphan_obj_perms", verbosity=1, stdout=out)
output = out.getvalue()
self.assertIn("Removed 0 object permission entries with no targets", output)
| OrphanedObjectPermissionsTest |
python | altair-viz__altair | tools/vega_expr.py | {
"start": 12296,
"end": 25876
} | class ____:
"""
``SchemaInfo``-like, but operates on `expressions.md`_.
.. _expressions.md:
https://raw.githubusercontent.com/vega/vega/main/docs/docs/expressions.md
"""
remap_title: ClassVar[ReplaceMany] = ReplaceMany(
fmt_match=r"(?P<key>{0})\(", fmt_replace="{0}("
)
def __init__(self, name: str, children: Sequence[Token], /) -> None:
self.name: str = name
self._children: Sequence[Token] = children
self.parameters: list[VegaExprParam] = []
self.doc: str = ""
self.signature: str = ""
self._special: set[Special] = set()
def get_links(self, rst_renderer: RSTRenderer) -> dict[str, str]:
"""Retrieve dict of link text to link url."""
from mistune import BlockState
links = {}
state = BlockState()
for t in self._children:
if t.get("type") == "link" and (url := t.get("attrs", {}).get("url")):
text = rst_renderer.render_children(t, state)
text = text.replace("`", "")
links[text] = expand_urls(url)
return links
def with_doc(self) -> Self:
"""
Parses docstring content in full.
Accessible via ``self.doc``
"""
s: str = parser.render_tokens(self._doc_tokens())
s = italics_to_backticks(s, self.parameter_names(variadic=False))
s = type(self).remap_title(s)
self.doc = format_doc(s)
return self
def with_parameters(self) -> Self:
"""
Parses signature content into an intermediate representation.
Accessible via ``self.parameters``.
"""
split: Iterator[str] = self._split_signature_tokens(exclude_name=True)
self.parameters = list(VegaExprParam.from_texts(split))
if not self.parameters:
self._special.add(Special.NO_PARAMETERS)
return self
def with_signature(self) -> Self:
"""
Parses ``self.parameters`` into a full signature definition line.
Accessible via ``self.signature``
"""
param_list = (
VegaExprParam.star_args()
if self.is_overloaded()
else ", ".join(p.render() for p in self.parameters)
)
self.signature = METHOD_SIGNATURE.format(
title=self.title,
sep="" if self.is_no_parameters() else ",",
param_list=param_list,
marker="" if (self.is_variadic() or self.is_no_parameters()) else ", /",
return_ann=RETURN_ANNOTATION,
type_ignore=(
f" {IGNORE_OVERRIDE}" if self.is_incompatible_override() else ""
),
)
return self
def parameter_names(self, *, variadic: bool = True) -> Iterator[str]:
"""Pass ``variadic=False`` to omit names like``*args``."""
if self.parameters:
it: Iterator[str] = (
(p.name for p in self.parameters)
if variadic
else (p.name for p in self.parameters if not p.variadic)
)
yield from it
elif self.is_no_parameters():
yield from ()
else:
msg = (
f"Cannot provide `parameter_names` until they have been initialized via:\n"
f"{type(self).__name__}.with_parameters()\n\n"
f"{self!r}"
)
raise TypeError(msg)
def render(self) -> str:
"""Return fully parsed method definition."""
if self.is_overloaded():
body_params = STAR_ARGS[1:]
else:
body_params = (
f"({self.parameters[0].name},)"
if len(self.parameters) == 1
else f"({','.join(self.parameter_names())})"
)
return METHOD_TEMPLATE.format(
decorator=DECORATOR,
signature=self.signature,
doc=self.doc,
return_wrapper=RETURN_WRAPPER,
name=f"{self.name!r}",
body_params=body_params,
)
@property
def title(self) -> str:
"""
Use for the method definition, but not when calling internally.
Updates ``remap_title`` class variable for documentation example substitutions.
"""
title = f"{self.name}_" if self.is_keyword() else self.name
type(self).remap_title.update({self.name: f"alt.expr.{title}"})
return title
def _signature_tokens(self) -> Iterator[Token]:
"""
Target for signature appears between 2 softbreak tokens.
- Proceeds to the first token **after** a softbreak
- Yield **only** text tokens
- Skips all inline html tags
- Stops at 2nd softbreak
"""
it: Iterator[Token] = iter(self)
current = next(it)
while current[TYPE] != SOFTBREAK:
current = next(it)
next(it)
for target in it:
if target[TYPE] == TEXT:
yield target
elif target[TYPE] == SOFTBREAK:
break
else:
continue
def _split_signature_tokens(self, *, exclude_name: bool = False) -> Iterator[str]:
"""
Normalize the text content of the signature.
Examples
--------
The following definition:
<a name="sequence" href="#sequence">#</a>
<b>sequence</b>([<i>start</i>, ]<i>stop</i>[, <i>step</i>])<br/>
Returns an array containing an arithmetic sequence of numbers.
...
Will yield:
['sequence', '(', '[', 'start', ']', 'stop', '[', 'step', ']', ')']
When called with ``exclude_name=True``:
['(', '[', 'start', ']', 'stop', '[', 'step', ']', ')']
"""
EXCLUDE_INNER: set[str] = {self.name} if exclude_name else set()
EXCLUDE: set[str] = {", "} | EXCLUDE_INNER
for token in self._signature_tokens():
raw: str = token[RAW]
if raw == OPEN_PAREN:
yield raw
elif raw.startswith(OPEN_PAREN):
yield raw[0]
for s in raw[1:].split(","):
if (clean := s.strip(" -")) not in EXCLUDE_INNER:
yield from VegaExprDef._split_markers(clean)
elif (clean := raw.strip(", -")) not in EXCLUDE:
yield from VegaExprDef._split_markers(clean)
@staticmethod
def _split_markers(s: str, /) -> Iterator[str]:
"""
When ``s`` ends with one of these markers:
")", "]", "...", " |"
- Split ``s`` into rest, match
- using the length of the match to index
- Append match to ``end``
- Recurse
""" # noqa: D400
if s.isalnum():
yield s
return
end: list[str] = []
original = s # Save original string to detect changes
if s.endswith(CLOSE_PAREN):
end.append(CLOSE_PAREN)
s = s[:-1]
elif s.endswith(CLOSE_BRACKET):
end.append(CLOSE_BRACKET)
s = s[:-1]
elif s.endswith(ELLIPSIS):
end.append(ELLIPSIS)
s = s[:-3]
elif s.endswith(INLINE_OVERLOAD):
end.append(INLINE_OVERLOAD)
s = s[:-2]
if s == original:
# Nothing was removed; avoid infinite recursion
yield s
elif len(s) == 1:
yield s
elif len(s) > 1:
yield from VegaExprDef._split_markers(s)
yield from end
def _doc_tokens(self) -> Sequence[Token]:
"""Return the slice of `self.children` that contains docstring content."""
for idx, item in enumerate(self):
if item[TYPE] == SOFTBREAK and self[idx + 1][TYPE] == TEXT:
return self[idx + 1 :]
else:
continue
msg = (
f"Expected to find a text node marking the start of docstring content.\n"
f"Failed for:\n\n{self!r}"
)
raise NotImplementedError(msg)
def is_callable(self) -> bool:
"""
Rough filter for excluding `constants`_.
- Most of the parsing is to handle varying signatures.
- Constants can just be referenced by name, so can skip those
Notes
-----
- Overwriting the <a name> with the rendered text
- required for `clamprange` -> `clampRange`
.. _constants:
https://vega.github.io/vega/docs/expressions/#constants
"""
if self.is_overloaded_string_array() or self.is_bound_variable_name():
return False
it: Iterator[Token] = iter(self)
current: str = next(it, {}).get(RAW, "")
name: str = self.name.casefold()
while current.casefold() != name:
if (el := next(it, None)) is not None:
current = el.get(RAW, "")
else:
return False
if current != self.name:
self.name = current
next(it)
return next(it).get(RAW, "").startswith(OPEN_PAREN)
def is_bound_variable_name(self) -> bool:
"""
``Vega`` `bound variables`_.
These do not provide signatures:
{"datum", "event", "signal"}
.. _bound variables:
https://vega.github.io/vega/docs/expressions/#bound-variables
"""
RESERVED_NAMES: set[str] = {"datum", "event", "signal"}
return self.name in RESERVED_NAMES
def is_overloaded(self) -> bool:
"""
Covers the `color functions`_.
These look like:
lab(l, a, b[, opacity]) | lab(specifier)
Looping of parameters is for signatures like `sequence`_:
sequence([start, ]stop[, step])
The optional first parameter, followed by a required one would need an
``@overload`` in ``python``.
.. _color functions:
https://vega.github.io/vega/docs/expressions/#color-functions
.. _sequence:
https://vega.github.io/vega/docs/expressions/#sequence
"""
for idx, item in enumerate(self):
if item[TYPE] == TEXT and item.get(RAW, "").endswith(INLINE_OVERLOAD):
return self[idx + 1][TYPE] == SOFTBREAK
else:
continue
for idx, p in enumerate(self.parameters):
if not p.required:
others = self.parameters[idx + 1 :]
if not others:
return False
else:
return any(sp.required for sp in others)
return False
def is_overloaded_string_array(self) -> bool:
"""
HACK: There are string/array functions that overlap.
- the `.md` handles this by prefixing the `<a name=...` for the string version
- This is very different to the handled overload kinds
- Both definitions have full documentation and appear under different sections
- Unlike color functions, sequence
- These are inline
"""
return self.name.startswith("string_")
def is_keyword(self) -> bool:
return keyword.iskeyword(self.name)
def is_incompatible_override(self) -> bool:
"""
``self.title`` shadows an unrelated ``SchemaBase`` method.
Requires an ignore comment for a type checker.
"""
return self.title in _SCHEMA_BASE_MEMBERS
def is_variadic(self) -> bool:
"""Position-only parameter separator `"/"` not allowed after `"*"` parameter."""
return self.is_overloaded() or any(p.variadic for p in self.parameters)
def is_no_parameters(self) -> bool:
"""
Signature has been parsed for parameters, but none were present.
For example the definition for `now`_ would **only** return ``True``
after calling ``self.with_parameters()``.
.. _now:
https://vega.github.io/vega/docs/expressions/#now
"""
return bool(self._special) and Special.NO_PARAMETERS in self._special
def __iter__(self) -> Iterator[Token]:
yield from self._children
@overload
def __getitem__(self, index: int) -> Token: ...
@overload
def __getitem__(self, index: slice) -> Sequence[Token]: ...
def __getitem__(self, index: int | slice) -> Token | Sequence[Token]:
return self._children.__getitem__(index)
def __repr__(self) -> str:
return (
f"{type(self).__name__}(\n "
f"name={self.name!r},\n "
f"parameters={self.parameters!r},\n "
f"doc={self.doc!r}\n"
")"
)
@classmethod
def from_tokens(cls, tokens: Iterable[Token], /) -> Iterator[Self]:
"""
Lazy, filtered partial parser.
Applies a series of filters before rendering everything but the docs.
Parameters
----------
tokens
`ast tokens`_ produced by ``mistune``
.. _ast tokens:
https://mistune.lepture.com/en/latest/guide.html#abstract-syntax-tree
"""
for tok in tokens:
if (
(children := tok.get(CHILDREN)) is not None
and (child := next(iter(children)).get(RAW)) is not None
and (match := FUNCTION_DEF_LINE.match(child))
and (node := cls(match["name"], children)).is_callable()
):
yield node.with_parameters().with_signature()
@dataclasses.dataclass
| VegaExprDef |
python | doocs__leetcode | solution/0900-0999/0966.Vowel Spellchecker/Solution.py | {
"start": 0,
"end": 802
} | class ____:
def spellchecker(self, wordlist: List[str], queries: List[str]) -> List[str]:
def f(w):
t = []
for c in w:
t.append("*" if c in "aeiou" else c)
return "".join(t)
s = set(wordlist)
low, pat = {}, {}
for w in wordlist:
t = w.lower()
low.setdefault(t, w)
pat.setdefault(f(t), w)
ans = []
for q in queries:
if q in s:
ans.append(q)
continue
q = q.lower()
if q in low:
ans.append(low[q])
continue
q = f(q)
if q in pat:
ans.append(pat[q])
continue
ans.append("")
return ans
| Solution |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0072_remove_md5_field.py | {
"start": 120,
"end": 399
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0071_add_env_var_privacy"),
]
operations = [
migrations.RemoveField(
model_name="importedfile",
name="md5",
),
]
| Migration |
python | numba__numba | numba/core/pylowering.py | {
"start": 2273,
"end": 24891
} | class ____(BaseLower):
GeneratorLower = generators.PyGeneratorLower
def init(self):
# Strings to be frozen into the Environment object
self._frozen_strings = set()
self._live_vars = set()
def pre_lower(self):
super(PyLower, self).pre_lower()
self.init_pyapi()
def post_lower(self):
pass
def pre_block(self, block):
self.init_vars(block)
def lower_inst(self, inst):
if isinstance(inst, ir.Assign):
value = self.lower_assign(inst)
self.storevar(value, inst.target.name)
elif isinstance(inst, ir.SetItem):
target = self.loadvar(inst.target.name)
index = self.loadvar(inst.index.name)
value = self.loadvar(inst.value.name)
ok = self.pyapi.object_setitem(target, index, value)
self.check_int_status(ok)
elif isinstance(inst, ir.DelItem):
target = self.loadvar(inst.target.name)
index = self.loadvar(inst.index.name)
ok = self.pyapi.object_delitem(target, index)
self.check_int_status(ok)
elif isinstance(inst, ir.SetAttr):
target = self.loadvar(inst.target.name)
value = self.loadvar(inst.value.name)
ok = self.pyapi.object_setattr(target,
self._freeze_string(inst.attr),
value)
self.check_int_status(ok)
elif isinstance(inst, ir.DelAttr):
target = self.loadvar(inst.target.name)
ok = self.pyapi.object_delattr(target,
self._freeze_string(inst.attr))
self.check_int_status(ok)
elif isinstance(inst, ir.StoreMap):
dct = self.loadvar(inst.dct.name)
key = self.loadvar(inst.key.name)
value = self.loadvar(inst.value.name)
ok = self.pyapi.dict_setitem(dct, key, value)
self.check_int_status(ok)
elif isinstance(inst, ir.Return):
retval = self.loadvar(inst.value.name)
if self.generator_info:
# StopIteration
# We own a reference to the "return value", but we
# don't return it.
self.pyapi.decref(retval)
self.genlower.return_from_generator(self)
return
# No need to incref() as the reference is already owned.
self.call_conv.return_value(self.builder, retval)
elif isinstance(inst, ir.Branch):
cond = self.loadvar(inst.cond.name)
if cond.type == llvmlite.ir.IntType(1):
istrue = cond
else:
istrue = self.pyapi.object_istrue(cond)
zero = llvmlite.ir.Constant(istrue.type, None)
pred = self.builder.icmp_unsigned('!=', istrue, zero)
tr = self.blkmap[inst.truebr]
fl = self.blkmap[inst.falsebr]
self.builder.cbranch(pred, tr, fl)
elif isinstance(inst, ir.Jump):
target = self.blkmap[inst.target]
self.builder.branch(target)
elif isinstance(inst, ir.Del):
self.delvar(inst.value)
elif isinstance(inst, ir.PopBlock):
pass # this is just a marker
elif isinstance(inst, ir.Raise):
if inst.exception is not None:
exc = self.loadvar(inst.exception.name)
# A reference will be stolen by raise_object() and another
# by return_exception_raised().
self.incref(exc)
else:
exc = None
self.pyapi.raise_object(exc)
self.return_exception_raised()
else:
msg = f"{type(inst)}, {inst}"
raise NumbaNotImplementedError(msg)
@cached_property
def _omitted_typobj(self):
"""Return a `OmittedArg` type instance as a LLVM value suitable for
testing at runtime.
"""
from numba.core.dispatcher import OmittedArg
return self.pyapi.unserialize(
self.pyapi.serialize_object(OmittedArg))
def lower_assign(self, inst):
"""
The returned object must have a new reference
"""
value = inst.value
if isinstance(value, (ir.Const, ir.FreeVar)):
return self.lower_const(value.value)
elif isinstance(value, ir.Var):
val = self.loadvar(value.name)
self.incref(val)
return val
elif isinstance(value, ir.Expr):
return self.lower_expr(value)
elif isinstance(value, ir.Global):
return self.lower_global(value.name, value.value)
elif isinstance(value, ir.Yield):
return self.lower_yield(value)
elif isinstance(value, ir.Arg):
param = self.func_ir.func_id.pysig.parameters.get(value.name)
obj = self.fnargs[value.index]
slot = cgutils.alloca_once_value(self.builder, obj)
# Don't check for OmittedArg unless the argument has a default
if param is not None and param.default is inspect.Parameter.empty:
self.incref(obj)
self.builder.store(obj, slot)
else:
# When an argument is omitted, the dispatcher hands it as
# _OmittedArg(<default value>)
typobj = self.pyapi.get_type(obj)
is_omitted = self.builder.icmp_unsigned('==', typobj,
self._omitted_typobj)
with self.builder.if_else(is_omitted, likely=False) as (omitted, present):
with present:
self.incref(obj)
self.builder.store(obj, slot)
with omitted:
# The argument is omitted => get the default value
obj = self.pyapi.object_getattr_string(obj, 'value')
self.builder.store(obj, slot)
return self.builder.load(slot)
else:
raise NotImplementedError(type(value), value)
def lower_yield(self, inst):
yp = self.generator_info.yield_points[inst.index]
assert yp.inst is inst
self.genlower.init_generator_state(self)
# Save live vars in state
# We also need to save live vars that are del'ed afterwards.
y = generators.LowerYield(self, yp, yp.live_vars | yp.weak_live_vars)
y.lower_yield_suspend()
# Yield to caller
val = self.loadvar(inst.value.name)
# Let caller own the reference
self.pyapi.incref(val)
self.call_conv.return_value(self.builder, val)
# Resumption point
y.lower_yield_resume()
# None is returned by the yield expression
return self.pyapi.make_none()
def lower_binop(self, expr, op, inplace=False):
lhs = self.loadvar(expr.lhs.name)
rhs = self.loadvar(expr.rhs.name)
assert not isinstance(op, str)
if op in PYTHON_BINOPMAP:
fname, inplace = PYTHON_BINOPMAP[op]
fn = getattr(self.pyapi, fname)
res = fn(lhs, rhs, inplace=inplace)
else:
# Assumed to be rich comparison
fn = PYTHON_COMPAREOPMAP.get(expr.fn, expr.fn)
if fn == 'in': # 'in' and operator.contains have args reversed
lhs, rhs = rhs, lhs
res = self.pyapi.object_richcompare(lhs, rhs, fn)
self.check_error(res)
return res
def lower_expr(self, expr):
if expr.op == 'binop':
return self.lower_binop(expr, expr.fn, inplace=False)
elif expr.op == 'inplace_binop':
return self.lower_binop(expr, expr.fn, inplace=True)
elif expr.op == 'unary':
value = self.loadvar(expr.value.name)
if expr.fn == operator.neg:
res = self.pyapi.number_negative(value)
elif expr.fn == operator.pos:
res = self.pyapi.number_positive(value)
elif expr.fn == operator.not_:
res = self.pyapi.object_not(value)
self.check_int_status(res)
res = self.pyapi.bool_from_bool(res)
elif expr.fn == operator.invert:
res = self.pyapi.number_invert(value)
else:
raise NotImplementedError(expr)
self.check_error(res)
return res
elif expr.op == 'call':
argvals = [self.loadvar(a.name) for a in expr.args]
fn = self.loadvar(expr.func.name)
args = self.pyapi.tuple_pack(argvals)
if expr.vararg:
# Expand *args
varargs = self.pyapi.sequence_tuple(
self.loadvar(expr.vararg.name))
new_args = self.pyapi.sequence_concat(args, varargs)
self.decref(varargs)
self.decref(args)
args = new_args
if not expr.kws:
# No named arguments
ret = self.pyapi.call(fn, args, None)
else:
# Named arguments
keyvalues = [(k, self.loadvar(v.name)) for k, v in expr.kws]
kws = self.pyapi.dict_pack(keyvalues)
ret = self.pyapi.call(fn, args, kws)
self.decref(kws)
self.decref(args)
self.check_error(ret)
return ret
elif expr.op == 'getattr':
obj = self.loadvar(expr.value.name)
res = self.pyapi.object_getattr(obj, self._freeze_string(expr.attr))
self.check_error(res)
return res
elif expr.op == 'build_tuple':
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.tuple_pack(items)
self.check_error(res)
return res
elif expr.op == 'build_list':
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.list_pack(items)
self.check_error(res)
return res
elif expr.op == 'build_map':
res = self.pyapi.dict_new(expr.size)
self.check_error(res)
for k, v in expr.items:
key = self.loadvar(k.name)
value = self.loadvar(v.name)
ok = self.pyapi.dict_setitem(res, key, value)
self.check_int_status(ok)
return res
elif expr.op == 'build_set':
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.set_new()
self.check_error(res)
for it in items:
ok = self.pyapi.set_add(res, it)
self.check_int_status(ok)
return res
elif expr.op == 'getiter':
obj = self.loadvar(expr.value.name)
res = self.pyapi.object_getiter(obj)
self.check_error(res)
return res
elif expr.op == 'iternext':
iterobj = self.loadvar(expr.value.name)
item = self.pyapi.iter_next(iterobj)
is_valid = cgutils.is_not_null(self.builder, item)
pair = self.pyapi.tuple_new(2)
with self.builder.if_else(is_valid) as (then, otherwise):
with then:
self.pyapi.tuple_setitem(pair, 0, item)
with otherwise:
self.check_occurred()
# Make the tuple valid by inserting None as dummy
# iteration "result" (it will be ignored).
self.pyapi.tuple_setitem(pair, 0, self.pyapi.make_none())
self.pyapi.tuple_setitem(pair, 1, self.pyapi.bool_from_bool(is_valid))
return pair
elif expr.op == 'pair_first':
pair = self.loadvar(expr.value.name)
first = self.pyapi.tuple_getitem(pair, 0)
self.incref(first)
return first
elif expr.op == 'pair_second':
pair = self.loadvar(expr.value.name)
second = self.pyapi.tuple_getitem(pair, 1)
self.incref(second)
return second
elif expr.op == 'exhaust_iter':
iterobj = self.loadvar(expr.value.name)
tup = self.pyapi.sequence_tuple(iterobj)
self.check_error(tup)
# Check tuple size is as expected
tup_size = self.pyapi.tuple_size(tup)
expected_size = self.context.get_constant(types.intp, expr.count)
has_wrong_size = self.builder.icmp_unsigned('!=',
tup_size, expected_size)
with cgutils.if_unlikely(self.builder, has_wrong_size):
self.return_exception(ValueError)
return tup
elif expr.op == 'getitem':
value = self.loadvar(expr.value.name)
index = self.loadvar(expr.index.name)
res = self.pyapi.object_getitem(value, index)
self.check_error(res)
return res
elif expr.op == 'static_getitem':
value = self.loadvar(expr.value.name)
index = self.context.get_constant(types.intp, expr.index)
indexobj = self.pyapi.long_from_ssize_t(index)
self.check_error(indexobj)
res = self.pyapi.object_getitem(value, indexobj)
self.decref(indexobj)
self.check_error(res)
return res
elif expr.op == 'getslice':
target = self.loadvar(expr.target.name)
start = self.loadvar(expr.start.name)
stop = self.loadvar(expr.stop.name)
slicefn = self.get_builtin_obj("slice")
sliceobj = self.pyapi.call_function_objargs(slicefn, (start, stop))
self.decref(slicefn)
self.check_error(sliceobj)
res = self.pyapi.object_getitem(target, sliceobj)
self.check_error(res)
return res
elif expr.op == 'cast':
val = self.loadvar(expr.value.name)
self.incref(val)
return val
elif expr.op == 'phi':
raise LoweringError("PHI not stripped")
elif expr.op == 'null':
# Make null value
return cgutils.get_null_value(self.pyapi.pyobj)
elif expr.op == 'undef':
# Use a sentinel value for undefined variable
return self.lower_const(_UNDEFINED)
else:
raise NotImplementedError(expr)
def lower_const(self, const):
# All constants are frozen inside the environment
index = self.env_manager.add_const(const)
ret = self.env_manager.read_const(index)
self.check_error(ret)
self.incref(ret)
return ret
def lower_global(self, name, value):
"""
1) Check global scope dictionary.
2) Check __builtins__.
2a) is it a dictionary (for non __main__ module)
2b) is it a module (for __main__ module)
"""
moddict = self.get_module_dict()
obj = self.pyapi.dict_getitem(moddict, self._freeze_string(name))
self.incref(obj) # obj is borrowed
try:
if value in _unsupported_builtins:
raise ForbiddenConstruct("builtins %s() is not supported"
% name, loc=self.loc)
except TypeError:
# `value` is unhashable, ignore
pass
if hasattr(builtins, name):
obj_is_null = self.is_null(obj)
bbelse = self.builder.basic_block
with self.builder.if_then(obj_is_null):
mod = self.pyapi.dict_getitem(moddict,
self._freeze_string("__builtins__"))
builtin = self.builtin_lookup(mod, name)
bbif = self.builder.basic_block
retval = self.builder.phi(self.pyapi.pyobj)
retval.add_incoming(obj, bbelse)
retval.add_incoming(builtin, bbif)
else:
retval = obj
with cgutils.if_unlikely(self.builder, self.is_null(retval)):
self.pyapi.raise_missing_global_error(name)
self.return_exception_raised()
return retval
# -------------------------------------------------------------------------
def get_module_dict(self):
return self.env_body.globals
def get_builtin_obj(self, name):
# XXX The builtins dict could be bound into the environment
moddict = self.get_module_dict()
mod = self.pyapi.dict_getitem(moddict,
self._freeze_string("__builtins__"))
return self.builtin_lookup(mod, name)
def builtin_lookup(self, mod, name):
"""
Args
----
mod:
The __builtins__ dictionary or module, as looked up in
a module's globals.
name: str
The object to lookup
"""
fromdict = self.pyapi.dict_getitem(mod, self._freeze_string(name))
self.incref(fromdict) # fromdict is borrowed
bbifdict = self.builder.basic_block
with cgutils.if_unlikely(self.builder, self.is_null(fromdict)):
# This happen if we are using the __main__ module
frommod = self.pyapi.object_getattr(mod, self._freeze_string(name))
with cgutils.if_unlikely(self.builder, self.is_null(frommod)):
self.pyapi.raise_missing_global_error(name)
self.return_exception_raised()
bbifmod = self.builder.basic_block
builtin = self.builder.phi(self.pyapi.pyobj)
builtin.add_incoming(fromdict, bbifdict)
builtin.add_incoming(frommod, bbifmod)
return builtin
def check_occurred(self):
"""
Return if an exception occurred.
"""
err_occurred = cgutils.is_not_null(self.builder,
self.pyapi.err_occurred())
with cgutils.if_unlikely(self.builder, err_occurred):
self.return_exception_raised()
def check_error(self, obj):
"""
Return if *obj* is NULL.
"""
with cgutils.if_unlikely(self.builder, self.is_null(obj)):
self.return_exception_raised()
return obj
def check_int_status(self, num, ok_value=0):
"""
Raise an exception if *num* is smaller than *ok_value*.
"""
ok = llvmlite.ir.Constant(num.type, ok_value)
pred = self.builder.icmp_signed('<', num, ok)
with cgutils.if_unlikely(self.builder, pred):
self.return_exception_raised()
def is_null(self, obj):
return cgutils.is_null(self.builder, obj)
def return_exception_raised(self):
"""
Return with the currently raised exception.
"""
self.cleanup_vars()
self.call_conv.return_exc(self.builder)
def init_vars(self, block):
"""
Initialize live variables for *block*.
"""
self._live_vars = set(self.func_ir.get_block_entry_vars(block))
def _getvar(self, name, ltype=None):
if name not in self.varmap:
self.varmap[name] = self.alloca(name, ltype=ltype)
return self.varmap[name]
def loadvar(self, name):
"""
Load the llvm value of the variable named *name*.
"""
# If this raises then the live variables analysis is wrong
assert name in self._live_vars, name
ptr = self.varmap[name]
val = self.builder.load(ptr)
with cgutils.if_unlikely(self.builder, self.is_null(val)):
self.pyapi.raise_missing_name_error(name)
self.return_exception_raised()
return val
def delvar(self, name):
"""
Delete the variable slot with the given name. This will decref
the corresponding Python object.
"""
# If this raises then the live variables analysis is wrong
self._live_vars.remove(name)
ptr = self._getvar(name) # initializes `name` if not already
self.decref(self.builder.load(ptr))
# This is a safety guard against double decref's, but really
# the IR should be correct and have only one Del per variable
# and code path.
self.builder.store(cgutils.get_null_value(ptr.type.pointee), ptr)
def storevar(self, value, name, clobber=False):
"""
Stores a llvm value and allocate stack slot if necessary.
The llvm value can be of arbitrary type.
"""
is_redefine = name in self._live_vars and not clobber
ptr = self._getvar(name, ltype=value.type)
if is_redefine:
old = self.builder.load(ptr)
else:
self._live_vars.add(name)
assert value.type == ptr.type.pointee, (str(value.type),
str(ptr.type.pointee))
self.builder.store(value, ptr)
# Safe to call decref even on non python object
if is_redefine:
self.decref(old)
def cleanup_vars(self):
"""
Cleanup live variables.
"""
for name in self._live_vars:
ptr = self._getvar(name)
self.decref(self.builder.load(ptr))
def alloca(self, name, ltype=None):
"""
Allocate a stack slot and initialize it to NULL.
The default is to allocate a pyobject pointer.
Use ``ltype`` to override.
"""
if ltype is None:
ltype = self.context.get_value_type(types.pyobject)
with self.builder.goto_block(self.entry_block):
ptr = self.builder.alloca(ltype, name=name)
self.builder.store(cgutils.get_null_value(ltype), ptr)
return ptr
def _alloca_var(self, name, fetype):
# This is here for API compatibility with lowering.py::Lower.
# NOTE: fetype is unused
return self.alloca(name)
def incref(self, value):
self.pyapi.incref(value)
def decref(self, value):
"""
This is allow to be called on non pyobject pointer, in which case
no code is inserted.
"""
lpyobj = self.context.get_value_type(types.pyobject)
if value.type == lpyobj:
self.pyapi.decref(value)
def _freeze_string(self, string):
"""
Freeze a Python string object into the code.
"""
return self.lower_const(string)
| PyLower |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/vertex_ai/test_pipeline_job.py | {
"start": 9489,
"end": 13054
} | class ____:
@pytest.mark.asyncio
@mock.patch(PIPELINE_JOB_STRING.format("PipelineJobAsyncHook.get_pipeline_service_client"))
async def test_get_pipeline_job(
self, mock_pipeline_service_client, test_async_hook, test_pipeline_job_name
):
mock_pipeline_service_client.return_value.pipeline_job_path = mock.MagicMock(
return_value=test_pipeline_job_name
)
await test_async_hook.get_pipeline_job(
project_id=TEST_PROJECT_ID, location=TEST_REGION, job_id=TEST_PIPELINE_JOB_ID
)
mock_pipeline_service_client.assert_awaited_once_with(region=TEST_REGION)
mock_pipeline_service_client.return_value.get_pipeline_job.assert_awaited_once_with(
request={"name": test_pipeline_job_name},
retry=DEFAULT,
timeout=DEFAULT,
metadata=(),
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"pipeline_state_value",
[
PipelineState.PIPELINE_STATE_CANCELLED,
PipelineState.PIPELINE_STATE_FAILED,
PipelineState.PIPELINE_STATE_PAUSED,
PipelineState.PIPELINE_STATE_SUCCEEDED,
],
)
@mock.patch(PIPELINE_JOB_STRING.format("PipelineJobAsyncHook.get_pipeline_job"))
async def test_wait_for_pipeline_job_returns_job_if_pipeline_in_complete_state(
self,
mock_get_pipeline_job,
pipeline_state_value,
test_async_hook,
test_pipeline_job_name,
):
expected_job = types.PipelineJob(
state=pipeline_state_value,
name=test_pipeline_job_name,
)
mock_get_pipeline_job.return_value = expected_job
actual_job = await test_async_hook.wait_for_pipeline_job(
project_id=TEST_PROJECT_ID,
location=TEST_REGION,
job_id=TEST_PIPELINE_JOB_ID,
)
assert actual_job == expected_job
@pytest.mark.asyncio
@pytest.mark.parametrize(
"pipeline_state_value",
[
PipelineState.PIPELINE_STATE_CANCELLING,
PipelineState.PIPELINE_STATE_PENDING,
PipelineState.PIPELINE_STATE_QUEUED,
PipelineState.PIPELINE_STATE_RUNNING,
PipelineState.PIPELINE_STATE_UNSPECIFIED,
],
)
@mock.patch(PIPELINE_JOB_STRING.format("PipelineJobAsyncHook.get_pipeline_job"))
async def test_wait_for_pipeline_job_loop_is_still_running_if_pipeline_in_incomplete_state(
self,
mock_get_pipeline_job,
pipeline_state_value,
test_async_hook,
):
mock_get_pipeline_job.return_value = types.PipelineJob(state=pipeline_state_value)
task = asyncio.create_task(
test_async_hook.wait_for_pipeline_job(
project_id=TEST_PROJECT_ID,
location=TEST_REGION,
job_id=TEST_PIPELINE_JOB_ID,
)
)
await asyncio.sleep(0.5)
assert task.done() is False
task.cancel()
@pytest.mark.asyncio
@mock.patch(PIPELINE_JOB_STRING.format("PipelineJobAsyncHook.get_pipeline_job"))
async def test_wait_for_pipeline_job_raises_exception(self, mock_get_pipeline_job, test_async_hook):
mock_get_pipeline_job.side_effect = mock.AsyncMock(side_effect=Exception())
with pytest.raises(AirflowException):
await test_async_hook.wait_for_pipeline_job(
project_id=TEST_PROJECT_ID,
location=TEST_REGION,
job_id=TEST_PIPELINE_JOB_ID,
)
| TestPipelineJobAsyncHook |
python | numba__numba | numba/tests/test_parfors.py | {
"start": 46229,
"end": 47048
} | class ____(TestCase):
"""Tests for unsupported use of parfors"""
@unittest.skipIf(not _32bit, "Only impacts 32 bit hardware")
@needs_blas
def test_unsupported_combination_raises(self):
"""
This test is in place until issues with the 'parallel'
target on 32 bit hardware are fixed.
"""
with self.assertRaises(errors.UnsupportedParforsError) as raised:
@njit(parallel=True)
def ddot(a, v):
return np.dot(a, v)
A = np.linspace(0, 1, 20).reshape(2, 10)
v = np.linspace(2, 1, 10)
ddot(A, v)
msg = ("The 'parallel' target is not currently supported on 32 bit "
"hardware")
self.assertIn(msg, str(raised.exception))
@skip_parfors_unsupported
| TestParforsUnsupported |
python | mozilla__bleach | bleach/_vendor/html5lib/filters/base.py | {
"start": 69,
"end": 286
} | class ____(object):
def __init__(self, source):
self.source = source
def __iter__(self):
return iter(self.source)
def __getattr__(self, name):
return getattr(self.source, name)
| Filter |
python | tiangolo__fastapi | fastapi/security/open_id_connect_url.py | {
"start": 356,
"end": 3187
} | class ____(SecurityBase):
"""
OpenID Connect authentication class. An instance of it would be used as a
dependency.
**Warning**: this is only a stub to connect the components with OpenAPI in FastAPI,
but it doesn't implement the full OpenIdConnect scheme, for example, it doesn't use
the OpenIDConnect URL. You would need to to subclass it and implement it in your
code.
"""
def __init__(
self,
*,
openIdConnectUrl: Annotated[
str,
Doc(
"""
The OpenID Connect URL.
"""
),
],
scheme_name: Annotated[
Optional[str],
Doc(
"""
Security scheme name.
It will be included in the generated OpenAPI (e.g. visible at `/docs`).
"""
),
] = None,
description: Annotated[
Optional[str],
Doc(
"""
Security scheme description.
It will be included in the generated OpenAPI (e.g. visible at `/docs`).
"""
),
] = None,
auto_error: Annotated[
bool,
Doc(
"""
By default, if no HTTP Authorization header is provided, required for
OpenID Connect authentication, it will automatically cancel the request
and send the client an error.
If `auto_error` is set to `False`, when the HTTP Authorization header
is not available, instead of erroring out, the dependency result will
be `None`.
This is useful when you want to have optional authentication.
It is also useful when you want to have authentication that can be
provided in one of multiple optional ways (for example, with OpenID
Connect or in a cookie).
"""
),
] = True,
):
self.model = OpenIdConnectModel(
openIdConnectUrl=openIdConnectUrl, description=description
)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
def make_not_authenticated_error(self) -> HTTPException:
return HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers={"WWW-Authenticate": "Bearer"},
)
async def __call__(self, request: Request) -> Optional[str]:
authorization = request.headers.get("Authorization")
if not authorization:
if self.auto_error:
raise self.make_not_authenticated_error()
else:
return None
return authorization
| OpenIdConnect |
python | pytorch__pytorch | test/mobile/model_test/nn_ops.py | {
"start": 9565,
"end": 9823
} | class ____(torch.nn.Module):
def forward(self):
a = torch.randn(8, 4)
b = torch.randn(8, 4)
return len(
F.pairwise_distance(a, b),
F.cosine_similarity(a, b),
F.pdist(a),
)
| NNDistanceModule |
python | pytorch__pytorch | test/distributed/test_store.py | {
"start": 24267,
"end": 24809
} | class ____(TestCase):
def test_set_get(self):
# If we were to inherit from StoreTestBase and try to use
# its test_set_get function, we would exercise the Python
# API directly, instead of going through the C++ trampoline.
# We care about testing the C++ trampoline, so run the
# equivalent of StoreTestBase.test_set_get from C++.
# See `torch/csrc/distributed/c10d/init.cpp` for the definition
# of this test function.
dist._test_python_store(MyPythonStore())
| PythonStoreTest |
python | xlwings__xlwings | xlwings/_xlwindows.py | {
"start": 15581,
"end": 17254
} | class ____(base_classes.Apps):
def keys(self):
k = []
for hwnd in get_excel_hwnds():
k.append(App(xl=hwnd).pid)
return k
def add(self, spec=None, add_book=None, xl=None, visible=None):
return App(spec=spec, add_book=add_book, xl=xl, visible=visible)
@staticmethod
def cleanup():
res = subprocess.run(
split('tasklist /FI "IMAGENAME eq EXCEL.exe"'),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
creationflags=subprocess.CREATE_NO_WINDOW,
encoding=locale.getpreferredencoding(),
)
all_pids = set()
for line in res.stdout.splitlines()[3:]:
# Ignored if there's no processes as it prints only 1 line
_, pid, _, _, _, _ = line.split()
all_pids.add(int(pid))
active_pids = {app.pid for app in xlwings.apps}
zombie_pids = all_pids - active_pids
for pid in zombie_pids:
subprocess.run(
split(f"taskkill /PID {pid} /F"),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
creationflags=subprocess.CREATE_NO_WINDOW,
encoding=locale.getpreferredencoding(),
)
def __iter__(self):
for hwnd in get_excel_hwnds():
yield App(xl=hwnd)
def __len__(self):
return len(list(get_excel_hwnds()))
def __getitem__(self, pid):
for hwnd in get_excel_hwnds():
app = App(xl=hwnd)
if app.pid == pid:
return app
raise KeyError("Could not find an Excel instance with this PID.")
| Apps |
python | doocs__leetcode | solution/3600-3699/3697.Compute Decimal Representation/Solution.py | {
"start": 0,
"end": 268
} | class ____:
def decimalRepresentation(self, n: int) -> List[int]:
ans = []
p = 1
while n:
n, v = divmod(n, 10)
if v:
ans.append(p * v)
p *= 10
ans.reverse()
return ans
| Solution |
python | openai__openai-python | src/openai/lib/azure.py | {
"start": 14666,
"end": 26537
} | class ____(BaseAzureClient[httpx.AsyncClient, AsyncStream[Any]], AsyncOpenAI):
@overload
def __init__(
self,
*,
azure_endpoint: str,
azure_deployment: str | None = None,
api_version: str | None = None,
api_key: str | Callable[[], Awaitable[str]] | None = None,
azure_ad_token: str | None = None,
azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
organization: str | None = None,
project: str | None = None,
webhook_secret: str | None = None,
websocket_base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
max_retries: int = DEFAULT_MAX_RETRIES,
default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
http_client: httpx.AsyncClient | None = None,
_strict_response_validation: bool = False,
) -> None: ...
@overload
def __init__(
self,
*,
azure_deployment: str | None = None,
api_version: str | None = None,
api_key: str | Callable[[], Awaitable[str]] | None = None,
azure_ad_token: str | None = None,
azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
organization: str | None = None,
project: str | None = None,
webhook_secret: str | None = None,
websocket_base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
max_retries: int = DEFAULT_MAX_RETRIES,
default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
http_client: httpx.AsyncClient | None = None,
_strict_response_validation: bool = False,
) -> None: ...
@overload
def __init__(
self,
*,
base_url: str,
api_version: str | None = None,
api_key: str | Callable[[], Awaitable[str]] | None = None,
azure_ad_token: str | None = None,
azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
organization: str | None = None,
project: str | None = None,
webhook_secret: str | None = None,
websocket_base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
max_retries: int = DEFAULT_MAX_RETRIES,
default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
http_client: httpx.AsyncClient | None = None,
_strict_response_validation: bool = False,
) -> None: ...
def __init__(
self,
*,
azure_endpoint: str | None = None,
azure_deployment: str | None = None,
api_version: str | None = None,
api_key: str | Callable[[], Awaitable[str]] | None = None,
azure_ad_token: str | None = None,
azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
organization: str | None = None,
project: str | None = None,
webhook_secret: str | None = None,
base_url: str | None = None,
websocket_base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
max_retries: int = DEFAULT_MAX_RETRIES,
default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
http_client: httpx.AsyncClient | None = None,
_strict_response_validation: bool = False,
) -> None:
"""Construct a new asynchronous azure openai client instance.
This automatically infers the following arguments from their corresponding environment variables if they are not provided:
- `api_key` from `AZURE_OPENAI_API_KEY`
- `organization` from `OPENAI_ORG_ID`
- `project` from `OPENAI_PROJECT_ID`
- `azure_ad_token` from `AZURE_OPENAI_AD_TOKEN`
- `api_version` from `OPENAI_API_VERSION`
- `azure_endpoint` from `AZURE_OPENAI_ENDPOINT`
Args:
azure_endpoint: Your Azure endpoint, including the resource, e.g. `https://example-resource.azure.openai.com/`
azure_ad_token: Your Azure Active Directory token, https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id
azure_ad_token_provider: A function that returns an Azure Active Directory token, will be invoked on every request.
azure_deployment: A model deployment, if given with `azure_endpoint`, sets the base client URL to include `/deployments/{azure_deployment}`.
Not supported with Assistants APIs.
"""
if api_key is None:
api_key = os.environ.get("AZURE_OPENAI_API_KEY")
if azure_ad_token is None:
azure_ad_token = os.environ.get("AZURE_OPENAI_AD_TOKEN")
if api_key is None and azure_ad_token is None and azure_ad_token_provider is None:
raise OpenAIError(
"Missing credentials. Please pass one of `api_key`, `azure_ad_token`, `azure_ad_token_provider`, or the `AZURE_OPENAI_API_KEY` or `AZURE_OPENAI_AD_TOKEN` environment variables."
)
if api_version is None:
api_version = os.environ.get("OPENAI_API_VERSION")
if api_version is None:
raise ValueError(
"Must provide either the `api_version` argument or the `OPENAI_API_VERSION` environment variable"
)
if default_query is None:
default_query = {"api-version": api_version}
else:
default_query = {**default_query, "api-version": api_version}
if base_url is None:
if azure_endpoint is None:
azure_endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT")
if azure_endpoint is None:
raise ValueError(
"Must provide one of the `base_url` or `azure_endpoint` arguments, or the `AZURE_OPENAI_ENDPOINT` environment variable"
)
if azure_deployment is not None:
base_url = f"{azure_endpoint.rstrip('/')}/openai/deployments/{azure_deployment}"
else:
base_url = f"{azure_endpoint.rstrip('/')}/openai"
else:
if azure_endpoint is not None:
raise ValueError("base_url and azure_endpoint are mutually exclusive")
if api_key is None:
# define a sentinel value to avoid any typing issues
api_key = API_KEY_SENTINEL
super().__init__(
api_key=api_key,
organization=organization,
project=project,
webhook_secret=webhook_secret,
base_url=base_url,
timeout=timeout,
max_retries=max_retries,
default_headers=default_headers,
default_query=default_query,
http_client=http_client,
websocket_base_url=websocket_base_url,
_strict_response_validation=_strict_response_validation,
)
self._api_version = api_version
self._azure_ad_token = azure_ad_token
self._azure_ad_token_provider = azure_ad_token_provider
self._azure_deployment = azure_deployment if azure_endpoint else None
self._azure_endpoint = httpx.URL(azure_endpoint) if azure_endpoint else None
@override
def copy(
self,
*,
api_key: str | Callable[[], Awaitable[str]] | None = None,
organization: str | None = None,
project: str | None = None,
webhook_secret: str | None = None,
websocket_base_url: str | httpx.URL | None = None,
api_version: str | None = None,
azure_ad_token: str | None = None,
azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
http_client: httpx.AsyncClient | None = None,
max_retries: int | NotGiven = NOT_GIVEN,
default_headers: Mapping[str, str] | None = None,
set_default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
set_default_query: Mapping[str, object] | None = None,
_extra_kwargs: Mapping[str, Any] = {},
) -> Self:
"""
Create a new client instance re-using the same options given to the current client with optional overriding.
"""
return super().copy(
api_key=api_key,
organization=organization,
project=project,
webhook_secret=webhook_secret,
websocket_base_url=websocket_base_url,
base_url=base_url,
timeout=timeout,
http_client=http_client,
max_retries=max_retries,
default_headers=default_headers,
set_default_headers=set_default_headers,
default_query=default_query,
set_default_query=set_default_query,
_extra_kwargs={
"api_version": api_version or self._api_version,
"azure_ad_token": azure_ad_token or self._azure_ad_token,
"azure_ad_token_provider": azure_ad_token_provider or self._azure_ad_token_provider,
**_extra_kwargs,
},
)
with_options = copy
async def _get_azure_ad_token(self) -> str | None:
if self._azure_ad_token is not None:
return self._azure_ad_token
provider = self._azure_ad_token_provider
if provider is not None:
token = provider()
if inspect.isawaitable(token):
token = await token
if not token or not isinstance(cast(Any, token), str):
raise ValueError(
f"Expected `azure_ad_token_provider` argument to return a string but it returned {token}",
)
return str(token)
return None
@override
async def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
headers: dict[str, str | Omit] = {**options.headers} if is_given(options.headers) else {}
options = model_copy(options)
options.headers = headers
azure_ad_token = await self._get_azure_ad_token()
if azure_ad_token is not None:
if headers.get("Authorization") is None:
headers["Authorization"] = f"Bearer {azure_ad_token}"
elif self.api_key is not API_KEY_SENTINEL:
if headers.get("api-key") is None:
headers["api-key"] = self.api_key
else:
# should never be hit
raise ValueError("Unable to handle auth")
return options
async def _configure_realtime(self, model: str, extra_query: Query) -> tuple[httpx.URL, dict[str, str]]:
auth_headers = {}
query = {
**extra_query,
"api-version": self._api_version,
"deployment": self._azure_deployment or model,
}
if self.api_key and self.api_key != "<missing API key>":
auth_headers = {"api-key": self.api_key}
else:
token = await self._get_azure_ad_token()
if token:
auth_headers = {"Authorization": f"Bearer {token}"}
if self.websocket_base_url is not None:
base_url = httpx.URL(self.websocket_base_url)
merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime"
realtime_url = base_url.copy_with(raw_path=merge_raw_path)
else:
base_url = self._prepare_url("/realtime")
realtime_url = base_url.copy_with(scheme="wss")
url = realtime_url.copy_with(params={**query})
return url, auth_headers
| AsyncAzureOpenAI |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_tkagg.py | {
"start": 229,
"end": 522
} | class ____(FigureCanvasAgg, FigureCanvasTk):
def draw(self):
super().draw()
self.blit()
def blit(self, bbox=None):
_backend_tk.blit(self._tkphoto, self.renderer.buffer_rgba(),
(0, 1, 2, 3), bbox=bbox)
@_BackendTk.export
| FigureCanvasTkAgg |
python | pytorch__pytorch | test/test_tensor_creation_ops.py | {
"start": 192736,
"end": 205285
} | class ____(TestCase):
def _check(self, original, cvt=lambda t: t, is_alias=True, same_dtype=True, same_device=True, **kwargs):
"""Check the output of 'asarray', given its input and assertion information.
Besides calling 'asarray' itself, this function does 4 different checks:
1. Whether the result is aliased or not, depending on 'is_alias'
2. Whether the result has the expected dtype and elements
3. Whether the result lives in the expected device
4. Whether the result has its 'requires_grad' set or not
"""
result = torch.asarray(cvt(original), **kwargs)
self.assertTrue(isinstance(result, torch.Tensor))
# 1. The storage pointers should be equal only if 'is_alias' is set
if is_alias:
self.assertEqual(result.data_ptr(), original.data_ptr())
else:
self.assertNotEqual(result.data_ptr(), original.data_ptr())
# 2. Comparison of the elements only takes place if the original
# sequence and the resulting tensor have the same data type
if same_dtype:
self.assertEqual(original, result)
else:
dtype = kwargs.get("dtype", torch.get_default_dtype())
self.assertEqual(original.shape, result.shape)
self.assertEqual(dtype, result.dtype)
# 3. Given the specified target device, we first check whether
# its type is the same, and then if its index is the same (if it
# is not None)
if same_device:
device = original.device
else:
device = torch.device(kwargs.get("device", "cpu"))
# Compare the target device type, and its index
self.assertEqual(device.type, result.device.type)
if device.index is not None:
self.assertEqual(device.index, result.device.index)
# 4. By default, 'requires_grad' is unset
self.assertEqual(result.requires_grad, kwargs.get("requires_grad", False))
def _test_alias_with_cvt(self, cvt, device, dtype, shape=(5, 5), only_with_dtype=False):
original = make_tensor(shape, dtype=dtype, device=device)
def check(**kwargs):
self._check(original, cvt=cvt, **kwargs)
if not only_with_dtype:
check(copy=False)
check(device=device)
check(device=device, copy=False)
check(dtype=dtype)
check(dtype=dtype, copy=False)
check(requires_grad=False, dtype=dtype)
check(requires_grad=may_require_grad(dtype), dtype=dtype)
check(device=device, dtype=dtype)
check(device=device, dtype=dtype, copy=False)
# Skipping 'meta' devices, since there's no point in comparing their
# data pointer (which is basically the point here), since they all
# return 0.
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_alias_from_tensor(self, device, dtype):
self._test_alias_with_cvt(identity, device, dtype)
@onlyCPU
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_alias_from_numpy(self, device, dtype):
self._test_alias_with_cvt(to_numpy, device, dtype)
# Skipping 'meta', since 'to_dlpack' does not work for them.
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_alias_from_dlpack(self, device, dtype):
self._test_alias_with_cvt(to_dlpack, device, dtype)
@onlyCPU
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_alias_from_buffer(self, device, dtype):
self._test_alias_with_cvt(to_memview, device, dtype, shape=(5,), only_with_dtype=True)
def _test_copy_with_cvt(self, cvt, device, dtype, shape=(5, 5), only_with_dtype=False):
original = make_tensor(shape, dtype=dtype, device=device)
def check(**kwargs):
self._check(original, cvt=cvt, is_alias=False, **kwargs)
if not only_with_dtype:
check(copy=True)
check(device=device, copy=True)
check(requires_grad=False, dtype=dtype, copy=True)
check(requires_grad=may_require_grad(dtype), dtype=dtype, copy=True)
check(dtype=dtype, copy=True)
check(device=device, dtype=dtype, copy=True)
# Copy is forced because of different device
if torch.cuda.is_available():
other = get_another_device(device)
check(same_device=False, device=other, dtype=dtype)
check(same_device=False, device=other, dtype=dtype, copy=True)
# Copy is forced because of different dtype
if not only_with_dtype:
for other in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
if dtype != other:
check(same_dtype=False, dtype=other)
check(same_dtype=False, dtype=other, copy=True)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_copy_tensor(self, device, dtype):
self._test_copy_with_cvt(identity, device, dtype)
@onlyCPU
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_copy_from_numpy(self, device, dtype):
self._test_copy_with_cvt(to_numpy, device, dtype)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_copy_from_dlpack(self, device, dtype):
self._test_copy_with_cvt(to_dlpack, device, dtype)
@onlyCPU
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_copy_from_buffer(self, device, dtype):
self._test_copy_with_cvt(to_memview, device, dtype, shape=(5,), only_with_dtype=True)
def _test_copy_mult_devices(self, devices, dtype, cvt):
cuda1 = devices[0]
cuda2 = devices[1]
original = make_tensor((5, 5), dtype=dtype, device=cuda1)
def check(**kwargs):
self._check(original, cvt, is_alias=False, same_device=False, device=cuda2, **kwargs)
check()
check(copy=True)
check(dtype=dtype, copy=True)
@onlyCUDA
@deviceCountAtLeast(2)
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_copy_from_tensor_mult_devices(self, devices, dtype):
self._test_copy_mult_devices(devices, dtype, identity)
@onlyCUDA
@deviceCountAtLeast(2)
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_copy_from_dlpack_mult_devices(self, devices, dtype):
self._test_copy_mult_devices(devices, dtype, to_dlpack)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_copy_list(self, device, dtype):
original = make_tensor((5, 5), dtype=dtype, device=torch.device("cpu"))
def check(**kwargs):
self._check(original, torch.Tensor.tolist, is_alias=False, **kwargs)
same_device = torch.device("cpu") == device
check(same_device=same_device, device=device, dtype=dtype)
check(same_device=same_device, device=device, dtype=dtype, requires_grad=False)
check(same_device=same_device, device=device, dtype=dtype, requires_grad=may_require_grad(dtype))
check(same_device=same_device, device=device, dtype=dtype, copy=True)
@dtypes(torch.float32)
def test_unsupported_alias(self, device, dtype):
original = make_tensor((5, 5), dtype=dtype, device=device)
if torch.cuda.is_available():
other_device = get_another_device(device)
with self.assertRaisesRegex(ValueError,
f"from device '{device}' to '{other_device}'"):
torch.asarray(original, device=other_device, copy=False)
with self.assertRaisesRegex(ValueError,
"with dtype '.*' into dtype '.*'"):
torch.asarray(original, dtype=torch.float64, copy=False)
with self.assertRaisesRegex(ValueError,
"can't alias arbitrary sequence"):
torch.asarray(original.tolist(), copy=False)
@onlyCUDA
@deviceCountAtLeast(2)
@dtypes(torch.float32)
def test_unsupported_alias_mult_devices(self, devices, dtype):
dev1, dev2 = devices[:2]
original = make_tensor((5, 5), dtype=dtype, device=dev1)
with self.assertRaisesRegex(ValueError,
f"from device '{dev1}' to '{dev2}'"):
torch.asarray(original, device=dev2, copy=False)
@dtypes(torch.float32, torch.complex64)
def test_retain_autograd_history(self, device, dtype):
original = make_tensor((5, 5), dtype=dtype, device=device, requires_grad=True)
# 'cloned' has 'grad_fn=<CloneBackwards>'
cloned = original.clone()
def check(**kwargs):
a = torch.asarray(cloned, **kwargs)
requires_grad = kwargs.get("requires_grad", False)
self.assertEqual(a.requires_grad, requires_grad)
# Autograd history shouldn't be retained when requires_grad is False
self.assertEqual(a.grad_fn is None, not requires_grad)
check()
check(requires_grad=True)
check(copy=True)
check(requires_grad=True, copy=True)
check(requires_grad=False)
check(requires_grad=False, copy=True)
@onlyCPU
def test_astensor_consistency(self, device):
# See issue: https://github.com/pytorch/pytorch/pull/71757
examples = [
# Scalars
True,
42,
1.0,
# Homogeneous Lists
[True, True, False],
[1, 2, 3, 42],
[0.0, 1.0, 2.0, 3.0],
# Mixed Lists
[True, False, 0],
[0.0, True, False],
[0, 1.0, 42],
[0.0, True, False, 42],
# With Complex
[0.0, True, False, 42, 5j],
# With Range
range(5),
]
for e in examples:
original = torch.as_tensor(e)
t = torch.asarray(e)
self.assertEqual(t, original)
# Dynamo changes numpy scalar to array, thus skips the asserted error.
@xfailIfTorchDynamo
@onlyCPU
def test_numpy_scalars(self, device):
scalar = np.float64(0.5)
with self.assertRaisesRegex(RuntimeError, "can't alias NumPy scalars."):
torch.asarray(scalar, copy=False)
tensor = torch.asarray(scalar)
self.assertEqual(tensor.dim(), 0)
self.assertEqual(tensor.item(), scalar.item())
self.assertEqual(tensor.dtype, torch.float64)
# Regression test for https://github.com/pytorch/pytorch/issues/97021
zerodim_arr = np.array(1.)
tensor = torch.asarray(zerodim_arr, dtype=torch.int32)
self.assertEqual(tensor.dim(), 0)
self.assertEqual(tensor.item(), zerodim_arr.item())
self.assertEqual(tensor.dtype, torch.int32)
def test_default_device(self, device):
original = torch.arange(5)
examples: list[tuple[Any, dict]] = [
(3, {}),
(original, {}),
(to_numpy(original), {}),
(to_memview(original), {"dtype": original.dtype}),
]
for data, kwargs in examples:
with torch.device(device):
tensor = torch.asarray(data, **kwargs)
self.assertEqual(tensor.device, torch.device(device))
# Check the contents of the tensor.
if isinstance(data, int):
self.assertEqual(data, tensor.item())
else:
self.assertEqual(data, tensor)
@onlyCUDA
def test_device_without_index(self, device):
original = torch.arange(5, device="cuda")
tensor = torch.asarray(original, device="cuda")
# The storage pointers should be equal
self.assertEqual(original.data_ptr(), tensor.data_ptr())
tensor = torch.asarray(original, copy=True, device="cuda")
# The storage pointers should not be equal
self.assertNotEqual(original.data_ptr(), tensor.data_ptr())
instantiate_device_type_tests(TestTensorCreation, globals())
instantiate_device_type_tests(TestRandomTensorCreation, globals())
instantiate_device_type_tests(TestLikeTensorCreation, globals())
instantiate_device_type_tests(TestBufferProtocol, globals(), only_for="cpu")
instantiate_device_type_tests(TestAsArray, globals())
if __name__ == '__main__':
TestCase._default_dtype_check_enabled = True
run_tests()
| TestAsArray |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/base.py | {
"start": 3618,
"end": 44696
} | class ____(FunctionCallingLLM):
"""
OpenAI LLM.
Args:
model: name of the OpenAI model to use.
temperature: a float from 0 to 1 controlling randomness in generation; higher will lead to more creative, less deterministic responses.
max_tokens: the maximum number of tokens to generate.
additional_kwargs: Add additional parameters to OpenAI request body.
max_retries: How many times to retry the API call if it fails.
timeout: How long to wait, in seconds, for an API call before failing.
reuse_client: Reuse the OpenAI client between requests. When doing anything with large volumes of async API calls, setting this to false can improve stability.
api_key: Your OpenAI api key
api_base: The base URL of the API to call
api_version: the version of the API to call
callback_manager: the callback manager is used for observability.
default_headers: override the default headers for API requests.
http_client: pass in your own httpx.Client instance.
async_http_client: pass in your own httpx.AsyncClient instance.
Examples:
`pip install llama-index-llms-openai`
```python
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo")
stream = llm.stream_complete("Hi, write a short story")
for r in stream:
print(r.delta, end="")
```
"""
model: str = Field(
default=DEFAULT_OPENAI_MODEL, description="The OpenAI model to use."
)
temperature: float = Field(
default=DEFAULT_TEMPERATURE,
description="The temperature to use during generation.",
ge=0.0,
le=2.0,
)
max_tokens: Optional[int] = Field(
description="The maximum number of tokens to generate.",
default=None,
gt=0,
)
logprobs: Optional[bool] = Field(
description="Whether to return logprobs per token.",
default=None,
)
top_logprobs: int = Field(
description="The number of top token log probs to return.",
default=0,
ge=0,
le=20,
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the OpenAI API."
)
max_retries: int = Field(
default=3,
description="The maximum number of API retries.",
ge=0,
)
timeout: float = Field(
default=60.0,
description="The timeout, in seconds, for API requests.",
ge=0,
)
default_headers: Optional[Dict[str, str]] = Field(
default=None, description="The default headers for API requests."
)
reuse_client: bool = Field(
default=True,
description=(
"Reuse the OpenAI client between requests. When doing anything with large "
"volumes of async API calls, setting this to false can improve stability."
),
)
api_key: Optional[str] = Field(default=None, description="The OpenAI API key.")
api_base: Optional[str] = Field(
default=None, description="The base URL for OpenAI API."
)
api_version: Optional[str] = Field(
default=None, description="The API version for OpenAI API."
)
strict: bool = Field(
default=False,
description="Whether to use strict mode for invoking tools/using schemas.",
)
reasoning_effort: Optional[Literal["low", "medium", "high", "minimal"]] = Field(
default=None,
description="The effort to use for reasoning models.",
)
modalities: Optional[List[str]] = Field(
default=None,
description="The output modalities to use for the model.",
)
audio_config: Optional[Dict[str, Any]] = Field(
default=None,
description="The audio configuration to use for the model.",
)
_client: Optional[SyncOpenAI] = PrivateAttr()
_aclient: Optional[AsyncOpenAI] = PrivateAttr()
_http_client: Optional[httpx.Client] = PrivateAttr()
_async_http_client: Optional[httpx.AsyncClient] = PrivateAttr()
def __init__(
self,
model: str = DEFAULT_OPENAI_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: Optional[int] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 3,
timeout: float = 60.0,
reuse_client: bool = True,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
async_http_client: Optional[httpx.AsyncClient] = None,
openai_client: Optional[SyncOpenAI] = None,
async_openai_client: Optional[AsyncOpenAI] = None,
# base class
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
strict: bool = False,
reasoning_effort: Optional[Literal["low", "medium", "high"]] = None,
modalities: Optional[List[str]] = None,
audio_config: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
# TODO: Support deprecated max_new_tokens
if "max_new_tokens" in kwargs:
max_tokens = kwargs["max_new_tokens"]
del kwargs["max_new_tokens"]
additional_kwargs = additional_kwargs or {}
api_key, api_base, api_version = resolve_openai_credentials(
api_key=api_key,
api_base=api_base,
api_version=api_version,
)
# TODO: Temp forced to 1.0 for o1
if model in O1_MODELS:
temperature = 1.0
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
callback_manager=callback_manager,
api_key=api_key,
api_version=api_version,
api_base=api_base,
timeout=timeout,
reuse_client=reuse_client,
default_headers=default_headers,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
strict=strict,
reasoning_effort=reasoning_effort,
modalities=modalities,
audio_config=audio_config,
**kwargs,
)
self._client = openai_client
self._aclient = async_openai_client
self._http_client = http_client
self._async_http_client = async_http_client
def _get_client(self) -> SyncOpenAI:
if not self.reuse_client:
return SyncOpenAI(**self._get_credential_kwargs())
if self._client is None:
self._client = SyncOpenAI(**self._get_credential_kwargs())
return self._client
def _get_aclient(self) -> AsyncOpenAI:
if not self.reuse_client:
return AsyncOpenAI(**self._get_credential_kwargs(is_async=True))
if self._aclient is None:
self._aclient = AsyncOpenAI(**self._get_credential_kwargs(is_async=True))
return self._aclient
def _get_model_name(self) -> str:
model_name = self.model
if "ft-" in model_name: # legacy fine-tuning
model_name = model_name.split(":")[0]
elif model_name.startswith("ft:"):
model_name = model_name.split(":")[1]
return model_name
@classmethod
def class_name(cls) -> str:
return "openai_llm"
@property
def _tokenizer(self) -> Optional[Tokenizer]:
"""
Get a tokenizer for this model, or None if a tokenizing method is unknown.
OpenAI can do this using the tiktoken package, subclasses may not have
this convenience.
"""
return tiktoken.encoding_for_model(self._get_model_name())
    @property
    def metadata(self) -> LLMMetadata:
        """LLM metadata derived from the (fine-tune-stripped) model name.

        Context window, chat capability and function-calling capability are
        looked up from the base model name; ``model_name`` keeps the raw value.
        """
        return LLMMetadata(
            context_window=openai_modelname_to_contextsize(self._get_model_name()),
            # -1 signals "unspecified" when max_tokens is unset.
            num_output=self.max_tokens or -1,
            is_chat_model=is_chat_model(model=self._get_model_name()),
            is_function_calling_model=is_function_calling_model(
                model=self._get_model_name()
            ),
            model_name=self.model,
            # TODO: Temp for O1 beta — O1 models reject the system role, so
            # system messages are sent with the user role instead.
            system_role=MessageRole.USER
            if self.model in O1_MODELS
            else MessageRole.SYSTEM,
        )
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
if self._use_chat_completions(kwargs):
chat_fn = self._chat
else:
chat_fn = completion_to_chat_decorator(self._complete)
return chat_fn(messages, **kwargs)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
if self._use_chat_completions(kwargs):
stream_chat_fn = self._stream_chat
else:
stream_chat_fn = stream_completion_to_chat_decorator(self._stream_complete)
return stream_chat_fn(messages, **kwargs)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
if self.modalities and "audio" in self.modalities:
raise ValueError(
"Audio is not supported for completion. Use chat/achat instead."
)
if self._use_chat_completions(kwargs):
complete_fn = chat_to_completion_decorator(self._chat)
else:
complete_fn = self._complete
return complete_fn(prompt, **kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
if self._use_chat_completions(kwargs):
stream_complete_fn = stream_chat_to_completion_decorator(self._stream_chat)
else:
stream_complete_fn = self._stream_complete
return stream_complete_fn(prompt, **kwargs)
def _use_chat_completions(self, kwargs: Dict[str, Any]) -> bool:
if "use_chat_completions" in kwargs:
return kwargs["use_chat_completions"]
return self.metadata.is_chat_model
def _get_credential_kwargs(self, is_async: bool = False) -> Dict[str, Any]:
return {
"api_key": self.api_key,
"base_url": self.api_base,
"max_retries": self.max_retries,
"timeout": self.timeout,
"default_headers": self.default_headers,
"http_client": self._async_http_client if is_async else self._http_client,
}
    def _get_model_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
        """Assemble the request-payload kwargs for an OpenAI API call.

        Merges model/temperature with per-call kwargs, adds optional
        max_tokens/logprobs, overlays ``additional_kwargs``, then applies
        O1-model and modality-specific adjustments. Order matters: per-call
        kwargs are overridden by ``additional_kwargs``.
        """
        base_kwargs = {"model": self.model, "temperature": self.temperature, **kwargs}
        if self.max_tokens is not None:
            # If max_tokens is None, don't include in the payload:
            # https://platform.openai.com/docs/api-reference/chat
            # https://platform.openai.com/docs/api-reference/completions
            base_kwargs["max_tokens"] = self.max_tokens
        if self.logprobs is not None and self.logprobs is True:
            if self.metadata.is_chat_model:
                # Chat endpoint: boolean flag plus a separate top_logprobs count.
                base_kwargs["logprobs"] = self.logprobs
                base_kwargs["top_logprobs"] = self.top_logprobs
            else:
                # Completions endpoint takes a single integer for logprobs.
                base_kwargs["logprobs"] = self.top_logprobs  # int in this case
        # can't send stream_options to the API when not streaming
        all_kwargs = {**base_kwargs, **self.additional_kwargs}
        if "stream" not in all_kwargs and "stream_options" in all_kwargs:
            del all_kwargs["stream_options"]
        if self.model in O1_MODELS and base_kwargs.get("max_tokens") is not None:
            # O1 models use max_completion_tokens instead of max_tokens;
            # an explicit max_completion_tokens in additional_kwargs wins.
            all_kwargs["max_completion_tokens"] = all_kwargs.get(
                "max_completion_tokens", all_kwargs["max_tokens"]
            )
            all_kwargs.pop("max_tokens", None)
        if self.model in O1_MODELS and self.reasoning_effort is not None:
            # O1 models support reasoning_effort of low, medium, high
            all_kwargs["reasoning_effort"] = self.reasoning_effort
        if self.modalities is not None:
            all_kwargs["modalities"] = self.modalities
        if self.audio_config is not None:
            # The API expects the audio config under the "audio" key.
            all_kwargs["audio"] = self.audio_config
        return all_kwargs
    @llm_retry_decorator
    def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        """Single (non-streaming) chat-completions request.

        Returns a ChatResponse carrying the parsed message, the raw response,
        optional token logprobs, and token usage counts.
        """
        client = self._get_client()
        message_dicts = to_openai_message_dicts(
            messages,
            model=self.model,
        )
        if self.reuse_client:
            response = client.chat.completions.create(
                messages=message_dicts,
                stream=False,
                **self._get_model_kwargs(**kwargs),
            )
        else:
            # One-shot client: close its transport deterministically after the call.
            with client:
                response = client.chat.completions.create(
                    messages=message_dicts,
                    stream=False,
                    **self._get_model_kwargs(**kwargs),
                )
        openai_message = response.choices[0].message
        message = from_openai_message(
            openai_message, modalities=self.modalities or ["text"]
        )
        openai_token_logprobs = response.choices[0].logprobs
        logprobs = None
        if openai_token_logprobs and openai_token_logprobs.content:
            logprobs = from_openai_token_logprobs(openai_token_logprobs.content)
        return ChatResponse(
            message=message,
            raw=response,
            logprobs=logprobs,
            additional_kwargs=self._get_response_token_counts(response),
        )
    @llm_retry_decorator
    def _stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        """Streaming chat-completions request.

        Yields one ChatResponse per received chunk; each response carries the
        full accumulated content so far plus that chunk's delta.
        """
        if self.modalities and "audio" in self.modalities:
            raise ValueError("Audio is not supported for chat streaming")
        client = self._get_client()
        message_dicts = to_openai_message_dicts(
            messages,
            model=self.model,
        )
        def gen() -> ChatResponseGen:
            # Accumulated state across chunks.
            content = ""
            tool_calls: List[ChoiceDeltaToolCall] = []
            is_function = False
            for response in client.chat.completions.create(
                messages=message_dicts,
                **self._get_model_kwargs(stream=True, **kwargs),
            ):
                blocks = []
                response = cast(ChatCompletionChunk, response)
                if len(response.choices) > 0:
                    delta = response.choices[0].delta
                else:
                    # Some chunks carry no choices; treat as an empty delta.
                    delta = ChoiceDelta()
                if delta is None:
                    continue
                # check if this chunk is the start of a function call
                if delta.tool_calls:
                    is_function = True
                # update using deltas
                role = delta.role or MessageRole.ASSISTANT
                content_delta = delta.content or ""
                content += content_delta
                # NOTE: the text block holds the full accumulated text, not the delta.
                blocks.append(TextBlock(text=content))
                additional_kwargs = {}
                if is_function:
                    # Merge this chunk's partial tool-call deltas into the running list.
                    tool_calls = update_tool_calls(tool_calls, delta.tool_calls)
                    if tool_calls:
                        additional_kwargs["tool_calls"] = tool_calls
                        for tool_call in tool_calls:
                            if tool_call.function:
                                blocks.append(
                                    ToolCallBlock(
                                        tool_call_id=tool_call.id,
                                        tool_kwargs=tool_call.function.arguments or {},
                                        tool_name=tool_call.function.name or "",
                                    )
                                )
                yield ChatResponse(
                    message=ChatMessage(
                        role=role,
                        blocks=blocks,
                        additional_kwargs=additional_kwargs,
                    ),
                    delta=content_delta,
                    raw=response,
                    additional_kwargs=self._get_response_token_counts(response),
                )
        return gen()
    @llm_retry_decorator
    def _complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        """Single (non-streaming) completions-endpoint request."""
        client = self._get_client()
        all_kwargs = self._get_model_kwargs(**kwargs)
        # The completions endpoint needs max_tokens; infer it when unset.
        self._update_max_tokens(all_kwargs, prompt)
        if self.reuse_client:
            response = client.completions.create(
                prompt=prompt,
                stream=False,
                **all_kwargs,
            )
        else:
            # One-shot client: close its transport deterministically after the call.
            with client:
                response = client.completions.create(
                    prompt=prompt,
                    stream=False,
                    **all_kwargs,
                )
        text = response.choices[0].text
        openai_completion_logprobs = response.choices[0].logprobs
        logprobs = None
        if openai_completion_logprobs:
            logprobs = from_openai_completion_logprobs(openai_completion_logprobs)
        return CompletionResponse(
            text=text,
            raw=response,
            logprobs=logprobs,
            additional_kwargs=self._get_response_token_counts(response),
        )
    @llm_retry_decorator
    def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
        """Streaming completions-endpoint request.

        Yields one CompletionResponse per chunk carrying the accumulated text.
        """
        client = self._get_client()
        all_kwargs = self._get_model_kwargs(stream=True, **kwargs)
        self._update_max_tokens(all_kwargs, prompt)
        # NOTE(review): unlike _complete, the reuse_client=False path here never
        # closes the one-shot client — confirm whether that is intentional.
        def gen() -> CompletionResponseGen:
            text = ""
            for response in client.completions.create(
                prompt=prompt,
                **all_kwargs,
            ):
                if len(response.choices) > 0:
                    delta = response.choices[0].text
                    if delta is None:
                        delta = ""
                else:
                    # Chunks without choices contribute no text.
                    delta = ""
                text += delta
                yield CompletionResponse(
                    delta=delta,
                    text=text,
                    raw=response,
                    additional_kwargs=self._get_response_token_counts(response),
                )
        return gen()
def _update_max_tokens(self, all_kwargs: Dict[str, Any], prompt: str) -> None:
"""Infer max_tokens for the payload, if possible."""
if self.max_tokens is not None or self._tokenizer is None:
return
# NOTE: non-chat completion endpoint requires max_tokens to be set
num_tokens = len(self._tokenizer.encode(prompt))
max_tokens = self.metadata.context_window - num_tokens
if max_tokens <= 0:
raise ValueError(
f"The prompt has {num_tokens} tokens, which is too long for"
" the model. Please use a prompt that fits within"
f" {self.metadata.context_window} tokens."
)
all_kwargs["max_tokens"] = max_tokens
def _get_response_token_counts(self, raw_response: Any) -> dict:
"""Get the token usage reported by the response."""
if hasattr(raw_response, "usage"):
try:
prompt_tokens = raw_response.usage.prompt_tokens
completion_tokens = raw_response.usage.completion_tokens
total_tokens = raw_response.usage.total_tokens
except AttributeError:
return {}
elif isinstance(raw_response, dict):
usage = raw_response.get("usage", {})
# NOTE: other model providers that use the OpenAI client may not report usage
if usage is None:
return {}
# Backwards compatibility with old dict type
prompt_tokens = usage.get("prompt_tokens", 0)
completion_tokens = usage.get("completion_tokens", 0)
total_tokens = usage.get("total_tokens", 0)
else:
return {}
return {
"prompt_tokens": prompt_tokens,
"completion_tokens": completion_tokens,
"total_tokens": total_tokens,
}
# ===== Async Endpoints =====
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
achat_fn: Callable[..., Awaitable[ChatResponse]]
if self._use_chat_completions(kwargs):
achat_fn = self._achat
else:
achat_fn = acompletion_to_chat_decorator(self._acomplete)
return await achat_fn(messages, **kwargs)
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseAsyncGen:
astream_chat_fn: Callable[..., Awaitable[ChatResponseAsyncGen]]
if self._use_chat_completions(kwargs):
astream_chat_fn = self._astream_chat
else:
astream_chat_fn = astream_completion_to_chat_decorator(
self._astream_complete
)
return await astream_chat_fn(messages, **kwargs)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
if self.modalities and "audio" in self.modalities:
raise ValueError(
"Audio is not supported for completion. Use chat/achat instead."
)
if self._use_chat_completions(kwargs):
acomplete_fn = achat_to_completion_decorator(self._achat)
else:
acomplete_fn = self._acomplete
return await acomplete_fn(prompt, **kwargs)
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
if self._use_chat_completions(kwargs):
astream_complete_fn = astream_chat_to_completion_decorator(
self._astream_chat
)
else:
astream_complete_fn = self._astream_complete
return await astream_complete_fn(prompt, **kwargs)
    @llm_retry_decorator
    async def _achat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponse:
        """Async single (non-streaming) chat-completions request.

        Mirror of the sync ``_chat``: returns the parsed message, raw response,
        optional token logprobs, and token usage counts.
        """
        aclient = self._get_aclient()
        message_dicts = to_openai_message_dicts(
            messages,
            model=self.model,
        )
        if self.reuse_client:
            response = await aclient.chat.completions.create(
                messages=message_dicts, stream=False, **self._get_model_kwargs(**kwargs)
            )
        else:
            # One-shot client: close its transport deterministically after the call.
            async with aclient:
                response = await aclient.chat.completions.create(
                    messages=message_dicts,
                    stream=False,
                    **self._get_model_kwargs(**kwargs),
                )
        openai_message = response.choices[0].message
        message = from_openai_message(
            openai_message, modalities=self.modalities or ["text"]
        )
        openai_token_logprobs = response.choices[0].logprobs
        logprobs = None
        if openai_token_logprobs and openai_token_logprobs.content:
            logprobs = from_openai_token_logprobs(openai_token_logprobs.content)
        return ChatResponse(
            message=message,
            raw=response,
            logprobs=logprobs,
            additional_kwargs=self._get_response_token_counts(response),
        )
    @llm_retry_decorator
    async def _astream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        """Async streaming chat-completions request.

        Mirror of the sync ``_stream_chat``, with an extra skip of an initial
        chunk that carries neither content nor tool calls.
        """
        if self.modalities and "audio" in self.modalities:
            raise ValueError("Audio is not supported for chat streaming")
        aclient = self._get_aclient()
        message_dicts = to_openai_message_dicts(
            messages,
            model=self.model,
        )
        async def gen() -> ChatResponseAsyncGen:
            # Accumulated state across chunks.
            content = ""
            tool_calls: List[ChoiceDeltaToolCall] = []
            is_function = False
            first_chat_chunk = True
            async for response in await aclient.chat.completions.create(
                messages=message_dicts,
                **self._get_model_kwargs(stream=True, **kwargs),
            ):
                blocks = []
                response = cast(ChatCompletionChunk, response)
                if len(response.choices) > 0:
                    # check if the first chunk has neither content nor tool_calls
                    # this happens when 1106 models end up calling multiple tools
                    if (
                        first_chat_chunk
                        and response.choices[0].delta.content is None
                        and response.choices[0].delta.tool_calls is None
                    ):
                        first_chat_chunk = False
                        continue
                    delta = response.choices[0].delta
                else:
                    # Some chunks carry no choices; treat as an empty delta.
                    delta = ChoiceDelta()
                first_chat_chunk = False
                if delta is None:
                    continue
                # check if this chunk is the start of a function call
                if delta.tool_calls:
                    is_function = True
                # update using deltas
                role = delta.role or MessageRole.ASSISTANT
                content_delta = delta.content or ""
                content += content_delta
                # NOTE: the text block holds the full accumulated text, not the delta.
                blocks.append(TextBlock(text=content))
                additional_kwargs = {}
                if is_function:
                    # Merge this chunk's partial tool-call deltas into the running list.
                    tool_calls = update_tool_calls(tool_calls, delta.tool_calls)
                    if tool_calls:
                        additional_kwargs["tool_calls"] = tool_calls
                        for tool_call in tool_calls:
                            if tool_call.function:
                                blocks.append(
                                    ToolCallBlock(
                                        tool_call_id=tool_call.id,
                                        tool_kwargs=tool_call.function.arguments or {},
                                        tool_name=tool_call.function.name or "",
                                    )
                                )
                yield ChatResponse(
                    message=ChatMessage(
                        role=role,
                        blocks=blocks,
                        additional_kwargs=additional_kwargs,
                    ),
                    delta=content_delta,
                    raw=response,
                    additional_kwargs=self._get_response_token_counts(response),
                )
        return gen()
    @llm_retry_decorator
    async def _acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        """Async single (non-streaming) completions-endpoint request."""
        aclient = self._get_aclient()
        all_kwargs = self._get_model_kwargs(**kwargs)
        # The completions endpoint needs max_tokens; infer it when unset.
        self._update_max_tokens(all_kwargs, prompt)
        if self.reuse_client:
            response = await aclient.completions.create(
                prompt=prompt,
                stream=False,
                **all_kwargs,
            )
        else:
            # One-shot client: close its transport deterministically after the call.
            async with aclient:
                response = await aclient.completions.create(
                    prompt=prompt,
                    stream=False,
                    **all_kwargs,
                )
        text = response.choices[0].text
        openai_completion_logprobs = response.choices[0].logprobs
        logprobs = None
        if openai_completion_logprobs:
            logprobs = from_openai_completion_logprobs(openai_completion_logprobs)
        return CompletionResponse(
            text=text,
            raw=response,
            logprobs=logprobs,
            additional_kwargs=self._get_response_token_counts(response),
        )
    @llm_retry_decorator
    async def _astream_complete(
        self, prompt: str, **kwargs: Any
    ) -> CompletionResponseAsyncGen:
        """Async streaming completions-endpoint request.

        Yields one CompletionResponse per chunk carrying the accumulated text.
        """
        aclient = self._get_aclient()
        all_kwargs = self._get_model_kwargs(stream=True, **kwargs)
        self._update_max_tokens(all_kwargs, prompt)
        # NOTE(review): unlike _acomplete, the reuse_client=False path here never
        # closes the one-shot client — confirm whether that is intentional.
        async def gen() -> CompletionResponseAsyncGen:
            text = ""
            async for response in await aclient.completions.create(
                prompt=prompt,
                **all_kwargs,
            ):
                if len(response.choices) > 0:
                    delta = response.choices[0].text
                    if delta is None:
                        delta = ""
                else:
                    # Chunks without choices contribute no text.
                    delta = ""
                text += delta
                yield CompletionResponse(
                    delta=delta,
                    text=text,
                    raw=response,
                    additional_kwargs=self._get_response_token_counts(response),
                )
        return gen()
def _prepare_chat_with_tools(
self,
tools: Sequence["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False,
tool_required: bool = False,
tool_choice: Optional[Union[str, dict]] = None,
strict: Optional[bool] = None,
**kwargs: Any,
) -> Dict[str, Any]:
"""Predict and call the tool."""
tool_specs = [
tool.metadata.to_openai_tool(skip_length_check=True) for tool in tools
]
# if strict is passed in, use, else default to the class-level attribute, else default to True`
if strict is not None:
strict = strict
else:
strict = self.strict
if self.metadata.is_function_calling_model:
for tool_spec in tool_specs:
if tool_spec["type"] == "function":
tool_spec["function"]["strict"] = strict
# in current openai 1.40.0 it is always false.
tool_spec["function"]["parameters"]["additionalProperties"] = False
if isinstance(user_msg, str):
user_msg = ChatMessage(role=MessageRole.USER, content=user_msg)
messages = chat_history or []
if user_msg:
messages.append(user_msg)
return {
"messages": messages,
"tools": tool_specs or None,
"tool_choice": resolve_tool_choice(tool_choice, tool_required)
if tool_specs
else None,
**kwargs,
}
def _validate_chat_with_tools_response(
self,
response: ChatResponse,
tools: Sequence["BaseTool"],
allow_parallel_tool_calls: bool = False,
**kwargs: Any,
) -> ChatResponse:
"""Validate the response from chat_with_tools."""
if not allow_parallel_tool_calls:
force_single_tool_call(response)
return response
def get_tool_calls_from_response(
self,
response: "ChatResponse",
error_on_no_tool_call: bool = True,
**kwargs: Any,
) -> List[ToolSelection]:
"""Predict and call the tool."""
tool_calls = [
block
for block in response.message.blocks
if isinstance(block, ToolCallBlock)
]
if tool_calls:
if len(tool_calls) < 1:
if error_on_no_tool_call:
raise ValueError(
f"Expected at least one tool call, but got {len(tool_calls)} tool calls."
)
else:
return []
tool_selections = []
for tool_call in tool_calls:
# this should handle both complete and partial jsons
try:
if isinstance(tool_call.tool_kwargs, str):
argument_dict = parse_partial_json(tool_call.tool_kwargs)
else:
argument_dict = tool_call.tool_kwargs
except (ValueError, TypeError, JSONDecodeError):
argument_dict = {}
tool_selections.append(
ToolSelection(
tool_id=tool_call.tool_call_id or "",
tool_name=tool_call.tool_name,
tool_kwargs=argument_dict,
)
)
return tool_selections
else: # keep it backward-compatible
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) < 1:
if error_on_no_tool_call:
raise ValueError(
f"Expected at least one tool call, but got {len(tool_calls)} tool calls."
)
else:
return []
tool_selections = []
for tool_call in tool_calls:
if tool_call.type != "function":
raise ValueError("Invalid tool type. Unsupported by OpenAI llm")
# this should handle both complete and partial jsons
try:
argument_dict = parse_partial_json(tool_call.function.arguments)
except (ValueError, TypeError, JSONDecodeError):
argument_dict = {}
tool_selections.append(
ToolSelection(
tool_id=tool_call.id,
tool_name=tool_call.function.name,
tool_kwargs=argument_dict,
)
)
return tool_selections
def _prepare_schema(
self, llm_kwargs: Optional[Dict[str, Any]], output_cls: Type[Model]
) -> Dict[str, Any]:
from openai.resources.chat.completions.completions import (
_type_to_response_format,
)
llm_kwargs = llm_kwargs or {}
llm_kwargs["response_format"] = _type_to_response_format(output_cls)
if "tool_choice" in llm_kwargs:
del llm_kwargs["tool_choice"]
return llm_kwargs
def _should_use_structure_outputs(self) -> bool:
return (
self.pydantic_program_mode == PydanticProgramMode.DEFAULT
and is_json_schema_supported(self.model)
)
@dispatcher.span
def structured_predict(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> Model:
"""Structured predict."""
llm_kwargs = llm_kwargs or {}
if self._should_use_structure_outputs():
messages = self._extend_messages(prompt.format_messages(**prompt_args))
llm_kwargs = self._prepare_schema(llm_kwargs, output_cls)
response = self.chat(messages, **llm_kwargs)
return output_cls.model_validate_json(str(response.message.content))
# when uses function calling to extract structured outputs
# here we force tool_choice to be required
llm_kwargs["tool_choice"] = (
"required" if "tool_choice" not in llm_kwargs else llm_kwargs["tool_choice"]
)
return super().structured_predict(
output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args
)
@dispatcher.span
async def astructured_predict(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> Model:
"""Structured predict."""
llm_kwargs = llm_kwargs or {}
if self._should_use_structure_outputs():
messages = self._extend_messages(prompt.format_messages(**prompt_args))
llm_kwargs = self._prepare_schema(llm_kwargs, output_cls)
response = await self.achat(messages, **llm_kwargs)
return output_cls.model_validate_json(str(response.message.content))
# when uses function calling to extract structured outputs
# here we force tool_choice to be required
llm_kwargs["tool_choice"] = (
"required" if "tool_choice" not in llm_kwargs else llm_kwargs["tool_choice"]
)
return await super().astructured_predict(
output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args
)
def _structured_stream_call(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> Generator[
Union[Model, List[Model], "FlexibleModel", List["FlexibleModel"]], None, None
]:
if self._should_use_structure_outputs():
from llama_index.core.program.streaming_utils import (
process_streaming_content_incremental,
)
messages = self._extend_messages(prompt.format_messages(**prompt_args))
llm_kwargs = self._prepare_schema(llm_kwargs, output_cls)
curr = None
for response in self.stream_chat(messages, **llm_kwargs):
curr = process_streaming_content_incremental(response, output_cls, curr)
yield curr
else:
llm_kwargs["tool_choice"] = (
"required"
if "tool_choice" not in llm_kwargs
else llm_kwargs["tool_choice"]
)
yield from super()._structured_stream_call(
output_cls, prompt, llm_kwargs, **prompt_args
)
async def _structured_astream_call(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> AsyncGenerator[
Union[Model, List[Model], "FlexibleModel", List["FlexibleModel"]], None
]:
if self._should_use_structure_outputs():
async def gen(
llm_kwargs=llm_kwargs,
) -> AsyncGenerator[
Union[Model, List[Model], FlexibleModel, List[FlexibleModel]], None
]:
from llama_index.core.program.streaming_utils import (
process_streaming_content_incremental,
)
messages = self._extend_messages(prompt.format_messages(**prompt_args))
llm_kwargs = self._prepare_schema(llm_kwargs, output_cls)
curr = None
async for response in await self.astream_chat(messages, **llm_kwargs):
curr = process_streaming_content_incremental(
response, output_cls, curr
)
yield curr
return gen()
else:
llm_kwargs["tool_choice"] = (
"required"
if "tool_choice" not in llm_kwargs
else llm_kwargs["tool_choice"]
)
return await super()._structured_astream_call(
output_cls, prompt, llm_kwargs, **prompt_args
)
@dispatcher.span
def stream_structured_predict(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> Generator[Union[Model, FlexibleModel], None, None]:
"""Stream structured predict."""
llm_kwargs = llm_kwargs or {}
return super().stream_structured_predict(
output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args
)
@dispatcher.span
async def astream_structured_predict(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> AsyncGenerator[Union[Model, FlexibleModel], None]:
"""Stream structured predict."""
llm_kwargs = llm_kwargs or {}
return await super().astream_structured_predict(
output_cls, prompt, llm_kwargs=llm_kwargs, **prompt_args
)
| OpenAI |
python | crytic__slither | slither/vyper_parsing/ast/types.py | {
"start": 699,
"end": 834
} | class ____(ASTNode):
func: ASTNode
args: List[ASTNode]
keyword: Optional[ASTNode]
keywords: List[ASTNode]
@dataclass
| Call |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 93965,
"end": 94720
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
output_link: Optional[str] = Field(
None, description="The link to find the output results."
)
query_text: Optional[str] = Field(
None,
description=(
"The text of the SQL query. Can Run permission of the SQL query is required"
" to view this field."
),
)
sql_statements: Optional[SqlStatementOutput] = Field(
None, description="Information about SQL statements executed in the run."
)
warehouse_id: Optional[str] = Field(
None, description="The canonical identifier of the SQL warehouse."
)
| SqlQueryOutput |
python | kamyu104__LeetCode-Solutions | Python/coupon-code-validator.py | {
"start": 79,
"end": 698
} | class ____(object):
def validateCoupons(self, code, businessLine, isActive):
"""
:type code: List[str]
:type businessLine: List[str]
:type isActive: List[bool]
:rtype: List[str]
"""
LOOKUP = {"electronics":0, "grocery":1, "pharmacy":2, "restaurant":3}
sorted_codes = []
for c, b, a in itertools.izip(code, businessLine, isActive):
if a and c and b in LOOKUP and all(x.isalnum() or x == '_' for x in c):
sorted_codes.append((LOOKUP[b], c))
sorted_codes.sort()
return [c for _, c in sorted_codes]
| Solution |
python | coleifer__peewee | tests/fields.py | {
"start": 31369,
"end": 33115
} | class ____(ModelTestCase):
requires = [UUIDModel]
def test_uuid_field(self):
uu = uuid.uuid4()
u = UUIDModel.create(data=uu)
u_db = UUIDModel.get(UUIDModel.id == u.id)
self.assertEqual(u_db.data, uu)
self.assertTrue(u_db.bdata is None)
u_db2 = UUIDModel.get(UUIDModel.data == uu)
self.assertEqual(u_db2.id, u.id)
# Verify we can use hex string.
uu = uuid.uuid4()
u = UUIDModel.create(data=uu.hex)
u_db = UUIDModel.get(UUIDModel.data == uu.hex)
self.assertEqual(u.id, u_db.id)
self.assertEqual(u_db.data, uu)
# Verify we can use raw binary representation.
uu = uuid.uuid4()
u = UUIDModel.create(data=uu.bytes)
u_db = UUIDModel.get(UUIDModel.data == uu.bytes)
self.assertEqual(u.id, u_db.id)
self.assertEqual(u_db.data, uu)
def test_binary_uuid_field(self):
uu = uuid.uuid4()
u = UUIDModel.create(bdata=uu)
u_db = UUIDModel.get(UUIDModel.id == u.id)
self.assertEqual(u_db.bdata, uu)
self.assertTrue(u_db.data is None)
u_db2 = UUIDModel.get(UUIDModel.bdata == uu)
self.assertEqual(u_db2.id, u.id)
# Verify we can use hex string.
uu = uuid.uuid4()
u = UUIDModel.create(bdata=uu.hex)
u_db = UUIDModel.get(UUIDModel.bdata == uu.hex)
self.assertEqual(u.id, u_db.id)
self.assertEqual(u_db.bdata, uu)
# Verify we can use raw binary representation.
uu = uuid.uuid4()
u = UUIDModel.create(bdata=uu.bytes)
u_db = UUIDModel.get(UUIDModel.bdata == uu.bytes)
self.assertEqual(u.id, u_db.id)
self.assertEqual(u_db.bdata, uu)
| TestUUIDField |
python | django-compressor__django-compressor | compressor/tests/test_offline.py | {
"start": 13497,
"end": 14232
} | class ____(OfflineTestCaseMixin, TestCase):
templates_dir = "test_duplicate"
def _test_offline(self, engine, verbosity=0):
count, result = CompressCommand().handle_inner(
engines=[engine], verbosity=verbosity
)
# Only one block compressed, the second identical one was skipped.
self.assertEqual(1, count)
# Only 1 <script> block in returned result as well.
self.assertEqual([self._render_script("822ac7501287")], result)
rendered_template = self._render_template(engine)
# But rendering the template returns both (identical) scripts.
self.assertEqual(rendered_template, self._render_result(result * 2, ""))
| OfflineCompressSkipDuplicatesTestCase |
python | huggingface__transformers | src/transformers/models/d_fine/modeling_d_fine.py | {
"start": 96109,
"end": 105056
} | class ____(nn.Module):
"""
Decoder consisting of a projection layer, a set of `DFineEncoder`, a top-down Feature Pyramid Network
(FPN) and a bottom-up Path Aggregation Network (PAN). More details on the paper: https://huggingface.co/papers/2304.08069
Args:
config: DFineConfig
"""
def __init__(self, config: DFineConfig):
super().__init__()
self.config = config
self.in_channels = config.encoder_in_channels
self.num_fpn_stages = len(self.in_channels) - 1
self.feat_strides = config.feat_strides
self.encoder_hidden_dim = config.encoder_hidden_dim
self.encode_proj_layers = config.encode_proj_layers
self.positional_encoding_temperature = config.positional_encoding_temperature
self.eval_size = config.eval_size
self.out_channels = [self.encoder_hidden_dim for _ in self.in_channels]
self.out_strides = self.feat_strides
# encoder transformer
self.encoder = nn.ModuleList([DFineEncoder(config) for _ in range(len(self.encode_proj_layers))])
# top-down fpn
self.lateral_convs = nn.ModuleList()
self.fpn_blocks = nn.ModuleList()
for _ in range(len(self.in_channels) - 1, 0, -1):
lateral_layer = DFineConvNormLayer(config, self.encoder_hidden_dim, self.encoder_hidden_dim, 1, 1)
self.lateral_convs.append(lateral_layer)
num_blocks = round(3 * config.depth_mult)
fpn_layer = DFineRepNCSPELAN4(config, numb_blocks=num_blocks)
self.fpn_blocks.append(fpn_layer)
# bottom-up pan
self.downsample_convs = nn.ModuleList()
self.pan_blocks = nn.ModuleList()
for _ in range(len(self.in_channels) - 1):
self.downsample_convs.append(DFineSCDown(config, 3, 2))
num_blocks = round(3 * config.depth_mult)
self.pan_blocks.append(DFineRepNCSPELAN4(config, numb_blocks=num_blocks))
@staticmethod
def build_2d_sincos_position_embedding(
width, height, embed_dim=256, temperature=10000.0, device="cpu", dtype=torch.float32
):
grid_w = torch.arange(torch_int(width), device=device).to(dtype)
grid_h = torch.arange(torch_int(height), device=device).to(dtype)
grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing="xy")
if embed_dim % 4 != 0:
raise ValueError("Embed dimension must be divisible by 4 for 2D sin-cos position embedding")
pos_dim = embed_dim // 4
omega = torch.arange(pos_dim, device=device).to(dtype) / pos_dim
omega = 1.0 / (temperature**omega)
out_w = grid_w.flatten()[..., None] @ omega[None]
out_h = grid_h.flatten()[..., None] @ omega[None]
return torch.concat([out_h.sin(), out_h.cos(), out_w.sin(), out_w.cos()], dim=1)[None, :, :]
def forward(
self,
inputs_embeds=None,
attention_mask=None,
position_embeddings=None,
spatial_shapes=None,
level_start_index=None,
valid_ratios=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
- 1 for pixel features that are real (i.e. **not masked**),
- 0 for pixel features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
Spatial shapes of each feature map.
level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`):
Starting index of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Ratio of valid area in each feature level.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = inputs_embeds
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# encoder
if self.config.encoder_layers > 0:
for i, enc_ind in enumerate(self.encode_proj_layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states[enc_ind],)
height, width = hidden_states[enc_ind].shape[2:]
# flatten [batch, channel, height, width] to [batch, height*width, channel]
src_flatten = hidden_states[enc_ind].flatten(2).permute(0, 2, 1)
if self.training or self.eval_size is None:
pos_embed = self.build_2d_sincos_position_embedding(
width,
height,
self.encoder_hidden_dim,
self.positional_encoding_temperature,
device=src_flatten.device,
dtype=src_flatten.dtype,
)
else:
pos_embed = None
layer_outputs = self.encoder[i](
src_flatten,
pos_embed=pos_embed,
output_attentions=output_attentions,
)
hidden_states[enc_ind] = (
layer_outputs[0].permute(0, 2, 1).reshape(-1, self.encoder_hidden_dim, height, width).contiguous()
)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states[enc_ind],)
# top-down FPN
fpn_feature_maps = [hidden_states[-1]]
for idx, (lateral_conv, fpn_block) in enumerate(zip(self.lateral_convs, self.fpn_blocks)):
backbone_feature_map = hidden_states[self.num_fpn_stages - idx - 1]
top_fpn_feature_map = fpn_feature_maps[-1]
# apply lateral block
top_fpn_feature_map = lateral_conv(top_fpn_feature_map)
fpn_feature_maps[-1] = top_fpn_feature_map
# apply fpn block
top_fpn_feature_map = F.interpolate(top_fpn_feature_map, scale_factor=2.0, mode="nearest")
fused_feature_map = torch.concat([top_fpn_feature_map, backbone_feature_map], dim=1)
new_fpn_feature_map = fpn_block(fused_feature_map)
fpn_feature_maps.append(new_fpn_feature_map)
fpn_feature_maps.reverse()
# bottom-up PAN
pan_feature_maps = [fpn_feature_maps[0]]
for idx, (downsample_conv, pan_block) in enumerate(zip(self.downsample_convs, self.pan_blocks)):
top_pan_feature_map = pan_feature_maps[-1]
fpn_feature_map = fpn_feature_maps[idx + 1]
downsampled_feature_map = downsample_conv(top_pan_feature_map)
fused_feature_map = torch.concat([downsampled_feature_map, fpn_feature_map], dim=1)
new_pan_feature_map = pan_block(fused_feature_map)
pan_feature_maps.append(new_pan_feature_map)
if not return_dict:
return tuple(v for v in [pan_feature_maps, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=pan_feature_maps, hidden_states=encoder_states, attentions=all_attentions
)
__all__ = ["DFineModel", "DFinePreTrainedModel", "DFineForObjectDetection"]
| DFineHybridEncoder |
python | getsentry__sentry | src/sentry/sentry_metrics/indexer/postgres/models.py | {
"start": 497,
"end": 1032
} | class ____(Model):
string = models.CharField(max_length=MAX_INDEXED_COLUMN_LENGTH)
organization_id = BoundedBigIntegerField()
date_added = models.DateTimeField(default=timezone.now)
last_seen = models.DateTimeField(default=timezone.now, db_index=True)
retention_days = models.IntegerField(default=90)
objects: ClassVar[BaseManager[Self]] = BaseManager(
cache_fields=("pk",), cache_ttl=settings.SENTRY_METRICS_INDEXER_CACHE_TTL
)
class Meta:
abstract = True
@region_silo_model
| BaseIndexer |
python | facebook__pyre-check | tools/incremental_test/batch.py | {
"start": 1279,
"end": 1952
} | class ____(RunnerResult):
_trace: str
def __init__(self, input: Specification, trace: str) -> None:
super().__init__(input)
self._trace = trace
def get_status(self) -> str:
return "exception"
def to_json(self, dont_show_discrepancy: bool) -> Dict[str, Any]:
return {"status": self.get_status(), "trace": self._trace}
def to_logger_sample(self) -> Sample:
return Sample(
normals={
"status": self.get_status(),
"input": json.dumps(self.input.to_json()),
"exception": self._trace,
},
integers={},
)
| ExceptionalRunnerResult |
python | pydantic__pydantic | .github/actions/people/people.py | {
"start": 6304,
"end": 6455
} | class ____(BaseModel):
"""Represents an edge in the GitHub GraphQL pull requests query."""
cursor: str
node: PullRequestNode
| PullRequestEdge |
python | pydata__xarray | xarray/tests/test_indexing.py | {
"start": 1141,
"end": 9415
} | class ____:
def set_to_zero(self, x, i):
x = x.copy()
x[i] = 0
return x
def test_expanded_indexer(self) -> None:
x = np.random.randn(10, 11, 12, 13, 14)
y = np.arange(5)
arr = ReturnItem()
for i in [
arr[:],
arr[...],
arr[0, :, 10],
arr[..., 10],
arr[:5, ..., 0],
arr[..., 0, :],
arr[y],
arr[y, y],
arr[..., y, y],
arr[..., 0, 1, 2, 3, 4],
]:
j = indexing.expanded_indexer(i, x.ndim)
assert_array_equal(x[i], x[j])
assert_array_equal(self.set_to_zero(x, i), self.set_to_zero(x, j))
with pytest.raises(IndexError, match=r"too many indices"):
indexing.expanded_indexer(arr[1, 2, 3], 2)
def test_stacked_multiindex_min_max(self) -> None:
data = np.random.randn(3, 23, 4)
da = DataArray(
data,
name="value",
dims=["replicate", "rsample", "exp"],
coords=dict(
replicate=[0, 1, 2], exp=["a", "b", "c", "d"], rsample=list(range(23))
),
)
da2 = da.stack(sample=("replicate", "rsample"))
s = da2.sample
assert_array_equal(da2.loc["a", s.max()], data[2, 22, 0])
assert_array_equal(da2.loc["b", s.min()], data[0, 0, 1])
def test_group_indexers_by_index(self) -> None:
mindex = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two"))
data = DataArray(
np.zeros((4, 2, 2)), coords={"x": mindex, "y": [1, 2]}, dims=("x", "y", "z")
)
data.coords["y2"] = ("y", [2.0, 3.0])
grouped_indexers = indexing.group_indexers_by_index(
data, {"z": 0, "one": "a", "two": 1, "y": 0}, {}
)
for idx, indexers in grouped_indexers:
if idx is None:
assert indexers == {"z": 0}
elif idx.equals(data.xindexes["x"]):
assert indexers == {"one": "a", "two": 1}
elif idx.equals(data.xindexes["y"]):
assert indexers == {"y": 0}
assert len(grouped_indexers) == 3
with pytest.raises(KeyError, match=r"no index found for coordinate 'y2'"):
indexing.group_indexers_by_index(data, {"y2": 2.0}, {})
with pytest.raises(
KeyError, match=r"'w' is not a valid dimension or coordinate"
):
indexing.group_indexers_by_index(data, {"w": "a"}, {})
with pytest.raises(ValueError, match=r"cannot supply.*"):
indexing.group_indexers_by_index(data, {"z": 1}, {"method": "nearest"})
def test_map_index_queries(self) -> None:
def create_sel_results(
x_indexer,
x_index,
other_vars,
drop_coords,
drop_indexes,
rename_dims,
):
dim_indexers = {"x": x_indexer}
index_vars = x_index.create_variables()
indexes = dict.fromkeys(index_vars, x_index)
variables = {}
variables.update(index_vars)
variables.update(other_vars)
return indexing.IndexSelResult(
dim_indexers=dim_indexers,
indexes=indexes,
variables=variables,
drop_coords=drop_coords,
drop_indexes=drop_indexes,
rename_dims=rename_dims,
)
def test_indexer(
data: T_Xarray,
x: Any,
expected: indexing.IndexSelResult,
) -> None:
results = indexing.map_index_queries(data, {"x": x})
assert results.dim_indexers.keys() == expected.dim_indexers.keys()
assert_array_equal(results.dim_indexers["x"], expected.dim_indexers["x"])
assert results.indexes.keys() == expected.indexes.keys()
for k in results.indexes:
assert results.indexes[k].equals(expected.indexes[k])
assert results.variables.keys() == expected.variables.keys()
for k in results.variables:
assert_array_equal(results.variables[k], expected.variables[k])
assert set(results.drop_coords) == set(expected.drop_coords)
assert set(results.drop_indexes) == set(expected.drop_indexes)
assert results.rename_dims == expected.rename_dims
data = Dataset({"x": ("x", [1, 2, 3])})
mindex = pd.MultiIndex.from_product(
[["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three")
)
mdata = DataArray(range(8), [("x", mindex)])
test_indexer(data, 1, indexing.IndexSelResult({"x": 0}))
test_indexer(data, np.int32(1), indexing.IndexSelResult({"x": 0}))
test_indexer(data, Variable([], 1), indexing.IndexSelResult({"x": 0}))
test_indexer(mdata, ("a", 1, -1), indexing.IndexSelResult({"x": 0}))
expected = create_sel_results(
[True, True, False, False, False, False, False, False],
PandasIndex(pd.Index([-1, -2]), "three"),
{"one": Variable((), "a"), "two": Variable((), 1)},
["x"],
["one", "two"],
{"x": "three"},
)
test_indexer(mdata, ("a", 1), expected)
expected = create_sel_results(
slice(0, 4, None),
PandasMultiIndex(
pd.MultiIndex.from_product([[1, 2], [-1, -2]], names=("two", "three")),
"x",
),
{"one": Variable((), "a")},
[],
["one"],
{},
)
test_indexer(mdata, "a", expected)
expected = create_sel_results(
[True, True, True, True, False, False, False, False],
PandasMultiIndex(
pd.MultiIndex.from_product([[1, 2], [-1, -2]], names=("two", "three")),
"x",
),
{"one": Variable((), "a")},
[],
["one"],
{},
)
test_indexer(mdata, ("a",), expected)
test_indexer(
mdata, [("a", 1, -1), ("b", 2, -2)], indexing.IndexSelResult({"x": [0, 7]})
)
test_indexer(
mdata, slice("a", "b"), indexing.IndexSelResult({"x": slice(0, 8, None)})
)
test_indexer(
mdata,
slice(("a", 1), ("b", 1)),
indexing.IndexSelResult({"x": slice(0, 6, None)}),
)
test_indexer(
mdata,
{"one": "a", "two": 1, "three": -1},
indexing.IndexSelResult({"x": 0}),
)
expected = create_sel_results(
[True, True, False, False, False, False, False, False],
PandasIndex(pd.Index([-1, -2]), "three"),
{"one": Variable((), "a"), "two": Variable((), 1)},
["x"],
["one", "two"],
{"x": "three"},
)
test_indexer(mdata, {"one": "a", "two": 1}, expected)
expected = create_sel_results(
[True, False, True, False, False, False, False, False],
PandasIndex(pd.Index([1, 2]), "two"),
{"one": Variable((), "a"), "three": Variable((), -1)},
["x"],
["one", "three"],
{"x": "two"},
)
test_indexer(mdata, {"one": "a", "three": -1}, expected)
expected = create_sel_results(
[True, True, True, True, False, False, False, False],
PandasMultiIndex(
pd.MultiIndex.from_product([[1, 2], [-1, -2]], names=("two", "three")),
"x",
),
{"one": Variable((), "a")},
[],
["one"],
{},
)
test_indexer(mdata, {"one": "a"}, expected)
def test_read_only_view(self) -> None:
arr = DataArray(
np.random.rand(3, 3),
coords={"x": np.arange(3), "y": np.arange(3)},
dims=("x", "y"),
) # Create a 2D DataArray
arr = arr.expand_dims({"z": 3}, -1) # New dimension 'z'
arr["z"] = np.arange(3) # New coords to dimension 'z'
with pytest.raises(ValueError, match=r"Do you want to .copy()"):
arr.loc[0, 0, 0] = 999
| TestIndexers |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 271234,
"end": 272775
} | class ____(StatNode):
# Abstract base class for assignment nodes.
#
# The analyse_expressions and generate_execution_code
# phases of assignments are split into two sub-phases
# each, to enable all the right hand sides of a
# parallel assignment to be evaluated before assigning
# to any of the left hand sides.
def _warn_on_const_assignment(self, lhs, rhs):
rhs_t = rhs.type
lhs_t = lhs.type
if rhs_t.is_ptr and rhs_t.base_type.is_const and lhs_t.is_ptr and not lhs_t.base_type.is_const:
warning(self.pos, "Assigning to '{}' from '{}' discards const qualifier".format(lhs_t, rhs_t), level=1)
def _check_const_assignment(self, node):
if isinstance(node, AssignmentNode):
self._warn_on_const_assignment(node.lhs, node.rhs)
def analyse_expressions(self, env):
node = self.analyse_types(env)
self._check_const_assignment(node)
if isinstance(node, AssignmentNode) and not isinstance(node, ParallelAssignmentNode):
if node.rhs.type.is_unowned_view and node.rhs.is_ephemeral():
error(self.pos, "Storing unsafe C derivative of temporary Python reference")
return node
# def analyse_expressions(self, env):
# self.analyse_expressions_1(env)
# self.analyse_expressions_2(env)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
self.generate_rhs_evaluation_code(code)
self.generate_assignment_code(code)
| AssignmentNode |
python | kamyu104__LeetCode-Solutions | Python/find-the-power-of-k-size-subarrays-ii.py | {
"start": 60,
"end": 524
} | class ____(object):
def resultsArray(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
result = [-1]*(len(nums)-k+1)
left = 0
for right in xrange(len(nums)):
if nums[right]-nums[left] != right-left:
left = right
if right-left+1 == k:
result[left] = nums[right]
left += 1
return result
| Solution |
python | langchain-ai__langchain | libs/standard-tests/tests/unit_tests/test_in_memory_base_store.py | {
"start": 256,
"end": 537
} | class ____(BaseStoreSyncTests[str]):
@pytest.fixture
@override
def three_values(self) -> tuple[str, str, str]:
return "foo", "bar", "buzz"
@pytest.fixture
@override
def kv_store(self) -> InMemoryStore:
return InMemoryStore()
| TestInMemoryStore |
python | python__mypy | mypy/type_visitor.py | {
"start": 14496,
"end": 19975
} | class ____(SyntheticTypeVisitor[bool]):
"""Visitor for performing recursive queries of types with a bool result.
Use TypeQuery if you need non-bool results.
'strategy' is used to combine results for a series of types. It must
be ANY_STRATEGY or ALL_STRATEGY.
Note: This visitor keeps an internal state (tracks type aliases to avoid
recursion), so it should *never* be reused for querying different types
unless you call reset() first.
"""
def __init__(self, strategy: int) -> None:
self.strategy = strategy
if strategy == ANY_STRATEGY:
self.default = False
else:
assert strategy == ALL_STRATEGY
self.default = True
# Keep track of the type aliases already visited. This is needed to avoid
# infinite recursion on types like A = Union[int, List[A]]. An empty set is
# represented as None as a micro-optimization.
self.seen_aliases: set[TypeAliasType] | None = None
# By default, we eagerly expand type aliases, and query also types in the
# alias target. In most cases this is a desired behavior, but we may want
# to skip targets in some cases (e.g. when collecting type variables).
self.skip_alias_target = False
def reset(self) -> None:
"""Clear mutable state (but preserve strategy).
This *must* be called if you want to reuse the visitor.
"""
self.seen_aliases = None
def visit_unbound_type(self, t: UnboundType, /) -> bool:
return self.query_types(t.args)
def visit_type_list(self, t: TypeList, /) -> bool:
return self.query_types(t.items)
def visit_callable_argument(self, t: CallableArgument, /) -> bool:
return t.typ.accept(self)
def visit_any(self, t: AnyType, /) -> bool:
return self.default
def visit_uninhabited_type(self, t: UninhabitedType, /) -> bool:
return self.default
def visit_none_type(self, t: NoneType, /) -> bool:
return self.default
def visit_erased_type(self, t: ErasedType, /) -> bool:
return self.default
def visit_deleted_type(self, t: DeletedType, /) -> bool:
return self.default
def visit_type_var(self, t: TypeVarType, /) -> bool:
return self.query_types([t.upper_bound, t.default] + t.values)
def visit_param_spec(self, t: ParamSpecType, /) -> bool:
return self.query_types([t.upper_bound, t.default, t.prefix])
def visit_type_var_tuple(self, t: TypeVarTupleType, /) -> bool:
return self.query_types([t.upper_bound, t.default])
def visit_unpack_type(self, t: UnpackType, /) -> bool:
return self.query_types([t.type])
def visit_parameters(self, t: Parameters, /) -> bool:
return self.query_types(t.arg_types)
def visit_partial_type(self, t: PartialType, /) -> bool:
return self.default
def visit_instance(self, t: Instance, /) -> bool:
return self.query_types(t.args)
def visit_callable_type(self, t: CallableType, /) -> bool:
# FIX generics
# Avoid allocating any objects here as an optimization.
args = self.query_types(t.arg_types)
ret = t.ret_type.accept(self)
if self.strategy == ANY_STRATEGY:
return args or ret
else:
return args and ret
def visit_tuple_type(self, t: TupleType, /) -> bool:
return self.query_types([t.partial_fallback] + t.items)
def visit_typeddict_type(self, t: TypedDictType, /) -> bool:
return self.query_types(list(t.items.values()))
def visit_raw_expression_type(self, t: RawExpressionType, /) -> bool:
return self.default
def visit_literal_type(self, t: LiteralType, /) -> bool:
return self.default
def visit_union_type(self, t: UnionType, /) -> bool:
return self.query_types(t.items)
def visit_overloaded(self, t: Overloaded, /) -> bool:
return self.query_types(t.items) # type: ignore[arg-type]
def visit_type_type(self, t: TypeType, /) -> bool:
return t.item.accept(self)
def visit_ellipsis_type(self, t: EllipsisType, /) -> bool:
return self.default
def visit_placeholder_type(self, t: PlaceholderType, /) -> bool:
return self.query_types(t.args)
def visit_type_alias_type(self, t: TypeAliasType, /) -> bool:
if self.skip_alias_target:
return self.query_types(t.args)
# Skip type aliases already visited types to avoid infinite recursion
# (also use this as a simple-minded cache).
if self.seen_aliases is None:
self.seen_aliases = set()
elif t in self.seen_aliases:
return self.default
self.seen_aliases.add(t)
return get_proper_type(t).accept(self)
def query_types(self, types: list[Type] | tuple[Type, ...]) -> bool:
"""Perform a query for a sequence of types using the strategy to combine the results."""
# Special-case for lists and tuples to allow mypyc to produce better code.
if isinstance(types, list):
if self.strategy == ANY_STRATEGY:
return any(t.accept(self) for t in types)
else:
return all(t.accept(self) for t in types)
else:
if self.strategy == ANY_STRATEGY:
return any(t.accept(self) for t in types)
else:
return all(t.accept(self) for t in types)
| BoolTypeQuery |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/sparse_ops/sparse_split_op_test.py | {
"start": 1032,
"end": 16011
} | class ____(test.TestCase):
def _SparseTensor_4x6(self):
# [0 | |2 | |4 |5 ]
# [ |11| |13|14| ]
# [20| | |23| |25]
# [30| |32|33| |35]
ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
[2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
[3, 5]]).astype(np.int64)
val = np.array(
[0, 2, 4, 5, 11, 13, 14, 20, 23, 25, 30, 32, 33, 35]).astype(np.int64)
shape = np.array([4, 6]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
def _SparseTensor_5x7(self):
# [0 | |2 | |4 |5 | ]
# [ |11| |13|14| |16]
# [20| | |23| |25| ]
# [30| |32|33| |35| ]
# [ |41| | |44| |46]
ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
[1, 6], [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
[3, 5], [4, 1], [4, 4], [4, 6]]).astype(np.int64)
val = np.array(
[0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25, 30, 32, 33, 35, 41, 44,
46]).astype(np.int64)
shape = np.array([5, 7]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
def _SparseTensorValue_3x4x2(self):
# slice(:,:, 0)
# ['a0'| |'b0'| ]
# [ |'c0'| |'d0']
# [ | |'e0'| ]
# slice(:,:, 1)
# ['a1'| |'b1'| ]
# [ |'c1'| |'d1']
# [ | |'e1'| ]
ind = np.array([[0, 0, 0], [0, 0, 1], [0, 2, 0], [0, 2, 1], [1, 1, 0],
[1, 1, 1], [1, 3, 0], [1, 3, 1], [2, 2, 0],
[2, 2, 1]]).astype(np.int64)
val = np.array(['a0', 'a1', 'b0', 'b1', 'c0', 'c1', 'd0', 'd1', 'e0', 'e1'])
shape = np.array([3, 4, 2]).astype(np.int64)
return sparse_tensor.SparseTensorValue(ind, val, shape)
def _SparseTensor_3x4x2(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x4x2(
))
def _SparseTensor_4x6_empty(self, val_dtype=np.int64):
ind = np.empty(shape=(0, 2), dtype=np.int64)
val = np.array([]).astype(val_dtype)
shape = np.array([4, 6]).astype(np.int64)
return sparse_tensor.SparseTensor(ind, val, shape)
def testSplitMatrixRows(self):
for axis in (0, -2):
sp_tensors = self.evaluate(
sparse_ops.sparse_split(
sp_input=self._SparseTensor_4x6(), num_split=2, axis=axis))
self.assertAllEqual(len(sp_tensors), 2)
self.assertAllEqual(
sp_tensors[0].indices,
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4]])
self.assertAllEqual(sp_tensors[0].values, [0, 2, 4, 5, 11, 13, 14])
self.assertAllEqual(sp_tensors[0].dense_shape, [2, 6])
self.assertAllEqual(
sp_tensors[1].indices,
[[0, 0], [0, 3], [0, 5], [1, 0], [1, 2], [1, 3], [1, 5]])
self.assertAllEqual(sp_tensors[1].values, [20, 23, 25, 30, 32, 33, 35])
self.assertAllEqual(sp_tensors[1].dense_shape, [2, 6])
def testSplitMatrixUnevenCols(self):
for axis in (1, -1):
sp_tensors_3 = self.evaluate(
sparse_ops.sparse_split(
sp_input=self._SparseTensor_5x7(), num_split=3, axis=axis))
self.assertAllEqual(len(sp_tensors_3), 3)
self.assertAllEqual(
sp_tensors_3[0].indices,
[[0, 0], [0, 2], [1, 1], [2, 0], [3, 0], [3, 2], [4, 1]])
self.assertAllEqual(sp_tensors_3[0].values, [0, 2, 11, 20, 30, 32, 41])
self.assertAllEqual(sp_tensors_3[0].dense_shape, [5, 3])
self.assertAllEqual(sp_tensors_3[1].indices,
[[0, 1], [1, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensors_3[1].values, [4, 13, 14, 23, 33, 44])
self.assertAllEqual(sp_tensors_3[1].dense_shape, [5, 2])
self.assertAllEqual(sp_tensors_3[2].indices,
[[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensors_3[2].values, [5, 16, 25, 35, 46])
self.assertAllEqual(sp_tensors_3[2].dense_shape, [5, 2])
sp_tensors_4 = sparse_ops.sparse_split(
sp_input=self._SparseTensor_5x7(), num_split=4, axis=axis)
self.assertAllEqual(len(sp_tensors_4), 4)
self.assertAllEqual(sp_tensors_4[0].indices,
[[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
self.assertAllEqual(sp_tensors_4[0].values, [0, 11, 20, 30, 41])
self.assertAllEqual(sp_tensors_4[0].dense_shape, [5, 2])
self.assertAllEqual(sp_tensors_4[1].indices,
[[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
self.assertAllEqual(sp_tensors_4[1].values, [2, 13, 23, 32, 33])
self.assertAllEqual(sp_tensors_4[1].dense_shape, [5, 2])
self.assertAllEqual(sp_tensors_4[2].indices,
[[0, 0], [0, 1], [1, 0], [2, 1], [3, 1], [4, 0]])
self.assertAllEqual(sp_tensors_4[2].values, [4, 5, 14, 25, 35, 44])
self.assertAllEqual(sp_tensors_4[2].dense_shape, [5, 2])
self.assertAllEqual(sp_tensors_4[3].indices, [[1, 0], [4, 0]])
self.assertAllEqual(sp_tensors_4[3].values, [16, 46])
self.assertAllEqual(sp_tensors_4[3].dense_shape, [5, 1])
def testSplitMatrixUnevenRows(self):
for axis in (0, -2):
sp_tensors_2 = self.evaluate(
sparse_ops.sparse_split(
sp_input=self._SparseTensor_5x7(), num_split=2, axis=axis))
self.assertAllEqual(sp_tensors_2[0].indices,
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
[1, 4], [1, 6], [2, 0], [2, 3], [2, 5]])
self.assertAllEqual(sp_tensors_2[0].values,
[0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25])
self.assertAllEqual(sp_tensors_2[0].dense_shape, [3, 7])
self.assertAllEqual(
sp_tensors_2[1].indices,
[[0, 0], [0, 2], [0, 3], [0, 5], [1, 1], [1, 4], [1, 6]])
self.assertAllEqual(sp_tensors_2[1].values, [30, 32, 33, 35, 41, 44, 46])
self.assertAllEqual(sp_tensors_2[1].dense_shape, [2, 7])
self.assertAllEqual(len(sp_tensors_2), 2)
sp_tensors_3 = sparse_ops.sparse_split(
sp_input=self._SparseTensor_5x7(), num_split=3, axis=axis)
self.assertAllEqual(len(sp_tensors_3), 3)
self.assertAllEqual(
sp_tensors_3[0].indices,
[[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4], [1, 6]])
self.assertAllEqual(sp_tensors_3[0].values, [0, 2, 4, 5, 11, 13, 14, 16])
self.assertAllEqual(sp_tensors_3[0].dense_shape, [2, 7])
self.assertAllEqual(sp_tensors_3[1].values, [20, 23, 25, 30, 32, 33, 35])
self.assertAllEqual(sp_tensors_3[1].dense_shape, [2, 7])
self.assertAllEqual(sp_tensors_3[2].indices, [[0, 1], [0, 4], [0, 6]])
self.assertAllEqual(sp_tensors_3[2].values, [41, 44, 46])
self.assertAllEqual(sp_tensors_3[2].dense_shape, [1, 7])
def testSplitAllRows(self):
for axis in (0, -2):
sp_tensors = self.evaluate(
sparse_ops.sparse_split(
sp_input=self._SparseTensor_4x6(), num_split=4, axis=axis))
self.assertAllEqual(len(sp_tensors), 4)
self.assertAllEqual(sp_tensors[0].indices,
[[0, 0], [0, 2], [0, 4], [0, 5]])
self.assertAllEqual(sp_tensors[0].values, [0, 2, 4, 5])
self.assertAllEqual(sp_tensors[0].dense_shape, [1, 6])
self.assertAllEqual(sp_tensors[1].indices, [[0, 1], [0, 3], [0, 4]])
self.assertAllEqual(sp_tensors[1].values, [11, 13, 14])
self.assertAllEqual(sp_tensors[1].dense_shape, [1, 6])
self.assertAllEqual(sp_tensors[2].indices, [[0, 0], [0, 3], [0, 5]])
self.assertAllEqual(sp_tensors[2].values, [20, 23, 25])
self.assertAllEqual(sp_tensors[2].dense_shape, [1, 6])
self.assertAllEqual(sp_tensors[3].indices,
[[0, 0], [0, 2], [0, 3], [0, 5]])
self.assertAllEqual(sp_tensors[3].values, [30, 32, 33, 35])
self.assertAllEqual(sp_tensors[3].dense_shape, [1, 6])
def testSplitColumns(self):
for axis in (1, -1):
sparse_tensors = self.evaluate(
sparse_ops.sparse_split(
sp_input=self._SparseTensor_4x6(), num_split=3, axis=axis))
self.assertAllEqual(len(sparse_tensors), 3)
self.assertAllEqual(sparse_tensors[0].indices,
[[0, 0], [1, 1], [2, 0], [3, 0]])
self.assertAllEqual(sparse_tensors[0].values, [0, 11, 20, 30])
self.assertAllEqual(sparse_tensors[0].dense_shape, [4, 2])
self.assertAllEqual(sparse_tensors[1].indices,
[[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
self.assertAllEqual(sparse_tensors[1].values, [2, 13, 23, 32, 33])
self.assertAllEqual(sparse_tensors[1].dense_shape, [4, 2])
self.assertAllEqual(sparse_tensors[2].indices,
[[0, 0], [0, 1], [1, 0], [2, 1], [3, 1]])
self.assertAllEqual(sparse_tensors[2].values, [4, 5, 14, 25, 35])
self.assertAllEqual(sparse_tensors[2].dense_shape, [4, 2])
def testSplitAllColumns(self):
for axis in (1, -1):
sparse_tensors = self.evaluate(
sparse_ops.sparse_split(
sp_input=self._SparseTensor_4x6(), num_split=6, axis=axis))
self.assertAllEqual(len(sparse_tensors), 6)
self.assertAllEqual(sparse_tensors[0].indices, [[0, 0], [2, 0], [3, 0]])
self.assertAllEqual(sparse_tensors[0].values, [0, 20, 30])
self.assertAllEqual(sparse_tensors[0].dense_shape, [4, 1])
self.assertAllEqual(sparse_tensors[1].indices, [[1, 0]])
self.assertAllEqual(sparse_tensors[1].values, [11])
self.assertAllEqual(sparse_tensors[1].dense_shape, [4, 1])
self.assertAllEqual(sparse_tensors[2].indices, [[0, 0], [3, 0]])
self.assertAllEqual(sparse_tensors[2].values, [2, 32])
self.assertAllEqual(sparse_tensors[2].dense_shape, [4, 1])
self.assertAllEqual(sparse_tensors[3].indices, [[1, 0], [2, 0], [3, 0]])
self.assertAllEqual(sparse_tensors[3].dense_shape, [4, 1])
self.assertAllEqual(sparse_tensors[3].values, [13, 23, 33])
self.assertAllEqual(sparse_tensors[4].indices, [[0, 0], [1, 0]])
self.assertAllEqual(sparse_tensors[4].values, [4, 14])
self.assertAllEqual(sparse_tensors[4].dense_shape, [4, 1])
self.assertAllEqual(sparse_tensors[5].indices, [[0, 0], [2, 0], [3, 0]])
self.assertAllEqual(sparse_tensors[5].values, [5, 25, 35])
self.assertAllEqual(sparse_tensors[5].dense_shape, [4, 1])
def testSliceConcat(self):
for sp_input in (self._SparseTensorValue_3x4x2(),
self._SparseTensor_3x4x2()):
for axis in (1, -2):
sparse_tensors = sparse_ops.sparse_split(
sp_input=sp_input, num_split=2, axis=axis)
concat_tensor = self.evaluate(
sparse_ops.sparse_concat(1, sparse_tensors))
expected_output = self._SparseTensor_3x4x2()
self.assertAllEqual(concat_tensor.indices, expected_output.indices)
def testInvalidAxis(self):
for axis in (-3, 2):
with self.assertRaisesRegex(errors.InvalidArgumentError,
r'axis should be in range \[-2, 2\)'):
self.evaluate(
sparse_ops.sparse_split(
sp_input=self._SparseTensor_4x6(), num_split=3, axis=axis))
def testArgumentErrors(self):
with self.assertRaisesRegex(ValueError, 'Keyword arguments are required'):
sparse_ops.sparse_split(3, 2, 1)
with self.assertRaisesRegex(ValueError, 'sp_input is required'):
sparse_ops.sparse_split()
with self.assertRaisesRegex(ValueError, 'num_split is required'):
sparse_ops.sparse_split(sp_input=1)
with self.assertRaisesRegex(ValueError, 'axis is required'):
sparse_ops.sparse_split(num_split=2, sp_input=1)
def testSplitEmpty(self):
sp_empty = self._SparseTensor_4x6_empty()
sparse_splits0 = sparse_ops.sparse_split(
sp_input=sp_empty, num_split=2, axis=0)
sparse_splits1 = sparse_ops.sparse_split(
sp_input=sp_empty, num_split=2, axis=1)
empty_inds = np.empty(shape=(0, 2), dtype=np.int64)
self.assertAllEqual(sparse_splits0[0].indices, empty_inds)
self.assertAllEqual(sparse_splits0[0].values, [])
self.assertAllEqual(sparse_splits0[0].dense_shape, [2, 6])
self.assertAllEqual(sparse_splits0[1].indices, empty_inds)
self.assertAllEqual(sparse_splits0[1].values, [])
self.assertAllEqual(sparse_splits0[1].dense_shape, [2, 6])
self.assertAllEqual(sparse_splits1[0].indices, empty_inds)
self.assertAllEqual(sparse_splits1[0].values, [])
self.assertAllEqual(sparse_splits1[0].dense_shape, [4, 3])
self.assertAllEqual(sparse_splits1[1].indices, empty_inds)
self.assertAllEqual(sparse_splits1[1].values, [])
self.assertAllEqual(sparse_splits1[1].dense_shape, [4, 3])
def testInvalidArgumentError(self):
# Test case for GitHub issue 53660.
axis = [1, 2]
with self.assertRaisesRegex(errors.InvalidArgumentError,
r'axis should be a scalar'):
self.evaluate(
sparse_ops.sparse_split(
sp_input=self._SparseTensor_4x6(), num_split=3, axis=axis))
def testBig(self):
# This test was added after discovering a memory allocation bug in the GPU
# kernel that the existing tests did not catch due to being too small.
for n in [250, 2500, 25000]:
indices = np.zeros([n, 2], dtype=np.int64)
indices[:, 0] = np.arange(n)
values = np.zeros([n], dtype=np.float32)
dense_shape = np.array([n, 3], dtype=np.int64)
sp_input = sparse_tensor.SparseTensor(indices, values, dense_shape)
sp_tensors = self.evaluate(
sparse_ops.sparse_split(sp_input=sp_input, num_split=2, axis=0))
self.assertAllEqual(sp_tensors[0].indices, indices[:n // 2])
self.assertAllEqual(sp_tensors[1].indices, indices[n // 2:] - [n // 2, 0])
self.assertAllEqual(sp_tensors[0].values, values[:n // 2])
self.assertAllEqual(sp_tensors[1].values, values[n // 2:])
self.assertAllEqual(sp_tensors[0].dense_shape, [n // 2, 3])
self.assertAllEqual(sp_tensors[1].dense_shape, [n // 2, 3])
def testSparseIndexOutOfBounds(self):
if test_util.is_gpu_available():
# On GPU, out-of-bounds indices are simply ignored.
self.evaluate(
gen_sparse_ops.sparse_split(
split_dim=1,
indices=[[0, 0], [1, 10], [-1, 2]],
values=[1.0, 2.0, 3.0],
shape=[3, 5],
num_split=2,
)
)
else:
# On CPU, out-of-bounds index raises error.
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), 'out of bounds'
):
self.evaluate(
gen_sparse_ops.sparse_split(
split_dim=1,
indices=[[0, 0], [1, 10], [-1, 2]],
values=[1.0, 2.0, 3.0],
shape=[3, 5],
num_split=2,
)
)
if __name__ == '__main__':
test.main()
| SparseSplitOpTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-cart/source_cart/streams.py | {
"start": 5964,
"end": 6107
} | class ____(IncrementalCartStream):
"""
Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1products/get
"""
| Products |
python | getsentry__sentry | src/sentry/testutils/notifications/platform.py | {
"start": 1785,
"end": 2004
} | class ____(NotificationStrategy):
def __init__(self, *, targets: list[NotificationTarget]):
self.targets = targets
def get_targets(self) -> list[NotificationTarget]:
return self.targets
| MockStrategy |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI049.py | {
"start": 546,
"end": 691
} | class ____:
class _UnusedTypeDict4(TypedDict):
pass
def method(self) -> None:
_CustomClass3._UnusedTypeDict4()
| _CustomClass3 |
python | RaRe-Technologies__gensim | gensim/models/doc2vec.py | {
"start": 5632,
"end": 6354
} | class ____:
"""A dataclass shape-compatible with keyedvectors.SimpleVocab, extended to record
details of string document tags discovered during the initial vocabulary scan.
Will not be used if all presented document tags are ints. No longer used in a
completed model: just used during initial scan, and for backward compatibility.
"""
__slots__ = ('doc_count', 'index', 'word_count')
doc_count: int # number of docs where tag appeared
index: int # position in underlying array
word_count: int # number of words in associated docs
@property
def count(self):
return self.doc_count
@count.setter
def count(self, new_val):
self.doc_count = new_val
| Doctag |
python | PyCQA__pylint | tests/functional/ext/no_self_use/no_self_use.py | {
"start": 1115,
"end": 1321
} | class ____(Base):
"""a concrete class"""
def check(self, arg):
"""a concrete method, could not be a function since it need
polymorphism benefits
"""
return arg == 0
| Sub |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_toeplitz_test.py | {
"start": 1400,
"end": 6495
} | class ____(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""We overwrite the FFT operation mapping for testing."""
with test.TestCase._constrain_devices_and_set_default(
self, sess, use_gpu, force_gpu) as sess:
yield sess
def setUp(self):
# TODO(srvasude): Lower these tolerances once specialized solve and
# determinants are implemented.
self._atol[dtypes.float32] = 1e-4
self._rtol[dtypes.float32] = 1e-4
self._atol[dtypes.float64] = 1e-9
self._rtol[dtypes.float64] = 1e-9
self._atol[dtypes.complex64] = 1e-4
self._rtol[dtypes.complex64] = 1e-4
self._atol[dtypes.complex128] = 1e-9
self._rtol[dtypes.complex128] = 1e-9
@staticmethod
def skip_these_tests():
# Skip solve tests, as these could have better stability
# (currently exercises the base class).
# TODO(srvasude): Enable these when solve is implemented.
return ["cholesky", "cond", "inverse", "solve", "solve_with_broadcast"]
@staticmethod
def operator_shapes_infos():
shape_info = linear_operator_test_util.OperatorShapesInfo
# non-batch operators (n, n) and batch operators.
return [
shape_info((1, 1)),
shape_info((1, 6, 6)),
shape_info((3, 4, 4)),
shape_info((2, 1, 3, 3))
]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
row = np.random.uniform(low=1., high=5., size=shape[:-1])
col = np.random.uniform(low=1., high=5., size=shape[:-1])
# Make sure first entry is the same
row[..., 0] = col[..., 0]
if ensure_self_adjoint_and_pd:
# Note that a Toeplitz matrix generated from a linearly decreasing
# non-negative sequence is positive definite. See
# https://www.math.cinvestav.mx/~grudsky/Papers/118_29062012_Albrecht.pdf
# for details.
row = np.linspace(start=10., stop=1., num=shape[-1])
# The entries for the first row and column should be the same to guarantee
# symmetric.
row = col
lin_op_row = math_ops.cast(row, dtype=dtype)
lin_op_col = math_ops.cast(col, dtype=dtype)
if use_placeholder:
lin_op_row = array_ops.placeholder_with_default(
lin_op_row, shape=None)
lin_op_col = array_ops.placeholder_with_default(
lin_op_col, shape=None)
operator = linear_operator_toeplitz.LinearOperatorToeplitz(
row=lin_op_row,
col=lin_op_col,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
flattened_row = np.reshape(row, (-1, shape[-1]))
flattened_col = np.reshape(col, (-1, shape[-1]))
flattened_toeplitz = np.zeros(
[flattened_row.shape[0], shape[-1], shape[-1]])
for i in range(flattened_row.shape[0]):
flattened_toeplitz[i] = scipy.linalg.toeplitz(
flattened_col[i],
flattened_row[i])
matrix = np.reshape(flattened_toeplitz, shape)
matrix = math_ops.cast(matrix, dtype=dtype)
return operator, matrix
def test_scalar_row_col_raises(self):
with self.assertRaisesRegex(ValueError, "must have at least 1 dimension"):
linear_operator_toeplitz.LinearOperatorToeplitz(1., 1.)
with self.assertRaisesRegex(ValueError, "must have at least 1 dimension"):
linear_operator_toeplitz.LinearOperatorToeplitz([1.], 1.)
with self.assertRaisesRegex(ValueError, "must have at least 1 dimension"):
linear_operator_toeplitz.LinearOperatorToeplitz(1., [1.])
def test_tape_safe(self):
col = variables_module.Variable([1.])
row = variables_module.Variable([1.])
operator = linear_operator_toeplitz.LinearOperatorToeplitz(
col, row, is_self_adjoint=True, is_positive_definite=True)
self.check_tape_safe(
operator,
skip_options=[
# .diag_part, .trace depend only on `col`, so test explicitly below.
linear_operator_test_util.CheckTapeSafeSkipOptions.DIAG_PART,
linear_operator_test_util.CheckTapeSafeSkipOptions.TRACE,
])
with backprop.GradientTape() as tape:
self.assertIsNotNone(tape.gradient(operator.diag_part(), col))
with backprop.GradientTape() as tape:
self.assertIsNotNone(tape.gradient(operator.trace(), col))
def test_convert_variables_to_tensors(self):
col = variables_module.Variable([1.])
row = variables_module.Variable([1.])
operator = linear_operator_toeplitz.LinearOperatorToeplitz(
col, row, is_self_adjoint=True, is_positive_definite=True)
with self.cached_session() as sess:
sess.run([x.initializer for x in operator.variables])
self.check_convert_variables_to_tensors(operator)
if __name__ == "__main__":
linear_operator_test_util.add_tests(LinearOperatorToeplitzTest)
test.main()
| LinearOperatorToeplitzTest |
python | getsentry__sentry | tests/sentry/notifications/test_helpers.py | {
"start": 930,
"end": 7089
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
with assume_test_silo_mode(SiloMode.CONTROL):
NotificationSettingOption.objects.create(
user_id=self.user.id,
scope_type="user",
scope_identifier=self.user.id,
type="workflow",
value="always",
)
NotificationSettingOption.objects.create(
user_id=self.user.id,
scope_type="user",
scope_identifier=self.user.id,
type="deploy",
value="always",
)
def test_validate(self) -> None:
self.assertTrue(
validate(NotificationSettingEnum.ISSUE_ALERTS, NotificationSettingsOptionEnum.ALWAYS)
)
self.assertTrue(
validate(NotificationSettingEnum.ISSUE_ALERTS, NotificationSettingsOptionEnum.NEVER)
)
self.assertTrue(
validate(NotificationSettingEnum.DEPLOY, NotificationSettingsOptionEnum.ALWAYS)
)
self.assertTrue(
validate(NotificationSettingEnum.DEPLOY, NotificationSettingsOptionEnum.NEVER)
)
self.assertTrue(
validate(NotificationSettingEnum.DEPLOY, NotificationSettingsOptionEnum.COMMITTED_ONLY)
)
self.assertFalse(
validate(NotificationSettingEnum.DEPLOY, NotificationSettingsOptionEnum.SUBSCRIBE_ONLY)
)
self.assertTrue(
validate(NotificationSettingEnum.WORKFLOW, NotificationSettingsOptionEnum.ALWAYS)
)
self.assertTrue(
validate(NotificationSettingEnum.WORKFLOW, NotificationSettingsOptionEnum.NEVER)
)
self.assertTrue(
validate(
NotificationSettingEnum.WORKFLOW, NotificationSettingsOptionEnum.SUBSCRIBE_ONLY
)
)
self.assertFalse(
validate(
NotificationSettingEnum.WORKFLOW, NotificationSettingsOptionEnum.COMMITTED_ONLY
)
)
def test_get_subscription_from_attributes(self) -> None:
attrs = {"subscription": (True, True, None)}
assert get_subscription_from_attributes(attrs) == (True, {"disabled": True})
attrs = {"subscription": (True, False, None)}
assert get_subscription_from_attributes(attrs) == (False, {"disabled": True})
def test_collect_groups_by_project(self) -> None:
assert collect_groups_by_project([self.group]) == {self.project.id: {self.group}}
def test_get_group_settings_link(self) -> None:
rule: Rule = self.create_project_rule(self.project)
rule_details = get_rules([rule], self.organization, self.project, self.group.type)
link = get_group_settings_link(
self.group, self.environment.name, rule_details, 1337, extra="123"
)
parsed = urlparse(link)
query_dict = dict(map(lambda x: (x[0], x[1][0]), parse_qs(parsed.query).items()))
assert f"{parsed.scheme}://{parsed.hostname}{parsed.path}" == self.group.get_absolute_url()
assert query_dict == {
"referrer": "alert_email",
"environment": self.environment.name,
"alert_type": "email",
"alert_timestamp": str(1337),
"alert_rule_id": str(rule_details[0].id),
"extra": "123",
}
def test_get_email_link_extra_params(self) -> None:
rule: Rule = self.create_project_rule(self.project)
project2 = self.create_project()
rule2 = self.create_project_rule(project2)
rule_details = get_rules([rule, rule2], self.organization, self.project, self.group.type)
extra_params = {
k: dict(map(lambda x: (x[0], x[1][0]), parse_qs(v.strip("?")).items()))
for k, v in get_email_link_extra_params(
"digest_email", None, rule_details, 1337
).items()
}
assert extra_params == {
rule_detail.id: {
"referrer": "digest_email",
"alert_type": "email",
"alert_timestamp": str(1337),
"alert_rule_id": str(rule_detail.id),
}
for rule_detail in rule_details
}
def test_get_team_members(self) -> None:
user1 = self.create_user()
user2 = self.create_user()
team1 = self.create_team()
team2 = self.create_team()
team3 = self.create_team()
self.create_member(organization=self.organization, teams=[team1], user=user1)
self.create_member(organization=self.organization, teams=[team2], user=user2)
with assume_test_silo_mode_of(OrganizationMemberTeamReplica):
assert get_team_members(team1) == [Actor.from_object(user1)]
assert get_team_members(team2) == [Actor.from_object(user2)]
assert get_team_members(team3) == []
def test_team_is_valid_recipient(self) -> None:
team1 = self.create_team(organization=self.organization)
team2 = self.create_team(organization=self.organization)
team3 = self.create_team(organization=self.organization)
integration1 = self.create_integration(
organization=self.organization, provider="Slack", external_id="slack-id"
)
integration2 = self.create_integration(
organization=self.organization, provider="Jira", external_id="jira-id"
)
ExternalActor.objects.create(
team_id=team1.id,
organization=self.organization,
integration_id=integration1.id,
external_name="valid_integration",
provider=110,
)
ExternalActor.objects.create(
team_id=team2.id,
organization=self.organization,
integration_id=integration2.id,
external_name="invalid_integration",
provider=0,
)
with assume_test_silo_mode(SiloMode.CONTROL):
assert team_is_valid_recipient(team1)
assert not team_is_valid_recipient(team2)
assert not team_is_valid_recipient(team3)
| NotificationHelpersTest |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-bedrock/llama_index/llms/bedrock/utils.py | {
"start": 7506,
"end": 8877
} | class ____(Provider):
max_tokens_key = "max_tokens"
def __init__(self) -> None:
self.messages_to_prompt = _messages_to_anthropic_messages
self.completion_to_prompt = completion_to_anthopic_prompt
def get_text_from_stream_response(self, response: dict) -> str:
if response["type"] == "content_block_delta":
return response["delta"]["text"]
else:
return ""
def get_text_from_response(self, response: dict) -> str:
if response["content"]:
return response["content"][0]["text"]
return ""
def get_request_body(self, prompt: Sequence[Dict], inference_parameters: dict):
if len(prompt) > 0 and prompt[0]["role"] == "system":
system_message = prompt[0]["content"]
prompt = prompt[1:]
if (
"system" in inference_parameters
and inference_parameters["system"] is not None
):
inference_parameters["system"] += system_message
else:
inference_parameters["system"] = system_message
return {
"messages": prompt,
"anthropic_version": inference_parameters.get(
"anthropic_version", "bedrock-2023-05-31"
), # Required by AWS.
**inference_parameters,
}
| AnthropicProvider |
python | Netflix__metaflow | metaflow/plugins/cards/exception.py | {
"start": 1895,
"end": 2663
} | class ____(MetaflowException):
headline = "Cannot resolve task for pathspec"
def __init__(
self,
pathspec_query,
resolved_from,
run_id=None,
):
message = "Cannot resolve task to find card."
if resolved_from == "task_pathspec":
message = "Task pathspec %s not found." % pathspec_query
elif resolved_from == "step_pathspec":
message = "Step pathspec %s not found." % pathspec_query
elif resolved_from == "stepname":
message = "Step %s not found" % pathspec_query
if run_id is not None:
message = "Step %s not found for Run('%s')." % (pathspec_query, run_id)
super().__init__(msg=message, lineno=None)
| TaskNotFoundException |
python | doocs__leetcode | solution/0200-0299/0274.H-Index/Solution.py | {
"start": 0,
"end": 231
} | class ____:
def hIndex(self, citations: List[int]) -> int:
citations.sort(reverse=True)
for h in range(len(citations), 0, -1):
if citations[h - 1] >= h:
return h
return 0
| Solution |
python | ansible__ansible | test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/action/eos.py | {
"start": 240,
"end": 445
} | class ____(ActionBase):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
result['action_plugin'] = 'eos'
return result
| ActionModule |
python | tiangolo__fastapi | scripts/notify_translations.py | {
"start": 2998,
"end": 3086
} | class ____(BaseModel):
discussions: AllDiscussionsDiscussions
| AllDiscussionsRepository |
python | django__django | tests/proxy_models/models.py | {
"start": 2978,
"end": 3165
} | class ____(State):
class Meta:
proxy = True
# Proxy models still works with filters (on related fields)
# and select_related, even when mixed with model inheritance
| StateProxy |
python | python__mypy | mypyc/ir/ops.py | {
"start": 40517,
"end": 41478
} | class ____(RegisterOp):
"""result = extend src from src_type to dst_type
Extend a value from a type with fewer bits to a type with more bits.
dst_type and src_type can be native integer types, bools or tagged
integers. Tagged integers should have the tag bit unset.
If 'signed' is true, perform sign extension. Otherwise, the result will be
zero extended.
"""
error_kind = ERR_NEVER
def __init__(self, src: Value, dst_type: RType, signed: bool, line: int = -1) -> None:
super().__init__(line)
self.src = src
self.type = dst_type
self.src_type = src.type
self.signed = signed
def sources(self) -> list[Value]:
return [self.src]
def set_sources(self, new: list[Value]) -> None:
(self.src,) = new
def stolen(self) -> list[Value]:
return []
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_extend(self)
@final
| Extend |
python | pola-rs__polars | py-polars/src/polars/_utils/udfs.py | {
"start": 10357,
"end": 21229
} | class ____:
"""Introspect UDF bytecode and determine if we can rewrite as native expression."""
_map_target_name: str | None = None
_can_attempt_rewrite: bool | None = None
_caller_variables: dict[str, Any] | None = None
_col_expression: tuple[str, str] | NoDefault | None = no_default
def __init__(self, function: Callable[[Any], Any], map_target: MapTarget) -> None:
"""
Initialize BytecodeParser instance and prepare to introspect UDFs.
Parameters
----------
function : callable
The function/lambda to disassemble and introspect.
map_target : {'expr','series','frame'}
The underlying target object type of the map operation.
"""
try:
original_instructions = get_instructions(function)
except TypeError:
# in case we hit something that can't be disassembled (eg: code object
# unavailable, like a bare numpy ufunc that isn't in a lambda/function)
original_instructions = iter([])
self._function = function
self._map_target = map_target
self._param_name = self._get_param_name(function)
self._rewritten_instructions = RewrittenInstructions(
instructions=original_instructions,
caller_variables=self._caller_variables,
function=function,
)
def _omit_implicit_bool(self, expr: str) -> str:
"""Drop extraneous/implied bool (eg: `pl.col("d") & pl.col("d").dt.date()`)."""
while _RE_IMPLICIT_BOOL.search(expr):
expr = _RE_IMPLICIT_BOOL.sub(repl=r'pl.col("\1").\2', string=expr)
return expr
@staticmethod
def _get_param_name(function: Callable[[Any], Any]) -> str | None:
"""Return single function parameter name."""
try:
# note: we do not parse/handle functions with > 1 params
sig = signature(function)
except ValueError:
return None
return (
next(iter(parameters.keys()))
if len(parameters := sig.parameters) == 1
else None
)
def _inject_nesting(
self,
expression_blocks: dict[int, str],
logical_instructions: list[Instruction],
) -> list[tuple[int, str]]:
"""Inject nesting boundaries into expression blocks (as parentheses)."""
if logical_instructions:
# reconstruct nesting for mixed 'and'/'or' ops by associating control flow
# jump offsets with their target expression blocks and applying parens
if len({inst.opname for inst in logical_instructions}) > 1:
block_offsets: list[int] = list(expression_blocks.keys())
prev_end = -1
for inst in logical_instructions:
start = block_offsets[bisect_left(block_offsets, inst.offset) - 1]
end = block_offsets[bisect_left(block_offsets, inst.argval) - 1]
if not (start == 0 and end == block_offsets[-1]):
if prev_end not in (start, end):
expression_blocks[start] = "(" + expression_blocks[start]
expression_blocks[end] += ")"
prev_end = end
for inst in logical_instructions: # inject connecting "&" and "|" ops
expression_blocks[inst.offset] = OpNames.CONTROL_FLOW[inst.opname]
return sorted(expression_blocks.items())
@property
def map_target(self) -> MapTarget:
"""The map target, eg: one of 'expr', 'frame', or 'series'."""
return self._map_target
def can_attempt_rewrite(self) -> bool:
"""
Determine if we may be able to offer a native polars expression instead.
Note that `lambda x: x` is inefficient, but we ignore it because it is not
guaranteed that using the equivalent bare constant value will return the
same output. (Hopefully nobody is writing lambdas like that anyway...)
"""
if self._can_attempt_rewrite is None:
self._can_attempt_rewrite = (
self._param_name is not None
# check minimum number of ops, ensuring all are parseable
and len(self._rewritten_instructions) >= 2
and all(
inst.opname in OpNames.PARSEABLE_OPS
for inst in self._rewritten_instructions
)
# exclude constructs/functions with multiple RETURN_VALUE ops
and sum(
1
for inst in self.original_instructions
if inst.opname == "RETURN_VALUE"
)
== 1
)
return self._can_attempt_rewrite
def dis(self) -> None:
"""Print disassembled function bytecode."""
dis.dis(self._function)
@property
def function(self) -> Callable[[Any], Any]:
"""The function being parsed."""
return self._function
@property
def original_instructions(self) -> list[Instruction]:
"""The original bytecode instructions from the function we are parsing."""
return list(self._rewritten_instructions._original_instructions)
@property
def param_name(self) -> str | None:
"""The parameter name of the function being parsed."""
return self._param_name
@property
def rewritten_instructions(self) -> list[Instruction]:
"""The rewritten bytecode instructions from the function we are parsing."""
return list(self._rewritten_instructions)
def to_expression(self, col: str) -> str | None:
"""Translate postfix bytecode instructions to polars expression/string."""
if self._col_expression is not no_default and self._col_expression is not None:
col_name, expr = self._col_expression
if col != col_name:
expr = re.sub(
rf'pl\.col\("{re_escape(col_name)}"\)',
f'pl.col("{re_escape(col)}")',
expr,
)
self._col_expression = (col, expr)
return expr
self._map_target_name = None
if self._param_name is None:
self._col_expression = None
return None
# decompose bytecode into logical 'and'/'or' expression blocks (if present)
control_flow_blocks = defaultdict(list)
logical_instructions = []
jump_offset = 0
for idx, inst in enumerate(self._rewritten_instructions):
if inst.opname in OpNames.CONTROL_FLOW:
jump_offset = self._rewritten_instructions[idx + 1].offset
logical_instructions.append(inst)
else:
control_flow_blocks[jump_offset].append(inst)
# convert each block to a polars expression string
try:
expression_strings = self._inject_nesting(
{
offset: InstructionTranslator(
instructions=ops,
caller_variables=self._caller_variables,
map_target=self._map_target,
function=self._function,
).to_expression(
col=col,
param_name=self._param_name,
depth=int(bool(logical_instructions)),
)
for offset, ops in control_flow_blocks.items()
},
logical_instructions,
)
except NotImplementedError:
self._col_expression = None
return None
polars_expr = " ".join(expr for _offset, expr in expression_strings)
# note: if no 'pl.col' in the expression, it likely represents a compound
# constant value (e.g. `lambda x: CONST + 123`), so we don't want to warn
if "pl.col(" not in polars_expr:
self._col_expression = None
return None
else:
polars_expr = self._omit_implicit_bool(polars_expr)
if self._map_target == "series":
if (target_name := self._map_target_name) is None:
target_name = _get_target_name(col, polars_expr, self._map_target)
polars_expr = polars_expr.replace(f'pl.col("{col}")', target_name)
self._col_expression = (col, polars_expr)
return polars_expr
def warn(
self,
col: str,
*,
suggestion_override: str | None = None,
udf_override: str | None = None,
) -> None:
"""Generate warning that suggests an equivalent native polars expression."""
# Import these here so that udfs can be imported without polars installed.
from polars._utils.various import (
find_stacklevel,
in_terminal_that_supports_colour,
)
from polars.exceptions import PolarsInefficientMapWarning
suggested_expression = suggestion_override or self.to_expression(col)
if suggested_expression is not None:
if (target_name := self._map_target_name) is None:
target_name = _get_target_name(
col, suggested_expression, self._map_target
)
func_name = udf_override or self._function.__name__ or "..."
if func_name == "<lambda>":
func_name = f"lambda {self._param_name}: ..."
addendum = (
'Note: in list.eval context, pl.col("") should be written as pl.element()'
if 'pl.col("")' in suggested_expression
else ""
)
apitype, clsname = (
("expressions", "Expr")
if self._map_target == "expr"
else ("series", "Series")
)
before, after = (
(
f" \033[31m- {target_name}.map_elements({func_name})\033[0m\n",
f" \033[32m+ {suggested_expression}\033[0m\n{addendum}",
)
if in_terminal_that_supports_colour()
else (
f" - {target_name}.map_elements({func_name})\n",
f" + {suggested_expression}\n{addendum}",
)
)
warnings.warn(
f"\n{clsname}.map_elements is significantly slower than the native {apitype} API.\n"
"Only use if you absolutely CANNOT implement your logic otherwise.\n"
"Replace this expression...\n"
f"{before}"
"with this one instead:\n"
f"{after}",
PolarsInefficientMapWarning,
stacklevel=find_stacklevel(),
)
| BytecodeParser |
python | encode__django-rest-framework | tests/test_filters.py | {
"start": 11820,
"end": 11983
} | class ____(models.Model):
title = models.CharField(max_length=20)
attribute = models.ForeignKey(AttributeModel, on_delete=models.CASCADE)
| SearchFilterModelFk |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/strategies/_internal/misc.py | {
"start": 4344,
"end": 4538
} | class ____(SearchStrategy[bool]):
def do_draw(self, data: ConjectureData) -> bool:
return data.draw_boolean()
def __repr__(self) -> str:
return "booleans()"
| BooleansStrategy |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 259167,
"end": 259594
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "team_discussion_comment")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
team_discussion_comment = sgqlc.types.Field(
"TeamDiscussionComment", graphql_name="teamDiscussionComment"
)
| CreateTeamDiscussionCommentPayload |
python | vyperlang__vyper | vyper/codegen/expr.py | {
"start": 1665,
"end": 32545
} | class ____:
# TODO: Once other refactors are made reevaluate all inline imports
def __init__(self, node, context, is_stmt=False):
assert isinstance(node, vy_ast.VyperNode)
node = node.reduced()
self.expr = node
self.context = context
self.is_stmt = is_stmt # this came from an Expr node
fn_name = f"parse_{type(node).__name__}"
with tag_exceptions(node, fallback_exception_type=CodegenPanic, note=fn_name):
fn = getattr(self, fn_name)
self.ir_node = fn()
assert isinstance(self.ir_node, IRnode), self.ir_node
writes = set(access.variable for access in get_expr_writes(self.expr))
self.ir_node._writes = writes
self.ir_node.annotation = self.expr.get("node_source_code")
self.ir_node.ast_source = self.expr
def parse_Int(self):
typ = self.expr._metadata["type"]
return IRnode.from_list(self.expr.value, typ=typ)
def parse_Decimal(self):
val = self.expr.value * DECIMAL_DIVISOR
# sanity check that type checker did its job
assert isinstance(val, decimal.Decimal)
assert math.ceil(val) == math.floor(val)
val = int(val)
lo, hi = DecimalT().int_bounds
# sanity check
assert lo <= val <= hi
return IRnode.from_list(val, typ=DecimalT())
def parse_Hex(self):
hexstr = self.expr.value
t = self.expr._metadata["type"]
n_bytes = (len(hexstr) - 2) // 2 # e.g. "0x1234" is 2 bytes
if t == AddressT():
# sanity check typechecker did its job
assert len(hexstr) == 42 and is_checksum_encoded(hexstr)
return IRnode.from_list(int(self.expr.value, 16), typ=t)
elif is_bytes_m_type(t):
assert n_bytes == t.m
# bytes_m types are left padded with zeros
val = int(hexstr, 16) << 8 * (32 - n_bytes)
return IRnode.from_list(val, typ=t)
# String literals
def parse_Str(self):
bytez = self.expr.value.encode("utf-8")
return self._make_bytelike(self.context, StringT, bytez)
# Byte literals
def parse_Bytes(self):
return self._make_bytelike(self.context, BytesT, self.expr.value)
def parse_HexBytes(self):
# HexBytes already has value as bytes
assert isinstance(self.expr.value, bytes)
return self._make_bytelike(self.context, BytesT, self.expr.value)
@classmethod
def _make_bytelike(cls, context, typeclass, bytez):
bytez_length = len(bytez)
btype = typeclass(bytez_length)
placeholder = context.new_internal_variable(btype)
seq = []
seq.append(["mstore", placeholder, bytez_length])
for i in range(0, len(bytez), 32):
seq.append(
[
"mstore",
["add", placeholder, i + 32],
bytes_to_int((bytez + b"\x00" * 31)[i : i + 32]),
]
)
ret = IRnode.from_list(
["seq"] + seq + [placeholder],
typ=btype,
location=MEMORY,
annotation=f"Create {btype}: {bytez}",
)
ret.is_source_bytes_literal = True
return ret
# True, False, None constants
def parse_NameConstant(self):
assert isinstance(self.expr.value, bool)
val = int(self.expr.value)
return IRnode.from_list(val, typ=BoolT())
# Variable names
def parse_Name(self):
varname = self.expr.id
if varname == "self":
return IRnode.from_list(["address"], typ=AddressT())
varinfo = self.expr._expr_info.var_info
assert varinfo is not None
# local variable
if varname in self.context.vars:
ret = self.context.lookup_var(varname).as_ir_node()
ret._referenced_variables = {varinfo}
return ret
if varinfo.is_constant:
return Expr.parse_value_expr(varinfo.decl_node.value, self.context)
if varinfo.is_immutable:
mutable = self.context.is_ctor_context
location = data_location_to_address_space(
varinfo.location, self.context.is_ctor_context
)
ret = IRnode.from_list(
varinfo.position.position,
typ=varinfo.typ,
location=location,
annotation=varname,
mutable=mutable,
)
ret._referenced_variables = {varinfo}
return ret
raise CompilerPanic("unreachable") # pragma: nocover
# x.y or x[5]
def parse_Attribute(self):
typ = self.expr._metadata["type"]
# check if we have a flag constant, e.g.
# [lib1].MyFlag.FOO
if isinstance(typ, FlagT) and is_type_t(self.expr.value._metadata["type"], FlagT):
# 0, 1, 2, .. 255
flag_id = typ._flag_members[self.expr.attr]
value = 2**flag_id # 0 => 0001, 1 => 0010, 2 => 0100, etc.
return IRnode.from_list(value, typ=typ)
# x.balance: balance of address x
if self.expr.attr == "balance":
addr = Expr.parse_value_expr(self.expr.value, self.context)
if addr.typ == AddressT():
if isinstance(self.expr.value, vy_ast.Name) and self.expr.value.id == "self":
seq = ["selfbalance"]
else:
seq = ["balance", addr]
return IRnode.from_list(seq, typ=UINT256_T)
# x.codesize: codesize of address x
elif self.expr.attr == "codesize" or self.expr.attr == "is_contract":
addr = Expr.parse_value_expr(self.expr.value, self.context)
if addr.typ == AddressT():
if self.expr.attr == "codesize":
if self.expr.get("value.id") == "self":
eval_code = ["codesize"]
else:
eval_code = ["extcodesize", addr]
output_type = UINT256_T
else:
eval_code = ["gt", ["extcodesize", addr], 0]
output_type = BoolT()
return IRnode.from_list(eval_code, typ=output_type)
# x.codehash: keccak of address x
elif self.expr.attr == "codehash":
addr = Expr.parse_value_expr(self.expr.value, self.context)
if addr.typ == AddressT():
return IRnode.from_list(["extcodehash", addr], typ=BYTES32_T)
# x.code: codecopy/extcodecopy of address x
elif self.expr.attr == "code":
addr = Expr.parse_value_expr(self.expr.value, self.context)
if addr.typ == AddressT():
# These adhoc nodes will be replaced with a valid node in `Slice.build_IR`
if addr.value == "address": # for `self.code`
return IRnode.from_list(["~selfcode"], typ=BytesT(0))
return IRnode.from_list(["~extcode", addr], typ=BytesT(0))
# Reserved keywords
elif (
isinstance(self.expr.value, vy_ast.Name) and self.expr.value.id in ENVIRONMENT_VARIABLES
):
key = f"{self.expr.value.id}.{self.expr.attr}"
if key == "msg.sender":
return IRnode.from_list(["caller"], typ=AddressT())
elif key == "msg.data":
# This adhoc node will be replaced with a valid node in `Slice/Len.build_IR`
return IRnode.from_list(["~calldata"], typ=BytesT(0))
elif key == "msg.value" and self.context.is_payable:
return IRnode.from_list(["callvalue"], typ=UINT256_T)
elif key in ("msg.gas", "msg.mana"):
# NOTE: `msg.mana` is an alias for `msg.gas`
return IRnode.from_list(["gas"], typ=UINT256_T)
elif key == "block.prevrandao":
if not version_check(begin="paris"):
warning = "tried to use block.prevrandao in pre-Paris "
warning += "environment! Suggest using block.difficulty instead."
vyper_warn(VyperWarning(warning, self.expr))
return IRnode.from_list(["prevrandao"], typ=BYTES32_T)
elif key == "block.difficulty":
if version_check(begin="paris"):
warning = "tried to use block.difficulty in post-Paris "
warning += "environment! Suggest using block.prevrandao instead."
vyper_warn(VyperWarning(warning, self.expr))
return IRnode.from_list(["difficulty"], typ=UINT256_T)
elif key == "block.timestamp":
return IRnode.from_list(["timestamp"], typ=UINT256_T)
elif key == "block.coinbase":
return IRnode.from_list(["coinbase"], typ=AddressT())
elif key == "block.number":
return IRnode.from_list(["number"], typ=UINT256_T)
elif key == "block.gaslimit":
return IRnode.from_list(["gaslimit"], typ=UINT256_T)
elif key == "block.basefee":
return IRnode.from_list(["basefee"], typ=UINT256_T)
elif key == "block.blobbasefee":
if not version_check(begin="cancun"):
raise EvmVersionException(
"`block.blobbasefee` is not available pre-cancun", self.expr
)
return IRnode.from_list(["blobbasefee"], typ=UINT256_T)
elif key == "block.prevhash":
return IRnode.from_list(["blockhash", ["sub", "number", 1]], typ=BYTES32_T)
elif key == "tx.origin":
return IRnode.from_list(["origin"], typ=AddressT())
elif key == "tx.gasprice":
return IRnode.from_list(["gasprice"], typ=UINT256_T)
elif key == "chain.id":
return IRnode.from_list(["chainid"], typ=UINT256_T)
# Other variables
# self.x: global attribute
if (varinfo := self.expr._expr_info.var_info) is not None:
if varinfo.is_constant:
return Expr.parse_value_expr(varinfo.decl_node.value, self.context)
location = data_location_to_address_space(
varinfo.location, self.context.is_ctor_context
)
ret = IRnode.from_list(
varinfo.position.position,
typ=varinfo.typ,
location=location,
annotation="self." + self.expr.attr,
)
ret._referenced_variables = {varinfo}
return ret
sub = Expr(self.expr.value, self.context).ir_node
# contract type
if isinstance(sub.typ, InterfaceT):
# MyInterface.address
assert self.expr.attr == "address"
sub.typ = typ
return sub
if isinstance(sub.typ, StructT) and self.expr.attr in sub.typ.member_types:
return get_element_ptr(sub, self.expr.attr)
def parse_Subscript(self):
sub = Expr(self.expr.value, self.context).ir_node
if sub.value == "multi":
# force literal to memory, e.g.
# MY_LIST: constant(decimal[6])
# ...
# return MY_LIST[ix]
sub = ensure_in_memory(sub, self.context)
if isinstance(sub.typ, HashMapT):
# TODO sanity check we are in a self.my_map[i] situation
index = Expr(self.expr.slice, self.context).ir_node
if isinstance(index.typ, _BytestringT):
# we have to hash the key to get a storage location
index = keccak256_helper(index, self.context)
elif is_array_like(sub.typ):
index = Expr.parse_value_expr(self.expr.slice, self.context)
if read_write_overlap(sub, index):
raise CompilerPanic("risky overlap")
elif is_tuple_like(sub.typ):
# should we annotate expr.slice in the frontend with the
# folded value instead of calling reduced() here?
index = self.expr.slice.reduced().value
# note: this check should also happen in get_element_ptr
if not 0 <= index < len(sub.typ.member_types):
raise TypeCheckFailure("unreachable")
else:
raise TypeCheckFailure("unreachable")
ir_node = get_element_ptr(sub, index)
ir_node.mutable = sub.mutable
return ir_node
def parse_BinOp(self):
left = Expr.parse_value_expr(self.expr.left, self.context)
right = Expr.parse_value_expr(self.expr.right, self.context)
return Expr.handle_binop(self.expr.op, left, right, self.context)
@classmethod
def handle_binop(cls, op, left, right, context):
assert not left.is_pointer
assert not right.is_pointer
is_shift_op = isinstance(op, (vy_ast.LShift, vy_ast.RShift))
if is_shift_op:
assert is_numeric_type(left.typ) or is_bytes_m_type(left.typ)
assert is_numeric_type(right.typ)
else:
# Sanity check - ensure that we aren't dealing with different types
# This should be unreachable due to the type check pass
if left.typ != right.typ:
raise TypeCheckFailure(f"unreachable: {left.typ} != {right.typ}")
assert is_numeric_type(left.typ) or is_flag_type(left.typ) or is_bytes_m_type(left.typ)
out_typ = left.typ
if isinstance(op, vy_ast.BitAnd):
return IRnode.from_list(["and", left, right], typ=out_typ)
if isinstance(op, vy_ast.BitOr):
return IRnode.from_list(["or", left, right], typ=out_typ)
if isinstance(op, vy_ast.BitXor):
return IRnode.from_list(["xor", left, right], typ=out_typ)
if isinstance(op, vy_ast.LShift):
new_typ = left.typ
if is_numeric_type(new_typ) and new_typ.bits != 256:
# TODO implement me. ["and", 2**bits - 1, shl(right, left)]
raise TypeCheckFailure("unreachable")
if is_bytes_m_type(new_typ) and new_typ.m_bits != 256:
raise TypeCheckFailure("unreachable")
return IRnode.from_list(shl(right, left), typ=new_typ)
if isinstance(op, vy_ast.RShift):
new_typ = left.typ
if is_numeric_type(new_typ) and new_typ.bits != 256:
# TODO implement me. promote_signed_int(op(right, left), bits)
raise TypeCheckFailure("unreachable")
if is_bytes_m_type(new_typ) and new_typ.m_bits != 256:
raise TypeCheckFailure("unreachable")
op = shr if (is_bytes_m_type(left.typ) or not left.typ.is_signed) else sar
return IRnode.from_list(op(right, left), typ=new_typ)
# flags can only do bit ops, not arithmetic.
assert is_numeric_type(left.typ)
with left.cache_when_complex("x") as (b1, x), right.cache_when_complex("y") as (b2, y):
if isinstance(op, vy_ast.Add):
ret = arithmetic.safe_add(x, y)
elif isinstance(op, vy_ast.Sub):
ret = arithmetic.safe_sub(x, y)
elif isinstance(op, vy_ast.Mult):
ret = arithmetic.safe_mul(x, y)
elif isinstance(op, (vy_ast.Div, vy_ast.FloorDiv)):
ret = arithmetic.safe_div(x, y)
elif isinstance(op, vy_ast.Mod):
ret = arithmetic.safe_mod(x, y)
elif isinstance(op, vy_ast.Pow):
ret = arithmetic.safe_pow(x, y)
else: # pragma: nocover
raise CompilerPanic("Unreachable")
return IRnode.from_list(b1.resolve(b2.resolve(ret)), typ=out_typ)
def build_in_comparator(self):
left = Expr(self.expr.left, self.context).ir_node
right = Expr(self.expr.right, self.context).ir_node
# temporary kludge to block #2637 bug
# TODO actually fix the bug
if not left.typ._is_prim_word:
raise TypeMismatch(
"`in` not allowed for arrays of non-base types, tracked in issue #2637", self.expr
)
left = unwrap_location(left)
if isinstance(self.expr.op, vy_ast.In):
found, not_found = 1, 0
elif isinstance(self.expr.op, vy_ast.NotIn):
found, not_found = 0, 1
else: # pragma: no cover
raise TypeCheckFailure("unreachable")
i = IRnode.from_list(self.context.fresh_varname("in_ix"), typ=UINT256_T)
found_ptr = self.context.new_internal_variable(BoolT())
ret = ["seq"]
with left.cache_when_complex("needle") as (b1, left), right.cache_when_complex(
"haystack"
) as (b2, right):
# unroll the loop for compile-time list literals
if right.value == "multi":
# empty list literals should be rejected at typechecking time
assert len(right.args) > 0
args = [unwrap_location(val) for val in right.args]
if isinstance(self.expr.op, vy_ast.In):
checks = [["eq", left, val] for val in args]
return b1.resolve(b2.resolve(Expr._logical_or(checks)))
if isinstance(self.expr.op, vy_ast.NotIn):
checks = [["ne", left, val] for val in args]
return b1.resolve(b2.resolve(Expr._logical_and(checks)))
return # fail
# general case: loop over the list and check each element
# for equality
# location of i'th item from list
ith_element_ptr = get_element_ptr(right, i, array_bounds_check=False)
ith_element = unwrap_location(ith_element_ptr)
if isinstance(right.typ, SArrayT):
len_ = right.typ.count
else:
len_ = get_dyn_array_count(right)
# Condition repeat loop has to break on.
# TODO maybe put result on the stack
loop_body = [
"if",
["eq", left, ith_element],
["seq", ["mstore", found_ptr, found], "break"], # store true.
]
loop = ["repeat", i, 0, len_, right.typ.count, loop_body]
ret.append(["seq", ["mstore", found_ptr, not_found], loop, ["mload", found_ptr]])
return IRnode.from_list(b1.resolve(b2.resolve(ret)), typ=BoolT())
@staticmethod
def _signed_to_unsigned_comparison_op(op):
translation_map = {"sgt": "gt", "sge": "ge", "sle": "le", "slt": "lt"}
if op in translation_map:
return translation_map[op]
else:
return op
def parse_Compare(self):
left = Expr.parse_value_expr(self.expr.left, self.context)
right = Expr.parse_value_expr(self.expr.right, self.context)
if right.value is None:
raise TypeCheckFailure("unreachable")
if isinstance(self.expr.op, (vy_ast.In, vy_ast.NotIn)):
if is_array_like(right.typ):
return self.build_in_comparator()
else:
assert isinstance(right.typ, FlagT), right.typ
intersection = ["and", left, right]
if isinstance(self.expr.op, vy_ast.In):
return IRnode.from_list(["iszero", ["iszero", intersection]], typ=BoolT())
elif isinstance(self.expr.op, vy_ast.NotIn):
return IRnode.from_list(["iszero", intersection], typ=BoolT())
if isinstance(self.expr.op, vy_ast.Gt):
op = "sgt"
elif isinstance(self.expr.op, vy_ast.GtE):
op = "sge"
elif isinstance(self.expr.op, vy_ast.LtE):
op = "sle"
elif isinstance(self.expr.op, vy_ast.Lt):
op = "slt"
elif isinstance(self.expr.op, vy_ast.Eq):
op = "eq"
elif isinstance(self.expr.op, vy_ast.NotEq):
op = "ne"
else: # pragma: nocover
return
# Compare (limited to 32) byte arrays.
if isinstance(left.typ, _BytestringT) and isinstance(right.typ, _BytestringT):
left = Expr(self.expr.left, self.context).ir_node
right = Expr(self.expr.right, self.context).ir_node
left_keccak = keccak256_helper(left, self.context)
right_keccak = keccak256_helper(right, self.context)
if op not in ("eq", "ne"):
return # raises
else:
# use hash even for Bytes[N<=32], because there could be dirty
# bytes past the bytes data.
return IRnode.from_list([op, left_keccak, right_keccak], typ=BoolT())
# Compare other types.
elif is_numeric_type(left.typ) and is_numeric_type(right.typ):
if left.typ == right.typ and right.typ == UINT256_T:
# signed comparison ops work for any integer
# type BESIDES uint256
op = self._signed_to_unsigned_comparison_op(op)
elif left.typ._is_prim_word and right.typ._is_prim_word:
if op not in ("eq", "ne"):
raise TypeCheckFailure("unreachable")
else:
# kludge to block behavior in #2638
# TODO actually implement equality for complex types
raise TypeMismatch(
f"operation not yet supported for {left.typ}, {right.typ}, see issue #2638",
self.expr.op,
)
return IRnode.from_list([op, left, right], typ=BoolT())
def parse_BoolOp(self):
values = []
for value in self.expr.values:
# Check for boolean operations with non-boolean inputs
ir_val = Expr.parse_value_expr(value, self.context)
assert ir_val.typ == BoolT()
values.append(ir_val)
assert len(values) >= 2, "bad BoolOp"
if isinstance(self.expr.op, vy_ast.And):
return Expr._logical_and(values)
if isinstance(self.expr.op, vy_ast.Or):
return Expr._logical_or(values)
raise TypeCheckFailure(f"Unexpected boolop: {self.expr.op}") # pragma: nocover
@staticmethod
def _logical_and(values):
# return the logical and of a list of IRnodes
# create a nested if statement starting from the
# innermost node. note this also serves as the base case
# (`_logical_and([x]) == x`)
ir_node = values[-1]
# iterate backward through the remaining values,
# nesting further at each step
for val in values[-2::-1]:
# `x and y` => `if x { then y } { else 0 }`
ir_node = ["if", val, ir_node, 0]
return IRnode.from_list(ir_node, typ=BoolT())
@staticmethod
def _logical_or(values):
# return the logical or of a list of IRnodes
# create a nested if statement starting from the
# innermost node. note this also serves as the base case
# (`_logical_or([x]) == x`)
ir_node = values[-1]
# iterate backward through the remaining values,
# nesting further at each step
for val in values[-2::-1]:
# `x or y` => `if x { then 1 } { else y }`
ir_node = ["if", val, 1, ir_node]
return IRnode.from_list(ir_node, typ=BoolT())
# Unary operations (only "not" supported)
def parse_UnaryOp(self):
operand = Expr.parse_value_expr(self.expr.operand, self.context)
if isinstance(self.expr.op, vy_ast.Not):
if operand.typ._is_prim_word and operand.typ == BoolT():
return IRnode.from_list(["iszero", operand], typ=BoolT())
if isinstance(self.expr.op, vy_ast.Invert):
if isinstance(operand.typ, FlagT):
n_members = len(operand.typ._flag_members)
# use (xor 0b11..1 operand) to flip all the bits in
# `operand`. `mask` could be a very large constant and
# hurt codesize, but most user flags will likely have few
# enough members that the mask will not be large.
mask = (2**n_members) - 1
return IRnode.from_list(["xor", mask, operand], typ=operand.typ)
if operand.typ in (UINT256_T, BYTES32_T):
return IRnode.from_list(["not", operand], typ=operand.typ)
# block `~` for all other types, since reasoning
# about dirty bits is not entirely trivial. maybe revisit
# this at a later date.
raise UnimplementedException(f"~ is not supported for {operand.typ}", self.expr)
if isinstance(self.expr.op, vy_ast.USub) and is_numeric_type(operand.typ):
assert operand.typ.is_signed
# Clamp on minimum signed integer value as we cannot negate that
# value (all other integer values are fine)
min_int_val, _ = operand.typ.int_bounds
return IRnode.from_list(["sub", 0, clamp("sgt", operand, min_int_val)], typ=operand.typ)
# Function calls
def parse_Call(self):
# TODO fix cyclic import
from vyper.builtins._signatures import BuiltinFunctionT
func = self.expr.func
func_t = func._metadata["type"]
if isinstance(func_t, BuiltinFunctionT):
return func_t.build_IR(self.expr, self.context)
# Struct constructor
if is_type_t(func_t, StructT):
assert not self.is_stmt # sanity check typechecker
return self.handle_struct_literal()
# Interface constructor. Bar(<address>).
if is_type_t(func_t, InterfaceT) or func.get("attr") == "__at__":
assert not self.is_stmt # sanity check typechecker
# magic: do sanity checks for module.__at__
if func.get("attr") == "__at__":
assert isinstance(func_t, MemberFunctionT)
assert isinstance(func.value._metadata["type"], ModuleT)
(arg0,) = self.expr.args
arg_ir = Expr(arg0, self.context).ir_node
assert arg_ir.typ == AddressT()
arg_ir.typ = self.expr._metadata["type"]
return arg_ir
if isinstance(func_t, MemberFunctionT):
# TODO consider moving these to builtins or a dedicated file
darray = Expr(func.value, self.context).ir_node
assert isinstance(darray.typ, DArrayT)
args = [Expr(x, self.context).ir_node for x in self.expr.args]
if func.attr == "pop":
darray = Expr(func.value, self.context).ir_node
assert len(self.expr.args) == 0
return_item = not self.is_stmt
return pop_dyn_array(darray, return_popped_item=return_item)
elif func.attr == "append":
(arg,) = args
check_assign(
dummy_node_for_type(darray.typ.value_type), dummy_node_for_type(arg.typ)
)
ret = ["seq"]
if potential_overlap(darray, arg):
tmp = self.context.new_internal_variable(arg.typ)
ret.append(make_setter(tmp, arg))
arg = tmp
ret.append(append_dyn_array(darray, arg))
return IRnode.from_list(ret)
raise CompilerPanic("unreachable!") # pragma: nocover
assert isinstance(func_t, ContractFunctionT)
assert func_t.is_internal or func_t.is_constructor
return self_call.ir_for_self_call(self.expr, self.context)
@classmethod
def handle_external_call(cls, expr, context):
# TODO fix cyclic import
from vyper.builtins._signatures import BuiltinFunctionT
call_node = expr.value
assert isinstance(call_node, vy_ast.Call)
func_t = call_node.func._metadata["type"]
if isinstance(func_t, BuiltinFunctionT):
return func_t.build_IR(call_node, context)
return external_call.ir_for_external_call(call_node, context)
def parse_ExtCall(self):
return self.handle_external_call(self.expr, self.context)
def parse_StaticCall(self):
return self.handle_external_call(self.expr, self.context)
def parse_List(self):
typ = self.expr._metadata["type"]
if len(self.expr.elements) == 0:
return IRnode.from_list("~empty", typ=typ)
multi_ir = [Expr(x, self.context).ir_node for x in self.expr.elements]
return IRnode.from_list(["multi"] + multi_ir, typ=typ)
def parse_Tuple(self):
tuple_elements = [Expr(x, self.context).ir_node for x in self.expr.elements]
typ = TupleT([x.typ for x in tuple_elements])
multi_ir = IRnode.from_list(["multi"] + tuple_elements, typ=typ)
return multi_ir
def parse_IfExp(self):
test = Expr.parse_value_expr(self.expr.test, self.context)
assert test.typ == BoolT() # sanity check
body = Expr(self.expr.body, self.context).ir_node
orelse = Expr(self.expr.orelse, self.context).ir_node
# if they are in the same location, we can skip copying
# into memory. also for the case where either body or orelse are
# literal `multi` values (ex. for tuple or arrays), copy to
# memory (to avoid crashing in make_setter, XXX fixme).
if body.location != orelse.location or body.value == "multi":
body = ensure_in_memory(body, self.context)
orelse = ensure_in_memory(orelse, self.context)
assert body.location == orelse.location
# check this once compare_type has no side effects:
# assert body.typ.compare_type(orelse.typ)
typ = self.expr._metadata["type"]
location = body.location
return IRnode.from_list(["if", test, body, orelse], typ=typ, location=location)
def handle_struct_literal(self):
expr = self.expr
typ = expr._metadata["type"]
member_subs = {}
for kwarg in expr.keywords:
assert kwarg.arg not in member_subs
sub = Expr(kwarg.value, self.context).ir_node
member_subs[kwarg.arg] = sub
return IRnode.from_list(
["multi"] + [member_subs[key] for key in member_subs.keys()], typ=typ
)
# Parse an expression that results in a value
@classmethod
def parse_value_expr(cls, expr, context):
return unwrap_location(cls(expr, context).ir_node)
# Parse an expression that represents a pointer to memory/calldata or storage.
@classmethod
def parse_pointer_expr(cls, expr, context):
o = cls(expr, context).ir_node
if not o.location:
raise StructureException("Looking for a variable location, instead got a value", expr)
return o
| Expr |
python | ray-project__ray | rllib/examples/_old_api_stack/models/centralized_critic_models.py | {
"start": 5194,
"end": 6916
} | class ____(TorchModelV2, nn.Module):
"""Multi-agent model that implements a centralized value function.
It assumes the observation is a dict with 'own_obs' and 'opponent_obs', the
former of which can be used for computing actions (i.e., decentralized
execution), and the latter for optimization (i.e., centralized learning).
This model has two parts:
- An action model that looks at just 'own_obs' to compute actions
- A value model that also looks at the 'opponent_obs' / 'opponent_action'
to compute the value (it does this by using the 'obs_flat' tensor).
"""
def __init__(self, obs_space, action_space, num_outputs, model_config, name):
TorchModelV2.__init__(
self, obs_space, action_space, num_outputs, model_config, name
)
nn.Module.__init__(self)
self.action_model = TorchFC(
Box(low=0, high=1, shape=(6,)), # one-hot encoded Discrete(6)
action_space,
num_outputs,
model_config,
name + "_action",
)
self.value_model = TorchFC(
obs_space, action_space, 1, model_config, name + "_vf"
)
self._model_in = None
def forward(self, input_dict, state, seq_lens):
# Store model-input for possible `value_function()` call.
self._model_in = [input_dict["obs_flat"], state, seq_lens]
return self.action_model({"obs": input_dict["obs"]["own_obs"]}, state, seq_lens)
def value_function(self):
value_out, _ = self.value_model(
{"obs": self._model_in[0]}, self._model_in[1], self._model_in[2]
)
return torch.reshape(value_out, [-1])
| YetAnotherTorchCentralizedCriticModel |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/ui/auth.py | {
"start": 949,
"end": 1138
} | class ____(BaseModel):
"""Menu Item Collection serializer for responses."""
authorized_menu_items: list[MenuItem]
extra_menu_items: list[ExtraMenuItem]
| MenuItemCollectionResponse |
python | scrapy__scrapy | tests/test_spidermiddleware_process_start.py | {
"start": 441,
"end": 634
} | class ____:
async def process_start(self, start):
await sleep(SLEEP_SECONDS)
async for item_or_request in start:
yield item_or_request
| AsyncioSleepSpiderMiddleware |
python | django__django | tests/check_framework/template_test_apps/same_tags_app_2/apps.py | {
"start": 36,
"end": 140
} | class ____(AppConfig):
name = "check_framework.template_test_apps.same_tags_app_2"
| SameTagsApp2AppConfig |
python | sanic-org__sanic | examples/exception_monitoring.py | {
"start": 700,
"end": 1785
} | class ____(ErrorHandler):
def default(self, request, exception):
# Here, we have access to the exception object
# and can do anything with it (log, send to external service, etc)
# Some exceptions are trivial and built into Sanic (404s, etc)
if not isinstance(exception, SanicException):
print(exception)
# Then, we must finish handling the exception by returning
# our response to the client
# For this we can just call the super class' default handler
return super().default(request, exception)
"""
This is an ordinary Sanic server, with the exception that we set the
server's error_handler to an instance of our CustomHandler
"""
handler = CustomHandler()
app = Sanic("Example", error_handler=handler)
@app.route("/")
async def test(request):
# Here, something occurs which causes an unexpected exception
# This exception will flow to our custom handler.
raise SanicException("You Broke It!")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000, debug=True)
| CustomHandler |
python | crytic__slither | slither/utils/output_capture.py | {
"start": 432,
"end": 3225
} | class ____:
"""
Redirects and captures standard output/errors.
"""
original_stdout = None
original_stderr = None
original_logger_handlers = None
@staticmethod
def enable(block_original: bool = True) -> None:
"""
Redirects stdout and stderr to a capturable StringIO.
:param block_original: If True, blocks all output to the original stream. If False, duplicates output.
:return: None
"""
# Redirect stdout
if StandardOutputCapture.original_stdout is None:
StandardOutputCapture.original_stdout = sys.stdout
sys.stdout = CapturingStringIO(
None if block_original else StandardOutputCapture.original_stdout
)
# Redirect stderr
if StandardOutputCapture.original_stderr is None:
StandardOutputCapture.original_stderr = sys.stderr
sys.stderr = CapturingStringIO(
None if block_original else StandardOutputCapture.original_stderr
)
# Backup and swap root logger handlers
root_logger = logging.getLogger()
StandardOutputCapture.original_logger_handlers = root_logger.handlers
root_logger.handlers = [logging.StreamHandler(sys.stderr)]
@staticmethod
def disable() -> None:
"""
Disables redirection of stdout/stderr, if previously enabled.
:return: None
"""
# If we have a stdout backup, restore it.
if StandardOutputCapture.original_stdout is not None:
sys.stdout.close()
sys.stdout = StandardOutputCapture.original_stdout
StandardOutputCapture.original_stdout = None
# If we have an stderr backup, restore it.
if StandardOutputCapture.original_stderr is not None:
sys.stderr.close()
sys.stderr = StandardOutputCapture.original_stderr
StandardOutputCapture.original_stderr = None
# Restore our logging handlers
if StandardOutputCapture.original_logger_handlers is not None:
root_logger = logging.getLogger()
root_logger.handlers = StandardOutputCapture.original_logger_handlers
StandardOutputCapture.original_logger_handlers = None
@staticmethod
def get_stdout_output() -> str:
"""
Obtains the output from the currently set stdout
:return: Returns stdout output as a string
"""
sys.stdout.seek(0)
return sys.stdout.read()
@staticmethod
def get_stderr_output() -> str:
"""
Obtains the output from the currently set stderr
:return: Returns stderr output as a string
"""
sys.stderr.seek(0)
return sys.stderr.read()
| StandardOutputCapture |
python | sympy__sympy | sympy/polys/solvers.py | {
"start": 625,
"end": 13535
} | class ____(MutableDenseMatrix):
"""
.. deprecated:: 1.9
This class fundamentally is broken by design. Use ``DomainMatrix`` if
you want a matrix over the polys domains or ``Matrix`` for a matrix
with ``Expr`` elements. The ``RawMatrix`` class will be removed/broken
in future in order to reestablish the invariant that the elements of a
Matrix should be of type ``Expr``.
"""
_sympify = staticmethod(lambda x, *args, **kwargs: x) # type: ignore
def __init__(self, *args, **kwargs):
sympy_deprecation_warning(
"""
The RawMatrix class is deprecated. Use either DomainMatrix or
Matrix instead.
""",
deprecated_since_version="1.9",
active_deprecations_target="deprecated-rawmatrix",
)
domain = ZZ
for i in range(self.rows):
for j in range(self.cols):
val = self[i,j]
if getattr(val, 'is_Poly', False):
K = val.domain[val.gens]
val_sympy = val.as_expr()
elif hasattr(val, 'parent'):
K = val.parent()
val_sympy = K.to_sympy(val)
elif isinstance(val, (int, Integer)):
K = ZZ
val_sympy = sympify(val)
elif isinstance(val, Rational):
K = QQ
val_sympy = val
else:
for K in ZZ, QQ:
if K.of_type(val):
val_sympy = K.to_sympy(val)
break
else:
raise TypeError
domain = domain.unify(K)
self[i,j] = val_sympy
self.ring = domain
def eqs_to_matrix(eqs_coeffs, eqs_rhs, gens, domain):
"""Get matrix from linear equations in dict format.
Explanation
===========
Get the matrix representation of a system of linear equations represented
as dicts with low-level DomainElement coefficients. This is an
*internal* function that is used by solve_lin_sys.
Parameters
==========
eqs_coeffs: list[dict[Symbol, DomainElement]]
The left hand sides of the equations as dicts mapping from symbols to
coefficients where the coefficients are instances of
DomainElement.
eqs_rhs: list[DomainElements]
The right hand sides of the equations as instances of
DomainElement.
gens: list[Symbol]
The unknowns in the system of equations.
domain: Domain
The domain for coefficients of both lhs and rhs.
Returns
=======
The augmented matrix representation of the system as a DomainMatrix.
Examples
========
>>> from sympy import symbols, ZZ
>>> from sympy.polys.solvers import eqs_to_matrix
>>> x, y = symbols('x, y')
>>> eqs_coeff = [{x:ZZ(1), y:ZZ(1)}, {x:ZZ(1), y:ZZ(-1)}]
>>> eqs_rhs = [ZZ(0), ZZ(-1)]
>>> eqs_to_matrix(eqs_coeff, eqs_rhs, [x, y], ZZ)
DomainMatrix([[1, 1, 0], [1, -1, 1]], (2, 3), ZZ)
See also
========
solve_lin_sys: Uses :func:`~eqs_to_matrix` internally
"""
sym2index = {x: n for n, x in enumerate(gens)}
nrows = len(eqs_coeffs)
ncols = len(gens) + 1
rows = [[domain.zero] * ncols for _ in range(nrows)]
for row, eq_coeff, eq_rhs in zip(rows, eqs_coeffs, eqs_rhs):
for sym, coeff in eq_coeff.items():
row[sym2index[sym]] = domain.convert(coeff)
row[-1] = -domain.convert(eq_rhs)
return DomainMatrix(rows, (nrows, ncols), domain)
def sympy_eqs_to_ring(eqs, symbols):
"""Convert a system of equations from Expr to a PolyRing
Explanation
===========
High-level functions like ``solve`` expect Expr as inputs but can use
``solve_lin_sys`` internally. This function converts equations from
``Expr`` to the low-level poly types used by the ``solve_lin_sys``
function.
Parameters
==========
eqs: List of Expr
A list of equations as Expr instances
symbols: List of Symbol
A list of the symbols that are the unknowns in the system of
equations.
Returns
=======
Tuple[List[PolyElement], Ring]: The equations as PolyElement instances
and the ring of polynomials within which each equation is represented.
Examples
========
>>> from sympy import symbols
>>> from sympy.polys.solvers import sympy_eqs_to_ring
>>> a, x, y = symbols('a, x, y')
>>> eqs = [x-y, x+a*y]
>>> eqs_ring, ring = sympy_eqs_to_ring(eqs, [x, y])
>>> eqs_ring
[x - y, x + a*y]
>>> type(eqs_ring[0])
<class 'sympy.polys.rings.PolyElement'>
>>> ring
ZZ(a)[x,y]
With the equations in this form they can be passed to ``solve_lin_sys``:
>>> from sympy.polys.solvers import solve_lin_sys
>>> solve_lin_sys(eqs_ring, ring)
{y: 0, x: 0}
"""
try:
K, eqs_K = sring(eqs, symbols, field=True, extension=True)
except NotInvertible:
# https://github.com/sympy/sympy/issues/18874
K, eqs_K = sring(eqs, symbols, domain=EX)
return eqs_K, K.to_domain()
def solve_lin_sys(eqs, ring, _raw=True):
"""Solve a system of linear equations from a PolynomialRing
Explanation
===========
Solves a system of linear equations given as PolyElement instances of a
PolynomialRing. The basic arithmetic is carried out using instance of
DomainElement which is more efficient than :class:`~sympy.core.expr.Expr`
for the most common inputs.
While this is a public function it is intended primarily for internal use
so its interface is not necessarily convenient. Users are suggested to use
the :func:`sympy.solvers.solveset.linsolve` function (which uses this
function internally) instead.
Parameters
==========
eqs: list[PolyElement]
The linear equations to be solved as elements of a
PolynomialRing (assumed equal to zero).
ring: PolynomialRing
The polynomial ring from which eqs are drawn. The generators of this
ring are the unknowns to be solved for and the domain of the ring is
the domain of the coefficients of the system of equations.
_raw: bool
If *_raw* is False, the keys and values in the returned dictionary
will be of type Expr (and the unit of the field will be removed from
the keys) otherwise the low-level polys types will be returned, e.g.
PolyElement: PythonRational.
Returns
=======
``None`` if the system has no solution.
dict[Symbol, Expr] if _raw=False
dict[Symbol, DomainElement] if _raw=True.
Examples
========
>>> from sympy import symbols
>>> from sympy.polys.solvers import solve_lin_sys, sympy_eqs_to_ring
>>> x, y = symbols('x, y')
>>> eqs = [x - y, x + y - 2]
>>> eqs_ring, ring = sympy_eqs_to_ring(eqs, [x, y])
>>> solve_lin_sys(eqs_ring, ring)
{y: 1, x: 1}
Passing ``_raw=False`` returns the same result except that the keys are
``Expr`` rather than low-level poly types.
>>> solve_lin_sys(eqs_ring, ring, _raw=False)
{x: 1, y: 1}
See also
========
sympy_eqs_to_ring: prepares the inputs to ``solve_lin_sys``.
linsolve: ``linsolve`` uses ``solve_lin_sys`` internally.
sympy.solvers.solvers.solve: ``solve`` uses ``solve_lin_sys`` internally.
"""
as_expr = not _raw
assert ring.domain.is_Field
eqs_dict = [dict(eq) for eq in eqs]
one_monom = ring.one.monoms()[0]
zero = ring.domain.zero
eqs_rhs = []
eqs_coeffs = []
for eq_dict in eqs_dict:
eq_rhs = eq_dict.pop(one_monom, zero)
eq_coeffs = {}
for monom, coeff in eq_dict.items():
if sum(monom) != 1:
msg = "Nonlinear term encountered in solve_lin_sys"
raise PolyNonlinearError(msg)
eq_coeffs[ring.gens[monom.index(1)]] = coeff
if not eq_coeffs:
if not eq_rhs:
continue
else:
return None
eqs_rhs.append(eq_rhs)
eqs_coeffs.append(eq_coeffs)
result = _solve_lin_sys(eqs_coeffs, eqs_rhs, ring)
if result is not None and as_expr:
def to_sympy(x):
as_expr = getattr(x, 'as_expr', None)
if as_expr:
return as_expr()
else:
return ring.domain.to_sympy(x)
tresult = {to_sympy(sym): to_sympy(val) for sym, val in result.items()}
# Remove 1.0x
result = {}
for k, v in tresult.items():
if k.is_Mul:
c, s = k.as_coeff_Mul()
result[s] = v/c
else:
result[k] = v
return result
def _solve_lin_sys(eqs_coeffs, eqs_rhs, ring):
"""Solve a linear system from dict of PolynomialRing coefficients
Explanation
===========
This is an **internal** function used by :func:`solve_lin_sys` after the
equations have been preprocessed. The role of this function is to split
the system into connected components and pass those to
:func:`_solve_lin_sys_component`.
Examples
========
Setup a system for $x-y=0$ and $x+y=2$ and solve:
>>> from sympy import symbols, sring
>>> from sympy.polys.solvers import _solve_lin_sys
>>> x, y = symbols('x, y')
>>> R, (xr, yr) = sring([x, y], [x, y])
>>> eqs = [{xr:R.one, yr:-R.one}, {xr:R.one, yr:R.one}]
>>> eqs_rhs = [R.zero, -2*R.one]
>>> _solve_lin_sys(eqs, eqs_rhs, R)
{y: 1, x: 1}
See also
========
solve_lin_sys: This function is used internally by :func:`solve_lin_sys`.
"""
V = ring.gens
E = []
for eq_coeffs in eqs_coeffs:
syms = list(eq_coeffs)
E.extend(zip(syms[:-1], syms[1:]))
G = V, E
components = connected_components(G)
sym2comp = {}
for n, component in enumerate(components):
for sym in component:
sym2comp[sym] = n
subsystems = [([], []) for _ in range(len(components))]
for eq_coeff, eq_rhs in zip(eqs_coeffs, eqs_rhs):
sym = next(iter(eq_coeff), None)
sub_coeff, sub_rhs = subsystems[sym2comp[sym]]
sub_coeff.append(eq_coeff)
sub_rhs.append(eq_rhs)
sol = {}
for subsystem in subsystems:
subsol = _solve_lin_sys_component(subsystem[0], subsystem[1], ring)
if subsol is None:
return None
sol.update(subsol)
return sol
def _solve_lin_sys_component(eqs_coeffs, eqs_rhs, ring):
"""Solve a linear system from dict of PolynomialRing coefficients
Explanation
===========
This is an **internal** function used by :func:`solve_lin_sys` after the
equations have been preprocessed. After :func:`_solve_lin_sys` splits the
system into connected components this function is called for each
component. The system of equations is solved using Gauss-Jordan
elimination with division followed by back-substitution.
Examples
========
Setup a system for $x-y=0$ and $x+y=2$ and solve:
>>> from sympy import symbols, sring
>>> from sympy.polys.solvers import _solve_lin_sys_component
>>> x, y = symbols('x, y')
>>> R, (xr, yr) = sring([x, y], [x, y])
>>> eqs = [{xr:R.one, yr:-R.one}, {xr:R.one, yr:R.one}]
>>> eqs_rhs = [R.zero, -2*R.one]
>>> _solve_lin_sys_component(eqs, eqs_rhs, R)
{y: 1, x: 1}
See also
========
solve_lin_sys: This function is used internally by :func:`solve_lin_sys`.
"""
# transform from equations to matrix form
matrix = eqs_to_matrix(eqs_coeffs, eqs_rhs, ring.gens, ring.domain)
# convert to a field for rref
if not matrix.domain.is_Field:
matrix = matrix.to_field()
# solve by row-reduction
echelon, pivots = matrix.rref()
# construct the returnable form of the solutions
keys = ring.gens
if pivots and pivots[-1] == len(keys):
return None
if len(pivots) == len(keys):
sol = []
for s in [row[-1] for row in echelon.rep.to_ddm()]:
a = s
sol.append(a)
sols = dict(zip(keys, sol))
else:
sols = {}
g = ring.gens
# Extract ground domain coefficients and convert to the ring:
if hasattr(ring, 'ring'):
convert = ring.ring.ground_new
else:
convert = ring.ground_new
echelon = echelon.rep.to_ddm()
vals_set = {v for row in echelon for v in row}
vals_map = {v: convert(v) for v in vals_set}
echelon = [[vals_map[eij] for eij in ei] for ei in echelon]
for i, p in enumerate(pivots):
v = echelon[i][-1] - sum(echelon[i][j]*g[j] for j in range(p+1, len(g)) if echelon[i][j])
sols[keys[p]] = v
return sols
| RawMatrix |
python | getsentry__sentry | src/sentry/seer/explorer/client_models.py | {
"start": 183,
"end": 324
} | class ____(BaseModel):
"""A tool call in a message."""
function: str
args: str
class Config:
extra = "allow"
| ToolCall |
python | Netflix__metaflow | metaflow/_vendor/click/parser.py | {
"start": 3791,
"end": 5339
} | class ____(object):
def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
self._short_opts = []
self._long_opts = []
self.prefixes = set()
for opt in opts:
prefix, value = split_opt(opt)
if not prefix:
raise ValueError("Invalid start character for option ({})".format(opt))
self.prefixes.add(prefix[0])
if len(prefix) == 1 and len(value) == 1:
self._short_opts.append(opt)
else:
self._long_opts.append(opt)
self.prefixes.add(prefix)
if action is None:
action = "store"
self.dest = dest
self.action = action
self.nargs = nargs
self.const = const
self.obj = obj
@property
def takes_value(self):
return self.action in ("store", "append")
def process(self, value, state):
if self.action == "store":
state.opts[self.dest] = value
elif self.action == "store_const":
state.opts[self.dest] = self.const
elif self.action == "append":
state.opts.setdefault(self.dest, []).append(value)
elif self.action == "append_const":
state.opts.setdefault(self.dest, []).append(self.const)
elif self.action == "count":
state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
else:
raise ValueError("unknown action '{}'".format(self.action))
state.order.append(self.obj)
| Option |
python | plotly__plotly.py | plotly/graph_objs/scatterpolargl/selected/_marker.py | {
"start": 233,
"end": 3620
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolargl.selected"
_path_str = "scatterpolargl.selected.marker"
_valid_props = {"color", "opacity", "size"}
@property
def color(self):
"""
Sets the marker color of selected points.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def opacity(self):
"""
Sets the marker opacity of selected points.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def size(self):
"""
Sets the marker size of selected points.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the marker color of selected points.
opacity
Sets the marker opacity of selected points.
size
Sets the marker size of selected points.
"""
def __init__(self, arg=None, color=None, opacity=None, size=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterpolargl
.selected.Marker`
color
Sets the marker color of selected points.
opacity
Sets the marker opacity of selected points.
size
Sets the marker size of selected points.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolargl.selected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.selected.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("opacity", arg, opacity)
self._set_property("size", arg, size)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Marker |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/unitofwork.py | {
"start": 17349,
"end": 19026
} | class ____(_IterateMappersMixin):
__slots__ = (
"dependency_processor",
"fromparent",
"processed",
"setup_flush_actions",
)
def __init__(self, dependency_processor, fromparent):
self.dependency_processor = dependency_processor
self.fromparent = fromparent
self.processed = set()
self.setup_flush_actions = False
def execute(self, uow):
delete_states = set()
save_states = set()
for mapper in self._mappers(uow):
for state in uow.mappers[mapper].difference(self.processed):
(isdelete, listonly) = uow.states[state]
if not listonly:
if isdelete:
delete_states.add(state)
else:
save_states.add(state)
if delete_states:
self.dependency_processor.presort_deletes(uow, delete_states)
self.processed.update(delete_states)
if save_states:
self.dependency_processor.presort_saves(uow, save_states)
self.processed.update(save_states)
if delete_states or save_states:
if not self.setup_flush_actions and (
self.dependency_processor.prop_has_changes(
uow, delete_states, True
)
or self.dependency_processor.prop_has_changes(
uow, save_states, False
)
):
self.dependency_processor.per_property_flush_actions(uow)
self.setup_flush_actions = True
return True
else:
return False
| _Preprocess |
python | getsentry__sentry | tests/sentry/seer/endpoints/test_group_autofix_setup_check.py | {
"start": 8339,
"end": 13307
} | class ____(APITestCase, SnubaTestCase):
def test_missing_integration(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
self.organization_integration.delete()
group = self.create_group()
self.login_as(user=self.user)
url = f"/api/0/issues/{group.id}/autofix/setup/"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["integration"] == {
"ok": False,
"reason": "integration_missing",
}
@patch(
"sentry.seer.endpoints.group_autofix_setup_check.get_repos_and_access",
return_value=[
{
"provider": "github",
"owner": "getsentry",
"name": "seer",
"external_id": "123",
"ok": False,
},
{
"provider": "github",
"owner": "getsentry",
"name": "sentry",
"external_id": "234",
"ok": True,
},
],
)
def test_repo_write_access_not_ready(self, mock_get_repos_and_access: MagicMock) -> None:
group = self.create_group()
self.login_as(user=self.user)
url = f"/api/0/issues/{group.id}/autofix/setup/?check_write_access=true"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["githubWriteIntegration"] == {
"ok": False,
"repos": [
{
"provider": "github",
"owner": "getsentry",
"name": "seer",
"external_id": "123",
"ok": False,
},
{
"provider": "github",
"owner": "getsentry",
"name": "sentry",
"external_id": "234",
"ok": True,
},
],
}
@patch(
"sentry.seer.endpoints.group_autofix_setup_check.get_repos_and_access",
return_value=[],
)
def test_repo_write_access_no_repos(self, mock_get_repos_and_access: MagicMock) -> None:
group = self.create_group()
self.login_as(user=self.user)
url = f"/api/0/issues/{group.id}/autofix/setup/?check_write_access=true"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["githubWriteIntegration"] == {
"ok": False,
"repos": [],
}
@patch("sentry.seer.endpoints.group_autofix_setup_check.requests.post")
@patch(
"sentry.seer.endpoints.group_autofix_setup_check.get_autofix_repos_from_project_code_mappings",
return_value=[
{
"provider": "github",
"owner": "getsentry",
"name": "seer",
"external_id": "123",
}
],
)
def test_non_github_provider(self, mock_get_repos: MagicMock, mock_post: MagicMock) -> None:
# Mock the response from the Seer service
mock_response = mock_post.return_value
mock_response.json.return_value = {"has_access": True}
group = self.create_group()
result = get_repos_and_access(self.project, group.id)
# Verify the result
assert result == [
{
"provider": "github",
"owner": "getsentry",
"name": "seer",
"external_id": "123",
"ok": True,
}
]
# Verify the API call was made correctly
mock_post.assert_called_once()
call_kwargs = mock_post.call_args.kwargs
assert "data" in call_kwargs
assert "headers" in call_kwargs
assert "content-type" in call_kwargs["headers"]
@patch("sentry.seer.endpoints.group_autofix_setup_check.requests.post")
@patch(
"sentry.seer.endpoints.group_autofix_setup_check.get_autofix_repos_from_project_code_mappings",
return_value=[
{
"provider": "github",
"owner": "getsentry",
"name": "seer",
"external_id": "123",
}
],
)
def test_repo_without_access(self, mock_get_repos: MagicMock, mock_post: MagicMock) -> None:
# Mock the response to indicate no access
mock_response = mock_post.return_value
mock_response.json.return_value = {"has_access": False}
group = self.create_group()
result = get_repos_and_access(self.project, group.id)
assert result == [
{
"provider": "github",
"owner": "getsentry",
"name": "seer",
"external_id": "123",
"ok": False,
}
]
| GroupAIAutofixEndpointFailureTest |
python | google__flatbuffers | tests/monster_test_generated.py | {
"start": 17709,
"end": 18985
} | class ____(object):
# StructOfStructsOfStructsT
def __init__(
self,
a = None,
):
self.a = a # type: Optional[StructOfStructsT]
@classmethod
def InitFromBuf(cls, buf, pos):
structOfStructsOfStructs = StructOfStructsOfStructs()
structOfStructsOfStructs.Init(buf, pos)
return cls.InitFromObj(structOfStructsOfStructs)
@classmethod
def InitFromPackedBuf(cls, buf, pos=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
return cls.InitFromBuf(buf, pos+n)
@classmethod
def InitFromObj(cls, structOfStructsOfStructs):
x = StructOfStructsOfStructsT()
x._UnPack(structOfStructsOfStructs)
return x
# StructOfStructsOfStructsT
def _UnPack(self, structOfStructsOfStructs):
if structOfStructsOfStructs is None:
return
if structOfStructsOfStructs.A(StructOfStructs()) is not None:
self.a = StructOfStructsT.InitFromObj(structOfStructsOfStructs.A(StructOfStructs()))
# StructOfStructsOfStructsT
def Pack(self, builder):
return CreateStructOfStructsOfStructs(builder, self.a.a.id, self.a.a.distance, self.a.b.a, self.a.b.b, self.a.c.id, self.a.c.distance)
| StructOfStructsOfStructsT |
python | huggingface__transformers | src/transformers/utils/quantization_config.py | {
"start": 55276,
"end": 56463
} | class ____(QuantizationConfigMixin):
"""
This is a wrapper class about all possible attributes and features that you can play with a model that has been
loaded using `eetq`.
Args:
weights (`str`, *optional*, defaults to `"int8"`):
The target dtype for the weights. Supported value is only "int8"
modules_to_not_convert (`list`, *optional*, default to `None`):
The list of modules to not quantize, useful for quantizing models that explicitly require to have
some modules left in their original precision.
"""
def __init__(
self,
weights: str = "int8",
modules_to_not_convert: list | None = None,
**kwargs,
):
self.quant_method = QuantizationMethod.EETQ
self.weights = weights
self.modules_to_not_convert = modules_to_not_convert
self.post_init()
def post_init(self):
r"""
Safety checker that arguments are correct
"""
accepted_weights = ["int8"]
if self.weights not in accepted_weights:
raise ValueError(f"Only support weights in {accepted_weights} but found {self.weights}")
| EetqConfig |
python | getsentry__sentry | src/sentry/api/serializers/models/role.py | {
"start": 426,
"end": 628
} | class ____(TypedDict):
id: str
name: str
desc: str
scopes: frozenset[str]
allowed: bool
isAllowed: bool
isRetired: bool
isTeamRolesAllowed: bool
| BaseRoleSerializerResponse |
python | doocs__leetcode | solution/0800-0899/0863.All Nodes Distance K in Binary Tree/Solution.py | {
"start": 164,
"end": 834
} | class ____:
def distanceK(self, root: TreeNode, target: TreeNode, k: int) -> List[int]:
def dfs(root, fa):
if root is None:
return
g[root] = fa
dfs(root.left, root)
dfs(root.right, root)
def dfs2(root, fa, k):
if root is None:
return
if k == 0:
ans.append(root.val)
return
for nxt in (root.left, root.right, g[root]):
if nxt != fa:
dfs2(nxt, root, k - 1)
g = {}
dfs(root, None)
ans = []
dfs2(target, None, k)
return ans
| Solution |
python | explosion__spaCy | spacy/displacy/render.py | {
"start": 11492,
"end": 20189
} | class ____:
"""Render dependency parses as SVGs."""
style = "dep"
def __init__(self, options: Dict[str, Any] = {}) -> None:
"""Initialise dependency renderer.
options (dict): Visualiser-specific options (compact, word_spacing,
arrow_spacing, arrow_width, arrow_stroke, distance, offset_x,
color, bg, font)
"""
self.compact = options.get("compact", False)
self.word_spacing = options.get("word_spacing", 45)
self.arrow_spacing = options.get("arrow_spacing", 12 if self.compact else 20)
self.arrow_width = options.get("arrow_width", 6 if self.compact else 10)
self.arrow_stroke = options.get("arrow_stroke", 2)
self.distance = options.get("distance", 150 if self.compact else 175)
self.offset_x = options.get("offset_x", 50)
self.color = options.get("color", "#000000")
self.bg = options.get("bg", "#ffffff")
self.font = options.get("font", "Arial")
self.direction = DEFAULT_DIR
self.lang = DEFAULT_LANG
def render(
self, parsed: List[Dict[str, Any]], page: bool = False, minify: bool = False
) -> str:
"""Render complete markup.
parsed (list): Dependency parses to render.
page (bool): Render parses wrapped as full HTML page.
minify (bool): Minify HTML markup.
RETURNS (str): Rendered SVG or HTML markup.
"""
# Create a random ID prefix to make sure parses don't receive the
# same ID, even if they're identical
id_prefix = uuid.uuid4().hex
rendered = []
for i, p in enumerate(parsed):
if i == 0:
settings = p.get("settings", {})
self.direction = settings.get("direction", DEFAULT_DIR)
self.lang = settings.get("lang", DEFAULT_LANG)
render_id = f"{id_prefix}-{i}"
svg = self.render_svg(render_id, p["words"], p["arcs"])
if p.get("title"):
svg = TPL_TITLE.format(title=p.get("title")) + svg
rendered.append(svg)
if page:
content = "".join([TPL_FIGURE.format(content=svg) for svg in rendered])
markup = TPL_PAGE.format(
content=content, lang=self.lang, dir=self.direction
)
else:
markup = "".join(rendered)
if minify:
return minify_html(markup)
return markup
def render_svg(
self,
render_id: Union[int, str],
words: List[Dict[str, Any]],
arcs: List[Dict[str, Any]],
) -> str:
"""Render SVG.
render_id (Union[int, str]): Unique ID, typically index of document.
words (list): Individual words and their tags.
arcs (list): Individual arcs and their start, end, direction and label.
RETURNS (str): Rendered SVG markup.
"""
self.levels = self.get_levels(arcs)
self.highest_level = max(self.levels.values(), default=0)
self.offset_y = self.distance / 2 * self.highest_level + self.arrow_stroke
self.width = self.offset_x + len(words) * self.distance
self.height = self.offset_y + 3 * self.word_spacing
self.id = render_id
words_svg = [
self.render_word(w["text"], w["tag"], w.get("lemma", None), i)
for i, w in enumerate(words)
]
arcs_svg = [
self.render_arrow(a["label"], a["start"], a["end"], a["dir"], i)
for i, a in enumerate(arcs)
]
content = "".join(words_svg) + "".join(arcs_svg)
return TPL_DEP_SVG.format(
id=self.id,
width=self.width,
height=self.height,
color=self.color,
bg=self.bg,
font=self.font,
content=content,
dir=self.direction,
lang=self.lang,
)
def render_word(self, text: str, tag: str, lemma: str, i: int) -> str:
"""Render individual word.
text (str): Word text.
tag (str): Part-of-speech tag.
i (int): Unique ID, typically word index.
RETURNS (str): Rendered SVG markup.
"""
y = self.offset_y + self.word_spacing
x = self.offset_x + i * self.distance
if self.direction == "rtl":
x = self.width - x
html_text = escape_html(text)
if lemma is not None:
return TPL_DEP_WORDS_LEMMA.format(
text=html_text, tag=tag, lemma=lemma, x=x, y=y
)
return TPL_DEP_WORDS.format(text=html_text, tag=tag, x=x, y=y)
def render_arrow(
self, label: str, start: int, end: int, direction: str, i: int
) -> str:
"""Render individual arrow.
label (str): Dependency label.
start (int): Index of start word.
end (int): Index of end word.
direction (str): Arrow direction, 'left' or 'right'.
i (int): Unique ID, typically arrow index.
RETURNS (str): Rendered SVG markup.
"""
if start < 0 or end < 0:
error_args = dict(start=start, end=end, label=label, dir=direction)
raise ValueError(Errors.E157.format(**error_args))
level = self.levels[(start, end, label)]
x_start = self.offset_x + start * self.distance + self.arrow_spacing
if self.direction == "rtl":
x_start = self.width - x_start
y = self.offset_y
x_end = (
self.offset_x
+ (end - start) * self.distance
+ start * self.distance
- self.arrow_spacing * (self.highest_level - level) / 4
)
if self.direction == "rtl":
x_end = self.width - x_end
y_curve = self.offset_y - level * self.distance / 2
if self.compact:
y_curve = self.offset_y - level * self.distance / 6
if y_curve == 0 and max(self.levels.values(), default=0) > 5:
y_curve = -self.distance
arrowhead = self.get_arrowhead(direction, x_start, y, x_end)
arc = self.get_arc(x_start, y, y_curve, x_end)
label_side = "right" if self.direction == "rtl" else "left"
return TPL_DEP_ARCS.format(
id=self.id,
i=i,
stroke=self.arrow_stroke,
head=arrowhead,
label=label,
label_side=label_side,
arc=arc,
)
def get_arc(self, x_start: int, y: int, y_curve: int, x_end: int) -> str:
"""Render individual arc.
x_start (int): X-coordinate of arrow start point.
y (int): Y-coordinate of arrow start and end point.
y_curve (int): Y-corrdinate of Cubic Bézier y_curve point.
x_end (int): X-coordinate of arrow end point.
RETURNS (str): Definition of the arc path ('d' attribute).
"""
template = "M{x},{y} C{x},{c} {e},{c} {e},{y}"
if self.compact:
template = "M{x},{y} {x},{c} {e},{c} {e},{y}"
return template.format(x=x_start, y=y, c=y_curve, e=x_end)
def get_arrowhead(self, direction: str, x: int, y: int, end: int) -> str:
"""Render individual arrow head.
direction (str): Arrow direction, 'left' or 'right'.
x (int): X-coordinate of arrow start point.
y (int): Y-coordinate of arrow start and end point.
end (int): X-coordinate of arrow end point.
RETURNS (str): Definition of the arrow head path ('d' attribute).
"""
if direction == "left":
p1, p2, p3 = (x, x - self.arrow_width + 2, x + self.arrow_width - 2)
else:
p1, p2, p3 = (end, end + self.arrow_width - 2, end - self.arrow_width + 2)
return f"M{p1},{y + 2} L{p2},{y - self.arrow_width} {p3},{y - self.arrow_width}"
def get_levels(self, arcs: List[Dict[str, Any]]) -> Dict[Tuple[int, int, str], int]:
"""Calculate available arc height "levels".
Used to calculate arrow heights dynamically and without wasting space.
args (list): Individual arcs and their start, end, direction and label.
RETURNS (dict): Arc levels keyed by (start, end, label).
"""
arcs = [dict(t) for t in {tuple(sorted(arc.items())) for arc in arcs}]
length = max([arc["end"] for arc in arcs], default=0)
max_level = [0] * length
levels = {}
for arc in sorted(arcs, key=lambda arc: arc["end"] - arc["start"]):
level = max(max_level[arc["start"] : arc["end"]]) + 1
for i in range(arc["start"], arc["end"]):
max_level[i] = level
levels[(arc["start"], arc["end"], arc["label"])] = level
return levels
| DependencyRenderer |
python | mlflow__mlflow | mlflow/pyspark/ml/__init__.py | {
"start": 20456,
"end": 55641
} | class ____:
"""
This class is designed for holding information which is used by autologging metrics
It will hold information of:
(1) a map of "prediction result object id" to a tuple of dataset name(the dataset is
the one which generate the prediction result) and run_id.
Note: We need this map instead of setting the run_id into the "prediction result object"
because the object maybe a numpy array which does not support additional attribute
assignment.
(2) _log_post_training_metrics_enabled flag, in the following method scope:
`Estimator.fit`, `Model.transform`, `Evaluator.evaluate`,
in order to avoid nested/duplicated autologging metric, when run into these scopes,
we need temporarily disable the metric autologging.
(3) _eval_dataset_info_map, it is a double level map:
`_eval_dataset_info_map[run_id][eval_dataset_var_name]` will get a list, each
element in the list is an id of "eval_dataset" instance.
This data structure is used for:
* generating unique dataset name key when autologging metric. For each eval dataset object,
if they have the same eval_dataset_var_name, but object ids are different,
then they will be assigned different name (via appending index to the
eval_dataset_var_name) when autologging.
(4) _evaluator_call_info, it is a double level map:
`_metric_api_call_info[run_id][metric_name]` will get a list of tuples, each tuple is:
(logged_metric_key, evaluator_information)
Evaluator information includes evaluator class name and params, these information
will also be logged into "metric_info.json" artifacts.
Note: this class is not thread-safe.
Design rule for this class:
Because this class instance is a global instance, in order to prevent memory leak, it should
only holds IDs and other small objects references. This class internal data structure should
avoid reference to user dataset variables or model variables.
"""
def __init__(self):
self._pred_result_id_to_dataset_name_and_run_id = {}
self._eval_dataset_info_map = defaultdict(lambda: defaultdict(list))
self._evaluator_call_info = defaultdict(lambda: defaultdict(list))
self._log_post_training_metrics_enabled = True
self._metric_info_artifact_need_update = defaultdict(lambda: False)
def should_log_post_training_metrics(self):
"""
Check whether we should run patching code for autologging post training metrics.
This checking should surround the whole patched code due to the safe guard checking,
See following note.
Note: It includes checking `_SparkTrainingSession.is_active()`, This is a safe guarding
for meta-estimator (e.g. CrossValidator/TrainValidationSplit) case:
running CrossValidator.fit, the nested `estimator.fit` will be called in parallel,
but, the _autolog_training_status is a global status without thread-safe lock protecting.
This safe guarding will prevent code run into this case.
"""
return not _SparkTrainingSession.is_active() and self._log_post_training_metrics_enabled
def disable_log_post_training_metrics(self):
class LogPostTrainingMetricsDisabledScope:
def __enter__(inner_self):
inner_self.old_status = self._log_post_training_metrics_enabled
self._log_post_training_metrics_enabled = False
def __exit__(inner_self, exc_type, exc_val, exc_tb):
self._log_post_training_metrics_enabled = inner_self.old_status
return LogPostTrainingMetricsDisabledScope()
@staticmethod
def get_run_id_for_model(model):
return getattr(model, "_mlflow_run_id", None)
@staticmethod
def is_metric_value_loggable(metric_value):
"""
check whether the specified `metric_value` is a numeric value which can be logged
as an MLflow metric.
"""
return isinstance(metric_value, (int, float, np.number)) and not isinstance(
metric_value, bool
)
def register_model(self, model, run_id):
"""
In `patched_fit`, we need register the model with the run_id used in `patched_fit`
So that in following metric autologging, the metric will be logged into the registered
run_id
"""
model._mlflow_run_id = run_id
@staticmethod
def gen_name_with_index(name, index):
assert index >= 0
if index == 0:
return name
else:
# Use '-' as the separator between name and index,
# The '-' is not valid character in python var name
# so it can prevent name conflicts after appending index.
return f"{name}-{index + 1}"
def register_prediction_input_dataset(self, model, eval_dataset):
"""
Register prediction input dataset into eval_dataset_info_map, it will do:
1. inspect eval dataset var name.
2. check whether eval_dataset_info_map already registered this eval dataset.
will check by object id.
3. register eval dataset with id.
4. return eval dataset name with index.
Note: this method include inspecting argument variable name.
So should be called directly from the "patched method", to ensure it capture
correct argument variable name.
"""
eval_dataset_name = _inspect_original_var_name(
eval_dataset, fallback_name="unknown_dataset"
)
eval_dataset_id = id(eval_dataset)
run_id = self.get_run_id_for_model(model)
registered_dataset_list = self._eval_dataset_info_map[run_id][eval_dataset_name]
for i, id_i in enumerate(registered_dataset_list):
if eval_dataset_id == id_i:
index = i
break
else:
index = len(registered_dataset_list)
if index == len(registered_dataset_list):
# register new eval dataset
registered_dataset_list.append(eval_dataset_id)
return self.gen_name_with_index(eval_dataset_name, index)
def register_prediction_result(self, run_id, eval_dataset_name, predict_result):
"""
Register the relationship
id(prediction_result) --> (eval_dataset_name, run_id)
into map `_pred_result_id_to_dataset_name_and_run_id`
"""
value = (eval_dataset_name, run_id)
prediction_result_id = id(predict_result)
self._pred_result_id_to_dataset_name_and_run_id[prediction_result_id] = value
def clean_id(id_):
_AUTOLOGGING_METRICS_MANAGER._pred_result_id_to_dataset_name_and_run_id.pop(id_, None)
# When the `predict_result` object being GCed, its ID may be reused, so register a finalizer
# to clear the ID from the dict for preventing wrong ID mapping.
weakref.finalize(predict_result, clean_id, prediction_result_id)
def get_run_id_and_dataset_name_for_evaluator_call(self, pred_result_dataset):
"""
Given a registered prediction result dataset object,
return a tuple of (run_id, eval_dataset_name)
"""
if id(pred_result_dataset) in self._pred_result_id_to_dataset_name_and_run_id:
dataset_name, run_id = self._pred_result_id_to_dataset_name_and_run_id[
id(pred_result_dataset)
]
return run_id, dataset_name
else:
return None, None
def gen_evaluator_info(self, evaluator):
"""
Generate evaluator information, include evaluator class name and params.
"""
class_name = _get_fully_qualified_class_name(evaluator)
param_map = _truncate_dict(
_get_param_map(evaluator), MAX_ENTITY_KEY_LENGTH, MAX_PARAM_VAL_LENGTH
)
return {"evaluator_class": class_name, "params": param_map}
def register_evaluator_call(self, run_id, metric_name, dataset_name, evaluator_info):
"""
Register the `Evaluator.evaluate` call, including register the evaluator information
(See doc of `gen_evaluator_info` method) into the corresponding run_id and metric_name
entry in the registry table.
"""
evaluator_call_info_list = self._evaluator_call_info[run_id][metric_name]
index = len(evaluator_call_info_list)
metric_name_with_index = self.gen_name_with_index(metric_name, index)
metric_key = f"{metric_name_with_index}_{dataset_name}"
evaluator_call_info_list.append((metric_key, evaluator_info))
# Set the flag to true, represent the metric info in this run need update.
# Later when `log_eval_metric` called, it will generate a new metric_info artifact
# and overwrite the old artifact.
self._metric_info_artifact_need_update[run_id] = True
return metric_key
def log_post_training_metric(self, run_id, key, value):
"""
Log the metric into the specified mlflow run.
and it will also update the metric_info artifact if needed.
"""
# Note: if the case log the same metric key multiple times,
# newer value will overwrite old value
client = MlflowClient()
client.log_metric(run_id=run_id, key=key, value=value)
if self._metric_info_artifact_need_update[run_id]:
evaluator_call_list = []
for v in self._evaluator_call_info[run_id].values():
evaluator_call_list.extend(v)
evaluator_call_list.sort(key=lambda x: x[0])
dict_to_log = OrderedDict(evaluator_call_list)
client.log_dict(run_id=run_id, dictionary=dict_to_log, artifact_file="metric_info.json")
self._metric_info_artifact_need_update[run_id] = False
# The global `_AutologgingMetricsManager` instance which holds information used in
# post-training metric autologging. See doc of class `_AutologgingMetricsManager` for details.
_AUTOLOGGING_METRICS_MANAGER = _AutologgingMetricsManager()
def _get_columns_with_unsupported_data_type(df):
from pyspark.ml.linalg import VectorUDT
from mlflow.types.schema import DataType
supported_spark_types = DataType.get_spark_types()
return [
field
for field in df.schema.fields
if (field.dataType not in supported_spark_types)
and not isinstance(field.dataType, VectorUDT)
]
def _check_or_set_model_prediction_column(spark_model, input_spark_df):
from pyspark.ml import PipelineModel
prediction_column = "prediction"
if isinstance(spark_model, PipelineModel) and spark_model.stages[-1].hasParam("outputCol"):
from mlflow.utils._spark_utils import _get_active_spark_session
spark = _get_active_spark_session()
# do a transform with an empty input DataFrame
# to get the schema of the transformed DataFrame
transformed_df = spark_model.transform(spark.createDataFrame([], input_spark_df.schema))
# Ensure prediction column doesn't already exist
if prediction_column not in transformed_df.columns:
# make sure predict work by default for Transformers
spark_model.stages[-1].setOutputCol(prediction_column)
return prediction_column
def _infer_spark_model_signature(spark_model, input_example_spark_df):
from mlflow.models import infer_signature
prediction_column = _check_or_set_model_prediction_column(spark_model, input_example_spark_df)
model_output = spark_model.transform(input_example_spark_df).select(prediction_column)
# TODO: Remove this once we support non-scalar spark data types
if unsupported_columns := _get_columns_with_unsupported_data_type(model_output):
_logger.warning(
"Model outputs contain unsupported Spark data types: "
f"{unsupported_columns}. Output schema is not be logged."
)
model_output = None
signature = infer_signature(input_example_spark_df, model_output)
if signature.outputs:
# We only have one prediction column output,
# convert it to unnamed output schema to keep consistent with old MLflow version.
signature.outputs.inputs[0].name = None
return signature
@autologging_integration(AUTOLOGGING_INTEGRATION_NAME)
def autolog(
    log_models=True,
    log_datasets=True,
    disable=False,
    exclusive=False,
    disable_for_unsupported_versions=False,
    silent=False,
    log_post_training_metrics=True,
    registered_model_name=None,
    log_input_examples=False,
    log_model_signatures=True,
    log_model_allowlist=None,
    extra_tags=None,
):
    """
    Enables (or disables) and configures autologging for pyspark ml estimators.
    This method is not threadsafe.
    This API requires Spark 3.0 or above.
    **When is autologging performed?**
    Autologging is performed when you call ``Estimator.fit`` except for estimators (featurizers)
    under ``pyspark.ml.feature``.
    **Logged information**
    **Parameters**
    - Parameters obtained by ``estimator.params``. If a param value is also an ``Estimator``,
    then params in the wrapped estimator will also be logged, the nested param key
    will be `{estimator_uid}.{param_name}`
    **Tags**
    - An estimator class name (e.g. "LinearRegression").
    - A fully qualified estimator class name
    (e.g. "pyspark.ml.regression.LinearRegression").
    .. _post training metrics:
    **Post training metrics**
    When users call evaluator APIs after model training, MLflow tries to capture the
    `Evaluator.evaluate` results and log them as MLflow metrics to the Run associated with
    the model. All pyspark ML evaluators are supported.
    For post training metrics autologging, the metric key format is:
    "{metric_name}[-{call_index}]_{dataset_name}"
    - The metric name is the name returned by `Evaluator.getMetricName()`
    - If multiple calls are made to the same pyspark ML evaluator metric, each subsequent call
    adds a "call_index" (starting from 2) to the metric key.
    - MLflow uses the prediction input dataset variable name as the "dataset_name" in the
    metric key. The "prediction input dataset variable" refers to the variable which was
    used as the `dataset` argument of `model.transform` call.
    Note: MLflow captures the "prediction input dataset" instance in the outermost call
    frame and fetches the variable name in the outermost call frame. If the "prediction
    input dataset" instance is an intermediate expression without a defined variable
    name, the dataset name is set to "unknown_dataset". If multiple "prediction input
    dataset" instances have the same variable name, then subsequent ones will append an
    index (starting from 2) to the inspected dataset name.
    **Limitations**
    - MLflow cannot find run information for other objects derived from a given prediction
    result (e.g. by doing some transformation on the prediction result dataset).
    **Artifacts**
    - An MLflow Model with the :py:mod:`mlflow.spark` flavor containing a fitted estimator
    (logged by :py:func:`mlflow.spark.log_model()`). Note that large models may not be
    autologged for performance and storage space considerations, and autologging for
    Pipelines and hyperparameter tuning meta-estimators (e.g. CrossValidator) is not yet
    supported.
    See ``log_models`` param below for details.
    - For post training metrics API calls, a "metric_info.json" artifact is logged. This is a
    JSON object whose keys are MLflow post training metric names
    (see "Post training metrics" section for the key format) and whose values are the
    corresponding evaluator information, including evaluator class name and evaluator params.
    **How does autologging work for meta estimators?**
    When a meta estimator (e.g. `Pipeline`_, `CrossValidator`_, `TrainValidationSplit`_,
    `OneVsRest`_)
    calls ``fit()``, it internally calls ``fit()`` on its child estimators. Autologging
    does NOT perform logging on these constituent ``fit()`` calls.
    A "estimator_info.json" artifact is logged, which includes a `hierarchy` entry
    describing the hierarchy of the meta estimator. The hierarchy includes expanded
    entries for all nested stages, such as nested pipeline stages.
    **Parameter search**
    In addition to recording the information discussed above, autologging for parameter
    search meta estimators (`CrossValidator`_ and `TrainValidationSplit`_) records child runs
    with metrics for each set of explored parameters, as well as artifacts and parameters
    for the best model and the best parameters (if available).
    For better readability, the "estimatorParamMaps" param in parameter search estimator
    will be recorded inside "estimator_info" artifact, see following description.
    Inside "estimator_info.json" artifact, in addition to the "hierarchy", records 2 more
    items: "tuning_parameter_map_list": a list contains all parameter maps used in tuning,
    and "tuned_estimator_parameter_map": the parameter map of the tuned estimator.
    Records a "best_parameters.json" artifacts, contains the best parameter it searched out.
    Records a "search_results.csv" artifacts, contains search results, it is a table with
    2 columns: "params" and "metric".
    .. _OneVsRest:
    https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.classification.OneVsRest.html#pyspark.ml.classification.OneVsRest
    .. _Pipeline:
    https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.Pipeline.html#pyspark.ml.Pipeline
    .. _CrossValidator:
    https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.tuning.CrossValidator.html#pyspark.ml.tuning.CrossValidator
    .. _TrainValidationSplit:
    https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.tuning.TrainValidationSplit.html#pyspark.ml.tuning.TrainValidationSplit
    Args:
        log_models: If ``True``, if trained models are in allowlist, they are logged as MLflow
            model artifacts. If ``False``, trained models are not logged.
            Note: the built-in allowlist excludes some models (e.g. ALS models) which
            can be large. To specify a custom allowlist, create a file containing a
            newline-delimited list of fully-qualified estimator classnames, and set
            the "spark.mlflow.pysparkml.autolog.logModelAllowlistFile" Spark config
            to the path of your allowlist file.
        log_datasets: If ``True``, dataset information is logged to MLflow Tracking.
            If ``False``, dataset information is not logged.
        disable: If ``True``, disables the PySpark ML autologging integration. If ``False``,
            enables the pyspark ML autologging integration.
        exclusive: If ``True``, autologged content is not logged to user-created fluent runs.
            If ``False``, autologged content is logged to the active fluent run,
            which may be user-created.
        disable_for_unsupported_versions: If ``True``, disable autologging for versions of
            pyspark that have not been tested against this version of the MLflow
            client or are incompatible.
        silent: If ``True``, suppress all event logs and warnings from MLflow during pyspark ML
            autologging. If ``False``, show all events and warnings during pyspark ML
            autologging.
        log_post_training_metrics: If ``True``, post training metrics are logged. Defaults to
            ``True``. See the `post training metrics`_ section for more
            details.
        registered_model_name: If given, each time a model is trained, it is registered as a
            new model version of the registered model with this name.
            The registered model is created if it does not already exist.
        log_input_examples: If ``True``, input examples from training datasets are collected and
            logged along with pyspark ml model artifacts during training. If
            ``False``, input examples are not logged.
        log_model_signatures: If ``True``,
            :py:class:`ModelSignatures <mlflow.models.ModelSignature>`
            describing model inputs and outputs are collected and logged along
            with spark ml pipeline/estimator artifacts during training.
            If ``False`` signatures are not logged.
            .. warning::
                Currently, only scalar Spark data types are supported. If
                model inputs/outputs contain non-scalar Spark data types such
                as ``pyspark.ml.linalg.Vector``, signatures are not logged.
        log_model_allowlist: If given, it overrides the default log model allowlist in mlflow.
            This takes precedence over the spark configuration of
            "spark.mlflow.pysparkml.autolog.logModelAllowlistFile".
            **The default log model allowlist in mlflow**
            .. literalinclude:: ../../../../mlflow/pyspark/ml/log_model_allowlist.txt
               :language: text
        extra_tags: A dictionary of extra tags to set on each managed run created by autologging.
    """
    from pyspark.ml.base import Estimator, Model
    from pyspark.ml.evaluation import Evaluator
    from mlflow.tracking.context import registry as context_registry
    # An explicit allowlist argument wins over the one read from Spark config.
    global _log_model_allowlist
    if log_model_allowlist:
        _log_model_allowlist = {model.strip() for model in log_model_allowlist}
    else:
        _log_model_allowlist = _read_log_model_allowlist()
    def _log_pretraining_metadata(estimator, params, input_df):
        """Log params, tags, estimator hierarchy info and (optionally) the
        training dataset to the active run before ``fit`` executes.
        """
        if params and isinstance(params, dict):
            estimator = estimator.copy(params)
        autologging_metadata = _gen_estimator_metadata(estimator)
        artifact_dict = {}
        param_map = _get_instance_param_map(estimator, autologging_metadata.uid_to_indexed_name_map)
        if _should_log_hierarchy(estimator):
            artifact_dict["hierarchy"] = autologging_metadata.hierarchy
        for param_search_estimator in autologging_metadata.param_search_estimators:
            param_search_estimator_name = (
                f"{autologging_metadata.uid_to_indexed_name_map[param_search_estimator.uid]}"
            )
            artifact_dict[param_search_estimator_name] = {}
            artifact_dict[param_search_estimator_name]["tuning_parameter_map_list"] = (
                _get_tuning_param_maps(
                    param_search_estimator, autologging_metadata.uid_to_indexed_name_map
                )
            )
            artifact_dict[param_search_estimator_name]["tuned_estimator_parameter_map"] = (
                _get_instance_param_map_recursively(
                    param_search_estimator.getEstimator(),
                    1,
                    autologging_metadata.uid_to_indexed_name_map,
                )
            )
        if artifact_dict:
            mlflow.log_dict(artifact_dict, artifact_file="estimator_info.json")
        _log_estimator_params(param_map)
        mlflow.set_tags(_get_estimator_info_tags(estimator))
        if log_datasets:
            # Dataset logging is best-effort: failures must not abort training.
            try:
                context_tags = context_registry.resolve_tags()
                code_source = CodeDatasetSource(context_tags)
                dataset = SparkDataset(
                    df=input_df,
                    source=code_source,
                )
                mlflow.log_input(dataset, "train")
            except Exception as e:
                _logger.warning(
                    "Failed to log training dataset information to MLflow Tracking. Reason: %s", e
                )
    def _log_posttraining_metadata(estimator, spark_model, params, input_df):
        """Log parameter-search child runs/artifacts and (if allowed) the
        fitted model after ``fit`` completes.
        """
        if _is_parameter_search_estimator(estimator):
            try:
                # Fetch environment-specific tags (e.g., user and source) to ensure that lineage
                # information is consistent with the parent run
                child_tags = context_registry.resolve_tags()
                child_tags.update({MLFLOW_AUTOLOGGING: AUTOLOGGING_INTEGRATION_NAME})
                _create_child_runs_for_parameter_search(
                    parent_estimator=estimator,
                    parent_model=spark_model,
                    parent_run=mlflow.active_run(),
                    child_tags=child_tags,
                )
            except Exception:
                msg = (
                    "Encountered exception during creation of child runs for parameter search."
                    f" Child runs may be missing. Exception: {traceback.format_exc()}"
                )
                _logger.warning(msg)
            estimator_param_maps = _get_tuning_param_maps(
                estimator, estimator._autologging_metadata.uid_to_indexed_name_map
            )
            metrics_dict, best_index = _get_param_search_metrics_and_best_index(
                estimator, spark_model
            )
            _log_parameter_search_results_as_artifact(
                estimator_param_maps, metrics_dict, mlflow.active_run().info.run_id
            )
            # Log best_param_map as JSON artifact
            best_param_map = estimator_param_maps[best_index]
            mlflow.log_dict(best_param_map, artifact_file="best_parameters.json")
            # Log best_param_map as autologging parameters as well
            _log_estimator_params(
                {
                    f"best_{param_name}": param_value
                    for param_name, param_value in best_param_map.items()
                }
            )
        if log_models:
            if _should_log_model(spark_model):
                from mlflow.pyspark.ml._autolog import (
                    cast_spark_df_with_vector_to_array,
                    get_feature_cols,
                )
                def _get_input_example_spark_df():
                    # Restrict the example to the feature columns actually used.
                    feature_cols = list(get_feature_cols(input_df, spark_model))
                    return input_df.select(feature_cols).limit(INPUT_EXAMPLE_SAMPLE_ROWS)
                def _infer_model_signature(input_example_slice):
                    return _infer_spark_model_signature(spark_model, input_example_slice)
                # TODO: Remove this once we support non-scalar spark data types
                nonlocal log_model_signatures
                if log_model_signatures:
                    if unsupported_columns := _get_columns_with_unsupported_data_type(input_df):
                        _logger.warning(
                            "Model inputs contain unsupported Spark data types: "
                            f"{unsupported_columns}. Model signature is not logged."
                        )
                        log_model_signatures = False
                # `_infer_spark_model_signature` mutates the model. Copy the model to preserve the
                # original model.
                try:
                    spark_model = spark_model.copy()
                except Exception:
                    _logger.debug(
                        "Failed to copy the model, using the original model.", exc_info=True
                    )
                input_example_spark_df, signature = resolve_input_example_and_signature(
                    _get_input_example_spark_df,
                    _infer_model_signature,
                    log_input_examples,
                    log_model_signatures,
                    _logger,
                )
                if input_example_spark_df:
                    input_example = cast_spark_df_with_vector_to_array(
                        input_example_spark_df
                    ).toPandas()
                else:
                    input_example = None
                mlflow.spark.log_model(
                    spark_model,
                    "model",
                    registered_model_name=registered_model_name,
                    input_example=input_example,
                    signature=signature,
                )
                if _is_parameter_search_model(spark_model):
                    mlflow.spark.log_model(
                        spark_model.bestModel,
                        "best_model",
                    )
            else:
                _logger.warning(_get_warning_msg_for_skip_log_model(spark_model))
    def fit_mlflow(original, self, *args, **kwargs):
        """Wrap a single ``fit`` call with pre/post-training metadata logging."""
        params = get_method_call_arg_value(1, "params", None, args, kwargs)
        # Do not perform autologging on direct calls to fit() for featurizers.
        # Note that featurizers will be autologged when they're fit as part of a Pipeline.
        if _get_fully_qualified_class_name(self).startswith("pyspark.ml.feature."):
            return original(self, *args, **kwargs)
        elif isinstance(params, (list, tuple)):
            # skip the case params is a list or tuple, this case it will call
            # fitMultiple and return a model iterator
            _logger.warning(_get_warning_msg_for_fit_call_with_a_list_of_params(self))
            return original(self, *args, **kwargs)
        else:
            # we need generate estimator param map so we call `self.copy(params)` to construct
            # an estimator with the extra params.
            from pyspark.storagelevel import StorageLevel
            estimator = self.copy(params) if params is not None else self
            # Cache the training DataFrame since it is consumed both by the
            # metadata logging above and the actual fit below.
            input_training_df = args[0].persist(StorageLevel.MEMORY_AND_DISK)
            _log_pretraining_metadata(estimator, params, input_training_df)
            spark_model = original(self, *args, **kwargs)
            _log_posttraining_metadata(estimator, spark_model, params, input_training_df)
            input_training_df.unpersist()
            return spark_model
    def patched_fit(original, self, *args, **kwargs):
        """Patched ``Estimator.fit``: autolog only the outermost fit call of a
        training session (meta-estimators' nested fits are skipped).
        """
        should_log_post_training_metrics = (
            log_post_training_metrics
            and _AUTOLOGGING_METRICS_MANAGER.should_log_post_training_metrics()
        )
        with _SparkTrainingSession(estimator=self, allow_children=False) as t:
            if t.should_log():
                with _AUTOLOGGING_METRICS_MANAGER.disable_log_post_training_metrics():
                    fit_result = fit_mlflow(original, self, *args, **kwargs)
                # In some cases the `fit_result` may be an iterator of spark models.
                if should_log_post_training_metrics and isinstance(fit_result, Model):
                    _AUTOLOGGING_METRICS_MANAGER.register_model(
                        fit_result, mlflow.active_run().info.run_id
                    )
                return fit_result
            else:
                return original(self, *args, **kwargs)
    def patched_transform(original, self, *args, **kwargs):
        """Patched ``Model.transform``: remember which run/dataset produced a
        prediction so later evaluator calls can be attributed to that run.
        """
        run_id = _AUTOLOGGING_METRICS_MANAGER.get_run_id_for_model(self)
        if _AUTOLOGGING_METRICS_MANAGER.should_log_post_training_metrics() and run_id:
            predict_result = original(self, *args, **kwargs)
            eval_dataset = get_method_call_arg_value(0, "dataset", None, args, kwargs)
            eval_dataset_name = _AUTOLOGGING_METRICS_MANAGER.register_prediction_input_dataset(
                self, eval_dataset
            )
            _AUTOLOGGING_METRICS_MANAGER.register_prediction_result(
                run_id, eval_dataset_name, predict_result
            )
            return predict_result
        else:
            return original(self, *args, **kwargs)
    def patched_evaluate(original, self, *args, **kwargs):
        """Patched ``Evaluator.evaluate``: log the resulting metric (and,
        optionally, the evaluation dataset) to the run that produced the
        prediction dataset being evaluated.
        """
        if _AUTOLOGGING_METRICS_MANAGER.should_log_post_training_metrics():
            with _AUTOLOGGING_METRICS_MANAGER.disable_log_post_training_metrics():
                metric = original(self, *args, **kwargs)
            if _AUTOLOGGING_METRICS_MANAGER.is_metric_value_loggable(metric):
                params = get_method_call_arg_value(1, "params", None, args, kwargs)
                # we need generate evaluator param map so we call `self.copy(params)` to construct
                # an evaluator with the extra evaluation params.
                evaluator = self.copy(params) if params is not None else self
                metric_name = evaluator.getMetricName()
                evaluator_info = _AUTOLOGGING_METRICS_MANAGER.gen_evaluator_info(evaluator)
                pred_result_dataset = get_method_call_arg_value(0, "dataset", None, args, kwargs)
                (
                    run_id,
                    dataset_name,
                ) = _AUTOLOGGING_METRICS_MANAGER.get_run_id_and_dataset_name_for_evaluator_call(
                    pred_result_dataset
                )
                if run_id and dataset_name:
                    metric_key = _AUTOLOGGING_METRICS_MANAGER.register_evaluator_call(
                        run_id, metric_name, dataset_name, evaluator_info
                    )
                    _AUTOLOGGING_METRICS_MANAGER.log_post_training_metric(
                        run_id, metric_key, metric
                    )
                    if log_datasets:
                        # Best-effort dataset logging; never fail evaluation.
                        try:
                            context_tags = context_registry.resolve_tags()
                            code_source = CodeDatasetSource(context_tags)
                            dataset = SparkDataset(
                                df=pred_result_dataset,
                                source=code_source,
                            )
                            tags = [InputTag(key=MLFLOW_DATASET_CONTEXT, value="eval")]
                            dataset_input = DatasetInput(
                                dataset=dataset._to_mlflow_entity(), tags=tags
                            )
                            client = MlflowClient()
                            client.log_inputs(run_id, [dataset_input])
                        except Exception as e:
                            _logger.warning(
                                "Failed to log evaluation dataset information to MLflow Tracking. "
                                "Reason: %s",
                                e,
                            )
            return metric
        else:
            return original(self, *args, **kwargs)
    # Install the patches. Only `fit` manages MLflow runs; transform/evaluate
    # piggy-back on runs recorded by the metrics manager.
    safe_patch(
        AUTOLOGGING_INTEGRATION_NAME,
        Estimator,
        "fit",
        patched_fit,
        manage_run=True,
        extra_tags=extra_tags,
    )
    if log_post_training_metrics:
        safe_patch(
            AUTOLOGGING_INTEGRATION_NAME,
            Model,
            "transform",
            patched_transform,
            manage_run=False,
        )
    safe_patch(
        AUTOLOGGING_INTEGRATION_NAME,
        Evaluator,
        "evaluate",
        patched_evaluate,
        manage_run=False,
    )
| _AutologgingMetricsManager |
python | explosion__spaCy | spacy/lang/ne/__init__.py | {
"start": 216,
"end": 309
} | class ____(Language):
lang = "ne"
Defaults = NepaliDefaults
__all__ = ["Nepali"]
| Nepali |
python | apache__airflow | providers/standard/tests/unit/standard/operators/test_smooth.py | {
"start": 910,
"end": 1195
} | class ____:
def test_execute(self, caplog):
op = SmoothOperator(task_id="test")
op.execute(None)
with caplog.at_level(logging.INFO):
assert "Enjoy Sade - Smooth Operator: https://www.youtube.com/watch?v=4TYv2PhG89A" in caplog.text
| TestSmoothOperator |
python | readthedocs__readthedocs.org | readthedocs/api/v3/filters.py | {
"start": 1002,
"end": 1396
} | class ____(filters.FilterSet):
slug = filters.CharFilter(lookup_expr="icontains")
verbose_name = filters.CharFilter(lookup_expr="icontains")
class Meta:
model = Version
fields = [
"verbose_name",
"privacy_level",
"active",
"built",
"uploaded",
"slug",
"type",
]
| VersionFilter |
python | getsentry__sentry | tests/sentry/middleware/test_staff.py | {
"start": 337,
"end": 687
} | class ____(Endpoint):
permission_classes = (AllowAny,)
def get(self, request):
return Response(status=200)
urlpatterns = [
re_path(
r"^api/0/test/$",
APITestEndpoint.as_view(),
name="test-endpoint",
),
]
@no_silo_test
@override_settings(ROOT_URLCONF=__name__, SENTRY_SELF_HOSTED=False)
| APITestEndpoint |
python | python-openxml__python-docx | tests/image/test_tiff.py | {
"start": 14292,
"end": 14588
} | class ____:
def it_can_parse_a_rational_IFD_entry(self):
bytes_ = b"\x00\x00\x00\x2a\x00\x00\x00\x54"
stream_rdr = StreamReader(io.BytesIO(bytes_), BIG_ENDIAN)
val = _RationalIfdEntry._parse_value(stream_rdr, None, 1, 0)
assert val == 0.5
| Describe_RationalIfdEntry |
python | numpy__numpy | numpy/tests/test_ctypeslib.py | {
"start": 9889,
"end": 12832
} | class ____:
""" Test conversion from dtypes to ctypes types """
def test_scalar(self):
dt = np.dtype('<u2')
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, ctypes.c_uint16.__ctype_le__)
dt = np.dtype('>u2')
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, ctypes.c_uint16.__ctype_be__)
dt = np.dtype('u2')
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, ctypes.c_uint16)
@pytest.mark.thread_unsafe(reason="some sort of data race? (gh-29943)")
def test_subarray(self):
dt = np.dtype((np.int32, (2, 3)))
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, 2 * (3 * ctypes.c_int32))
def test_structure(self):
dt = np.dtype([
('a', np.uint16),
('b', np.uint32),
])
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Structure))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('b', ctypes.c_uint32),
])
@pytest.mark.thread_unsafe(reason="some sort of data race? (gh-29943)")
def test_structure_aligned(self):
dt = np.dtype([
('a', np.uint16),
('b', np.uint32),
], align=True)
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Structure))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('', ctypes.c_char * 2), # padding
('b', ctypes.c_uint32),
])
def test_union(self):
dt = np.dtype({
'names': ['a', 'b'],
'offsets': [0, 0],
'formats': [np.uint16, np.uint32]
})
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Union))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('b', ctypes.c_uint32),
])
@pytest.mark.thread_unsafe(reason="some sort of data race? (gh-29943)")
def test_padded_union(self):
dt = np.dtype({
'names': ['a', 'b'],
'offsets': [0, 0],
'formats': [np.uint16, np.uint32],
'itemsize': 5,
})
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Union))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('b', ctypes.c_uint32),
('', ctypes.c_char * 5), # padding
])
def test_overlapping(self):
dt = np.dtype({
'names': ['a', 'b'],
'offsets': [0, 2],
'formats': [np.uint32, np.uint32]
})
assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt)
| TestAsCtypesType |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/contrib/telnet/server.py | {
"start": 2028,
"end": 3120
} | class ____:
"""
Wrapper around socket which provides `write` and `flush` methods for the
Vt100_Output output.
"""
def __init__(self, connection: socket.socket, encoding: str) -> None:
self._encoding = encoding
self._connection = connection
self._errors = "strict"
self._buffer: list[bytes] = []
self._closed = False
def write(self, data: str) -> None:
data = data.replace("\n", "\r\n")
self._buffer.append(data.encode(self._encoding, errors=self._errors))
self.flush()
def isatty(self) -> bool:
return True
def flush(self) -> None:
try:
if not self._closed:
self._connection.send(b"".join(self._buffer))
except OSError as e:
logger.warning(f"Couldn't send data over socket: {e}")
self._buffer = []
def close(self) -> None:
self._closed = True
@property
def encoding(self) -> str:
return self._encoding
@property
def errors(self) -> str:
return self._errors
| _ConnectionStdout |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/trad_relationship_uselist.py | {
"start": 656,
"end": 1285
} | class ____(Base):
__tablename__ = "user"
id = mapped_column(Integer, primary_key=True)
name = mapped_column(String, nullable=False)
addresses_style_one: Mapped[List["Address"]] = relationship(
"Address", uselist=True
)
addresses_style_two: Mapped[Set["Address"]] = relationship(
"Address", collection_class=set
)
addresses_style_three = relationship("Address", collection_class=set)
addresses_style_three_cast = relationship(
cast(Type["Address"], "Address"), collection_class=set
)
addresses_style_four = relationship("Address", collection_class=list)
| User |
python | kamyu104__LeetCode-Solutions | Python/minimum-fuel-cost-to-report-to-the-capital.py | {
"start": 1310,
"end": 1944
} | class ____(object):
def minimumFuelCost(self, roads, seats):
"""
:type roads: List[List[int]]
:type seats: int
:rtype: int
"""
def ceil_divide(a, b):
return (a+b-1)//b
def dfs(u, p, d):
cnt = 1+sum(dfs(v, u, d+1) for v in adj[u] if v != p)
if d:
result[0] += ceil_divide(cnt, seats)
return cnt
adj = [[] for _ in xrange(len(roads)+1)]
for u, v in roads:
adj[u].append(v)
adj[v].append(u)
result = [0]
dfs(0, -1, 0)
return result[0]
| Solution |
python | astropy__astropy | astropy/time/core.py | {
"start": 12153,
"end": 14422
} | class ____(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
def _represent_as_dict(self, attrs=None):
"""Get the values for the parent ``attrs`` and return as a dict.
By default, uses '_represent_as_dict_attrs'.
"""
map = super()._represent_as_dict(attrs=attrs)
# TODO: refactor these special cases into the TimeFormat classes?
# The datetime64 format requires special handling for ECSV (see #12840).
# The `value` has numpy dtype datetime64 but this is not an allowed
# datatype for ECSV. Instead convert to a string representation.
if (
self._serialize_context == "ecsv"
and map["format"] == "datetime64"
and "value" in map
):
map["value"] = map["value"].astype("U")
# The datetime format is serialized as ISO with no loss of precision.
if map["format"] == "datetime" and "value" in map:
map["value"] = np.vectorize(lambda x: x.isoformat())(map["value"])
return map
def _construct_from_dict(self, map):
# See comment above. May need to convert string back to datetime64.
# Note that _serialize_context is not set here so we just look for the
# string value directly.
if (
map["format"] == "datetime64"
and "value" in map
and map["value"].dtype.kind == "U"
):
map["value"] = map["value"].astype("datetime64")
# Convert back to datetime objects for datetime format.
if map["format"] == "datetime" and "value" in map:
from datetime import datetime
map["value"] = np.vectorize(datetime.fromisoformat)(map["value"])
delta_ut1_utc = map.pop("_delta_ut1_utc", None)
delta_tdb_tt = map.pop("_delta_tdb_tt", None)
out = super()._construct_from_dict(map)
if delta_ut1_utc is not None:
out._delta_ut1_utc = delta_ut1_utc
if delta_tdb_tt is not None:
out._delta_tdb_tt = delta_tdb_tt
return out
| TimeInfo |
python | pypa__pipenv | pipenv/patched/pip/_internal/operations/prepare.py | {
"start": 3154,
"end": 7401
} | class ____:
path: str
content_type: Optional[str] = None
def __post_init__(self) -> None:
if self.content_type is None:
# Try to guess the file's MIME type. If the system MIME tables
# can't be loaded, give up.
try:
self.content_type = mimetypes.guess_type(self.path)[0]
except OSError:
pass
def get_http_url(
link: Link,
download: Downloader,
download_dir: Optional[str] = None,
hashes: Optional[Hashes] = None,
) -> File:
temp_dir = TempDirectory(kind="unpack", globally_managed=True)
# If a download dir is specified, is the file already downloaded there?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link, download_dir, hashes)
if already_downloaded_path:
from_path = already_downloaded_path
content_type = None
else:
# let's download to a tmp dir
from_path, content_type = download(link, temp_dir.path)
if hashes:
hashes.check_against_path(from_path)
return File(from_path, content_type)
def get_file_url(
link: Link, download_dir: Optional[str] = None, hashes: Optional[Hashes] = None
) -> File:
"""Get file and optionally check its hash."""
# If a download dir is specified, is the file already there and valid?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link, download_dir, hashes)
if already_downloaded_path:
from_path = already_downloaded_path
else:
from_path = link.file_path
# If --require-hashes is off, `hashes` is either empty, the
# link's embedded hash, or MissingHashes; it is required to
# match. If --require-hashes is on, we are satisfied by any
# hash in `hashes` matching: a URL-based or an option-based
# one; no internet-sourced hash will be in `hashes`.
if hashes:
hashes.check_against_path(from_path)
return File(from_path, None)
def unpack_url(
link: Link,
location: str,
download: Downloader,
verbosity: int,
download_dir: Optional[str] = None,
hashes: Optional[Hashes] = None,
) -> Optional[File]:
"""Unpack link into location, downloading if required.
:param hashes: A Hashes object, one of whose embedded hashes must match,
or HashMismatch will be raised. If the Hashes is empty, no matches are
required, and unhashable types of requirements (like VCS ones, which
would ordinarily raise HashUnsupported) are allowed.
"""
# non-editable vcs urls
if link.is_vcs:
unpack_vcs_link(link, location, verbosity=verbosity)
return None
assert not link.is_existing_dir()
# file urls
if link.is_file:
file = get_file_url(link, download_dir, hashes=hashes)
# http urls
else:
file = get_http_url(
link,
download,
download_dir,
hashes=hashes,
)
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies, except wheels
if not link.is_wheel:
unpack_file(file.path, location, file.content_type)
return file
def _check_download_dir(
link: Link,
download_dir: str,
hashes: Optional[Hashes],
warn_on_hash_mismatch: bool = True,
) -> Optional[str]:
"""Check download_dir for previously downloaded file with correct hash
If a correct file is found return its path else None
"""
download_path = os.path.join(download_dir, link.filename)
if not os.path.exists(download_path):
return None
# If already downloaded, does its hash match?
logger.info("File was already downloaded %s", download_path)
if hashes:
try:
hashes.check_against_path(download_path)
except HashMismatch:
if warn_on_hash_mismatch:
logger.warning(
"Previously-downloaded file %s has bad hash. Re-downloading.",
download_path,
)
os.unlink(download_path)
return None
return download_path
| File |
python | coleifer__peewee | tests/models.py | {
"start": 1147,
"end": 1257
} | class ____(TestModel):
name = CharField(primary_key=True)
is_neutral = BooleanField(default=False)
| Color |
python | pytorch__pytorch | test/test_utils.py | {
"start": 33670,
"end": 35230
} | class ____(TestCase):
def test_basic(self):
source = """\
def f(x):
def g(x):
raise RuntimeError # HEYA
x = x * 3
return g(x) + 1
"""
out: dict[str, Any] = {}
scope = {"__compile_source__": source}
exec(source, scope, out)
try:
with report_compile_source_on_error():
out["f"](1)
except RuntimeError as e:
self.assertIn("HEYA", "".join(traceback.format_tb(e.__traceback__)))
def test_format_traceback_short(self):
try:
raise RuntimeError
except RuntimeError as e:
self.assertRegex(
format_traceback_short(e.__traceback__),
r".*test_utils.py:\d+ in test_format_traceback_short",
)
def test_captured_traceback(self):
self.assertIn(
"test_captured_traceback", "".join(CapturedTraceback.extract().format())
)
def test_captured_traceback_format_all(self):
rs = CapturedTraceback.format_all(
[CapturedTraceback.extract(), CapturedTraceback.extract()]
)
self.assertEqual(len(rs), 2)
self.assertIn("test_captured_traceback_format_all", "".join(rs[0]))
def test_captured_traceback_format_all_cached(self):
tb = CapturedTraceback.extract()
tb.format() # cached
rs = CapturedTraceback.format_all([tb, CapturedTraceback.extract()])
self.assertEqual(len(rs), 2)
self.assertIn("test_captured_traceback_format_all", "".join(rs[0]))
| TestTraceback |
python | huggingface__transformers | src/transformers/models/trocr/modeling_trocr.py | {
"start": 6231,
"end": 12941
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper."""
def __init__(
self,
config,
embed_dim: int,
num_heads: int,
kdim: Optional[int] = None,
vdim: Optional[int] = None,
dropout: Optional[float] = 0.0,
is_decoder: Optional[bool] = False,
bias: Optional[bool] = True,
is_cross_attention: Optional[bool] = False,
layer_idx: Optional[bool] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if not (self.head_dim * num_heads == self.embed_dim):
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.layer_idx = layer_idx
self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = query_states.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
query_states = query_states.reshape(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped
| TrOCRAttention |
python | pytorch__pytorch | test/test_overrides.py | {
"start": 45697,
"end": 46009
} | class ____(TestCase):
""" Regression test for gh-47090 """
def test_max(self):
x = torch.tensor([1, 2])
xs = x.as_subclass(SubTensor2)
r = torch.max(x, dim=0)
rs = torch.max(xs, dim=0)
self.assertEqual(type(r), type(rs))
self.assertEqual(r, rs)
| TestNamedTuple |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/resolver.py | {
"start": 3297,
"end": 11808
} | class ____:
DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
yaml_implicit_resolvers = {} # type: Dict[Any, Any]
yaml_path_resolvers = {} # type: Dict[Any, Any]
def __init__(self, loadumper=None):
# type: (Any, Any) -> None
self.loadumper = loadumper
if self.loadumper is not None and getattr(self.loadumper, '_resolver', None) is None:
self.loadumper._resolver = self.loadumper
self._loader_version = None # type: Any
self.resolver_exact_paths = [] # type: List[Any]
self.resolver_prefix_paths = [] # type: List[Any]
@property
def parser(self):
# type: () -> Any
if self.loadumper is not None:
if hasattr(self.loadumper, 'typ'):
return self.loadumper.parser
return self.loadumper._parser
return None
@classmethod
def add_implicit_resolver_base(cls, tag, regexp, first):
# type: (Any, Any, Any) -> None
if 'yaml_implicit_resolvers' not in cls.__dict__:
# deepcopy doesn't work here
cls.yaml_implicit_resolvers = dict(
(k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers
)
if first is None:
first = [None]
for ch in first:
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
@classmethod
def add_implicit_resolver(cls, tag, regexp, first):
# type: (Any, Any, Any) -> None
if 'yaml_implicit_resolvers' not in cls.__dict__:
# deepcopy doesn't work here
cls.yaml_implicit_resolvers = dict(
(k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers
)
if first is None:
first = [None]
for ch in first:
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
implicit_resolvers.append(([(1, 2), (1, 1)], tag, regexp, first))
# @classmethod
# def add_implicit_resolver(cls, tag, regexp, first):
@classmethod
def add_path_resolver(cls, tag, path, kind=None):
# type: (Any, Any, Any) -> None
# Note: `add_path_resolver` is experimental. The API could be changed.
# `new_path` is a pattern that is matched against the path from the
# root to the node that is being considered. `node_path` elements are
# tuples `(node_check, index_check)`. `node_check` is a node class:
# `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
# matches any kind of a node. `index_check` could be `None`, a boolean
# value, a string value, or a number. `None` and `False` match against
# any _value_ of sequence and mapping nodes. `True` matches against
# any _key_ of a mapping node. A string `index_check` matches against
# a mapping value that corresponds to a scalar key which content is
# equal to the `index_check` value. An integer `index_check` matches
# against a sequence value with the index equal to `index_check`.
if 'yaml_path_resolvers' not in cls.__dict__:
cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
new_path = [] # type: List[Any]
for element in path:
if isinstance(element, (list, tuple)):
if len(element) == 2:
node_check, index_check = element
elif len(element) == 1:
node_check = element[0]
index_check = True
else:
raise ResolverError(
_F('Invalid path element: {element!s}', element=element)
)
else:
node_check = None
index_check = element
if node_check is str:
node_check = ScalarNode
elif node_check is list:
node_check = SequenceNode
elif node_check is dict:
node_check = MappingNode
elif (
node_check not in [ScalarNode, SequenceNode, MappingNode]
and not isinstance(node_check, str)
and node_check is not None
):
raise ResolverError(
_F('Invalid node checker: {node_check!s}', node_check=node_check)
)
if not isinstance(index_check, (str, int)) and index_check is not None:
raise ResolverError(
_F('Invalid index checker: {index_check!s}', index_check=index_check)
)
new_path.append((node_check, index_check))
if kind is str:
kind = ScalarNode
elif kind is list:
kind = SequenceNode
elif kind is dict:
kind = MappingNode
elif kind not in [ScalarNode, SequenceNode, MappingNode] and kind is not None:
raise ResolverError(_F('Invalid node kind: {kind!s}', kind=kind))
cls.yaml_path_resolvers[tuple(new_path), kind] = tag
def descend_resolver(self, current_node, current_index):
# type: (Any, Any) -> None
if not self.yaml_path_resolvers:
return
exact_paths = {}
prefix_paths = []
if current_node:
depth = len(self.resolver_prefix_paths)
for path, kind in self.resolver_prefix_paths[-1]:
if self.check_resolver_prefix(depth, path, kind, current_node, current_index):
if len(path) > depth:
prefix_paths.append((path, kind))
else:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
for path, kind in self.yaml_path_resolvers:
if not path:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
prefix_paths.append((path, kind))
self.resolver_exact_paths.append(exact_paths)
self.resolver_prefix_paths.append(prefix_paths)
def ascend_resolver(self):
# type: () -> None
if not self.yaml_path_resolvers:
return
self.resolver_exact_paths.pop()
self.resolver_prefix_paths.pop()
def check_resolver_prefix(self, depth, path, kind, current_node, current_index):
# type: (int, Any, Any, Any, Any) -> bool
node_check, index_check = path[depth - 1]
if isinstance(node_check, str):
if current_node.tag != node_check:
return False
elif node_check is not None:
if not isinstance(current_node, node_check):
return False
if index_check is True and current_index is not None:
return False
if (index_check is False or index_check is None) and current_index is None:
return False
if isinstance(index_check, str):
if not (
isinstance(current_index, ScalarNode) and index_check == current_index.value
):
return False
elif isinstance(index_check, int) and not isinstance(index_check, bool):
if index_check != current_index:
return False
return True
def resolve(self, kind, value, implicit):
# type: (Any, Any, Any) -> Any
if kind is ScalarNode and implicit[0]:
if value == "":
resolvers = self.yaml_implicit_resolvers.get("", [])
else:
resolvers = self.yaml_implicit_resolvers.get(value[0], [])
resolvers += self.yaml_implicit_resolvers.get(None, [])
for tag, regexp in resolvers:
if regexp.match(value):
return tag
implicit = implicit[1]
if bool(self.yaml_path_resolvers):
exact_paths = self.resolver_exact_paths[-1]
if kind in exact_paths:
return exact_paths[kind]
if None in exact_paths:
return exact_paths[None]
if kind is ScalarNode:
return self.DEFAULT_SCALAR_TAG
elif kind is SequenceNode:
return self.DEFAULT_SEQUENCE_TAG
elif kind is MappingNode:
return self.DEFAULT_MAPPING_TAG
@property
def processing_version(self):
# type: () -> Any
return None
| BaseResolver |
python | huggingface__transformers | src/transformers/models/sam2/configuration_sam2.py | {
"start": 6933,
"end": 11236
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Sam2VisionModel`]. It is used to instantiate a SAM
vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
defaults will yield a similar configuration to that of SAM 2.1 Hiera-tiny
[facebook/sam2.1-hiera-tiny](https://huggingface.co/facebook/sam2.1-hiera-tiny) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*):
Configuration for the vision backbone. This is used to instantiate the backbone using
`AutoModel.from_config`.
backbone_channel_list (`List[int]`, *optional*, defaults to `[768, 384, 192, 96]`):
The list of channel dimensions for the backbone.
backbone_feature_sizes (`List[List[int]]`, *optional*, defaults to `[[256, 256], [128, 128], [64, 64]]`):
The spatial sizes of the feature maps from the backbone.
fpn_hidden_size (`int`, *optional*, defaults to 256):
The hidden dimension of the FPN.
fpn_kernel_size (`int`, *optional*, defaults to 1):
The kernel size for the convolutions in the neck.
fpn_stride (`int`, *optional*, defaults to 1):
The stride for the convolutions in the neck.
fpn_padding (`int`, *optional*, defaults to 0):
The padding for the convolutions in the neck.
fpn_top_down_levels (`List[int]`, *optional*, defaults to `[2, 3]`):
The levels for the top-down FPN connections.
num_feature_levels (`int`, *optional*, defaults to 3):
The number of feature levels from the FPN to use.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the neck.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon for the layer normalization.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
"""
base_config_key = "vision_config"
model_type = "sam2_vision_model"
sub_configs = {
"backbone_config": AutoConfig,
}
def __init__(
self,
backbone_config=None,
backbone_channel_list=None,
backbone_feature_sizes=None,
fpn_hidden_size=256,
fpn_kernel_size=1,
fpn_stride=1,
fpn_padding=0,
fpn_top_down_levels=None,
num_feature_levels=3,
hidden_act="gelu",
layer_norm_eps=1e-6,
initializer_range=0.02,
**kwargs,
):
backbone_channel_list = [768, 384, 192, 96] if backbone_channel_list is None else backbone_channel_list
backbone_feature_sizes = (
[[256, 256], [128, 128], [64, 64]] if backbone_feature_sizes is None else backbone_feature_sizes
)
fpn_top_down_levels = [2, 3] if fpn_top_down_levels is None else fpn_top_down_levels
if isinstance(backbone_config, dict):
backbone_config["model_type"] = backbone_config.get("model_type", "sam2_hiera_det_model")
backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config)
elif isinstance(backbone_config, Sam2HieraDetConfig):
pass
elif backbone_config is None:
backbone_config = Sam2HieraDetConfig()
self.backbone_config = backbone_config
# Neck
self.backbone_channel_list = backbone_channel_list
self.backbone_feature_sizes = backbone_feature_sizes
self.fpn_hidden_size = fpn_hidden_size
self.fpn_kernel_size = fpn_kernel_size
self.fpn_stride = fpn_stride
self.fpn_padding = fpn_padding
self.fpn_top_down_levels = fpn_top_down_levels
self.num_feature_levels = num_feature_levels
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
super().__init__(**kwargs)
| Sam2VisionConfig |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.