language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | python/ray/tests/unit/test_runtime_env_validation.py | {
"start": 2037,
"end": 3499
} | class ____:
def test_validate_bad_path(self):
with pytest.raises(ValueError, match="a valid path"):
parse_and_validate_working_dir("/does/not/exist")
def test_validate_bad_uri(self):
with pytest.raises(ValueError, match="a valid URI"):
parse_and_validate_working_dir("unknown://abc")
def test_validate_invalid_type(self):
with pytest.raises(TypeError):
parse_and_validate_working_dir(1)
def test_validate_remote_invalid_extensions(self):
for uri in [
"https://some_domain.com/path/file",
"s3://bucket/file",
"gs://bucket/file",
]:
with pytest.raises(
ValueError, match="Only .zip or .whl files supported for remote URIs."
):
parse_and_validate_working_dir(uri)
def test_validate_remote_valid_input(self):
for uri in [
"https://some_domain.com/path/file.zip",
"s3://bucket/file.zip",
"gs://bucket/file.zip",
]:
working_dir = parse_and_validate_working_dir(uri)
assert working_dir == uri
def test_validate_path_valid_input(self, test_directory):
test_dir, _, _, _ = test_directory
valid_working_dir_path = str(test_dir)
working_dir = parse_and_validate_working_dir(str(valid_working_dir_path))
assert working_dir == valid_working_dir_path
| TestValidateWorkingDir |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | contents/5.3_Dueling_DQN/RL_brain.py | {
"start": 272,
"end": 6714
} | class ____:
def __init__(
self,
n_actions,
n_features,
learning_rate=0.001,
reward_decay=0.9,
e_greedy=0.9,
replace_target_iter=200,
memory_size=500,
batch_size=32,
e_greedy_increment=None,
output_graph=False,
dueling=True,
sess=None,
):
self.n_actions = n_actions
self.n_features = n_features
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon_max = e_greedy
self.replace_target_iter = replace_target_iter
self.memory_size = memory_size
self.batch_size = batch_size
self.epsilon_increment = e_greedy_increment
self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
self.dueling = dueling # decide to use dueling DQN or not
self.learn_step_counter = 0
self.memory = np.zeros((self.memory_size, n_features*2+2))
self._build_net()
t_params = tf.get_collection('target_net_params')
e_params = tf.get_collection('eval_net_params')
self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
if sess is None:
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
else:
self.sess = sess
if output_graph:
tf.summary.FileWriter("logs/", self.sess.graph)
self.cost_his = []
def _build_net(self):
def build_layers(s, c_names, n_l1, w_initializer, b_initializer):
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(s, w1) + b1)
if self.dueling:
# Dueling DQN
with tf.variable_scope('Value'):
w2 = tf.get_variable('w2', [n_l1, 1], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, 1], initializer=b_initializer, collections=c_names)
self.V = tf.matmul(l1, w2) + b2
with tf.variable_scope('Advantage'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.A = tf.matmul(l1, w2) + b2
with tf.variable_scope('Q'):
out = self.V + (self.A - tf.reduce_mean(self.A, axis=1, keep_dims=True)) # Q = V(s) + A(s,a)
else:
with tf.variable_scope('Q'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
out = tf.matmul(l1, w2) + b2
return out
# ------------------ build evaluate_net ------------------
self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # input
self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target') # for calculating loss
with tf.variable_scope('eval_net'):
c_names, n_l1, w_initializer, b_initializer = \
['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 20, \
tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1) # config of layers
self.q_eval = build_layers(self.s, c_names, n_l1, w_initializer, b_initializer)
with tf.variable_scope('loss'):
self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))
with tf.variable_scope('train'):
self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
# ------------------ build target_net ------------------
self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # input
with tf.variable_scope('target_net'):
c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
self.q_next = build_layers(self.s_, c_names, n_l1, w_initializer, b_initializer)
def store_transition(self, s, a, r, s_):
if not hasattr(self, 'memory_counter'):
self.memory_counter = 0
transition = np.hstack((s, [a, r], s_))
index = self.memory_counter % self.memory_size
self.memory[index, :] = transition
self.memory_counter += 1
def choose_action(self, observation):
observation = observation[np.newaxis, :]
if np.random.uniform() < self.epsilon: # choosing action
actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
action = np.argmax(actions_value)
else:
action = np.random.randint(0, self.n_actions)
return action
def learn(self):
if self.learn_step_counter % self.replace_target_iter == 0:
self.sess.run(self.replace_target_op)
print('\ntarget_params_replaced\n')
sample_index = np.random.choice(self.memory_size, size=self.batch_size)
batch_memory = self.memory[sample_index, :]
q_next = self.sess.run(self.q_next, feed_dict={self.s_: batch_memory[:, -self.n_features:]}) # next observation
q_eval = self.sess.run(self.q_eval, {self.s: batch_memory[:, :self.n_features]})
q_target = q_eval.copy()
batch_index = np.arange(self.batch_size, dtype=np.int32)
eval_act_index = batch_memory[:, self.n_features].astype(int)
reward = batch_memory[:, self.n_features + 1]
q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1)
_, self.cost = self.sess.run([self._train_op, self.loss],
feed_dict={self.s: batch_memory[:, :self.n_features],
self.q_target: q_target})
self.cost_his.append(self.cost)
self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
self.learn_step_counter += 1
| DuelingDQN |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_shape_base_.py | {
"start": 18210,
"end": 19576
} | class ____(TestCase):
def test_non_iterable(self):
assert_raises(TypeError, dstack, 1)
def test_0D_array(self):
a = np.array(1)
b = np.array(2)
res = dstack([a, b])
desired = np.array([[[1, 2]]])
assert_array_equal(res, desired)
def test_1D_array(self):
a = np.array([1])
b = np.array([2])
res = dstack([a, b])
desired = np.array([[[1, 2]]])
assert_array_equal(res, desired)
def test_2D_array(self):
a = np.array([[1], [2]])
b = np.array([[1], [2]])
res = dstack([a, b])
desired = np.array(
[
[[1, 1]],
[
[
2,
2,
]
],
]
)
assert_array_equal(res, desired)
def test_2D_array2(self):
a = np.array([1, 2])
b = np.array([1, 2])
res = dstack([a, b])
desired = np.array([[[1, 1], [2, 2]]])
assert_array_equal(res, desired)
def test_generator(self):
# numpy 1.24 emits a warning but we don't
# with assert_warns(FutureWarning):
dstack([np.arange(3) for _ in range(2)])
# array_split has more comprehensive test of splitting.
# only do simple test on hsplit, vsplit, and dsplit
| TestDstack |
python | chroma-core__chroma | chromadb/api/configuration.py | {
"start": 13081,
"end": 13958
} | class ____(ConfigurationInternal):
"""Internal representation of the collection configuration.
Used for validation, defaults, and serialization / deserialization."""
definitions = {
"hnsw_configuration": ConfigurationDefinition(
name="hnsw_configuration",
validator=lambda value: isinstance(value, HNSWConfigurationInternal),
is_static=True,
default_value=HNSWConfigurationInternal(),
),
}
@override
def configuration_validator(self) -> None:
pass
# This is the user-facing interface for HNSW index configuration parameters.
# Internally, we pass around HNSWConfigurationInternal objects, which perform
# validation, serialization and deserialization. Users don't need to know
# about that and instead get a clean constructor with default arguments.
| CollectionConfigurationInternal |
python | celery__celery | t/integration/test_security.py | {
"start": 349,
"end": 3833
} | class ____:
@pytest.fixture(autouse=True, scope='class')
def class_certs(self, request):
self.tmpdir = tempfile.mkdtemp()
self.key_name = 'worker.key'
self.cert_name = 'worker.pem'
key = self.gen_private_key()
cert = self.gen_certificate(key=key,
common_name='celery cecurity integration')
pem_key = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
pem_cert = cert.public_bytes(
encoding=serialization.Encoding.PEM,
)
with open(self.tmpdir + '/' + self.key_name, 'wb') as key:
key.write(pem_key)
with open(self.tmpdir + '/' + self.cert_name, 'wb') as cert:
cert.write(pem_cert)
request.cls.tmpdir = self.tmpdir
request.cls.key_name = self.key_name
request.cls.cert_name = self.cert_name
yield
os.remove(self.tmpdir + '/' + self.key_name)
os.remove(self.tmpdir + '/' + self.cert_name)
os.rmdir(self.tmpdir)
@pytest.fixture(autouse=True)
def _prepare_setup(self, manager):
manager.app.conf.update(
security_key=f'{self.tmpdir}/{self.key_name}',
security_certificate=f'{self.tmpdir}/{self.cert_name}',
security_cert_store=f'{self.tmpdir}/*.pem',
task_serializer='auth',
event_serializer='auth',
accept_content=['auth'],
result_accept_content=['json']
)
manager.app.setup_security()
def gen_private_key(self):
"""generate a private key with cryptography"""
return rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend(),
)
def gen_certificate(self, key, common_name, issuer=None, sign_key=None):
"""generate a certificate with cryptography"""
now = datetime.datetime.now(datetime.timezone.utc)
certificate = x509.CertificateBuilder().subject_name(
x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, common_name),
])
).issuer_name(
x509.Name([
x509.NameAttribute(
NameOID.COMMON_NAME,
issuer or common_name
)
])
).not_valid_before(
now
).not_valid_after(
now + datetime.timedelta(seconds=86400)
).serial_number(
x509.random_serial_number()
).public_key(
key.public_key()
).add_extension(
x509.BasicConstraints(ca=True, path_length=0), critical=True
).sign(
private_key=sign_key or key,
algorithm=hashes.SHA256(),
backend=default_backend()
)
return certificate
@pytest.mark.xfail(reason="Issue #5269")
def test_security_task_done(self):
t1 = add.apply_async((1, 1))
try:
result = t1.get(timeout=10) # redis backend will timeout
assert result == 2
except (socket.timeout, TimeoutError) as e:
pytest.fail(
f"Timed out waiting for task result. Task was likely dropped by "
f"worker due to security misconfig. Exception details: {e}"
)
| test_security |
python | kamyu104__LeetCode-Solutions | Python/number-of-ships-in-a-rectangle.py | {
"start": 396,
"end": 471
} | class ____(object):
def __init__(self, x, y):
self.x = x
self.y = y
| Point |
python | getsentry__sentry | src/sentry/integrations/opsgenie/actions/form.py | {
"start": 1035,
"end": 3953
} | class ____(forms.Form):
"""Used for notifying a specific team."""
account = forms.ChoiceField(choices=(), widget=forms.Select())
team = forms.ChoiceField(required=False, choices=(), widget=forms.Select())
def __init__(self, *args, **kwargs):
self.org_id = kwargs.pop("org_id")
self._integrations = [(i.id, i.name) for i in kwargs.pop("integrations")]
self._teams = kwargs.pop("teams")
super().__init__(*args, **kwargs)
if self._integrations:
self.fields["account"].initial = self._integrations[0][0]
set_field_choices(self.fields["account"], self._integrations)
if self._teams:
self.fields["team"].initial = self._teams[0][0]
set_field_choices(self.fields["team"], self._teams)
def _get_team_status(
self,
team_id: str | None,
org_integration: RpcOrganizationIntegration,
) -> int:
team = get_team(team_id, org_integration)
if not team or not team_id:
return INVALID_TEAM
return VALID_TEAM
def _validate_team(self, team_id: str | None, integration_id: int | None) -> None:
with record_event(OnCallInteractionType.VERIFY_TEAM).capture() as lifecyle:
params = {
"account": dict(self._integrations).get(integration_id),
"team": dict(self._teams).get(team_id),
}
integration = integration_service.get_integration(
integration_id=integration_id, provider=IntegrationProviderSlug.OPSGENIE.value
)
org_integration = integration_service.get_organization_integration(
integration_id=integration_id,
organization_id=self.org_id,
)
if integration is None or org_integration is None:
lifecyle.record_halt(OnCallIntegrationsHaltReason.INVALID_TEAM)
raise forms.ValidationError(
_("The Opsgenie integration does not exist."),
code="invalid_integration",
params=params,
)
team_status = self._get_team_status(team_id=team_id, org_integration=org_integration)
if team_status == INVALID_TEAM:
lifecyle.record_halt(OnCallIntegrationsHaltReason.INVALID_TEAM)
raise forms.ValidationError(
_('The team "%(team)s" does not belong to the %(account)s Opsgenie account.'),
code="invalid_team",
params=params,
)
def clean(self) -> dict[str, Any] | None:
cleaned_data = super().clean()
if cleaned_data:
integration_id = _validate_int_field("account", cleaned_data)
team_id = cleaned_data.get("team")
self._validate_team(team_id, integration_id)
return cleaned_data
| OpsgenieNotifyTeamForm |
python | tensorflow__tensorflow | tensorflow/core/function/polymorphism/function_cache_test.py | {
"start": 3616,
"end": 8153
} | class ____(test.TestCase):
def testConcreteFunctionDictRetainsInsertedKeys(self):
cache = function_cache.FunctionCache()
f_type_1 = make_type(1)
self.assertIsNone(cache.lookup(f_type_1))
f_type_2 = make_type(2)
f_type_3 = make_type(3)
cache.add(MockFunction(f_type_1, "test_1"))
cache.add(MockFunction(f_type_2, "test_2"))
self.assertEqual(cache.lookup(f_type_1).test_string, "test_1")
self.assertEqual(cache.lookup(f_type_2).test_string, "test_2")
self.assertIsNone(cache.lookup(f_type_3))
def testClearRemovesAllConcreteFunctions(self):
cache = function_cache.FunctionCache()
f_type_1 = make_type(1)
f_type_2 = make_type(2)
f_type_3 = make_type(3)
cache.add(MockFunction(f_type_1, "test_1"))
cache.add(MockFunction(f_type_2, "test_2"))
self.assertEqual(cache.lookup(f_type_1).test_string, "test_1")
self.assertEqual(cache.lookup(f_type_2).test_string, "test_2")
self.assertIsNone(cache.lookup(f_type_3))
cache.clear()
self.assertIsNone(cache.lookup(f_type_1))
self.assertIsNone(cache.lookup(f_type_2))
self.assertIsNone(cache.lookup(f_type_3))
def testDeleteRemovesConcreteFunctions(self):
cache = function_cache.FunctionCache()
f_type_1 = make_type(1)
cache.add(MockFunction(f_type_1, "test_1"))
self.assertEqual(cache.lookup(f_type_1).test_string, "test_1")
cache.delete(f_type_1)
self.assertIsNone(cache.lookup(f_type_1))
f_type_2 = make_single_param_type(MockSubtypeOf2(2))
cache.add(MockFunction(f_type_2, "test_2"))
self.assertEqual(cache.lookup(f_type_2).test_string, "test_2")
f_type_3 = make_single_param_type(MockSubtypeOf2(3))
self.assertEqual(cache.lookup(f_type_3).test_string, "test_2")
cache.delete(f_type_2)
self.assertIsNone(cache.lookup(f_type_2))
self.assertIsNone(cache.lookup(f_type_3))
def testMostSpecificFunctionCacheKeyIsLookedUp(self):
ctx = function_cache.FunctionContext(0)
cache = function_cache.FunctionCache()
cache.add(
MockFunction(make_single_param_type(MockShape(1, 2, None)), "a"), ctx
)
cache.add(
MockFunction(make_single_param_type(MockShape(1, 2, 3)), "b"), ctx
)
self.assertEqual(
cache.lookup(
make_single_param_type(MockShape(1, 2, 3)), ctx
).test_string,
"b",
)
def testFirstMostSpecificFunctionCacheKeyIsLookedUp(self):
ctx = function_cache.FunctionContext(0)
cache = function_cache.FunctionCache()
cache.add(
MockFunction(make_single_param_type(MockShape(1, 2, None)), "a"), ctx
)
cache.add(
MockFunction(
make_single_param_type(MockShape(1, None, 3)),
"b",
),
ctx,
)
self.assertEqual(
cache.lookup(
make_single_param_type(MockShape(1, 2, 3)), ctx
).test_string,
"a",
)
def testMostSpecificFunctionCacheKeyIsOrderAgnostic(self):
ctx = function_cache.FunctionContext(0)
keys = [
(MockFunction(make_single_param_type(MockShape(1, 1, 1)), "a"), ctx),
(MockFunction(make_single_param_type(MockShape(1, None, 1)), "b"), ctx),
(
MockFunction(make_single_param_type(MockShape(None, None, 1)), "c"),
ctx,
),
(
MockFunction(
make_single_param_type(MockShape(None, None, None)), "d"
),
ctx,
),
]
for permutation in itertools.permutations(keys):
cache = function_cache.FunctionCache()
cache.add(
permutation[0][0],
permutation[0][1],
)
cache.add(
permutation[1][0],
permutation[1][1],
)
cache.add(
permutation[2][0],
permutation[2][1],
)
cache.add(
permutation[3][0],
permutation[3][1],
)
self.assertEqual(
cache.lookup(
make_single_param_type(MockShape(1, 1, 1)), ctx
).test_string,
"a",
)
self.assertEqual(
cache.lookup(
make_single_param_type(MockShape(1, 2, 1)), ctx
).test_string,
"b",
)
self.assertEqual(
cache.lookup(
make_single_param_type(MockShape(2, 2, 1)), ctx
).test_string,
"c",
)
self.assertEqual(
cache.lookup(
make_single_param_type(MockShape(2, 2, 2)), ctx
).test_string,
"d",
)
| FunctionCacheTest |
python | jina-ai__jina | tests/docker_compose/test-executor-torch/debug_executor.py | {
"start": 78,
"end": 3251
} | class ____(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from jina.logging.logger import JinaLogger
self.logger = JinaLogger(self.__class__.__name__)
self._name = self.runtime_args.name
@requests(on='/debug')
def debug(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
key = 'traversed-executors'
for doc in docs:
if key not in doc.tags:
doc.tags[key] = []
traversed = list(doc.tags.get(key))
traversed.append(self._name)
doc.tags[key] = traversed
doc.tags['parallel'] = self.runtime_args.replicas
doc.tags['shards'] = self.runtime_args.shards
doc.tags['shard_id'] = self.runtime_args.shard_id
doc.tags['hostname'] = socket.gethostname()
@requests(on='/env')
def env(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
for doc in docs:
doc.tags['k1'] = os.environ.get('k1')
doc.tags['k2'] = os.environ.get('k2')
doc.tags['JINA_LOG_LEVEL'] = os.environ.get('JINA_LOG_LEVEL')
doc.tags['env'] = {'k1': os.environ.get('k1'), 'k2': os.environ.get('k2')}
doc.tags['SECRET_USERNAME'] = os.environ.get('SECRET_USERNAME')
doc.tags['SECRET_PASSWORD'] = os.environ.get('SECRET_PASSWORD')
@requests(on='/cuda')
def cuda(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
import kubernetes
from kubernetes import client
api_client = client.ApiClient()
core_client = client.CoreV1Api(api_client=api_client)
try:
# try loading kube config from disk first
kubernetes.config.load_kube_config()
except kubernetes.config.config_exception.ConfigException:
# if the config could not be read from disk, try loading in cluster config
# this works if we are running inside k8s
kubernetes.config.load_incluster_config()
pods = core_client.list_namespaced_pod('test-gpu') # List[V1Pod]
pod_spec = pods[0].spec # V1PodSpec
pod_container = pod_spec.containers[0] # V1Container
pod_resources = pod_container.resources # V1ResourceRequirements
for doc in docs:
doc.tags['resources']['limits'] = pod_resources.limits
@requests(on='/workspace')
def foo_workspace(self, docs: DocumentArray, **kwargs):
import torch
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
self.logger.debug(f'Workspace {self.workspace}.')
for doc in docs:
doc.tags['workspace'] = self.workspace
doc.embedding = torch.rand(1000)
doc.tensor = torch.rand(1000)
| TestExecutor |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 223576,
"end": 223853
} | class ____(VegaLiteSchema):
"""ConditionalMarkPropFieldOrDatumDef schema wrapper."""
_schema = {"$ref": "#/definitions/ConditionalMarkPropFieldOrDatumDef"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ConditionalMarkPropFieldOrDatumDef |
python | joke2k__faker | faker/providers/phone_number/th_TH/__init__.py | {
"start": 49,
"end": 1826
} | class ____(PhoneNumberProvider):
# as per https://en.wikipedia.org/wiki/Telephone_numbers_in_Thailand
formats = (
# landline (9 digits, starts with 02, 03, 04, 05, or 07)
"+66 2### ####",
"+662 ### ####",
"+66 (0) 2### ####",
"02#######",
"0 2### ####",
"02# ######",
"02#-######",
"0-2###-####",
"02 ### ####",
"+66 3### ####",
"+663 ### ####",
"+66 (0) 3### ####",
"03#######",
"0 3### ####",
"03# ######",
"03#-######",
"0-3###-####",
"03 ### ####",
"+66 4### ####",
"+664 ### ####",
"+66 (0) 4### ####",
"04#######",
"0 4### ####",
"04# ######",
"04#-######",
"0-4###-####",
"04 ### ####",
"+66 5### ####",
"+665 ### ####",
"+66 (0) 5### ####",
"05#######",
"0 5### ####",
"05# ######",
"05#-######",
"0-5###-####",
"05 ### ####",
"+66 7### ####",
"+667 ### ####",
"+66 (0) 7### ####",
"07#######",
"0 7### ####",
"07# ######",
"07#-######",
"0-7###-####",
"07 ### ####",
# mobile (10 digits, starts with 06, 08, or 09)
"+66 6## ### ###",
"+66 (0) 6## ### ###",
"06########",
"0 6## ### ###",
"06# ### ####",
"06#-###-####",
"+66 8## ### ###",
"+66 (0) 8## ### ###",
"08########",
"0 8## ### ###",
"08# ### ####",
"08#-###-####",
"+66 9## ### ###",
"+66 (0) 9## ### ###",
"09########",
"0 9## ### ###",
"09# ### ####",
"09#-###-####",
)
| Provider |
python | huggingface__transformers | src/transformers/models/glm4v_moe/modeling_glm4v_moe.py | {
"start": 17930,
"end": 18835
} | class ____(nn.Module):
def __init__(self, config: Glm4vMoeTextConfig):
super().__init__()
self.config = config
self.top_k = config.num_experts_per_tok
self.n_routed_experts = config.n_routed_experts
self.routed_scaling_factor = config.routed_scaling_factor
self.n_group = config.n_group
self.topk_group = config.topk_group
self.norm_topk_prob = config.norm_topk_prob
self.weight = nn.Parameter(torch.empty((self.n_routed_experts, config.hidden_size)))
self.register_buffer("e_score_correction_bias", torch.zeros((self.n_routed_experts), dtype=torch.float32))
def forward(self, hidden_states):
hidden_states = hidden_states.view(-1, self.config.hidden_size)
router_logits = F.linear(hidden_states.type(torch.float32), self.weight.type(torch.float32))
return router_logits
| Glm4vMoeTextTopkRouter |
python | keras-team__keras | keras/src/ops/operation_utils_test.py | {
"start": 176,
"end": 7659
} | class ____(testing.TestCase):
def test_get_source_inputs(self):
x1 = backend.KerasTensor(shape=(2,))
x2 = backend.KerasTensor(shape=(2,))
x = x1 + x2
x += 2
x = ops.square(x)
self.assertEqual(operation_utils.get_source_inputs(x), [x1, x2])
def test_get_source_inputs_return_input_tensor(self):
inputs = input_layer.Input(shape=(10,))
self.assertIs(operation_utils.get_source_inputs(inputs)[0], inputs)
def test_compute_expand_dims_output_shape(self):
input_shape = (2, 3, 4)
axis = -1
output_shape = operation_utils.compute_expand_dims_output_shape(
input_shape, axis
)
expected_output_shape = (2, 3, 4, 1)
self.assertEqual(output_shape, expected_output_shape)
input_shape = (2, 3, 4)
axis = (1, -1)
output_shape = operation_utils.compute_expand_dims_output_shape(
input_shape, axis
)
expected_output_shape = (2, 1, 3, 4, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_compute_pooling_output_shape(self):
input_shape = (1, 4, 4, 1)
pool_size = (2, 2)
strides = (2, 2)
output_shape = operation_utils.compute_pooling_output_shape(
input_shape, pool_size, strides
)
expected_output_shape = (1, 2, 2, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_compute_pooling_output_shape_with_none(self):
input_shape = (None, 4, 4, 1)
pool_size = (2, 2)
strides = (2, 2)
output_shape = operation_utils.compute_pooling_output_shape(
input_shape, pool_size, strides
)
expected_output_shape = (None, 2, 2, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_compute_pooling_output_shape_valid_padding(self):
input_shape = (1, 4, 4, 1)
pool_size = (2, 2)
strides = (2, 2)
output_shape = operation_utils.compute_pooling_output_shape(
input_shape, pool_size, strides, padding="valid"
)
self.assertEqual(output_shape, (1, 2, 2, 1))
def test_compute_pooling_output_shape_channels_last(self):
input_shape = (1, 4, 4, 3)
pool_size = (2, 2)
strides = (2, 2)
output_shape = operation_utils.compute_pooling_output_shape(
input_shape,
pool_size,
strides,
padding="valid",
data_format="channels_last",
)
self.assertEqual(output_shape, (1, 2, 2, 3))
def test_compute_pooling_output_shape_same_padding_stride1(self):
input_shape = (1, 4, 4, 3)
pool_size = (2, 2)
strides = (1, 1)
output_shape = operation_utils.compute_pooling_output_shape(
input_shape,
pool_size,
strides,
padding="same",
data_format="channels_last",
)
self.assertEqual(output_shape, (1, 4, 4, 3))
def test_compute_conv_output_shape(self):
input_shape = (1, 4, 4, 1)
filters = 1
kernel_size = (3, 3)
strides = (1, 1)
output_shape = operation_utils.compute_conv_output_shape(
input_shape, filters, kernel_size, strides
)
expected_output_shape = (1, 2, 2, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_compute_conv_output_shape_with_none(self):
input_shape = (None, 4, 4, 1)
kernel_size = (3, 3)
filters = 1
strides = (1, 1)
output_shape = operation_utils.compute_conv_output_shape(
input_shape, filters, kernel_size, strides
)
expected_output_shape = (None, 2, 2, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_compute_conv_output_shape_valid_padding(self):
input_shape = (1, 4, 4, 1)
kernel_size = (3, 3)
filters = 1
strides = (2, 2)
output_shape = operation_utils.compute_conv_output_shape(
input_shape, filters, kernel_size, strides, padding="valid"
)
self.assertEqual(output_shape, (1, 1, 1, 1))
def test_compute_conv_output_shape_channels_last(self):
input_shape = (1, 4, 4, 3)
kernel_size = (3, 3)
filters = 3
strides = (2, 2)
output_shape = operation_utils.compute_conv_output_shape(
input_shape,
filters,
kernel_size,
strides,
padding="valid",
data_format="channels_last",
)
self.assertEqual(output_shape, (1, 1, 1, 3))
def test_compute_conv_output_shape_same_padding_stride1(self):
input_shape = (1, 4, 4, 3)
kernel_size = (3, 3)
filters = 3
strides = (1, 1)
output_shape = operation_utils.compute_conv_output_shape(
input_shape,
filters,
kernel_size,
strides,
padding="same",
data_format="channels_last",
)
self.assertEqual(output_shape, (1, 4, 4, 3))
def test_compute_reshape_output_shape(self):
input_shape = (1, 4, 4, 1)
target_shape = (16, 1)
output_shape = operation_utils.compute_reshape_output_shape(
input_shape, newshape=target_shape, newshape_arg_name="New shape"
)
self.assertEqual(output_shape, target_shape)
def test_reduce_shape_no_axes_no_keepdims(self):
input_shape = (1, 4, 4, 1)
output_shape = operation_utils.reduce_shape(input_shape)
expected_output_shape = ()
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_no_axes_with_keepdims(self):
input_shape = (1, 4, 4, 1)
output_shape = operation_utils.reduce_shape(input_shape, keepdims=True)
expected_output_shape = (1, 1, 1, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_single_axis_no_keepdims(self):
input_shape = (1, 4, 4, 1)
axes = [1]
output_shape = operation_utils.reduce_shape(input_shape, axes)
expected_output_shape = (1, 4, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_single_axis_with_keepdims(self):
input_shape = (1, 4, 4, 1)
axes = [1]
output_shape = operation_utils.reduce_shape(
input_shape, axes, keepdims=True
)
expected_output_shape = (1, 1, 4, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_multiple_axes_no_keepdims(self):
input_shape = (1, 4, 4, 1)
axes = [1, 2]
output_shape = operation_utils.reduce_shape(input_shape, axes)
expected_output_shape = (1, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_out_of_order_axes_no_keepdims(self):
input_shape = (1, 4, 4, 1)
axes = [2, 1]
output_shape = operation_utils.reduce_shape(input_shape, axes)
expected_output_shape = (1, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_negative_axes_no_keepdims(self):
input_shape = (1, 4, 4, 1)
axes = [-2, -3]
output_shape = operation_utils.reduce_shape(input_shape, axes)
expected_output_shape = (1, 1)
self.assertEqual(output_shape, expected_output_shape)
| OperationUtilsTest |
python | MongoEngine__mongoengine | tests/fields/test_float_field.py | {
"start": 83,
"end": 1781
} | class ____(MongoDBTestCase):
def test_float_ne_operator(self):
class TestDocument(Document):
float_fld = FloatField()
TestDocument.drop_collection()
TestDocument(float_fld=None).save()
TestDocument(float_fld=1).save()
assert 1 == TestDocument.objects(float_fld__ne=None).count()
assert 1 == TestDocument.objects(float_fld__ne=1).count()
def test_validation(self):
"""Ensure that invalid values cannot be assigned to float fields."""
class Person(Document):
height = FloatField(min_value=0.1, max_value=3.5)
class BigPerson(Document):
height = FloatField()
person = Person()
person.height = 1.89
person.validate()
person.height = "2.0"
with pytest.raises(ValidationError):
person.validate()
person.height = 0.01
with pytest.raises(ValidationError):
person.validate()
person.height = 4.0
with pytest.raises(ValidationError):
person.validate()
person_2 = Person(height="something invalid")
with pytest.raises(ValidationError):
person_2.validate()
big_person = BigPerson()
big_person.height = int(0)
big_person.validate()
big_person.height = 2**500
big_person.validate()
big_person.height = 2**100000 # Too big for a float value
with pytest.raises(ValidationError):
big_person.validate()
def test_query_none_value_dont_raise(self):
class BigPerson(Document):
height = FloatField()
_ = list(BigPerson.objects(height=None))
| TestFloatField |
python | paramiko__paramiko | paramiko/buffered_pipe.py | {
"start": 1248,
"end": 7225
} | class ____:
"""
A buffer that obeys normal read (with timeout) & close semantics for a
file or socket, but is fed data from another thread. This is used by
`.Channel`.
"""
def __init__(self):
self._lock = threading.Lock()
self._cv = threading.Condition(self._lock)
self._event = None
self._buffer = array.array("B")
self._closed = False
def _buffer_frombytes(self, data):
self._buffer.frombytes(data)
def _buffer_tobytes(self, limit=None):
return self._buffer[:limit].tobytes()
def set_event(self, event):
"""
Set an event on this buffer. When data is ready to be read (or the
buffer has been closed), the event will be set. When no data is
ready, the event will be cleared.
:param threading.Event event: the event to set/clear
"""
self._lock.acquire()
try:
self._event = event
# Make sure the event starts in `set` state if we appear to already
# be closed; otherwise, if we start in `clear` state & are closed,
# nothing will ever call `.feed` and the event (& OS pipe, if we're
# wrapping one - see `Channel.fileno`) will permanently stay in
# `clear`, causing deadlock if e.g. `select`ed upon.
if self._closed or len(self._buffer) > 0:
event.set()
else:
event.clear()
finally:
self._lock.release()
def feed(self, data):
"""
Feed new data into this pipe. This method is assumed to be called
from a separate thread, so synchronization is done.
:param data: the data to add, as a ``str`` or ``bytes``
"""
self._lock.acquire()
try:
if self._event is not None:
self._event.set()
self._buffer_frombytes(b(data))
self._cv.notify_all()
finally:
self._lock.release()
def read_ready(self):
"""
Returns true if data is buffered and ready to be read from this
feeder. A ``False`` result does not mean that the feeder has closed;
it means you may need to wait before more data arrives.
:return:
``True`` if a `read` call would immediately return at least one
byte; ``False`` otherwise.
"""
self._lock.acquire()
try:
if len(self._buffer) == 0:
return False
return True
finally:
self._lock.release()
def read(self, nbytes, timeout=None):
"""
Read data from the pipe. The return value is a string representing
the data received. The maximum amount of data to be received at once
is specified by ``nbytes``. If a string of length zero is returned,
the pipe has been closed.
The optional ``timeout`` argument can be a nonnegative float expressing
seconds, or ``None`` for no timeout. If a float is given, a
`.PipeTimeout` will be raised if the timeout period value has elapsed
before any data arrives.
:param int nbytes: maximum number of bytes to read
:param float timeout:
maximum seconds to wait (or ``None``, the default, to wait forever)
:return: the read data, as a ``str`` or ``bytes``
:raises:
`.PipeTimeout` -- if a timeout was specified and no data was ready
before that timeout
"""
out = bytes()
self._lock.acquire()
try:
if len(self._buffer) == 0:
if self._closed:
return out
# should we block?
if timeout == 0.0:
raise PipeTimeout()
# loop here in case we get woken up but a different thread has
# grabbed everything in the buffer.
while (len(self._buffer) == 0) and not self._closed:
then = time.time()
self._cv.wait(timeout)
if timeout is not None:
timeout -= time.time() - then
if timeout <= 0.0:
raise PipeTimeout()
# something's in the buffer and we have the lock!
if len(self._buffer) <= nbytes:
out = self._buffer_tobytes()
del self._buffer[:]
if (self._event is not None) and not self._closed:
self._event.clear()
else:
out = self._buffer_tobytes(nbytes)
del self._buffer[:nbytes]
finally:
self._lock.release()
return out
def empty(self):
"""
Clear out the buffer and return all data that was in it.
:return:
any data that was in the buffer prior to clearing it out, as a
`str`
"""
self._lock.acquire()
try:
out = self._buffer_tobytes()
del self._buffer[:]
if (self._event is not None) and not self._closed:
self._event.clear()
return out
finally:
self._lock.release()
def close(self):
"""
Close this pipe object. Future calls to `read` after the buffer
has been emptied will return immediately with an empty string.
"""
self._lock.acquire()
try:
self._closed = True
self._cv.notify_all()
if self._event is not None:
self._event.set()
finally:
self._lock.release()
def __len__(self):
"""
Return the number of bytes buffered.
:return: number (`int`) of bytes buffered
"""
self._lock.acquire()
try:
return len(self._buffer)
finally:
self._lock.release()
| BufferedPipe |
python | networkx__networkx | networkx/algorithms/centrality/tests/test_load_centrality.py | {
"start": 39,
"end": 11343
} | class ____:
@classmethod
def setup_class(cls):
G = nx.Graph()
G.add_edge(0, 1, weight=3)
G.add_edge(0, 2, weight=2)
G.add_edge(0, 3, weight=6)
G.add_edge(0, 4, weight=4)
G.add_edge(1, 3, weight=5)
G.add_edge(1, 5, weight=5)
G.add_edge(2, 4, weight=1)
G.add_edge(3, 4, weight=2)
G.add_edge(3, 5, weight=1)
G.add_edge(4, 5, weight=4)
cls.G = G
cls.exact_weighted = {0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0}
cls.K = nx.krackhardt_kite_graph()
cls.P3 = nx.path_graph(3)
cls.P4 = nx.path_graph(4)
cls.K5 = nx.complete_graph(5)
cls.P2 = nx.path_graph(2)
cls.C4 = nx.cycle_graph(4)
cls.T = nx.balanced_tree(r=2, h=2)
cls.Gb = nx.Graph()
cls.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
cls.F = nx.florentine_families_graph()
cls.LM = nx.les_miserables_graph()
cls.D = nx.cycle_graph(3, create_using=nx.DiGraph())
cls.D.add_edges_from([(3, 0), (4, 3)])
def test_not_strongly_connected(self):
b = nx.load_centrality(self.D)
result = {0: 5.0 / 12, 1: 1.0 / 4, 2: 1.0 / 12, 3: 1.0 / 4, 4: 0.000}
for n in sorted(self.D):
assert result[n] == pytest.approx(b[n], abs=1e-3)
assert result[n] == pytest.approx(nx.load_centrality(self.D, n), abs=1e-3)
def test_P2_normalized_load(self):
G = self.P2
c = nx.load_centrality(G, normalized=True)
d = {0: 0.000, 1: 0.000}
for n in sorted(G):
assert c[n] == pytest.approx(d[n], abs=1e-3)
def test_weighted_load(self):
b = nx.load_centrality(self.G, weight="weight", normalized=False)
for n in sorted(self.G):
assert b[n] == self.exact_weighted[n]
def test_k5_load(self):
G = self.K5
c = nx.load_centrality(G)
d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000}
for n in sorted(G):
assert c[n] == pytest.approx(d[n], abs=1e-3)
def test_p3_load(self):
G = self.P3
c = nx.load_centrality(G)
d = {0: 0.000, 1: 1.000, 2: 0.000}
for n in sorted(G):
assert c[n] == pytest.approx(d[n], abs=1e-3)
c = nx.load_centrality(G, v=1)
assert c == pytest.approx(1.0, abs=1e-7)
c = nx.load_centrality(G, v=1, normalized=True)
assert c == pytest.approx(1.0, abs=1e-7)
def test_p2_load(self):
G = nx.path_graph(2)
c = nx.load_centrality(G)
d = {0: 0.000, 1: 0.000}
for n in sorted(G):
assert c[n] == pytest.approx(d[n], abs=1e-3)
def test_krackhardt_load(self):
G = self.K
c = nx.load_centrality(G)
d = {
0: 0.023,
1: 0.023,
2: 0.000,
3: 0.102,
4: 0.000,
5: 0.231,
6: 0.231,
7: 0.389,
8: 0.222,
9: 0.000,
}
for n in sorted(G):
assert c[n] == pytest.approx(d[n], abs=1e-3)
def test_florentine_families_load(self):
G = self.F
c = nx.load_centrality(G)
d = {
"Acciaiuoli": 0.000,
"Albizzi": 0.211,
"Barbadori": 0.093,
"Bischeri": 0.104,
"Castellani": 0.055,
"Ginori": 0.000,
"Guadagni": 0.251,
"Lamberteschi": 0.000,
"Medici": 0.522,
"Pazzi": 0.000,
"Peruzzi": 0.022,
"Ridolfi": 0.117,
"Salviati": 0.143,
"Strozzi": 0.106,
"Tornabuoni": 0.090,
}
for n in sorted(G):
assert c[n] == pytest.approx(d[n], abs=1e-3)
def test_les_miserables_load(self):
G = self.LM
c = nx.load_centrality(G)
d = {
"Napoleon": 0.000,
"Myriel": 0.177,
"MlleBaptistine": 0.000,
"MmeMagloire": 0.000,
"CountessDeLo": 0.000,
"Geborand": 0.000,
"Champtercier": 0.000,
"Cravatte": 0.000,
"Count": 0.000,
"OldMan": 0.000,
"Valjean": 0.567,
"Labarre": 0.000,
"Marguerite": 0.000,
"MmeDeR": 0.000,
"Isabeau": 0.000,
"Gervais": 0.000,
"Listolier": 0.000,
"Tholomyes": 0.043,
"Fameuil": 0.000,
"Blacheville": 0.000,
"Favourite": 0.000,
"Dahlia": 0.000,
"Zephine": 0.000,
"Fantine": 0.128,
"MmeThenardier": 0.029,
"Thenardier": 0.075,
"Cosette": 0.024,
"Javert": 0.054,
"Fauchelevent": 0.026,
"Bamatabois": 0.008,
"Perpetue": 0.000,
"Simplice": 0.009,
"Scaufflaire": 0.000,
"Woman1": 0.000,
"Judge": 0.000,
"Champmathieu": 0.000,
"Brevet": 0.000,
"Chenildieu": 0.000,
"Cochepaille": 0.000,
"Pontmercy": 0.007,
"Boulatruelle": 0.000,
"Eponine": 0.012,
"Anzelma": 0.000,
"Woman2": 0.000,
"MotherInnocent": 0.000,
"Gribier": 0.000,
"MmeBurgon": 0.026,
"Jondrette": 0.000,
"Gavroche": 0.164,
"Gillenormand": 0.021,
"Magnon": 0.000,
"MlleGillenormand": 0.047,
"MmePontmercy": 0.000,
"MlleVaubois": 0.000,
"LtGillenormand": 0.000,
"Marius": 0.133,
"BaronessT": 0.000,
"Mabeuf": 0.028,
"Enjolras": 0.041,
"Combeferre": 0.001,
"Prouvaire": 0.000,
"Feuilly": 0.001,
"Courfeyrac": 0.006,
"Bahorel": 0.002,
"Bossuet": 0.032,
"Joly": 0.002,
"Grantaire": 0.000,
"MotherPlutarch": 0.000,
"Gueulemer": 0.005,
"Babet": 0.005,
"Claquesous": 0.005,
"Montparnasse": 0.004,
"Toussaint": 0.000,
"Child1": 0.000,
"Child2": 0.000,
"Brujon": 0.000,
"MmeHucheloup": 0.000,
}
for n in sorted(G):
assert c[n] == pytest.approx(d[n], abs=1e-3)
def test_unnormalized_k5_load(self):
G = self.K5
c = nx.load_centrality(G, normalized=False)
d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000}
for n in sorted(G):
assert c[n] == pytest.approx(d[n], abs=1e-3)
def test_unnormalized_p3_load(self):
G = self.P3
c = nx.load_centrality(G, normalized=False)
d = {0: 0.000, 1: 2.000, 2: 0.000}
for n in sorted(G):
assert c[n] == pytest.approx(d[n], abs=1e-3)
def test_unnormalized_krackhardt_load(self):
G = self.K
c = nx.load_centrality(G, normalized=False)
d = {
0: 1.667,
1: 1.667,
2: 0.000,
3: 7.333,
4: 0.000,
5: 16.667,
6: 16.667,
7: 28.000,
8: 16.000,
9: 0.000,
}
for n in sorted(G):
assert c[n] == pytest.approx(d[n], abs=1e-3)
def test_unnormalized_florentine_families_load(self):
G = self.F
c = nx.load_centrality(G, normalized=False)
d = {
"Acciaiuoli": 0.000,
"Albizzi": 38.333,
"Barbadori": 17.000,
"Bischeri": 19.000,
"Castellani": 10.000,
"Ginori": 0.000,
"Guadagni": 45.667,
"Lamberteschi": 0.000,
"Medici": 95.000,
"Pazzi": 0.000,
"Peruzzi": 4.000,
"Ridolfi": 21.333,
"Salviati": 26.000,
"Strozzi": 19.333,
"Tornabuoni": 16.333,
}
for n in sorted(G):
assert c[n] == pytest.approx(d[n], abs=1e-3)
def test_load_betweenness_difference(self):
# Difference Between Load and Betweenness
# --------------------------------------- The smallest graph
# that shows the difference between load and betweenness is
# G=ladder_graph(3) (Graph B below)
# Graph A and B are from Tao Zhou, Jian-Guo Liu, Bing-Hong
# Wang: Comment on "Scientific collaboration
# networks. II. Shortest paths, weighted networks, and
# centrality". https://arxiv.org/pdf/physics/0511084
# Notice that unlike here, their calculation adds to 1 to the
# betweenness of every node i for every path from i to every
# other node. This is exactly what it should be, based on
# Eqn. (1) in their paper: the eqn is B(v) = \sum_{s\neq t,
# s\neq v}{\frac{\sigma_{st}(v)}{\sigma_{st}}}, therefore,
# they allow v to be the target node.
# We follow Brandes 2001, who follows Freeman 1977 that make
# the sum for betweenness of v exclude paths where v is either
# the source or target node. To agree with their numbers, we
# must additionally, remove edge (4,8) from the graph, see AC
# example following (there is a mistake in the figure in their
# paper - personal communication).
# A = nx.Graph()
# A.add_edges_from([(0,1), (1,2), (1,3), (2,4),
# (3,5), (4,6), (4,7), (4,8),
# (5,8), (6,9), (7,9), (8,9)])
B = nx.Graph() # ladder_graph(3)
B.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)])
c = nx.load_centrality(B, normalized=False)
d = {0: 1.750, 1: 1.750, 2: 6.500, 3: 6.500, 4: 1.750, 5: 1.750}
for n in sorted(B):
assert c[n] == pytest.approx(d[n], abs=1e-3)
def test_c4_edge_load(self):
G = self.C4
c = nx.edge_load_centrality(G)
d = {(0, 1): 6.000, (0, 3): 6.000, (1, 2): 6.000, (2, 3): 6.000}
for n in G.edges():
assert c[n] == pytest.approx(d[n], abs=1e-3)
def test_p4_edge_load(self):
G = self.P4
c = nx.edge_load_centrality(G)
d = {(0, 1): 6.000, (1, 2): 8.000, (2, 3): 6.000}
for n in G.edges():
assert c[n] == pytest.approx(d[n], abs=1e-3)
def test_k5_edge_load(self):
G = self.K5
c = nx.edge_load_centrality(G)
d = {
(0, 1): 5.000,
(0, 2): 5.000,
(0, 3): 5.000,
(0, 4): 5.000,
(1, 2): 5.000,
(1, 3): 5.000,
(1, 4): 5.000,
(2, 3): 5.000,
(2, 4): 5.000,
(3, 4): 5.000,
}
for n in G.edges():
assert c[n] == pytest.approx(d[n], abs=1e-3)
def test_tree_edge_load(self):
G = self.T
c = nx.edge_load_centrality(G)
d = {
(0, 1): 24.000,
(0, 2): 24.000,
(1, 3): 12.000,
(1, 4): 12.000,
(2, 5): 12.000,
(2, 6): 12.000,
}
for n in G.edges():
assert c[n] == pytest.approx(d[n], abs=1e-3)
| TestLoadCentrality |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_average_to_be_within_range_of_given_point.py | {
"start": 2632,
"end": 7284
} | class ____(ColumnAggregateExpectation):
"""Expect the average of a column of degree-decimal, lat/lon coordinates to be in range of a given point."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"lat_lon": [
(62.75955799999999, -164.483752),
(62.7673475, -164.4996625),
(62.7698675, -164.5034575),
(62.76901333333333, -164.50339),
(62.76906333333334, -164.50353333333337),
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "lat_lon",
"center_point": (62.7597, -164.484),
"range": 8,
},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "lat_lon",
"center_point": (72.7597, -161.484),
"range": 8,
},
"out": {"success": False},
},
],
}
]
# This is a tuple consisting of all Metrics necessary to evaluate the Expectation.
metric_dependencies = ("column.coordinates.distance",)
# This a tuple of parameter names that can affect whether the Expectation evaluates to True or False.
success_keys = ("range", "center_point")
# This dictionary contains default values for any parameters that should have default values.
default_kwarg_values = {}
# This method performs a validation of your metrics against your success keys, returning a dict indicating the success or failure of the Expectation.
def _validate(
self,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
distance = metrics.get("column.coordinates.distance")
range = self._get_success_kwargs().get("range")
success = distance <= range
return {"success": success, "result": {"observed_value": distance}}
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": [
"geospatial",
"hackathon-22",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@austiezr",
"@mmi333", # Don't forget to add your github handle here!
],
}
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: ExpectationConfiguration = None,
result: ExpectationValidationResult = None,
runtime_configuration: dict = None,
**kwargs,
) -> List[
Union[
dict,
str,
RenderedStringTemplateContent,
RenderedTableContent,
RenderedBulletListContent,
RenderedGraphContent,
Any,
]
]:
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"range",
"center_point",
],
)
template_str = "column average must be in fcc projection within $range of $center_point."
if include_column_name:
template_str = f"$column {template_str}"
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
if __name__ == "__main__":
ExpectColumnAverageToBeWithinRangeOfGivenPoint().print_diagnostic_checklist()
| ExpectColumnAverageToBeWithinRangeOfGivenPoint |
python | wandb__wandb | wandb/automations/_filters/operators.py | {
"start": 6421,
"end": 6878
} | class ____(BaseOp):
val: bool = Field(alias="$exists")
@override
def __invert__(self) -> Exists:
"""Implements `~Exists(True) -> Exists(False)` and vice versa."""
return Exists(val=not self.val)
# Evaluation operator(s)
# https://www.mongodb.com/docs/manual/reference/operator/query/regex/
#
# Note: `$contains` is NOT a formal MongoDB operator, but the W&B backend
# recognizes and executes it as a substring-match filter.
| Exists |
python | pytorch__pytorch | torch/fx/_graph_pickler.py | {
"start": 5170,
"end": 5559
} | class ____:
def __init__(self, fake_mode: FakeTensorMode) -> None:
self.fake_mode = fake_mode
self.meta_converter: MetaConverter[FakeTensor] = MetaConverter()
# This token is passed when pickling to indicate that we want to use the
# unpickler's _UnpickleState as a parameter in that position.
_UnpickleStateToken = NewType("_UnpickleStateToken", object)
| _UnpickleState |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/kqueue.py | {
"start": 5459,
"end": 9976
} | class ____(object):
"""
Thread-safe kevent descriptor collection.
"""
def __init__(self):
# Set of KeventDescriptor
self._descriptors = set()
# Descriptor for a given path.
self._descriptor_for_path = dict()
# Descriptor for a given fd.
self._descriptor_for_fd = dict()
# List of kevent objects.
self._kevents = list()
self._lock = threading.Lock()
@property
def kevents(self):
"""
List of kevents monitored.
"""
with self._lock:
return self._kevents
@property
def paths(self):
"""
List of paths for which kevents have been created.
"""
with self._lock:
return list(self._descriptor_for_path.keys())
def get_for_fd(self, fd):
"""
Given a file descriptor, returns the kevent descriptor object
for it.
:param fd:
OS file descriptor.
:type fd:
``int``
:returns:
A :class:`KeventDescriptor` object.
"""
with self._lock:
return self._descriptor_for_fd[fd]
def get(self, path):
"""
Obtains a :class:`KeventDescriptor` object for the specified path.
:param path:
Path for which the descriptor will be obtained.
"""
with self._lock:
path = absolute_path(path)
return self._get(path)
def __contains__(self, path):
"""
Determines whether a :class:`KeventDescriptor has been registered
for the specified path.
:param path:
Path for which the descriptor will be obtained.
"""
with self._lock:
path = absolute_path(path)
return self._has_path(path)
def add(self, path, is_directory):
"""
Adds a :class:`KeventDescriptor` to the collection for the given
path.
:param path:
The path for which a :class:`KeventDescriptor` object will be
added.
:param is_directory:
``True`` if the path refers to a directory; ``False`` otherwise.
:type is_directory:
``bool``
"""
with self._lock:
path = absolute_path(path)
if not self._has_path(path):
self._add_descriptor(KeventDescriptor(path, is_directory))
def remove(self, path):
"""
Removes the :class:`KeventDescriptor` object for the given path
if it already exists.
:param path:
Path for which the :class:`KeventDescriptor` object will be
removed.
"""
with self._lock:
path = absolute_path(path)
if self._has_path(path):
self._remove_descriptor(self._get(path))
def clear(self):
"""
Clears the collection and closes all open descriptors.
"""
with self._lock:
for descriptor in self._descriptors:
descriptor.close()
self._descriptors.clear()
self._descriptor_for_fd.clear()
self._descriptor_for_path.clear()
self._kevents = []
# Thread-unsafe methods. Locking is provided at a higher level.
def _get(self, path):
"""Returns a kevent descriptor for a given path."""
return self._descriptor_for_path[path]
def _has_path(self, path):
"""Determines whether a :class:`KeventDescriptor` for the specified
path exists already in the collection."""
return path in self._descriptor_for_path
def _add_descriptor(self, descriptor):
"""
Adds a descriptor to the collection.
:param descriptor:
An instance of :class:`KeventDescriptor` to be added.
"""
self._descriptors.add(descriptor)
self._kevents.append(descriptor.kevent)
self._descriptor_for_path[descriptor.path] = descriptor
self._descriptor_for_fd[descriptor.fd] = descriptor
def _remove_descriptor(self, descriptor):
"""
Removes a descriptor from the collection.
:param descriptor:
An instance of :class:`KeventDescriptor` to be removed.
"""
self._descriptors.remove(descriptor)
del self._descriptor_for_fd[descriptor.fd]
del self._descriptor_for_path[descriptor.path]
self._kevents.remove(descriptor.kevent)
descriptor.close()
| KeventDescriptorSet |
python | geekcomputers__Python | PingPong/Ball.py | {
"start": 31,
"end": 1462
} | class ____:
def __init__(self, pos, vel, win, rad, minCoord, maxCoord):
self.pos = pos
self.vel = vel
self.win = win
self.rad = rad
self.minCoord = minCoord
self.maxCoord = maxCoord
def drawBall(self):
pygame.draw.circle(self.win, (255,) * 3, self.pos, self.rad, 0)
def doHorizontalFlip(self):
self.vel[0] *= -1
print("Github")
def doVerticalFlip(self):
self.vel[1] *= -1
def borderCollisionCheck(self):
if (self.pos[0] <= self.minCoord[0]) or (self.pos[0] >= self.maxCoord[0]):
self.doHorizontalFlip()
if (self.pos[1] <= self.minCoord[1]) or (self.pos[1] >= self.maxCoord[1]):
self.doVerticalFlip()
def updatePos(self):
self.pos = [self.pos[0] + self.vel[0], self.pos[1] + self.vel[1]]
def checkSlabCollision(self, slabPos): # slab pos = [xmin, ymin, xmax, ymax]
if (
self.pos[0] + self.rad > slabPos[0]
and self.pos[0] - self.rad < slabPos[2]
and self.pos[1] + self.rad > slabPos[1]
and self.pos[1] - self.rad < slabPos[3]
):
# Handle collision here (e.g., reverse ball's direction)
if self.pos[0] < slabPos[0] or self.pos[0] > slabPos[2]:
self.vel[0] *= -1
if self.pos[1] < slabPos[1] or self.pos[1] > slabPos[3]:
self.vel[1] *= -1
| Ball |
python | zostera__django-bootstrap4 | tests/test_forms.py | {
"start": 12193,
"end": 13199
} | class ____(TestCase):
def test_show_label_false(self):
form = CharFieldTestForm()
res = render_template_with_form("{% bootstrap_form form show_label=False %}", {"form": form})
self.assertIn("sr-only", res)
def test_show_label_sr_only(self):
form = CharFieldTestForm()
res = render_template_with_form("{% bootstrap_form form show_label='sr-only' %}", {"form": form})
self.assertIn("sr-only", res)
def test_show_label_skip(self):
form = CharFieldTestForm()
res = render_template_with_form("{% bootstrap_form form show_label='skip' %}", {"form": form})
self.assertNotIn("<label>", res)
def test_for_formset(self):
TestFormSet = formset_factory(CharFieldTestForm, extra=1)
test_formset = TestFormSet()
res = render_template_with_form(
"{% bootstrap_formset formset show_label=False %}",
{"formset": test_formset},
)
self.assertIn("sr-only", res)
| ShowLabelTest |
python | tox-dev__tox | src/tox/pytest.py | {
"start": 14764,
"end": 19625
} | class ____(Protocol):
def __call__(
self,
files: dict[str, Any],
base: Path | None = None,
prj_path: Path | None = None,
) -> ToxProject: ...
@pytest.fixture(name="tox_project")
def init_fixture(
tmp_path: Path,
capfd: CaptureFixture,
monkeypatch: pytest.MonkeyPatch,
mocker: MockerFixture,
) -> ToxProjectCreator:
def _init(files: dict[str, Any], base: Path | None = None, prj_path: Path | None = None) -> ToxProject:
"""Create tox projects."""
return ToxProject(files, base, prj_path or tmp_path / "p", capfd, monkeypatch, mocker)
return _init
@pytest.fixture
def empty_project(tox_project: ToxProjectCreator, monkeypatch: pytest.MonkeyPatch) -> ToxProject:
project = tox_project({"tox.ini": ""})
monkeypatch.chdir(project.path)
return project
_RUN_INTEGRATION_TEST_FLAG = "--run-integration"
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(_RUN_INTEGRATION_TEST_FLAG, action="store_true", help="run the integration tests")
def pytest_configure(config: pytest.Config) -> None:
config.addinivalue_line("markers", "integration")
config.addinivalue_line("markers", "plugin_test")
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(config: pytest.Config, items: list[pytest.Function]) -> None:
# do not require flags if called directly
if len(items) == 1: # pragma: no cover # hard to test
return
skip_int = pytest.mark.skip(reason=f"integration tests not run (no {_RUN_INTEGRATION_TEST_FLAG} flag)")
def is_integration(test_item: pytest.Function) -> bool:
return test_item.get_closest_marker("integration") is not None
integration_enabled = config.getoption(_RUN_INTEGRATION_TEST_FLAG)
if not integration_enabled: # pragma: no cover # hard to test
for item in items:
if is_integration(item):
item.add_marker(skip_int)
# run integration tests (is_integration is True) after unit tests (False)
items.sort(key=is_integration)
def enable_pypi_server(monkeypatch: pytest.MonkeyPatch, url: str | None) -> None:
if url is None: # pragma: no cover # only one of the branches can be hit depending on env
monkeypatch.delenv("PIP_INDEX_URL", raising=False)
else: # pragma: no cover
monkeypatch.setenv("PIP_INDEX_URL", url)
monkeypatch.setenv("PIP_RETRIES", str(5))
monkeypatch.setenv("PIP_TIMEOUT", str(2))
@pytest.fixture(scope="session")
def pypi_server(tmp_path_factory: pytest.TempPathFactory) -> Iterator[IndexServer]:
# takes around 2.5s
path = tmp_path_factory.mktemp("pypi")
with IndexServer(path) as server:
server.create_index("empty", "volatile=False")
yield server
@pytest.fixture(scope="session")
def _invalid_index_fake_port() -> int:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as socket_handler:
socket_handler.bind(("", 0))
return cast("int", socket_handler.getsockname()[1])
@pytest.fixture(autouse=True)
def disable_pip_pypi_access(_invalid_index_fake_port: int, monkeypatch: pytest.MonkeyPatch) -> tuple[str, str | None]:
"""Set a fake pip index url, tests that want to use a pypi server should create and overwrite this."""
previous_url = os.environ.get("PIP_INDEX_URL")
new_url = f"http://localhost:{_invalid_index_fake_port}/bad-pypi-server"
monkeypatch.setenv("PIP_INDEX_URL", new_url)
monkeypatch.setenv("PIP_RETRIES", str(0))
monkeypatch.setenv("PIP_TIMEOUT", str(0.001))
return new_url, previous_url
@pytest.fixture(name="enable_pip_pypi_access")
def enable_pip_pypi_access_fixture(
disable_pip_pypi_access: tuple[str, str | None],
monkeypatch: pytest.MonkeyPatch,
) -> str | None:
"""Set a fake pip index url, tests that want to use a pypi server should create and overwrite this."""
_, previous_url = disable_pip_pypi_access
enable_pypi_server(monkeypatch, previous_url)
return previous_url
def register_inline_plugin(mocker: MockerFixture, *args: Callable[..., Any]) -> None:
frame_info = inspect.stack()[1]
caller_module = inspect.getmodule(frame_info[0])
assert caller_module is not None # noqa: S101
plugin = ModuleType(f"{caller_module.__name__}|{frame_info[3]}")
plugin.__file__ = caller_module.__file__
plugin.__dict__.update({f.__name__: f for f in args})
mocker.patch("tox.plugin.manager.load_inline", return_value=plugin)
LogCaptureFixture = pytest.LogCaptureFixture
TempPathFactory = pytest.TempPathFactory
MonkeyPatch = pytest.MonkeyPatch
__all__ = (
"CaptureFixture",
"LogCaptureFixture",
"MonkeyPatch",
"SubRequest",
"TempPathFactory",
"ToxProject",
"ToxProjectCreator",
"ToxRunOutcome",
"check_os_environ",
"register_inline_plugin",
)
| ToxProjectCreator |
python | plotly__plotly.py | plotly/graph_objs/layout/_hoverlabel.py | {
"start": 235,
"end": 9064
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout"
_path_str = "layout.hoverlabel"
_valid_props = {
"align",
"bgcolor",
"bordercolor",
"font",
"grouptitlefont",
"namelength",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
Returns
-------
Any
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def bgcolor(self):
"""
Sets the background color of all hover labels on graph
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the border color of all hover labels on graph.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def font(self):
"""
Sets the default hover label font used by all traces on the
graph.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.layout.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def grouptitlefont(self):
"""
Sets the font for group titles in hover (unified modes).
Defaults to `hoverlabel.font`.
The 'grouptitlefont' property is an instance of Grouptitlefont
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.hoverlabel.Grouptitlefont`
- A dict of string/value properties that will be passed
to the Grouptitlefont constructor
Returns
-------
plotly.graph_objs.layout.hoverlabel.Grouptitlefont
"""
return self["grouptitlefont"]
@grouptitlefont.setter
def grouptitlefont(self, val):
self["grouptitlefont"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
Returns
-------
int
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
bgcolor
Sets the background color of all hover labels on graph
bordercolor
Sets the border color of all hover labels on graph.
font
Sets the default hover label font used by all traces on
the graph.
grouptitlefont
Sets the font for group titles in hover (unified
modes). Defaults to `hoverlabel.font`.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
bgcolor=None,
bordercolor=None,
font=None,
grouptitlefont=None,
namelength=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
bgcolor
Sets the background color of all hover labels on graph
bordercolor
Sets the border color of all hover labels on graph.
font
Sets the default hover label font used by all traces on
the graph.
grouptitlefont
Sets the font for group titles in hover (unified
modes). Defaults to `hoverlabel.font`.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("font", arg, font)
self._set_property("grouptitlefont", arg, grouptitlefont)
self._set_property("namelength", arg, namelength)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | celery__celery | celery/utils/threads.py | {
"start": 4297,
"end": 7142
} | class ____:
"""Local stack.
This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
They can be force released by using a :class:`LocalManager` or with
the :func:`release_local` function but the correct way is to pop the
item from the stack after using. When the stack is empty it will
no longer be bound to the current context (and as such released).
By calling the stack without arguments it will return a proxy that
resolves to the topmost item on the stack.
"""
def __init__(self):
self._local = Local()
def __release_local__(self):
self._local.__release_local__()
def _get__ident_func__(self):
return self._local.__ident_func__
def _set__ident_func__(self, value):
object.__setattr__(self._local, '__ident_func__', value)
__ident_func__ = property(_get__ident_func__, _set__ident_func__)
del _get__ident_func__, _set__ident_func__
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError('object unbound')
return rv
return Proxy(_lookup)
def push(self, obj):
"""Push a new item to the stack."""
rv = getattr(self._local, 'stack', None)
if rv is None:
# pylint: disable=assigning-non-slot
# This attribute is defined now.
self._local.stack = rv = []
rv.append(obj)
return rv
def pop(self):
"""Remove the topmost item from the stack.
Note:
Will return the old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, 'stack', None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
def __len__(self):
stack = getattr(self._local, 'stack', None)
return len(stack) if stack else 0
@property
def stack(self):
# get_current_worker_task uses this to find
# the original task that was executed by the worker.
stack = getattr(self._local, 'stack', None)
if stack is not None:
return stack
return []
@property
def top(self):
"""The topmost item on the stack.
Note:
If the stack is empty, :const:`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
| _LocalStack |
python | bottlepy__bottle | bottle.py | {
"start": 140829,
"end": 141949
} | class ____(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
| EventletServer |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 31516,
"end": 32636
} | class ____(MeanMetricWrapper):
"""Computes how often targets are in the top `K` predictions.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to 5.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=1)
>>> m.update_state([[0, 0, 1], [0, 1, 0]],
... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result().numpy()
0.5
>>> m.reset_state()
>>> m.update_state([[0, 0, 1], [0, 1, 0]],
... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result().numpy()
0.3
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.TopKCategoricalAccuracy()])
```
"""
def __init__(self, k=5, name='top_k_categorical_accuracy', dtype=None):
super(TopKCategoricalAccuracy, self).__init__(
top_k_categorical_accuracy, name, dtype=dtype, k=k)
| TopKCategoricalAccuracy |
python | streamlit__streamlit | lib/streamlit/runtime/scriptrunner/script_cache.py | {
"start": 840,
"end": 2864
} | class ____:
"""Thread-safe cache of Python script bytecode."""
def __init__(self) -> None:
# Mapping of script_path: bytecode
self._cache: dict[str, Any] = {}
self._lock = threading.Lock()
def clear(self) -> None:
"""Remove all entries from the cache.
Notes
-----
Threading: SAFE. May be called on any thread.
"""
with self._lock:
self._cache.clear()
def get_bytecode(self, script_path: str) -> Any:
"""Return the bytecode for the Python script at the given path.
If the bytecode is not already in the cache, the script will be
compiled first.
Raises
------
Any Exception raised while reading or compiling the script.
Notes
-----
Threading: SAFE. May be called on any thread.
"""
script_path = os.path.abspath(script_path)
with self._lock:
bytecode = self._cache.get(script_path, None)
if bytecode is not None:
# Fast path: the code is already cached.
return bytecode
# Populate the cache
with open_python_file(script_path) as f:
filebody = f.read()
if config.get_option("runner.magicEnabled"):
filebody = magic.add_magic(filebody, script_path)
bytecode = compile( # type: ignore
filebody,
# Pass in the file path so it can show up in exceptions.
script_path,
# We're compiling entire blocks of Python, so we need "exec"
# mode (as opposed to "eval" or "single").
mode="exec",
# Don't inherit any flags or "future" statements.
flags=0,
dont_inherit=1,
# Use the default optimization options.
optimize=-1,
)
self._cache[script_path] = bytecode
return bytecode
| ScriptCache |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess23.py | {
"start": 650,
"end": 1271
} | class ____(metaclass=MyMeta):
@property
def attr2(self) -> int:
return 2
@property
def attr3(self) -> int:
return 3
@property
def attr4(self) -> int:
return 4
attr5 = "5"
reveal_type(A.attr1, expected_text="int")
reveal_type(A().attr2, expected_text="int")
reveal_type(A.attr2, expected_text="property")
reveal_type(A().attr3, expected_text="int")
reveal_type(A.attr3, expected_text="int")
reveal_type(A.attr4, expected_text="property")
reveal_type(A.attr5, expected_text="int")
reveal_type(A.attr6, expected_text="int")
reveal_type(A.attr7, expected_text="complex")
| A |
python | google__pytype | pytype/rewrite/load_abstract_test.py | {
"start": 400,
"end": 1265
} | class ____(test_utils.ContextfulTestBase):
def test_class(self):
int_cls = self.ctx.abstract_loader.load_builtin('int')
self.assertIsInstance(int_cls, abstract.SimpleClass)
self.assertEqual(int_cls.name, 'int')
def test_function(self):
abs_func = self.ctx.abstract_loader.load_builtin('abs')
self.assertIsInstance(abs_func, abstract.PytdFunction)
self.assertEqual(abs_func.name, 'abs')
def test_constant(self):
ellipsis = self.ctx.abstract_loader.load_builtin('Ellipsis')
self.assertIsInstance(ellipsis, abstract.PythonConstant)
self.assertEqual(ellipsis.constant, Ellipsis)
def test_none(self):
self.assertIs(
self.ctx.abstract_loader.load_builtin('None'), self.ctx.consts[None]
)
self.assertIs(
self.ctx.abstract_loader.load_builtin('NoneType'), self.ctx.consts[None]
)
| LoadBuiltinTest |
python | gevent__gevent | src/gevent/tests/lock_tests.py | {
"start": 15645,
"end": 16003
} | class ____(BaseSemaphoreTests):
"""
Tests for bounded semaphores.
"""
def test_release_unacquired(self):
# Cannot go past the initial value
sem = self.semtype()
self.assertRaises(ValueError, sem.release)
sem.acquire()
sem.release()
self.assertRaises(ValueError, sem.release)
| BoundedSemaphoreTests |
python | getsentry__sentry | src/sentry/notifications/notification_action/issue_alert_registry/handlers/msteams_issue_alert_handler.py | {
"start": 283,
"end": 347
} | class ____(BaseIssueAlertHandler):
pass
| MSTeamsIssueAlertHandler |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass15.py | {
"start": 247,
"end": 410
} | class ____:
children: "C"
def test(self):
for child in self.children:
reveal_type(child, expected_text="ClassB")
C = List[ClassB]
| ClassB |
python | mlflow__mlflow | mlflow/genai/judges/tools/base.py | {
"start": 422,
"end": 1499
} | class ____(ABC):
"""
Abstract base class for tools that can be used by MLflow judges.
Tools provide additional capabilities to judges for analyzing traces,
performing calculations, or accessing external data sources during evaluation.
"""
@property
@abstractmethod
def name(self) -> str:
"""
Return the unique name of the tool.
Returns:
Tool name used for registration and invocation
"""
@abstractmethod
def get_definition(self) -> ToolDefinition:
"""
Get the tool definition in LiteLLM/OpenAI function calling format.
Returns:
ToolDefinition object containing the tool specification
"""
@abstractmethod
def invoke(self, trace: Trace, **kwargs) -> Any:
"""
Invoke the tool with the provided trace and arguments.
Args:
trace: The MLflow trace object to analyze
kwargs: Additional keyword arguments for the tool
Returns:
Result of the tool execution
"""
| JudgeTool |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/io_management/input_managers.py | {
"start": 288,
"end": 1097
} | class ____(dg.ConfigurableIOManager):
def handle_output(self, context: dg.OutputContext, obj):
pass
def load_input(self, context: dg.InputContext):
pass
@dg.op
def produce_pandas_output():
return 1
def read_dataframe_from_table(*_args, **_kwargs):
pass
def write_dataframe_to_table(*_args, **_kwargs):
pass
pd_series_io_manager: Any = None
# start_different_input_managers
@dg.op
def op_1():
return [1, 2, 3]
@dg.op(ins={"a": dg.In(input_manager_key="pandas_series")})
def op_2(a):
return pd.concat([a, pd.Series([4, 5, 6])])
@dg.job(resource_defs={"pandas_series": pd_series_io_manager})
def a_job():
op_2(op_1())
# end_different_input_managers
# start_plain_input_manager
# in this case PandasIOManager is an existing IO Manager
| TableIOManager |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 28781,
"end": 28891
} | class ____(BaseModel, extra="forbid"):
delete: "PointsSelector" = Field(..., description="")
| DeleteOperation |
python | huggingface__transformers | src/transformers/models/modernbert/modular_modernbert.py | {
"start": 70940,
"end": 77166
} | class ____(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.config = config
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(config)
self.drop = torch.nn.Dropout(config.classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
sliding_window_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
indices: Optional[torch.Tensor] = None,
cu_seqlens: Optional[torch.Tensor] = None,
max_seqlen: Optional[int] = None,
batch_size: Optional[int] = None,
seq_len: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
r"""
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
self._maybe_set_compile()
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
sliding_window_mask=sliding_window_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen=max_seqlen,
batch_size=batch_size,
seq_len=seq_len,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = outputs[0] # shape (num_choices, seq_len, hidden_size)
# If classifier_pooling is "cls", isolate the <cls> token
if self.config.classifier_pooling == "cls":
indices_0 = torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device)
# for left or right padding, <cls> is the first non-pad token
if attention_mask is not None:
cls_mask = attention_mask.argmax(dim=-1).to(last_hidden_state.device)
# if no pad, <cls> is the first token
else:
cls_mask = torch.tensor(0, dtype=torch.long, device=last_hidden_state.device)
# extract the <cls> token for the logits
last_hidden_state = last_hidden_state[indices_0, cls_mask]
# If classifier_pooling is "mean", pool the hidden states by averaging over the sequence length
elif self.config.classifier_pooling == "mean":
num_non_pad_tokens = attention_mask.sum(dim=1, keepdim=True)
last_hidden_state = (last_hidden_state * attention_mask.unsqueeze(-1)).sum(dim=1) / num_non_pad_tokens
pooled_output = self.head(last_hidden_state)
pooled_output = self.drop(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"ModernBertConfig",
"ModernBertModel",
"ModernBertPreTrainedModel",
"ModernBertForMaskedLM",
"ModernBertForSequenceClassification",
"ModernBertForTokenClassification",
"ModernBertForQuestionAnswering",
"ModernBertForMultipleChoice",
]
| ModernBertForMultipleChoice |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol15.py | {
"start": 171,
"end": 307
} | class ____(Protocol):
@property
def f(self: T) -> T: ...
def m(self, item: T, callback: Callable[[T], str]) -> str: ...
| Proto |
python | pytorch__pytorch | test/dynamo/test_trace_rules.py | {
"start": 3454,
"end": 11171
} | class ____:
"""
Track the objects, object id - name pairs, and name - dynamo wrapping rule pairs
from the heuristic defined in `gen_allowed_objs_and_ids`.
"""
object_ids: dict[int, str]
c_binding_in_graph_functions: set[Any]
non_c_binding_in_graph_functions: set[Any]
name_rule_map: dict[str, Any]
def gen_allowed_objs_and_ids(record=False, c_binding_only=True) -> AllowedObjects:
"""
Walk torch.* and get the ids of all the stuff in it
"""
warnings.filterwarnings("ignore", category=UserWarning, module="torch.distributed")
torch_object_ids = {}
c_binding_in_graph_functions = set()
non_c_binding_in_graph_functions = set()
torch_name_rule_map = {}
# In some platforms, these functions were loaded as classes instead of functions.
# To mitigate these weird cases, we need this special check.
def is_special_functions(obj):
return hashable(obj) and obj in {
torch._C._cuda_isCurrentStreamCapturing,
torch._C._graph_pool_handle,
}
# Add obj to c_binding_in_graph_functions set or non_c_binding_in_graph_functions set
# if it's a torch function or method.
# This is used to generate the in graph function list based on heuristic.
def heuristic_record_if_in_graph_function(obj, module, name):
try:
if hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
except Exception:
pass
if isinstance(
obj,
(
types.FunctionType,
types.BuiltinFunctionType,
types.MethodDescriptorType,
types.WrapperDescriptorType,
),
) or is_special_functions(obj):
torch_name_rule_map[f"{module.__name__}.{name}"] = (
TorchInGraphFunctionVariable
)
if c_binding_only:
if not hasattr(obj, "__code__"):
c_binding_in_graph_functions.add(obj)
else:
if hasattr(obj, "__code__"):
non_c_binding_in_graph_functions.add(obj)
else:
c_binding_in_graph_functions.add(obj)
def _is_allowed_module_prefix(obj):
allowed_modules = ("torch", "math")
# torch.nn.modules.rnn is disallowed because these modules internally
# flatten their parameters. This flattening process will call
# Tensor.set_ with a Storage, and Storages cannot be traced with
# AOTAutograd; so we need to graph-break. To ensure this, we inline
# these functions, rather than keep them opaque-ly in the graph.
disallowed_modules = [
"torch.optim.",
"torch.nn.modules.rnn.",
"torch._dynamo.",
"torch._C._dynamo.",
"torch._inductor.",
"torch._C.inductor.",
"torch.fx.",
"torch._C._autograd",
"torch._C._cudart",
"torch._C._distributed_autograd",
"torch._C._distributed_c10d",
"torch._C._distributed_rpc",
"torch._C._functorch",
"torch._C._monitor",
"torch._C._nvtx",
"torch._C._lazy",
"torch._C._profiler",
"torch.__config__",
"torch._custom_op",
"torch._decomp",
"torch._dispatch",
"torch._export",
"torch._functorch.make_functional",
"torch._functorch.compile_utils",
"torch._functorch.partitioners",
"torch._functorch.aot_autograd",
"torch._functorch.compilers",
"torch._functorch.fx_minifier",
"torch.autograd.profiler_util",
"torch.autograd.profiler",
"torch._jit_internal",
"torch._library",
"torch._lobpcg",
"torch._logging",
"torch._meta_registrations",
"torch._namedtensor_internals",
"torch._numpy",
"torch._sources",
"torch._subclasses",
"torch._tensor",
"torch._tensor_str",
"torch._utils",
"torch._utils_internal",
"torch._vmap_internals",
"torch.compiler",
"torch.distributed",
"torch.export",
"torch.hub",
"torch.jit",
"torch.library",
"torch.masked.maskedtensor",
"torch.nn.init",
"torch.nn.modules.module",
"torch.nn.parallel",
"torch.nn.utils",
"torch.multiprocessing",
"torch.onnx",
"torch.overrides",
"torch.package",
"torch.profiler",
"torch.serialization",
"torch.storage",
"torch.utils",
"torch.distributed.",
]
allowed_modules_dot = tuple([x + "." for x in allowed_modules])
module = inspect.getmodule(obj)
if module is None:
return False
mod_name = module.__name__
if any(mod_name.startswith(m) for m in disallowed_modules):
return False
return mod_name in allowed_modules or mod_name.startswith(allowed_modules_dot)
def _find_torch_objects(module):
if any(
module.__name__.startswith(mod_name)
for mod_name in config.allowed_functions_module_string_ignorelist
):
return
torch_object_ids[id(module)] = module.__name__
for name, obj in list(module.__dict__.items()):
if id(obj) not in torch_object_ids:
# Dynamo allows all builtins into the graph and does not attempt
# to introspect into them. We don't want to allow instances of
# HigherOrderOperator into the graph all the time (Dynamo needs
# to introspect the body functions of these HigherOrderOperator
# first, decide they are safe, and then allow them into the graph).
# So we exclude HigherOrderOperator from being a builtin.
import torch._ops
if isinstance(obj, torch._ops.HigherOrderOperator):
continue
# We want to trace through `grad` and `vmap`
if obj in (
torch.func.grad,
deprecated_func.grad,
torch.func.vmap,
deprecated_func.vmap,
torch.nn.functional.triplet_margin_with_distance_loss,
torch.cond,
):
continue
if isinstance(obj, types.ModuleType):
if obj.__name__.startswith("torch.") and _is_allowed_module_prefix(
obj
):
torch_object_ids[id(obj)] = f"{module.__name__}.{name}"
_find_torch_objects(obj)
elif _is_allowed_module_prefix(obj):
if record:
heuristic_record_if_in_graph_function(obj, module, name)
torch_object_ids[id(obj)] = f"{module.__name__}.{name}"
elif inspect.getmodule(obj) is None and not is_safe_constant(obj):
if record:
heuristic_record_if_in_graph_function(obj, module, name)
torch_object_ids[id(obj)] = f"{module.__name__}.{name}"
_find_torch_objects(torch)
_find_torch_objects(math)
return AllowedObjects(
torch_object_ids,
c_binding_in_graph_functions,
non_c_binding_in_graph_functions,
torch_name_rule_map,
)
| AllowedObjects |
python | doocs__leetcode | lcci/17.15.Longest Word/Solution.py | {
"start": 0,
"end": 482
} | class ____:
def longestWord(self, words: List[str]) -> str:
def dfs(w: str) -> bool:
if not w:
return True
for k in range(1, len(w) + 1):
if w[:k] in s and dfs(w[k:]):
return True
return False
s = set(words)
words.sort(key=lambda x: (-len(x), x))
for w in words:
s.remove(w)
if dfs(w):
return w
return ""
| Solution |
python | pennersr__django-allauth | allauth/socialaccount/adapter.py | {
"start": 704,
"end": 14592
} | class ____(BaseAdapter):
"""The adapter class allows you to override various functionality of the
``allauth.socialaccount`` app. To do so, point ``settings.SOCIALACCOUNT_ADAPTER`` to
your own class that derives from ``DefaultSocialAccountAdapter`` and override the
behavior by altering the implementation of the methods according to your own
needs.
"""
error_messages = {
"email_taken": _(
"An account already exists with this email address."
" Please sign in to that account first, then connect"
" your %s account."
),
"invalid_token": _("Invalid token."),
"no_password": _("Your account has no password set up."),
"no_verified_email": _("Your account has no verified email address."),
"disconnect_last": _(
"You cannot disconnect your last remaining third-party account."
),
"connected_other": _(
"The third-party account is already connected to a different account."
),
}
def pre_social_login(self, request, sociallogin):
"""
Invoked just after a user successfully authenticates via a
social provider, but before the login is actually processed
(and before the pre_social_login signal is emitted).
You can use this hook to intervene, e.g. abort the login by
raising an ImmediateHttpResponse
Why both an adapter hook and the signal? Intervening in
e.g. the flow from within a signal handler is bad -- multiple
handlers may be active and are executed in undetermined order.
"""
pass
def on_authentication_error(
self,
request,
provider,
error=None,
exception=None,
extra_context=None,
):
"""
Invoked when there is an error in the authentication cycle. In this
case, pre_social_login will not be reached.
You can use this hook to intervene, e.g. redirect to an
educational flow by raising an ImmediateHttpResponse.
"""
if hasattr(self, "authentication_error"):
warnings.warn(
"adapter.authentication_error() is deprecated, use adapter.on_authentication_error()"
)
self.authentication_error(
request,
provider.id,
error=error,
exception=exception,
extra_context=extra_context,
)
def new_user(self, request, sociallogin):
"""
Instantiates a new User instance.
"""
return get_account_adapter().new_user(request)
def save_user(self, request, sociallogin, form=None):
"""
Saves a newly signed up social login. In case of auto-signup,
the signup form is not available.
"""
u = sociallogin.user
u.set_unusable_password()
account_adapter = get_account_adapter()
if form:
account_adapter.save_user(request, u, form)
else:
account_adapter.populate_username(request, u)
sociallogin.save(request)
return u
def populate_user(self, request, sociallogin, data):
"""
Hook that can be used to further populate the user instance.
For convenience, we populate several common fields.
Note that the user instance being populated represents a
suggested User instance that represents the social user that is
in the process of being logged in.
The User instance need not be completely valid and conflict
free. For example, verifying whether or not the username
already exists, is not a responsibility.
"""
username = data.get("username")
first_name = data.get("first_name")
last_name = data.get("last_name")
email = data.get("email")
name = data.get("name")
user = sociallogin.user
user_username(user, username or "")
user_email(user, valid_email_or_none(email) or "")
name_parts = (name or "").partition(" ")
user_field(user, "first_name", first_name or name_parts[0])
user_field(user, "last_name", last_name or name_parts[2])
return user
def get_connect_redirect_url(self, request, socialaccount):
"""
Returns the default URL to redirect to after successfully
connecting a social account.
"""
url = reverse("socialaccount_connections")
return url
def validate_disconnect(self, account, accounts) -> None:
"""
Validate whether or not the socialaccount account can be
safely disconnected.
"""
pass
def is_auto_signup_allowed(self, request, sociallogin):
# If email is specified, check for duplicate and if so, no auto signup.
auto_signup = app_settings.AUTO_SIGNUP
return auto_signup
def is_open_for_signup(self, request, sociallogin):
"""
Checks whether or not the site is open for signups.
Next to simply returning True/False you can also intervene the
regular flow by raising an ImmediateHttpResponse
"""
return get_account_adapter(request).is_open_for_signup(request)
def get_signup_form_initial_data(self, sociallogin):
user = sociallogin.user
email = user_email(user)
if not email and len(sociallogin.email_addresses) > 0:
email = sociallogin.email_addresses[0].email
initial = {
"email": email or "",
"username": user_username(user) or "",
"first_name": user_field(user, "first_name") or "",
"last_name": user_field(user, "last_name") or "",
}
return initial
def deserialize_instance(self, model, data):
return deserialize_instance(model, data)
def serialize_instance(self, instance):
return serialize_instance(instance)
def list_providers(self, request):
from allauth.socialaccount.providers import registry
ret = []
provider_classes = registry.get_class_list()
apps = self.list_apps(request)
apps_map = {}
for app in apps:
apps_map.setdefault(app.provider, []).append(app)
for provider_class in provider_classes:
provider_apps = apps_map.get(provider_class.id, [])
if not provider_apps:
if provider_class.uses_apps:
continue
provider_apps = [None]
for app in provider_apps:
provider = provider_class(request=request, app=app)
ret.append(provider)
return ret
def get_provider(self, request, provider, client_id=None):
"""Looks up a `provider`, supporting subproviders by looking up by
`provider_id`.
"""
from allauth.socialaccount.providers import registry
provider_class = registry.get_class(provider)
if provider_class is None or provider_class.uses_apps:
app = self.get_app(request, provider=provider, client_id=client_id)
if not provider_class:
# In this case, the `provider` argument passed was a
# `provider_id`.
provider_class = registry.get_class(app.provider)
if not provider_class:
raise ImproperlyConfigured(f"unknown provider: {app.provider}")
return provider_class(request, app=app)
elif provider_class:
assert not provider_class.uses_apps # nosec
return provider_class(request, app=None)
else:
raise ImproperlyConfigured(f"unknown provider: {provider}")
def list_apps(self, request, provider=None, client_id=None):
"""SocialApp's can be setup in the database, or, via
`settings.SOCIALACCOUNT_PROVIDERS`. This methods returns a uniform list
of all known apps matching the specified criteria, and blends both
(db/settings) sources of data.
"""
# NOTE: Avoid loading models at top due to registry boot...
from allauth.socialaccount.models import SocialApp
# Map provider to the list of apps.
provider_to_apps = {}
# First, populate it with the DB backed apps.
if request:
db_apps = SocialApp.objects.on_site(request)
else:
db_apps = SocialApp.objects.all()
if provider:
db_apps = db_apps.filter(Q(provider=provider) | Q(provider_id=provider))
if client_id:
db_apps = db_apps.filter(client_id=client_id)
for app in db_apps:
apps = provider_to_apps.setdefault(app.provider, [])
apps.append(app)
# Then, extend it with the settings backed apps.
for p, pcfg in app_settings.PROVIDERS.items():
app_configs = pcfg.get("APPS")
if app_configs is None:
app_config = pcfg.get("APP")
if app_config is None:
continue
app_configs = [app_config]
apps = provider_to_apps.setdefault(p, [])
for config in app_configs:
app = SocialApp(provider=p)
for field in [
"name",
"provider_id",
"client_id",
"secret",
"key",
"settings",
]:
if field in config:
setattr(app, field, config[field])
if "certificate_key" in config:
warnings.warn("'certificate_key' should be moved into app.settings")
app.settings["certificate_key"] = config["certificate_key"]
if client_id and app.client_id != client_id:
continue
if (
provider
and app.provider_id != provider
and app.provider != provider
):
continue
apps.append(app)
# Flatten the list of apps.
apps = []
for provider_apps in provider_to_apps.values():
apps.extend(provider_apps)
return apps
def get_app(self, request, provider, client_id=None):
from allauth.socialaccount.models import SocialApp
apps = self.list_apps(request, provider=provider, client_id=client_id)
if len(apps) > 1:
visible_apps = [app for app in apps if not app.settings.get("hidden")]
if len(visible_apps) != 1:
raise MultipleObjectsReturned
apps = visible_apps
elif len(apps) == 0:
raise SocialApp.DoesNotExist()
return apps[0]
def send_notification_mail(self, *args, **kwargs):
return get_account_adapter().send_notification_mail(*args, **kwargs)
def get_requests_session(self):
import requests
session = requests.Session()
session.request = functools.partial(
session.request, timeout=app_settings.REQUESTS_TIMEOUT
)
return session
def is_email_verified(self, provider, email):
"""
Returns ``True`` iff the given email encountered during a social
login for the given provider is to be assumed verified.
This can be configured with a ``"verified_email"`` key in the provider
app settings, or a ``"VERIFIED_EMAIL"`` in the global provider settings
(``SOCIALACCOUNT_PROVIDERS``). Both can be set to ``False`` or
``True``, or, a list of domains to match email addresses against.
"""
verified_email = None
if provider.app:
verified_email = provider.app.settings.get("verified_email")
if verified_email is None:
settings = provider.get_settings()
verified_email = settings.get("VERIFIED_EMAIL", False)
if isinstance(verified_email, bool):
pass
elif isinstance(verified_email, list):
email_domain = email.partition("@")[2].lower()
verified_domains = [d.lower() for d in verified_email]
verified_email = email_domain in verified_domains
else:
raise ImproperlyConfigured("verified_email wrongly configured")
return verified_email
def can_authenticate_by_email(self, login, email):
"""
Returns ``True`` iff authentication by email is active for this login/email.
This can be configured with a ``"email_authentication"`` key in the provider
app settings, or a ``"VERIFIED_EMAIL"`` in the global provider settings
(``SOCIALACCOUNT_PROVIDERS``).
"""
ret = None
provider = login.provider
if provider.app:
ret = provider.app.settings.get("email_authentication")
if ret is None:
ret = app_settings.EMAIL_AUTHENTICATION or provider.get_settings().get(
"EMAIL_AUTHENTICATION", False
)
return ret
def generate_state_param(self, state: dict) -> str:
"""
To preserve certain state before the handshake with the provider
takes place, and be able to verify/use that state later on, a `state`
parameter is typically passed to the provider. By default, a random
string sufficies as the state parameter value is actually just a
reference/pointer to the actual state. You can use this adapter method
to alter the generation of the `state` parameter.
"""
from allauth.socialaccount.internal.statekit import STATE_ID_LENGTH
return get_random_string(STATE_ID_LENGTH)
def get_adapter(request=None):
return import_attribute(app_settings.ADAPTER)(request)
| DefaultSocialAccountAdapter |
python | pytorch__pytorch | test/cpp/aoti_inference/compile_model.py | {
"start": 671,
"end": 2378
} | class ____(torch.nn.Module):
"""
a wrapper nn.Module that instantiates its forward method
on MyAOTIClass
"""
def __init__(self, lib_path, device):
super().__init__()
self.aoti_custom_op = torch.classes.aoti.MyAOTIClass(
lib_path,
device,
)
def forward(self, *x):
outputs = self.aoti_custom_op.forward(x)
return tuple(outputs)
def make_script_module(lib_path, device, *inputs):
m = MyAOTIModule(lib_path, device)
# sanity check
m(*inputs)
return torch.jit.trace(m, inputs)
def compile_model(device, data):
module = SimpleModule().to(device)
x = torch.randn((4, 4), device=device)
inputs = (x,)
# make batch dimension
batch_dim = Dim("batch", min=1, max=1024)
dynamic_shapes = {
"x": {0: batch_dim},
}
with torch.no_grad():
# aot-compile the module into a .so pointed by lib_path
lib_path = torch._export.aot_compile(
module, inputs, dynamic_shapes=dynamic_shapes
)
script_module = make_script_module(lib_path, device, *inputs)
aoti_script_model = f"script_model_{device}.pt"
script_module.save(aoti_script_model)
# save sample inputs and ref output
with torch.no_grad():
ref_output = module(*inputs)
data.update(
{
f"inputs_{device}": list(inputs),
f"outputs_{device}": [ref_output],
}
)
def main():
data = {}
for device in ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]:
compile_model(device, data)
torch.jit.script(TensorSerializer(data)).save("script_data.pt")
if __name__ == "__main__":
main()
| MyAOTIModule |
python | getsentry__sentry | tests/relay_integration/lang/java/test_plugin.py | {
"start": 2559,
"end": 27652
} | class ____ : ComponentActivity() {
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_main)
InnerClass().whoops()
val list = findViewById<RecyclerView>(R.id.list)
list.layoutManager = LinearLayoutManager(this)
list.adapter = TrackAdapter()
}
class InnerClass {
fun whoops() {
AnotherInnerClass().whoops2()
}
}
class AnotherInnerClass {
fun whoops2() {
AdditionalInnerClass().whoops3()
}
}
class AdditionalInnerClass {
fun whoops3() {
OneMoreInnerClass().whoops4()
}
}
class OneMoreInnerClass {
fun whoops4() {
throw RuntimeException("whoops")
}
}
}
"""
PROGUARD_SOURCE_LOOKUP_UUID = "05d96b1c-1786-477c-8615-d3cf83e027c7"
PROGUARD_SOURCE_LOOKUP_SOURCE = b"""\
io.sentry.samples.instrumentation.ui.EditActivity -> io.sentry.samples.instrumentation.ui.EditActivity:
# {"id":"sourceFile","fileName":"EditActivity.kt"}
int $r8$clinit -> a
0:65535:void <init>():15:15 -> <init>
1:6:void onCreate(android.os.Bundle):18:18 -> onCreate
7:12:void onCreate(android.os.Bundle):19:19 -> onCreate
13:22:void onCreate(android.os.Bundle):21:21 -> onCreate
23:32:void onCreate(android.os.Bundle):22:22 -> onCreate
33:42:void onCreate(android.os.Bundle):23:23 -> onCreate
43:49:void onCreate(android.os.Bundle):24:24 -> onCreate
50:71:void onCreate(android.os.Bundle):26:26 -> onCreate
72:73:java.lang.String io.sentry.samples.instrumentation.data.Track.getName():46:46 -> onCreate
72:73:void onCreate(android.os.Bundle):28 -> onCreate
74:76:void onCreate(android.os.Bundle):28:28 -> onCreate
77:78:java.lang.String io.sentry.samples.instrumentation.data.Track.getComposer():48:48 -> onCreate
77:78:void onCreate(android.os.Bundle):29 -> onCreate
79:81:void onCreate(android.os.Bundle):29:29 -> onCreate
82:83:long io.sentry.samples.instrumentation.data.Track.getMillis():51:51 -> onCreate
82:83:void onCreate(android.os.Bundle):30 -> onCreate
84:90:void onCreate(android.os.Bundle):30:30 -> onCreate
91:92:float io.sentry.samples.instrumentation.data.Track.getPrice():53:53 -> onCreate
91:92:void onCreate(android.os.Bundle):31 -> onCreate
93:102:void onCreate(android.os.Bundle):31:31 -> onCreate
103:119:void onCreate(android.os.Bundle):34:34 -> onCreate
io.sentry.samples.instrumentation.ui.EditActivity$$ExternalSyntheticLambda0 -> io.sentry.samples.instrumentation.ui.g:
# {"id":"sourceFile","fileName":"R8$$SyntheticClass"}
# {"id":"com.android.tools.r8.synthesized"}
android.widget.EditText io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.f$4 -> e
android.widget.EditText io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.f$3 -> d
android.widget.EditText io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.f$2 -> c
io.sentry.samples.instrumentation.data.Track io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.f$0 -> a
android.widget.EditText io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.f$1 -> b
io.sentry.samples.instrumentation.ui.EditActivity io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.f$5 -> f
void io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.<init>(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity) -> <init>
# {"id":"com.android.tools.r8.synthesized"}
19:21:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):0:0 -> onMenuItemClick
19:21:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
22:35:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):35:35 -> onMenuItemClick
22:35:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
36:44:void io.sentry.samples.instrumentation.ui.AnotherClassInSameFile$AnotherInnerClass.helloOtherInner():26:26 -> onMenuItemClick
36:44:void io.sentry.samples.instrumentation.ui.AnotherClassInSameFile.otherFun():21 -> onMenuItemClick
36:44:void io.sentry.samples.instrumentation.ui.AnotherClassInSameFile.helloOther():17 -> onMenuItemClick
36:44:void io.sentry.samples.instrumentation.ui.SomeService$InnerClassOfSomeService.helloInner():10 -> onMenuItemClick
36:44:void io.sentry.samples.instrumentation.ui.SomeService.helloThere():5 -> onMenuItemClick
36:44:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):37 -> onMenuItemClick
36:44:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
45:58:io.sentry.protocol.SentryId io.sentry.Sentry.captureException(java.lang.Throwable):433:433 -> onMenuItemClick
45:58:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):39 -> onMenuItemClick
45:58:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
59:68:io.sentry.ITransaction io.sentry.Sentry.startTransaction(java.lang.String,java.lang.String,boolean):697:697 -> onMenuItemClick
59:68:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):42 -> onMenuItemClick
59:68:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
69:71:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):42:42 -> onMenuItemClick
69:71:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
72:79:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):48:48 -> onMenuItemClick
72:79:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
80:87:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):49:49 -> onMenuItemClick
80:87:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
88:95:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):50:50 -> onMenuItemClick
88:95:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
96:103:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):51:51 -> onMenuItemClick
96:103:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
104:125:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):52:52 -> onMenuItemClick
104:125:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
126:142:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):53:53 -> onMenuItemClick
126:142:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
143:164:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):54:54 -> onMenuItemClick
143:164:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
165:175:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):64:64 -> onMenuItemClick
165:175:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
176:187:void io.sentry.samples.instrumentation.ui.EditActivity.addNewTrack(java.lang.String,java.lang.String,long,float):84:84 -> onMenuItemClick
176:187:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):64 -> onMenuItemClick
176:187:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
188:190:void io.sentry.samples.instrumentation.data.Track.<init>(long,java.lang.String,java.lang.Long,java.lang.String,java.lang.Long,java.lang.Long,long,java.lang.Long,float,int,kotlin.jvm.internal.DefaultConstructorMarker):43:43 -> onMenuItemClick
188:190:void io.sentry.samples.instrumentation.ui.EditActivity.addNewTrack(java.lang.String,java.lang.String,long,float):84 -> onMenuItemClick
188:190:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):64 -> onMenuItemClick
188:190:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
191:198:void io.sentry.samples.instrumentation.ui.EditActivity.addNewTrack(java.lang.String,java.lang.String,long,float):94:94 -> onMenuItemClick
191:198:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):64 -> onMenuItemClick
191:198:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
199:211:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):66:66 -> onMenuItemClick
199:211:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
212:227:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):67:67 -> onMenuItemClick
212:227:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
228:238:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):69:69 -> onMenuItemClick
228:238:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
239:250:io.sentry.samples.instrumentation.data.Track io.sentry.samples.instrumentation.data.Track.copy$default(io.sentry.samples.instrumentation.data.Track,long,java.lang.String,java.lang.Long,java.lang.String,java.lang.Long,java.lang.Long,long,java.lang.Long,float,int,java.lang.Object):0:0 -> onMenuItemClick
239:250:void io.sentry.samples.instrumentation.ui.EditActivity.update(io.sentry.samples.instrumentation.data.Track,java.lang.String,java.lang.String,long,float):100 -> onMenuItemClick
239:250:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):69 -> onMenuItemClick
239:250:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
251:265:io.sentry.samples.instrumentation.data.Track io.sentry.samples.instrumentation.data.Track.copy(long,java.lang.String,java.lang.Long,java.lang.String,java.lang.Long,java.lang.Long,long,java.lang.Long,float):0:0 -> onMenuItemClick
251:265:io.sentry.samples.instrumentation.data.Track io.sentry.samples.instrumentation.data.Track.copy$default(io.sentry.samples.instrumentation.data.Track,long,java.lang.String,java.lang.Long,java.lang.String,java.lang.Long,java.lang.Long,long,java.lang.Long,float,int,java.lang.Object):0 -> onMenuItemClick
251:265:void io.sentry.samples.instrumentation.ui.EditActivity.update(io.sentry.samples.instrumentation.data.Track,java.lang.String,java.lang.String,long,float):100 -> onMenuItemClick
251:265:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):69 -> onMenuItemClick
251:265:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
266:273:void io.sentry.samples.instrumentation.ui.EditActivity.update(io.sentry.samples.instrumentation.data.Track,java.lang.String,java.lang.String,long,float):106:106 -> onMenuItemClick
266:273:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):69 -> onMenuItemClick
266:273:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
274:286:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):71:71 -> onMenuItemClick
274:286:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
287:301:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):72:72 -> onMenuItemClick
287:301:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
302:306:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):74:74 -> onMenuItemClick
302:306:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
307:312:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):75:75 -> onMenuItemClick
307:312:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
313:316:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):56:56 -> onMenuItemClick
313:316:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
317:320:boolean io.sentry.samples.instrumentation.ui.EditActivity.onCreate$lambda-1(io.sentry.samples.instrumentation.data.Track,android.widget.EditText,android.widget.EditText,android.widget.EditText,android.widget.EditText,io.sentry.samples.instrumentation.ui.EditActivity,android.view.MenuItem):61:61 -> onMenuItemClick
317:320:boolean io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0.onMenuItemClick(android.view.MenuItem):0 -> onMenuItemClick
# {"id":"com.android.tools.r8.synthesized"}
"""
EDIT_ACTIVITY_SOURCE = b"""\
package io.sentry.samples.instrumentation.ui
import android.os.Bundle
import android.widget.EditText
import android.widget.Toast
import androidx.activity.ComponentActivity
import androidx.appcompat.widget.Toolbar
import io.sentry.Sentry
import io.sentry.SpanStatus
import io.sentry.samples.instrumentation.R
import io.sentry.samples.instrumentation.SampleApp
import io.sentry.samples.instrumentation.data.Track
import kotlinx.coroutines.runBlocking
| MainActivity |
python | has2k1__plotnine | plotnine/geoms/geom_rect.py | {
"start": 416,
"end": 3717
} | class ____(geom):
"""
Rectangles
{usage}
Parameters
----------
{common_parameters}
"""
DEFAULT_AES = {
"color": None,
"fill": "#595959",
"linetype": "solid",
"size": 0.5,
"alpha": 1,
}
REQUIRED_AES = {"xmax", "xmin", "ymax", "ymin"}
DEFAULT_PARAMS = {
"stat": "identity",
"position": "identity",
"na_rm": False,
}
draw_legend = staticmethod(geom_polygon.draw_legend)
def draw_panel(
self,
data: pd.DataFrame,
panel_params: panel_view,
coord: coord,
ax: Axes,
):
"""
Plot all groups
"""
if not coord.is_linear:
data = _rectangles_to_polygons(data)
for _, gdata in data.groupby("group"):
gdata.reset_index(inplace=True, drop=True)
geom_polygon.draw_group(
gdata, panel_params, coord, ax, self.params
)
else:
self.draw_group(data, panel_params, coord, ax, self.params)
@staticmethod
def draw_group(
data: pd.DataFrame,
panel_params: panel_view,
coord: coord,
ax: Axes,
params: dict[str, Any],
):
from matplotlib.collections import PolyCollection
data = coord.transform(data, panel_params, munch=True)
linewidth = data["size"] * SIZE_FACTOR
limits = zip(data["xmin"], data["xmax"], data["ymin"], data["ymax"])
verts = [[(l, b), (l, t), (r, t), (r, b)] for (l, r, b, t) in limits]
fill = to_rgba(data["fill"], data["alpha"])
color = data["color"]
# prevent unnecessary borders
if all(color.isna()):
color = "none"
col = PolyCollection(
verts,
facecolors=fill,
edgecolors=color,
linestyles=data["linetype"],
linewidths=linewidth,
zorder=params["zorder"],
rasterized=params["raster"],
)
ax.add_collection(col)
def _rectangles_to_polygons(df: pd.DataFrame) -> pd.DataFrame:
"""
Convert rect data to polygons
Parameters
----------
df : dataframe
Dataframe with *xmin*, *xmax*, *ymin* and *ymax* columns,
plus others for aesthetics ...
Returns
-------
data : dataframe
Dataframe with *x* and *y* columns, plus others for
aesthetics ...
"""
n = len(df)
# Helper indexing arrays
xmin_idx = np.tile([True, True, False, False], n)
xmax_idx = ~xmin_idx
ymin_idx = np.tile([True, False, False, True], n)
ymax_idx = ~ymin_idx
# There are 2 x and 2 y values for each of xmin, xmax, ymin & ymax
# The positions are as laid out in the indexing arrays
# x and y values
x = np.empty(n * 4)
y = np.empty(n * 4)
x[xmin_idx] = df["xmin"].repeat(2)
x[xmax_idx] = df["xmax"].repeat(2)
y[ymin_idx] = df["ymin"].repeat(2)
y[ymax_idx] = df["ymax"].repeat(2)
# Aesthetic columns and others
other_cols = df.columns.difference(
["x", "y", "xmin", "xmax", "ymin", "ymax"]
)
d = {str(col): np.repeat(df[col].to_numpy(), 4) for col in other_cols}
data = pd.DataFrame({"x": x, "y": y, **d})
return data
| geom_rect |
python | huggingface__transformers | utils/modular_integrations.py | {
"start": 6187,
"end": 6697
class ____(cst.CSTTransformer):
    """libcst transformer that rewrites relative ``from . import ...``
    statements into absolute imports rooted at ``source_library``
    (the rewrite itself is delegated to
    ``convert_relative_import_to_absolute``)."""

    def __init__(self, relative_path: str, source_library: str):
        super().__init__()
        # Path of the file being transformed; presumably used by the helper
        # to resolve how many package levels a relative import climbs --
        # confirm against convert_relative_import_to_absolute.
        self.relative_path = relative_path
        # Package name that rewritten absolute imports are rooted at.
        self.source_library = source_library

    def leave_ImportFrom(self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom) -> cst.ImportFrom:
        # Operate on `updated_node` so edits made by nested visitors are kept.
        return convert_relative_import_to_absolute(
            import_node=updated_node, file_path=self.relative_path, package_name=self.source_library
        )
| AbsoluteImportTransformer |
python | PyCQA__pylint | doc/data/messages/t/too-many-ancestors/good.py | {
"start": 404,
"end": 562
class ____(Mammal):
    # Trait flags kept as flat class attributes on a single subclass
    # (rather than one mixin base class per trait, which would create a
    # deep ancestor chain).
    beaver_tailed = True
    can_swim = True
    has_beak = True
    lays_egg = True
    protected_specie = True
    venomous = True
| Playtypus |
python | dagster-io__dagster | python_modules/dagster/dagster/_utils/log.py | {
"start": 2928,
"end": 11780
class ____(logging.Handler):
    """logging.Handler that forwards every emitted record to a user-supplied
    callback, wrapped in a StructuredLoggerMessage."""

    def __init__(self, callback: StructuredLoggerCallback):
        super().__init__()
        # Validate eagerly so a non-callable fails at construction time.
        self.callback = check.is_callable(callback, "callback")

    def emit(self, record: logging.LogRecord) -> None:
        # A logging handler must never let exceptions escape: wrap both the
        # message construction and the callback invocation.
        try:
            structured = StructuredLoggerMessage(
                name=record.name,
                message=record.msg,
                level=record.levelno,
                meta=record.dagster_meta,  # type: ignore
                record=record,
            )
            self.callback(structured)
        except Exception as err:
            logging.critical("[%s] Error during logging!", self.__class__.__name__)
            logging.exception(str(err))
def construct_single_handler_logger(
    name: str, level: Union[str, int], handler: logging.Handler
) -> LoggerDefinition:
    """Build a LoggerDefinition whose constructed loggers emit solely
    through the given handler.

    Args:
        name: name given to each constructed python logger.
        level: log level (name or number); applied to both the logger and
            the handler.
        handler: the single handler attached to every constructed logger.
    """
    check.str_param(name, "name")
    check.inst_param(handler, "handler", logging.Handler)

    level = coerce_valid_log_level(level)

    @logger
    def single_handler_logger(_init_context: "InitLoggerContext"):
        # Use the currently-registered logger class so globally installed
        # logger subclasses are respected.
        klass = logging.getLoggerClass()
        logger_ = klass(name, level=level)
        logger_.addHandler(handler)
        handler.setLevel(level)
        return logger_

    return single_handler_logger
# Base python logger whose messages will be captured as structured Dagster log messages.
# Children of this logger (see `get_dagster_logger`) inherit the capture behavior.
BASE_DAGSTER_LOGGER = logging.getLogger(name="dagster")
@public
def get_dagster_logger(name: Optional[str] = None) -> logging.Logger:
    """Create a python logger whose output is captured as Dagster log messages.

    Messages emitted through the returned logger carry structured information
    such as the step_key and run_id, and show up in the Dagster event log --
    a convenient alternative to `context.log` in most cases. If the log level
    is not set explicitly, it defaults to DEBUG.

    Args:
        name (Optional[str]): If supplied, the returned logger is named
            ``"dagster.builtin.{name}"``, inheriting from the base Dagster
            logger. If omitted, it is named ``"dagster.builtin"``.

    Returns:
        :class:`logging.Logger`: A logger whose output will be captured by Dagster.
    """
    # Keep the parent logger at DEBUG so no child messages are filtered out.
    BASE_DAGSTER_LOGGER.setLevel(logging.DEBUG)
    builtin_logger = BASE_DAGSTER_LOGGER.getChild("builtin")
    return builtin_logger.getChild(name) if name else builtin_logger
def define_structured_logger(
    name: str, callback: StructuredLoggerCallback, level: Union[str, int]
) -> LoggerDefinition:
    """Build a LoggerDefinition whose loggers forward each record to
    `callback` via a StructuredLoggerHandler.

    Args:
        name: name for the constructed loggers.
        callback: invoked once per emitted record.
        level: log level (name or number).
    """
    check.str_param(name, "name")
    check.callable_param(callback, "callback")
    level = coerce_valid_log_level(level)

    return construct_single_handler_logger(name, level, StructuredLoggerHandler(callback))
def define_json_file_logger(name: str, json_path: str, level: Union[str, int]) -> LoggerDefinition:
    """Build a LoggerDefinition whose loggers write records to the file at
    `json_path` via JsonFileHandler (presumably JSON-serialized -- see
    JsonFileHandler), using the default Dagster formatter.

    Args:
        name: name for the constructed loggers.
        json_path: path of the target log file.
        level: log level (name or number).
    """
    check.str_param(name, "name")
    check.str_param(json_path, "json_path")
    level = coerce_valid_log_level(level)

    stream_handler = JsonFileHandler(json_path)
    stream_handler.setFormatter(define_default_formatter())
    return construct_single_handler_logger(name, level, stream_handler)
def get_stack_trace_array(exception: Exception) -> Sequence[str]:
    """Return the formatted traceback lines for `exception`."""
    check.inst_param(exception, "exception", Exception)
    # Python 3 exceptions carry their own traceback; fall back to the
    # exception currently being handled only if the attribute is absent.
    if hasattr(exception, "__traceback__"):
        traceback_obj = exception.__traceback__
    else:
        traceback_obj = sys.exc_info()[2]
    return traceback.format_tb(traceback_obj)
def default_format_string() -> str:
    """Message format used by Dagster's default log formatter:
    timestamp - logger name - level - message."""
    fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    return fmt
def default_date_format_string() -> str:
    """Date format (including UTC offset) used by Dagster's default log
    formatter."""
    date_fmt = "%Y-%m-%d %H:%M:%S %z"
    return date_fmt
def define_default_formatter() -> logging.Formatter:
    """Stdlib formatter wired with Dagster's default message/date formats."""
    return logging.Formatter(default_format_string(), default_date_format_string())
def get_structlog_shared_processors():
    """Structlog processors shared by every formatter configured in this
    module: logger name, log level, ISO UTC timestamp, stack info, and
    extra-kwargs from stdlib records."""
    # Deferred for import perf
    import structlog

    timestamper = structlog.processors.TimeStamper(fmt="iso", utc=True)

    shared_processors = [
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,
        timestamper,
        structlog.processors.StackInfoRenderer(),
        structlog.stdlib.ExtraAdder(),
    ]

    return shared_processors
def get_structlog_json_formatter() -> "structlog.stdlib.ProcessorFormatter":
    """Formatter that renders records (from both structlog and plain stdlib
    logging) as JSON."""
    # Deferred for import perf
    import structlog

    return structlog.stdlib.ProcessorFormatter(
        # Applied to records that arrive via the plain stdlib logging path.
        foreign_pre_chain=get_structlog_shared_processors(),
        processors=[
            structlog.stdlib.ProcessorFormatter.remove_processors_meta,
            structlog.processors.JSONRenderer(),
        ],
    )
@deprecated(
    breaking_version="2.0",
    subject="loggers.dagit",
    emit_runtime_warning=False,
)
def configure_loggers(
    handler: str = "default", formatter: str = "colored", log_level: Union[str, int] = "INFO"
) -> None:
    """Install dictConfig-based logging for the dagster / dagit /
    dagster-webserver loggers and route python warnings through the
    "dagster" logger.

    Args:
        handler: name of the configured handler to attach to each logger --
            "default" (stdout) or "null" (discard).
        formatter: formatter used by the "default" handler -- one of
            "colored", "json", or "rich".
        log_level: level applied to the handler and to each logger.
    """
    # Deferred for import perf
    import structlog

    # It's possible that structlog has already been configured by either the user or a controlling
    # process. If so, we don't want to override that configuration.
    if not structlog.is_configured():
        structlog.configure(
            processors=[
                *get_structlog_shared_processors(),
                structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
            ],
            logger_factory=structlog.stdlib.LoggerFactory(),
        )

    json_formatter = get_structlog_json_formatter()

    LOGGING_CONFIG: dict[str, Any] = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "colored": {
                "()": coloredlogs.ColoredFormatter,
                "fmt": default_format_string(),
                "datefmt": default_date_format_string(),
                "field_styles": {"levelname": {"color": "blue"}, "asctime": {"color": "green"}},
                "level_styles": {"debug": {}, "error": {"color": "red"}},
            },
            # Reuse the pieces of the prebuilt json formatter so dictConfig
            # can reconstruct an equivalent instance.
            "json": {
                "()": json_formatter.__class__,
                "foreign_pre_chain": json_formatter.foreign_pre_chain,
                "processors": json_formatter.processors,
            },
            "rich": {
                "()": structlog.stdlib.ProcessorFormatter,
                "foreign_pre_chain": get_structlog_shared_processors(),
                "processors": [
                    structlog.stdlib.ProcessorFormatter.remove_processors_meta,
                    structlog.dev.ConsoleRenderer(),
                ],
            },
        },
        "handlers": {
            "default": {
                "formatter": formatter,
                "class": "logging.StreamHandler",
                "stream": sys.stdout,
                "level": log_level,
            },
            "null": {
                "class": "logging.NullHandler",
            },
        },
        "loggers": {
            "dagster": {
                "handlers": [handler],
                "level": log_level,
            },
            # Only one of dagster or dagster-webserver will be used at a time. We configure them
            # both here to avoid a dependency on the dagster-webserver package.
            "dagit": {
                "handlers": [handler],
                "level": log_level,
            },
            "dagster-webserver": {
                "handlers": [handler],
                "level": log_level,
            },
        },
    }

    logging.config.dictConfig(LOGGING_CONFIG)

    # override the default warnings handler as per https://docs.python.org/3/library/warnings.html#warnings.showwarning
    # to use the same formatting
    def custom_warning_handler(message, category, filename, lineno, file=None, line=None):
        log_message = warnings.formatwarning(message, category, filename, lineno, line)
        logging.getLogger("dagster").warning(log_message)

    warnings.showwarning = custom_warning_handler
def create_console_logger(name: str, level: Union[str, int]) -> logging.Logger:
    """Create a logger with colored console output at `level`, using the
    default Dagster message and date formats."""
    klass = logging.getLoggerClass()
    logger = klass(name, level=level)
    coloredlogs.install(
        logger=logger,
        level=level,
        fmt=default_format_string(),
        datefmt=default_date_format_string(),
        field_styles={"levelname": {"color": "blue"}, "asctime": {"color": "green"}},
        level_styles={"debug": {}, "error": {"color": "red"}},
    )
    return logger
| StructuredLoggerHandler |
python | dagster-io__dagster | python_modules/libraries/dagstermill/dagstermill/manager.py | {
"start": 2789,
"end": 16235
class ____:
    """Holds dagstermill's per-notebook execution state.

    One instance backs the notebook-facing API: it either reconstitutes a
    real job context (when the notebook is executed by dagstermill) or
    builds an ephemeral context for interactive exploration, and it brokers
    inputs/outputs/events across the process boundary.

    Fix in this revision: the "does not have output named" error message in
    `yield_result` was missing the space between sentences
    ("...{output_name}.Expected one of...").
    """

    def __init__(self):
        # Job (reconstructable or definition) currently associated with the notebook.
        self.job = None
        # Definition of the op this notebook is executing as.
        self.op_def: Optional[NodeDefinition] = None
        # True only when running under dagstermill-managed execution.
        self.in_job: bool = False
        # Directory used to pass pickled events across the process boundary.
        self.marshal_dir: Optional[str] = None
        # The active DagstermillExecutionContext (or runtime subclass).
        self.context = None
        # Manager for resource setup/teardown; see teardown_resources().
        self.resource_manager = None

    def _setup_resources(
        self,
        resource_defs: Mapping[str, ResourceDefinition],
        resource_configs: Mapping[str, ResourceConfig],
        log_manager: DagsterLogManager,
        execution_plan: Optional[ExecutionPlan],
        dagster_run: Optional[DagsterRun],
        resource_keys_to_init: Optional[AbstractSet[str]],
        instance: Optional[DagsterInstance],
        emit_persistent_events: Optional[bool],
        event_loop: Optional[AbstractEventLoop],
    ):
        """Drop-in replacement for
        `dagster._core.execution.resources_init.resource_initialization_manager`. It uses a
        `DagstermillResourceEventGenerationManager` and explicitly calls `teardown` on it.
        """
        generator = resource_initialization_event_generator(
            resource_defs=resource_defs,
            resource_configs=resource_configs,
            log_manager=log_manager,
            execution_plan=execution_plan,
            dagster_run=dagster_run,
            resource_keys_to_init=resource_keys_to_init,
            instance=instance,
            emit_persistent_events=emit_persistent_events,
            event_loop=event_loop,
        )
        self.resource_manager = DagstermillResourceEventGenerationManager(
            generator, ScopedResourcesBuilder
        )
        return self.resource_manager

    def reconstitute_job_context(
        self,
        executable_dict: Mapping[str, Any],
        job_run_dict: Mapping[str, Any],
        node_handle_kwargs: Mapping[str, Any],
        instance_ref_dict: Mapping[str, Any],
        step_key: str,
        output_log_path: Optional[str] = None,
        marshal_dir: Optional[str] = None,
        run_config: Optional[Mapping[str, Any]] = None,
    ):
        """Reconstitutes a context for dagstermill-managed execution.

        You'll see this function called to reconstruct a job context within the ``injected
        parameters`` cell of a dagstermill output notebook. Users should not call this function
        interactively except when debugging output notebooks.

        Use :func:`dagstermill.get_context` in the ``parameters`` cell of your notebook to define a
        context for interactive exploration and development. This call will be replaced by one to
        :func:`dagstermill.reconstitute_job_context` when the notebook is executed by
        dagstermill.
        """
        check.opt_str_param(output_log_path, "output_log_path")
        check.opt_str_param(marshal_dir, "marshal_dir")
        run_config = check.opt_mapping_param(run_config, "run_config", key_type=str)
        check.mapping_param(job_run_dict, "job_run_dict")
        check.mapping_param(executable_dict, "executable_dict")
        check.mapping_param(node_handle_kwargs, "node_handle_kwargs")
        check.mapping_param(instance_ref_dict, "instance_ref_dict")
        check.str_param(step_key, "step_key")

        job = ReconstructableJob.from_dict(executable_dict)
        job_def = job.get_definition()

        try:
            instance_ref = unpack_value(instance_ref_dict, InstanceRef)
            instance = DagsterInstance.from_ref(instance_ref)
        except Exception as err:
            raise DagstermillError(
                "Error when attempting to resolve DagsterInstance from serialized InstanceRef"
            ) from err

        dagster_run = unpack_value(job_run_dict, DagsterRun)

        node_handle = NodeHandle.from_dict(node_handle_kwargs)
        op = job_def.get_node(node_handle)
        op_def = op.definition

        self.marshal_dir = marshal_dir
        self.in_job = True
        self.op_def = op_def
        self.job = job

        # Build (and thereby validate) the resolved run config; the result
        # is intentionally discarded.
        ResolvedRunConfig.build(job_def, run_config)

        execution_plan = create_execution_plan(
            self.job,
            run_config,
            step_keys_to_execute=dagster_run.step_keys_to_execute,
        )

        with scoped_job_context(
            execution_plan,
            job,
            run_config,
            dagster_run,
            instance,
            scoped_resources_builder_cm=self._setup_resources,
            # Set this flag even though we're not in test for clearer error reporting
            raise_on_error=True,
        ) as job_context:
            known_state = None
            if dagster_run.parent_run_id:
                known_state = KnownExecutionState.build_for_reexecution(
                    instance=instance,
                    parent_run=check.not_none(instance.get_run_by_id(dagster_run.parent_run_id)),
                )

            self.context = DagstermillRuntimeExecutionContext(
                job_context=job_context,
                job_def=job_def,
                op_config=run_config.get("ops", {}).get(op.name, {}).get("config"),
                resource_keys_to_init=get_required_resource_keys_to_init(
                    execution_plan,
                    job_def,
                ),
                op_name=op.name,
                node_handle=node_handle,
                step_context=cast(
                    "StepExecutionContext",
                    job_context.for_step(
                        cast("ExecutionStep", execution_plan.get_step_by_key(step_key)),
                        known_state=known_state,
                    ),
                ),
            )

        return self.context

    def get_context(
        self,
        op_config: Any = None,
        resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,
        logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,
        run_config: Optional[dict] = None,
    ) -> DagstermillExecutionContext:
        """Get a dagstermill execution context for interactive exploration and development.

        Args:
            op_config (Optional[Any]): If specified, this value will be made available on the
                context as its ``op_config`` property.
            resource_defs (Optional[Mapping[str, ResourceDefinition]]): Specifies resources to provide to context.
            logger_defs (Optional[Mapping[str, LoggerDefinition]]): Specifies loggers to provide to context.
            run_config(Optional[dict]): The config dict with which to construct
                the context.

        Returns:
            :py:class:`~dagstermill.DagstermillExecutionContext`
        """
        run_config = check.opt_dict_param(run_config, "run_config", key_type=str)

        # If we are running non-interactively, and there is already a context reconstituted, return
        # that context rather than overwriting it.
        if self.context is not None and isinstance(
            self.context, DagstermillRuntimeExecutionContext
        ):
            return self.context

        if not logger_defs:
            logger_defs = {"dagstermill": colored_console_logger}
            run_config["loggers"] = {"dagstermill": {}}
        logger_defs = check.opt_mapping_param(logger_defs, "logger_defs")
        resource_defs = check.opt_mapping_param(resource_defs, "resource_defs")

        op_def = OpDefinition(
            name="this_op",
            compute_fn=lambda *args, **kwargs: None,
            description="Ephemeral op constructed by dagstermill.get_context()",
            required_resource_keys=set(resource_defs.keys()),
        )

        job_def = JobDefinition(
            graph_def=GraphDefinition(name="ephemeral_dagstermill_pipeline", node_defs=[op_def]),
            logger_defs=logger_defs,
            resource_defs=resource_defs,
        )

        run_id = make_new_run_id()

        # construct stubbed DagsterRun for notebook exploration...
        # The actual dagster run during job execution will be serialized and reconstituted
        # in the `reconstitute_job_context` call
        dagster_run = DagsterRun(
            job_name=job_def.name,
            run_id=run_id,
            run_config=run_config,
            step_keys_to_execute=None,
            status=DagsterRunStatus.NOT_STARTED,
            tags=None,
        )

        self.in_job = False
        self.op_def = op_def
        self.job = job_def

        job = InMemoryJob(job_def)
        execution_plan = create_execution_plan(job, run_config)

        with scoped_job_context(
            execution_plan,
            job,
            run_config,
            dagster_run,
            DagsterInstance.ephemeral(),
            scoped_resources_builder_cm=self._setup_resources,
        ) as job_context:
            self.context = DagstermillExecutionContext(
                job_context=job_context,
                job_def=job_def,
                op_config=op_config,
                resource_keys_to_init=get_required_resource_keys_to_init(
                    execution_plan,
                    job_def,
                ),
                op_name=op_def.name,
                node_handle=NodeHandle(op_def.name, parent=None),
            )

        return self.context

    def yield_result(self, value, output_name="result"):
        """Yield a result directly from notebook code.

        When called interactively or in development, returns its input.

        Args:
            value (Any): The value to yield.
            output_name (Optional[str]): The name of the result to yield (default: ``'result'``).
        """
        if not self.in_job:
            return value

        # deferred import for perf
        import scrapbook

        if not self.op_def.has_output(output_name):  # pyright: ignore[reportOptionalMemberAccess]
            raise DagstermillError(
                f"Op {self.op_def.name} does not have output named {output_name}. Expected one of"  # pyright: ignore[reportOptionalMemberAccess]
                f" {[str(output_def.name) for output_def in self.op_def.output_defs]}"  # pyright: ignore[reportOptionalMemberAccess]
            )

        # pass output value cross process boundary using io manager
        step_context = self.context._step_context  # noqa: SLF001 # pyright: ignore[reportAttributeAccessIssue,reportOptionalMemberAccess]
        # Note: yield_result currently does not support DynamicOutput

        # dagstermill assets do not support yielding additional results within the notebook:
        if len(step_context.job_def.asset_layer.executable_asset_keys) > 0:  # pyright: ignore[reportArgumentType]
            raise DagstermillError(
                "dagstermill assets do not currently support dagstermill.yield_result"
            )

        step_output_handle = StepOutputHandle(
            step_key=step_context.step.key, output_name=output_name
        )
        output_context = step_context.get_output_context(step_output_handle)
        io_manager = step_context.get_io_manager(step_output_handle)

        # Note that we assume io manager is symmetric, i.e handle_input(handle_output(X)) == X
        io_manager.handle_output(output_context, value)

        # record that the output has been yielded
        scrapbook.glue(output_name, "")

    def yield_event(self, dagster_event):
        """Yield a dagster event directly from notebook code.

        When called interactively or in development, returns its input.

        Args:
            dagster_event (Union[:class:`dagster.AssetMaterialization`, :class:`dagster.ExpectationResult`, :class:`dagster.TypeCheck`, :class:`dagster.Failure`, :class:`dagster.RetryRequested`]):
                An event to yield back to Dagster.
        """
        valid_types = (
            AssetMaterialization,
            AssetObservation,
            ExpectationResult,
            TypeCheck,
            Failure,
            RetryRequested,
        )
        if not isinstance(dagster_event, valid_types):
            raise DagstermillError(
                f"Received invalid type {dagster_event} in yield_event. Expected a Dagster event"
                f" type, one of {valid_types}."
            )

        if not self.in_job:
            return dagster_event

        # deferred import for perf
        import scrapbook

        # Pickle the event to a file in the marshal dir and glue the path into
        # the notebook, so the parent process can pick it up.
        event_id = f"event-{uuid.uuid4()}"
        out_file_path = os.path.join(self.marshal_dir, event_id)  # pyright: ignore[reportCallIssue,reportArgumentType]
        with open(out_file_path, "wb") as fd:
            fd.write(pickle.dumps(dagster_event, PICKLE_PROTOCOL))

        scrapbook.glue(event_id, out_file_path)

    def teardown_resources(self):
        # Tear down any resources initialized via _setup_resources.
        if self.resource_manager is not None:
            self.resource_manager.teardown()

    def load_input_parameter(self, input_name: str):
        # load input from source
        dm_context = check.not_none(self.context)
        if not isinstance(dm_context, DagstermillRuntimeExecutionContext):
            check.failed("Expected DagstermillRuntimeExecutionContext")
        step_context = dm_context.step_context
        step_input = step_context.step.step_input_named(input_name)
        input_def = step_context.op_def.input_def_named(input_name)
        # The loader generator interleaves DagsterEvents with the value;
        # skip events and return the first actual input value.
        for event_or_input_value in step_input.source.load_input_object(step_context, input_def):
            if isinstance(event_or_input_value, DagsterEvent):
                continue
            else:
                return event_or_input_value
# Module-level singleton: the notebook-facing dagstermill API delegates to this instance.
MANAGER_FOR_NOTEBOOK_INSTANCE = Manager()
| Manager |
python | catalyst-team__catalyst | catalyst/callbacks/scheduler.py | {
"start": 9202,
"end": 12331
class ____(ILRUpdater):
    """
    Helps you find an optimal learning rate for a model, as per suggestion of
    `Cyclical Learning Rates for Training Neural Networks`_ paper.
    Learning rate is increased in linear or log scale, depending on user input.

    See `How Do You Find A Good Learning Rate`_ article for details.

    .. _Cyclical Learning Rates for Training Neural Networks:
        https://arxiv.org/abs/1506.01186
    .. _How Do You Find A Good Learning Rate:
        https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html
    """

    def __init__(
        self,
        final_lr: float,
        scale: str = "log",
        num_steps: Optional[int] = None,
        optimizer_key: Optional[str] = None,
    ):
        """
        Args:
            final_lr: final learning rate to try with
            scale: learning rate increasing scale ("log" or "linear")
            num_steps: number of batches to try, if None - whole loader would be used.
            optimizer_key: which optimizer key to use for learning rate scheduling

        Raises:
            NotImplementedError: if invalid scale value.
        """
        super().__init__(optimizer_key=optimizer_key)

        self.final_lr = final_lr
        self.scale = scale
        self.num_steps = num_steps
        # Per-iteration update factors; real values are computed in
        # `on_loader_start` once the loader length is known.
        self.multiplier = 0
        self.lr_step = 0
        self.iteration = 0

        # Choose the ramp function once here instead of branching per batch.
        self._calc_lr = None
        if scale == "log":
            self._calc_lr = self._calc_lr_log
        elif scale == "linear":
            self._calc_lr = self._calc_lr_linear
        else:
            raise NotImplementedError("Not supported")

    def _calc_lr_log(self):
        # Geometric ramp: init_lr * multiplier^iteration.
        # NOTE(review): `self.init_lr` is presumably set by ILRUpdater -- confirm.
        return self.init_lr * self.multiplier ** self.iteration

    def _calc_lr_linear(self):
        # Arithmetic ramp: init_lr + lr_step * iteration.
        return self.init_lr + self.lr_step * self.iteration

    def calc_lr(self):
        """Calculates learning rate.

        Returns:
            learning rate.
        """
        res = self._calc_lr()
        self.iteration += 1
        return res

    def calc_momentum(self):
        """Calculates new momentum."""
        pass

    def on_loader_start(self, runner: "IRunner"):
        """Loader start hook. Updates scheduler statistics.

        Args:
            runner: current runner
        """
        if runner.is_train_loader:
            # `multiplier` spans init_lr -> final_lr over num_steps in log
            # scale; `lr_step` is the equivalent additive step in linear scale.
            lr_step = self.final_lr / self.init_lr
            self.num_steps = self.num_steps or runner.loader_batch_len
            self.multiplier = lr_step ** (1 / self.num_steps)
            self.lr_step = (self.final_lr - self.init_lr) / self.num_steps

        super().on_loader_start(runner=runner)

    def on_batch_end(self, runner: "IRunner"):
        """Batch end hook. Make scheduler step and stops iterating if needed.

        Args:
            runner: current runner

        Raises:
            KeyboardInterrupt: at the end of LRFinder
        """
        super().on_batch_end(runner=runner)
        if self.iteration > self.num_steps:
            # runner.need_early_stop = True
            raise KeyboardInterrupt("End of LRFinder")
# Public exports of this module.
__all__ = ["ISchedulerCallback", "SchedulerCallback", "ILRUpdater", "LRFinder"]
| LRFinder |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/dynamic_ragged_shape.py | {
"start": 6905,
"end": 75115
} | class ____(extension_type.BatchableExtensionType):
"""The shape of a ragged or dense tensor.
Ragged shapes are encoded using two fields:
* `inner_shape`: An integer vector giving the shape of a dense tensor.
* `row_partitions`: A list of `RowPartition` objects, describing how
that flat shape should be partitioned to add ragged axes.
If a DynamicRaggedShape is the shape of a RaggedTensor rt, then:
1. row_partitions = rt._nested_row_partitions
(and thus len(row_partitions) > 0)
2. inner_shape is the shape of rt.flat_values
If a DynamicRaggedShape is the shape of a dense tensor t, then:
1. row_partitions = []
2. inner_shape is the shape of t.
Examples:
The following table gives a few examples (where `RP(lengths)` is short
for `RowPartition.from_lengths(lengths)`):
Row Partitions | Inner Shape | Example Tensor
--------------------------- | ------------ | ----------------------------
[] | [2, 3] | `[[1, 2, 3], [4, 5, 6]]`
[RP([2, 0, 3])] | [5] | `[[1, 2], [], [3, 4, 5]]`
[RP([2, 1])] | [3, 2] | `[[[1, 2], [3, 4]], [[5, 6]]]`
[RP([2, 1]), RP([2, 1, 2])] | [5] | `[[[1, 2], [3]], [[4, 5]]]`
"""
_row_partitions: Tuple[RowPartition, ...]
_inner_shape: tensor_lib.Tensor
_static_inner_shape: tensor_shape.TensorShape
__batch_encoder__ = _DynamicRaggedShapeBatchEncoder()
__name__ = "tf.DynamicRaggedShape"
def __init__(self,
row_partitions: Sequence[RowPartition],
inner_shape: core.TensorLike,
dtype: Optional[dtypes.DType] = None,
validate: bool = False,
static_inner_shape: ... = None):
"""Core constructor for a DynamicRaggedShape.
Create a DynamicRaggedShape. This can be used to construct a
DynamicRaggedShape representing a ragged or dense shape. If row_partitions
is an empty list, then this is equivalent to a dense shape.
If row_partitions is specified, then the num_row_partitions will be equal
to len(row_partitions). There are several checks made.
Specifically:
1. Consecutive row_partitions must have consistent nvals and nrows.
2. The last row_partitions must have nvals equal to the first element of
inner_shape.
The inner_shape is converted to a tensor.
All row_partitions and the inner_shape are converted to the same dtype
(int64 or int32).
Args:
row_partitions: the row_partitions of the shape.
inner_shape: if len(row_partitions) > 0, the shape of the flat_values.
Otherwise, the shape of the tensor.
dtype: tf.int64, tf.int32, or None representing the preferred dtype.
validate: if true, dynamic validation is applied to the shape.
static_inner_shape: if len(row_partitions) > 0, the static shape of the
flat_values. Otherwise, the static shape of the tensor. Should be
convertible to a TensorShape.
"""
if not isinstance(row_partitions, Iterable):
raise TypeError(
"row_partitions should be a list of row partitions. Instead, got " +
str(row_partitions))
for x in row_partitions:
if not isinstance(x, RowPartition):
raise TypeError("row_partitions contains " + str(x) +
" which is not a RowPartition")
dtype = _find_dtype_iterable(row_partitions, dtype)
dtype = _find_dtype(inner_shape, dtype)
if (isinstance(inner_shape, np.ndarray) and
inner_shape.dtype == np.int32 and dtype is None):
dtype = dtypes.int32
dtype = _find_dtype(dtypes.int64, dtype)
row_partitions = tuple([rp.with_dtype(dtype) for rp in row_partitions])
self._row_partitions = row_partitions
self._inner_shape = ops.convert_to_tensor(
inner_shape, dtype_hint=dtype, name="inner_dim_sizes")
if self._inner_shape.dtype != dtype:
self._inner_shape = math_ops.cast(self._inner_shape, dtype)
checks = []
# Validate shapes.
if self._row_partitions:
for axis, rp in enumerate(self._row_partitions):
if axis > 0:
previous_row_partition = self._row_partitions[axis - 1]
msg = ("RowPartitions in DynamicRaggedShape do not align "
f"between {axis - 1} and {axis}")
static_nrows = rp.static_nrows
static_nvals = previous_row_partition.static_nvals
if (static_nrows is not None) and (static_nvals is not None):
if static_nrows != static_nvals:
raise ValueError(msg)
else:
continue
if validate:
checks.append(
check_ops.assert_equal(
previous_row_partition.nvals(), rp.nrows(), message=msg))
self._inner_shape.shape.assert_has_rank(1)
self._static_inner_shape = tensor_util.constant_value_as_shape(
self._inner_shape)
if static_inner_shape is not None:
self._static_inner_shape = self._static_inner_shape.merge_with(
static_inner_shape)
if row_partitions:
last_row_partition = row_partitions[-1]
static_nvals = last_row_partition.static_nvals
static_inner_shape_nvals = tensor_shape.dimension_value(
self._static_inner_shape[0])
if static_nvals is not None and static_inner_shape_nvals is not None:
if static_nvals != static_inner_shape_nvals:
raise ValueError("Last row partition does not match inner_shape.")
elif validate:
checks.append(
check_ops.assert_equal(
last_row_partition.nvals(),
self._inner_shape[0],
message="Last row partition does not match inner_shape."))
if checks:
self._inner_shape = control_flow_ops.with_dependencies(
checks, self._inner_shape, name="inner_shape_validated")
self._row_partitions = [
rp._with_dependencies(checks) for rp in self._row_partitions # pylint: disable=protected-access
]
@classmethod
def from_lengths(cls,
lengths: Sequence[Union[Sequence[int], int]],
num_row_partitions=None,
dtype=dtypes.int64):
"""Creates a shape with the given lengths and num_row_partitions.
The lengths can either be a nonnegative int or a list of nonnegative ints.
If num_row_partitions is None, then the minimal num_row_partitions is used.
For example, [2, (3, 2)] is the shape of [[0, 0, 0], [0, 0]], and
[2, 2] is the shape of [[0, 0], [0, 0]]
This chooses the minimal num_row_partitions required (including zero).
The following table gives a few examples (where `RP(lengths)` is short
for `RowPartition.from_lengths(lengths)`):
For example:
from_lengths | row_partitions | inner_shape
---------------------- | --------------------------| -------------
[] | [] | []
[2, (3, 2)] | [RP([3, 2])] | [5]
[2, 2] | [] | [2, 2]
[2, (3, 2), 7] | [RP([3, 2])] | [5, 7]
[2, (2, 2), 3] | [RP([2, 2])] | [4, 3]
[2, 2, 3] | [] | [2, 2, 3]
[2, (2, 1), (2, 0, 3)] | [RP(2, 1), RP([2, 0, 3])] | [5]
If we want the row partitions to end with uniform row partitions, then
we can set num_row_partitions.
For example,
below URP(3, 12) is RowPartition.from_uniform_row_length(3, 12)
from_lengths | num_row_partitions | row_partitions | inner_shape
---------------| -------------------|--------------------------|------------
[2, (3, 2), 2] | 2 | [RP([3, 2]), URP(2, 10)] | [10]
[2, 2] | 1 | [URP(2, 4)] | [4]
[2, 2, 3] | 0 | [] | [2, 2, 3]
[2, 2, 3] | 1 | [URP(2, 4)] | [4, 3]
[2, 2, 3] | 2 | [URP(2, 4), URP(3, 12)] | [12]
Representing the shapes from init():
from_lengths | Tensor Example
------------------------ | ------------------------------
`[2, 3]` | `[[1, 2, 3], [4, 5, 6]]`
`[3, (2, 0, 3)]` | `[[1, 2], [], [3, 4, 5]]`
`[2, (2, 1), 2]` | `[[[1, 2], [3, 4]], [[5, 6]]]`
`[2, (2, 1), (2, 1, 2)]` | `[[[1, 2], [3]], [[4, 5]]]`
Args:
lengths: the lengths of sublists along each axis.
num_row_partitions: the num_row_partitions of the result or None
indicating the minimum number of row_partitions.
dtype: the dtype of the shape (tf.int32 or tf.int64).
Returns:
a new DynamicRaggedShape
"""
if not isinstance(lengths, list):
raise ValueError("lengths should be a list")
for x in lengths:
if not _is_int_or_tuple_of_ints(x):
raise ValueError(
"element of lengths should be int or tuple of ints: instead %r" %
(x,))
if num_row_partitions is None:
# Calculate the minimal num_row_partitions.
is_list = [not isinstance(x, int) for x in lengths]
if any(is_list):
# Last index when not a list.
num_row_partitions = len(is_list) - is_list[-1::-1].index(True) - 1
else:
num_row_partitions = 0
if not isinstance(num_row_partitions, int):
raise ValueError("num_row_partitions should be an int or None")
if not lengths:
if num_row_partitions > 0:
raise ValueError("num_row_partitions==0 for a scalar shape")
return DynamicRaggedShape([], [], dtype=dtype)
if not num_row_partitions < len(lengths):
raise ValueError("num_row_partitions should be less than `len(lengths)` "
"if shape is not scalar.")
if num_row_partitions > 0:
(row_partitions, nvals) = _to_row_partitions_and_nvals_from_lengths(
lengths[:num_row_partitions + 1])
inner_shape = [nvals] + lengths[num_row_partitions + 1:]
return DynamicRaggedShape(row_partitions, inner_shape, dtype=dtype)
else:
return DynamicRaggedShape([], lengths, dtype=dtype)
@classmethod
def from_row_partitions(cls, row_partitions, dtype=None):
"""Create a shape from row_partitions.
Args:
row_partitions: a nonempty list of RowPartition objects.
dtype: the dtype to use, or None to use the row_partitions dtype.
Returns:
a DynamicRaggedShape with inner_rank==1.
"""
if not row_partitions:
raise ValueError("row_partitions cannot be empty")
inner_shape = [row_partitions[-1].nvals()]
return DynamicRaggedShape(row_partitions, inner_shape, dtype=dtype)
@classmethod
def _from_inner_shape(cls, inner_shape, dtype=None):
"""Create a shape from inner_shape, where num_row_partitions == 0."""
return DynamicRaggedShape([], inner_shape, dtype=dtype)
# pylint: disable=protected-access
@classmethod
def from_tensor(cls, t, dtype=None):
"""Constructs a ragged shape for a potentially ragged tensor."""
if ragged_tensor.is_ragged(t):
return DynamicRaggedShape(
t._nested_row_partitions, _flat_values_shape(t), dtype=dtype)
else:
return DynamicRaggedShape._from_inner_shape(
array_ops.shape(t), dtype=dtype)
@property
def row_partitions(self):
"""The row_partitions of the shape."""
return self._row_partitions
@property
def num_row_partitions(self):
"""The number of row_partitions of the shape."""
return len(self._row_partitions)
@property
def dtype(self):
"""The dtype of the shape -- one of tf.int32 or tf.int64."""
return self._inner_shape.dtype
def _static_inner_shape_as_list(self, truncate_first):
"""Returns the lengths of the inner shape (if rank known), or [...]."""
if self._static_inner_shape.rank is None:
return [...]
result = self._static_inner_shape.as_list()
if truncate_first:
return result[1:]
return result
def static_lengths(self, ragged_lengths=True):
"""Returns a list of statically known axis lengths.
This represents what values are known. For each row partition, it presents
either the uniform row length (if statically known),
the list of row lengths, or none if it is not statically known.
For the inner shape, if the rank is known, then each dimension is reported
if known, and None otherwise. If the rank of the inner shape is not known,
then the returned list ends with an ellipsis.
Args:
ragged_lengths: If false, returns None for all ragged dimensions.
Returns:
A Sequence[Union[Sequence[int],int, None]] of lengths, with a possible
Ellipsis at the end.
"""
if self.num_row_partitions == 0:
return self._static_inner_shape_as_list(False)
first_dim = self.row_partitions[0].static_nrows
if isinstance(first_dim, tensor_shape.Dimension):
first_dim = first_dim.value
rp_dims = [first_dim]
for rp in self.row_partitions:
if rp.is_uniform():
rp_dims.append(rp.static_uniform_row_length)
elif ragged_lengths:
const_vals = tensor_util.constant_value(rp.row_lengths())
if const_vals is None:
rp_dims.append(None)
else:
rp_dims.append(tuple(const_vals.tolist()))
else:
rp_dims.append(None)
return rp_dims + self._static_inner_shape_as_list(True)
def __repr__(self):
lengths = _list_with_ellipsis_to_str(self.static_lengths())
return ("<DynamicRaggedShape "
"lengths=%s num_row_partitions=%r>" %
(lengths, self.num_row_partitions))
  def _to_tensor_shape(self) -> tensor_shape.TensorShape:
    """Returns a TensorShape representation of the shape.

    Ragged row lengths are dropped (`ragged_lengths=False`), so ragged
    dimensions appear as None in the result.
    """
    lengths = self.static_lengths(ragged_lengths=False)
    if not lengths:
      # Scalar shape.
      return tensor_shape.TensorShape(())
    if lengths[-1] == Ellipsis:
      # Unknown rank.
      return tensor_shape.TensorShape(None)
    return tensor_shape.TensorShape(lengths)
  def _slice_shape(self, start, stop):
    """Returns a shape self[start:stop].

    If start == 0, then this truncates dimensions after stop.
    If start != 0, then this will return a shape with num_row_partitions == 0.

    See __getitem__.

    Args:
      start: the first dimension. 0 <= start <= rank
      stop: the last dimension (exclusive). 0 <= stop <= rank

    Returns:
      A DynamicRaggedShape covering the requested dimension range.
    """
    if stop <= start:
      # Empty slice -> scalar shape.
      return DynamicRaggedShape._from_inner_shape([])
    elif start == 0:
      if stop <= self.num_row_partitions:
        # The slice ends within the partitioned dimensions.
        if stop == 1:
          return DynamicRaggedShape._from_inner_shape(
              [self.row_partitions[0].nrows()])
        new_row_partitions = self.row_partitions[:stop - 1]
        # The new inner shape is just the flat size of the last kept partition.
        new_inner_shape = [new_row_partitions[-1].nvals()]
        return DynamicRaggedShape(new_row_partitions, new_inner_shape)
      else:
        if self.rank is None:
          # Keep all partitions and truncate the (unknown-rank) inner shape.
          new_inner_rank = stop - self.num_row_partitions
          new_inner_shape = self.inner_shape[:new_inner_rank]
          return DynamicRaggedShape(
              row_partitions=self.row_partitions,
              inner_shape=new_inner_shape,
              static_inner_shape=None,
              validate=False)
        elif self.rank <= stop:
          # Slice covers the whole shape; nothing to do.
          return self
        new_inner_rank = stop - self.num_row_partitions
        new_inner_shape = self.inner_shape[:new_inner_rank]
        return DynamicRaggedShape(
            row_partitions=self.row_partitions,
            inner_shape=new_inner_shape,
            static_inner_shape=tensor_shape.TensorShape([None] *
                                                        new_inner_rank),
            validate=False)
    else:
      # start > 0: only legal if every remaining dimension is uniform,
      # because the result cannot carry ragged structure.
      if self.rank is None or stop < self.rank:
        partial = self._slice_shape(0, stop)
      else:
        partial = self
      for x in partial.row_partitions:
        if not x.is_uniform():
          raise ValueError("All relevant dimensions must be uniform")
      if partial.rank is None:
        # TODO(martinz): Implement _with_num_row_partitions(0) if rank is
        # unknown, and remove.
        raise NotImplementedError(
            "__getitem__[start:stop] where start > 0 not implemented")
      return DynamicRaggedShape._from_inner_shape(
          partial._with_num_row_partitions(0).inner_shape[start:])
def _dimension(self, index):
"""Return a dimension, if the dimension is not ragged (see __getitem__)."""
rank = self.rank
if not isinstance(index, int):
raise TypeError("index should be an int")
if (self.num_row_partitions == 0 or index > self.num_row_partitions + 1):
# If num_row_partitions > 0 and index <= num_row_partitions + 1, then
# we are safe.
if rank is None:
raise ValueError(
"Rank must be known to use __getitem__ on a large index.")
if index >= rank:
raise IndexError("Index is too big: " + str(index) + ">=" + str(rank))
if index < 0:
raise IndexError("Index must be non-negative: " + str(index))
elif not self.is_uniform(index):
raise ValueError("Index " + str(index) + " is not uniform")
elif index == 0 and self.num_row_partitions > 0:
static_nrows = self.row_partitions[0].static_nrows
if static_nrows is not None:
return constant_op.constant(static_nrows, dtype=self.dtype)
return self.row_partitions[0].nrows()
elif self.num_row_partitions == 0:
static_result = tensor_shape.dimension_value(
self._static_inner_shape[index])
if static_result is not None:
return constant_op.constant(static_result, dtype=self.dtype)
return self.inner_shape[index]
elif index > self.num_row_partitions:
static_result = tensor_shape.dimension_value(
self._static_inner_shape[index - self.num_row_partitions])
if static_result is not None:
return constant_op.constant(static_result, dtype=self.dtype)
return self.inner_shape[index - self.num_row_partitions]
else:
return self.row_partitions[index - 1].uniform_row_length()
  def __getitem__(self, index):
    """Returns a dimension or a slice of the shape.

    Ragged shapes can have ragged dimensions that depend upon other dimensions.
    Therefore, if you ask for a dimension that is ragged, this function returns
    a ValueError. For similar reasons, if a slice is selected that includes
    a ragged dimension without including the zero dimension, then this fails.

    Any slice that does not start at zero will return a shape
    with num_row_partitions == 0.

    Args:
      index: the index: can be an int or a slice.

    Returns:
      A scalar dimension Tensor (int index) or a DynamicRaggedShape (slice).

    Raises:
      IndexError: if the index is not in range.
      ValueError: if the rank is unknown, or a ragged rank is requested
      incorrectly.
    """
    rank = self.rank
    if isinstance(index, slice):
      if (index.step is not None) and (index.step != 1):
        raise IndexError("Cannot stride through a shape")
      start = index.start
      stop = index.stop
      if start is None:
        start = 0
      # Normalize negative/None bounds before delegating to _slice_shape.
      start = _fix_start_index(start, rank, self.num_row_partitions)
      stop = _fix_stop_index(stop, rank)
      return self._slice_shape(start, stop)
    elif isinstance(index, int):
      if index < 0:
        if rank is None:
          raise ValueError(
              "Rank must be known to use __getitem__ with a negative index.")
        return self._dimension(rank + index)
      return self._dimension(index)
    else:
      raise TypeError("Argument is not an int or a slice")
  def _num_elements(self):
    """Number of elements in a shape.

    Returns:
      A scalar Tensor: the product of the inner shape (the row partitions
      are already accounted for by inner_shape[0]).
    """
    return math_ops.reduce_prod(self.inner_shape)
  def _num_slices_in_dimension(self, axis):
    """The total size of a dimension (like nvals).

    Effectively, this is self[:axis+1]._num_elements()

    Example:

    shape = DynamicRaggedShape._from_inner_shape([2, 3, 4])
    shape._num_slices_in_dimension(0) = 2
    shape._num_slices_in_dimension(1) = 6
    shape._num_slices_in_dimension(2) = 24
    shape._num_slices_in_dimension(-1) = 24
    shape._num_slices_in_dimension(-2) = 6
    shape._num_slices_in_dimension(-2) = 2

    Args:
      axis: the last axis to include in the number of elements. If negative,
        then axis = axis + rank.

    Returns:
      The number of elements in the shape.

    Raises:
      TypeError: if axis is not an int.
      ValueError: if axis is negative and the rank is unknown.
    """
    if not isinstance(axis, int):
      raise TypeError("axis must be an integer")
    if axis < 0:
      rank = self.rank
      if rank is None:
        raise ValueError(
            "You can't use negative values if the rank is undefined")
      axis = axis + rank
    if axis == 0:
      return self._dimension(0)
    if axis <= self.num_row_partitions:
      # Within the partitioned dimensions, nvals is exactly this count.
      return self.row_partitions[axis - 1].nvals()
    # If self.num_row_partitions = 1, and
    # self.inner_shape=[3,5,6], and axis=2, then you want:
    # 15 = 3 * 5 = math_ops.reduce_prod(self.inner_shape[:2])
    # 2 = axis - (self.num_row_partitions - 1)
    # If num_row_partitions=0, and
    # self.inner_shape=[3,5,6] and axis=2, then you want:
    # 90 = 3 * 5 * 6 = math_ops.reduce_prod(self.inner_shape[:3])
    # 3 = axis - (self.num_row_partitions - 1)
    remainder = axis - (self.num_row_partitions - 1)
    return _reduce_prod_patch(self.inner_shape[:remainder])
def is_uniform(self, axis):
"""Returns true if the indicated dimension is uniform."""
if not isinstance(axis, int):
raise TypeError("axis must be an integer")
rank = self.rank
if axis < 0:
raise IndexError("Negative axis values are not supported")
elif rank is not None and axis >= rank:
raise IndexError("Expected axis=%s < rank=%s" % (axis, rank))
else:
return ((axis == 0 or axis > len(self._row_partitions)) # pylint:disable=superfluous-parens
or self._row_partitions[axis - 1].is_uniform())
@property
def rank(self):
"""The number of dimensions in this shape, or None if unknown."""
inner_rank = self.inner_rank
if inner_rank is None:
return None
else:
return self.num_row_partitions + inner_rank
  @property
  def inner_shape(self):
    """The inner dimension sizes for this shape.

    Returns:
      A 1-D integer `Tensor`.
    """
    return self._inner_shape
  @property
  def inner_rank(self):
    """The rank of inner_shape, or None if statically unknown."""
    return tensor_shape.dimension_value(self._static_inner_shape.rank)
def _alt_inner_shape(self, new_inner_rank):
"""Get an alternative inner shape with higher or lower rank.
For the rank of the inner shape to be be higher, the last few ragged
dimensions must have uniform_row_length.
Args:
new_inner_rank: the new rank of the inner_shape
Returns:
A new inner_shape of rank new_inner_rank.
"""
if new_inner_rank == 0:
raise ValueError("new_inner_rank cannot be zero")
elif self.inner_rank == 0:
raise ValueError("old inner_rank cannot be zero")
elif new_inner_rank == self.inner_rank:
return self.inner_shape
elif new_inner_rank < self.inner_rank:
if self._static_inner_shape.is_fully_defined():
return _alt_inner_shape_from_tensor_shape(self._static_inner_shape,
self.dtype, new_inner_rank)
first_dimension = self._num_slices_in_dimension(-new_inner_rank)
if new_inner_rank == 1:
return array_ops.expand_dims(first_dimension, 0)
remaining_dimensions = self.inner_shape[1 - new_inner_rank:]
return array_ops.concat(
[array_ops.expand_dims(first_dimension, 0), remaining_dimensions],
axis=0)
else:
assert new_inner_rank > self.inner_rank
new_dimensions = new_inner_rank - self.inner_rank
if any(
[not x.is_uniform() for x in self.row_partitions[-new_dimensions:]]):
raise ValueError("Cannot get an inner shape over a ragged dimension")
first_dimension = self._num_slices_in_dimension(-new_inner_rank)
new_dimensions = new_inner_rank - self.inner_rank
new_dims = [first_dimension] + [
x.uniform_row_length() for x in self.row_partitions[-new_dimensions:]
]
return array_ops.concat(
[array_ops_stack.stack(new_dims), self.inner_shape[1:]], axis=0)
  def _inner_shape_dim(self, dimension):
    """Returns an int or a tensor representing _inner_shape[dimension]."""
    # Prefer the statically-known value; fall back to the dynamic tensor.
    result = tensor_shape.dimension_value(self._static_inner_shape[dimension])
    return self._inner_shape[dimension] if result is None else result
  def _with_inner_rank(self, inner_rank):
    """Returns the same shape but a different inner_rank.

    All dimensions that are to be represented in the inner_shape must be dense.
    See inner_rank.

    Args:
      inner_rank: the new inner_rank of the shape.

    Returns:
      the same shape but a different inner_rank

    Raises:
      ValueError if the new dense rank is invalid, or the old rank is unknown.
    """
    rank = self.rank
    if rank is None:
      raise ValueError("Rank must be known to adjust inner_rank")
    elif rank < 2:
      if inner_rank == rank:
        return self
      raise ValueError("Cannot change inner_rank if rank < 2")
    else:
      # When self.rank is not None:
      # self.rank = self.inner_rank + self.num_row_partitions
      new_num_row_partitions = rank - inner_rank
      return self._with_num_row_partitions(new_num_row_partitions)
  def _with_num_row_partitions(self, num_row_partitions):
    """Creates an identical shape with the given num_row_partitions.

    Note that the shape must be statically refactorable to this rank.
    In particular:
    * rank must be known.
    * num_row_partitions must be a nonnegative int.
    * num_row_partitions must be less than the rank of the shape
    * num_row_partitions must be greater or equal to the index of any ragged
      dimension.

    Note that if the num_row_partitions is the same, self is returned.

    Args:
      num_row_partitions: the target num_row_partitions (must be a nonnegative
        int).

    Returns:
      a shape with a (possibly) different num_row_partitions.

    Raises:
      ValueError: if the rank is unknown, the argument is not a nonnegative int,
        or there is a dimension that is nonuniform.
    """
    rank = self.rank
    if rank is None:
      raise ValueError("Rank must be known to adjust num_row_partitions")
    if not isinstance(num_row_partitions, int):
      raise ValueError("num_row_partitions must be an int")
    if num_row_partitions < 0:
      raise ValueError("num_row_partitions must be nonnegative")
    if num_row_partitions == self.num_row_partitions:
      return self
    if num_row_partitions >= rank:
      raise ValueError("num_row_partitions must be less than rank")
    if num_row_partitions > self.num_row_partitions:
      # Growing: peel uniform dimensions off the inner shape into new
      # RowPartitions, tracking nvals as a running product.
      num_row_partitions_diff = num_row_partitions - self.num_row_partitions
      new_inner_rank = self.rank - num_row_partitions
      nvals = self._inner_shape_dim(0)
      more_rp = []
      for i in range(num_row_partitions_diff):
        nrows = nvals
        row_length = self._inner_shape_dim(i + 1)
        nvals = nrows * row_length
        rp = RowPartition.from_uniform_row_length(
            row_length, nrows=nrows, dtype=self.dtype)
        more_rp.append(rp)
      alt_inner = self._alt_inner_shape(new_inner_rank)
      return DynamicRaggedShape(list(self.row_partitions) + more_rp, alt_inner)
    else:
      assert num_row_partitions < self.num_row_partitions
      # Shrinking: drop trailing partitions and absorb them into inner_shape.
      return DynamicRaggedShape(
          self.row_partitions[:num_row_partitions],
          self._alt_inner_shape(self.rank - num_row_partitions))
  def _merge_dims(self, outer_axis: int,
                  inner_axis: int) -> "DynamicRaggedShape":
    """Merges outer_axis...inner_axis into a single dimension.

    Returns a copy of this shape with the specified range of dimensions
    flattened into a single dimension, with elements in row-major order.

    #### Examples:

    >>> tf.experimental.DynamicRaggedShape.from_lengths([2, (2,1),
    ... (1,2,3)])._merge_dims(0, 1)
    <DynamicRaggedShape lengths=[3, (1, 2, 3)] num_row_partitions=1>
    >>> tf.experimental.DynamicRaggedShape.from_lengths([2, (2,1),
    ... (1,2,3)])._merge_dims(1, 2)
    <DynamicRaggedShape lengths=[2, (3, 3)] num_row_partitions=1>
    >>> tf.experimental.DynamicRaggedShape.from_lengths([2, (2,1),
    ... (1,2,3)])._merge_dims(0, 2)
    <DynamicRaggedShape lengths=[6] num_row_partitions=0>

    To mimic the behavior of `np.flatten` (which flattens all dimensions), use
    `rt.merge_dims(0, -1). To mimic the behavior of `tf.layers.Flatten` (which
    flattens all dimensions except the outermost batch dimension), use
    `rt.merge_dims(1, -1)`.

    Args:
      outer_axis: `int`: The first dimension in the range of dimensions to
        merge. May be negative if `self.shape.rank` is statically known.
      inner_axis: `int`: The last dimension in the range of dimensions to merge.
        May be negative if `self.shape.rank` is statically known.

    Returns:
      A copy of this shape, with the specified dimensions merged into a
      single dimension. The returned shape will be
      `self.shape[:outer_axis] + [N] + self.shape[inner_axis + 1:]`, where `N`
      is the total number of slices in the merged dimensions.
    """
    outer_axis = array_ops.get_positive_axis(
        outer_axis, self.rank, axis_name="outer_axis", ndims_name="rank(self)")
    inner_axis = array_ops.get_positive_axis(
        inner_axis, self.rank, axis_name="inner_axis", ndims_name="rank(self)")
    if not outer_axis <= inner_axis:
      raise ValueError(f"Expected outer_axis ({outer_axis}) to be less than or "
                       f"equal to inner_axis ({inner_axis}).")
    if outer_axis == inner_axis:
      return self
    # Four cases, depending on where the merged range falls relative to the
    # row partitions: fully dense, fully within the partitions, fully within
    # the inner shape, or straddling the boundary.
    if self.num_row_partitions == 0:
      # A dense tensor.
      (new_inner_shape,
       new_static_inner_shape) = _merge_inner_shape(self._inner_shape,
                                                    self._static_inner_shape,
                                                    outer_axis, inner_axis)
      return DynamicRaggedShape([],
                                new_inner_shape,
                                dtype=self.dtype,
                                static_inner_shape=new_static_inner_shape)
    if inner_axis <= self.num_row_partitions:
      # Here, we are merging the row_partitions,
      # but the inner_shape is unchanged.
      if outer_axis == 0:
        # There is no need to merge axes before the first, just truncate them.
        return DynamicRaggedShape(
            self._row_partitions[inner_axis:],
            self.inner_shape,
            dtype=self.dtype,
            static_inner_shape=self._static_inner_shape)
      prefix_rp = self._row_partitions[:outer_axis - 1]
      suffix_rp = self._row_partitions[inner_axis:]
      internal_rp = self._row_partitions[outer_axis - 1:inner_axis]
      new_rp = prefix_rp + (_merge_row_partitions(internal_rp),) + suffix_rp
      return DynamicRaggedShape(
          new_rp,
          self.inner_shape,
          dtype=self.dtype,
          static_inner_shape=self._static_inner_shape)
    elif outer_axis > self.num_row_partitions:
      # In this scenario, only the inner_shape is changed.
      # Example #1:
      # if [2, (1, 2), 5, 3], num_row_partitions=1, outer_axis=2, inner_axis=3.
      # Result: [2, (1, 2), 15], num_row_partitions=1, outer_axis=2,
      # inner_axis=3.
      (new_inner_shape, new_static_inner_shape) = _merge_inner_shape(
          self._inner_shape, self._static_inner_shape,
          outer_axis - self.num_row_partitions,
          inner_axis - self.num_row_partitions)
      return DynamicRaggedShape(
          self._row_partitions,
          new_inner_shape,
          dtype=self.dtype,
          static_inner_shape=new_static_inner_shape)
    else:
      # Here, both inner_shape and row_partitions are changed.
      rank = self.rank
      if rank is None:
        raise ValueError("Cannot merge_dims of the inner shape if the " +
                         "dimension of inner_shape is unknown")
      if outer_axis == 0:
        new_inner_shape = self._alt_inner_shape(rank - inner_axis)
        return DynamicRaggedShape._from_inner_shape(new_inner_shape)
      else:
        prefix = self._row_partitions[:outer_axis - 1]
        suffix = _merge_row_partitions(self._row_partitions[outer_axis - 1:])
        new_inner_shape = self._alt_inner_shape(rank - inner_axis)
        num_merged_inner = inner_axis - self.num_row_partitions
        prod = _reduce_prod_patch(self._inner_shape[1:num_merged_inner + 1])
        tail_suffix = RowPartition.from_row_splits(suffix.row_splits() * prod)
        return DynamicRaggedShape(prefix + (tail_suffix,), new_inner_shape)
def with_dtype(self, dtype):
"""Change the dtype of the shape."""
if dtype == self.dtype:
return self
else:
return DynamicRaggedShape(
self.row_partitions, self.inner_shape, dtype=dtype)
  def _merge_with(self, other: "DynamicRaggedShape") -> "DynamicRaggedShape":
    """Merge two shapes that are equal modulo num_row_partitions.

    The resulting num_row_partitions is the maximum of the two
    num_row_partitions.

    Args:
      other: a DynamicRaggedShape representing the same shape with a possibly
        different number of row partitions.

    Returns:
      A DynamicRaggedShape with the same shape and the maximum of the
      num_row_partitions of the two shapes.
    """
    max_num_row_partitions = max(self.num_row_partitions,
                                 other.num_row_partitions)
    # Normalize both shapes to the same partitioning before merging.
    a = self._with_num_row_partitions(max_num_row_partitions)
    b = other._with_num_row_partitions(max_num_row_partitions)
    new_row_partitions = [
        rp_a._merge_precomputed_encodings(rp_b)
        for (rp_a, rp_b) in zip(a._row_partitions, b._row_partitions)
    ]
    # int64 wins unless both shapes are int32.
    new_dtype = b.dtype if a.dtype == dtypes.int32 else dtypes.int64
    new_static_inner_shape = a._static_inner_shape.merge_with(
        b._static_inner_shape)
    new_inner_shape = a._inner_shape
    return DynamicRaggedShape(new_row_partitions, new_inner_shape, new_dtype,
                              True, new_static_inner_shape)
  def _merge_with_spec(
      self, other: "DynamicRaggedShape.Spec") -> "DynamicRaggedShape":
    """Merge a spec with a DynamicRaggedShape.

    Args:
      other: a Spec carrying static information to fold into this shape.

    Returns:
      A DynamicRaggedShape refined with the spec's static information.
    """
    # TODO(martinz): add tests for dynamic inconsistencies.
    max_num_row_partitions = max(self.num_row_partitions,
                                 other.num_row_partitions)
    a = self._with_num_row_partitions(max_num_row_partitions)
    b = other._with_num_row_partitions(max_num_row_partitions)
    new_row_partitions = [
        rp_a._merge_with_spec(rp_b)
        for (rp_a, rp_b) in zip(a._row_partitions, b._row_partitions)
    ]
    # int64 wins unless both dtypes are int32 (mirrors _merge_with).
    new_dtype = b.dtype if a.dtype == dtypes.int32 else dtypes.int64
    new_static_inner_shape = a._static_inner_shape.merge_with(
        b._static_inner_shape)
    new_inner_shape = a._inner_shape
    return DynamicRaggedShape(new_row_partitions, new_inner_shape, new_dtype,
                              True, new_static_inner_shape)
  def _as_row_partitions(self):
    """Returns row partitions representing this shape.

    In order to represent a shape as row partitions, the rank of the shape
    must be known, and the shape must have rank at least one.

    Returns:
      A list of RowPartition objects.

    Raises:
      ValueError, if the shape cannot be represented by RowPartitions.
    """
    rank = self.rank
    if rank is None:
      raise ValueError("rank must be known for _as_row_partitions")
    elif rank < 1:
      raise ValueError("rank must be >= 1 for _as_row_partitions")
    # rank - 1 partitions fully describe a rank-`rank` shape.
    fully_ragged = self._with_num_row_partitions(rank - 1)
    return fully_ragged.row_partitions
  def _validate_flat_values_dynamically(self, flat_values):
    """Test if flat_values have the right nvals dynamically.

    Returns flat_values with a runtime assert attached; the check only
    fires when the graph is executed.
    """
    if self.row_partitions:
      assert_op = check_ops.assert_equal(
          self.row_partitions[-1].nvals(),
          array_ops.shape(flat_values, out_type=self.dtype)[0],
          message="Last row partition does not match flat_values.")
      return control_flow_ops.with_dependencies([assert_op], flat_values)
    return flat_values
  def _validate_flat_values(self, flat_values):
    """Test if flat_values have the right nvals.

    Checks statically when possible; otherwise falls back to a dynamic
    (graph-time) assertion via _validate_flat_values_dynamically.
    """
    if not isinstance(flat_values, tensor_lib.Tensor):
      return flat_values
    if self.row_partitions:
      last_row_partition = self.row_partitions[-1]
      flat_values_shape = flat_values.shape
      if flat_values_shape is None:
        return self._validate_flat_values_dynamically(flat_values)
      first_dim_flat_values = flat_values_shape[0]
      if isinstance(first_dim_flat_values, tensor_shape.Dimension):
        first_dim_flat_values = first_dim_flat_values.value
      if first_dim_flat_values is None:
        # First dimension not statically known: must check at runtime.
        return self._validate_flat_values_dynamically(flat_values)
      static_nvals = last_row_partition.static_nvals
      if static_nvals is None:
        return self._validate_flat_values_dynamically(flat_values)
      if first_dim_flat_values != static_nvals:
        raise ValueError("Last row partition does not match flat_values.")
    return flat_values
def _add_row_partitions(self, flat_values, validate=False):
"""Add row partitions to flat_values, if necessary.
If the shape is truly ragged, then this adds the row_partitions.
The shape is dense, then this just returns flat_values.
Args:
flat_values: the flat_values of a ragged tensor with this shape, or a
dense tensor with this shape.
validate: validate the flat_values have the right first dimension.
Returns:
flat_values reshaped to have row_partitions.
"""
if self.row_partitions:
if validate:
flat_values = self._validate_flat_values(flat_values)
return ragged_tensor.RaggedTensor._from_nested_row_partitions(
flat_values, self.row_partitions, validate=False)
else:
return flat_values
class Spec:
"""A Spec for DynamicRaggedShape: similar to a static shape."""
    def __init__(self, row_partitions: Tuple[RowPartitionSpec, ...],
                 static_inner_shape: tensor_shape.TensorShape,
                 dtype: dtypes.DType):
      """Create a Spec given row partitions, a static inner shape, and a dtype.

      Args:
        row_partitions: A sequence of `RowPartitionSpec`s describing how the
          ragged shape is partitioned.
        static_inner_shape: The static shape of the flat_values.
        dtype: The DType used to encode the shape (tf.int64 or tf.int32).

      Raises:
        TypeError: if row_partitions is not an iterable of RowPartitionSpecs.
        ValueError: if dtypes disagree or the pieces are inconsistent.
      """
      # Independent validation and coercion of each argument.
      if not isinstance(row_partitions, Iterable):
        raise TypeError("row_partitions should be an Iterable")
      row_partitions = tuple(row_partitions)
      static_inner_shape = tensor_shape.as_shape(static_inner_shape)
      dtype = dtypes.as_dtype(dtype)
      if not all(isinstance(rp, RowPartitionSpec) for rp in row_partitions):
        raise TypeError(
            "row_partitions should be an Iterable of RowPartitionSpecs")
      if dtype != dtypes.int32 and dtype != dtypes.int64:
        raise ValueError("dtype must be tf.int32 or tf.int64")
      # All fields are now typechecked and internally consistent.
      for spec in row_partitions:
        if spec.dtype != dtype:
          raise ValueError(
              f"dtype of {spec!r} is {spec.dtype!r}: expected {dtype!r}")
      row_partitions = tuple(row_partitions)
      inner_rank = static_inner_shape.rank
      if inner_rank == 0:
        if row_partitions:
          raise ValueError(
              "If row_partitions are provided, must have inner_rank > 0")
      else:
        # Propagate statically-known sizes through the partitions so the
        # stored specs are as refined as possible.
        num_slices_in_dimension = []  # type: Sequence[tensor_shape.Dimension]
        # We first attempt to calculate num_slices_in_dimension through a
        # forward pass, using nrows[k] = nrows[k-1] * uniform_row_length
        # and other tricks.
        for i in range(len(row_partitions)):
          rp = row_partitions[i]
          result = tensor_shape.Dimension(rp.nrows)
          if i > 0:
            previous_rp = row_partitions[i - 1]
            result = result.merge_with(previous_rp.nvals)
            result = result.merge_with(num_slices_in_dimension[-1] *
                                       previous_rp.uniform_row_length)
          num_slices_in_dimension.append(result)
        # In the last step of the forward pass,
        # we combine nvals and the first dimension in static_inner_shape.
        if row_partitions:
          last_rp = row_partitions[-1]
          result = (num_slices_in_dimension[-1] *
                    last_rp.uniform_row_length).merge_with(last_rp.nvals)
          if inner_rank is not None:
            result = result.merge_with(
                tensor_shape.dimension_at_index(static_inner_shape, 0))
            static_inner_shape = result + static_inner_shape[1:]
          num_slices_in_dimension.append(result)
        # Now, we start a backward pass.
        for i in range(len(num_slices_in_dimension) - 1, 0, -1):
          num_slices_in_dimension[i - 1] = num_slices_in_dimension[
              i - 1].merge_with(
                  _safe_floor_div(num_slices_in_dimension[i],
                                  row_partitions[i - 1].uniform_row_length))
        # Finally, we construct the partitions.
        row_partitions = [
            RowPartitionSpec(  # pylint: disable=g-complex-comprehension
                nrows=num_slices_in_dimension[i].value,
                uniform_row_length=rp.uniform_row_length,
                nvals=num_slices_in_dimension[i + 1].value,
                dtype=rp.dtype) for i, rp in enumerate(row_partitions)
        ]
      self._static_inner_shape = static_inner_shape
      self._inner_shape = tensor_lib.TensorSpec([inner_rank], dtype=dtype)
      self._row_partitions = row_partitions
def __repr__(self):
return (
f"DynamicRaggedShape.Spec(row_partitions={self._row_partitions!r}, " +
f"static_inner_shape={self._static_inner_shape!r}, " +
f"dtype={self.dtype!r})")
    @classmethod
    def from_value(cls, value: Any) -> "DynamicRaggedShape.Spec":
      """Create a Spec from a DynamicRaggedShape.

      Re-runs __init__ on the fields extracted by the base TypeSpec so the
      result is fully validated and refined.
      """
      # super().from_value(...) creates an object, but there is no validation.
      # No methods can be trusted on the object, just the properties.
      initial = super(DynamicRaggedShape.Spec, cls).from_value(value)
      # However, since value is a DynamicRaggedShape, we
      # can guarantee that initial._inner_shape.shape.rank == 1
      # Moreover, if inner_shape.shape[0] is not None, then
      # static_inner_shape.rank is not None.
      return DynamicRaggedShape.Spec(
          row_partitions=initial._row_partitions,
          static_inner_shape=initial._static_inner_shape,
          dtype=initial._inner_shape.dtype)
# TODO(martinz): it is unclear what the default uniformity of RowPartitions
# should be, so I am moving this to experimental until we figure it out.
# Also, while I have specified this is meant to represent a shape of a
# proper Tensor instead of a RaggedTensor, this is also subject to
# interpretation.
    @classmethod
    def _from_tensor_shape(cls, shape: Any, num_row_partitions: int,
                           dtype: dtypes.DType) -> "DynamicRaggedShape.Spec":
      """Creates a `DynamicRaggedShape.Spec` corresponding to a `tf.TensorShape`.

      It is assumed that this is a `tf.TensorShape` coming from a
      `tf.TensorSpec`, not from `RaggedTensor.shape`.

      In addition to the shape, we need to know the number of row partitions,
      and the dtype used in the shape (tf.int32 or tf.int64).

      Within the dimensions that are partitioned, all dimensions are assumed
      to be uniform.

      Args:
        shape: a TensorShape.
        num_row_partitions: the ragged rank of the RaggedShape.
        dtype: the dtype of the shape (not the tensor); tf.int64 or tf.int32.

      Returns:
        a DynamicRaggedShape.Spec representing a TensorShape.

      Raises:
        ValueError: on an invalid dtype or an inconsistent
          num_row_partitions.
      """
      if dtype != dtypes.int32 and dtype != dtypes.int64:
        raise ValueError("dtype must be tf.int32 or tf.int64")
      shape = tensor_shape.as_shape(shape)
      if shape.rank is None:
        # Unknown rank: partitions carry no static information.
        row_partitions = [
            RowPartitionSpec(dtype=dtype) for _ in range(num_row_partitions)
        ]
        return DynamicRaggedShape.Spec(
            row_partitions=row_partitions,
            static_inner_shape=tensor_shape.TensorShape(None),
            dtype=dtype)
      if shape.rank <= 1:
        # Create a scalar or vector shape.
        if num_row_partitions:
          raise ValueError("num_row_partitions should be zero " +
                           "if shape is a scalar or vector.")
        return DynamicRaggedShape.Spec(
            row_partitions=[], static_inner_shape=shape, dtype=dtype)
      if shape.rank <= num_row_partitions:
        raise ValueError("num_row_partitions must be less than rank")
      # Build uniform partitions, propagating the running element count
      # (None once any participating dimension is unknown).
      num_elements_so_far = tensor_shape.dimension_value(shape[0])
      rp_specs = []
      for i in range(num_row_partitions):
        current_dim = tensor_shape.dimension_value(shape[i + 1])
        if current_dim is None or num_elements_so_far is None:
          nvals = None
        else:
          nvals = num_elements_so_far * current_dim
        rp_specs.append(
            RowPartitionSpec(
                nrows=num_elements_so_far,
                nvals=nvals,
                uniform_row_length=current_dim,
                dtype=dtype))
        num_elements_so_far = nvals
      static_inner_shape = tensor_shape.TensorShape(
          [num_elements_so_far]) + shape[num_row_partitions + 1:]
      return DynamicRaggedShape.Spec(
          row_partitions=rp_specs,
          static_inner_shape=static_inner_shape,
          dtype=dtype)
    @classmethod
    def _from_spec(
        cls,
        spec: Union["DynamicRaggedShape.Spec", ragged_tensor.RaggedTensorSpec,
                    tensor_lib.TensorSpec],
        dtype: dtypes.DType = dtypes.int64) -> "DynamicRaggedShape.Spec":
      """Create a TypeSpec for the shape of an object with a given TypeSpec.

      I.e., if `x_spec = tf.type_spec_from_value(x)`, then
      `DynamicRaggedShape.from_spec(x_spec)` returns a TypeSpec compatible with
      `tf.type_spec_from_value(tf.shape(x))`.

      >>> rt = tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
      >>> rt_spec = tf.type_spec_from_value(rt)
      >>> rt_shape = DynamicRaggedShape.from_tensor(rt)

      >>> shape_spec_1 = tf.type_spec_from_value(rt_shape)
      >>> shape_spec_2 = DynamicRaggedShape.Spec._from_spec(rt_spec)
      >>> assert shape_spec_1.is_compatible_with(shape_spec_2)

      Args:
        spec: a Spec of a Tensor or RaggedTensor.
        dtype: the default dtype (if necessary).

      Returns:
        A Spec of the shape of a Tensor or RaggedTensor.
      """
      # TODO(martinz): Add StructuredTensor.Spec when its easy.
      # NOTE(review): specs of any other type fall through all branches and
      # return None implicitly — confirm this is intentional.
      if isinstance(spec, DynamicRaggedShape.Spec):
        return spec
      elif isinstance(spec, ragged_tensor.RaggedTensorSpec):
        return cls._from_tensor_shape(spec.shape, spec.ragged_rank,
                                      spec.row_splits_dtype)
      elif isinstance(spec, tensor_lib.TensorSpec):
        return cls._from_tensor_shape(
            shape=spec.shape, num_row_partitions=0, dtype=dtype)
    @property
    def dtype(self) -> dtypes.DType:
      """The dtype used to encode the shape (tf.int32 or tf.int64)."""
      return self._inner_shape.dtype
    @property
    def inner_rank(self) -> Optional[int]:
      """The rank of the inner shape, or None if unknown."""
      if self._static_inner_shape.rank is not None:
        return self._static_inner_shape.rank
      if self._inner_shape.shape.rank is None:
        return None
      # Fall back to the length of the (1-D) inner shape TensorSpec.
      return tensor_shape.dimension_value(self._inner_shape.shape[0])
    @property
    def num_row_partitions(self) -> int:
      """The number of row partition specs."""
      return len(self._row_partitions)
    @property
    def rank(self) -> Optional[int]:
      """The rank of the represented shape, or None if unknown."""
      inner_rank = self.inner_rank
      return None if inner_rank is None else inner_rank + self.num_row_partitions
    def _dimension(self, index: int) -> Optional[int]:
      """Get the size of dimension index, if known statically.

      Returns:
        An int, or None if the size is not statically known.

      Raises:
        ValueError: if index is out of range for the known rank.
      """
      if index == 0:
        if self._row_partitions:
          return self._row_partitions[0].nrows
        elif self.inner_rank is None:
          return None
        elif self.inner_rank == 0:
          raise ValueError("Index out of range: 0.")
        else:
          return tensor_shape.dimension_value(self._static_inner_shape[0])
      if index <= len(self._row_partitions):
        # Partitioned dimension: uniform row length (None if ragged/unknown).
        return self._row_partitions[index - 1].uniform_row_length
      relative_index = index - self.num_row_partitions
      if self.inner_rank is None:
        return None
      elif self.inner_rank <= relative_index:
        raise ValueError(f"Index out of range: {index}.")
      else:
        return tensor_shape.dimension_value(
            self._static_inner_shape[relative_index])
    def _num_slices_in_dimension(self, axis: int) -> Optional[int]:
      """The total size of a dimension (like nvals).

      This is a static version of DynamicRaggedShape._num_slices_in_dimension()

      Example:

      ```
      shape = DynamicRaggedShape.Spec(
        _row_partitions=[
          RowPartitionSpec(nrows=3, nvals=14, dtype=tf.int32)
          RowPartitionSpec(nrows=14, nvals=25, dtype=tf.int32)
        ],
        _static_inner_shape=tf.TensorShape([25, 3, 4]),
        _inner_shape=tf.TensorSpec(tf.TensorShape([3]), dtype=tf.int32))
      shape._num_slices_in_dimension(0) = 3
      shape._num_slices_in_dimension(1) = 14
      shape._num_slices_in_dimension(2) = 25
      shape._num_slices_in_dimension(3) = 3
      shape._num_slices_in_dimension(4) = 4
      shape._num_slices_in_dimension(-2) = 3
      ```

      Args:
        axis: the last dimension to include.

      Returns:
        the number of values in a dimension (None if not statically known).

      Raises:
        TypeError: if axis is not an int.
      """
      if not isinstance(axis, int):
        raise TypeError("axis must be an integer")
      axis = array_ops.get_positive_axis(axis, self.rank, ndims_name="rank")
      if axis == 0:
        return self._dimension(0)
      if axis <= self.num_row_partitions:
        # TODO(martinz): use nvals OR nrows, whichever is defined.
        return self._row_partitions[axis - 1].nvals
      # Product of the leading inner dimensions (None if any is unknown).
      remainder = axis - (self.num_row_partitions - 1)
      head_inner_shape = self._static_inner_shape[:remainder]
      return head_inner_shape.num_elements()
def with_dtype(self, dtype: dtypes.DType) -> "DynamicRaggedShape.Spec":
"""Return the same spec, but with a different DType."""
new_rp_specs = [rp.with_dtype(dtype) for rp in self._row_partitions]
return DynamicRaggedShape.Spec(
row_partitions=new_rp_specs,
static_inner_shape=self._static_inner_shape,
dtype=dtype)
def _merge_with(
self, other: "DynamicRaggedShape.Spec") -> "DynamicRaggedShape.Spec":
"""Merges all information between two specs.
Specs are expected to represent the same information modulo
num_row_partitions.
If the specs are of different ranks, then fail.
Args:
other: another Spec of the same rank.
Returns:
a Spec with the union of information.
"""
max_num_row_partitions = max(self.num_row_partitions,
other.num_row_partitions)
a = self._with_num_row_partitions(max_num_row_partitions)
b = other._with_num_row_partitions(max_num_row_partitions)
new_rp = [
a._merge_with(b)
for (a, b) in zip(a._row_partitions, b._row_partitions)
]
new_static_inner_shape = a._static_inner_shape.merge_with(
b._static_inner_shape)
dtype = b.dtype if (a.dtype == dtypes.int32) else dtypes.int64
return DynamicRaggedShape.Spec(
new_rp, new_static_inner_shape, dtype=dtype)
def _with_num_row_partitions(
self, new_num_row_partitions: int) -> "DynamicRaggedShape.Spec":
"""Change the number of row partitions in the spec."""
rank = self.rank
if rank is None:
raise ValueError(
"Changing num_row_partitions with unknown rank unsupported")
if new_num_row_partitions > max(rank - 1, 0):
raise ValueError("Number of row partitions too large")
if new_num_row_partitions < 0:
raise ValueError("Number of row partitions negative")
if self.num_row_partitions == new_num_row_partitions:
return self
elif self.num_row_partitions < new_num_row_partitions:
# TODO(martinz): Consider swapping.
rp_delta = new_num_row_partitions - self.num_row_partitions
tail_shape = DynamicRaggedShape.Spec._from_tensor_shape(
self._static_inner_shape, rp_delta, self.dtype)
return DynamicRaggedShape.Spec(
row_partitions=self._row_partitions + tail_shape._row_partitions,
static_inner_shape=tail_shape._static_inner_shape,
dtype=self.dtype)
else:
assert self.num_row_partitions > new_num_row_partitions
new_row_partitions = self._row_partitions[:new_num_row_partitions]
last_row_partition = new_row_partitions[-1]
old_row_partitions = self._row_partitions[new_num_row_partitions:]
new_static_inner_shape = (
tensor_shape.TensorShape(
[last_row_partition.nvals] +
[x.uniform_row_length for x in old_row_partitions]) +
self._static_inner_shape[1:])
return DynamicRaggedShape.Spec(new_row_partitions,
new_static_inner_shape, self.dtype)
def _set_rank_if_unknown(self, new_rank: int) -> "DynamicRaggedShape.Spec":
"""Ensures this has a known rank at least new_rank."""
if new_rank is None:
raise TypeError("new_rank is None, but expected int")
if new_rank < 0:
raise ValueError("Rank must be non-negative")
current_rank = self.rank
if current_rank is not None and current_rank < new_rank:
raise ValueError(
"Rank is {current_rank}, expected at least {new_rank}.".format(
current_rank=current_rank, new_rank=new_rank))
if current_rank is not None:
return self
if self._row_partitions:
new_inner_rank = max(new_rank - self.num_row_partitions, 1)
first_dim = self._row_partitions[-1].nvals
static_inner_shape = tensor_shape.TensorShape([first_dim] + [None] *
(new_inner_rank - 1))
else:
static_inner_shape = tensor_shape.TensorShape([None] * new_rank)
return DynamicRaggedShape.Spec(
row_partitions=self._row_partitions,
static_inner_shape=static_inner_shape,
dtype=self.dtype)
def _truncate(self, new_rank: int) -> "DynamicRaggedShape.Spec":
"""Truncate a ragged shape spec.
For example, if the original spec s was for a shape:
[3, [4, 1], 2, 7]
Then truncate_dynamic_ragged_shape_spec(s, 3) is a spec for:
[3, [4, 1], 2]
Args:
new_rank: the new rank
Returns:
A truncated DynamicRaggedShape.Spec.
"""
if self.rank is None:
return self._set_rank_if_unknown(new_rank)._truncate(new_rank)
if new_rank == 0:
return DynamicRaggedShape.Spec._from_tensor_shape([], 0, self.dtype)
if new_rank == 1:
vector_size = self._dimension(0)
return DynamicRaggedShape.Spec._from_tensor_shape([vector_size], 0,
self.dtype)
if new_rank < self.num_row_partitions + 1:
new_row_partitions = self._row_partitions[:new_rank - 1]
new_static_inner_shape = tensor_shape.TensorShape(
[new_row_partitions[-1].nvals])
return DynamicRaggedShape.Spec(
row_partitions=new_row_partitions,
static_inner_shape=new_static_inner_shape,
dtype=self.dtype)
else:
remainder = new_rank - self.num_row_partitions
new_static_inner_shape = self._static_inner_shape[:remainder]
return DynamicRaggedShape.Spec(
row_partitions=self._row_partitions,
static_inner_shape=new_static_inner_shape,
dtype=self.dtype)
def _to_tensor_shape(self):
"""Get a tensor shape corresponding to this type."""
alt = self
if alt._static_inner_shape.rank is None:
return tensor_shape.TensorShape(None)
if alt._static_inner_shape.rank == 0:
assert not alt._row_partitions
return alt._static_inner_shape
prefix = [alt._dimension(0)]
prefix.extend([rp.uniform_row_length for rp in alt._row_partitions])
suffix = alt._static_inner_shape[1:]
return tensor_shape.TensorShape(prefix) + suffix
def broadcast_dynamic_shape(shape_x: DynamicRaggedShape,
shape_y: DynamicRaggedShape) -> DynamicRaggedShape:
"""Returns the shape formed by broadcasting two shapes to be compatible.
1. If shape_x and shape_y both have row_partitions, then fail if their dtypes
don't match.
2. If neither has row_partitions and they have different dtypes,
go with int64.
3. If one has row_partitions, go with that dtype.
Args:
shape_x: A `DynamicRaggedShape`
shape_y: A `DynamicRaggedShape`
Returns:
A `DynamicRaggedShape`.
Raises:
ValueError: If `shape_x` and `shape_y` are not broadcast-compatible.
"""
if not isinstance(shape_x, DynamicRaggedShape):
raise TypeError("shape_x must be a DynamicRaggedShape")
if not isinstance(shape_y, DynamicRaggedShape):
raise TypeError("shape_y must be a DynamicRaggedShape")
return broadcast_dynamic_shape_extended(shape_x, shape_y)[0]
def broadcast_to(rt_input, shape: DynamicRaggedShape):
"""Broadcasts a potentially ragged tensor to a ragged shape.
Tiles `rt_input` as necessary to match the given shape.
Behavior is undefined if `rt_input` is not broadcast-compatible with `shape`.
Args:
rt_input: The potentially ragged tensor to broadcast.
shape: A `DynamicRaggedShape`
Returns:
A potentially ragged tensor whose values are taken from
`rt_input`, and whose shape matches `shape`.
"""
if not isinstance(shape, DynamicRaggedShape):
raise TypeError("shape must be a DynamicRaggedShape")
rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)
origin_shape = None
if ragged_tensor.is_ragged(rt_input):
if shape.num_row_partitions != 0:
if rt_input.row_splits.dtype != shape.dtype:
raise ValueError("Cannot coerce row_splits.dtype")
else:
shape = shape.with_dtype(rt_input.row_splits.dtype)
origin_shape = DynamicRaggedShape.from_tensor(rt_input)
else:
if shape.num_row_partitions != 0:
origin_shape = DynamicRaggedShape.from_tensor(rt_input, dtype=shape.dtype)
else:
origin_shape = DynamicRaggedShape.from_tensor(
rt_input, dtype=dtypes.int64)
shape = shape.with_dtype(dtype=dtypes.int64)
broadcaster = _get_broadcaster(origin_shape, shape)
return broadcaster.broadcast(rt_input)
def broadcast_dynamic_shape_extended(
a: DynamicRaggedShape, b: DynamicRaggedShape
): # -> Tuple[DynamicRaggedShape, _Broadcaster, _Broadcaster]
"""Gets the smallest shape to which a and b can broadcast.
In order to create the smallest shape, one must also do most of the
work to figure out how to transform from the shapes given. Thus, in addition
to returning the shape, it also creates transformations from the
original shapes to the result.
This is the equivalent of:
c = broadcast_dynamic_shape(a, b)
ac = get_broadcaster(a, c)
bc = get_broadcaster(b, c)
return (c, ac, bc)
Args:
a: a DynamicRaggedShape
b: a DynamicRaggedShape
Returns:
A triple of a shape and two broadcasters.
"""
if a.row_partitions and b.row_partitions:
if a.dtype != b.dtype:
raise ValueError("Dtypes don't match")
elif a.dtype != b.dtype:
if a.row_partitions:
b = b.with_dtype(a.dtype)
elif b.row_partitions:
a = a.with_dtype(b.dtype)
else:
a = a.with_dtype(dtypes.int64)
b = b.with_dtype(dtypes.int64)
if (a.rank is None or b.rank is None):
raise ValueError("Unable to broadcast: unknown rank")
elif a.rank == 0:
return (b, _Broadcaster(a, b, []), _get_identity_broadcaster(b))
elif b.rank == 0:
return (a, _get_identity_broadcaster(a), _Broadcaster(b, a, []))
elif a.rank == 1 and b.rank == 1:
[a_layer, b_layer,
target] = _broadcast_dynamic_shape_one_layer(a.inner_shape, b.inner_shape)
target_shape = DynamicRaggedShape._from_inner_shape(target) # pylint: disable=protected-access
return (target_shape, _Broadcaster(a, target_shape, [a_layer]),
_Broadcaster(b, target_shape, [b_layer]))
if a.rank > b.rank:
(c, bc, ac) = _broadcast_dynamic_shape_extended_helper(b, a) # pylint: disable=arguments-out-of-order
return (c, ac, bc)
return _broadcast_dynamic_shape_extended_helper(a, b)
def _row_partitions_identical(shape_a, shape_b):
"""Returns True iff all row_partitions in shapes are identical."""
return ((shape_a.num_row_partitions == shape_b.num_row_partitions) and all(
a is b for a, b in zip(shape_a.row_partitions, shape_b.row_partitions)))
# TODO(martinz): Preserve shapes better (see CL/414806185)
@dispatch.dispatch_for_binary_elementwise_apis(ragged_tensor.RaggedOrDense,
ragged_tensor.RaggedOrDense)
def ragged_binary_elementwise_op_impl(op, x, y):
"""Binary elementwise api handler for RaggedTensors."""
x_is_ragged = ragged_tensor.is_ragged(x)
y_is_ragged = ragged_tensor.is_ragged(y)
# Convert args to tensors.
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(
x, preferred_dtype=(y.dtype if y_is_ragged else None))
y = ragged_tensor.convert_to_tensor_or_ragged_tensor(
y, preferred_dtype=x.dtype)
if x_is_ragged and y_is_ragged:
x, y = ragged_tensor.match_row_splits_dtypes(x, y)
if ((x_is_ragged and y_is_ragged) or
(x_is_ragged and x.flat_values.shape.ndims <= y.shape.ndims) or
(y_is_ragged and y.flat_values.shape.ndims <= x.shape.ndims)):
shape_x = DynamicRaggedShape.from_tensor(x)
shape_y = DynamicRaggedShape.from_tensor(y)
if shape_x.dtype != shape_y.dtype:
if not x_is_ragged:
shape_x = shape_x.with_dtype(shape_y.dtype)
elif not y_is_ragged:
shape_y = shape_y.with_dtype(shape_x.dtype)
if _row_partitions_identical(shape_x, shape_y):
# At this point, both x and y must be ragged.
return shape_x._add_row_partitions( # pylint: disable=protected-access
op(x.flat_values, y.flat_values),
validate=False)
(shape_z, bcast_xz,
bcast_yz) = broadcast_dynamic_shape_extended(shape_x, shape_y)
x_new_flat = bcast_xz.broadcast_flat_values(x, inner_dimensions=False)
y_new_flat = bcast_yz.broadcast_flat_values(y, inner_dimensions=False)
z_flat = op(x_new_flat, y_new_flat)
return shape_z._add_row_partitions(z_flat, validate=True) # pylint: disable=protected-access
x_values = x.flat_values if ragged_tensor.is_ragged(x) else x
y_values = y.flat_values if ragged_tensor.is_ragged(y) else y
mapped_values = op(x_values, y_values)
if isinstance(mapped_values, bool):
return mapped_values # Special case for tensor_equals.
if ragged_tensor.is_ragged(x):
return x.with_flat_values(mapped_values)
else:
return y.with_flat_values(mapped_values)
@dispatch.dispatch_for_binary_elementwise_assert_apis(
ragged_tensor.RaggedOrDense, ragged_tensor.RaggedOrDense)
def ragged_binary_elementwise_assert_op_impl(op, x, y):
"""Binary elementwise assert api handler for RaggedTensors.
This handles binary assert operations for ragged tensors. Compared with
`ragged_binary_elementwise_op_impl`, this handler does not compute a ragged
tensor as output. Instead, it applies the assert operation `op` to input
tensors based on their ragged shapes and flat_values, and returns the result
of the assertion operation.
Args:
op: a binary assert operation on Tensors.
x: something that can be coerced to a Tensor or RaggedTensor.
y: something that can be coerced to a Tensor or RaggedTensor.
Returns:
the result of the assertion operation.
"""
x_is_ragged = ragged_tensor.is_ragged(x)
y_is_ragged = ragged_tensor.is_ragged(y)
# Convert args to tensors.
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(
x, preferred_dtype=(y.dtype if y_is_ragged else None))
y = ragged_tensor.convert_to_tensor_or_ragged_tensor(
y, preferred_dtype=x.dtype)
if x_is_ragged and y_is_ragged:
x, y = ragged_tensor.match_row_splits_dtypes(x, y)
if ((x_is_ragged and y_is_ragged) or
(x_is_ragged and x.flat_values.shape.ndims <= y.shape.ndims) or
(y_is_ragged and y.flat_values.shape.ndims <= x.shape.ndims)):
shape_x = DynamicRaggedShape.from_tensor(x)
shape_y = DynamicRaggedShape.from_tensor(y)
if shape_x.dtype != shape_y.dtype:
if not x_is_ragged:
shape_x = shape_x.with_dtype(shape_y.dtype)
elif not y_is_ragged:
shape_y = shape_y.with_dtype(shape_x.dtype)
if _row_partitions_identical(shape_x, shape_y):
# At this point, both x and y must be ragged.
return op(x.flat_values, y.flat_values)
(_, bcast_xz, bcast_yz) = broadcast_dynamic_shape_extended(shape_x, shape_y)
x_new_flat = bcast_xz.broadcast_flat_values(x, inner_dimensions=False)
y_new_flat = bcast_yz.broadcast_flat_values(y, inner_dimensions=False)
return op(x_new_flat, y_new_flat)
x_values = x.flat_values if ragged_tensor.is_ragged(x) else x
y_values = y.flat_values if ragged_tensor.is_ragged(y) else y
return op(x_values, y_values)
def _find_dtype_helper(value, preferred):
"""Helper for _find_dtype."""
if preferred is not None:
return preferred
elif isinstance(value, RowPartition):
return value.dtype
elif isinstance(value, dtypes.DType):
return value
elif isinstance(value, int):
return None
elif isinstance(value, list):
return None
elif isinstance(value, tuple):
return None
elif isinstance(value, core.Tensor):
return value.dtype
return value.dtype
def _find_dtype(value, preferred):
"""Returns the preferred dtype of value or preferred if preferred != None.
This is used as an operator to pass over multiple objects in decreasing order
of priority until there is a preferred dtype for one. For example, if you were
adding three tensor-ish things (some tensors, some lists), and needed a
preferred dtype, you could use this as:
def adding(a, b, c, dtype = None):
dtype = _find_dtype(a, dtype)
dtype = _find_dtype(b, dtype)
dtype = _find_dtype(c, dtype)
if dtype is None:
dtype = tf.float32
...Code continues here...
Args:
value: a list, value, RowPartition, or tensor.
preferred: a given dtype. If not None, this will be returned.
Returns:
an optional dtype.
"""
result = _find_dtype_helper(value, preferred)
if (result == dtypes.int64 or result == dtypes.int32 or result is None):
return result
raise ValueError("Illegal dtype: " + str(result))
def _find_dtype_iterable(
iterable: Iterable[Any],
dtype: Optional[dtypes.DType]) -> Optional[dtypes.DType]:
"""Find the preferred dtype of a list of objects.
This will go over the iterable, and use the first object with a preferred
dtype. The dtype passed has highest priority if it is not None.
Args:
iterable: an iterable with things that might have a dtype.
dtype: an overriding dtype, or None.
Returns:
an optional dtype.
"""
if dtype is not None:
return dtype
for x in iterable:
dtype = _find_dtype(x, dtype)
return dtype
| DynamicRaggedShape |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_map_metrics/column_values_null.py | {
"start": 874,
"end": 1436
} | class ____(ColumnMapMetricProvider):
condition_metric_name = "column_values.null"
filter_column_isnull = False
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.isnull()
@column_condition_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(cls, column, **kwargs):
return column == None # noqa: E711 # FIXME CoP
@column_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, **kwargs):
return column.isNull()
| ColumnValuesNull |
python | facebook__pyre-check | tools/upgrade/filesystem.py | {
"start": 812,
"end": 908
} | class ____(NamedTuple):
name: str
strict: bool
pyre: bool
check_types: bool
| Target |
python | doocs__leetcode | solution/2600-2699/2653.Sliding Subarray Beauty/Solution2.py | {
"start": 0,
"end": 1685
} | class ____:
def __init__(self, x: int):
self.x = x
self.small = []
self.large = []
self.delayed = defaultdict(int)
self.small_size = 0
self.large_size = 0
def add_num(self, num: int):
if self.small_size < self.x or num <= -self.small[0]:
heappush(self.small, -num)
self.small_size += 1
else:
heappush(self.large, num)
self.large_size += 1
self.rebalance()
def find(self) -> float:
return -self.small[0] if self.small_size == self.x else 0
def remove_num(self, num: int):
self.delayed[num] += 1
if num <= -self.small[0]:
self.small_size -= 1
if num == -self.small[0]:
self.prune(self.small)
else:
self.large_size -= 1
if num == self.large[0]:
self.prune(self.large)
self.rebalance()
def prune(self, pq: List[int]):
sign = -1 if pq is self.small else 1
while pq and sign * pq[0] in self.delayed:
self.delayed[sign * pq[0]] -= 1
if self.delayed[sign * pq[0]] == 0:
self.delayed.pop(sign * pq[0])
heappop(pq)
def rebalance(self):
if self.small_size > self.x:
heappush(self.large, -heappop(self.small))
self.small_size -= 1
self.large_size += 1
self.prune(self.small)
elif self.small_size < self.x and self.large_size > 0:
heappush(self.small, -heappop(self.large))
self.large_size -= 1
self.small_size += 1
self.prune(self.large)
| MedianFinder |
python | mlflow__mlflow | tests/haystack/test_haystack_tracing.py | {
"start": 443,
"end": 532
} | class ____:
def run(self, a: int, b: int):
return {"sum": a + b}
@component
| Add |
python | falconry__falcon | falcon/inspect.py | {
"start": 13573,
"end": 14150
} | class ____(_Traversable):
"""Describes a middleware class.
Args:
name (str): The name of the middleware class.
source_info (str): The source path where the middleware was defined.
methods (list[MiddlewareMethodInfo]): List of method defined by the
middleware class.
"""
__visit_name__ = 'middleware_class'
def __init__(
self, name: str, source_info: str, methods: list[MiddlewareMethodInfo]
):
self.name = name
self.source_info = source_info
self.methods = methods
| MiddlewareClassInfo |
python | dagster-io__dagster | scripts/run-pyright.py | {
"start": 3602,
"end": 3726
} | class ____(TypedDict):
file: str
message: str
severity: str
range: Range
rule: NotRequired[str]
| Diagnostic |
python | getsentry__sentry | src/sentry/models/releasefile.py | {
"start": 6880,
"end": 7739
} | class ____:
"""Holds data of artifact index and keeps track of changes"""
def __init__(self, data: dict, fresh=False):
self._data = data
self.changed = fresh
@property
def data(self):
"""Meant to be read-only"""
return self._data
@property
def num_files(self):
return len(self._data.get("files", {}))
def get(self, filename: str):
return self._data.get("files", {}).get(filename, None)
def update_files(self, files: dict):
if files:
self._data.setdefault("files", {}).update(files)
self.changed = True
def delete(self, filename: str) -> bool:
result = self._data.get("files", {}).pop(filename, None)
deleted = result is not None
if deleted:
self.changed = True
return deleted
| _ArtifactIndexData |
python | python__mypy | mypy/inspections.py | {
"start": 5284,
"end": 6147
} | class ____(ExtendedTraverserVisitor):
"""Visitor looking for all expressions whose spans enclose given position."""
def __init__(self, line: int, column: int) -> None:
self.line = line
self.column = column
self.result: list[Expression] = []
def visit(self, o: Node) -> bool:
if node_starts_after(o, self.line, self.column):
return False
if node_ends_before(o, self.line, self.column):
return False
if isinstance(o, Expression):
self.result.append(o)
return True
def find_all_by_location(tree: MypyFile, line: int, column: int) -> list[Expression]:
"""Find all expressions enclosing given position starting from innermost."""
visitor = SearchAllVisitor(line, column)
tree.accept(visitor)
return list(reversed(visitor.result))
| SearchAllVisitor |
python | doocs__leetcode | solution/3300-3399/3375.Minimum Operations to Make Array Values Equal to K/Solution.py | {
"start": 0,
"end": 269
} | class ____:
def minOperations(self, nums: List[int], k: int) -> int:
s = set()
mi = inf
for x in nums:
if x < k:
return -1
mi = min(mi, x)
s.add(x)
return len(s) - int(k == mi)
| Solution |
python | gevent__gevent | src/gevent/tests/test__pywsgi.py | {
"start": 42505,
"end": 42863
} | class ____(TestNonLatin1HeaderFromApplication):
# Flip-flop of the superclass: Python 3 native string, Python 2 unicode object
header = u"\u1f4a3" # bomb in unicode
# Error both on py3 and py2. On py2, non-native string. On py3, native string
# that cannot be encoded to latin-1
should_error = True
| TestNonLatin1UnicodeHeaderFromApplication |
python | Netflix__metaflow | metaflow/client/filecache.py | {
"start": 1059,
"end": 13947
} | class ____(object):
def __init__(self, cache_dir=None, max_size=None):
self._cache_dir = cache_dir
self._max_size = max_size
if self._cache_dir is None:
self._cache_dir = CLIENT_CACHE_PATH
if self._max_size is None:
self._max_size = int(CLIENT_CACHE_MAX_SIZE)
self._total = 0
self._objects = None
# We have a separate blob_cache per flow and datastore type.
self._blob_caches = {}
# We also keep a cache for FlowDataStore objects because some of them
# may have long-lived persistent connections; this is purely a
# performance optimization. Uses OrderedDict to implement a kind of LRU
# cache and keep only a certain number of these caches around.
self._store_caches = OrderedDict()
# We also keep a cache of data_metadata for TaskDatastore. This is used
# when querying for sizes of artifacts. Once we have queried for the size
# of one artifact in a TaskDatastore, caching this means that any
# queries on that same TaskDatastore will be quick (since we already
# have all the metadata). We keep track of this in a file so it persists
# across processes.
@property
def cache_dir(self):
return self._cache_dir
def get_logs_stream(
self, ds_type, ds_root, stream, attempt, flow_name, run_id, step_name, task_id
):
from metaflow.mflog import LOG_SOURCES
ds = self._get_flow_datastore(ds_type, ds_root, flow_name)
task_ds = ds.get_task_datastore(
run_id, step_name, task_id, data_metadata={"objects": {}, "info": {}}
)
return task_ds.load_logs(LOG_SOURCES, stream, attempt_override=attempt)
def get_log_legacy(
self, ds_type, location, logtype, attempt, flow_name, run_id, step_name, task_id
):
ds_cls = self._get_datastore_storage_impl(ds_type)
ds_root = ds_cls.path_join(*ds_cls.path_split(location)[:-5])
cache_id = self.flow_ds_id(ds_type, ds_root, flow_name)
token = (
"%s.cached"
% sha1(
os.path.join(run_id, step_name, task_id, "%s_log" % logtype).encode(
"utf-8"
)
).hexdigest()
)
path = os.path.join(self._cache_dir, cache_id, token[:2], token)
cached_log = self.read_file(path)
if cached_log is not None:
return cached_log
ds = self._get_flow_datastore(ds_type, ds_root, flow_name)
task_ds = ds.get_task_datastore(
run_id, step_name, task_id, data_metadata={"objects": {}, "info": {}}
)
log = task_ds.load_log_legacy(logtype, attempt_override=attempt)
# Store this in the file cache as well
self.create_file(path, log)
return log
def get_legacy_log_size(
self, ds_type, location, logtype, attempt, flow_name, run_id, step_name, task_id
):
ds_cls = self._get_datastore_storage_impl(ds_type)
ds_root = ds_cls.path_join(*ds_cls.path_split(location)[:-5])
ds = self._get_flow_datastore(ds_type, ds_root, flow_name)
task_ds = ds.get_task_datastore(
run_id,
step_name,
task_id,
attempt=attempt,
data_metadata={"objects": {}, "info": {}},
)
return task_ds.get_legacy_log_size(logtype)
def get_log_size(
self, ds_type, ds_root, logtype, attempt, flow_name, run_id, step_name, task_id
):
from metaflow.mflog import LOG_SOURCES
ds = self._get_flow_datastore(ds_type, ds_root, flow_name)
task_ds = ds.get_task_datastore(
run_id,
step_name,
task_id,
attempt=attempt,
data_metadata={"objects": {}, "info": {}},
)
return task_ds.get_log_size(LOG_SOURCES, logtype)
def get_data(self, ds_type, flow_name, location, key):
ds_cls = self._get_datastore_storage_impl(ds_type)
ds_root = ds_cls.get_datastore_root_from_location(location, flow_name)
ds = self._get_flow_datastore(ds_type, ds_root, flow_name)
return next(ds.load_data([key], force_raw=True))
def get_artifact_size_by_location(
self, ds_type, location, attempt, flow_name, run_id, step_name, task_id, name
):
"""Gets the size of the artifact content (in bytes) for the name at the location"""
ds_cls = self._get_datastore_storage_impl(ds_type)
ds_root = ds_cls.get_datastore_root_from_location(location, flow_name)
return self.get_artifact_size(
ds_type, ds_root, attempt, flow_name, run_id, step_name, task_id, name
)
def get_artifact_size(
self, ds_type, ds_root, attempt, flow_name, run_id, step_name, task_id, name
):
"""Gets the size of the artifact content (in bytes) for the name"""
task_ds = self._get_task_datastore(
ds_type, ds_root, flow_name, run_id, step_name, task_id, attempt
)
_, size = next(task_ds.get_artifact_sizes([name]))
return size
def get_artifact_by_location(
self,
ds_type,
location,
data_metadata,
flow_name,
run_id,
step_name,
task_id,
name,
):
ds_cls = self._get_datastore_storage_impl(ds_type)
ds_root = ds_cls.get_datastore_root_from_location(location, flow_name)
return self.get_artifact(
ds_type, ds_root, data_metadata, flow_name, run_id, step_name, task_id, name
)
def get_artifact(
self,
ds_type,
ds_root,
data_metadata,
flow_name,
run_id,
step_name,
task_id,
name,
):
_, obj = next(
self.get_artifacts(
ds_type,
ds_root,
data_metadata,
flow_name,
run_id,
step_name,
task_id,
[name],
)
)
return obj
def get_all_artifacts(
self, ds_type, ds_root, data_metadata, flow_name, run_id, step_name, task_id
):
ds = self._get_flow_datastore(ds_type, ds_root, flow_name)
# We get the task datastore for this task
task_ds = ds.get_task_datastore(
run_id, step_name, task_id, data_metadata=data_metadata
)
# This will reuse the blob cache if needed. We do not have an
# artifact cache so the unpickling happens every time here.
return task_ds.load_artifacts([n for n, _ in task_ds.items()])
def get_artifacts(
self,
ds_type,
ds_root,
data_metadata,
flow_name,
run_id,
step_name,
task_id,
names,
):
ds = self._get_flow_datastore(ds_type, ds_root, flow_name)
# We get the task datastore for this task
task_ds = ds.get_task_datastore(
run_id, step_name, task_id, data_metadata=data_metadata
)
# note that load_artifacts uses flow_datastore.castore which goes
# through one of the self._blob_cache
return task_ds.load_artifacts(names)
def create_file(self, path, value):
if self._objects is None:
# Index objects lazily (when we first need to write to it).
# This can be an expensive operation
self._index_objects()
dirname = os.path.dirname(path)
try:
FileCache._makedirs(dirname)
except: # noqa E722
raise FileCacheException("Could not create directory: %s" % dirname)
tmpfile = NamedTemporaryFile(dir=dirname, prefix="dlobj", delete=False)
# Now write out the file
try:
tmpfile.write(value)
tmpfile.flush()
os.rename(tmpfile.name, path)
except: # noqa E722
os.unlink(tmpfile.name)
raise
size = os.path.getsize(path)
self._total += size
self._objects.append((int(time.time()), size, path))
self._garbage_collect()
def read_file(self, path):
if os.path.exists(path):
try:
with open(path, "rb") as f:
return f.read()
except IOError:
# It may have been concurrently garbage collected by another
# process
pass
return None
def _index_objects(self):
objects = []
if os.path.exists(self._cache_dir):
for flow_ds_id in os.listdir(self._cache_dir):
root = os.path.join(self._cache_dir, flow_ds_id)
if not os.path.isdir(root):
continue
for subdir in os.listdir(root):
root = os.path.join(self._cache_dir, flow_ds_id, subdir)
if not os.path.isdir(root):
continue
for obj in os.listdir(root):
sha, ext = os.path.splitext(obj)
if ext in ["cached", "blob"]:
path = os.path.join(root, obj)
objects.insert(
0, (os.path.getctime(path), os.path.getsize(path), path)
)
self._total = sum(size for _, size, _ in objects)
self._objects = sorted(objects, reverse=False)
@staticmethod
def flow_ds_id(ds_type, ds_root, flow_name):
p = urlparse(ds_root)
sanitized_root = (p.netloc + p.path).replace("/", "_")
return ".".join([ds_type, sanitized_root, flow_name])
@staticmethod
def task_ds_id(ds_type, ds_root, flow_name, run_id, step_name, task_id, attempt):
p = urlparse(ds_root)
sanitized_root = (p.netloc + p.path).replace("/", "_")
return ".".join(
[
ds_type,
sanitized_root,
flow_name,
run_id,
step_name,
task_id,
str(attempt),
]
)
def _garbage_collect(self):
now = time.time()
while self._objects and self._total > self._max_size * 1024**2:
if now - self._objects[0][0] < NEW_FILE_QUARANTINE:
break
ctime, size, path = self._objects.pop(0)
self._total -= size
try:
os.remove(path)
except OSError:
# maybe another client had already GC'ed the file away
pass
@staticmethod
def _makedirs(path):
# this is for python2 compatibility.
# Python3 has os.makedirs(exist_ok=True).
try:
os.makedirs(path)
except OSError as x:
if x.errno == 17:
return
else:
raise
@staticmethod
def _get_datastore_storage_impl(ds_type):
storage_impl = [d for d in DATASTORES if d.TYPE == ds_type]
if len(storage_impl) == 0:
raise FileCacheException("Datastore %s was not found" % ds_type)
return storage_impl[0]
def _get_flow_datastore(self, ds_type, ds_root, flow_name):
cache_id = self.flow_ds_id(ds_type, ds_root, flow_name)
cached_flow_datastore = self._store_caches.get(cache_id)
if cached_flow_datastore:
od_move_to_end(self._store_caches, cache_id)
return cached_flow_datastore
else:
storage_impl = self._get_datastore_storage_impl(ds_type)
cached_flow_datastore = FlowDataStore(
flow_name=flow_name,
environment=None, # TODO: Add environment here
storage_impl=storage_impl,
ds_root=ds_root,
)
blob_cache = self._blob_caches.setdefault(
cache_id,
(
FileBlobCache(self, cache_id),
TaskMetadataCache(self, ds_type, ds_root, flow_name),
),
)
cached_flow_datastore.ca_store.set_blob_cache(blob_cache[0])
cached_flow_datastore.set_metadata_cache(blob_cache[1])
self._store_caches[cache_id] = cached_flow_datastore
if len(self._store_caches) > CLIENT_CACHE_MAX_FLOWDATASTORE_COUNT:
cache_id_to_remove, _ = self._store_caches.popitem(last=False)
del self._blob_caches[cache_id_to_remove]
return cached_flow_datastore
def _get_task_datastore(
self, ds_type, ds_root, flow_name, run_id, step_name, task_id, attempt
):
flow_ds = self._get_flow_datastore(ds_type, ds_root, flow_name)
return flow_ds.get_task_datastore(run_id, step_name, task_id, attempt=attempt)
| FileCache |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/pipeline.py | {
"start": 21260,
"end": 22987
} | class ____(graphene.Interface):
id = graphene.NonNull(graphene.ID)
runId = graphene.NonNull(graphene.String)
# Nullable because of historical runs
pipelineSnapshotId = graphene.String()
repositoryOrigin = graphene.Field(GrapheneRepositoryOrigin)
status = graphene.NonNull(GrapheneRunStatus)
pipeline = graphene.NonNull(GraphenePipelineReference)
pipelineName = graphene.NonNull(graphene.String)
jobName = graphene.NonNull(graphene.String)
solidSelection = graphene.List(graphene.NonNull(graphene.String))
stats = graphene.NonNull(GrapheneRunStatsSnapshotOrError)
stepStats = non_null_list(GrapheneRunStepStats)
capturedLogs = graphene.Field(
graphene.NonNull(GrapheneCapturedLogs),
fileKey=graphene.Argument(graphene.NonNull(graphene.String)),
description="""
Captured logs are the stdout/stderr logs for a given file key within the run
""",
)
executionPlan = graphene.Field(GrapheneExecutionPlan)
stepKeysToExecute = graphene.List(graphene.NonNull(graphene.String))
runConfigYaml = graphene.NonNull(graphene.String)
runConfig = graphene.NonNull(GrapheneRunConfigData)
mode = graphene.NonNull(graphene.String)
tags = non_null_list(GraphenePipelineTag)
rootRunId = graphene.Field(graphene.String)
parentRunId = graphene.Field(graphene.String)
canTerminate = graphene.NonNull(graphene.Boolean)
assets = non_null_list(GrapheneAsset)
eventConnection = graphene.Field(
graphene.NonNull(GrapheneEventConnection),
afterCursor=graphene.Argument(graphene.String),
limit=graphene.Argument(graphene.Int),
)
class Meta:
name = "PipelineRun"
| GraphenePipelineRun |
python | kamyu104__LeetCode-Solutions | Python/split-message-based-on-limit.py | {
"start": 124,
"end": 843
} | class ____(object):
def splitMessage(self, message, limit):
"""
:type message: str
:type limit: int
:rtype: List[str]
"""
cnt, l, total, base = 1, 1, len(message)+1, 1
while 3+l*2 < limit:
if total+(3+l)*cnt <= limit*cnt:
break
cnt += 1
if cnt == base*10:
l += 1
base *= 10
total += l
if 3+l*2 >= limit:
return []
result = []
j = 0
for i in xrange(cnt):
l = limit-(3+len(str(i+1))+len(str(cnt)))
result.append("%s<%s/%s>"%(message[j:j+l], i+1, cnt))
j += l
return result
| Solution |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 7075,
"end": 7628
} | class ____(LocalizableStreamlitException):
"""Exception raised when no weights are specified, or a negative weight is specified."""
def __init__(self) -> None:
super().__init__(
"The `spec` argument to `st.columns` must be either a "
"positive integer (number of columns) or a list of positive numbers (width ratios of the columns). "
"See [documentation](https://docs.streamlit.io/develop/api-reference/layout/st.columns) "
"for more information."
)
| StreamlitInvalidColumnSpecError |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 43232,
"end": 45363
} | class ____(PrefectOperatorFilterBaseModel):
"""Filter for deployments. Only deployments matching all criteria will be returned."""
id: Optional[DeploymentFilterId] = Field(
default=None, description="Filter criteria for `Deployment.id`"
)
name: Optional[DeploymentFilterName] = Field(
default=None, description="Filter criteria for `Deployment.name`"
)
flow_or_deployment_name: Optional[DeploymentOrFlowNameFilter] = Field(
default=None, description="Filter criteria for `Deployment.name` or `Flow.name`"
)
paused: Optional[DeploymentFilterPaused] = Field(
default=None, description="Filter criteria for `Deployment.paused`"
)
tags: Optional[DeploymentFilterTags] = Field(
default=None, description="Filter criteria for `Deployment.tags`"
)
work_queue_name: Optional[DeploymentFilterWorkQueueName] = Field(
default=None, description="Filter criteria for `Deployment.work_queue_name`"
)
concurrency_limit: Optional[DeploymentFilterConcurrencyLimit] = Field(
default=None,
description="DEPRECATED: Prefer `Deployment.concurrency_limit_id` over `Deployment.concurrency_limit`. If provided, will be ignored for backwards-compatibility. Will be removed after December 2024.",
deprecated=True,
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.id is not None:
filters.append(self.id.as_sql_filter())
if self.name is not None:
filters.append(self.name.as_sql_filter())
if self.flow_or_deployment_name is not None:
filters.append(self.flow_or_deployment_name.as_sql_filter())
if self.paused is not None:
filters.append(self.paused.as_sql_filter())
if self.tags is not None:
filters.append(self.tags.as_sql_filter())
if self.work_queue_name is not None:
filters.append(self.work_queue_name.as_sql_filter())
return filters
| DeploymentFilter |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-braintree/source_braintree/schemas/plan.py | {
"start": 228,
"end": 655
} | class ____(CatalogModel):
add_ons: List[AddOn]
billing_day_of_month: Optional[Decimal]
billing_frequency: Decimal
created_at: datetime
currency_iso_code: str
description: str
discounts: List[Discount]
id: str
name: str
number_of_billing_cycles: Optional[Decimal]
price: Decimal
trial_duration: Decimal
trial_duration_unit: str
trial_period: bool
updated_at: datetime
| Plan |
python | weaviate__weaviate-python-client | weaviate/collections/classes/internal.py | {
"start": 14300,
"end": 15026
} | class ____:
def __init__(
self,
target_collection: Optional[str],
uuids: UUIDS,
):
"""You should not initialise this class directly. Use the `.to_multi()` class methods instead."""
self.__target_collection = target_collection if target_collection else ""
self.__uuids = uuids
def _to_beacons(self) -> List[Dict[str, str]]:
return _to_beacons(self.__uuids, self.__target_collection)
@property
def is_one_to_many(self) -> bool:
"""Returns True if the reference is to a one-to-many references, i.e. points to more than one object."""
return self.__uuids is not None and isinstance(self.__uuids, list) and len(self.__uuids) > 1
| _Reference |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/data_structures/barrier_ops_test.py | {
"start": 1086,
"end": 28110
} | class ____(test.TestCase):
def testConstructorWithShapes(self):
with ops.Graph().as_default():
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((1, 2, 3), (8,)),
shared_name="B",
name="B")
self.assertTrue(isinstance(b.barrier_ref, tensor.Tensor))
self.assertProtoEquals("""
name:'B' op:'Barrier'
attr {
key: "capacity"
value {
i: -1
}
}
attr { key: 'component_types'
value { list { type: DT_FLOAT type: DT_FLOAT } } }
attr {
key: 'shapes'
value {
list {
shape {
dim { size: 1 } dim { size: 2 } dim { size: 3 }
}
shape {
dim { size: 8 }
}
}
}
}
attr { key: 'container' value { s: "" } }
attr { key: 'shared_name' value: { s: 'B' } }
""", b.barrier_ref.op.node_def)
@test_util.run_deprecated_v1
def testInsertMany(self):
with self.cached_session():
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
self.assertEqual([], size_t.get_shape())
keys = [b"a", b"b", b"c"]
insert_0_op = b.insert_many(0, keys, [10.0, 20.0, 30.0])
insert_1_op = b.insert_many(1, keys, [100.0, 200.0, 300.0])
self.assertEqual(self.evaluate(size_t), [0])
insert_0_op.run()
self.assertEqual(self.evaluate(size_t), [0])
insert_1_op.run()
self.assertEqual(self.evaluate(size_t), [3])
def testInsertManyEmptyTensor(self):
with self.cached_session():
error_message = ("Empty tensors are not supported, but received shape "
r"\'\(0,\)\' at index 1")
with self.assertRaisesRegex(ValueError, error_message):
data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((1,), (0,)), name="B")
@test_util.run_deprecated_v1
def testInsertManyEmptyTensorUnknown(self):
with self.cached_session():
b = data_flow_ops.Barrier((dtypes.float32, dtypes.float32), name="B")
size_t = b.ready_size()
self.assertEqual([], size_t.get_shape())
keys = [b"a", b"b", b"c"]
insert_0_op = b.insert_many(0, keys, np.array([[], [], []], np.float32))
self.assertEqual(self.evaluate(size_t), [0])
with self.assertRaisesOpError(
".*Tensors with no elements are not supported.*"):
insert_0_op.run()
@test_util.run_deprecated_v1
def testTakeMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
keys = [b"a", b"b", b"c"]
values_0 = [10.0, 20.0, 30.0]
values_1 = [100.0, 200.0, 300.0]
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys, values_1)
take_t = b.take_many(3)
insert_0_op.run()
insert_1_op.run()
self.assertEqual(self.evaluate(size_t), [3])
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
for k, v0, v1 in zip(keys, values_0, values_1):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
@test_util.run_deprecated_v1
def testTakeManySmallBatch(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
size_i = b.incomplete_size()
keys = [b"a", b"b", b"c", b"d"]
values_0 = [10.0, 20.0, 30.0, 40.0]
values_1 = [100.0, 200.0, 300.0, 400.0]
insert_0_op = b.insert_many(0, keys, values_0)
# Split adding of the second component into two independent operations.
# After insert_1_1_op, we'll have two ready elements in the barrier,
# 2 will still be incomplete.
insert_1_1_op = b.insert_many(1, keys[0:2], values_1[0:2]) # add "a", "b"
insert_1_2_op = b.insert_many(1, keys[2:3], values_1[2:3]) # add "c"
insert_1_3_op = b.insert_many(1, keys[3:], values_1[3:]) # add "d"
insert_empty_op = b.insert_many(0, [], [])
close_op = b.close()
close_op_final = b.close(cancel_pending_enqueues=True)
index_t, key_t, value_list_t = b.take_many(3, allow_small_batch=True)
insert_0_op.run()
insert_1_1_op.run()
close_op.run()
# Now we have a closed barrier with 2 ready elements. Running take_t
# should return a reduced batch with 2 elements only.
self.assertEqual(self.evaluate(size_i),
[2]) # assert that incomplete size = 2
self.assertEqual(self.evaluate(size_t), [2]) # assert that ready size = 2
_, keys_val, values_0_val, values_1_val = sess.run(
[index_t, key_t, value_list_t[0], value_list_t[1]])
# Check that correct values have been returned.
for k, v0, v1 in zip(keys[0:2], values_0[0:2], values_1[0:2]):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# The next insert completes the element with key "c". The next take_t
# should return a batch with just 1 element.
insert_1_2_op.run()
self.assertEqual(self.evaluate(size_i),
[1]) # assert that incomplete size = 1
self.assertEqual(self.evaluate(size_t), [1]) # assert that ready size = 1
_, keys_val, values_0_val, values_1_val = sess.run(
[index_t, key_t, value_list_t[0], value_list_t[1]])
# Check that correct values have been returned.
for k, v0, v1 in zip(keys[2:3], values_0[2:3], values_1[2:3]):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# Adding nothing ought to work, even if the barrier is closed.
insert_empty_op.run()
# currently keys "a" and "b" are not in the barrier, adding them
# again after it has been closed, ought to cause failure.
with self.assertRaisesOpError("is closed"):
insert_1_1_op.run()
close_op_final.run()
# These ops should fail because the barrier has now been closed with
# cancel_pending_enqueues = True.
with self.assertRaisesOpError("is closed"):
insert_empty_op.run()
with self.assertRaisesOpError("is closed"):
insert_1_3_op.run()
@test_util.run_deprecated_v1
def testUseBarrierWithShape(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((2, 2), (8,)), name="B")
size_t = b.ready_size()
keys = [b"a", b"b", b"c"]
values_0 = np.array(
[[[10.0] * 2] * 2, [[20.0] * 2] * 2, [[30.0] * 2] * 2], np.float32)
values_1 = np.array([[100.0] * 8, [200.0] * 8, [300.0] * 8], np.float32)
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys, values_1)
take_t = b.take_many(3)
insert_0_op.run()
insert_1_op.run()
self.assertEqual(self.evaluate(size_t), [3])
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
self.assertShapeEqual(keys_val, take_t[1])
self.assertShapeEqual(values_0_val, take_t[2][0])
self.assertShapeEqual(values_1_val, take_t[2][1])
for k, v0, v1 in zip(keys, values_0, values_1):
idx = keys_val.tolist().index(k)
self.assertAllEqual(values_0_val[idx], v0)
self.assertAllEqual(values_1_val[idx], v1)
@test_util.run_deprecated_v1
def testParallelInsertMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(dtypes.float32, shapes=())
size_t = b.ready_size()
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
insert_ops = [b.insert_many(0, [k], [v]) for k, v in zip(keys, values)]
take_t = b.take_many(10)
self.evaluate(insert_ops)
self.assertEqual(self.evaluate(size_t), [10])
indices_val, keys_val, values_val = sess.run(
[take_t[0], take_t[1], take_t[2][0]])
self.assertAllEqual(indices_val, [-2**63 + x for x in range(10)])
for k, v in zip(keys, values):
idx = keys_val.tolist().index(k)
self.assertEqual(values_val[idx], v)
@test_util.run_deprecated_v1
def testParallelTakeMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(dtypes.float32, shapes=())
size_t = b.ready_size()
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
insert_op = b.insert_many(0, keys, values)
take_t = [b.take_many(1) for _ in keys]
insert_op.run()
self.assertEqual(self.evaluate(size_t), [10])
index_fetches = []
key_fetches = []
value_fetches = []
for ix_t, k_t, v_t in take_t:
index_fetches.append(ix_t)
key_fetches.append(k_t)
value_fetches.append(v_t[0])
vals = sess.run(index_fetches + key_fetches + value_fetches)
index_vals = vals[:len(keys)]
key_vals = vals[len(keys):2 * len(keys)]
value_vals = vals[2 * len(keys):]
taken_elems = []
for k, v in zip(key_vals, value_vals):
taken_elems.append((k[0], v[0]))
self.assertAllEqual(np.hstack(index_vals), [-2**63] * 10)
self.assertItemsEqual(
zip(keys, values), [(k[0], v[0]) for k, v in zip(key_vals, value_vals)])
@test_util.run_deprecated_v1
def testBlockingTakeMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(dtypes.float32, shapes=())
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
insert_ops = [b.insert_many(0, [k], [v]) for k, v in zip(keys, values)]
take_t = b.take_many(10)
def take():
indices_val, keys_val, values_val = sess.run(
[take_t[0], take_t[1], take_t[2][0]])
self.assertAllEqual(indices_val,
[int(x.decode("ascii")) - 2**63 for x in keys_val])
self.assertItemsEqual(zip(keys, values), zip(keys_val, values_val))
t = self.checkedThread(target=take)
t.start()
time.sleep(0.1)
for insert_op in insert_ops:
insert_op.run()
t.join()
@test_util.run_deprecated_v1
def testParallelInsertManyTakeMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 100
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(0, keys_i(i), values_0 + i)
for i in range(num_iterations)
]
insert_1_ops = [
b.insert_many(1, keys_i(i), values_1 + i)
for i in range(num_iterations)
]
take_ops = [b.take_many(10) for _ in range(num_iterations)]
def take(sess, i, taken):
indices_val, keys_val, values_0_val, values_1_val = sess.run([
take_ops[i][0], take_ops[i][1], take_ops[i][2][0], take_ops[i][2][1]
])
taken.append({
"indices": indices_val,
"keys": keys_val,
"values_0": values_0_val,
"values_1": values_1_val
})
def insert(sess, i):
sess.run([insert_0_ops[i], insert_1_ops[i]])
taken = []
take_threads = [
self.checkedThread(
target=take, args=(sess, i, taken)) for i in range(num_iterations)
]
insert_threads = [
self.checkedThread(
target=insert, args=(sess, i)) for i in range(num_iterations)
]
for t in take_threads:
t.start()
time.sleep(0.1)
for t in insert_threads:
t.start()
for t in take_threads:
t.join()
for t in insert_threads:
t.join()
self.assertEqual(len(taken), num_iterations)
flatten = lambda l: [item for sublist in l for item in sublist]
all_indices = sorted(flatten([t_i["indices"] for t_i in taken]))
all_keys = sorted(flatten([t_i["keys"] for t_i in taken]))
expected_keys = sorted(
flatten([keys_i(i) for i in range(num_iterations)]))
expected_indices = sorted(
flatten([-2**63 + j] * 10 for j in range(num_iterations)))
self.assertAllEqual(all_indices, expected_indices)
self.assertAllEqual(all_keys, expected_keys)
for taken_i in taken:
outer_indices_from_keys = np.array(
[int(k.decode("ascii").split(":")[0]) for k in taken_i["keys"]])
inner_indices_from_keys = np.array(
[int(k.decode("ascii").split(":")[1]) for k in taken_i["keys"]])
self.assertAllEqual(taken_i["values_0"],
outer_indices_from_keys + inner_indices_from_keys)
expected_values_1 = np.vstack(
(1 + outer_indices_from_keys + inner_indices_from_keys,
2 + outer_indices_from_keys + inner_indices_from_keys)).T
self.assertAllEqual(taken_i["values_1"], expected_values_1)
@test_util.run_deprecated_v1
def testClose(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
incomplete_t = b.incomplete_size()
keys = [b"a", b"b", b"c"]
values_0 = [10.0, 20.0, 30.0]
values_1 = [100.0, 200.0, 300.0]
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys, values_1)
close_op = b.close()
fail_insert_op = b.insert_many(0, ["f"], [60.0])
take_t = b.take_many(3)
take_too_many_t = b.take_many(4)
self.assertEqual(self.evaluate(size_t), [0])
self.assertEqual(self.evaluate(incomplete_t), [0])
insert_0_op.run()
self.assertEqual(self.evaluate(size_t), [0])
self.assertEqual(self.evaluate(incomplete_t), [3])
close_op.run()
# This op should fail because the barrier is closed.
with self.assertRaisesOpError("is closed"):
fail_insert_op.run()
# This op should succeed because the barrier has not canceled
# pending enqueues
insert_1_op.run()
self.assertEqual(self.evaluate(size_t), [3])
self.assertEqual(self.evaluate(incomplete_t), [0])
# This op should fail because the barrier is closed.
with self.assertRaisesOpError("is closed"):
fail_insert_op.run()
# This op should fail because we requested more elements than are
# available in incomplete + ready queue.
with self.assertRaisesOpError(r"is closed and has insufficient elements "
r"\(requested 4, total size 3\)"):
sess.run(take_too_many_t[0]) # Sufficient to request just the indices
# This op should succeed because there are still completed elements
# to process.
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
for k, v0, v1 in zip(keys, values_0, values_1):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# This op should fail because there are no more completed elements and
# the queue is closed.
with self.assertRaisesOpError("is closed and has insufficient elements"):
sess.run(take_t[0])
@test_util.run_deprecated_v1
def testCancel(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
incomplete_t = b.incomplete_size()
keys = [b"a", b"b", b"c"]
values_0 = [10.0, 20.0, 30.0]
values_1 = [100.0, 200.0, 300.0]
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys[0:2], values_1[0:2])
insert_2_op = b.insert_many(1, keys[2:], values_1[2:])
cancel_op = b.close(cancel_pending_enqueues=True)
fail_insert_op = b.insert_many(0, ["f"], [60.0])
take_t = b.take_many(2)
take_too_many_t = b.take_many(3)
self.assertEqual(self.evaluate(size_t), [0])
insert_0_op.run()
insert_1_op.run()
self.assertEqual(self.evaluate(size_t), [2])
self.assertEqual(self.evaluate(incomplete_t), [1])
cancel_op.run()
# This op should fail because the queue is closed.
with self.assertRaisesOpError("is closed"):
fail_insert_op.run()
# This op should fail because the queue is canceled.
with self.assertRaisesOpError("is closed"):
insert_2_op.run()
# This op should fail because we requested more elements than are
# available in incomplete + ready queue.
with self.assertRaisesOpError(r"is closed and has insufficient elements "
r"\(requested 3, total size 2\)"):
sess.run(take_too_many_t[0]) # Sufficient to request just the indices
# This op should succeed because there are still completed elements
# to process.
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 2)
for k, v0, v1 in zip(keys[0:2], values_0[0:2], values_1[0:2]):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# This op should fail because there are no more completed elements and
# the queue is closed.
with self.assertRaisesOpError("is closed and has insufficient elements"):
sess.run(take_t[0])
def _testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(self, cancel):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
take_t = b.take_many(1, allow_small_batch=True)
self.evaluate(b.close(cancel))
with self.assertRaisesOpError("is closed and has insufficient elements"):
self.evaluate(take_t)
@test_util.run_deprecated_v1
def testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(self):
self._testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(cancel=False)
self._testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(cancel=True)
def _testParallelInsertManyTakeManyCloseHalfwayThrough(self, cancel):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 50
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(0, keys_i(i), values_0 + i)
for i in range(num_iterations)
]
insert_1_ops = [
b.insert_many(1, keys_i(i), values_1 + i)
for i in range(num_iterations)
]
take_ops = [b.take_many(10) for _ in range(num_iterations)]
close_op = b.close(cancel_pending_enqueues=cancel)
def take(sess, i, taken):
try:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run([
take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
take_ops[i][2][1]
])
taken.append(len(indices_val))
except errors_impl.OutOfRangeError:
taken.append(0)
def insert(sess, i):
try:
sess.run([insert_0_ops[i], insert_1_ops[i]])
except errors_impl.CancelledError:
pass
taken = []
take_threads = [
self.checkedThread(
target=take, args=(sess, i, taken)) for i in range(num_iterations)
]
insert_threads = [
self.checkedThread(
target=insert, args=(sess, i)) for i in range(num_iterations)
]
first_half_insert_threads = insert_threads[:num_iterations // 2]
second_half_insert_threads = insert_threads[num_iterations // 2:]
for t in take_threads:
t.start()
for t in first_half_insert_threads:
t.start()
for t in first_half_insert_threads:
t.join()
close_op.run()
for t in second_half_insert_threads:
t.start()
for t in take_threads:
t.join()
for t in second_half_insert_threads:
t.join()
self.assertEqual(
sorted(taken),
[0] * (num_iterations // 2) + [10] * (num_iterations // 2))
@test_util.run_deprecated_v1
def testParallelInsertManyTakeManyCloseHalfwayThrough(self):
self._testParallelInsertManyTakeManyCloseHalfwayThrough(cancel=False)
@test_util.run_deprecated_v1
def testParallelInsertManyTakeManyCancelHalfwayThrough(self):
self._testParallelInsertManyTakeManyCloseHalfwayThrough(cancel=True)
def _testParallelPartialInsertManyTakeManyCloseHalfwayThrough(self, cancel):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 100
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(
0, keys_i(i), values_0 + i, name="insert_0_%d" % i)
for i in range(num_iterations)
]
close_op = b.close(cancel_pending_enqueues=cancel)
take_ops = [
b.take_many(
10, name="take_%d" % i) for i in range(num_iterations)
]
# insert_1_ops will only run after closure
insert_1_ops = [
b.insert_many(
1, keys_i(i), values_1 + i, name="insert_1_%d" % i)
for i in range(num_iterations)
]
def take(sess, i, taken):
if cancel:
try:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run(
[
take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
take_ops[i][2][1]
])
taken.append(len(indices_val))
except errors_impl.OutOfRangeError:
taken.append(0)
else:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run([
take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
take_ops[i][2][1]
])
taken.append(len(indices_val))
def insert_0(sess, i):
insert_0_ops[i].run(session=sess)
def insert_1(sess, i):
if cancel:
try:
insert_1_ops[i].run(session=sess)
except errors_impl.CancelledError:
pass
else:
insert_1_ops[i].run(session=sess)
taken = []
take_threads = [
self.checkedThread(
target=take, args=(sess, i, taken)) for i in range(num_iterations)
]
insert_0_threads = [
self.checkedThread(
target=insert_0, args=(sess, i)) for i in range(num_iterations)
]
insert_1_threads = [
self.checkedThread(
target=insert_1, args=(sess, i)) for i in range(num_iterations)
]
for t in insert_0_threads:
t.start()
for t in insert_0_threads:
t.join()
for t in take_threads:
t.start()
close_op.run()
for t in insert_1_threads:
t.start()
for t in take_threads:
t.join()
for t in insert_1_threads:
t.join()
if cancel:
self.assertEqual(taken, [0] * num_iterations)
else:
self.assertEqual(taken, [10] * num_iterations)
@test_util.run_deprecated_v1
def testParallelPartialInsertManyTakeManyCloseHalfwayThrough(self):
self._testParallelPartialInsertManyTakeManyCloseHalfwayThrough(cancel=False)
@test_util.run_deprecated_v1
def testParallelPartialInsertManyTakeManyCancelHalfwayThrough(self):
self._testParallelPartialInsertManyTakeManyCloseHalfwayThrough(cancel=True)
@test_util.run_deprecated_v1
def testIncompatibleSharedBarrierErrors(self):
with self.cached_session():
# Do component types and shapes.
b_a_1 = data_flow_ops.Barrier(
(dtypes.float32,), shapes=(()), shared_name="b_a")
b_a_2 = data_flow_ops.Barrier(
(dtypes.int32,), shapes=(()), shared_name="b_a")
self.evaluate(b_a_1.barrier_ref)
with self.assertRaisesOpError("component types"):
self.evaluate(b_a_2.barrier_ref)
b_b_1 = data_flow_ops.Barrier(
(dtypes.float32,), shapes=(()), shared_name="b_b")
b_b_2 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.int32), shapes=((), ()), shared_name="b_b")
self.evaluate(b_b_1.barrier_ref)
with self.assertRaisesOpError("component types"):
self.evaluate(b_b_2.barrier_ref)
b_c_1 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_c")
b_c_2 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shared_name="b_c")
self.evaluate(b_c_1.barrier_ref)
with self.assertRaisesOpError("component shapes"):
self.evaluate(b_c_2.barrier_ref)
b_d_1 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), shared_name="b_d")
b_d_2 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_d")
self.evaluate(b_d_1.barrier_ref)
with self.assertRaisesOpError("component shapes"):
self.evaluate(b_d_2.barrier_ref)
b_e_1 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_e")
b_e_2 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((2, 5), (8,)),
shared_name="b_e")
self.evaluate(b_e_1.barrier_ref)
with self.assertRaisesOpError("component shapes"):
self.evaluate(b_e_2.barrier_ref)
if __name__ == "__main__":
test.main()
| BarrierTest |
python | django-crispy-forms__django-crispy-forms | crispy_forms/bootstrap.py | {
"start": 32432,
"end": 34054
} | class ____(Field):
"""
Layout object for rendering fields as uneditable in bootstrap.
Attributes
----------
template : str
The default template which this Layout Object will be rendered
with.
attrs : dict
Attributes to be applied to the field. These are converted into html
attributes. e.g. ``data_id: 'test'`` in the attrs dict will become
``data-id='test'`` on the field's ``<input>``.
Parameters
----------
fields : str
The name of the field.
css_class : str, optional
CSS classes to be applied to the field. These are added to any classes
included in the ``attrs`` dict. By default ``None``.
wrapper_class: str, optional
CSS classes to be used when rendering the Field. This class is usually
applied to the ``<div>`` which wraps the Field's ``<label>`` and
``<input>`` tags. By default ``None``.
template : str, optional
Overrides the default template, if provided. By default ``None``.
**kwargs : dict, optional
Additional attributes are converted into key="value", pairs. These
attributes are added to the ``<div>``.
Examples
--------
Example::
UneditableField('field_name', css_class="input-xlarge")
"""
template = "%s/layout/uneditable_input.html"
def __init__(self, field, css_class=None, wrapper_class=None, template=None, **kwargs):
self.attrs = {"class": "uneditable-input"}
super().__init__(field, css_class=css_class, wrapper_class=wrapper_class, template=template, **kwargs)
| UneditableField |
python | realpython__materials | python-property/point_v3.py | {
"start": 50,
"end": 449
} | class ____:
def __init__(self, x, y):
self._x = x
self._y = y
@property
def x(self):
return self._x
@x.setter
def x(self, value):
raise WriteCoordinateError("x coordinate is read-only")
@property
def y(self):
return self._y
@y.setter
def y(self, value):
raise WriteCoordinateError("y coordinate is read-only")
| Point |
python | pennersr__django-allauth | allauth/headless/account/views.py | {
"start": 2284,
"end": 3351
} | class ____(APIView):
input_class = ConfirmLoginCodeInput
def dispatch(self, request, *args, **kwargs):
auth_status = authkit.AuthenticationStatus(request)
self.stage = auth_status.get_pending_stage()
if not self.stage:
return ConflictResponse(request)
self.process = flows.login_by_code.LoginCodeVerificationProcess.resume(
self.stage
)
if not self.process:
return ConflictResponse(request)
return super().dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
response = self.process.finish(None)
return AuthenticationResponse.from_response(request, response)
def get_input_kwargs(self):
kwargs = super().get_input_kwargs()
kwargs["code"] = self.process.code
return kwargs
def handle_invalid_input(self, input):
self.process.record_invalid_attempt()
return super().handle_invalid_input(input)
@method_decorator(rate_limit(action="login"), name="handle")
| ConfirmLoginCodeView |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 16579,
"end": 16689
} | class ____(nodes.reference):
"""Node for download references, similar to pending_xref."""
| download_reference |
python | eth-brownie__brownie | brownie/_cli/console.py | {
"start": 13217,
"end": 14155
} | class ____(Completer):
def __init__(self, console, local_dict: Dict[str, Any]) -> None:
self.console = console
self.locals = local_dict
super().__init__()
def get_completions(self, document, complete_event):
try:
text = "\n".join(self.console.buffer + [document.text])
base, current = _parse_document(self.locals, text)[:2]
if isinstance(base[-1], dict):
completions = sorted(base[-1], key=lambda k: str(k))
else:
completions = dir(base[-1])
if current:
completions = [i for i in completions if i.startswith(current)]
else:
completions = [i for i in completions if not i.startswith("_")]
for key in completions:
yield Completion(key, start_position=-len(current))
except Exception:
return
@final
| ConsoleCompleter |
python | pandas-dev__pandas | pandas/tests/series/methods/test_dropna.py | {
"start": 174,
"end": 3577
} | class ____:
def test_dropna_empty(self):
ser = Series([], dtype=object)
assert len(ser.dropna()) == 0
return_value = ser.dropna(inplace=True)
assert return_value is None
assert len(ser) == 0
# invalid axis
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
ser.dropna(axis=1)
def test_dropna_preserve_name(self, datetime_series):
datetime_series[:5] = np.nan
result = datetime_series.dropna()
assert result.name == datetime_series.name
name = datetime_series.name
ts = datetime_series.copy()
return_value = ts.dropna(inplace=True)
assert return_value is None
assert ts.name == name
def test_dropna_no_nan(self):
for ser in [
Series([1, 2, 3], name="x"),
Series([False, True, False], name="x"),
]:
result = ser.dropna()
tm.assert_series_equal(result, ser)
assert result is not ser
s2 = ser.copy()
return_value = s2.dropna(inplace=True)
assert return_value is None
tm.assert_series_equal(s2, ser)
def test_dropna_intervals(self):
ser = Series(
[np.nan, 1, 2, 3],
IntervalIndex.from_arrays([np.nan, 0, 1, 2], [np.nan, 1, 2, 3]),
)
result = ser.dropna()
expected = ser.iloc[1:]
tm.assert_series_equal(result, expected)
def test_dropna_period_dtype(self):
# GH#13737
ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])
result = ser.dropna()
expected = Series([Period("2011-01", freq="M")])
tm.assert_series_equal(result, expected)
def test_datetime64_tz_dropna(self, unit):
# DatetimeLikeBlock
ser = Series(
[
Timestamp("2011-01-01 10:00"),
NaT,
Timestamp("2011-01-03 10:00"),
NaT,
],
dtype=f"M8[{unit}]",
)
result = ser.dropna()
expected = Series(
[Timestamp("2011-01-01 10:00"), Timestamp("2011-01-03 10:00")],
index=[0, 2],
dtype=f"M8[{unit}]",
)
tm.assert_series_equal(result, expected)
# DatetimeTZBlock
idx = DatetimeIndex(
["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz="Asia/Tokyo"
).as_unit(unit)
ser = Series(idx)
assert ser.dtype == f"datetime64[{unit}, Asia/Tokyo]"
result = ser.dropna()
expected = Series(
[
Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),
Timestamp("2011-01-03 10:00", tz="Asia/Tokyo"),
],
index=[0, 2],
dtype=f"datetime64[{unit}, Asia/Tokyo]",
)
assert result.dtype == f"datetime64[{unit}, Asia/Tokyo]"
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("val", [1, 1.5])
def test_dropna_ignore_index(self, val):
# GH#31725
ser = Series([1, 2, val], index=[3, 2, 1])
result = ser.dropna(ignore_index=True)
expected = Series([1, 2, val])
tm.assert_series_equal(result, expected)
ser.dropna(ignore_index=True, inplace=True)
tm.assert_series_equal(ser, expected)
| TestDropna |
python | numba__numba | numba/core/untyped_passes.py | {
"start": 4741,
"end": 5502
} | class ____(FunctionPass):
_name = "rewrite_semantic_constants"
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
"""
This prunes dead branches, a dead branch is one which is derivable as
not taken at compile time purely based on const/literal evaluation.
"""
assert state.func_ir
msg = ('Internal error in pre-inference dead branch pruning '
'pass encountered during compilation of '
'function "%s"' % (state.func_id.func_name,))
with fallback_context(state, msg):
rewrite_semantic_constants(state.func_ir, state.args)
return True
@register_pass(mutates_CFG=True, analysis_only=False)
| RewriteSemanticConstants |
python | neetcode-gh__leetcode | python/0057-insert-interval.py | {
"start": 0,
"end": 640
} | class ____:
def insert(
self, intervals: List[List[int]], newInterval: List[int]
) -> List[List[int]]:
res = []
for i in range(len(intervals)):
if newInterval[1] < intervals[i][0]:
res.append(newInterval)
return res + intervals[i:]
elif newInterval[0] > intervals[i][1]:
res.append(intervals[i])
else:
newInterval = [
min(newInterval[0], intervals[i][0]),
max(newInterval[1], intervals[i][1]),
]
res.append(newInterval)
return res
| Solution |
python | apache__avro | lang/py/avro/errors.py | {
"start": 1607,
"end": 1676
} | class ____(UserWarning):
"""Base class for warnings."""
| AvroWarning |
python | coleifer__peewee | tests/schema.py | {
"start": 516,
"end": 593
} | class ____(TestModel):
value = IntegerField(sequence='test_seq')
| TMSequence |
python | allegroai__clearml | clearml/backend_api/services/v2_20/projects.py | {
"start": 113369,
"end": 117203
} | class ____(Request):
"""
Get user and system tags used for the specified projects and their children
:param include_system: If set to 'true' then the list of the system tags is also returned.
The default value is 'false'
:type include_system: bool
:param projects: The list of projects under which the tags are searched. If not passed or empty then all the
projects are searched
:type projects: Sequence[str]
:param filter: Filter on entities to collect tags from
:type filter: dict
"""
_service = "projects"
_action = "get_project_tags"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"filter": {
"description": "Filter on entities to collect tags from",
"properties": {
"system_tags": {
"description": "The list of system tag values to filter by. Use 'null' value to specify empty system tags. Use '__$not' value to specify that the following value should be excluded",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "The list of tag values to filter by. Use 'null' value to specify empty tags. Use '__$not' value to specify that the following value should be excluded",
"items": {"type": "string"},
"type": "array",
},
},
"type": ["object", "null"],
},
"include_system": {
"default": False,
"description": "If set to 'true' then the list of the system tags is also returned. The default value is 'false'",
"type": ["boolean", "null"],
},
"projects": {
"description": "The list of projects under which the tags are searched. If not passed or empty then all the projects are searched",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self,
include_system: Optional[bool] = False,
projects: Optional[List[str]] = None,
filter: Optional[dict] = None,
**kwargs: Any
) -> None:
super(GetProjectTagsRequest, self).__init__(**kwargs)
self.include_system = include_system
self.projects = projects
self.filter = filter
@schema_property("include_system")
def include_system(self) -> Optional[bool]:
return self._property_include_system
@include_system.setter
def include_system(self, value: Optional[bool]) -> None:
if value is None:
self._property_include_system = None
return
self.assert_isinstance(value, "include_system", (bool,))
self._property_include_system = value
@schema_property("projects")
def projects(self) -> Optional[List[str]]:
return self._property_projects
@projects.setter
def projects(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_projects = None
return
self.assert_isinstance(value, "projects", (list, tuple))
self.assert_isinstance(value, "projects", six.string_types, is_array=True)
self._property_projects = value
@schema_property("filter")
def filter(self) -> Optional[dict]:
return self._property_filter
@filter.setter
def filter(self, value: Optional[dict]) -> None:
if value is None:
self._property_filter = None
return
self.assert_isinstance(value, "filter", (dict,))
self._property_filter = value
| GetProjectTagsRequest |
python | tensorflow__tensorflow | tensorflow/python/distribute/values.py | {
"start": 70637,
"end": 72095
} | class ____():
"""A per-worker CapturableResource class for non-ParameterServer strategy.
Resources that populate `host_to_resources` should be instances of classes
subclassing CapturableResource, although currently it's only used and tested
for StaticHashTable with TPUStrategy.
"""
def __init__(self, strategy, host_to_resources):
distribute_lib.distribution_strategy_input_api_counter.get_cell(
"PerWorkerResource", "TPUDistributedLookupTable").increase_by(1)
self._strategy = strategy
self._host_to_resources = host_to_resources
def __getattribute__(self, name):
if name not in ("__init__", "__getattribute__", "_host_to_resources",
"_strategy", "local_resource"):
return getattr(self.local_resource(), name)
return super(PerWorkerResource, self).__getattribute__(name)
def __setattr__(self, name, value):
if name not in ("_strategy", "_host_to_resources"):
return setattr(self.local_resource(), name, value)
return super(PerWorkerResource, self).__setattr__(name, value)
def local_resource(self):
"""Returns the resource on the local worker."""
current_device = device_util.canonicalize(device_util.current())
host_device = device_util.canonicalize(
device_util.get_host_for_device(current_device))
return self._host_to_resources.get(
host_device,
self._host_to_resources[next(iter(self._host_to_resources))])
| PerWorkerResource |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/relationships/tutorial001_py310.py | {
"start": 1034,
"end": 1185
} | class ____(SQLModel):
name: str | None = None
secret_name: str | None = None
age: int | None = None
team_id: int | None = None
| HeroUpdate |
python | kamyu104__LeetCode-Solutions | Python/check-if-a-string-can-break-another-string.py | {
"start": 692,
"end": 1001
} | class ____(object):
def checkIfCanBreak(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: bool
"""
return not {1, -1}.issubset(set(cmp(a, b) for a, b in itertools.izip(sorted(s1), sorted(s2))))
# Time: O(nlogn)
# Space: O(1)
import itertools
| Solution2 |
python | plotly__plotly.py | _plotly_utils/basevalidators.py | {
"start": 73769,
"end": 76832
} | class ____(BaseValidator):
def __init__(self, plotly_name, parent_name, data_class_str, data_docs, **kwargs):
super(CompoundValidator, self).__init__(
plotly_name=plotly_name, parent_name=parent_name, **kwargs
)
# Save element class string
self.data_class_str = data_class_str
self._data_class = None
self.data_docs = data_docs
self.module_str = CompoundValidator.compute_graph_obj_module_str(
self.data_class_str, parent_name
)
@staticmethod
def compute_graph_obj_module_str(data_class_str, parent_name):
if parent_name == "frame" and data_class_str in ["Data", "Layout"]:
# Special case. There are no graph_objs.frame.Data or
# graph_objs.frame.Layout classes. These are remapped to
# graph_objs.Data and graph_objs.Layout
parent_parts = parent_name.split(".")
module_str = ".".join(["plotly.graph_objs"] + parent_parts[1:])
elif parent_name == "layout.template" and data_class_str == "Layout":
# Remap template's layout to regular layout
module_str = "plotly.graph_objs"
elif "layout.template.data" in parent_name:
# Remap template's traces to regular traces
parent_name = parent_name.replace("layout.template.data.", "")
if parent_name:
module_str = "plotly.graph_objs." + parent_name
else:
module_str = "plotly.graph_objs"
elif parent_name:
module_str = "plotly.graph_objs." + parent_name
else:
module_str = "plotly.graph_objs"
return module_str
@property
def data_class(self):
if self._data_class is None:
module = import_module(self.module_str)
self._data_class = getattr(module, self.data_class_str)
return self._data_class
def description(self):
desc = (
"""\
The '{plotly_name}' property is an instance of {class_str}
that may be specified as:
- An instance of :class:`{module_str}.{class_str}`
- A dict of string/value properties that will be passed
to the {class_str} constructor"""
).format(
plotly_name=self.plotly_name,
class_str=self.data_class_str,
module_str=self.module_str,
)
return desc
def validate_coerce(self, v, skip_invalid=False, _validate=True):
if v is None:
v = self.data_class()
elif isinstance(v, dict):
v = self.data_class(v, skip_invalid=skip_invalid, _validate=_validate)
elif isinstance(v, self.data_class):
# Copy object
v = self.data_class(v)
else:
if skip_invalid:
v = self.data_class()
else:
self.raise_invalid_val(v)
v._plotly_name = self.plotly_name
return v
def present(self, v):
# Return compound object as-is
return v
| CompoundValidator |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets_tests/snippet_checks/guides/components/integrations/test_sigma_utils.py | {
"start": 1787,
"end": 2706
} | class ____(SigmaComponent):
@cached_property
def organization_resource(self) -> MockSigmaOrganization:
return MockSigmaOrganization(**self.organization.model_dump())
def test_mock_sigma_organization() -> None:
"""Test that the mock Sigma organization returns the expected data."""
import asyncio
organization = MockSigmaOrganization(
base_url=SigmaBaseUrl.AWS_US.value,
client_id="test_client_id",
client_secret="test_client_secret",
)
organization_data = asyncio.run(organization.build_organization_data())
# Verify we have the expected content
assert len(organization_data.workbooks) == 1
assert len(organization_data.datasets) == 1
# Verify specific content
assert organization_data.workbooks[0].properties["name"] == "Sample_Workbook"
assert organization_data.datasets[0].properties["name"] == "Orders_Dataset"
| MockSigmaComponent |
python | pytorch__pytorch | torch/jit/_recursive.py | {
"start": 4426,
"end": 15545
} | class ____(torch._C._jit_tree_views.SourceRangeFactory):
pass
def get_annotations(obj):
# In Python-3.10+ it is recommended to use inspect.get_annotations
# See https://docs.python.org/3.10/howto/annotations.html
# But also, in 3.10 annotations from base class are not inherited
# by unannotated derived one, so they must be manually extracted
annotations = inspect.get_annotations(obj)
if annotations:
return annotations
def get_cls_annotations(cls):
cls_annotations = inspect.get_annotations(cls)
if cls_annotations:
return cls_annotations
for base in cls.__bases__:
cls_annotations = get_cls_annotations(base)
if cls_annotations:
return cls_annotations
return {}
cls = obj if isinstance(obj, type) else type(obj)
return get_cls_annotations(cls)
def infer_concrete_type_builder(nn_module, share_types=True):
"""
Build a ConcreteModuleTypeBuilder from an nn.Module.
This ConcreteModuleType doesn't have a JIT type associated with it yet, it
must be filled in by the caller.
"""
concrete_type_builder = torch._C.ConcreteModuleTypeBuilder(type(nn_module))
if isinstance(nn_module, (torch.nn.ModuleDict)):
concrete_type_builder.set_module_dict()
if isinstance(nn_module, (torch.nn.ModuleList, torch.nn.Sequential)):
concrete_type_builder.set_module_list()
if isinstance(nn_module, (torch.nn.ParameterList)):
concrete_type_builder.set_parameter_list()
if isinstance(nn_module, (torch.nn.ParameterDict)):
concrete_type_builder.set_parameter_dict()
class_annotations = get_annotations(nn_module)
if isinstance(nn_module, (torch.ao.quantization.QuantWrapper)):
class_annotations = {}
# Get user-annotated ignored attributes.
user_annotated_ignored_attributes = getattr(
nn_module, "__jit_ignored_attributes__", []
)
concrete_type_builder.add_ignored_attributes(user_annotated_ignored_attributes)
ignored_properties = jit_ignored_properties(nn_module)
# try to infer the type from type annotation or from the object itself
def infer_type(name, item):
# The forward function from Module is special; never use this annotations; we
# need to infer type directly using JIT. I originally wanted to write
# this test as isinstance(class_annotations[name], Callable) but
# isinstance on typing things doesn't seem to work: isinstance(list, Callable)
# is also true!
inferred = False
try:
if (
name in class_annotations
and class_annotations[name]
!= torch.nn.Module.__annotations__["forward"]
):
ann_to_type = torch.jit.annotations.ann_to_type(
class_annotations[name], fake_range()
)
attr_type = torch._C.InferredType(ann_to_type)
elif isinstance(item, torch.jit.Attribute):
ann_to_type = torch.jit.annotations.ann_to_type(item.type, fake_range())
attr_type = torch._C.InferredType(ann_to_type)
else:
attr_type = torch._C._jit_try_infer_type(item)
inferred = True
except RuntimeError as re:
raise RuntimeError(f"Error inferring type for {name}: {item}: {re}") from re
return attr_type, inferred
added_names = set()
for name, item in nn_module._parameters.items():
if name in user_annotated_ignored_attributes:
continue
assert item is None or isinstance(item, torch.Tensor)
attr_type, _ = infer_type(name, item)
# We currently have the invariant in various places in our code
# that parameters must be Tensors. However, the nn.Module API also
# allows NoneType parameters. These parameters are not returned as
# part of `parameters()` and its variants, but are available
# through direct attribute access.
concrete_type_builder.add_attribute(name, attr_type.type(), True, False)
added_names.add(name)
for name, item in nn_module._buffers.items():
if name in user_annotated_ignored_attributes:
continue
assert item is None or isinstance(item, torch.Tensor)
attr_type, _ = infer_type(name, item)
concrete_type_builder.add_attribute(name, attr_type.type(), False, True)
added_names.add(name)
for name, item in nn_module._modules.items():
if name in user_annotated_ignored_attributes:
continue
attr_type, _ = infer_type(name, item)
if item is None:
# Modules can be None. We don't have direct support for optional
# Modules, so the register it as an NoneType attribute instead.
concrete_type_builder.add_attribute(name, attr_type.type(), False, False)
continue
if attr_type.success():
assert attr_type.type().is_interface_type()
# if the type can be inferred, it should be a module interface type
sub_concrete_type = torch._C.ConcreteModuleType.from_jit_type(
attr_type.type()
)
else:
# otherwise we get the concrete module type for item and add it to concrete_type
sub_concrete_type = get_module_concrete_type(item, share_types)
concrete_type_builder.add_module(name, sub_concrete_type)
added_names.add(name)
# populate constants_set
constants_set = set(getattr(nn_module, "__constants__", ()))
# Constants annotated via `Final[T]` rather than being added to `__constants__`
for name, ann in class_annotations.items():
if torch._jit_internal.is_final(ann):
constants_set.add(name)
for name in constants_set:
if name in added_names:
# TODO: We should really error in this case, but its bc-breaking so
# we need to warn for at least one release
if name in nn_module._modules:
hint = "submodule"
elif name in nn_module._buffers:
hint = "buffer"
elif name in nn_module._parameters:
hint = "parameter"
else:
raise AssertionError(
"added_names must be submodule, parameter, or buffer"
)
warnings.warn(
f"'{name}' was found in ScriptModule constants, "
f" but it is a non-constant {hint}. Consider removing it.",
stacklevel=2,
)
continue
if not hasattr(nn_module, name):
# TODO: We should really error in this case, but its bc-breaking so
# we need to warn for at least one release
warnings.warn(
f"'{name}' was found in ScriptModule constants, "
"but was not actually set in __init__. "
"Consider removing it.",
stacklevel=2,
)
continue
value = getattr(nn_module, name)
concrete_type_builder.add_constant(
name, _get_valid_constant(name, value, type(nn_module).__name__)
)
added_names.add(name)
# populate overloads
overloads = getattr(nn_module, "__overloads__", {})
# update with any annotated overloads
overloads.update(
get_overload_name_mapping(
get_overload_annotations(nn_module, ignored_properties)
)
)
for name, overloaded_names in overloads.items():
concrete_type_builder.add_overload(name, overloaded_names)
for name, value in nn_module.__dict__.items():
if name in ignored_attributes or name.startswith("__"):
# Python objects have lots of random attributes attached to them;
# PyTorch adds a few more. Prevent these from getting compiled.
continue
if name in user_annotated_ignored_attributes:
continue
if name in added_names:
# Don't re-add anything we already added
continue
isoverloadpacket = isinstance(value, torch._ops.OpOverloadPacket)
if isoverloadpacket:
value = value.op
# Handle Python function attributes
if inspect.isfunction(value):
try:
scripted_fn = torch.jit.script(value)
concrete_type_builder.add_function_attribute(
name, torch._C._jit_try_infer_type(scripted_fn).type(), value
)
except Exception as e:
# If we fail to script the function, it isn't a hard error.
# Instead, we will add it to the list of attributes we failed
# to convert, with the compilation error.
hint = (
"(This function exists as an attribute on the Python module, "
"but we failed to compile it to a TorchScript function. "
f"\nThe error stack is reproduced here:\n{e})"
)
concrete_type_builder.add_failed_attribute(name, hint)
continue
# Handle calls to builtin functions (either bespoke builtins from torch.jit._builtins or
# a call to an aten function like torch.add)
builtin_symbol_name = _find_builtin(value)
if builtin_symbol_name:
concrete_type_builder.add_builtin_function(name, builtin_symbol_name)
continue
# Handle Script function attributes
if isinstance(value, torch.jit.ScriptFunction):
concrete_type_builder.add_function_attribute(
name, torch._C._jit_try_infer_type(value).type(), value
)
continue
# If we got here, this is a regular "data" attribute, add it to the concrete type
attr_type, inferred = infer_type(name, value)
if attr_type.success():
concrete_type_builder.add_attribute(name, attr_type.type(), False, False)
else:
# TODO: could add more detail here. For example, what the user should do
# when the pytype is `list` or `NoneType`
inferred_msg = (
"Its type was inferred; try adding a type annotation for the attribute."
if inferred
else ""
)
additional_info = f"{attr_type.reason()}. {inferred_msg}"
hint = (
"(This attribute exists on the Python module, "
f"but we failed to convert Python type: '{torch.typename(type(value))}' "
f"to a TorchScript type. {additional_info})"
)
concrete_type_builder.add_failed_attribute(name, hint)
# add hooks to concrete type
for hook in nn_module._forward_hooks.values():
concrete_type_builder.add_forward_hook(hook)
for pre_hook in nn_module._forward_pre_hooks.values():
concrete_type_builder.add_forward_pre_hook(pre_hook)
return concrete_type_builder
| SourceContext |
python | chroma-core__chroma | chromadb/db/base.py | {
"start": 397,
"end": 909
} | class ____(Protocol):
"""Reifies methods we use from a DBAPI2 Cursor since DBAPI2 is not typed."""
def execute(self, sql: str, params: Optional[Tuple[Any, ...]] = None) -> Self:
...
def executescript(self, script: str) -> Self:
...
def executemany(
self, sql: str, params: Optional[Sequence[Tuple[Any, ...]]] = None
) -> Self:
...
def fetchone(self) -> Tuple[Any, ...]:
...
def fetchall(self) -> Sequence[Tuple[Any, ...]]:
...
| Cursor |
python | lepture__authlib | authlib/oauth2/rfc7591/claims.py | {
"start": 206,
"end": 12169
} | class ____(BaseClaims):
# https://tools.ietf.org/html/rfc7591#section-2
REGISTERED_CLAIMS = [
"redirect_uris",
"token_endpoint_auth_method",
"grant_types",
"response_types",
"client_name",
"client_uri",
"logo_uri",
"scope",
"contacts",
"tos_uri",
"policy_uri",
"jwks_uri",
"jwks",
"software_id",
"software_version",
]
def validate(self):
self._validate_essential_claims()
self.validate_redirect_uris()
self.validate_token_endpoint_auth_method()
self.validate_grant_types()
self.validate_response_types()
self.validate_client_name()
self.validate_client_uri()
self.validate_logo_uri()
self.validate_scope()
self.validate_contacts()
self.validate_tos_uri()
self.validate_policy_uri()
self.validate_jwks_uri()
self.validate_jwks()
self.validate_software_id()
self.validate_software_version()
def validate_redirect_uris(self):
"""Array of redirection URI strings for use in redirect-based flows
such as the authorization code and implicit flows. As required by
Section 2 of OAuth 2.0 [RFC6749], clients using flows with
redirection MUST register their redirection URI values.
Authorization servers that support dynamic registration for
redirect-based flows MUST implement support for this metadata
value.
"""
uris = self.get("redirect_uris")
if uris:
for uri in uris:
self._validate_uri("redirect_uris", uri)
def validate_token_endpoint_auth_method(self):
"""String indicator of the requested authentication method for the
token endpoint.
"""
# If unspecified or omitted, the default is "client_secret_basic"
if "token_endpoint_auth_method" not in self:
self["token_endpoint_auth_method"] = "client_secret_basic"
self._validate_claim_value("token_endpoint_auth_method")
def validate_grant_types(self):
"""Array of OAuth 2.0 grant type strings that the client can use at
the token endpoint.
"""
self._validate_claim_value("grant_types")
def validate_response_types(self):
"""Array of the OAuth 2.0 response type strings that the client can
use at the authorization endpoint.
"""
self._validate_claim_value("response_types")
def validate_client_name(self):
"""Human-readable string name of the client to be presented to the
end-user during authorization. If omitted, the authorization
server MAY display the raw "client_id" value to the end-user
instead. It is RECOMMENDED that clients always send this field.
The value of this field MAY be internationalized, as described in
Section 2.2.
"""
def validate_client_uri(self):
"""URL string of a web page providing information about the client.
If present, the server SHOULD display this URL to the end-user in
a clickable fashion. It is RECOMMENDED that clients always send
this field. The value of this field MUST point to a valid web
page. The value of this field MAY be internationalized, as
described in Section 2.2.
"""
self._validate_uri("client_uri")
def validate_logo_uri(self):
"""URL string that references a logo for the client. If present, the
server SHOULD display this image to the end-user during approval.
The value of this field MUST point to a valid image file. The
value of this field MAY be internationalized, as described in
Section 2.2.
"""
self._validate_uri("logo_uri")
def validate_scope(self):
"""String containing a space-separated list of scope values (as
described in Section 3.3 of OAuth 2.0 [RFC6749]) that the client
can use when requesting access tokens. The semantics of values in
this list are service specific. If omitted, an authorization
server MAY register a client with a default set of scopes.
"""
self._validate_claim_value("scope")
def validate_contacts(self):
"""Array of strings representing ways to contact people responsible
for this client, typically email addresses. The authorization
server MAY make these contact addresses available to end-users for
support requests for the client. See Section 6 for information on
Privacy Considerations.
"""
if "contacts" in self and not isinstance(self["contacts"], list):
raise InvalidClaimError("contacts")
def validate_tos_uri(self):
"""URL string that points to a human-readable terms of service
document for the client that describes a contractual relationship
between the end-user and the client that the end-user accepts when
authorizing the client. The authorization server SHOULD display
this URL to the end-user if it is provided. The value of this
field MUST point to a valid web page. The value of this field MAY
be internationalized, as described in Section 2.2.
"""
self._validate_uri("tos_uri")
def validate_policy_uri(self):
"""URL string that points to a human-readable privacy policy document
that describes how the deployment organization collects, uses,
retains, and discloses personal data. The authorization server
SHOULD display this URL to the end-user if it is provided. The
value of this field MUST point to a valid web page. The value of
this field MAY be internationalized, as described in Section 2.2.
"""
self._validate_uri("policy_uri")
def validate_jwks_uri(self):
"""URL string referencing the client's JSON Web Key (JWK) Set
[RFC7517] document, which contains the client's public keys. The
value of this field MUST point to a valid JWK Set document. These
keys can be used by higher-level protocols that use signing or
encryption. For instance, these keys might be used by some
applications for validating signed requests made to the token
endpoint when using JWTs for client authentication [RFC7523]. Use
of this parameter is preferred over the "jwks" parameter, as it
allows for easier key rotation. The "jwks_uri" and "jwks"
parameters MUST NOT both be present in the same request or
response.
"""
# TODO: use real HTTP library
self._validate_uri("jwks_uri")
def validate_jwks(self):
"""Client's JSON Web Key Set [RFC7517] document value, which contains
the client's public keys. The value of this field MUST be a JSON
object containing a valid JWK Set. These keys can be used by
higher-level protocols that use signing or encryption. This
parameter is intended to be used by clients that cannot use the
"jwks_uri" parameter, such as native clients that cannot host
public URLs. The "jwks_uri" and "jwks" parameters MUST NOT both
be present in the same request or response.
"""
if "jwks" in self:
if "jwks_uri" in self:
# The "jwks_uri" and "jwks" parameters MUST NOT both be present
raise InvalidClaimError("jwks")
jwks = self["jwks"]
try:
key_set = JsonWebKey.import_key_set(jwks)
if not key_set:
raise InvalidClaimError("jwks")
except ValueError as exc:
raise InvalidClaimError("jwks") from exc
def validate_software_id(self):
"""A unique identifier string (e.g., a Universally Unique Identifier
(UUID)) assigned by the client developer or software publisher
used by registration endpoints to identify the client software to
be dynamically registered. Unlike "client_id", which is issued by
the authorization server and SHOULD vary between instances, the
"software_id" SHOULD remain the same for all instances of the
client software. The "software_id" SHOULD remain the same across
multiple updates or versions of the same piece of software. The
value of this field is not intended to be human readable and is
usually opaque to the client and authorization server.
"""
def validate_software_version(self):
"""A version identifier string for the client software identified by
"software_id". The value of the "software_version" SHOULD change
on any update to the client software identified by the same
"software_id". The value of this field is intended to be compared
using string equality matching and no other comparison semantics
are defined by this specification. The value of this field is
outside the scope of this specification, but it is not intended to
be human readable and is usually opaque to the client and
authorization server. The definition of what constitutes an
update to client software that would trigger a change to this
value is specific to the software itself and is outside the scope
of this specification.
"""
def _validate_uri(self, key, uri=None):
if uri is None:
uri = self.get(key)
if uri and not is_valid_url(uri, fragments_allowed=False):
raise InvalidClaimError(key)
@classmethod
def get_claims_options(cls, metadata):
"""Generate claims options validation from Authorization Server metadata."""
scopes_supported = metadata.get("scopes_supported")
response_types_supported = metadata.get("response_types_supported")
grant_types_supported = metadata.get("grant_types_supported")
auth_methods_supported = metadata.get("token_endpoint_auth_methods_supported")
options = {}
if scopes_supported is not None:
scopes_supported = set(scopes_supported)
def _validate_scope(claims, value):
if not value:
return True
scopes = set(scope_to_list(value))
return scopes_supported.issuperset(scopes)
options["scope"] = {"validate": _validate_scope}
if response_types_supported is not None:
response_types_supported = [
set(items.split()) for items in response_types_supported
]
def _validate_response_types(claims, value):
# If omitted, the default is that the client will use only the "code"
# response type.
response_types = (
[set(items.split()) for items in value] if value else [{"code"}]
)
return all(
response_type in response_types_supported
for response_type in response_types
)
options["response_types"] = {"validate": _validate_response_types}
if grant_types_supported is not None:
grant_types_supported = set(grant_types_supported)
def _validate_grant_types(claims, value):
# If omitted, the default behavior is that the client will use only
# the "authorization_code" Grant Type.
grant_types = set(value) if value else {"authorization_code"}
return grant_types_supported.issuperset(grant_types)
options["grant_types"] = {"validate": _validate_grant_types}
if auth_methods_supported is not None:
options["token_endpoint_auth_method"] = {"values": auth_methods_supported}
return options
| ClientMetadataClaims |
python | django-haystack__django-haystack | test_haystack/elasticsearch7_tests/test_backend.py | {
"start": 1673,
"end": 1898
} | class ____(Elasticsearch7MockSearchIndex):
def prepare_text(self, obj):
if obj.author == "daniel3":
raise SkipDocument
return "Indexed!\n%s" % obj.id
| Elasticsearch7MockSearchIndexWithSkipDocument |
python | django__django | tests/gis_tests/geoapp/feeds.py | {
"start": 929,
"end": 1149
} | class ____(TestGeoRSS2):
feed_type = feeds.GeoAtom1Feed
def geometry(self, obj):
# This time we'll use a 2-tuple of coordinates for the box.
return ((-123.30, -41.32), (174.78, 48.46))
| TestGeoAtom2 |
python | Farama-Foundation__Gymnasium | gymnasium/wrappers/transform_reward.py | {
"start": 476,
"end": 1817
} | class ____(
gym.RewardWrapper[ObsType, ActType], gym.utils.RecordConstructorArgs
):
"""Applies a function to the ``reward`` received from the environment's ``step``.
A vector version of the wrapper exists :class:`gymnasium.wrappers.vector.TransformReward`.
Example:
>>> import gymnasium as gym
>>> from gymnasium.wrappers import TransformReward
>>> env = gym.make("CartPole-v1")
>>> env = TransformReward(env, lambda r: 2 * r + 1)
>>> _ = env.reset()
>>> _, rew, _, _, _ = env.step(0)
>>> rew
3.0
Change logs:
* v0.15.0 - Initially added
"""
def __init__(
self,
env: gym.Env[ObsType, ActType],
func: Callable[[SupportsFloat], SupportsFloat],
):
"""Initialize TransformReward wrapper.
Args:
env (Env): The environment to wrap
func: (Callable): The function to apply to reward
"""
gym.utils.RecordConstructorArgs.__init__(self, func=func)
gym.RewardWrapper.__init__(self, env)
self.func = func
def reward(self, reward: SupportsFloat) -> SupportsFloat:
"""Apply function to reward.
Args:
reward (Union[float, int, np.ndarray]): environment's reward
"""
return self.func(reward)
| TransformReward |
python | getsentry__responses | responses/_recorder.py | {
"start": 3007,
"end": 5637
} | class ____(RequestsMock):
def __init__(
self,
*,
target: str = "requests.adapters.HTTPAdapter.send",
registry: "Type[FirstMatchRegistry]" = OrderedRegistry,
) -> None:
super().__init__(target=target, registry=registry)
def reset(self) -> None:
self._registry = OrderedRegistry()
def record(
self, *, file_path: "Union[str, bytes, os.PathLike[Any]]" = "response.yaml"
) -> "Union[Callable[[_F], _F], _F]":
def deco_record(function: "_F") -> "Callable[..., Any]":
@wraps(function)
def wrapper(*args: "Any", **kwargs: "Any") -> "Any": # type: ignore[misc]
with self:
ret = function(*args, **kwargs)
self.dump_to_file(
file_path=file_path, registered=self.get_registry().registered
)
return ret
return wrapper
return deco_record
def dump_to_file(
self,
file_path: "Union[str, bytes, os.PathLike[Any]]",
*,
registered: "Optional[List[BaseResponse]]" = None,
) -> None:
"""Dump the recorded responses to a file."""
if registered is None:
registered = self.get_registry().registered
with open(file_path, "w") as file:
_dump(registered, file, yaml.dump)
def _on_request(
self,
adapter: "HTTPAdapter",
request: "PreparedRequest",
**kwargs: "Any",
) -> "models.Response":
# add attributes params and req_kwargs to 'request' object for further match comparison
# original request object does not have these attributes
request.params = self._parse_request_params(request.path_url) # type: ignore[attr-defined]
request.req_kwargs = kwargs # type: ignore[attr-defined]
requests_response = _real_send(adapter, request, **kwargs)
headers_values = {
key: value for key, value in requests_response.headers.items()
}
responses_response = Response(
method=str(request.method),
url=str(requests_response.request.url),
status=requests_response.status_code,
body=requests_response.text,
headers=headers_values,
content_type=requests_response.headers.get("Content-Type", _UNSET),
)
self._registry.add(responses_response)
return requests_response
def stop(self, allow_assert: bool = True) -> None:
super().stop(allow_assert=False)
recorder = Recorder()
record = recorder.record
| Recorder |
python | huggingface__transformers | tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py | {
"start": 1627,
"end": 6402
} | class ____:
def __init__(
self,
parent,
batch_size=3,
seq_length=7,
num_channels=3,
ignore_index=-100,
image_size=14,
bos_token_id=0,
eos_token_id=1,
pad_token_id=2,
hidden_act="silu",
hidden_size=32,
vocab_size=99,
intermediate_size=37,
max_position_embeddings=512,
max_window_layers=3,
num_attention_heads=4,
num_hidden_layers=2,
num_key_value_heads=2,
rope_theta=10000,
tie_word_embeddings=True,
is_training=True,
vision_config=None,
rope_parameters=None,
vision_start_token_id=3,
image_token_id=4,
video_token_id=5,
):
self.parent = parent
self.ignore_index = ignore_index
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_size = hidden_size
self.vision_start_token_id = vision_start_token_id
self.image_token_id = image_token_id
self.video_token_id = video_token_id
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.is_training = is_training
self.vocab_size = vocab_size
self.num_image_tokens = 32
self.seq_length = seq_length + self.num_image_tokens
# Default vision config is None to avoid a mutable default argument
if vision_config is None:
vision_config = {
"depth": 2,
"in_chans": 3,
"hidden_act": "silu",
"intermediate_size": 32,
"out_hidden_size": 32,
"hidden_size": 32,
"num_heads": 4,
"patch_size": 14,
"spatial_patch_size": 14,
"spatial_merge_size": 1,
"temporal_patch_size": 2,
}
self.vision_config = vision_config
self.text_config = {
"bos_token_id": bos_token_id,
"eos_token_id": eos_token_id,
"pad_token_id": pad_token_id,
"hidden_act": hidden_act,
"hidden_size": hidden_size,
"intermediate_size": intermediate_size,
"max_position_embeddings": max_position_embeddings,
"max_window_layers": max_window_layers,
"num_attention_heads": num_attention_heads,
"num_hidden_layers": num_hidden_layers,
"num_key_value_heads": num_key_value_heads,
"rope_theta": rope_theta,
"tie_word_embeddings": tie_word_embeddings,
"vocab_size": vocab_size,
"rope_parameters": {"type": "mrope", "mrope_section": [2, 1, 1]},
}
def get_config(self):
return Qwen2_5_VLConfig(
text_config=self.text_config,
vision_config=self.vision_config,
vision_start_token_id=self.vision_start_token_id,
image_token_id=self.image_token_id,
video_token_id=self.video_token_id,
)
def prepare_config_and_inputs(self):
config = self.get_config()
patch_size = config.vision_config.patch_size
temporal_patch_size = config.vision_config.temporal_patch_size
pixel_values = floats_tensor(
[
self.batch_size * (self.image_size**2) // (patch_size**2),
self.num_channels * (patch_size**2) * temporal_patch_size,
]
)
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
input_ids[:, -1] = self.pad_token_id
input_ids[input_ids == self.video_token_id] = self.pad_token_id
input_ids[input_ids == self.image_token_id] = self.pad_token_id
input_ids[input_ids == self.vision_start_token_id] = self.pad_token_id
input_ids[:, self.num_image_tokens] = self.image_token_id
input_ids[:, self.num_image_tokens - 1] = self.vision_start_token_id
inputs_dict = {
"pixel_values": pixel_values,
"image_grid_thw": torch.tensor([[1, 1, 1]] * self.batch_size, device=torch_device),
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
| Qwen2_5_VLVisionText2TextModelTester |
python | sqlalchemy__sqlalchemy | test/sql/test_query.py | {
"start": 1241,
"end": 26291
} | class ____(fixtures.TablesTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
Table(
"addresses",
metadata,
Column(
"address_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("user_id", Integer, ForeignKey("users.user_id")),
Column("address", String(30)),
test_needs_acid=True,
)
Table(
"u2",
metadata,
Column("user_id", INT, primary_key=True),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
def test_order_by_label(self, connection):
"""test that a label within an ORDER BY works on each backend.
This test should be modified to support [ticket:1068] when that ticket
is implemented. For now, you need to put the actual string in the
ORDER BY.
"""
users = self.tables.users
connection.execute(
users.insert(),
[
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9, "user_name": "fred"},
],
)
concat = ("test: " + users.c.user_name).label("thedata")
eq_(
connection.execute(select(concat).order_by("thedata")).fetchall(),
[("test: ed",), ("test: fred",), ("test: jack",)],
)
eq_(
connection.execute(select(concat).order_by("thedata")).fetchall(),
[("test: ed",), ("test: fred",), ("test: jack",)],
)
concat = ("test: " + users.c.user_name).label("thedata")
eq_(
connection.execute(
select(concat).order_by(desc("thedata"))
).fetchall(),
[("test: jack",), ("test: fred",), ("test: ed",)],
)
@testing.requires.order_by_label_with_expression
def test_order_by_label_compound(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9, "user_name": "fred"},
],
)
concat = ("test: " + users.c.user_name).label("thedata")
eq_(
connection.execute(
select(concat).order_by(literal_column("thedata") + "x")
).fetchall(),
[("test: ed",), ("test: fred",), ("test: jack",)],
)
@testing.requires.boolean_col_expressions
def test_or_and_as_columns(self, connection):
true, false = literal(True), literal(False)
eq_(connection.execute(select(and_(true, false))).scalar(), False)
eq_(connection.execute(select(and_(true, true))).scalar(), True)
eq_(connection.execute(select(or_(true, false))).scalar(), True)
eq_(connection.execute(select(or_(false, false))).scalar(), False)
eq_(
connection.execute(select(not_(or_(false, false)))).scalar(),
True,
)
row = connection.execute(
select(or_(false, false).label("x"), and_(true, false).label("y"))
).first()
assert row.x == False # noqa
assert row.y == False # noqa
row = connection.execute(
select(or_(true, false).label("x"), and_(true, false).label("y"))
).first()
assert row.x == True # noqa
assert row.y == False # noqa
def test_select_tuple(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
{"user_id": 1, "user_name": "apples"},
)
assert_raises_message(
exc.CompileError,
r"Most backends don't support SELECTing from a tuple\(\) object.",
connection.execute,
select(tuple_(users.c.user_id, users.c.user_name)),
)
@testing.combinations(
(
lambda users: select(users.c.user_id).where(
users.c.user_name.startswith("apple")
),
[(1,)],
),
(
lambda users: select(users.c.user_id).where(
users.c.user_name.contains("i % t")
),
[(5,)],
),
(
lambda users: select(users.c.user_id).where(
users.c.user_name.endswith("anas")
),
[(3,)],
),
(
lambda users: select(users.c.user_id).where(
users.c.user_name.contains("i % t", escape="&")
),
[(5,)],
),
argnames="expr,result",
)
def test_like_ops(self, connection, expr, result):
users = self.tables.users
connection.execute(
users.insert(),
[
{"user_id": 1, "user_name": "apples"},
{"user_id": 2, "user_name": "oranges"},
{"user_id": 3, "user_name": "bananas"},
{"user_id": 4, "user_name": "legumes"},
{"user_id": 5, "user_name": "hi % there"},
],
)
expr = resolve_lambda(expr, users=users)
eq_(connection.execute(expr).fetchall(), result)
@testing.requires.mod_operator_as_percent_sign
@testing.emits_warning(".*now automatically escapes.*")
def test_percents_in_text(self, connection):
for expr, result in (
(text("select 6 % 10"), 6),
(text("select 17 % 10"), 7),
(text("select '%'"), "%"),
(text("select '%%'"), "%%"),
(text("select '%%%'"), "%%%"),
(text("select 'hello % world'"), "hello % world"),
):
eq_(connection.scalar(expr), result)
def test_ilike(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[
{"user_id": 1, "user_name": "one"},
{"user_id": 2, "user_name": "TwO"},
{"user_id": 3, "user_name": "ONE"},
{"user_id": 4, "user_name": "OnE"},
],
)
eq_(
connection.execute(
select(users.c.user_id).where(users.c.user_name.ilike("one"))
).fetchall(),
[(1,), (3,), (4,)],
)
eq_(
connection.execute(
select(users.c.user_id).where(users.c.user_name.ilike("TWO"))
).fetchall(),
[(2,)],
)
if testing.against("postgresql"):
eq_(
connection.execute(
select(users.c.user_id).where(
users.c.user_name.like("one")
)
).fetchall(),
[(1,)],
)
eq_(
connection.execute(
select(users.c.user_id).where(
users.c.user_name.like("TWO")
)
).fetchall(),
[],
)
def test_repeated_bindparams(self, connection):
"""Tests that a BindParam can be used more than once.
This should be run for DB-APIs with both positional and named
paramstyles.
"""
users = self.tables.users
connection.execute(users.insert(), dict(user_id=7, user_name="jack"))
connection.execute(users.insert(), dict(user_id=8, user_name="fred"))
u = bindparam("userid")
s = users.select().where(
and_(users.c.user_name == u, users.c.user_name == u)
)
r = connection.execute(s, dict(userid="fred")).fetchall()
assert len(r) == 1
def test_bindparam_detection(self):
dialect = default.DefaultDialect(paramstyle="qmark")
def prep(q):
return str(sql.text(q).compile(dialect=dialect))
def a_eq(got, wanted):
if got != wanted:
print("Wanted %s" % wanted)
print("Received %s" % got)
self.assert_(got == wanted, got)
a_eq(prep("select foo"), "select foo")
a_eq(prep("time='12:30:00'"), "time='12:30:00'")
a_eq(prep("time='12:30:00'"), "time='12:30:00'")
a_eq(prep(":this:that"), ":this:that")
a_eq(prep(":this :that"), "? ?")
a_eq(prep("(:this),(:that :other)"), "(?),(? ?)")
a_eq(prep("(:this),(:that:other)"), "(?),(:that:other)")
a_eq(prep("(:this),(:that,:other)"), "(?),(?,?)")
a_eq(prep("(:that_:other)"), "(:that_:other)")
a_eq(prep("(:that_ :other)"), "(? ?)")
a_eq(prep("(:that_other)"), "(?)")
a_eq(prep("(:that$other)"), "(?)")
a_eq(prep("(:that$:other)"), "(:that$:other)")
a_eq(prep(".:that$ :other."), ".? ?.")
a_eq(prep(r"select \foo"), r"select \foo")
a_eq(prep(r"time='12\:30:00'"), r"time='12\:30:00'")
a_eq(prep(r":this \:that"), "? :that")
a_eq(prep(r"(\:that$other)"), "(:that$other)")
a_eq(prep(r".\:that$ :other."), ".:that$ ?.")
@testing.requires.standalone_binds
def test_select_from_bindparam(self, connection):
"""Test result row processing when selecting from a plain bind
param."""
class MyInteger(TypeDecorator):
impl = Integer
cache_ok = True
def process_bind_param(self, value, dialect):
return int(value[4:])
def process_result_value(self, value, dialect):
return "INT_%d" % value
eq_(
connection.scalar(select(cast("INT_5", type_=MyInteger))),
"INT_5",
)
eq_(
connection.scalar(
select(cast("INT_5", type_=MyInteger).label("foo"))
),
"INT_5",
)
def test_order_by(self, connection):
"""Exercises ORDER BY clause generation.
Tests simple, compound, aliased and DESC clauses.
"""
users = self.tables.users
connection.execute(users.insert(), dict(user_id=1, user_name="c"))
connection.execute(users.insert(), dict(user_id=2, user_name="b"))
connection.execute(users.insert(), dict(user_id=3, user_name="a"))
def a_eq(executable, wanted):
got = list(connection.execute(executable))
eq_(got, wanted)
for labels in False, True:
label_style = (
LABEL_STYLE_NONE
if labels is False
else LABEL_STYLE_TABLENAME_PLUS_COL
)
def go(stmt):
if labels:
stmt = stmt.set_label_style(label_style)
return stmt
a_eq(
users.select()
.order_by(users.c.user_id)
.set_label_style(label_style),
[(1, "c"), (2, "b"), (3, "a")],
)
a_eq(
users.select()
.order_by(users.c.user_name, users.c.user_id)
.set_label_style(label_style),
[(3, "a"), (2, "b"), (1, "c")],
)
a_eq(
go(
select(users.c.user_id.label("foo")).order_by(
users.c.user_id
)
),
[(1,), (2,), (3,)],
)
a_eq(
go(
select(
users.c.user_id.label("foo"), users.c.user_name
).order_by(users.c.user_name, users.c.user_id),
),
[(3, "a"), (2, "b"), (1, "c")],
)
a_eq(
users.select()
.distinct()
.order_by(users.c.user_id)
.set_label_style(label_style),
[(1, "c"), (2, "b"), (3, "a")],
)
a_eq(
go(
select(users.c.user_id.label("foo"))
.distinct()
.order_by(users.c.user_id),
),
[(1,), (2,), (3,)],
)
a_eq(
go(
select(
users.c.user_id.label("a"),
users.c.user_id.label("b"),
users.c.user_name,
).order_by(users.c.user_id),
),
[(1, 1, "c"), (2, 2, "b"), (3, 3, "a")],
)
a_eq(
users.select()
.distinct()
.order_by(desc(users.c.user_id))
.set_label_style(label_style),
[(3, "a"), (2, "b"), (1, "c")],
)
a_eq(
go(
select(users.c.user_id.label("foo"))
.distinct()
.order_by(users.c.user_id.desc()),
),
[(3,), (2,), (1,)],
)
@testing.requires.nullsordering
def test_order_by_nulls(self, connection):
"""Exercises ORDER BY clause generation.
Tests simple, compound, aliased and DESC clauses.
"""
users = self.tables.users
connection.execute(users.insert(), dict(user_id=1))
connection.execute(users.insert(), dict(user_id=2, user_name="b"))
connection.execute(users.insert(), dict(user_id=3, user_name="a"))
def a_eq(executable, wanted):
got = list(connection.execute(executable))
eq_(got, wanted)
for labels in False, True:
label_style = (
LABEL_STYLE_NONE
if labels is False
else LABEL_STYLE_TABLENAME_PLUS_COL
)
a_eq(
users.select()
.order_by(users.c.user_name.nulls_first())
.set_label_style(label_style),
[(1, None), (3, "a"), (2, "b")],
)
a_eq(
users.select()
.order_by(users.c.user_name.nulls_last())
.set_label_style(label_style),
[(3, "a"), (2, "b"), (1, None)],
)
a_eq(
users.select()
.order_by(asc(users.c.user_name).nulls_first())
.set_label_style(label_style),
[(1, None), (3, "a"), (2, "b")],
)
a_eq(
users.select()
.order_by(asc(users.c.user_name).nulls_last())
.set_label_style(label_style),
[(3, "a"), (2, "b"), (1, None)],
)
a_eq(
users.select()
.order_by(users.c.user_name.desc().nulls_first())
.set_label_style(label_style),
[(1, None), (2, "b"), (3, "a")],
)
a_eq(
users.select()
.order_by(users.c.user_name.desc().nulls_last())
.set_label_style(label_style),
[(2, "b"), (3, "a"), (1, None)],
)
a_eq(
users.select()
.order_by(desc(users.c.user_name).nulls_first())
.set_label_style(label_style),
[(1, None), (2, "b"), (3, "a")],
)
a_eq(
users.select()
.order_by(desc(users.c.user_name).nulls_last())
.set_label_style(label_style),
[(2, "b"), (3, "a"), (1, None)],
)
a_eq(
users.select()
.order_by(
users.c.user_name.nulls_first(),
users.c.user_id,
)
.set_label_style(label_style),
[(1, None), (3, "a"), (2, "b")],
)
a_eq(
users.select()
.order_by(users.c.user_name.nulls_last(), users.c.user_id)
.set_label_style(label_style),
[(3, "a"), (2, "b"), (1, None)],
)
def test_in_filtering(self, connection):
"""test the behavior of the in_() function."""
users = self.tables.users
connection.execute(users.insert(), dict(user_id=7, user_name="jack"))
connection.execute(users.insert(), dict(user_id=8, user_name="fred"))
connection.execute(users.insert(), dict(user_id=9, user_name=None))
s = users.select().where(users.c.user_name.in_([]))
r = connection.execute(s).fetchall()
# No username is in empty set
assert len(r) == 0
s = users.select().where(not_(users.c.user_name.in_([])))
r = connection.execute(s).fetchall()
assert len(r) == 3
s = users.select().where(users.c.user_name.in_(["jack", "fred"]))
r = connection.execute(s).fetchall()
assert len(r) == 2
s = users.select().where(not_(users.c.user_name.in_(["jack", "fred"])))
r = connection.execute(s).fetchall()
# Null values are not outside any set
assert len(r) == 0
def test_expanding_in(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[
dict(user_id=7, user_name="jack"),
dict(user_id=8, user_name="fred"),
dict(user_id=9, user_name=None),
],
)
stmt = (
select(users)
.where(users.c.user_name.in_(bindparam("uname", expanding=True)))
.order_by(users.c.user_id)
)
eq_(
connection.execute(stmt, {"uname": ["jack"]}).fetchall(),
[(7, "jack")],
)
eq_(
connection.execute(stmt, {"uname": ["jack", "fred"]}).fetchall(),
[(7, "jack"), (8, "fred")],
)
eq_(connection.execute(stmt, {"uname": []}).fetchall(), [])
assert_raises_message(
exc.StatementError,
"'expanding' parameters can't be used with executemany()",
connection.execute,
users.update().where(
users.c.user_name.in_(bindparam("uname", expanding=True))
),
[{"uname": ["fred"]}, {"uname": ["ed"]}],
)
@testing.requires.no_quoting_special_bind_names
def test_expanding_in_special_chars(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[
dict(user_id=7, user_name="jack"),
dict(user_id=8, user_name="fred"),
],
)
stmt = (
select(users)
.where(users.c.user_name.in_(bindparam("u35", expanding=True)))
.where(users.c.user_id == bindparam("u46"))
.order_by(users.c.user_id)
)
eq_(
connection.execute(
stmt, {"u35": ["jack", "fred"], "u46": 7}
).fetchall(),
[(7, "jack")],
)
stmt = (
select(users)
.where(users.c.user_name.in_(bindparam("u.35", expanding=True)))
.where(users.c.user_id == bindparam("u.46"))
.order_by(users.c.user_id)
)
eq_(
connection.execute(
stmt, {"u.35": ["jack", "fred"], "u.46": 7}
).fetchall(),
[(7, "jack")],
)
def test_expanding_in_multiple(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[
dict(user_id=7, user_name="jack"),
dict(user_id=8, user_name="fred"),
dict(user_id=9, user_name="ed"),
],
)
stmt = (
select(users)
.where(users.c.user_name.in_(bindparam("uname", expanding=True)))
.where(users.c.user_id.in_(bindparam("userid", expanding=True)))
.order_by(users.c.user_id)
)
eq_(
connection.execute(
stmt, {"uname": ["jack", "fred", "ed"], "userid": [8, 9]}
).fetchall(),
[(8, "fred"), (9, "ed")],
)
def test_expanding_in_repeated(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[
dict(user_id=7, user_name="jack"),
dict(user_id=8, user_name="fred"),
dict(user_id=9, user_name="ed"),
],
)
stmt = (
select(users)
.where(
users.c.user_name.in_(bindparam("uname", expanding=True))
| users.c.user_name.in_(bindparam("uname2", expanding=True))
)
.where(users.c.user_id == 8)
)
stmt = stmt.union(
select(users)
.where(
users.c.user_name.in_(bindparam("uname", expanding=True))
| users.c.user_name.in_(bindparam("uname2", expanding=True))
)
.where(users.c.user_id == 9)
).order_by("user_id")
eq_(
connection.execute(
stmt,
{
"uname": ["jack", "fred"],
"uname2": ["ed"],
"userid": [8, 9],
},
).fetchall(),
[(8, "fred"), (9, "ed")],
)
@testing.requires.tuple_in
def test_expanding_in_composite(self, connection):
users = self.tables.users
connection.execute(
users.insert(),
[
dict(user_id=7, user_name="jack"),
dict(user_id=8, user_name="fred"),
dict(user_id=9, user_name=None),
],
)
stmt = (
select(users)
.where(
tuple_(users.c.user_id, users.c.user_name).in_(
bindparam("uname", expanding=True)
)
)
.order_by(users.c.user_id)
)
eq_(
connection.execute(stmt, {"uname": [(7, "jack")]}).fetchall(),
[(7, "jack")],
)
eq_(
connection.execute(
stmt, {"uname": [(7, "jack"), (8, "fred")]}
).fetchall(),
[(7, "jack"), (8, "fred")],
)
@testing.skip_if(["mssql"])
def test_bind_in(self, connection):
"""test calling IN against a bind parameter.
this isn't allowed on several platforms since we
generate ? = ?.
"""
users = self.tables.users
connection.execute(users.insert(), dict(user_id=7, user_name="jack"))
connection.execute(users.insert(), dict(user_id=8, user_name="fred"))
connection.execute(users.insert(), dict(user_id=9, user_name=None))
u = bindparam("search_key", type_=String)
s = users.select().where(not_(u.in_([])))
r = connection.execute(s, dict(search_key="john")).fetchall()
assert len(r) == 3
r = connection.execute(s, dict(search_key=None)).fetchall()
assert len(r) == 3
def test_literal_in(self, connection):
"""similar to test_bind_in but use a bind with a value."""
users = self.tables.users
connection.execute(users.insert(), dict(user_id=7, user_name="jack"))
connection.execute(users.insert(), dict(user_id=8, user_name="fred"))
connection.execute(users.insert(), dict(user_id=9, user_name=None))
s = users.select().where(not_(literal("john").in_([])))
r = connection.execute(s).fetchall()
assert len(r) == 3
@testing.requires.boolean_col_expressions
def test_empty_in_filtering_static(self, connection):
"""test the behavior of the in_() function when
comparing against an empty collection, specifically
that a proper boolean value is generated.
"""
users = self.tables.users
connection.execute(
users.insert(),
[
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9, "user_name": None},
],
)
s = users.select().where(users.c.user_name.in_([]) == True) # noqa
r = connection.execute(s).fetchall()
assert len(r) == 0
s = users.select().where(users.c.user_name.in_([]) == False) # noqa
r = connection.execute(s).fetchall()
assert len(r) == 3
s = users.select().where(users.c.user_name.in_([]) == None) # noqa
r = connection.execute(s).fetchall()
assert len(r) == 0
| QueryTest |
python | streamlit__streamlit | lib/streamlit/web/server/oauth_authlib_routes.py | {
"start": 4629,
"end": 4794
} | class ____(AuthHandlerMixin, tornado.web.RequestHandler):
def get(self) -> None:
self.clear_auth_cookie()
self.redirect_to_base()
| AuthLogoutHandler |
python | django__django | tests/admin_changelist/models.py | {
"start": 1355,
"end": 1545
} | class ____(models.Model):
name = models.CharField(max_length=30)
members = models.ManyToManyField(Musician, through="Membership")
def __str__(self):
return self.name
| Group |
python | django__django | django/db/models/fields/json.py | {
"start": 19996,
"end": 21092
} | class ____(lookups.In):
def resolve_expression_parameter(self, compiler, connection, sql, param):
sql, params = super().resolve_expression_parameter(
compiler,
connection,
sql,
param,
)
if (
not hasattr(param, "as_sql")
and not connection.features.has_native_json_field
):
if connection.vendor == "oracle":
value = json.loads(param)
sql = "%s(JSON_OBJECT('value' VALUE %%s FORMAT JSON), '$.value')"
if isinstance(value, (list, dict)):
sql %= "JSON_QUERY"
else:
sql %= "JSON_VALUE"
elif connection.vendor == "mysql" or (
connection.vendor == "sqlite"
and params[0] not in connection.ops.jsonfield_datatype_values
):
sql = "JSON_EXTRACT(%s, '$')"
if connection.vendor == "mysql" and connection.mysql_is_mariadb:
sql = "JSON_UNQUOTE(%s)" % sql
return sql, params
| KeyTransformIn |
python | doocs__leetcode | solution/1400-1499/1493.Longest Subarray of 1's After Deleting One Element/Solution.py | {
"start": 0,
"end": 420
} | class ____:
def longestSubarray(self, nums: List[int]) -> int:
n = len(nums)
left = [0] * (n + 1)
right = [0] * (n + 1)
for i, x in enumerate(nums, 1):
if x:
left[i] = left[i - 1] + 1
for i in range(n - 1, -1, -1):
if nums[i]:
right[i] = right[i + 1] + 1
return max(left[i] + right[i + 1] for i in range(n))
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image56.py | {
"start": 315,
"end": 841
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image56.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.gif")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | python__mypy | mypy/types.py | {
"start": 146686,
"end": 147295
} | class ____(BoolTypeQuery):
"""Visitor for querying whether a type has a type variable component."""
def __init__(self) -> None:
super().__init__(ANY_STRATEGY)
self.skip_alias_target = True
def visit_type_var(self, t: TypeVarType) -> bool:
return True
def visit_type_var_tuple(self, t: TypeVarTupleType) -> bool:
return True
def visit_param_spec(self, t: ParamSpecType) -> bool:
return True
def has_type_vars(typ: Type) -> bool:
"""Check if a type contains any type variables (recursively)."""
return typ.accept(HasTypeVars())
| HasTypeVars |
python | ipython__ipython | tests/test_interactiveshell.py | {
"start": 34563,
"end": 35774
} | class ____(unittest.TestCase):
def test_warning_suppression(self):
ip.run_cell("import warnings")
try:
with self.assertWarnsRegex(UserWarning, "asdf"):
ip.run_cell("warnings.warn('asdf')")
# Here's the real test -- if we run that again, we should get the
# warning again. Traditionally, each warning was only issued once per
# IPython session (approximately), even if the user typed in new and
# different code that should have also triggered the warning, leading
# to much confusion.
with self.assertWarnsRegex(UserWarning, "asdf"):
ip.run_cell("warnings.warn('asdf')")
finally:
ip.run_cell("del warnings")
def test_deprecation_warning(self):
ip.run_cell(
"""
import warnings
def wrn():
warnings.warn(
"I AM A WARNING",
DeprecationWarning
)
"""
)
try:
with self.assertWarnsRegex(DeprecationWarning, "I AM A WARNING"):
ip.run_cell("wrn()")
finally:
ip.run_cell("del warnings")
ip.run_cell("del wrn")
| TestWarningSuppression |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.