language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django-mptt__django-mptt | tests/myapp/models.py | {
"start": 7808,
"end": 8048
} | class ____(models.Model):
name = models.CharField(max_length=100)
TreeForeignKey(
Group, blank=True, null=True, on_delete=models.CASCADE
).contribute_to_class(Group, "parent")
mptt.register(Group, order_insertion_by=("name",))
| Group |
python | spack__spack | lib/spack/spack/vendor/jsonschema/validators.py | {
"start": 18978,
"end": 29452
} | class ____(object):
"""
Resolve JSON References.
Arguments:
base_uri (str):
The URI of the referring document
referrer:
The actual referring document
store (dict):
A mapping from URIs to documents to cache
cache_remote (bool):
Whether remote refs should be cached after first resolution
handlers (dict):
A mapping from URI schemes to functions that should be used
to retrieve them
urljoin_cache (:func:`functools.lru_cache`):
A cache that will be used for caching the results of joining
the resolution scope to subscopes.
remote_cache (:func:`functools.lru_cache`):
A cache that will be used for caching the results of
resolved remote URLs.
Attributes:
cache_remote (bool):
Whether remote refs should be cached after first resolution
"""
def __init__(
self,
base_uri,
referrer,
store=(),
cache_remote=True,
handlers=(),
urljoin_cache=None,
remote_cache=None,
):
if urljoin_cache is None:
urljoin_cache = lru_cache(1024)(urljoin)
if remote_cache is None:
remote_cache = lru_cache(1024)(self.resolve_from_url)
self.referrer = referrer
self.cache_remote = cache_remote
self.handlers = dict(handlers)
self._scopes_stack = [base_uri]
self.store = _utils.URIDict(
(id, validator.META_SCHEMA)
for id, validator in iteritems(meta_schemas)
)
self.store.update(store)
self.store[base_uri] = referrer
self._urljoin_cache = urljoin_cache
self._remote_cache = remote_cache
@classmethod
def from_schema(cls, schema, id_of=_id_of, *args, **kwargs):
"""
Construct a resolver from a JSON schema object.
Arguments:
schema:
the referring schema
Returns:
`RefResolver`
"""
return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs)
def push_scope(self, scope):
"""
Enter a given sub-scope.
Treats further dereferences as being performed underneath the
given scope.
"""
self._scopes_stack.append(
self._urljoin_cache(self.resolution_scope, scope),
)
def pop_scope(self):
"""
Exit the most recent entered scope.
Treats further dereferences as being performed underneath the
original scope.
Don't call this method more times than `push_scope` has been
called.
"""
try:
self._scopes_stack.pop()
except IndexError:
raise exceptions.RefResolutionError(
"Failed to pop the scope from an empty stack. "
"`pop_scope()` should only be called once for every "
"`push_scope()`"
)
@property
def resolution_scope(self):
"""
Retrieve the current resolution scope.
"""
return self._scopes_stack[-1]
@property
def base_uri(self):
"""
Retrieve the current base URI, not including any fragment.
"""
uri, _ = urldefrag(self.resolution_scope)
return uri
@contextlib.contextmanager
def in_scope(self, scope):
"""
Temporarily enter the given scope for the duration of the context.
"""
self.push_scope(scope)
try:
yield
finally:
self.pop_scope()
@contextlib.contextmanager
def resolving(self, ref):
"""
Resolve the given ``ref`` and enter its resolution scope.
Exits the scope on exit of this context manager.
Arguments:
ref (str):
The reference to resolve
"""
url, resolved = self.resolve(ref)
self.push_scope(url)
try:
yield resolved
finally:
self.pop_scope()
def resolve(self, ref):
"""
Resolve the given reference.
"""
url = self._urljoin_cache(self.resolution_scope, ref)
return url, self._remote_cache(url)
def resolve_from_url(self, url):
"""
Resolve the given remote URL.
"""
url, fragment = urldefrag(url)
try:
document = self.store[url]
except KeyError:
try:
document = self.resolve_remote(url)
except Exception as exc:
raise exceptions.RefResolutionError(exc)
return self.resolve_fragment(document, fragment)
def resolve_fragment(self, document, fragment):
"""
Resolve a ``fragment`` within the referenced ``document``.
Arguments:
document:
The referent document
fragment (str):
a URI fragment to resolve within it
"""
fragment = fragment.lstrip(u"/")
parts = unquote(fragment).split(u"/") if fragment else []
for part in parts:
part = part.replace(u"~1", u"/").replace(u"~0", u"~")
if isinstance(document, Sequence):
# Array indexes should be turned into integers
try:
part = int(part)
except ValueError:
pass
try:
document = document[part]
except (TypeError, LookupError):
raise exceptions.RefResolutionError(
"Unresolvable JSON pointer: %r" % fragment
)
return document
def resolve_remote(self, uri):
"""
Resolve a remote ``uri``.
If called directly, does not check the store first, but after
retrieving the document at the specified URI it will be saved in
the store if :attr:`cache_remote` is True.
.. note::
If the requests_ library is present, ``jsonschema`` will use it to
request the remote ``uri``, so that the correct encoding is
detected and used.
If it isn't, or if the scheme of the ``uri`` is not ``http`` or
``https``, UTF-8 is assumed.
Arguments:
uri (str):
The URI to resolve
Returns:
The retrieved document
.. _requests: https://pypi.org/project/requests/
"""
try:
import requests
except ImportError:
requests = None
scheme = urlsplit(uri).scheme
if scheme in self.handlers:
result = self.handlers[scheme](uri)
elif scheme in [u"http", u"https"] and requests:
# Requests has support for detecting the correct encoding of
# json over http
result = requests.get(uri).json()
else:
# Otherwise, pass off to urllib and assume utf-8
with urlopen(uri) as url:
result = json.loads(url.read().decode("utf-8"))
if self.cache_remote:
self.store[uri] = result
return result
def validate(instance, schema, cls=None, *args, **kwargs):
"""
Validate an instance under the given schema.
>>> validate([2, 3, 4], {"maxItems": 2})
Traceback (most recent call last):
...
ValidationError: [2, 3, 4] is too long
:func:`validate` will first verify that the provided schema is
itself valid, since not doing so can lead to less obvious error
messages and fail in less obvious or consistent ways.
If you know you have a valid schema already, especially if you
intend to validate multiple instances with the same schema, you
likely would prefer using the `IValidator.validate` method directly
on a specific validator (e.g. ``Draft7Validator.validate``).
Arguments:
instance:
The instance to validate
schema:
The schema to validate with
cls (IValidator):
The class that will be used to validate the instance.
If the ``cls`` argument is not provided, two things will happen
in accordance with the specification. First, if the schema has a
:validator:`$schema` property containing a known meta-schema [#]_
then the proper validator will be used. The specification recommends
that all schemas contain :validator:`$schema` properties for this
reason. If no :validator:`$schema` property is found, the default
validator class is the latest released draft.
Any other provided positional and keyword arguments will be passed
on when instantiating the ``cls``.
Raises:
`jsonschema.exceptions.ValidationError` if the instance
is invalid
`jsonschema.exceptions.SchemaError` if the schema itself
is invalid
.. rubric:: Footnotes
.. [#] known by a validator registered with
`jsonschema.validators.validates`
"""
if cls is None:
cls = validator_for(schema)
cls.check_schema(schema)
validator = cls(schema, *args, **kwargs)
error = exceptions.best_match(validator.iter_errors(instance))
if error is not None:
raise error
def validator_for(schema, default=_LATEST_VERSION):
"""
Retrieve the validator class appropriate for validating the given schema.
Uses the :validator:`$schema` property that should be present in the
given schema to look up the appropriate validator class.
Arguments:
schema (collections.Mapping or bool):
the schema to look at
default:
the default to return if the appropriate validator class
cannot be determined.
If unprovided, the default is to return the latest supported
draft.
"""
if schema is True or schema is False or u"$schema" not in schema:
return default
if schema[u"$schema"] not in meta_schemas:
warn(
(
"The metaschema specified by $schema was not found. "
"Using the latest draft to validate, but this will raise "
"an error in the future."
),
DeprecationWarning,
stacklevel=2,
)
return meta_schemas.get(schema[u"$schema"], _LATEST_VERSION)
| RefResolver |
python | tensorflow__tensorflow | tensorflow/python/distribute/integration_test/saved_model_test.py | {
"start": 20974,
"end": 26476
} | class ____(test.TestCase):
# Test saved_model saving and loading for parameter server strategy. These
# tests are different enough than the tests in `SaveAndLoadForXXX` so we make
# a separate test class for them.
@classmethod
def setUpClass(cls):
super().setUpClass()
cluster_def = multi_worker_test_base.create_in_process_cluster(
num_workers=2, num_ps=2)
cls.cluster_resolver = tf.distribute.cluster_resolver.SimpleClusterResolver(
tf.train.ClusterSpec(cluster_def))
def tearDown(self):
super().tearDown()
context._reset_context()
def load_and_run_v1(self,
model_dir,
inputs,
signature_key=tf1.saved_model.signature_constants
.DEFAULT_SERVING_SIGNATURE_DEF_KEY):
"""Load a SavedModel into a TF 1.x-style graph and run `signature_key`."""
graph = tf.Graph()
with graph.as_default(), tf1.Session() as session:
meta_graph_def = tf1.saved_model.load(
session, [tf1.saved_model.tag_constants.SERVING], model_dir)
signature = meta_graph_def.signature_def[signature_key]
feed_dict = {}
for arg_name in inputs.keys():
input_tensor = session.graph.get_tensor_by_name(
signature.inputs[arg_name].name)
feed_dict[input_tensor] = inputs[arg_name]
output_dict = {}
for output_name, output_tensor_info in signature.outputs.items():
output_dict[output_name] = session.graph.get_tensor_by_name(
output_tensor_info.name)
return session.run(output_dict, feed_dict)["output_0"]
class Model(tf.Module):
def __init__(self):
self.v1 = tf.Variable([0, 0, 0, 0])
self.v2 = tf.Variable([1, 1, 1, 1])
self.table = lookup_ops.MutableHashTable(
key_dtype=tf.int32, value_dtype=tf.int32, default_value=-1)
def train(self):
# simulate a training process to mutate the state of the model.
self.v1.assign([2, 2, 2, 2])
self.v2.assign([3, 3, 3, 3])
self.table.insert(keys=1, values=1)
@tf.function(input_signature=[
tf.TensorSpec(shape=(), dtype=tf.dtypes.int32, name="x")
])
def __call__(self, x):
t = tf.math.add(self.v1, self.v2)
return tf.math.add(t, self.table.lookup(x))
def test_basic(self):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
model_dir = self.get_temp_dir()
with strategy.scope():
m = self.Model()
m.train()
tf.saved_model.save(m, model_dir)
# Load via V2 API.
loaded = tf.saved_model.load(model_dir)
self.assertRegex(loaded.v1.device, "/job:chief/replica:0/task:0")
self.assertRegex(loaded.v2.device, "/job:chief/replica:0/task:0")
self.assertAllEqual(loaded(tf.identity(1)), [6, 6, 6, 6])
loaded.v2.assign([1, 1, 1, 1])
self.assertAllEqual(loaded(tf.identity(1)), [4, 4, 4, 4])
# Load via V1 API.
self.assertAllEqual(self.load_and_run_v1(model_dir, {"x": 1}), [6, 6, 6, 6])
def test_load_to_same_strategy(self):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
model_dir = self.get_temp_dir()
with strategy.scope():
m = self.Model()
m.train()
tf.saved_model.save(m, model_dir)
with strategy.scope():
loaded = tf.saved_model.load(model_dir)
# Make sure that the variables are created on different devices. SavedModel
# may load the variables in a different order compared to the creation order
# so the devices may not be exactly the same as before.
self.assertTrue(("/job:ps/replica:0/task:0" in loaded.v1.device and
"/job:ps/replica:0/task:1" in loaded.v2.device) or
("/job:ps/replica:0/task:1" in loaded.v1.device and
"/job:ps/replica:0/task:0" in loaded.v2.device))
self.assertAllEqual(loaded(tf.identity(1)), [6, 6, 6, 6])
def test_load_to_different_strategy(self):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
model_dir = self.get_temp_dir()
with strategy.scope():
m = self.Model()
m.train()
tf.saved_model.save(m, model_dir)
del m # Garbage collect variables before we reset the context.
context._reset_context()
mirrored_strategy = tf.distribute.MirroredStrategy(devices=["CPU:0"])
with mirrored_strategy.scope():
loaded = tf.saved_model.load(model_dir)
self.assertIsInstance(loaded.v1, values.DistributedVariable)
self.assertAllEqual(loaded(tf.identity(1)), [6, 6, 6, 6])
def test_sharded_variable(self):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, tf1.fixed_size_partitioner(2))
model_dir = self.get_temp_dir()
with strategy.scope():
m = self.Model()
self.assertIsInstance(m.v1, sharded_variable.ShardedVariable)
m.train()
tf.saved_model.save(m, model_dir)
self.assertAllEqual(self.load_and_run_v1(model_dir, {"x": 1}), [6, 6, 6, 6])
def test_load_with_partitioner_works(self):
model = self.Model()
model_dir = self.get_temp_dir()
tf.saved_model.save(model, model_dir)
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, tf1.fixed_size_partitioner(2))
with strategy.scope():
tf.saved_model.load(model_dir)
if __name__ == "__main__":
test_util.main()
| PSStrategySaveAndLoadTest |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/graphql_callees.py | {
"start": 367,
"end": 1669
} | class ____:
x: str
def __init__(self, x) -> None:
self.x = x
@method_decorator
def callee(self) -> None:
_test_sink(self)
# This should not be called
def not_callee(self) -> None:
_test_sink(self)
def entrypoint_decorator(callable: Callable[[Any], Any]) -> Callable[[Any], Any]:
return callable
@entrypoint_decorator
def return_graphql_entrypoint_1(x: Any) -> GraphQLEntrypoint:
entrypoint = GraphQLEntrypoint(_test_source())
return entrypoint
@entrypoint_decorator
def return_graphql_entrypoint_2(x: Any) -> GraphQLEntrypoint:
# Test co-existence of return callees and expression callees
return GraphQLEntrypoint(_test_source())
@entrypoint_decorator
def return_graphql_entrypoint_3(entrypoint: GraphQLEntrypoint) -> GraphQLEntrypoint:
# Test sink
return entrypoint
@entrypoint_decorator
def return_graphql_entrypoint_strip_list(x: Any) -> List[GraphQLEntrypoint]:
# Test stripping from list types
entrypoint = GraphQLEntrypoint(_test_source())
return [entrypoint]
@entrypoint_decorator
def return_graphql_entrypoint_strip_list_with_sink(
entrypoints: List[GraphQLEntrypoint],
) -> List[GraphQLEntrypoint]:
# Test stripping from list types and test sink
return entrypoints
| GraphQLEntrypoint |
python | mlflow__mlflow | mlflow/genai/judges/utils/prompt_utils.py | {
"start": 379,
"end": 3607
} | class ____(NamedTuple):
"""Result of splitting ChatMessage list for Databricks API."""
system_prompt: str | None
user_prompt: str
def format_prompt(prompt: str, **values) -> str:
"""Format double-curly variables in the prompt template."""
for key, value in values.items():
# Escape backslashes in the replacement string to prevent re.sub from interpreting
# them as escape sequences (e.g. \u being treated as Unicode escape)
replacement = str(value).replace("\\", "\\\\")
prompt = re.sub(r"\{\{\s*" + key + r"\s*\}\}", replacement, prompt)
return prompt
def add_output_format_instructions(prompt: str, output_fields: list["JudgeField"]) -> str:
"""
Add structured output format instructions to a judge prompt.
This ensures the LLM returns a JSON response with the expected fields,
matching the expected format for the invoke_judge_model function.
Args:
prompt: The formatted prompt with template variables filled in
output_fields: List of JudgeField objects defining output fields.
Returns:
The prompt with output format instructions appended
"""
json_format_lines = [f' "{field.name}": "{field.description}"' for field in output_fields]
json_format = "{\n" + ",\n".join(json_format_lines) + "\n}"
output_format_instructions = f"""
Please provide your assessment in the following JSON format only (no markdown):
{json_format}"""
return prompt + output_format_instructions
def _split_messages_for_databricks(messages: list["ChatMessage"]) -> DatabricksLLMJudgePrompts:
"""
Split a list of ChatMessage objects into system and user prompts for Databricks API.
Args:
messages: List of ChatMessage objects to split.
Returns:
DatabricksLLMJudgePrompts namedtuple with system_prompt and user_prompt fields.
The system_prompt may be None.
Raises:
MlflowException: If the messages list is empty or invalid.
"""
from mlflow.types.llm import ChatMessage
if not messages:
raise MlflowException(
"Invalid prompt format: expected non-empty list of ChatMessage",
error_code=BAD_REQUEST,
)
system_prompt = None
user_parts = []
for msg in messages:
if isinstance(msg, ChatMessage):
if msg.role == "system":
# Use the first system message as the actual system prompt for the API.
# Any subsequent system messages are appended to the user prompt to preserve
# their content and maintain the order in which they appear in the submitted
# evaluation payload.
if system_prompt is None:
system_prompt = msg.content
else:
user_parts.append(f"System: {msg.content}")
elif msg.role == "user":
user_parts.append(msg.content)
elif msg.role == "assistant":
user_parts.append(f"Assistant: {msg.content}")
user_prompt = "\n\n".join(user_parts) if user_parts else ""
return DatabricksLLMJudgePrompts(system_prompt=system_prompt, user_prompt=user_prompt)
| DatabricksLLMJudgePrompts |
python | huggingface__transformers | tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py | {
"start": 38374,
"end": 43433
} | class ____(EncoderDecoderMixin, unittest.TestCase):
supports_sdpa = True # both submodels support SDPA
def get_encoder_decoder_model(self, config, decoder_config):
encoder_model = ViTModel(config).eval()
decoder_model = GPT2LMHeadModel(decoder_config).eval()
return encoder_model, decoder_model
def prepare_config_and_inputs(self):
model_tester_encoder = ViTModelTester(self, batch_size=13)
model_tester_decoder = GPT2ModelTester(self, batch_size=13, hidden_size=32, max_position_embeddings=512)
encoder_config_and_inputs = model_tester_encoder.prepare_config_and_inputs()
decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs(extra_inputs=True)
config, pixel_values, labels = encoder_config_and_inputs
decoder_config, decoder_input_ids, decoder_attention_mask, _, _, _, _, _ = decoder_config_and_inputs
# make sure that cross attention layers are added
decoder_config.add_cross_attention = True
# disable cache for now
decoder_config.use_cache = False
return {
"config": config,
"pixel_values": pixel_values,
"decoder_config": decoder_config,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"labels": decoder_input_ids,
}
def check_encoder_decoder_model_output_attentions(
self,
config,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
pixel_values,
labels=None,
**kwargs,
):
# force eager attention to support output attentions
config._attn_implementation = "eager"
decoder_config._attn_implementation = "eager"
# make the decoder inputs a different shape from the encoder inputs to harden the test
decoder_input_ids = decoder_input_ids[:, :-1]
decoder_attention_mask = decoder_attention_mask[:, :-1]
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
output_attentions=True,
**kwargs,
)
encoder_attentions = outputs_encoder_decoder["encoder_attentions"]
self.assertEqual(len(encoder_attentions), config.num_hidden_layers)
seq_len = (encoder_model.config.image_size // encoder_model.config.patch_size) ** 2 + 1
decoder_attentions = outputs_encoder_decoder["decoder_attentions"]
num_decoder_layers = (
decoder_config.num_decoder_layers
if hasattr(decoder_config, "num_decoder_layers")
else decoder_config.num_hidden_layers
)
self.assertEqual(len(decoder_attentions), num_decoder_layers)
self.assertEqual(
decoder_attentions[0].shape[-3:],
(decoder_config.num_attention_heads, decoder_input_ids.shape[-1], decoder_input_ids.shape[-1]),
)
cross_attentions = outputs_encoder_decoder["cross_attentions"]
self.assertEqual(len(cross_attentions), num_decoder_layers)
cross_attention_input_seq_len = decoder_input_ids.shape[-1]
self.assertEqual(
cross_attentions[0].shape[-3:],
(decoder_config.num_attention_heads, cross_attention_input_seq_len, seq_len), # 4 6 16
)
def check_encoder_decoder_model_generate(self, config, decoder_config, pixel_values=None, **kwargs):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
# Generate until max length
if hasattr(enc_dec_model.config, "eos_token_id"):
enc_dec_model.config.eos_token_id = None
if hasattr(enc_dec_model.config, "decoder") and hasattr(enc_dec_model.config.decoder, "eos_token_id"):
enc_dec_model.config.decoder.eos_token_id = None
if hasattr(enc_dec_model.generation_config, "eos_token_id"):
enc_dec_model.generation_config.eos_token_id = None
enc_dec_model.to(torch_device)
generated_output = enc_dec_model.generate(
pixel_values=pixel_values,
decoder_start_token_id=enc_dec_model.config.decoder.bos_token_id,
max_length=enc_dec_model.generation_config.max_length,
**kwargs,
)
self.assertEqual(
generated_output.shape, (pixel_values.shape[0],) + (enc_dec_model.generation_config.max_length,)
)
@unittest.skip(reason="VIT2GPT2 also has an integration test for testinf save-load")
def test_real_model_save_load_from_pretrained(self):
pass
@require_torch
| VIT2GPT2Test |
python | readthedocs__readthedocs.org | readthedocs/metrics/tasks.py | {
"start": 839,
"end": 1093
} | class ____(Metrics1mTaskBase):
metrics = Metrics1mTaskBase.metrics + [
RedislenMetric(queue_name="build-large"),
RunningBuildsMetric(builder="large"),
ConcurrencyLimitedBuildsMetric(builder="large"),
]
| CommunityMetrics1mTask |
python | mahmoud__glom | glom/core.py | {
"start": 62608,
"end": 63526
} | class ____:
"""
:class:`Vars` is a helper that can be used with **S** in order to
store shared mutable state.
Takes the same arguments as :class:`dict()`.
Arguments here should be thought of the same way as default arguments
to a function. Each time the spec is evaluated, the same arguments
will be referenced; so, think carefully about mutable data structures.
"""
def __init__(self, base=(), **kw):
dict(base) # ensure it is a dict-compatible first arg
self.base = base
self.defaults = kw
def glomit(self, target, spec):
return ScopeVars(self.base, self.defaults)
def __repr__(self):
ret = format_invocation(self.__class__.__name__,
args=(self.base,) if self.base else (),
kwargs=self.defaults,
repr=bbrepr)
return ret
| Vars |
python | jazzband__django-simple-history | simple_history/registry_tests/tests.py | {
"start": 3038,
"end": 3500
} | class ____(TestCase):
def test_using_app_label(self):
try:
from ..tests.models import HistoricalConcreteExternal
except ImportError:
self.fail("HistoricalConcreteExternal is in wrong module")
def test_default(self):
try:
from ..tests.models import HistoricalConcreteExternal2
except ImportError:
self.fail("HistoricalConcreteExternal2 is in wrong module")
| TestInheritedModule |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/auth/managers/models/resource_details.py | {
"start": 1570,
"end": 1687
} | class ____:
"""Represents the details of an asset alias."""
id: str | None = None
@dataclass
| AssetAliasDetails |
python | gevent__gevent | src/gevent/tests/test__greenio.py | {
"start": 1544,
"end": 5523
} | class ____(TestCase):
def test_close_with_makefile(self):
def accept_close_early(listener):
# verify that the makefile and the socket are truly independent
# by closing the socket prior to using the made file
try:
conn, _ = listener.accept()
fd = conn.makefile(mode='wb')
conn.close()
fd.write(b'hello\n')
fd.close()
_write_to_closed(fd, b'a')
self.assertRaises(socket.error, conn.send, b'b')
finally:
listener.close()
def accept_close_late(listener):
# verify that the makefile and the socket are truly independent
# by closing the made file and then sending a character
try:
conn, _ = listener.accept()
fd = conn.makefile(mode='wb')
fd.write(b'hello')
fd.close()
conn.send(b'\n')
conn.close()
_write_to_closed(fd, b'a')
self.assertRaises(socket.error, conn.send, b'b')
finally:
listener.close()
def did_it_work(server):
client = socket.create_connection((params.DEFAULT_CONNECT, server.getsockname()[1]))
fd = client.makefile(mode='rb')
client.close()
self.assertEqual(fd.readline(), b'hello\n')
self.assertFalse(fd.read())
fd.close()
server = tcp_listener()
server_greenlet = gevent.spawn(accept_close_early, server)
did_it_work(server)
server_greenlet.kill()
server = tcp_listener()
server_greenlet = gevent.spawn(accept_close_late, server)
did_it_work(server)
server_greenlet.kill()
@skipOnPyPy("Takes multiple GCs and issues a warning we can't catch")
def test_del_closes_socket(self):
import warnings
def accept_once(listener):
# delete/overwrite the original conn
# object, only keeping the file object around
# closing the file object should close everything
# This is not *exactly* true on Python 3. This produces
# a ResourceWarning, which we silence below. (Previously we actually
# *saved* a reference to the socket object, so we
# weren't testing what we thought we were.)
# It's definitely not true on PyPy, which needs GC to
# reliably close everything; sometimes this is more than
# one collection cycle. And PyPy issues a warning with -X
# track-resources that we cannot catch.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
try:
conn = listener.accept()[0]
# Note that we overwrite the original variable,
# losing our reference to the socket.
conn = conn.makefile(mode='wb')
conn.write(b'hello\n')
conn.close()
_write_to_closed(conn, b'a')
finally:
listener.close()
del listener
del conn
gc_collect_if_needed()
gc_collect_if_needed()
server = tcp_listener()
gevent.spawn(accept_once, server)
client = socket.create_connection((params.DEFAULT_CONNECT, server.getsockname()[1]))
with gevent.Timeout.start_new(0.5):
fd = client.makefile()
client.close()
self.assertEqual(fd.read(), 'hello\n')
# If the socket isn't closed when 'accept_once' finished,
# then this will hang and exceed the timeout
self.assertEqual(fd.read(), '')
fd.close()
del client
del fd
if __name__ == '__main__':
greentest.main()
| TestGreenIo |
python | keon__algorithms | tests/test_matrix.py | {
"start": 6361,
"end": 7201
} | class ____(unittest.TestCase):
"""[summary]
Test for the file matrix_exponentiation.py
Arguments:
unittest {[type]} -- [description]
"""
def test_matrix_exponentiation(self):
mat = [[1, 0, 2], [2, 1, 0], [0, 2, 1]]
self.assertEqual(matrix_exponentiation.matrix_exponentiation(mat, 0),
[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertEqual(matrix_exponentiation.matrix_exponentiation(mat, 1),
[[1, 0, 2], [2, 1, 0], [0, 2, 1]])
self.assertEqual(matrix_exponentiation.matrix_exponentiation(mat, 2),
[[1, 4, 4], [4, 1, 4], [4, 4, 1]])
self.assertEqual(matrix_exponentiation.matrix_exponentiation(mat, 5),
[[81, 72, 90], [90, 81, 72], [72, 90, 81]])
| TestMatrixExponentiation |
python | RaRe-Technologies__gensim | gensim/models/doc2vec.py | {
"start": 4714,
"end": 5632
} | class ____(namedtuple('TaggedDocument', 'words tags')):
"""Represents a document along with a tag, input document format for :class:`~gensim.models.doc2vec.Doc2Vec`.
A single document, made up of `words` (a list of unicode string tokens) and `tags` (a list of tokens).
Tags may be one or more unicode string tokens, but typical practice (which will also be the most memory-efficient)
is for the tags list to include a unique integer id as the only tag.
Replaces "sentence as a list of words" from :class:`gensim.models.word2vec.Word2Vec`.
"""
def __str__(self):
"""Human readable representation of the object's state, used for debugging.
Returns
-------
str
Human readable representation of the object's state (words and tags).
"""
return '%s<%s, %s>' % (self.__class__.__name__, self.words, self.tags)
@dataclass
| TaggedDocument |
python | python-markdown__markdown | tests/test_legacy.py | {
"start": 1151,
"end": 1240
} | class ____(LegacyTestCase):
location = os.path.join(parent_test_dir, 'basic')
| TestBasic |
python | django__django | tests/flatpages_tests/test_middleware.py | {
"start": 2458,
"end": 5830
} | class ____(TestDataMixin, TestCase):
def test_view_flatpage(self):
"""
A flatpage can be served through a view, even when the middleware is in
use
"""
response = self.client.get("/flatpage_root/flatpage/")
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"""
A nonexistent flatpage raises 404 when served through a view, even when
the middleware is in use.
"""
response = self.client.get("/flatpage_root/no_such_flatpage/")
self.assertEqual(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get("/flatpage_root/sekrit/")
self.assertRedirects(response, "/accounts/login/?next=/flatpage_root/sekrit/")
user = User.objects.create_user("testuser", "test@example.com", "s3krit")
self.client.force_login(user)
response = self.client.get("/flatpage_root/sekrit/")
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware"
response = self.client.get("/flatpage/")
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_fallback_non_existent_flatpage(self):
"""
A nonexistent flatpage raises a 404 when served by the fallback
middleware.
"""
response = self.client.get("/no_such_flatpage/")
self.assertEqual(response.status_code, 404)
def test_fallback_authenticated_flatpage(self):
"A flatpage served by the middleware can require authentication"
response = self.client.get("/sekrit/")
self.assertRedirects(response, "/accounts/login/?next=/sekrit/")
user = User.objects.create_user("testuser", "test@example.com", "s3krit")
self.client.force_login(user)
response = self.client.get("/sekrit/")
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage_special_chars(self):
"""
A flatpage with special chars in the URL can be served by the fallback
middleware.
"""
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get("/some.very_special~chars-here/")
self.assertContains(response, "<p>Isn't it special!</p>")
@modify_settings(INSTALLED_APPS={"append": "django.contrib.flatpages"})
@override_settings(
APPEND_SLASH=True,
LOGIN_URL="/accounts/login/",
MIDDLEWARE=[
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.flatpages.middleware.FlatpageFallbackMiddleware",
],
ROOT_URLCONF="flatpages_tests.urls",
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
| FlatpageMiddlewareTests |
python | pytorch__pytorch | torch/ao/ns/_numeric_suite_fx.py | {
"start": 4438,
"end": 8590
} | class ____(nn.Module):
"""
Base class for capturing intermediate values.
"""
stats: list[torch.Tensor]
stats_rnn: list[RNNReturnType]
# Mark as impure so that calls to it will not be removed during DCE.
_is_impure = True
def __init__(
self,
ref_node_name: str,
prev_node_name: str,
model_name: str,
ref_name: str,
prev_node_target_type: str,
ref_node_target_type: str,
results_type: str,
index_within_arg: int,
index_of_arg: int,
fqn: str | None,
qconfig_str: str | None = "",
):
super().__init__()
self.stats: list[torch.Tensor] = []
self.stats_rnn: list[RNNReturnType] = []
# name of the node which was responsible for adding this logger
# Note:
# - if we are logging node outputs, this is the same as prev_node_name
# - if we are logging node inputs, this is the name of the node
# whose input this logger is logging.
#
# example, where logger1 is logging input of op1 and logger2 is logging
# the output of op1:
#
# x1 -> logger1 -> op1 -> logger2 -> x2
#
# in this example,
# - logger1's prev_node_name is x1 and ref_node_name is op1
# - logger2's prev_node_name is op1 and ref_node_name is op1
self.ref_node_name = ref_node_name
# name of the node whose output this Logger is capturing
self.prev_node_name = prev_node_name
# name of the model from which the node originated from
self.model_name = model_name
# reference name, used to match loggers from separate models
# to each other
self.ref_name = ref_name
# type of the target of the node whose output this logger is logging
self.prev_node_target_type = prev_node_target_type
# type of the target of the node which was responsible for adding this
# logger
self.ref_node_target_type = ref_node_target_type
# what kind of values are inside of stats
self.results_type = results_type
# index of this node within the arg of the input/output node
# for example, in cat([x1, x2, x3], dim=0), x2 would have index_within_arg == 1
self.index_within_arg = index_within_arg
# index of this node within the args of the input/output node
# for example, in add(x1, x2), x2 would have index_of_arg == 1
self.index_of_arg = index_of_arg
# fully qualified name
self.fqn = fqn
# if loggers are added before prepare_fx, but we do not want
# collect results of calibration, only results after convert_fx
# so, we add a flag to control whether this logger collects data
self.enabled = True
# string representation of qconfig
self.qconfig_str = qconfig_str
# this can be turned off to reduce memory usage during calibration
self.save_activations = True
# Note: cannot annotate the type of x because TorchScript does not support
# the Union type.
def forward(self, x):
# fmt: off
"""
""" # blank docblock to make autodoc happy
# fmt: on
# TODO(future PR): consider designing this better, as the difference
# between these two flags is subtle and not obvious.
if not self.enabled:
return x
if not self.save_activations:
return x
# TODO(future PR): consider refactoring this to better reuse the parent
# class
if isinstance(x, torch.Tensor):
self.stats.append(x.detach())
elif isinstance(x, tuple) and len(x) == 2 and len(x[1]) == 2:
new_res = (x[0].detach(), (x[1][0].detach(), x[1][1].detach()))
self.stats_rnn.append(new_res)
return x
def __repr__(self):
clean_dict = {
k: v
for k, v in self.__dict__.items()
# skip nn.Module keys
if (k != "training") and not k.startswith("_")
}
return f"OutputLogger({clean_dict})"
| OutputLogger |
python | pytorch__pytorch | torch/_dynamo/utils.py | {
"start": 74015,
"end": 74559
} | class ____:
"""Remove a global variable when hook is called"""
scope: dict[str, Any]
name: str
def __call__(self, *args: Any) -> None:
# Make sure we're not shutting down
if CleanupManager is not None:
CleanupManager.count -= 1
del self.scope[self.name]
@staticmethod
def create(scope: dict[str, Any], name: str, val: Any) -> CleanupHook:
assert name not in scope
CleanupManager.count += 1
scope[name] = val
return CleanupHook(scope, name)
| CleanupHook |
python | pypa__pip | src/pip/_internal/commands/uninstall.py | {
"start": 711,
"end": 3868
} | class ____(Command, SessionCommandMixin):
"""
Uninstall packages.
pip is able to uninstall most installed packages. Known exceptions are:
- Pure distutils packages installed with ``python setup.py install``, which
leave behind no metadata to determine what files were installed.
- Script wrappers installed by ``python setup.py develop``.
"""
usage = """
%prog [options] <package> ...
%prog [options] -r <requirements file> ..."""
def add_options(self) -> None:
self.cmd_opts.add_option(
"-r",
"--requirement",
dest="requirements",
action="append",
default=[],
metavar="file",
help=(
"Uninstall all the packages listed in the given requirements "
"file. This option can be used multiple times."
),
)
self.cmd_opts.add_option(
"-y",
"--yes",
dest="yes",
action="store_true",
help="Don't ask for confirmation of uninstall deletions.",
)
self.cmd_opts.add_option(cmdoptions.root_user_action())
self.cmd_opts.add_option(cmdoptions.override_externally_managed())
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options: Values, args: list[str]) -> int:
session = self.get_default_session(options)
reqs_to_uninstall = {}
for name in args:
req = install_req_from_line(
name,
isolated=options.isolated_mode,
)
if req.name:
reqs_to_uninstall[canonicalize_name(req.name)] = req
else:
logger.warning(
"Invalid requirement: %r ignored -"
" the uninstall command expects named"
" requirements.",
name,
)
for filename in options.requirements:
for parsed_req in parse_requirements(
filename, options=options, session=session
):
req = install_req_from_parsed_requirement(
parsed_req, isolated=options.isolated_mode
)
if req.name:
reqs_to_uninstall[canonicalize_name(req.name)] = req
if not reqs_to_uninstall:
raise InstallationError(
f"You must give at least one requirement to {self.name} (see "
f'"pip help {self.name}")'
)
if not options.override_externally_managed:
check_externally_managed()
protect_pip_from_modification_on_windows(
modifying_pip="pip" in reqs_to_uninstall
)
for req in reqs_to_uninstall.values():
uninstall_pathset = req.uninstall(
auto_confirm=options.yes,
verbose=self.verbosity > 0,
)
if uninstall_pathset:
uninstall_pathset.commit()
if options.root_user_action == "warn":
warn_if_run_as_root()
return SUCCESS
| UninstallCommand |
python | squidfunk__mkdocs-material | material/plugins/group/config.py | {
"start": 1411,
"end": 1513
} | class ____(Config):
enabled = Type(bool, default = False)
plugins = Type((list, dict))
| GroupConfig |
python | mlflow__mlflow | dev/clint/src/clint/rules/forbidden_trace_ui_in_notebook.py | {
"start": 36,
"end": 422
} | class ____(Rule):
def _message(self) -> str:
return (
"Found the MLflow Trace UI iframe in the notebook. "
"The trace UI in cell outputs will not render correctly in previews or the website. "
"Please run `mlflow.tracing.disable_notebook_display()` and rerun the cell "
"to remove the iframe."
)
| ForbiddenTraceUIInNotebook |
python | kamyu104__LeetCode-Solutions | Python/sum-of-digits-in-base-k.py | {
"start": 32,
"end": 285
} | class ____(object):
def sumBase(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
result = 0
while n:
n, r = divmod(n, k)
result += r
return result
| Solution |
python | allegroai__clearml | clearml/utilities/gpu/pynvml.py | {
"start": 55671,
"end": 56033
} | class ____(_PrintableStructure):
_fields_ = [
('engineId', c_uint),
('schedulerPolicy', c_uint),
('isEnabledARR', c_uint),
('schedulerParams', c_nvmlVgpuSchedulerParams_t),
('entriesCount', c_uint),
('logEntries', c_nvmlVgpuSchedulerLogEntry_t * NVML_SCHEDULER_SW_MAX_LOG_ENTRIES),
]
| c_nvmlVgpuSchedulerLog_t |
python | pypa__installer | src/installer/scripts.py | {
"start": 1154,
"end": 1272
} | class ____(ValueError):
"""Raised if the user provides incorrect script section or kind."""
@dataclass
| InvalidScript |
python | ray-project__ray | python/ray/data/_internal/block_batching/util.py | {
"start": 9700,
"end": 10480
} | class ____(BlockPrefetcher):
"""Block prefetcher using a local actor."""
def __init__(self):
self.prefetch_actor = self._get_or_create_actor_prefetcher()
@staticmethod
def _get_or_create_actor_prefetcher() -> "ActorHandle":
node_id = ray.get_runtime_context().get_node_id()
actor_name = f"dataset-block-prefetcher-{node_id}"
return _BlockPretcher.options(
scheduling_strategy=NodeAffinitySchedulingStrategy(node_id, soft=False),
name=actor_name,
namespace=PREFETCHER_ACTOR_NAMESPACE,
get_if_exists=True,
).remote()
def prefetch_blocks(self, blocks: List[ObjectRef[Block]]):
self.prefetch_actor.prefetch.remote(*blocks)
@ray.remote(num_cpus=0)
| ActorBlockPrefetcher |
python | pypa__pip | src/pip/_vendor/packaging/_parser.py | {
"start": 1043,
"end": 10221
} | class ____(NamedTuple):
name: str
url: str
extras: list[str]
specifier: str
marker: MarkerList | None
# --------------------------------------------------------------------------------------
# Recursive descent parser for dependency specifier
# --------------------------------------------------------------------------------------
def parse_requirement(source: str) -> ParsedRequirement:
return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
"""
requirement = WS? IDENTIFIER WS? extras WS? requirement_details
"""
tokenizer.consume("WS")
name_token = tokenizer.expect(
"IDENTIFIER", expected="package name at the start of dependency specifier"
)
name = name_token.text
tokenizer.consume("WS")
extras = _parse_extras(tokenizer)
tokenizer.consume("WS")
url, specifier, marker = _parse_requirement_details(tokenizer)
tokenizer.expect("END", expected="end of dependency specifier")
return ParsedRequirement(name, url, extras, specifier, marker)
def _parse_requirement_details(
tokenizer: Tokenizer,
) -> tuple[str, str, MarkerList | None]:
"""
requirement_details = AT URL (WS requirement_marker?)?
| specifier WS? (requirement_marker)?
"""
specifier = ""
url = ""
marker = None
if tokenizer.check("AT"):
tokenizer.read()
tokenizer.consume("WS")
url_start = tokenizer.position
url = tokenizer.expect("URL", expected="URL after @").text
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
tokenizer.expect("WS", expected="whitespace after URL")
# The input might end after whitespace.
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer, span_start=url_start, after="URL and whitespace"
)
else:
specifier_start = tokenizer.position
specifier = _parse_specifier(tokenizer)
tokenizer.consume("WS")
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer,
span_start=specifier_start,
after=(
"version specifier"
if specifier
else "name and no valid version specifier"
),
)
return (url, specifier, marker)
def _parse_requirement_marker(
tokenizer: Tokenizer, *, span_start: int, after: str
) -> MarkerList:
"""
requirement_marker = SEMICOLON marker WS?
"""
if not tokenizer.check("SEMICOLON"):
tokenizer.raise_syntax_error(
f"Expected end or semicolon (after {after})",
span_start=span_start,
)
tokenizer.read()
marker = _parse_marker(tokenizer)
tokenizer.consume("WS")
return marker
def _parse_extras(tokenizer: Tokenizer) -> list[str]:
"""
extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
"""
if not tokenizer.check("LEFT_BRACKET", peek=True):
return []
with tokenizer.enclosing_tokens(
"LEFT_BRACKET",
"RIGHT_BRACKET",
around="extras",
):
tokenizer.consume("WS")
extras = _parse_extras_list(tokenizer)
tokenizer.consume("WS")
return extras
def _parse_extras_list(tokenizer: Tokenizer) -> list[str]:
"""
extras_list = identifier (wsp* ',' wsp* identifier)*
"""
extras: list[str] = []
if not tokenizer.check("IDENTIFIER"):
return extras
extras.append(tokenizer.read().text)
while True:
tokenizer.consume("WS")
if tokenizer.check("IDENTIFIER", peek=True):
tokenizer.raise_syntax_error("Expected comma between extra names")
elif not tokenizer.check("COMMA"):
break
tokenizer.read()
tokenizer.consume("WS")
extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")
extras.append(extra_token.text)
return extras
def _parse_specifier(tokenizer: Tokenizer) -> str:
"""
specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
| WS? version_many WS?
"""
with tokenizer.enclosing_tokens(
"LEFT_PARENTHESIS",
"RIGHT_PARENTHESIS",
around="version specifier",
):
tokenizer.consume("WS")
parsed_specifiers = _parse_version_many(tokenizer)
tokenizer.consume("WS")
return parsed_specifiers
def _parse_version_many(tokenizer: Tokenizer) -> str:
"""
version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?
"""
parsed_specifiers = ""
while tokenizer.check("SPECIFIER"):
span_start = tokenizer.position
parsed_specifiers += tokenizer.read().text
if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
tokenizer.raise_syntax_error(
".* suffix can only be used with `==` or `!=` operators",
span_start=span_start,
span_end=tokenizer.position + 1,
)
if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
tokenizer.raise_syntax_error(
"Local version label can only be used with `==` or `!=` operators",
span_start=span_start,
span_end=tokenizer.position,
)
tokenizer.consume("WS")
if not tokenizer.check("COMMA"):
break
parsed_specifiers += tokenizer.read().text
tokenizer.consume("WS")
return parsed_specifiers
# --------------------------------------------------------------------------------------
# Recursive descent parser for marker expression
# --------------------------------------------------------------------------------------
def parse_marker(source: str) -> MarkerList:
return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList:
retval = _parse_marker(tokenizer)
tokenizer.expect("END", expected="end of marker expression")
return retval
def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
"""
marker = marker_atom (BOOLOP marker_atom)+
"""
expression = [_parse_marker_atom(tokenizer)]
while tokenizer.check("BOOLOP"):
token = tokenizer.read()
expr_right = _parse_marker_atom(tokenizer)
expression.extend((token.text, expr_right))
return expression
def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
"""
marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?
| WS? marker_item WS?
"""
tokenizer.consume("WS")
if tokenizer.check("LEFT_PARENTHESIS", peek=True):
with tokenizer.enclosing_tokens(
"LEFT_PARENTHESIS",
"RIGHT_PARENTHESIS",
around="marker expression",
):
tokenizer.consume("WS")
marker: MarkerAtom = _parse_marker(tokenizer)
tokenizer.consume("WS")
else:
marker = _parse_marker_item(tokenizer)
tokenizer.consume("WS")
return marker
def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
"""
marker_item = WS? marker_var WS? marker_op WS? marker_var WS?
"""
tokenizer.consume("WS")
marker_var_left = _parse_marker_var(tokenizer)
tokenizer.consume("WS")
marker_op = _parse_marker_op(tokenizer)
tokenizer.consume("WS")
marker_var_right = _parse_marker_var(tokenizer)
tokenizer.consume("WS")
return (marker_var_left, marker_op, marker_var_right)
def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:
"""
marker_var = VARIABLE | QUOTED_STRING
"""
if tokenizer.check("VARIABLE"):
return process_env_var(tokenizer.read().text.replace(".", "_"))
elif tokenizer.check("QUOTED_STRING"):
return process_python_str(tokenizer.read().text)
else:
tokenizer.raise_syntax_error(
message="Expected a marker variable or quoted string"
)
def process_env_var(env_var: str) -> Variable:
if env_var in ("platform_python_implementation", "python_implementation"):
return Variable("platform_python_implementation")
else:
return Variable(env_var)
def process_python_str(python_str: str) -> Value:
value = ast.literal_eval(python_str)
return Value(str(value))
def _parse_marker_op(tokenizer: Tokenizer) -> Op:
"""
marker_op = IN | NOT IN | OP
"""
if tokenizer.check("IN"):
tokenizer.read()
return Op("in")
elif tokenizer.check("NOT"):
tokenizer.read()
tokenizer.expect("WS", expected="whitespace after 'not'")
tokenizer.expect("IN", expected="'in' after 'not'")
return Op("not in")
elif tokenizer.check("OP"):
return Op(tokenizer.read().text)
else:
return tokenizer.raise_syntax_error(
"Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, in, not in"
)
| ParsedRequirement |
python | pytorch__pytorch | test/ao/sparsity/test_structured_sparsifier.py | {
"start": 905,
"end": 1084
} | class ____(BaseStructuredSparsifier):
def update_mask(self, module, tensor_name, **kwargs):
getattr(module.parametrizations, tensor_name)[0].mask[1] = False
| SimplePruner |
python | huggingface__transformers | src/transformers/models/lfm2_moe/modeling_lfm2_moe.py | {
"start": 8522,
"end": 10438
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.top_k = config.num_experts_per_tok
self.routed_scaling_factor = config.routed_scaling_factor
self.norm_topk_prob = config.norm_topk_prob
self.use_expert_bias = config.use_expert_bias
self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)
self.experts = Lfm2MoeExperts(config)
if self.use_expert_bias:
self.register_buffer("expert_bias", torch.zeros(config.num_experts, dtype=torch.float32))
def route_tokens_to_experts(self, router_logits):
routing_weights = router_logits.sigmoid()
if self.use_expert_bias:
scores_for_routing = routing_weights + self.expert_bias
_, selected_experts = torch.topk(scores_for_routing, k=self.top_k, dim=-1)
routing_weights = torch.gather(routing_weights, dim=1, index=selected_experts).type_as(router_logits)
else:
routing_weights, selected_experts = torch.topk(routing_weights, k=self.top_k, dim=-1)
if self.norm_topk_prob:
routing_weights = routing_weights / (routing_weights.sum(dim=-1, keepdim=True) + 1e-6)
routing_weights = routing_weights * self.routed_scaling_factor
return selected_experts, routing_weights
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
batch_size, sequence_length, hidden_dim = hidden_states.shape
hidden_states_reshaped = hidden_states.view(-1, hidden_dim)
router_logits = self.gate(hidden_states_reshaped)
selected_experts, routing_weights = self.route_tokens_to_experts(router_logits)
final_hidden_states = self.experts(hidden_states_reshaped, selected_experts, routing_weights)
return final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
| Lfm2MoeSparseMoeBlock |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass6.py | {
"start": 827,
"end": 1085
} | class ____:
prop_1: str
prop_2: str
prop_3: str = field(default="")
prop_4: str = field(init=False)
prop_5: str = field(init=False)
def __post_init__(self):
cprop_1 = "calculated value"
cprop_2 = "calculated value"
| ClassB |
python | huggingface__transformers | src/transformers/models/kosmos2/modeling_kosmos2.py | {
"start": 18192,
"end": 22246
} | class ____(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`Kosmos2VisionEncoderLayer`].
Args:
config: Kosmos2VisionConfig
"""
def __init__(self, config: Kosmos2VisionConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([Kosmos2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
# Similar to `transformers.models.clip.modeling_clip.CLIPVisionTransformer` but without docstring for `forward`
| Kosmos2VisionEncoder |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/network/session.py | {
"start": 10261,
"end": 18741
} | class ____(requests.Session):
timeout: Optional[int] = None
def __init__(
self,
*args: Any,
retries: int = 0,
cache: Optional[str] = None,
trusted_hosts: Sequence[str] = (),
index_urls: Optional[List[str]] = None,
ssl_context: Optional["SSLContext"] = None,
**kwargs: Any,
) -> None:
"""
:param trusted_hosts: Domains not to emit warnings for when not using
HTTPS.
"""
super().__init__(*args, **kwargs)
# Namespace the attribute with "pip_" just in case to prevent
# possible conflicts with the base class.
self.pip_trusted_origins: List[Tuple[str, Optional[int]]] = []
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth(index_urls=index_urls)
# Create our urllib3.Retry instance which will allow us to customize
# how we handle retries.
retries = urllib3.Retry(
# Set the total number of retries that a particular request can
# have.
total=retries,
# A 503 error from PyPI typically means that the Fastly -> Origin
# connection got interrupted in some way. A 503 error in general
# is typically considered a transient error so we'll go ahead and
# retry it.
# A 500 may indicate transient error in Amazon S3
# A 502 may be a transient error from a CDN like CloudFlare or CloudFront
# A 520 or 527 - may indicate transient error in CloudFlare
status_forcelist=[500, 502, 503, 520, 527],
# Add a small amount of back off between failed requests in
# order to prevent hammering the service.
backoff_factor=0.25,
) # type: ignore
# Our Insecure HTTPAdapter disables HTTPS validation. It does not
# support caching so we'll use it for all http:// URLs.
# If caching is disabled, we will also use it for
# https:// hosts that we've marked as ignoring
# TLS errors for (trusted-hosts).
insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
# We want to _only_ cache responses on securely fetched origins or when
# the host is specified as trusted. We do this because
# we can't validate the response of an insecurely/untrusted fetched
# origin, and we don't want someone to be able to poison the cache and
# require manual eviction from the cache to fix it.
if cache:
secure_adapter = CacheControlAdapter(
cache=SafeFileCache(cache),
max_retries=retries,
ssl_context=ssl_context,
)
self._trusted_host_adapter = InsecureCacheControlAdapter(
cache=SafeFileCache(cache),
max_retries=retries,
)
else:
secure_adapter = HTTPAdapter(max_retries=retries, ssl_context=ssl_context)
self._trusted_host_adapter = insecure_adapter
self.mount("https://", secure_adapter)
self.mount("http://", insecure_adapter)
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
for host in trusted_hosts:
self.add_trusted_host(host, suppress_logging=True)
def update_index_urls(self, new_index_urls: List[str]) -> None:
"""
:param new_index_urls: New index urls to update the authentication
handler with.
"""
self.auth.index_urls = new_index_urls
def add_trusted_host(
self, host: str, source: Optional[str] = None, suppress_logging: bool = False
) -> None:
"""
:param host: It is okay to provide a host that has previously been
added.
:param source: An optional source string, for logging where the host
string came from.
"""
if not suppress_logging:
msg = f"adding trusted host: {host!r}"
if source is not None:
msg += f" (from {source})"
logger.info(msg)
parsed_host, parsed_port = parse_netloc(host)
if parsed_host is None:
raise ValueError(f"Trusted host URL must include a host part: {host!r}")
if (parsed_host, parsed_port) not in self.pip_trusted_origins:
self.pip_trusted_origins.append((parsed_host, parsed_port))
self.mount(
build_url_from_netloc(host, scheme="http") + "/", self._trusted_host_adapter
)
self.mount(build_url_from_netloc(host) + "/", self._trusted_host_adapter)
if not parsed_port:
self.mount(
build_url_from_netloc(host, scheme="http") + ":",
self._trusted_host_adapter,
)
# Mount wildcard ports for the same host.
self.mount(build_url_from_netloc(host) + ":", self._trusted_host_adapter)
def iter_secure_origins(self) -> Generator[SecureOrigin, None, None]:
yield from SECURE_ORIGINS
for host, port in self.pip_trusted_origins:
yield ("*", host, "*" if port is None else port)
def is_secure_origin(self, location: Link) -> bool:
# Determine if this url used a secure transport mechanism
parsed = urllib.parse.urlparse(str(location))
origin_protocol, origin_host, origin_port = (
parsed.scheme,
parsed.hostname,
parsed.port,
)
# The protocol to use to see if the protocol matches.
# Don't count the repository type as part of the protocol: in
# cases such as "git+ssh", only use "ssh". (I.e., Only verify against
# the last scheme.)
origin_protocol = origin_protocol.rsplit("+", 1)[-1]
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in self.iter_secure_origins():
secure_protocol, secure_host, secure_port = secure_origin
if origin_protocol != secure_protocol and secure_protocol != "*":
continue
try:
addr = ipaddress.ip_address(origin_host or "")
network = ipaddress.ip_network(secure_host)
except ValueError:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if (
origin_host
and origin_host.lower() != secure_host.lower()
and secure_host != "*"
):
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
# Check to see if the port matches.
if (
origin_port != secure_port
and secure_port != "*"
and secure_port is not None
):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS we "
"recommend you use HTTPS instead, otherwise you may silence "
"this warning and allow it anyway with '--trusted-host %s'.",
origin_host,
origin_host,
)
return False
def request(self, method: str, url: str, *args: Any, **kwargs: Any) -> Response:
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Allow setting a default proxies on a session
kwargs.setdefault("proxies", self.proxies)
# Dispatch the actual request
return super().request(method, url, *args, **kwargs)
| PipSession |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_Y.py | {
"start": 1540,
"end": 2839
} | class ____(Benchmark):
r"""
Yao-Liu 9 objective function.
This class defines the Yao-Liu [1]_ function 9 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{YaoLiu09}}(x) = \sum_{i=1}^n \left [ x_i^2
- 10 \cos(2 \pi x_i ) + 10 \right ]
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-5.12, 5.12]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Yao X., Liu Y. (1997) Fast evolution strategies.
In: Angeline P.J., Reynolds R.G., McDonnell J.R., Eberhart R. (eds)
Evolutionary Programming VI. EP 1997.
Lecture Notes in Computer Science, vol 1213. Springer, Berlin, Heidelberg
.. [2] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return sum(x ** 2.0 - 10.0 * cos(2 * pi * x) + 10)
| YaoLiu09 |
python | apache__airflow | airflow-core/tests/unit/jobs/test_triggerer_job.py | {
"start": 43394,
"end": 46522
} | class ____:
def test_message_types_in_triggerer(self):
"""
Test that ToSupervisor is a superset of ToTriggerSupervisor and ToTask is a superset of ToTriggerRunner.
This test ensures that when new message types are added to ToSupervisor or ToTask,
they are also properly handled in ToTriggerSupervisor and ToTriggerSupervisor.
"""
def get_type_names(union_type):
union_args = typing.get_args(union_type.__args__[0])
return {arg.__name__ for arg in union_args}
supervisor_types = get_type_names(ToSupervisor)
task_types = get_type_names(ToTask)
trigger_supervisor_types = get_type_names(ToTriggerSupervisor)
trigger_runner_types = get_type_names(ToTriggerRunner)
in_supervisor_but_not_in_trigger_supervisor = {
"DeferTask",
"GetAssetByName",
"GetAssetByUri",
"GetAssetEventByAsset",
"GetAssetEventByAssetAlias",
"GetPrevSuccessfulDagRun",
"GetPreviousDagRun",
"GetTaskBreadcrumbs",
"GetTaskRescheduleStartDate",
"GetXComCount",
"GetXComSequenceItem",
"GetXComSequenceSlice",
"RescheduleTask",
"RetryTask",
"SetRenderedFields",
"SkipDownstreamTasks",
"SucceedTask",
"ValidateInletsAndOutlets",
"TaskState",
"TriggerDagRun",
"ResendLoggingFD",
"CreateHITLDetailPayload",
"SetRenderedMapIndex",
}
in_task_but_not_in_trigger_runner = {
"AssetResult",
"AssetEventsResult",
"SentFDs",
"StartupDetails",
"TaskBreadcrumbsResult",
"TaskRescheduleStartDate",
"InactiveAssetsResult",
"CreateHITLDetailPayload",
"PrevSuccessfulDagRunResult",
"XComCountResponse",
"XComSequenceIndexResult",
"XComSequenceSliceResult",
"PreviousDagRunResult",
"HITLDetailRequestResult",
}
supervisor_diff = (
supervisor_types - trigger_supervisor_types - in_supervisor_but_not_in_trigger_supervisor
)
task_diff = task_types - trigger_runner_types - in_task_but_not_in_trigger_runner
assert not supervisor_diff, (
f"New message types in ToSupervisor not handled in ToTriggerSupervisor: "
f"{len(supervisor_diff)} types found:\n"
+ "\n".join(f" - {t}" for t in sorted(supervisor_diff))
+ "\n\nEither handle these types in ToTriggerSupervisor or update in_supervisor_but_not_in_trigger_supervisor list."
)
assert not task_diff, (
f"New message types in ToTask not handled in ToTriggerRunner: "
f"{len(task_diff)} types found:\n"
+ "\n".join(f" - {t}" for t in sorted(task_diff))
+ "\n\nEither handle these types in ToTriggerRunner or update in_task_but_not_in_trigger_runner list."
)
| TestTriggererMessageTypes |
python | python-poetry__poetry | src/poetry/puzzle/provider.py | {
"start": 2259,
"end": 3138
} | class ____(Exception):
"""
Exception when there are duplicate dependencies with incompatible constraints.
"""
def __init__(
self, package: Package, *dependencies: Dependency, with_sources: bool = False
) -> None:
constraints = []
for dep in dependencies:
constraint = dep.to_pep_508()
if dep.is_direct_origin():
# add version info because issue might be a version conflict
# with a version constraint
constraint += f" ({dep.constraint})"
if with_sources and dep.source_name:
constraint += f" ; source={dep.source_name}"
constraints.append(constraint)
super().__init__(
f"Incompatible constraints in requirements of {package}:\n"
+ "\n".join(constraints)
)
| IncompatibleConstraintsError |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 181724,
"end": 182370
} | class ____:
def test_zero(self):
assert_equal(stats.expon.pdf(0), 1)
def test_tail(self): # Regression test for ticket 807
assert_equal(stats.expon.cdf(1e-18), 1e-18)
assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(ValueError, stats.expon.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(ValueError, stats.expon.fit, x)
| TestExpon |
python | getsentry__sentry | src/sentry/models/deploy.py | {
"start": 464,
"end": 3549
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
organization_id = BoundedBigIntegerField(db_index=True)
release = FlexibleForeignKey("sentry.Release")
environment_id = BoundedPositiveIntegerField(db_index=True)
date_finished = models.DateTimeField(default=timezone.now, db_index=True)
date_started = models.DateTimeField(null=True, blank=True)
name = models.CharField(max_length=64, null=True, blank=True)
url = models.URLField(null=True, blank=True)
notified = models.BooleanField(null=True, db_index=True, default=False)
class Meta:
app_label = "sentry"
db_table = "sentry_deploy"
@staticmethod
def get_lock_key(deploy_id):
return "deploy-notify:%s" % deploy_id
@classmethod
def notify_if_ready(cls, deploy_id, fetch_complete=False):
"""
create activity and send deploy notifications
if they haven't been sent
"""
from sentry.models.activity import Activity
from sentry.models.releasecommit import ReleaseCommit
from sentry.models.releaseheadcommit import ReleaseHeadCommit
lock_key = cls.get_lock_key(deploy_id)
lock = locks.get(lock_key, duration=30, name="deploy_notify")
with TimedRetryPolicy(10)(lock.acquire):
deploy = cls.objects.filter(id=deploy_id).select_related("release").get()
if deploy.notified:
return
release = deploy.release
environment = Environment.objects.get(
organization_id=deploy.organization_id, id=deploy.environment_id
)
if not fetch_complete:
release_has_commits = ReleaseCommit.objects.filter(
organization_id=release.organization_id, release=release
).exists()
if not release_has_commits:
# check if we have head commits, which
# would indicate that we're waiting for
# fetch_commits to complete
if ReleaseHeadCommit.objects.filter(
organization_id=release.organization_id, release=release
).exists():
return
activity = None
for project in deploy.release.projects.all():
activity = Activity.objects.create(
type=ActivityType.DEPLOY.value,
project=project,
ident=Activity.get_version_ident(release.version),
data={
"version": release.version,
"deploy_id": deploy.id,
"environment": environment.name,
},
datetime=deploy.date_finished,
)
# Somewhat hacky, only send notification for one
# Deploy Activity record because it will cover all projects
if activity is not None:
activity.send_notification()
deploy.update(notified=True)
| Deploy |
python | doocs__leetcode | solution/2400-2499/2453.Destroy Sequential Targets/Solution.py | {
"start": 0,
"end": 318
} | class ____:
def destroyTargets(self, nums: List[int], space: int) -> int:
cnt = Counter(v % space for v in nums)
ans = mx = 0
for v in nums:
t = cnt[v % space]
if t > mx or (t == mx and v < ans):
ans = v
mx = t
return ans
| Solution |
python | pytorch__pytorch | test/test_maskedtensor.py | {
"start": 21883,
"end": 35072
} | class ____(TestCase):
def test_max_not_implemented(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m)
with self.assertRaisesRegex(TypeError, "torch._ops.aten.max.default"):
mt.max()
def test_sum(self):
d = torch.tensor([[0, 1, 2, 6], [3, 4, 5.0, 7]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(17.0), torch.tensor(True)), mt.sum())
_compare_mts(
masked_tensor(
torch.tensor([0.0, 4.0, 1.0, 13]),
torch.tensor([True, True, False, True]),
),
mt.sum(dim=0),
)
def test_sum_grad(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.sum().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor(1.0).expand_as(m), m))
def test_mean(self):
d = torch.tensor([[0, 1, 3, 2], [3, 4, 1.0, 4]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(2.5), torch.tensor(True)), mt.mean())
_compare_mts(
masked_tensor(
torch.tensor([0.0, 4.0, 1.0, 3]),
torch.tensor([True, True, False, True]),
),
mt.mean(dim=0),
)
"""
The following block of tests "test_mean_grad_case_1[a through e] are used to test the functionality of
the two different ways of constructing MaskedTensors:
masked_tensor(data, mask, requires_grad=True/False) -- NO differentiable constructor and always a leaf
as_masked_tensor(data, mask) -- differentiable constructor
Like torch.tensor(data), masked_tensor(data, mask) will provide a UserWarning if data.requires_grad=True
as_masked_tensor does not take in requires_grad -- it just takes on the requires_grad from data
Therefore, there are 6 cases to test and we use `mean` as a proxy to test the different combinations
Assuming mt.mean().backward() is run after each constructor:
Case 1a:
values.requires_grad = True
mt = masked_tensor(values, mask, requires_grad=True)
yields
- Provide a UserWarning because values.requires_grad=True
- values.grad = None
- mt.grad is a MaskedTensor with the correct gradient
Case 1b:
values.requires_grad = False
mt = masked_tensor(values, mask, requires_grad=True)
yields
- values.grad = None
- mt.grad is a MaskedTensor with the correct gradient
Case 2a/2b:
values.requires_grad = True/False
mt = masked_tensor(values, mask, requires_grad=False)
will both yield a RuntimeError of "element 0 of tensors does not require grad and does not have a grad_fn"
as expected. When values.requires_grad=True, we will also get a UserWarning
Case 3a:
values.requires_grad = True
mt = as_masked_tensor(values, mask)
yields
- values.grad is a MaskedTensor with the correct gradient
- mt.grad is None and gives a UserWarning that
"The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad"
Case 3b:
values.requires_grad = False
mt = as_masked_tensor(values, mask)
will yield a RuntimeError of "element 0 of tensors does not require grad and does not have a grad_fn"
as expected.
"""
def test_mean_grad_case_1a(self):
""" values.requires_grad = True
mt = masked_tensor(values, mask, requires_grad=True)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]], requires_grad=True)
m = torch.tensor([[True, False, False], [False, True, False]])
with self.assertWarnsRegex(UserWarning, "It is not recommended to create a MaskedTensor"):
mt = masked_tensor(d, m, requires_grad=True)
mt.mean().backward()
self.assertIsNone(d.grad)
_compare_mts(mt.grad, masked_tensor(torch.tensor([[0.5, 0, 0], [0, 0.5, 0]]), m))
def test_mean_grad_case_1b(self):
""" values.requires_grad = False
mt = masked_tensor(values, mask, requires_grad=True)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.mean().backward()
self.assertIsNone(d.grad)
_compare_mts(mt.grad, masked_tensor(torch.tensor([[0.5, 0, 0], [0, 0.5, 0]]), m))
def test_mean_grad_case_1c(self):
""" values.requires_grad = True
mt = masked_tensor(values, mask, requires_grad=False)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]], requires_grad=True)
m = torch.tensor([[True, False, False], [False, True, False]])
with self.assertWarnsRegex(UserWarning, "It is not recommended to create a MaskedTensor"):
mt = masked_tensor(d, m, requires_grad=False)
result = mt.mean()
msg = "element 0 of tensors does not require grad and does not have a grad_fn"
with self.assertRaisesRegex(RuntimeError, msg):
result.backward()
def test_mean_grad_case_1d(self):
""" values.requires_grad = False
mt = masked_tensor(values, mask, requires_grad=False)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=False)
result = mt.mean()
msg = "element 0 of tensors does not require grad and does not have a grad_fn"
with self.assertRaisesRegex(RuntimeError, msg):
result.backward()
def test_mean_grad_case_1e(self):
""" values.requires_grad = True
mt = as_masked_tensor(values, mask)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]], requires_grad=True)
m = torch.tensor([[True, False, False], [False, True, False]])
mt = as_masked_tensor(d, m)
mt.mean().backward()
_compare_mts(d.grad, masked_tensor(torch.tensor([[0.5, 0, 0], [0, 0.5, 0]]), m))
msg = "The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad"
with self.assertWarnsRegex(UserWarning, msg):
self.assertIsNone(mt.grad)
def test_mean_grad_case_1f(self):
""" values.requires_grad = False
mt = as_masked_tensor(values, mask)
"""
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = as_masked_tensor(d, m)
result = mt.mean()
msg = "element 0 of tensors does not require grad and does not have a grad_fn"
with self.assertRaisesRegex(RuntimeError, msg):
result.backward()
def test_mean_dim_grad(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, True, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.mean(1).sum().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor([[0.5, 0.5, 0], [0, 1, 0]]), m))
def test_amax(self):
d = torch.tensor([[0, 1, 3, -3], [3, -4, 1.0, 3]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(3.0), torch.tensor(True)), mt.amax())
_compare_mts(
masked_tensor(
torch.tensor([0.0, -4.0, 1.0, 3]),
torch.tensor([True, True, False, True]),
),
mt.amax(dim=0),
)
def test_amax_grad(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.amax().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor([[0.0, 0, 0], [0, 1, 0]]), m))
def test_amin(self):
d = torch.tensor([[0, 1, 3, -3], [3, -4, 1.0, 3]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(-4.0), torch.tensor(True)), mt.amin())
_compare_mts(
masked_tensor(
torch.tensor([0.0, -4.0, 1.0, -3]),
torch.tensor([True, True, False, True]),
),
mt.amin(dim=0),
)
def test_amin_grad(self):
d = torch.tensor([[0, 1, 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.amin().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor([[1.0, 0, 0], [0, 0, 0]]), m))
def test_prod(self):
d = torch.tensor([[0, 1, 3, 0.0], [float("nan"), 4, 1.0, 5.0]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(0.0), torch.tensor(True)), mt.prod())
_compare_mts(
masked_tensor(
torch.tensor([0.0, 4.0, 1.0, 0.0]),
torch.tensor([True, True, False, True]),
),
mt.prod(dim=0),
)
def test_prod_grad(self):
d = torch.tensor([[2, float("nan"), 2], [3, 4, 5.0]])
m = torch.tensor([[True, False, False], [False, True, False]])
mt = masked_tensor(d, m, requires_grad=True)
mt.prod().backward()
_compare_mts(mt.grad, masked_tensor(torch.tensor([[4.0, 0, 0], [0, 2, 0]]), m))
def test_all(self):
d = torch.tensor([[True, True, False, False], [False, True, True, True]])
m = torch.tensor([[True, False, False, True], [False, True, False, True]])
mt = masked_tensor(d, m)
_compare_mts(masked_tensor(torch.tensor(False), torch.tensor(True)), mt.all())
_compare_mts(
masked_tensor(
torch.tensor([True, True, True, False]),
torch.tensor([True, True, False, True]),
),
mt.all(dim=0),
)
m = torch.tensor([[True, False, True, False], [False, True, False, False]])
mt = masked_tensor(d, m)
_compare_mts(
masked_tensor(
torch.tensor([True, True, False, True]),
torch.tensor([True, True, True, False]),
),
mt.all(dim=0),
)
def test_grad_dtype(self):
d = torch.tensor([[True, True, False], [False, True, True]])
m = torch.tensor([[True, False, False], [False, True, False]])
msg = "Only Tensors of floating point and complex dtype can require gradients"
with self.assertRaisesRegex(RuntimeError, msg):
masked_tensor(d, m, requires_grad=True)
def test_any_true_dtype(self):
mt = torch.masked.MaskedTensor(
torch.rand(2, 2),
torch.rand(2, 2) > 0.5
)
msg = "expected a boolean tensor"
with self.assertRaisesRegex(ValueError, msg):
mt._is_any_true()
def test__is_any_true(self):
mt = torch.masked.MaskedTensor(
torch.tensor([[True, True, False], [False, False, True]]),
torch.tensor([[True, False, False], [False, True, False]]),
)
_compare_mts(
masked_tensor(torch.tensor(True), torch.tensor(True)),
mt._is_any_true(),
)
def test__is_any_true_false(self):
mt = torch.masked.MaskedTensor(
torch.tensor([[True, True, False], [False, False, True]]),
torch.tensor([[False, False, False], [False, False, False]]),
)
_compare_mts(
masked_tensor(torch.tensor(False), torch.tensor(True),),
mt._is_any_true(),
)
def test_backward(self):
# See https://github.com/pytorch/pytorch/issues/128557
with torch.autograd.detect_anomaly():
mt = torch.masked.MaskedTensor(
torch.rand(2, 2),
torch.rand(2, 2) > 0.5,
requires_grad=True
)
mt.sum().backward()
def is_unary(op):
return op.name in UNARY_NAMES
def is_binary(op):
return op.name in BINARY_NAMES
def is_reduction(op):
return op.name in REDUCE_NAMES and op.name not in {"all", "mean", "std", "var"}
mt_unary_ufuncs = [op for op in unary_ufuncs if is_unary(op)]
mt_binary_ufuncs = [op for op in binary_ufuncs if is_binary(op)]
mt_reduction_ufuncs = [op for op in reduction_ops if is_reduction(op)]
MASKEDTENSOR_FLOAT_TYPES = {
torch.float16,
torch.float32,
torch.float64,
}
| TestReductions |
python | imageio__imageio | tests/test_grab.py | {
"start": 855,
"end": 2603
} | class ____:
has_clipboard = True
@classmethod
def grab(cls):
return np.zeros((8, 8, 3), np.uint8)
@classmethod
def grabclipboard(cls):
if cls.has_clipboard:
return np.zeros((9, 9, 3), np.uint8)
else:
return None
def test_grab_simulated():
# Hard to test for real, if only because its only fully suppored on
# Windows, but we can monkey patch so we can test all the imageio bits.
imageio.plugins.grab.BaseGrabFormat._ImageGrab = FakeImageGrab
imageio.plugins.grab.BaseGrabFormat._pillow_imported = True
_plat = sys.platform
sys.platform = "win32"
try:
im = iio.imread("<screen>")
assert im.shape == (8, 8, 3)
reader = iio.get_reader("<screen>")
im1 = reader.get_data(0)
im2 = reader.get_data(0)
im3 = reader.get_data(1)
assert im1.shape == (8, 8, 3)
assert im2.shape == (8, 8, 3)
assert im3.shape == (8, 8, 3)
im = iio.imread("<clipboard>")
assert im.shape == (9, 9, 3)
reader = iio.get_reader("<clipboard>")
im1 = reader.get_data(0)
im2 = reader.get_data(0)
im3 = reader.get_data(1)
assert im1.shape == (9, 9, 3)
assert im2.shape == (9, 9, 3)
assert im3.shape == (9, 9, 3)
# Grabbing from clipboard can fail if there is no image data to grab
FakeImageGrab.has_clipboard = False
with raises(RuntimeError):
im = iio.imread("<clipboard>")
finally:
sys.platform = _plat
imageio.plugins.grab.BaseGrabFormat._ImageGrab = None
imageio.plugins.grab.BaseGrabFormat._pillow_imported = False
FakeImageGrab.has_clipboard = True
| FakeImageGrab |
python | cython__cython | docs/examples/tutorial/pure/cclass.py | {
"start": 31,
"end": 398
} | class ____:
cython.declare(a=cython.int, b=cython.int)
c = cython.declare(cython.int, visibility='public')
d = cython.declare(cython.int) # private by default.
e = cython.declare(cython.int, visibility='readonly')
def __init__(self, a, b, c, d=5, e=3):
self.a = a
self.b = b
self.c = c
self.d = d
self.e = e
| A |
python | openai__openai-python | src/openai/types/moderation.py | {
"start": 3133,
"end": 4983
} | class ____(BaseModel):
harassment: List[Literal["text"]]
"""The applied input type(s) for the category 'harassment'."""
harassment_threatening: List[Literal["text"]] = FieldInfo(alias="harassment/threatening")
"""The applied input type(s) for the category 'harassment/threatening'."""
hate: List[Literal["text"]]
"""The applied input type(s) for the category 'hate'."""
hate_threatening: List[Literal["text"]] = FieldInfo(alias="hate/threatening")
"""The applied input type(s) for the category 'hate/threatening'."""
illicit: List[Literal["text"]]
"""The applied input type(s) for the category 'illicit'."""
illicit_violent: List[Literal["text"]] = FieldInfo(alias="illicit/violent")
"""The applied input type(s) for the category 'illicit/violent'."""
self_harm: List[Literal["text", "image"]] = FieldInfo(alias="self-harm")
"""The applied input type(s) for the category 'self-harm'."""
self_harm_instructions: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/instructions")
"""The applied input type(s) for the category 'self-harm/instructions'."""
self_harm_intent: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/intent")
"""The applied input type(s) for the category 'self-harm/intent'."""
sexual: List[Literal["text", "image"]]
"""The applied input type(s) for the category 'sexual'."""
sexual_minors: List[Literal["text"]] = FieldInfo(alias="sexual/minors")
"""The applied input type(s) for the category 'sexual/minors'."""
violence: List[Literal["text", "image"]]
"""The applied input type(s) for the category 'violence'."""
violence_graphic: List[Literal["text", "image"]] = FieldInfo(alias="violence/graphic")
"""The applied input type(s) for the category 'violence/graphic'."""
| CategoryAppliedInputTypes |
python | apache__thrift | lib/py/src/transport/TTransport.py | {
"start": 3352,
"end": 3560
} | class ____(object):
"""Factory transport that builds buffered transports"""
def getTransport(self, trans):
buffered = TBufferedTransport(trans)
return buffered
| TBufferedTransportFactory |
python | astropy__astropy | astropy/units/tests/test_format.py | {
"start": 13896,
"end": 39498
} | class ____(RoundtripBase):
format_ = u_format.OGIP
@pytest.mark.parametrize(
"unit",
[
unit
for unit in u_format.OGIP._units.values()
if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
],
ids=str,
)
def test_roundtrip(self, unit):
if str(unit) == "0.001 Crab":
# Special-case mCrab, which the default check does not recognize
# as a deprecated unit.
with pytest.warns(UnitsWarning):
s = unit.to_string(self.format_)
a = Unit(s, format=self.format_)
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-9)
else:
self.check_roundtrip(unit)
if str(unit) in ("mag", "byte", "Crab"):
# Skip mag and byte, which decompose into dex and bit, resp.,
# both of which are unknown to OGIP, as well as Crab, which does
# not decompose, and thus gives a deprecated unit warning.
return
power_of_ten = np.log10(unit.decompose().scale)
if abs(power_of_ten - round(power_of_ten)) > 1e-3:
ctx = pytest.warns(UnitsWarning, match="power of 10")
elif str(unit) == "0.001 Crab":
ctx = pytest.warns(UnitsWarning, match="deprecated")
else:
ctx = nullcontext()
with ctx:
self.check_roundtrip_decompose(unit)
@pytest.mark.parametrize(
"unit_formatter_class,n_units",
[(u_format.FITS, 766), (u_format.VOUnit, 1304), (u_format.CDS, 3326)],
)
def test_units_available(unit_formatter_class, n_units):
assert len(unit_formatter_class._units) == n_units
def test_cds_non_ascii_unit():
"""Regression test for #5350. This failed with a decoding error as
μas could not be represented in ascii."""
with cds.enable():
u.radian.find_equivalent_units(include_prefix_units=True)
def test_latex():
fluxunit = u.erg / (u.cm**2 * u.s)
assert fluxunit.to_string("latex") == r"$\mathrm{\frac{erg}{s\,cm^{2}}}$"
def test_new_style_latex():
fluxunit = u.erg / (u.cm**2 * u.s)
assert f"{fluxunit:latex}" == r"$\mathrm{\frac{erg}{s\,cm^{2}}}$"
def test_latex_scale():
fluxunit = u.Unit(1.0e-24 * u.erg / (u.cm**2 * u.s * u.Hz))
latex = r"$\mathrm{1 \times 10^{-24}\,\frac{erg}{Hz\,s\,cm^{2}}}$"
assert fluxunit.to_string("latex") == latex
def test_latex_inline_scale():
fluxunit = u.Unit(1.0e-24 * u.erg / (u.cm**2 * u.s * u.Hz))
latex_inline = r"$\mathrm{1 \times 10^{-24}\,erg\,Hz^{-1}\,s^{-1}\,cm^{-2}}$"
assert fluxunit.to_string("latex_inline") == latex_inline
@pytest.mark.parametrize(
"format_spec, string, decomposed",
[
("generic", "erg / (Angstrom s cm2)", "1e+07 kg / (m s3)"),
("s", "erg / (Angstrom s cm2)", "1e+07 kg / (m s3)"),
("console", "erg Angstrom^-1 s^-1 cm^-2", "10000000 kg m^-1 s^-3"),
(
"latex",
r"$\mathrm{\frac{erg}{\mathring{A}\,s\,cm^{2}}}$",
r"$\mathrm{10000000\,\frac{kg}{m\,s^{3}}}$",
),
(
"latex_inline",
r"$\mathrm{erg\,\mathring{A}^{-1}\,s^{-1}\,cm^{-2}}$",
r"$\mathrm{10000000\,kg\,m^{-1}\,s^{-3}}$",
),
("unicode", "erg Å⁻¹ s⁻¹ cm⁻²", "10000000 kg m⁻¹ s⁻³"),
(">25s", " erg / (Angstrom s cm2)", " 1e+07 kg / (m s3)"),
("cds", "erg.Angstrom-1.s-1.cm-2", "10000000kg.m-1.s-3"),
("ogip", "erg / (angstrom s cm**2)", "1e+07 kg / (m s**3)"),
("fits", "erg Angstrom-1 s-1 cm-2", "10**7 kg m-1 s-3"),
("vounit", "erg.Angstrom**-1.s**-1.cm**-2", "10000000kg.m**-1.s**-3"),
# TODO: make fits and vounit less awful!
],
)
def test_format_styles(format_spec, string, decomposed):
fluxunit = u.erg / (u.cm**2 * u.s * u.Angstrom)
if format_spec == "vounit":
# erg and Angstrom are deprecated in vounit.
with pytest.warns(UnitsWarning, match="deprecated"):
formatted = format(fluxunit, format_spec)
else:
formatted = format(fluxunit, format_spec)
assert formatted == string
# Decomposed mostly to test that scale factors are dealt with properly
# in the various formats.
assert format(fluxunit.decompose(), format_spec) == decomposed
@pytest.mark.parametrize(
"format_spec, fraction, string, decomposed",
[
("generic", False, "erg s-1 cm-2", "0.001 kg s-3"),
(
"console",
"multiline",
" erg \n------\ns cm^2",
" kg \n0.001 ---\n s^3",
),
("console", "inline", "erg / (s cm^2)", "0.001 kg / s^3"),
("unicode", "multiline", " erg \n─────\ns cm²", " kg\n0.001 ──\n s³"),
("unicode", "inline", "erg / (s cm²)", "0.001 kg / s³"),
(
"latex",
False,
r"$\mathrm{erg\,s^{-1}\,cm^{-2}}$",
r"$\mathrm{0.001\,kg\,s^{-3}}$",
),
(
"latex",
"inline",
r"$\mathrm{erg / (s\,cm^{2})}$",
r"$\mathrm{0.001\,kg / s^{3}}$",
),
# TODO: make generic with fraction=False less awful!
],
)
def test_format_styles_non_default_fraction(format_spec, fraction, string, decomposed):
fluxunit = u.erg / (u.cm**2 * u.s)
assert fluxunit.to_string(format_spec, fraction=fraction) == string
assert fluxunit.decompose().to_string(format_spec, fraction=fraction) == decomposed
@pytest.mark.parametrize("format_spec", u_format.Base.registry)
def test_multiline_fraction_different_if_available(format_spec):
fluxunit = u.W / u.m**2
inline_format = fluxunit.to_string(format_spec, fraction="inline")
if format_spec in ["generic", "cds", "fits", "ogip", "vounit"]:
with pytest.warns(UnitsWarning, match="does not support multiline"):
multiline_format = fluxunit.to_string(format_spec, fraction="multiline")
assert multiline_format == inline_format
else:
multiline_format = fluxunit.to_string(format_spec, fraction="multiline")
assert multiline_format != inline_format
@pytest.mark.parametrize("format_spec", u_format.Base.registry)
def test_unknown_fraction_style(format_spec):
fluxunit = u.W / u.m**2
msg = r"^fraction can only be False, 'inline', or 'multiline', not 'parrot'\.$"
with pytest.raises(ValueError, match=msg):
fluxunit.to_string(format_spec, fraction="parrot")
def test_flatten_to_known():
myunit = u.def_unit("FOOBAR_One", u.erg / u.Hz)
assert myunit.to_string("fits") == "erg Hz-1"
myunit2 = myunit * u.bit**3
assert myunit2.to_string("fits") == "bit3 erg Hz-1"
def test_flatten_impossible():
myunit = u.def_unit("FOOBAR_Two")
with u.add_enabled_units(myunit), pytest.raises(ValueError):
myunit.to_string("fits")
def test_console_out():
"""
Issue #436.
"""
u.Jy.decompose().to_string("console")
@pytest.mark.parametrize(
"test_pair",
list_format_string_pairs(
("generic", "10"),
("console", "10"),
("unicode", "10"),
("cds", "10"),
("latex", r"$\mathrm{10}$"),
),
ids=lambda x: x.format,
)
def test_scale_only(test_pair: FormatStringPair):
assert u.Unit(10).to_string(test_pair.format) == test_pair.string
def test_flexible_float():
assert u.min._represents.to_string("latex") == r"$\mathrm{60\,s}$"
def test_fits_to_string_function_error():
"""Test function raises TypeError on bad input.
This instead of returning None, see gh-11825.
"""
with pytest.raises(TypeError, match="unit argument must be"):
u_format.FITS.to_string(None)
def test_fraction_repr():
area = u.cm**2.0
assert "." not in area.to_string("latex")
fractional = u.cm**2.5
assert "5/2" in fractional.to_string("latex")
assert fractional.to_string("unicode") == "cm⁵⸍²"
def test_scale_effectively_unity():
"""Scale just off unity at machine precision level is OK.
Ensures #748 does not recur
"""
a = (3.0 * u.N).cgs
assert is_effectively_unity(a.unit.scale)
assert len(a.__repr__().split()) == 3
def test_percent():
"""Test that the % unit is properly recognized. Since % is a special
symbol, this goes slightly beyond the round-tripping tested above."""
assert u.Unit("%") == u.percent == u.Unit(0.01)
assert u.Unit("%", format="cds") == u.Unit(0.01)
assert u.Unit(0.01).to_string("cds") == "%"
with pytest.raises(ValueError):
u.Unit("%", format="fits")
with pytest.raises(ValueError):
u.Unit("%", format="vounit")
def test_scaled_dimensionless():
"""Test that scaled dimensionless units are properly recognized in generic
and CDS, but not in fits and vounit."""
assert u.Unit("0.1") == u.Unit(0.1) == 0.1 * u.dimensionless_unscaled
assert u.Unit("1.e-4") == u.Unit(1.0e-4)
assert u.Unit("10-4", format="cds") == u.Unit(1.0e-4)
assert u.Unit("10+8").to_string("cds") == "10+8"
with pytest.raises(ValueError):
u.Unit(0.15).to_string("fits")
assert u.Unit(0.1).to_string("fits") == "10**-1"
with pytest.raises(ValueError):
u.Unit(0.1).to_string("vounit")
def test_deprecated_did_you_mean_units():
with pytest.raises(ValueError) as exc_info:
u.Unit("ANGSTROM", format="fits")
assert "Did you mean Angstrom or angstrom?" in str(exc_info.value)
with pytest.raises(ValueError) as exc_info:
u.Unit("crab", format="ogip")
assert "Crab (deprecated)" in str(exc_info.value)
assert "mCrab (deprecated)" in str(exc_info.value)
with pytest.raises(
ValueError,
match=(
r"Did you mean 0\.1nm, Angstrom \(deprecated\) or angstrom \(deprecated\)\?"
),
):
u.Unit("ANGSTROM", format="vounit")
with pytest.warns(UnitsWarning, match=r".* 0\.1nm\.") as w:
u.Unit("angstrom", format="vounit")
assert len(w) == 1
def test_invalid_deprecated_units_handling():
with pytest.raises(
ValueError,
match=(
r"^invalid deprecation handling option: 'ignore'\. Valid options are "
r"'silent', 'warn', 'raise', 'convert'\.$"
),
):
u.erg.to_string(format="vounit", deprecations="ignore")
@pytest.mark.parametrize(
"unit,string",
[
pytest.param(u.erg, "cm**2.g.s**-2", id="simple unit"),
pytest.param(u.erg / u.s, "cm**2.g.s**-3", id="composite unit"),
],
)
def test_deprecated_units_conversion_success(unit, string):
assert unit.to_string(format="vounit", deprecations="convert") == string
def test_deprecated_units_conversion_failure():
Crab = u_format.OGIP._units["Crab"]
with pytest.warns(
UnitsWarning,
match=(
r"^The unit 'Crab' has been deprecated in the OGIP standard\. "
r"It cannot be automatically converted\.$"
),
):
assert Crab.to_string(format="ogip", deprecations="convert") == "Crab"
@pytest.mark.parametrize("string", ["mag(ct/s)", "dB(mW)", "dex(cm s**-2)"])
def test_fits_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError):
u_format.FITS().parse(string)
@pytest.mark.parametrize("string", ["mag(ct/s)", "dB(mW)", "dex(cm s**-2)"])
def test_vounit_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError), warnings.catch_warnings():
# ct, dex also raise warnings - irrelevant here.
warnings.simplefilter("ignore")
u_format.VOUnit().parse(string)
def test_vounit_binary_prefix():
assert u.Unit("KiB", format="vounit") == u.Unit("1024 B")
assert u.Unit("Kibyte", format="vounit") == u.Unit("1024 B")
assert u.Unit("Kibit", format="vounit") == u.Unit("128 B")
with pytest.raises(ValueError, match="not supported by the VOUnit standard"):
u.Unit("kibibyte", format="vounit")
def test_vounit_unknown():
assert u.Unit("unknown", format="vounit") is None
assert u.Unit("UNKNOWN", format="vounit") is None
assert u.Unit("", format="vounit") is u.dimensionless_unscaled
def test_vounit_details():
assert u.Unit("Pa", format="vounit") is u.Pascal
assert u.Unit("ka", format="vounit") == u.Unit("1000 yr")
assert u.Unit("pix", format="vounit") == u.Unit("pixel", format="vounit")
# Regression test for astropy/astroquery#2480
assert u.Unit("Sun", format="vounit") is u.Sun
# Test that adding a prefix to a simple units raises a warning
with pytest.warns(
UnitsWarning, match="Unit 'kdB' not supported by the VOUnit standard.*"
):
u.Unit("kdB", format="vounit", parse_strict="warn")
# The da- prefix is not allowed, and the d- prefix is discouraged
assert u.dam.to_string("vounit") == "10m"
assert u.Unit("dam dag").to_string("vounit") == "100g.m"
# Parse round-trip
with pytest.warns(UnitsWarning, match="deprecated"):
flam = u.erg / u.cm / u.cm / u.s / u.AA
x = u.format.VOUnit.to_string(flam)
assert x == "erg.Angstrom**-1.s**-1.cm**-2"
new_flam = u.format.VOUnit.parse(x)
assert new_flam == flam
@pytest.mark.parametrize(
"unit, vounit, number, scale, voscale",
[
("nm", "nm", 0.1, "10^-1", "0.1"),
("fm", "fm", 100.0, "10+2", "100"),
("m^2", "m**2", 100.0, "100.0", "100"),
("cm", "cm", 2.54, "2.54", "2.54"),
("kg", "kg", 1.898124597e27, "1.898124597E27", "1.8981246e+27"),
("m/s", "m.s**-1", 299792458.0, "299792458", "2.9979246e+08"),
("cm2", "cm**2", 1.0e-20, "10^(-20)", "1e-20"),
],
)
def test_vounit_scale_factor(unit, vounit, number, scale, voscale):
x = u.Unit(f"{scale} {unit}")
assert x == number * u.Unit(unit)
assert x.to_string(format="vounit") == voscale + vounit
@pytest.mark.parametrize(
"unit, vounit",
[
("m s^-1", "m/s"),
("s^-1", "1/s"),
("100 s^-2", "100/s**2"),
("kg m-1 s-2", "kg/(m.s**2)"),
],
)
@pytest.mark.parametrize("fraction", [True, "inline"])
def test_vounit_fraction(unit, vounit, fraction):
x = u.Unit(unit)
assert x.to_string(format="vounit", fraction=fraction) == vounit
@pytest.mark.parametrize(
"unit, vounit",
[
("m^2", "m**2"),
("s^-1", "s**-1"),
("s(0.333)", "s**(0.333)"),
("s(-0.333)", "s**(-0.333)"),
("s(1/3)", "s**(1/3)"),
("s(-1/3)", "s**(-1/3)"),
],
)
def test_vounit_power(unit, vounit):
x = u.Unit(unit)
assert x.to_string(format="vounit") == vounit
def test_vounit_custom():
x = u.Unit("'foo' m", format="vounit")
x_vounit = x.to_string("vounit")
assert x_vounit == "'foo'.m"
x_string = x.to_string()
assert x_string == "foo m"
x = u.Unit("m'foo' m", format="vounit")
assert x.bases[1]._represents.scale == 0.001
x_vounit = x.to_string("vounit")
assert x_vounit == "m.m'foo'"
x_string = x.to_string()
assert x_string == "m mfoo"
def test_vounit_implicit_custom():
# Yikes, this becomes "femto-urlong"... But at least there's a warning.
with pytest.warns(UnitsWarning) as w:
x = u.Unit("furlong/week", format="vounit", parse_strict="warn")
assert x.bases[0]._represents.scale == 1e-15
assert x.bases[0]._represents.bases[0].name == "urlong"
assert len(w) == 2
assert "furlong" in str(w[0].message)
assert "week" in str(w[1].message)
@pytest.mark.parametrize(
"scale, number, string",
[
("10+2", 100, "10**2"),
("10(+2)", 100, "10**2"),
("10**+2", 100, "10**2"),
("10**(+2)", 100, "10**2"),
("10^+2", 100, "10**2"),
("10^(+2)", 100, "10**2"),
("10**2", 100, "10**2"),
("10**(2)", 100, "10**2"),
("10^2", 100, "10**2"),
("10^(2)", 100, "10**2"),
("10-20", 10 ** (-20), "10**-20"),
("10(-20)", 10 ** (-20), "10**-20"),
("10**-20", 10 ** (-20), "10**-20"),
("10**(-20)", 10 ** (-20), "10**-20"),
("10^-20", 10 ** (-20), "10**-20"),
("10^(-20)", 10 ** (-20), "10**-20"),
],
)
def test_fits_scale_factor(scale, number, string):
x = u.Unit(scale + " erg/(s cm**2 Angstrom)", format="fits")
assert x == number * (u.erg / u.s / u.cm**2 / u.Angstrom)
assert x.to_string(format="fits") == string + " erg Angstrom-1 s-1 cm-2"
x = u.Unit(scale + "*erg/(s cm**2 Angstrom)", format="fits")
assert x == number * (u.erg / u.s / u.cm**2 / u.Angstrom)
assert x.to_string(format="fits") == string + " erg Angstrom-1 s-1 cm-2"
def test_fits_scale_factor_errors():
with pytest.raises(ValueError):
x = u.Unit("1000 erg/(s cm**2 Angstrom)", format="fits")
with pytest.raises(ValueError):
x = u.Unit("12 erg/(s cm**2 Angstrom)", format="fits")
x = u.Unit(1.2 * u.erg)
with pytest.raises(ValueError):
x.to_string(format="fits")
x = u.Unit(100.0 * u.erg)
assert x.to_string(format="fits") == "10**2 erg"
@pytest.mark.parametrize(
"unit, latex, unicode",
[
(u.deg, r"$\mathrm{{}^{\circ}}$", "°"),
(u.deg**2, r"$\mathrm{deg^{2}}$", "deg²"),
(u.arcmin, r"$\mathrm{{}^{\prime}}$", "′"),
(u.arcmin**2, r"$\mathrm{arcmin^{2}}$", "arcmin²"),
(u.arcsec, r"$\mathrm{{}^{\prime\prime}}$", "″"),
(u.arcsec**2, r"$\mathrm{arcsec^{2}}$", "arcsec²"),
(u.hourangle, r"$\mathrm{{}^{h}}$", "ʰ"),
(u.hourangle**2, r"$\mathrm{hourangle^{2}}$", "hourangle²"),
(u.electron, r"$\mathrm{e^{-}}$", "e⁻"),
(u.electron**2, r"$\mathrm{electron^{2}}$", "electron²"),
],
)
def test_double_superscript(unit, latex, unicode):
"""Regression test for #5870, #8699, #9218, #14403; avoid double superscripts."""
assert unit.to_string("latex") == latex
assert unit.to_string("unicode") == unicode
def test_no_prefix_superscript():
"""Regression test for gh-911 and #14419."""
assert u.mdeg.to_string("latex") == r"$\mathrm{mdeg}$"
assert u.narcmin.to_string("latex") == r"$\mathrm{narcmin}$"
assert u.parcsec.to_string("latex") == r"$\mathrm{parcsec}$"
assert u.mdeg.to_string("unicode") == "mdeg"
assert u.narcmin.to_string("unicode") == "narcmin"
assert u.parcsec.to_string("unicode") == "parcsec"
@pytest.mark.parametrize(
"power,expected",
(
(1.0, "m"),
(2.0, "m2"),
(-10, "1 / m10"),
(1.5, "m(3/2)"),
(2 / 3, "m(2/3)"),
(7 / 11, "m(7/11)"),
(-1 / 64, "1 / m(1/64)"),
(1 / 100, "m(1/100)"),
(2 / 101, "m(0.019801980198019802)"),
(Fraction(2, 101), "m(2/101)"),
),
)
def test_powers(power, expected):
"""Regression test for #9279 - powers should not be oversimplified."""
unit = u.m**power
s = unit.to_string()
assert s == expected
assert unit == s
@pytest.mark.parametrize(
"string,unit",
[
("\N{MICRO SIGN}g", u.microgram),
("\N{GREEK SMALL LETTER MU}g", u.microgram),
("g\N{MINUS SIGN}1", u.g ** (-1)),
("m\N{SUPERSCRIPT MINUS}\N{SUPERSCRIPT ONE}", u.m**-1),
("m s\N{SUPERSCRIPT MINUS}\N{SUPERSCRIPT ONE}", u.m / u.s),
("m\N{SUPERSCRIPT TWO}", u.m**2),
("m\N{SUPERSCRIPT PLUS SIGN}\N{SUPERSCRIPT TWO}", u.m**2),
("m\N{SUPERSCRIPT THREE}", u.m**3),
("m\N{SUPERSCRIPT ONE}\N{SUPERSCRIPT ZERO}", u.m**10),
("\N{GREEK CAPITAL LETTER OMEGA}", u.ohm),
("\N{OHM SIGN}", u.ohm), # deprecated but for compatibility
("\N{MICRO SIGN}\N{GREEK CAPITAL LETTER OMEGA}", u.microOhm),
("\N{ANGSTROM SIGN}", u.Angstrom),
("\N{ANGSTROM SIGN} \N{OHM SIGN}", u.Angstrom * u.Ohm),
("\N{LATIN CAPITAL LETTER A WITH RING ABOVE}", u.Angstrom),
("\N{LATIN CAPITAL LETTER A}\N{COMBINING RING ABOVE}", u.Angstrom),
("m\N{ANGSTROM SIGN}", u.milliAngstrom),
("°C", u.deg_C),
("°", u.deg),
("M⊙", u.Msun), # \N{CIRCLED DOT OPERATOR}
("L☉", u.Lsun), # \N{SUN}
("M⊕", u.Mearth), # normal earth symbol = \N{CIRCLED PLUS}
("M♁", u.Mearth), # be generous with \N{EARTH}
("R♃", u.Rjup), # \N{JUPITER}
("′", u.arcmin), # \N{PRIME}
("R∞", u.Ry),
("Mₚ", u.M_p),
],
)
def test_unicode(string, unit):
assert u_format.Generic.parse(string) == unit
assert u.Unit(string) == unit
# Should work in composites too.
assert u.Unit(f"{string}/s") == unit / u.s
assert u.Unit(f"m {string}") == u.m * unit
assert u.Unit(f"{string} {string}") == unit**2
# Not obvious that "°2" should be "deg**2", but not easy to reject,
# and "R♃²" should work. But don't run on examples with a space or that
# already end in a number.
if re.match(r"^\S*[^\d⁰¹²³⁴⁵⁶⁷⁸⁹]$", string):
assert u.Unit(f"{string}2") == unit**2
assert u.Unit(f"{string}/{string}") == u.dimensionless_unscaled
# Finally, check round-trip
assert u.Unit(unit.to_string("unicode")) == unit
@pytest.mark.parametrize(
"string",
[
"g\N{MICRO SIGN}",
"g\N{MINUS SIGN}",
"m\N{SUPERSCRIPT MINUS}1",
"m+\N{SUPERSCRIPT ONE}",
"m\N{MINUS SIGN}\N{SUPERSCRIPT ONE}",
"k\N{ANGSTROM SIGN}",
],
)
def test_unicode_failures(string):
with pytest.raises(ValueError):
u.Unit(string)
@pytest.mark.parametrize("format_", ("unicode", "latex", "latex_inline"))
def test_parse_error_message_for_output_only_format(format_):
with pytest.raises(NotImplementedError, match="not parse"):
u.Unit("m", format=format_)
@pytest.mark.parametrize(
"parser,error_type,err_msg_start",
[
pytest.param("foo", ValueError, "Unknown format 'foo'", id="ValueError"),
pytest.param(
{}, TypeError, "Expected a formatter name, not {}", id="TypeError"
),
],
)
def test_unknown_parser(parser, error_type, err_msg_start):
with pytest.raises(
error_type,
match=(
f"^{err_msg_start}\\.\nValid parser names are: "
"'cds', 'generic', 'fits', 'ogip', 'vounit'$"
),
):
u.Unit("m", format=parser)
@pytest.mark.parametrize(
"formatter,error_type,err_msg_start",
[
pytest.param("abc", ValueError, "Unknown format 'abc'", id="ValueError"),
pytest.param(
float,
TypeError,
"Expected a formatter name, not <class 'float'>",
id="TypeError",
),
],
)
def test_unknown_output_format(formatter, error_type, err_msg_start):
with pytest.raises(
error_type,
match=(
f"^{err_msg_start}\\.\nValid formatter names are: "
"'cds', 'console', 'generic', 'fits', 'latex', 'latex_inline', 'ogip', "
"'unicode', 'vounit'$"
),
):
u.m.to_string(formatter)
def test_celsius_fits():
assert u.Unit("Celsius", format="fits") == u.deg_C
assert u.Unit("deg C", format="fits") == u.deg_C
# check that compounds do what we expect: what do we expect?
assert u.Unit("deg C kg-1", format="fits") == u.C * u.deg / u.kg
assert u.Unit("Celsius kg-1", format="fits") == u.deg_C / u.kg
assert u.deg_C.to_string("fits") == "Celsius"
@pytest.mark.parametrize(
"test_pair",
list_format_string_pairs(
("generic", "dB(1 / m)"),
("latex", r"$\mathrm{dB\left(\frac{1}{m}\right)}$"),
("latex_inline", r"$\mathrm{dB\left(m^{-1}\right)}$"),
("console", "dB(m^-1)"),
("unicode", "dB(m⁻¹)"),
),
ids=lambda x: x.format,
)
def test_function_format_styles(test_pair: FormatStringPair):
dbunit = u.decibel(u.m**-1)
assert dbunit.to_string(test_pair.format) == test_pair.string
assert f"{dbunit:{test_pair.format}}" == test_pair.string
@pytest.mark.parametrize(
"format_spec, fraction, string",
[
("console", "multiline", " 1\ndB(-)\n m"),
("console", "inline", "dB(1 / m)"),
("unicode", "multiline", " 1\ndB(─)\n m"),
("unicode", "inline", "dB(1 / m)"),
("latex", False, r"$\mathrm{dB\left(m^{-1}\right)}$"),
("latex", "inline", r"$\mathrm{dB\left(1 / m\right)}$"),
],
)
def test_function_format_styles_non_default_fraction(format_spec, fraction, string):
dbunit = u.decibel(u.m**-1)
assert dbunit.to_string(format_spec, fraction=fraction) == string
@pytest.mark.parametrize(
"test_pair",
list_format_string_pairs(
("", "1"),
(".1g", "1"),
(".3g", "1"),
(".1e", "1.0"),
(".1f", "1.0"),
(".3e", "1.000"),
),
ids=lambda x: repr(x.format),
)
def test_format_latex_one(test_pair: FormatStringPair):
# see https://github.com/astropy/astropy/issues/12571
assert (
u_format.Latex.format_exponential_notation(1, test_pair.format)
== test_pair.string
)
def test_Fits_name_deprecation():
with pytest.warns(
AstropyDeprecationWarning,
match=(
r'^The class "Fits" has been renamed to "FITS" in version 7\.0\. '
r"The old name is deprecated and may be removed in a future version\.\n"
r" Use FITS instead\.$"
),
):
from astropy.units.format import Fits
assert Fits is u.format.FITS
@pytest.mark.parametrize("format_spec", ["generic", "unicode"])
def test_liter(format_spec):
assert format(u.liter, format_spec) == "l"
| TestRoundtripOGIP |
python | PyCQA__pylint | pylint/config/callback_actions.py | {
"start": 12340,
"end": 13391
} | class ____(_CallbackAction):
"""Action that has access to the ArgumentParser object."""
def __init__(
self,
option_strings: Sequence[str],
dest: str,
nargs: None = None,
const: None = None,
default: None = None,
type: None = None,
choices: None = None,
required: bool = False,
help: str = "",
metavar: str = "",
**kwargs: argparse.ArgumentParser,
) -> None:
self.parser = kwargs["parser"]
super().__init__(
option_strings,
dest,
0,
const,
default,
type,
choices,
required,
help,
metavar,
)
@abc.abstractmethod
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = None,
) -> None:
raise NotImplementedError # pragma: no cover
| _AccessParserAction |
python | doocs__leetcode | solution/0300-0399/0370.Range Addition/Solution.py | {
"start": 0,
"end": 275
} | class ____:
def getModifiedArray(self, length: int, updates: List[List[int]]) -> List[int]:
d = [0] * length
for l, r, c in updates:
d[l] += c
if r + 1 < length:
d[r + 1] -= c
return list(accumulate(d))
| Solution |
python | coleifer__peewee | peewee.py | {
"start": 262084,
"end": 262146
} | class ____(_ModelWriteQueryHelper, Delete):
pass
| ModelDelete |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 45411,
"end": 45526
} | class ____(BaseModel):
collection_data: Dict[str, "HardwareUsage"] = Field(..., description="")
| HardwareTelemetry |
python | encode__httpx | httpx/_exceptions.py | {
"start": 3237,
"end": 3342
} | class ____(TimeoutException):
"""
Timed out while receiving data from the host.
"""
| ReadTimeout |
python | tiangolo__fastapi | tests/test_serialize_response_model.py | {
"start": 170,
"end": 4276
} | class ____(BaseModel):
name: str = Field(alias="aliased_name")
price: Optional[float] = None
owner_ids: Optional[List[int]] = None
@app.get("/items/valid", response_model=Item)
def get_valid():
return Item(aliased_name="valid", price=1.0)
@app.get("/items/coerce", response_model=Item)
def get_coerce():
return Item(aliased_name="coerce", price="1.0")
@app.get("/items/validlist", response_model=List[Item])
def get_validlist():
return [
Item(aliased_name="foo"),
Item(aliased_name="bar", price=1.0),
Item(aliased_name="baz", price=2.0, owner_ids=[1, 2, 3]),
]
@app.get("/items/validdict", response_model=Dict[str, Item])
def get_validdict():
return {
"k1": Item(aliased_name="foo"),
"k2": Item(aliased_name="bar", price=1.0),
"k3": Item(aliased_name="baz", price=2.0, owner_ids=[1, 2, 3]),
}
@app.get(
"/items/valid-exclude-unset", response_model=Item, response_model_exclude_unset=True
)
def get_valid_exclude_unset():
return Item(aliased_name="valid", price=1.0)
@app.get(
"/items/coerce-exclude-unset",
response_model=Item,
response_model_exclude_unset=True,
)
def get_coerce_exclude_unset():
return Item(aliased_name="coerce", price="1.0")
@app.get(
"/items/validlist-exclude-unset",
response_model=List[Item],
response_model_exclude_unset=True,
)
def get_validlist_exclude_unset():
return [
Item(aliased_name="foo"),
Item(aliased_name="bar", price=1.0),
Item(aliased_name="baz", price=2.0, owner_ids=[1, 2, 3]),
]
@app.get(
"/items/validdict-exclude-unset",
response_model=Dict[str, Item],
response_model_exclude_unset=True,
)
def get_validdict_exclude_unset():
return {
"k1": Item(aliased_name="foo"),
"k2": Item(aliased_name="bar", price=1.0),
"k3": Item(aliased_name="baz", price=2.0, owner_ids=[1, 2, 3]),
}
client = TestClient(app)
def test_valid():
response = client.get("/items/valid")
response.raise_for_status()
assert response.json() == {"aliased_name": "valid", "price": 1.0, "owner_ids": None}
def test_coerce():
response = client.get("/items/coerce")
response.raise_for_status()
assert response.json() == {
"aliased_name": "coerce",
"price": 1.0,
"owner_ids": None,
}
def test_validlist():
response = client.get("/items/validlist")
response.raise_for_status()
assert response.json() == [
{"aliased_name": "foo", "price": None, "owner_ids": None},
{"aliased_name": "bar", "price": 1.0, "owner_ids": None},
{"aliased_name": "baz", "price": 2.0, "owner_ids": [1, 2, 3]},
]
def test_validdict():
response = client.get("/items/validdict")
response.raise_for_status()
assert response.json() == {
"k1": {"aliased_name": "foo", "price": None, "owner_ids": None},
"k2": {"aliased_name": "bar", "price": 1.0, "owner_ids": None},
"k3": {"aliased_name": "baz", "price": 2.0, "owner_ids": [1, 2, 3]},
}
def test_valid_exclude_unset():
response = client.get("/items/valid-exclude-unset")
response.raise_for_status()
assert response.json() == {"aliased_name": "valid", "price": 1.0}
def test_coerce_exclude_unset():
response = client.get("/items/coerce-exclude-unset")
response.raise_for_status()
assert response.json() == {"aliased_name": "coerce", "price": 1.0}
def test_validlist_exclude_unset():
response = client.get("/items/validlist-exclude-unset")
response.raise_for_status()
assert response.json() == [
{"aliased_name": "foo"},
{"aliased_name": "bar", "price": 1.0},
{"aliased_name": "baz", "price": 2.0, "owner_ids": [1, 2, 3]},
]
def test_validdict_exclude_unset():
response = client.get("/items/validdict-exclude-unset")
response.raise_for_status()
assert response.json() == {
"k1": {"aliased_name": "foo"},
"k2": {"aliased_name": "bar", "price": 1.0},
"k3": {"aliased_name": "baz", "price": 2.0, "owner_ids": [1, 2, 3]},
}
| Item |
python | chroma-core__chroma | chromadb/utils/embedding_functions/huggingface_embedding_function.py | {
"start": 249,
"end": 4646
} | class ____(EmbeddingFunction[Documents]):
"""
This class is used to get embeddings for a list of texts using the HuggingFace API.
It requires an API key and a model name. The default model name is "sentence-transformers/all-MiniLM-L6-v2".
"""
def __init__(
self,
api_key: Optional[str] = None,
model_name: str = "sentence-transformers/all-MiniLM-L6-v2",
api_key_env_var: str = "CHROMA_HUGGINGFACE_API_KEY",
):
"""
Initialize the HuggingFaceEmbeddingFunction.
Args:
api_key_env_var (str, optional): Environment variable name that contains your API key for the HuggingFace API.
Defaults to "CHROMA_HUGGINGFACE_API_KEY".
model_name (str, optional): The name of the model to use for text embeddings.
Defaults to "sentence-transformers/all-MiniLM-L6-v2".
"""
try:
import httpx
except ImportError:
raise ValueError(
"The httpx python package is not installed. Please install it with `pip install httpx`"
)
if api_key is not None:
warnings.warn(
"Direct api_key configuration will not be persisted. "
"Please use environment variables via api_key_env_var for persistent storage.",
DeprecationWarning,
)
if os.getenv("HUGGINGFACE_API_KEY") is not None:
self.api_key_env_var = "HUGGINGFACE_API_KEY"
else:
self.api_key_env_var = api_key_env_var
self.api_key = api_key or os.getenv(self.api_key_env_var)
if not self.api_key:
raise ValueError(
f"The {self.api_key_env_var} environment variable is not set."
)
self.model_name = model_name
self._api_url = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{model_name}"
self._session = httpx.Client()
self._session.headers.update({"Authorization": f"Bearer {self.api_key}"})
def __call__(self, input: Documents) -> Embeddings:
"""
Get the embeddings for a list of texts.
Args:
input (Documents): A list of texts to get embeddings for.
Returns:
Embeddings: The embeddings for the texts.
Example:
>>> hugging_face = HuggingFaceEmbeddingFunction(api_key_env_var="CHROMA_HUGGINGFACE_API_KEY")
>>> texts = ["Hello, world!", "How are you?"]
>>> embeddings = hugging_face(texts)
"""
# Call HuggingFace Embedding API for each document
response = self._session.post(
self._api_url,
json={"inputs": input, "options": {"wait_for_model": True}},
).json()
# Convert to numpy arrays
return [np.array(embedding, dtype=np.float32) for embedding in response]
@staticmethod
def name() -> str:
return "huggingface"
def default_space(self) -> Space:
return "cosine"
def supported_spaces(self) -> List[Space]:
return ["cosine", "l2", "ip"]
@staticmethod
def build_from_config(config: Dict[str, Any]) -> "EmbeddingFunction[Documents]":
api_key_env_var = config.get("api_key_env_var")
model_name = config.get("model_name")
if api_key_env_var is None or model_name is None:
assert False, "This code should not be reached"
return HuggingFaceEmbeddingFunction(
api_key_env_var=api_key_env_var, model_name=model_name
)
def get_config(self) -> Dict[str, Any]:
return {"api_key_env_var": self.api_key_env_var, "model_name": self.model_name}
def validate_config_update(
self, old_config: Dict[str, Any], new_config: Dict[str, Any]
) -> None:
if "model_name" in new_config:
raise ValueError(
"The model name cannot be changed after the embedding function has been initialized."
)
@staticmethod
def validate_config(config: Dict[str, Any]) -> None:
"""
Validate the configuration using the JSON schema.
Args:
config: Configuration to validate
Raises:
ValidationError: If the configuration does not match the schema
"""
validate_config_schema(config, "huggingface")
| HuggingFaceEmbeddingFunction |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 3652,
"end": 4160
} | class ____(BaseModel):
class Config:
extra = Extra.allow
type: Literal["CustomErrorHandler"]
class_name: str = Field(
...,
description="Fully-qualified name of the class that will be implementing the custom error handler. The format is `source_<name>.<package>.<class_name>`.",
examples=["source_railz.components.MyCustomErrorHandler"],
title="Class Name",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
| CustomErrorHandler |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-konko/llama_index/llms/konko/base.py | {
"start": 1458,
"end": 23123
} | class ____(LLM):
"""
Konko LLM.
Examples:
`pip install llama-index-llms-konko`
```python
import os
from llama_index.llms.konko import Konko
from llama_index.core.llms import ChatMessage
# Set up the Konko LLM with the desired model
llm = Konko(model="meta-llama/llama-2-13b-chat")
# Set the Konko API key
os.environ["KONKO_API_KEY"] = "<your-api-key>"
# Create a ChatMessage object
message = ChatMessage(role="user", content="Explain Big Bang Theory briefly")
# Call the chat method with the ChatMessage object
response = llm.chat([message])
# Print the response
print(response)
```
"""
model: str = Field(
default=DEFAULT_KONKO_MODEL, description="The konko model to use."
)
temperature: float = Field(
default=DEFAULT_TEMPERATURE,
description="The temperature to use during generation.",
ge=0.0,
le=1.0,
)
max_tokens: Optional[int] = Field(
default=DEFAULT_NUM_OUTPUTS,
description="The maximum number of tokens to generate.",
gt=0,
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the konko API."
)
max_retries: int = Field(
default=10, description="The maximum number of API retries.", ge=0
)
konko_api_key: str = Field(default=None, description="The konko API key.")
openai_api_key: str = Field(default=None, description="The Openai API key.")
api_type: str = Field(default=None, description="The konko API type.")
model_info_dict: Dict[str, ModelInfo]
def __init__(
self,
model: str = DEFAULT_KONKO_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: Optional[int] = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 10,
konko_api_key: Optional[str] = None,
openai_api_key: Optional[str] = None,
api_type: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
model_info_dict: Optional[Dict[str, ModelInfo]] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
(
konko_api_key,
openai_api_key,
api_type,
api_base,
api_version,
) = resolve_konko_credentials(
konko_api_key=konko_api_key,
openai_api_key=openai_api_key,
api_type=api_type,
api_base=api_base,
api_version=api_version,
)
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
callback_manager=callback_manager,
konko_api_key=konko_api_key,
openai_api_key=openai_api_key,
api_type=api_type,
api_version=api_version,
api_base=api_base,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
model_info_dict=self._create_model_info_dict(),
**kwargs,
)
def _get_model_name(self) -> str:
return self.model
@classmethod
def class_name(cls) -> str:
return "Konko_LLM"
def _create_model_info_dict(self) -> Dict[str, ModelInfo]:
models_info_dict = {}
if is_openai_v1():
models = konko.models.list().data
for model in models:
model_info = ModelInfo(
name=model.name,
max_context_length=model.max_context_length,
is_chat_model=model.is_chat,
)
models_info_dict[model.name] = model_info
else:
models = konko.Model.list().data
for model in models:
model_info = ModelInfo(
name=model["name"],
max_context_length=model["max_context_length"],
is_chat_model=model["is_chat"],
)
models_info_dict[model["name"]] = model_info
return models_info_dict
def _get_model_info(self) -> ModelInfo:
model_name = self._get_model_name()
model_info = self.model_info_dict.get(model_name)
if model_info is None:
raise ValueError(
f"Unknown model: {model_name}. Please provide a valid Konko model name. "
"Known models are: " + ", ".join(self.model_info_dict.keys())
)
return model_info
def _is_chat_model(self) -> bool:
"""
Check if the specified model is a chat model.
Args:
- model_id (str): The ID of the model to check.
Returns:
- bool: True if the model is a chat model, False otherwise.
Raises:
- ValueError: If the model_id is not found in the list of models.
"""
model_info = self._get_model_info()
return model_info.is_chat_model
@property
def metadata(self) -> LLMMetadata:
model_info = self._get_model_info()
return LLMMetadata(
context_window=model_info.max_context_length,
num_output=self.max_tokens,
is_chat_model=model_info.is_chat_model,
model_name=self.model,
)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
if self._is_chat_model():
chat_fn = self._chat
else:
chat_fn = completion_to_chat_decorator(self._complete)
return chat_fn(messages, **kwargs)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
if self._is_chat_model():
stream_chat_fn = self._stream_chat
else:
stream_chat_fn = stream_completion_to_chat_decorator(self._stream_complete)
return stream_chat_fn(messages, **kwargs)
@property
def _credential_kwargs(self) -> Dict[str, Any]:
return {
"konko_api_key": self.konko_api_key,
"api_type": self.api_type,
"openai_api_key": self.openai_api_key,
}
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"model": self.model,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
}
return {
**base_kwargs,
**self.additional_kwargs,
}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
**self._model_kwargs,
**kwargs,
}
def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
if not self._is_chat_model():
raise ValueError("This model is not a chat model.")
message_dicts = to_openai_message_dicts(messages)
all_kwargs = self._get_all_kwargs(**kwargs)
response = completion_with_retry(
is_chat_model=self._is_chat_model(),
max_retries=self.max_retries,
messages=message_dicts,
stream=False,
**all_kwargs,
)
if is_openai_v1():
message_dict = response.choices[0].message
else:
message_dict = response["choices"][0]["message"]
message = from_openai_message_dict(message_dict)
return ChatResponse(
message=message,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
def _stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
if not self._is_chat_model():
raise ValueError("This model is not a chat model.")
message_dicts = to_openai_message_dicts(messages)
all_kwargs = self._get_all_kwargs(**kwargs)
def gen() -> ChatResponseGen:
content = ""
for response in completion_with_retry(
is_chat_model=self._is_chat_model(),
max_retries=self.max_retries,
messages=message_dicts,
stream=True,
**all_kwargs,
):
if is_openai_v1():
if len(response.choices) == 0 and response.prompt_annotations:
continue
delta = (
response.choices[0].delta if len(response.choices) > 0 else {}
)
role_value = delta.role
content_delta = delta.content or ""
else:
if "choices" not in response or len(response["choices"]) == 0:
continue
delta = response["choices"][0].get("delta", {})
role_value = delta["role"]
content_delta = delta["content"] or ""
role = role_value if role_value is not None else "assistant"
content += content_delta
yield ChatResponse(
message=ChatMessage(
role=role,
content=content,
),
delta=content_delta,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
return gen()
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
if self._is_chat_model():
complete_fn = chat_to_completion_decorator(self._chat)
else:
complete_fn = self._complete
return complete_fn(prompt, **kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
if self._is_chat_model():
stream_complete_fn = stream_chat_to_completion_decorator(self._stream_chat)
else:
stream_complete_fn = self._stream_complete
return stream_complete_fn(prompt, **kwargs)
def _get_response_token_counts(self, raw_response: Any) -> dict:
"""Get the token usage reported by the response."""
if not isinstance(raw_response, dict):
return {}
usage = raw_response.get("usage", {})
# NOTE: other model providers that use the OpenAI client may not report usage
if usage is None:
return {}
return {
"prompt_tokens": usage.get("prompt_tokens", 0),
"completion_tokens": usage.get("completion_tokens", 0),
"total_tokens": usage.get("total_tokens", 0),
}
def _complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
if self._is_chat_model():
raise ValueError("This model is a chat model.")
all_kwargs = self._get_all_kwargs(**kwargs)
if self.max_tokens is None:
# NOTE: non-chat completion endpoint requires max_tokens to be set
max_tokens = self._get_max_token_for_prompt(prompt)
all_kwargs["max_tokens"] = max_tokens
response = completion_with_retry(
is_chat_model=self._is_chat_model(),
max_retries=self.max_retries,
prompt=prompt,
stream=False,
**all_kwargs,
)
if is_openai_v1():
text = response.choices[0].text
else:
text = response["choices"][0]["text"]
return CompletionResponse(
text=text,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
if self._is_chat_model():
raise ValueError("This model is a chat model.")
all_kwargs = self._get_all_kwargs(**kwargs)
if self.max_tokens is None:
# NOTE: non-chat completion endpoint requires max_tokens to be set
max_tokens = self._get_max_token_for_prompt(prompt)
all_kwargs["max_tokens"] = max_tokens
def gen() -> CompletionResponseGen:
text = ""
for response in completion_with_retry(
is_chat_model=self._is_chat_model(),
max_retries=self.max_retries,
prompt=prompt,
stream=True,
**all_kwargs,
):
if is_openai_v1():
if len(response.choices) > 0:
delta = response.choices[0].text
else:
delta = ""
else:
if len(response["choices"]) > 0:
delta = response["choices"][0].text
else:
delta = ""
text += delta
yield CompletionResponse(
delta=delta,
text=text,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
return gen()
def _get_max_token_for_prompt(self, prompt: str) -> int:
try:
import tiktoken
except ImportError:
raise ImportError(
"Please install tiktoken to use the max_tokens=None feature."
)
context_window = self.metadata.context_window
encoding = tiktoken.encoding_for_model(self._get_model_name())
tokens = encoding.encode(prompt)
max_token = context_window - len(tokens)
if max_token <= 0:
raise ValueError(
f"The prompt is too long for the model. "
f"Please use a prompt that is less than {context_window} tokens."
)
return max_token
# ===== Async Endpoints =====
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
achat_fn: Callable[..., Awaitable[ChatResponse]]
if self._is_chat_model():
achat_fn = self._achat
else:
achat_fn = acompletion_to_chat_decorator(self._acomplete)
return await achat_fn(messages, **kwargs)
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseAsyncGen:
astream_chat_fn: Callable[..., Awaitable[ChatResponseAsyncGen]]
if self._is_chat_model():
astream_chat_fn = self._astream_chat
else:
astream_chat_fn = astream_completion_to_chat_decorator(
self._astream_complete
)
return await astream_chat_fn(messages, **kwargs)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
if self._is_chat_model():
acomplete_fn = achat_to_completion_decorator(self._achat)
else:
acomplete_fn = self._acomplete
return await acomplete_fn(prompt, **kwargs)
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
if self._is_chat_model():
astream_complete_fn = astream_chat_to_completion_decorator(
self._astream_chat
)
else:
astream_complete_fn = self._astream_complete
return await astream_complete_fn(prompt, **kwargs)
async def _achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
if not self._is_chat_model():
raise ValueError("This model is not a chat model.")
message_dicts = to_openai_message_dicts(messages)
all_kwargs = self._get_all_kwargs(**kwargs)
response = await acompletion_with_retry(
is_chat_model=self._is_chat_model(),
max_retries=self.max_retries,
messages=message_dicts,
stream=False,
**all_kwargs,
)
if is_openai_v1: # type: ignore
message_dict = response.choices[0].message
else:
message_dict = response["choices"][0]["message"]
message = from_openai_message_dict(message_dict)
return ChatResponse(
message=message,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
async def _astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
if not self._is_chat_model():
raise ValueError("This model is not a chat model.")
message_dicts = to_openai_message_dicts(messages)
all_kwargs = self._get_all_kwargs(**kwargs)
async def gen() -> ChatResponseAsyncGen:
content = ""
_function_call: Optional[dict] = None
async for response in await acompletion_with_retry(
is_chat_model=self._is_chat_model(),
max_retries=self.max_retries,
messages=message_dicts,
stream=True,
**all_kwargs,
):
if is_openai_v1():
if len(response.choices) > 0:
delta = response.choices[0].delta
else:
delta = {}
role = delta.role
content_delta = delta.content
else:
if len(response["choices"]) > 0:
delta = response["choices"][0].delta
else:
delta = {}
role = delta["role"]
content_delta = delta["content"]
content += content_delta
yield ChatResponse(
message=ChatMessage(
role=role,
content=content,
),
delta=content_delta,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
return gen()
async def _acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
if self._is_chat_model():
raise ValueError("This model is a chat model.")
all_kwargs = self._get_all_kwargs(**kwargs)
if self.max_tokens is None:
# NOTE: non-chat completion endpoint requires max_tokens to be set
max_tokens = self._get_max_token_for_prompt(prompt)
all_kwargs["max_tokens"] = max_tokens
response = await acompletion_with_retry(
is_chat_model=self._is_chat_model(),
max_retries=self.max_retries,
prompt=prompt,
stream=False,
**all_kwargs,
)
if is_openai_v1():
text = response.choices[0].text
else:
text = response["choices"][0]["text"]
return CompletionResponse(
text=text,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
async def _astream_complete(
self, prompt: str, **kwargs: Any
) -> CompletionResponseAsyncGen:
if self._is_chat_model():
raise ValueError("This model is a chat model.")
all_kwargs = self._get_all_kwargs(**kwargs)
if self.max_tokens is None:
# NOTE: non-chat completion endpoint requires max_tokens to be set
max_tokens = self._get_max_token_for_prompt(prompt)
all_kwargs["max_tokens"] = max_tokens
async def gen() -> CompletionResponseAsyncGen:
text = ""
async for response in await acompletion_with_retry(
is_chat_model=self._is_chat_model(),
max_retries=self.max_retries,
prompt=prompt,
stream=True,
**all_kwargs,
):
if is_openai_v1():
if len(response.choices) > 0:
delta = response.choices[0].text
else:
delta = ""
else:
if len(response["choices"]) > 0:
delta = response["choices"][0].text
else:
delta = ""
text += delta
yield CompletionResponse(
delta=delta,
text=text,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
return gen()
| Konko |
python | pandas-dev__pandas | pandas/tests/series/accessors/test_sparse_accessor.py | {
"start": 28,
"end": 296
} | class ____:
def test_sparse_accessor_updates_on_inplace(self):
ser = Series([1, 1, 2, 3], dtype="Sparse[int]")
return_value = ser.drop([0, 1], inplace=True)
assert return_value is None
assert ser.sparse.density == 1.0
| TestSparseAccessor |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-vectara/destination_vectara/client.py | {
"start": 601,
"end": 8113
} | class ____:
BASE_URL = "https://api.vectara.io/v1"
def __init__(self, config: VectaraConfig):
if isinstance(config, dict):
config = VectaraConfig.parse_obj(config)
self.customer_id = config.customer_id
self.corpus_name = config.corpus_name
self.client_id = config.oauth2.client_id
self.client_secret = config.oauth2.client_secret
self.parallelize = config.parallelize
self.check()
def check(self):
"""
Check for an existing corpus in Vectara.
If more than one exists - then return a message
If exactly one exists with this name - ensure that the corpus has the correct metadata fields, and use it.
If not, create it.
"""
try:
jwt_token = self._get_jwt_token()
if not jwt_token:
return "Unable to get JWT Token. Confirm your Client ID and Client Secret."
list_corpora_response = self._request(endpoint="list-corpora", data={"numResults": 100, "filter": self.corpus_name})
possible_corpora_ids_names_map = {
corpus.get("id"): corpus.get("name")
for corpus in list_corpora_response.get("corpus")
if corpus.get("name") == self.corpus_name
}
if len(possible_corpora_ids_names_map) > 1:
return f"Multiple Corpora exist with name {self.corpus_name}"
if len(possible_corpora_ids_names_map) == 1:
self.corpus_id = list(possible_corpora_ids_names_map.keys())[0]
else:
data = {
"corpus": {
"name": self.corpus_name,
"filterAttributes": [
{
"name": METADATA_STREAM_FIELD,
"indexed": True,
"type": "FILTER_ATTRIBUTE_TYPE__TEXT",
"level": "FILTER_ATTRIBUTE_LEVEL__DOCUMENT",
},
],
}
}
create_corpus_response = self._request(endpoint="create-corpus", data=data)
self.corpus_id = create_corpus_response.get("corpusId")
except Exception as e:
return str(e) + "\n" + "".join(traceback.TracebackException.from_exception(e).format())
def _get_jwt_token(self):
"""Connect to the server and get a JWT token."""
token_endpoint = f"https://vectara-prod-{self.customer_id}.auth.us-west-2.amazoncognito.com/oauth2/token"
headers = {
"Content-Type": "application/x-www-form-urlencoded",
}
data = {"grant_type": "client_credentials", "client_id": self.client_id, "client_secret": self.client_secret}
request_time = datetime.datetime.now().timestamp()
response = requests.request(method="POST", url=token_endpoint, headers=headers, data=data)
response_json = response.json()
self.jwt_token = response_json.get("access_token")
self.jwt_token_expires_ts = request_time + response_json.get("expires_in")
return self.jwt_token
@backoff.on_exception(backoff.expo, requests.exceptions.RequestException, max_tries=5, giveup=user_error)
def _request(self, endpoint: str, http_method: str = "POST", params: Mapping[str, Any] = None, data: Mapping[str, Any] = None):
url = f"{self.BASE_URL}/{endpoint}"
current_ts = datetime.datetime.now().timestamp()
if self.jwt_token_expires_ts - current_ts <= 60:
self._get_jwt_token()
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": f"Bearer {self.jwt_token}",
"customer-id": self.customer_id,
"X-source": "airbyte",
}
response = requests.request(method=http_method, url=url, headers=headers, params=params, data=json.dumps(data))
response.raise_for_status()
return response.json()
def delete_doc_by_metadata(self, metadata_field_name, metadata_field_values):
document_ids = []
for value in metadata_field_values:
data = {
"query": [
{
"query": "",
"numResults": 100,
"corpusKey": [
{
"customerId": self.customer_id,
"corpusId": self.corpus_id,
"metadataFilter": f"doc.{metadata_field_name} = '{value}'",
}
],
}
]
}
query_documents_response = self._request(endpoint="query", data=data)
document_ids.extend([document.get("id") for document in query_documents_response.get("responseSet")[0].get("document")])
self.delete_docs_by_id(document_ids=document_ids)
def delete_docs_by_id(self, document_ids):
for document_id in document_ids:
self._request(
endpoint="delete-doc", data={"customerId": self.customer_id, "corpusId": self.corpus_id, "documentId": document_id}
)
def index_document(self, document):
document_section, document_metadata, document_title, document_id = document
if len(document_section) == 0:
return None # Document is empty, so skip it
document_metadata = self._normalize(document_metadata)
data = {
"customerId": self.customer_id,
"corpusId": self.corpus_id,
"document": {
"documentId": document_id,
"metadataJson": json.dumps(document_metadata),
"title": document_title,
"section": [
{"text": f"{section_key}: {section_value}"}
for section_key, section_value in document_section.items()
if section_key != METADATA_STREAM_FIELD
],
},
}
index_document_response = self._request(endpoint="index", data=data)
return index_document_response
def index_documents(self, documents):
if self.parallelize:
with ThreadPoolExecutor() as executor:
futures = [executor.submit(self.index_document, doc) for doc in documents]
for future in futures:
try:
response = future.result()
if response is None:
continue
assert (
response.get("status").get("code") == "OK"
or response.get("status").get("statusDetail") == "Document should have at least one part."
)
except AssertionError as e:
# Handle the assertion error
pass
else:
for doc in documents:
self.index_document(doc)
def _normalize(self, metadata: dict) -> dict:
result = {}
for key, value in metadata.items():
if isinstance(value, (str, int, float, bool)):
result[key] = value
else:
# JSON encode all other types
result[key] = json.dumps(value)
return result
| VectaraClient |
python | patrick-kidger__equinox | equinox/internal/_noinline.py | {
"start": 4469,
"end": 5006
} | class ____(Module):
undefined: PyTree[jax.core.ShapedArray]
def __call__(self, static_fn):
def _transpose_transform_impl(args):
defined, cts_out = args
def _to_transpose(_undefined):
_args = combine(defined, _undefined)
return static_fn(_args)
(cts_undefined,) = jax.linear_transpose(_to_transpose, self.undefined)(
cts_out
)
return cts_undefined
return _transpose_transform_impl
| _MetaTransposeTransform |
python | walkccc__LeetCode | solutions/104. Maximum Depth of Binary Tree/104.py | {
"start": 0,
"end": 172
} | class ____:
def maxDepth(self, root: TreeNode | None) -> int:
if not root:
return 0
return 1 + max(self.maxDepth(root.left), self.maxDepth(root.right))
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/singledispatchmethod.py | {
"start": 183,
"end": 468
} | class ____:
@singledispatchmethod
def foo(self, x: Union[MutableMapping, Mapping]) -> int:
raise NotImplementedError
@foo.register
def _(self, x: MutableMapping) -> int:
return 0
@foo.register
def _(self, x: Mapping) -> int:
return 0
| Foo |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/antlr_asset_selection/antlr_asset_selection.py | {
"start": 1199,
"end": 7707
} | class ____(AssetSelectionVisitor):
def __init__(self, include_sources: bool):
self.include_sources = include_sources
def visitStart(self, ctx: AssetSelectionParser.StartContext):
return self.visit(ctx.expr())
def visitTraversalAllowedExpression(
self, ctx: AssetSelectionParser.TraversalAllowedExpressionContext
):
return self.visit(ctx.traversalAllowedExpr())
def visitUpAndDownTraversalExpression(
self, ctx: AssetSelectionParser.UpAndDownTraversalExpressionContext
):
selection: AssetSelection = self.visit(ctx.traversalAllowedExpr())
# upTraversal => optional DIGITS? PLUS
up_digits = ctx.upTraversal().DIGITS()
up_depth = parse_traversal_depth(up_digits)
# downTraversal => PLUS DIGITS?
down_digits = ctx.downTraversal().DIGITS()
down_depth = parse_traversal_depth(down_digits)
return selection.upstream(depth=up_depth) | selection.downstream(depth=down_depth)
def visitUpTraversalExpression(self, ctx: AssetSelectionParser.UpTraversalExpressionContext):
selection: AssetSelection = self.visit(ctx.traversalAllowedExpr())
up_digits = ctx.upTraversal().DIGITS()
up_depth = parse_traversal_depth(up_digits)
return selection.upstream(depth=up_depth)
def visitDownTraversalExpression(
self, ctx: AssetSelectionParser.DownTraversalExpressionContext
):
selection: AssetSelection = self.visit(ctx.traversalAllowedExpr())
down_digits = ctx.downTraversal().DIGITS()
down_depth = parse_traversal_depth(down_digits)
return selection.downstream(depth=down_depth)
def visitNotExpression(self, ctx: AssetSelectionParser.NotExpressionContext):
selection: AssetSelection = self.visit(ctx.expr())
return AssetSelection.all(include_sources=self.include_sources) - selection
def visitAndExpression(self, ctx: AssetSelectionParser.AndExpressionContext):
left: AssetSelection = self.visit(ctx.expr(0))
right: AssetSelection = self.visit(ctx.expr(1))
return left & right
def visitOrExpression(self, ctx: AssetSelectionParser.OrExpressionContext):
left: AssetSelection = self.visit(ctx.expr(0))
right: AssetSelection = self.visit(ctx.expr(1))
return left | right
def visitAllExpression(self, ctx: AssetSelectionParser.AllExpressionContext):
return AssetSelection.all(include_sources=self.include_sources)
def visitAttributeExpression(self, ctx: AssetSelectionParser.AttributeExpressionContext):
return self.visit(ctx.attributeExpr())
def visitFunctionCallExpression(self, ctx: AssetSelectionParser.FunctionCallExpressionContext):
function = self.visit(ctx.functionName())
selection: AssetSelection = self.visit(ctx.expr())
if function == "sinks":
return selection.sinks()
elif function == "roots":
return selection.roots()
def visitParenthesizedExpression(
self, ctx: AssetSelectionParser.ParenthesizedExpressionContext
):
return self.visit(ctx.expr())
def visitFunctionName(self, ctx: AssetSelectionParser.FunctionNameContext):
if ctx.SINKS():
return "sinks"
elif ctx.ROOTS():
return "roots"
def visitKeyExpr(self, ctx: AssetSelectionParser.KeyExprContext):
value = self.visit(ctx.keyValue())
return KeyWildCardAssetSelection(selected_key_wildcard=value)
def visitTagAttributeExpr(self, ctx: AssetSelectionParser.TagAttributeExprContext):
key = self.visit(ctx.value(0))
value = self.visit(ctx.value(1)) if ctx.EQUAL() else None
return AssetSelection.tag(key, value or "", include_sources=self.include_sources)
def visitOwnerAttributeExpr(self, ctx: AssetSelectionParser.OwnerAttributeExprContext):
owner = self.visit(ctx.value())
return AssetSelection.owner(owner)
def visitGroupAttributeExpr(self, ctx: AssetSelectionParser.GroupAttributeExprContext):
group = self.visit(ctx.value())
return AssetSelection.groups(
*([] if not group else [group]), include_sources=self.include_sources
)
def visitKindAttributeExpr(self, ctx: AssetSelectionParser.KindAttributeExprContext):
kind = self.visit(ctx.value())
return AssetSelection.kind(kind, include_sources=self.include_sources)
def visitCodeLocationAttributeExpr(
self, ctx: AssetSelectionParser.CodeLocationAttributeExprContext
):
code_location = self.visit(ctx.value())
return CodeLocationAssetSelection(selected_code_location=code_location)
def visitKeyValue(self, ctx: AssetSelectionParser.KeyValueContext):
if ctx.QUOTED_STRING():
return ctx.QUOTED_STRING().getText().strip('"')
elif ctx.UNQUOTED_WILDCARD_STRING():
return ctx.UNQUOTED_WILDCARD_STRING().getText()
elif ctx.UNQUOTED_STRING():
return ctx.UNQUOTED_STRING().getText()
def visitValue(self, ctx: AssetSelectionParser.ValueContext):
if ctx.QUOTED_STRING():
return ctx.QUOTED_STRING().getText().strip('"')
elif ctx.UNQUOTED_STRING():
return ctx.UNQUOTED_STRING().getText()
elif ctx.NULL_STRING():
return None
def visitStatusAttributeExpr(self, ctx: AssetSelectionParser.StatusAttributeExprContext):
status = self.visit(ctx.value())
return StatusAssetSelection(selected_status=status)
def visitColumnAttributeExpr(self, ctx: AssetSelectionParser.ColumnAttributeExprContext):
column = self.visit(ctx.value())
return ColumnAssetSelection(selected_column=column)
def visitTableNameAttributeExpr(self, ctx: AssetSelectionParser.TableNameAttributeExprContext):
table_name = self.visit(ctx.value())
return TableNameAssetSelection(selected_table_name=table_name)
def visitColumnTagAttributeExpr(self, ctx: AssetSelectionParser.ColumnTagAttributeExprContext):
key = self.visit(ctx.value(0))
value = self.visit(ctx.value(1)) if ctx.EQUAL() else None
return ColumnTagAssetSelection(key=key, value=value or "")
def visitChangedInBranchAttributeExpr(
self, ctx: AssetSelectionParser.ChangedInBranchAttributeExprContext
):
branch = self.visit(ctx.value())
return ChangedInBranchAssetSelection(selected_changed_in_branch=branch)
| AntlrAssetSelectionVisitor |
python | getsentry__sentry | src/sentry/incidents/models/alert_rule.py | {
"start": 13482,
"end": 14221
} | class ____:
def __init__(self) -> None:
# Two kinds of index. The value sets should be equal at all times.
self.by_action_service: dict[ActionService, ActionHandlerFactory] = {}
self.by_slug: dict[str, ActionHandlerFactory] = {}
def register(self, factory: ActionHandlerFactory) -> None:
if factory.service_type in self.by_action_service:
raise Exception(f"Handler already registered for type {factory.service_type}")
if factory.slug in self.by_slug:
raise Exception(f"Handler already registered with slug={factory.slug!r}")
self.by_action_service[factory.service_type] = factory
self.by_slug[factory.slug] = factory
@region_silo_model
| _FactoryRegistry |
python | eventlet__eventlet | tests/db_pool_test.py | {
"start": 15648,
"end": 17078
} | class ____:
dummy_table_sql = """CREATE TEMPORARY TABLE test_table
(
row_id SERIAL PRIMARY KEY,
value_int INTEGER,
value_float FLOAT,
value_string VARCHAR(200),
value_uuid CHAR(36),
value_binary BYTEA,
value_binary_string BYTEA,
created TIMESTAMP
);"""
@tests.skip_unless(postgres_requirement)
def setUp(self):
self._dbmodule = psycopg2
self._auth = tests.get_database_auth()['psycopg2']
super().setUp()
def tearDown(self):
super().tearDown()
def create_db(self):
dbname = 'test%s' % os.getpid()
self._auth['database'] = dbname
try:
self.drop_db()
except Exception:
pass
auth = self._auth.copy()
auth.pop('database') # can't create if you're connecting to it
conn = self._dbmodule.connect(**auth)
conn.set_isolation_level(0)
db = conn.cursor()
db.execute("create database " + dbname)
db.close()
conn.close()
def drop_db(self):
auth = self._auth.copy()
auth.pop('database') # can't drop database we connected to
conn = self._dbmodule.connect(**auth)
conn.set_isolation_level(0)
db = conn.cursor()
db.execute("drop database IF EXISTS " + self._auth['database'])
db.close()
conn.close()
| Psycopg2ConnectionPool |
python | tensorflow__tensorflow | tensorflow/python/ops/linalg/linear_operator_addition.py | {
"start": 11517,
"end": 12253
} | class ____(_Adder):
"""Handles additions resulting in a TriL operator."""
def can_add(self, op1, op2):
types = {_type(op1), _type(op2)}
return not types.difference(_DIAG_LIKE.union({_TRIL}))
def _add(self, op1, op2, operator_name, hints):
if _type(op1) in _EFFICIENT_ADD_TO_TENSOR:
op_add_to_tensor, op_other = op1, op2
else:
op_add_to_tensor, op_other = op2, op1
return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
tril=op_add_to_tensor.add_to_tensor(op_other.to_dense()),
is_non_singular=hints.is_non_singular,
is_self_adjoint=hints.is_self_adjoint,
is_positive_definite=hints.is_positive_definite,
name=operator_name)
| _AddAndReturnTriL |
python | tornadoweb__tornado | tornado/web.py | {
"start": 80712,
"end": 93476
} | class ____(ReversibleRouter):
r"""A collection of request handlers that make up a web application.
Instances of this class are callable and can be passed directly to
HTTPServer to serve the application::
application = web.Application([
(r"/", MainPageHandler),
])
http_server = httpserver.HTTPServer(application)
http_server.listen(8080)
The constructor for this class takes in a list of `~.routing.Rule`
objects or tuples of values corresponding to the arguments of
`~.routing.Rule` constructor: ``(matcher, target, [target_kwargs], [name])``,
the values in square brackets being optional. The default matcher is
`~.routing.PathMatches`, so ``(regexp, target)`` tuples can also be used
instead of ``(PathMatches(regexp), target)``.
A common routing target is a `RequestHandler` subclass, but you can also
use lists of rules as a target, which create a nested routing configuration::
application = web.Application([
(HostMatches("example.com"), [
(r"/", MainPageHandler),
(r"/feed", FeedHandler),
]),
])
In addition to this you can use nested `~.routing.Router` instances,
`~.httputil.HTTPMessageDelegate` subclasses and callables as routing targets
(see `~.routing` module docs for more information).
When we receive requests, we iterate over the list in order and
instantiate an instance of the first request class whose regexp
matches the request path. The request class can be specified as
either a class object or a (fully-qualified) name.
A dictionary may be passed as the third element (``target_kwargs``)
of the tuple, which will be used as keyword arguments to the handler's
constructor and `~RequestHandler.initialize` method. This pattern
is used for the `StaticFileHandler` in this example (note that a
`StaticFileHandler` can be installed automatically with the
static_path setting described below)::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
We support virtual hosts with the `add_handlers` method, which takes in
a host regular expression as the first argument::
application.add_handlers(r"www\.myhost\.com", [
(r"/article/([0-9]+)", ArticleHandler),
])
If there's no match for the current request's host, then ``default_host``
parameter value is matched against host regular expressions.
.. warning::
Applications that do not use TLS may be vulnerable to :ref:`DNS
rebinding <dnsrebinding>` attacks. This attack is especially
relevant to applications that only listen on ``127.0.0.1`` or
other private networks. Appropriate host patterns must be used
(instead of the default of ``r'.*'``) to prevent this risk. The
``default_host`` argument must not be used in applications that
may be vulnerable to DNS rebinding.
You can serve static files by sending the ``static_path`` setting
as a keyword argument. We will serve those files from the
``/static/`` URI (this is configurable with the
``static_url_prefix`` setting), and we will serve ``/favicon.ico``
and ``/robots.txt`` from the same directory. A custom subclass of
`StaticFileHandler` can be specified with the
``static_handler_class`` setting.
.. versionchanged:: 4.5
Integration with the new `tornado.routing` module.
"""
def __init__(
self,
handlers: Optional[_RuleList] = None,
default_host: Optional[str] = None,
transforms: Optional[List[Type["OutputTransform"]]] = None,
**settings: Any,
) -> None:
if transforms is None:
self.transforms = [] # type: List[Type[OutputTransform]]
if settings.get("compress_response") or settings.get("gzip"):
self.transforms.append(GZipContentEncoding)
else:
self.transforms = transforms
self.default_host = default_host
self.settings = settings
self.ui_modules = {
"linkify": _linkify,
"xsrf_form_html": _xsrf_form_html,
"Template": TemplateModule,
}
self.ui_methods = {} # type: Dict[str, Callable[..., str]]
self._load_ui_modules(settings.get("ui_modules", {}))
self._load_ui_methods(settings.get("ui_methods", {}))
if self.settings.get("static_path"):
path = self.settings["static_path"]
handlers = list(handlers or [])
static_url_prefix = settings.get("static_url_prefix", "/static/")
static_handler_class = settings.get(
"static_handler_class", StaticFileHandler
)
static_handler_args = settings.get("static_handler_args", {})
static_handler_args["path"] = path
for pattern in [
re.escape(static_url_prefix) + r"(.*)",
r"/(favicon\.ico)",
r"/(robots\.txt)",
]:
handlers.insert(0, (pattern, static_handler_class, static_handler_args))
if self.settings.get("debug"):
self.settings.setdefault("autoreload", True)
self.settings.setdefault("compiled_template_cache", False)
self.settings.setdefault("static_hash_cache", False)
self.settings.setdefault("serve_traceback", True)
self.wildcard_router = _ApplicationRouter(self, handlers)
self.default_router = _ApplicationRouter(
self, [Rule(AnyMatches(), self.wildcard_router)]
)
# Automatically reload modified modules
if self.settings.get("autoreload"):
from tornado import autoreload
autoreload.start()
def listen(
self,
port: int,
address: Optional[str] = None,
*,
family: socket.AddressFamily = socket.AF_UNSPEC,
backlog: int = tornado.netutil._DEFAULT_BACKLOG,
flags: Optional[int] = None,
reuse_port: bool = False,
**kwargs: Any,
) -> HTTPServer:
"""Starts an HTTP server for this application on the given port.
This is a convenience alias for creating an `.HTTPServer` object and
calling its listen method. Keyword arguments not supported by
`HTTPServer.listen <.TCPServer.listen>` are passed to the `.HTTPServer`
constructor. For advanced uses (e.g. multi-process mode), do not use
this method; create an `.HTTPServer` and call its
`.TCPServer.bind`/`.TCPServer.start` methods directly.
Note that after calling this method you still need to call
``IOLoop.current().start()`` (or run within ``asyncio.run``) to start
the server.
Returns the `.HTTPServer` object.
.. versionchanged:: 4.3
Now returns the `.HTTPServer` object.
.. versionchanged:: 6.2
Added support for new keyword arguments in `.TCPServer.listen`,
including ``reuse_port``.
"""
server = HTTPServer(self, **kwargs)
server.listen(
port,
address=address,
family=family,
backlog=backlog,
flags=flags,
reuse_port=reuse_port,
)
return server
def add_handlers(self, host_pattern: str, host_handlers: _RuleList) -> None:
"""Appends the given handlers to our handler list.
Host patterns are processed sequentially in the order they were
added. All matching patterns will be considered.
"""
host_matcher = HostMatches(host_pattern)
rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
self.default_router.rules.insert(-1, rule)
if self.default_host is not None:
self.wildcard_router.add_rules(
[(DefaultHostMatches(self, host_matcher.host_pattern), host_handlers)]
)
def add_transform(self, transform_class: Type["OutputTransform"]) -> None:
self.transforms.append(transform_class)
def _load_ui_methods(self, methods: Any) -> None:
if isinstance(methods, types.ModuleType):
self._load_ui_methods({n: getattr(methods, n) for n in dir(methods)})
elif isinstance(methods, list):
for m in methods:
self._load_ui_methods(m)
else:
for name, fn in methods.items():
if (
not name.startswith("_")
and hasattr(fn, "__call__")
and name[0].lower() == name[0]
):
self.ui_methods[name] = fn
def _load_ui_modules(self, modules: Any) -> None:
if isinstance(modules, types.ModuleType):
self._load_ui_modules({n: getattr(modules, n) for n in dir(modules)})
elif isinstance(modules, list):
for m in modules:
self._load_ui_modules(m)
else:
assert isinstance(modules, dict)
for name, cls in modules.items():
try:
if issubclass(cls, UIModule):
self.ui_modules[name] = cls
except TypeError:
pass
def __call__(
self, request: httputil.HTTPServerRequest
) -> Optional[Awaitable[None]]:
# Legacy HTTPServer interface
dispatcher = self.find_handler(request)
return dispatcher.execute()
def find_handler(
self, request: httputil.HTTPServerRequest, **kwargs: Any
) -> "_HandlerDelegate":
route = self.default_router.find_handler(request)
if route is not None:
return cast("_HandlerDelegate", route)
if self.settings.get("default_handler_class"):
return self.get_handler_delegate(
request,
self.settings["default_handler_class"],
self.settings.get("default_handler_args", {}),
)
return self.get_handler_delegate(request, ErrorHandler, {"status_code": 404})
def get_handler_delegate(
self,
request: httputil.HTTPServerRequest,
target_class: Type[RequestHandler],
target_kwargs: Optional[Dict[str, Any]] = None,
path_args: Optional[List[bytes]] = None,
path_kwargs: Optional[Dict[str, bytes]] = None,
) -> "_HandlerDelegate":
"""Returns `~.httputil.HTTPMessageDelegate` that can serve a request
for application and `RequestHandler` subclass.
:arg httputil.HTTPServerRequest request: current HTTP request.
:arg RequestHandler target_class: a `RequestHandler` class.
:arg dict target_kwargs: keyword arguments for ``target_class`` constructor.
:arg list path_args: positional arguments for ``target_class`` HTTP method that
will be executed while handling a request (``get``, ``post`` or any other).
:arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method.
"""
return _HandlerDelegate(
self, request, target_class, target_kwargs, path_args, path_kwargs
)
def reverse_url(self, name: str, *args: Any) -> str:
"""Returns a URL path for handler named ``name``
The handler must be added to the application as a named `URLSpec`.
Args will be substituted for capturing groups in the `URLSpec` regex.
They will be converted to strings if necessary, encoded as utf8,
and url-escaped.
"""
reversed_url = self.default_router.reverse_url(name, *args)
if reversed_url is not None:
return reversed_url
raise KeyError("%s not found in named urls" % name)
def log_request(self, handler: RequestHandler) -> None:
"""Writes a completed HTTP request to the logs.
By default writes to the python root logger. To change
this behavior either subclass Application and override this method,
or pass a function in the application settings dictionary as
``log_function``.
"""
if "log_function" in self.settings:
self.settings["log_function"](handler)
return
if handler.get_status() < 400:
log_method = access_log.info
elif handler.get_status() < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * handler.request.request_time()
log_method(
"%d %s %.2fms",
handler.get_status(),
handler._request_summary(),
request_time,
)
| Application |
python | mlflow__mlflow | mlflow/genai/judges/tools/types.py | {
"start": 1332,
"end": 1743
} | class ____:
"""Feedback for a trace (simplified for judge tools)."""
name: str
source: str
rationale: str | None
span_id: str | None
assessment_id: str | None
value: FeedbackValueType | None
error_code: str | None
error_message: str | None
stack_trace: str | None
overrides: str | None
valid: bool | None
@experimental(version="3.5.0")
@dataclass
| JudgeToolFeedback |
python | run-llama__llama_index | llama-index-core/llama_index/core/agent/react/types.py | {
"start": 410,
"end": 867
} | class ____(BaseReasoningStep):
"""Action Reasoning step."""
thought: str
action: str
action_input: Dict
def get_content(self) -> str:
"""Get content."""
return (
f"Thought: {self.thought}\nAction: {self.action}\n"
f"Action Input: {self.action_input}"
)
@property
def is_done(self) -> bool:
"""Is the reasoning step the last one."""
return False
| ActionReasoningStep |
python | scipy__scipy | benchmarks/benchmarks/interpolate.py | {
"start": 12149,
"end": 13632
} | class ____(interpolate.RegularGridInterpolator):
def __init__(self, points, xi, **kwargs):
# create fake values for initialization
values = np.zeros(tuple([len(pt) for pt in points]))
super().__init__(points, values, **kwargs)
self._is_initialized = False
# precompute values
(self.xi, self.xi_shape, self.ndim,
self.nans, self.out_of_bounds) = self._prepare_xi(xi)
self.indices, self.norm_distances = self._find_indices(xi.T)
self._is_initialized = True
def _prepare_xi(self, xi):
if not self._is_initialized:
return super()._prepare_xi(xi)
else:
# just give back precomputed values
return (self.xi, self.xi_shape, self.ndim,
self.nans, self.out_of_bounds)
def _find_indices(self, xi):
if not self._is_initialized:
return super()._find_indices(xi)
else:
# just give back pre-computed values
return self.indices, self.norm_distances
def __call__(self, values, method=None):
values = self._check_values(values)
# check fillvalue
self._check_fill_value(values, self.fill_value)
# check dimensionality
self._check_dimensionality(self.grid, values)
# flip, if needed
self._values = np.flip(values, axis=self._descending_dimensions)
return super().__call__(self.xi, method=method)
| RegularGridInterpolatorValues |
python | ansible__ansible | lib/ansible/playbook/attribute.py | {
"start": 5668,
"end": 7033
} | class ____(Attribute):
def __init__(self, extend=False, prepend=False, **kwargs):
super().__init__(**kwargs)
self.extend = extend
self.prepend = prepend
def __get__(self, obj, obj_type=None):
if getattr(obj, '_squashed', False) or getattr(obj, '_finalized', False):
value = getattr(obj, f'_{self.name}', Sentinel)
else:
try:
value = obj._get_parent_attribute(self.name)
except AttributeError:
method = f'_get_attr_{self.name}'
if hasattr(obj, method):
# NOTE this appears to be not needed in the codebase,
# _get_attr_connection has been replaced by ConnectionFieldAttribute.
# Leaving it here for test_attr_method from
# test/units/playbook/test_base.py to pass and for backwards compat.
if getattr(obj, '_squashed', False):
value = getattr(obj, f'_{self.name}', Sentinel)
else:
value = getattr(obj, method)()
else:
value = getattr(obj, f'_{self.name}', Sentinel)
if value is Sentinel:
value = self.default
if callable(value):
value = value()
return value
| FieldAttribute |
python | python-poetry__poetry | src/poetry/repositories/installed_repository.py | {
"start": 726,
"end": 11270
} | class ____(Repository):
def __init__(self, packages: Sequence[Package] | None = None) -> None:
super().__init__("poetry-installed", packages)
self.system_site_packages: list[Package] = []
def add_package(self, package: Package, *, is_system_site: bool = False) -> None:
super().add_package(package)
if is_system_site:
self.system_site_packages.append(package)
@classmethod
def get_package_paths(cls, env: Env, name: str) -> set[Path]:
"""
Process a .pth file within the site-packages directories, and return any valid
paths. We skip executable .pth files as there is no reliable means to do this
without side-effects to current run-time. Mo check is made that the item refers
to a directory rather than a file, however, in order to maintain backwards
compatibility, we allow non-existing paths to be discovered. The latter
behaviour is different to how Python's site-specific hook configuration works.
Reference: https://docs.python.org/3.8/library/site.html
:param env: The environment to search for the .pth file in.
:param name: The name of the package to search .pth file for.
:return: A `Set` of valid `Path` objects.
"""
paths = set()
# we identify the candidate pth files to check, this is done so to handle cases
# where the pth file for foo-bar might have been installed as either foo-bar.pth
# or foo_bar.pth (expected) in either pure or platform lib directories.
candidates = itertools.product(
{env.purelib, env.platlib},
{name, module_name(name)},
)
for lib, module in candidates:
pth_file = lib.joinpath(module).with_suffix(".pth")
if not pth_file.exists():
continue
with pth_file.open(encoding=getencoding()) as f:
for line in f:
line = line.strip()
if line and not line.startswith(("#", "import ", "import\t")):
path = Path(line)
if not path.is_absolute():
path = lib.joinpath(path).resolve()
paths.add(path)
src_path = env.path / "src" / name
if not paths and src_path.exists():
paths.add(src_path)
return paths
@classmethod
def get_package_vcs_properties_from_path(cls, src: Path) -> tuple[str, str, str]:
from poetry.vcs.git import Git
info = Git.info(repo=src)
return "git", info.origin, info.revision
@classmethod
def is_vcs_package(cls, package: Path | Package, env: Env) -> bool:
# A VCS dependency should have been installed
# in the src directory.
src = env.path / "src"
if isinstance(package, Package):
return src.joinpath(package.name).is_dir()
try:
package.relative_to(env.path / "src")
except ValueError:
return False
else:
return True
@classmethod
def _create_package_from_distribution(
cls, path: Path, dist_metadata: metadata.PackageMetadata, env: Env
) -> Package:
# We first check for a direct_url.json file to determine
# the type of package.
if (
path.name.endswith(".dist-info")
and path.joinpath("direct_url.json").exists()
):
return cls._create_package_from_pep610(path, dist_metadata)
is_standard_package = env.is_path_relative_to_lib(path)
source_type = None
source_url = None
source_reference = None
source_resolved_reference = None
source_subdirectory = None
if is_standard_package:
if path.name.endswith(".dist-info"):
paths = cls.get_package_paths(env=env, name=dist_metadata["name"])
if paths:
is_editable_package = False
for src in paths:
if cls.is_vcs_package(src, env):
(
source_type,
source_url,
source_reference,
) = cls.get_package_vcs_properties_from_path(src)
break
if not (
is_editable_package or env.is_path_relative_to_lib(src)
):
is_editable_package = True
else:
# TODO: handle multiple source directories?
if is_editable_package:
source_type = "directory"
path = paths.pop()
if path.name == "src":
path = path.parent
source_url = path.as_posix()
elif cls.is_vcs_package(path, env):
(
source_type,
source_url,
source_reference,
) = cls.get_package_vcs_properties_from_path(
env.path / "src" / canonicalize_name(dist_metadata["name"])
)
elif is_python_project(path.parent):
source_type = "directory"
source_url = str(path.parent)
package = Package(
dist_metadata["name"],
dist_metadata["version"],
source_type=source_type,
source_url=source_url,
source_reference=source_reference,
source_resolved_reference=source_resolved_reference,
source_subdirectory=source_subdirectory,
)
package.description = dist_metadata.get( # type: ignore[attr-defined]
"summary",
"",
)
return package
@classmethod
def _create_package_from_pep610(
cls, path: Path, dist_metadata: metadata.PackageMetadata
) -> Package:
source_type = None
source_url = None
source_reference = None
source_resolved_reference = None
source_subdirectory = None
develop = False
url_reference = json.loads(
path.joinpath("direct_url.json").read_text(encoding="utf-8")
)
if "archive_info" in url_reference:
# File or URL distribution
if url_reference["url"].startswith("file:"):
# File distribution
source_type = "file"
source_url = url_to_path(url_reference["url"]).as_posix()
else:
# URL distribution
source_type = "url"
source_url = url_reference["url"]
elif "dir_info" in url_reference:
# Directory distribution
source_type = "directory"
source_url = url_to_path(url_reference["url"]).as_posix()
develop = url_reference["dir_info"].get("editable", False)
elif "vcs_info" in url_reference:
# VCS distribution
source_type = url_reference["vcs_info"]["vcs"]
source_url = url_reference["url"]
source_resolved_reference = url_reference["vcs_info"]["commit_id"]
source_reference = url_reference["vcs_info"].get(
"requested_revision", source_resolved_reference
)
source_subdirectory = url_reference.get("subdirectory")
package = Package(
dist_metadata["name"],
dist_metadata["version"],
source_type=source_type,
source_url=source_url,
source_reference=source_reference,
source_resolved_reference=source_resolved_reference,
source_subdirectory=source_subdirectory,
develop=develop,
)
package.description = dist_metadata.get( # type: ignore[attr-defined]
"summary",
"",
)
return package
@classmethod
def load(cls, env: Env, with_dependencies: bool = False) -> InstalledRepository:
"""
Load installed packages.
"""
from poetry.core.packages.dependency import Dependency
repo = cls()
seen = set()
skipped = set()
base_env = (
env.parent_env
if isinstance(env, VirtualEnv) and env.includes_system_site_packages
else None
)
for entry in env.sys_path:
if not entry.strip():
logger.debug(
"Project environment contains an empty path in <c1>sys_path</>,"
" ignoring."
)
continue
for distribution in sorted(
metadata.distributions(path=[entry]),
key=lambda d: str(d._path), # type: ignore[attr-defined]
):
path = Path(str(distribution._path)) # type: ignore[attr-defined]
if path in skipped:
continue
dist_metadata = distribution.metadata # type: ignore[attr-defined]
name = (
dist_metadata.get("name") # type: ignore[attr-defined]
if dist_metadata
else None
)
if not dist_metadata or name is None:
logger.warning(
"Project environment contains an invalid distribution"
" (<c1>%s</>). Consider removing it manually or recreate"
" the environment.",
path,
)
skipped.add(path)
continue
name = canonicalize_name(name)
if name in seen:
continue
package = cls._create_package_from_distribution(
path, dist_metadata, env
)
if with_dependencies:
for require in dist_metadata.get_all("requires-dist", []):
dep = Dependency.create_from_pep_508(require)
package.add_dependency(dep)
seen.add(package.name)
repo.add_package(
package,
is_system_site=bool(
base_env and base_env.is_path_relative_to_lib(path)
),
)
return repo
| InstalledRepository |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_deployment_schedules.py | {
"start": 11815,
"end": 15250
} | class ____:
@pytest.fixture
async def schedule_to_delete(
self,
get_server_session: AsyncSessionGetter,
deployment_with_schedules,
):
async with get_server_session() as session:
schedules = await models.deployments.read_deployment_schedules(
session=session,
deployment_id=deployment_with_schedules.id,
)
return schedules[0]
async def test_can_delete_schedule(
self,
get_server_session: AsyncSessionGetter,
client: AsyncClient,
deployment_with_schedules,
schedules_url: Callable[..., str],
schedule_to_delete: schemas.core.DeploymentSchedule,
):
async with get_server_session() as session:
schedules = await models.deployments.read_deployment_schedules(
session=session,
deployment_id=deployment_with_schedules.id,
)
assert schedule_to_delete.id in [schedule.id for schedule in schedules]
url = schedules_url(
deployment_with_schedules.id, schedule_id=schedule_to_delete.id
)
response = await client.delete(url)
assert response.status_code == status.HTTP_204_NO_CONTENT
async with get_server_session() as session:
schedules = await models.deployments.read_deployment_schedules(
session=session,
deployment_id=deployment_with_schedules.id,
)
assert schedule_to_delete.id not in [schedule.id for schedule in schedules]
async def test_404_non_existent_deployment(
self,
client: AsyncClient,
schedules_url: Callable[..., str],
schedule_to_delete: schemas.core.DeploymentSchedule,
):
url = schedules_url(uuid4(), schedule_id=schedule_to_delete.id)
response = await client.delete(url)
assert response.status_code == status.HTTP_404_NOT_FOUND
assert b"Deployment" in response.content
async def test_404_non_existent_schedule(
self,
deployment,
client: AsyncClient,
schedules_url: Callable[..., str],
):
url = schedules_url(deployment.id, schedule_id=uuid4())
response = await client.delete(url)
assert response.status_code == status.HTTP_404_NOT_FOUND
assert b"Schedule" in response.content
async def test_deletes_schedule_runs(
self,
db: PrefectDBInterface,
get_server_session: AsyncSessionGetter,
client: AsyncClient,
deployment_with_schedules,
schedules_url: Callable[..., str],
schedule_to_delete: schemas.core.DeploymentSchedule,
scheduled_flow_runs,
):
url = schedules_url(
deployment_with_schedules.id, schedule_id=schedule_to_delete.id
)
response = await client.delete(url)
assert response.status_code == status.HTTP_204_NO_CONTENT
async with get_server_session() as session:
result = await session.execute(
sa.select(db.FlowRun).where(
db.FlowRun.deployment_id == deployment_with_schedules.id,
db.FlowRun.auto_scheduled.is_(True),
)
)
flow_runs = result.scalars().all()
# Deleting the schedule should remove all scheduled runs
assert len(flow_runs) == 0
| TestDeleteDeploymentSchedule |
python | tensorflow__tensorflow | tensorflow/lite/python/metrics/metrics_nonportable.py | {
"start": 4398,
"end": 5037
} | class ____(TFLiteMetrics):
"""Similar to TFLiteMetrics but specialized for converter.
A unique session id will be created for each new TFLiteConverterMetrics.
"""
def __init__(self) -> None:
super(TFLiteConverterMetrics, self).__init__()
session_id = uuid.uuid4().hex
self._metrics_exporter = metrics_wrapper.MetricsWrapper(session_id)
self._exported = False
def __del__(self):
if not self._exported:
self.export_metrics()
def set_export_required(self):
self._exported = False
def export_metrics(self):
self._metrics_exporter.ExportMetrics()
self._exported = True
| TFLiteConverterMetrics |
python | apache__airflow | providers/grpc/tests/unit/grpc/operators/test_grpc.py | {
"start": 1025,
"end": 3993
} | class ____:
def custom_conn_func(self, connection):
pass
@mock.patch("airflow.providers.grpc.operators.grpc.GrpcHook")
def test_with_interceptors(self, mock_hook):
operator = GrpcOperator(
stub_class=StubClass,
call_func="stream_call",
interceptors=[],
task_id="test_grpc",
)
operator.execute({})
mock_hook.assert_called_once_with("grpc_default", interceptors=[], custom_connection_func=None)
@mock.patch("airflow.providers.grpc.operators.grpc.GrpcHook")
def test_with_custom_connection_func(self, mock_hook):
operator = GrpcOperator(
stub_class=StubClass,
call_func="stream_call",
custom_connection_func=self.custom_conn_func,
task_id="test_grpc",
)
operator.execute({})
mock_hook.assert_called_once_with(
"grpc_default", interceptors=None, custom_connection_func=self.custom_conn_func
)
@mock.patch("airflow.providers.grpc.operators.grpc.GrpcHook")
def test_execute_with_log(self, mock_hook):
mocked_hook = mock.Mock()
mock_hook.return_value = mocked_hook
mocked_hook.configure_mock(**{"run.return_value": ["value1", "value2"]})
operator = GrpcOperator(
stub_class=StubClass,
call_func="stream_call",
log_response=True,
task_id="test_grpc",
)
with mock.patch.object(operator.log, "info") as mock_info:
operator.execute({})
mock_hook.assert_called_once_with("grpc_default", interceptors=None, custom_connection_func=None)
mocked_hook.run.assert_called_once_with(StubClass, "stream_call", data={}, streaming=False)
mock_info.assert_any_call("Calling gRPC service")
mock_info.assert_any_call("%r", "value1")
mock_info.assert_any_call("%r", "value2")
@mock.patch("airflow.providers.grpc.operators.grpc.GrpcHook")
def test_execute_with_callback(self, mock_hook):
mocked_hook = mock.Mock()
callback = mock.Mock()
mock_hook.return_value = mocked_hook
mocked_hook.configure_mock(**{"run.return_value": ["value1", "value2"]})
operator = GrpcOperator(
stub_class=StubClass, call_func="stream_call", task_id="test_grpc", response_callback=callback
)
with mock.patch.object(operator.log, "info") as mock_info:
operator.execute({})
mock_hook.assert_called_once_with("grpc_default", interceptors=None, custom_connection_func=None)
mocked_hook.run.assert_called_once_with(StubClass, "stream_call", data={}, streaming=False)
assert ("'value1'", "'value2'") not in mock_info.call_args_list
mock_info.assert_any_call("Calling gRPC service")
callback.assert_any_call("value1", {})
callback.assert_any_call("value2", {})
| TestGrpcOperator |
python | pydantic__pydantic | tests/mypy/modules/root_models.py | {
"start": 388,
"end": 481
} | class ____(RootModel[list[str]]):
pets: list[str]
T = TypeVar('T')
V = TypeVar('V')
| Pets4 |
python | ansible__ansible | lib/ansible/_internal/_templating/_datatag.py | {
"start": 752,
"end": 830
} | class ____:
template: str
deprecated: Deprecated
| _TrippedDeprecationInfo |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess1.py | {
"start": 1364,
"end": 1551
} | class ____:
abc: DescriptorD[str] = DescriptorD()
stack: ExitStack
def test(self, value: ContextManager[str]) -> None:
self.abc = self.stack.enter_context(value)
| ClassD |
python | numba__numba | numba/tests/npyufunc/test_gufunc.py | {
"start": 3203,
"end": 5326
} | class ____(MemoryLeakMixin, TestCase):
target = 'cpu'
def test_multiple_outputs_same_type_passed_in(self):
@guvectorize('(x)->(x),(x)',
target=self.target)
def copy(A, B, C):
for i in range(B.size):
B[i] = A[i]
C[i] = A[i]
A = np.arange(10, dtype=np.float32) + 1
B = np.zeros_like(A)
C = np.zeros_like(A)
copy(A, B, C)
np.testing.assert_allclose(A, B)
np.testing.assert_allclose(A, C)
def test_multiple_outputs_distinct_values(self):
@guvectorize('(x)->(x),(x)',
target=self.target)
def copy_and_double(A, B, C):
for i in range(B.size):
B[i] = A[i]
C[i] = A[i] * 2
A = np.arange(10, dtype=np.float32) + 1
B = np.zeros_like(A)
C = np.zeros_like(A)
copy_and_double(A, B, C)
np.testing.assert_allclose(A, B)
np.testing.assert_allclose(A * 2, C)
def test_multiple_output_dtypes(self):
@guvectorize('(x)->(x),(x)',
target=self.target)
def copy_and_multiply(A, B, C):
for i in range(B.size):
B[i] = A[i]
C[i] = A[i] * 1.5
A = np.arange(10, dtype=np.int32) + 1
B = np.zeros_like(A)
C = np.zeros_like(A, dtype=np.float64)
copy_and_multiply(A, B, C)
np.testing.assert_allclose(A, B)
np.testing.assert_allclose(A * np.float64(1.5), C)
def test_incorrect_number_of_pos_args(self):
@guvectorize('(m),(m)->(m),(m)', target=self.target)
def f(x, y, z, w):
pass
arr = np.arange(5, dtype=np.int32)
# Inputs only, too few
msg = "Too few arguments for function 'f'"
with self.assertRaises(TypeError) as te:
f(arr)
self.assertIn(msg, str(te.exception))
# Inputs and outputs, too many
with self.assertRaises(TypeError) as te:
f(arr, arr, arr, arr, arr)
self.assertIn(msg, str(te.exception))
| TestMultipleOutputs |
python | pyca__cryptography | src/cryptography/x509/base.py | {
"start": 17409,
"end": 23208
} | class ____:
_extensions: list[Extension[ExtensionType]]
_revoked_certificates: list[RevokedCertificate]
def __init__(
self,
issuer_name: Name | None = None,
last_update: datetime.datetime | None = None,
next_update: datetime.datetime | None = None,
extensions: list[Extension[ExtensionType]] = [],
revoked_certificates: list[RevokedCertificate] = [],
):
self._issuer_name = issuer_name
self._last_update = last_update
self._next_update = next_update
self._extensions = extensions
self._revoked_certificates = revoked_certificates
def issuer_name(
self, issuer_name: Name
) -> CertificateRevocationListBuilder:
if not isinstance(issuer_name, Name):
raise TypeError("Expecting x509.Name object.")
if self._issuer_name is not None:
raise ValueError("The issuer name may only be set once.")
return CertificateRevocationListBuilder(
issuer_name,
self._last_update,
self._next_update,
self._extensions,
self._revoked_certificates,
)
def last_update(
self, last_update: datetime.datetime
) -> CertificateRevocationListBuilder:
if not isinstance(last_update, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._last_update is not None:
raise ValueError("Last update may only be set once.")
last_update = _convert_to_naive_utc_time(last_update)
if last_update < _EARLIEST_UTC_TIME:
raise ValueError(
"The last update date must be on or after 1950 January 1."
)
if self._next_update is not None and last_update > self._next_update:
raise ValueError(
"The last update date must be before the next update date."
)
return CertificateRevocationListBuilder(
self._issuer_name,
last_update,
self._next_update,
self._extensions,
self._revoked_certificates,
)
def next_update(
self, next_update: datetime.datetime
) -> CertificateRevocationListBuilder:
if not isinstance(next_update, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._next_update is not None:
raise ValueError("Last update may only be set once.")
next_update = _convert_to_naive_utc_time(next_update)
if next_update < _EARLIEST_UTC_TIME:
raise ValueError(
"The last update date must be on or after 1950 January 1."
)
if self._last_update is not None and next_update < self._last_update:
raise ValueError(
"The next update date must be after the last update date."
)
return CertificateRevocationListBuilder(
self._issuer_name,
self._last_update,
next_update,
self._extensions,
self._revoked_certificates,
)
def add_extension(
self, extval: ExtensionType, critical: bool
) -> CertificateRevocationListBuilder:
"""
Adds an X.509 extension to the certificate revocation list.
"""
if not isinstance(extval, ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return CertificateRevocationListBuilder(
self._issuer_name,
self._last_update,
self._next_update,
[*self._extensions, extension],
self._revoked_certificates,
)
def add_revoked_certificate(
self, revoked_certificate: RevokedCertificate
) -> CertificateRevocationListBuilder:
"""
Adds a revoked certificate to the CRL.
"""
if not isinstance(revoked_certificate, RevokedCertificate):
raise TypeError("Must be an instance of RevokedCertificate")
return CertificateRevocationListBuilder(
self._issuer_name,
self._last_update,
self._next_update,
self._extensions,
[*self._revoked_certificates, revoked_certificate],
)
def sign(
self,
private_key: CertificateIssuerPrivateKeyTypes,
algorithm: _AllowedHashTypes | None,
backend: typing.Any = None,
*,
rsa_padding: padding.PSS | padding.PKCS1v15 | None = None,
ecdsa_deterministic: bool | None = None,
) -> CertificateRevocationList:
if self._issuer_name is None:
raise ValueError("A CRL must have an issuer name")
if self._last_update is None:
raise ValueError("A CRL must have a last update time")
if self._next_update is None:
raise ValueError("A CRL must have a next update time")
if rsa_padding is not None:
if not isinstance(rsa_padding, (padding.PSS, padding.PKCS1v15)):
raise TypeError("Padding must be PSS or PKCS1v15")
if not isinstance(private_key, rsa.RSAPrivateKey):
raise TypeError("Padding is only supported for RSA keys")
if ecdsa_deterministic is not None:
if not isinstance(private_key, ec.EllipticCurvePrivateKey):
raise TypeError(
"Deterministic ECDSA is only supported for EC keys"
)
return rust_x509.create_x509_crl(
self,
private_key,
algorithm,
rsa_padding,
ecdsa_deterministic,
)
| CertificateRevocationListBuilder |
python | mlflow__mlflow | mlflow/gateway/schemas/chat.py | {
"start": 1139,
"end": 1882
} | class ____(RequestModel):
"""
A tool definition for the chat endpoint with Unity Catalog integration.
The Gateway request accepts a special tool type 'uc_function' for Unity Catalog integration.
https://mlflow.org/docs/latest/llms/deployments/uc_integration.html
"""
type: Literal["function", "uc_function"]
function: FunctionToolDefinition | None = None
uc_function: UnityCatalogFunctionToolDefinition | None = None
_REQUEST_PAYLOAD_EXTRA_SCHEMA = {
"messages": [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"},
],
"temperature": 0.0,
"max_tokens": 64,
"stop": ["END"],
"n": 1,
"stream": False,
}
| ChatToolWithUC |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/rule_based_profiler/data_assistant_result/statistics_data_assistant_result.py | {
"start": 243,
"end": 1666
} | class ____(DataAssistantResult):
"""
Note (9/30/2022): Plotting functionality is experimental.
"""
@property
def metric_expectation_map(self) -> Dict[Union[str, Tuple[str]], str]:
"""
A mapping is defined for which metrics to plot and their associated expectations.
"""
return {}
@property
def metric_types(self) -> Dict[str, AltairDataTypes]:
"""
A mapping is defined for the Altair data type associated with each metric.
"""
# Altair data types can be one of:
# - Nominal: Metric is a discrete unordered category
# - Ordinal: Metric is a discrete ordered quantity
# - Quantitative: Metric is a continuous real-valued quantity
# - Temporal: Metric is a time or date value
return {
"table.columns": AltairDataTypes.NOMINAL,
"table.row_count": AltairDataTypes.QUANTITATIVE,
"column.distinct_values.count": AltairDataTypes.QUANTITATIVE,
"column.min": AltairDataTypes.QUANTITATIVE,
"column.max": AltairDataTypes.QUANTITATIVE,
"column.mean": AltairDataTypes.QUANTITATIVE,
"column.median": AltairDataTypes.QUANTITATIVE,
"column.standard_deviation": AltairDataTypes.QUANTITATIVE,
"column.quantile_values": AltairDataTypes.QUANTITATIVE,
}
| StatisticsDataAssistantResult |
python | MongoEngine__mongoengine | tests/fields/test_file_field.py | {
"start": 823,
"end": 16994
} | class ____(MongoDBTestCase):
def tearDown(self):
self.db.drop_collection("fs.files")
self.db.drop_collection("fs.chunks")
def test_file_field_optional(self):
# Make sure FileField is optional and not required
class DemoFile(Document):
the_file = FileField()
DemoFile.objects.create()
def test_file_fields(self):
"""Ensure that file fields can be written to and their data retrieved"""
class PutFile(Document):
the_file = FileField()
PutFile.drop_collection()
text = b"Hello, World!"
content_type = "text/plain"
putfile = PutFile()
putfile.the_file.put(text, content_type=content_type, filename="hello")
putfile.save()
result = PutFile.objects.first()
assert putfile == result
assert (
"%s" % result.the_file
== "<GridFSProxy: hello (%s)>" % result.the_file.grid_id
)
assert result.the_file.read() == text
assert result.the_file.content_type == content_type
result.the_file.delete() # Remove file from GridFS
PutFile.objects.delete()
# Ensure file-like objects are stored
PutFile.drop_collection()
putfile = PutFile()
putstring = BytesIO()
putstring.write(text)
putstring.seek(0)
putfile.the_file.put(putstring, content_type=content_type)
putfile.save()
result = PutFile.objects.first()
assert putfile == result
assert result.the_file.read() == text
assert result.the_file.content_type == content_type
result.the_file.delete()
def test_file_fields_stream(self):
"""Ensure that file fields can be written to and their data retrieved"""
class StreamFile(Document):
the_file = FileField()
StreamFile.drop_collection()
text = b"Hello, World!"
more_text = b"Foo Bar"
content_type = "text/plain"
streamfile = StreamFile()
streamfile.the_file.new_file(content_type=content_type)
streamfile.the_file.write(text)
streamfile.the_file.write(more_text)
streamfile.the_file.close()
streamfile.save()
result = StreamFile.objects.first()
assert streamfile == result
assert result.the_file.read() == text + more_text
assert result.the_file.content_type == content_type
result.the_file.seek(0)
assert result.the_file.tell() == 0
assert result.the_file.read(len(text)) == text
assert result.the_file.tell() == len(text)
assert result.the_file.read(len(more_text)) == more_text
assert result.the_file.tell() == len(text + more_text)
result.the_file.delete()
# Ensure deleted file returns None
assert result.the_file.read() is None
def test_file_fields_stream_after_none(self):
"""Ensure that a file field can be written to after it has been saved as
None
"""
class StreamFile(Document):
the_file = FileField()
StreamFile.drop_collection()
text = b"Hello, World!"
more_text = b"Foo Bar"
streamfile = StreamFile()
streamfile.save()
streamfile.the_file.new_file()
streamfile.the_file.write(text)
streamfile.the_file.write(more_text)
streamfile.the_file.close()
streamfile.save()
result = StreamFile.objects.first()
assert streamfile == result
assert result.the_file.read() == text + more_text
# assert result.the_file.content_type == content_type
result.the_file.seek(0)
assert result.the_file.tell() == 0
assert result.the_file.read(len(text)) == text
assert result.the_file.tell() == len(text)
assert result.the_file.read(len(more_text)) == more_text
assert result.the_file.tell() == len(text + more_text)
result.the_file.delete()
# Ensure deleted file returns None
assert result.the_file.read() is None
def test_file_fields_set(self):
class SetFile(Document):
the_file = FileField()
text = b"Hello, World!"
more_text = b"Foo Bar"
SetFile.drop_collection()
setfile = SetFile()
setfile.the_file = text
setfile.save()
result = SetFile.objects.first()
assert setfile == result
assert result.the_file.read() == text
# Try replacing file with new one
result.the_file.replace(more_text)
result.save()
result = SetFile.objects.first()
assert setfile == result
assert result.the_file.read() == more_text
result.the_file.delete()
def test_file_field_no_default(self):
class GridDocument(Document):
the_file = FileField()
GridDocument.drop_collection()
with tempfile.TemporaryFile() as f:
f.write(b"Hello World!")
f.flush()
# Test without default
doc_a = GridDocument()
doc_a.save()
doc_b = GridDocument.objects.with_id(doc_a.id)
doc_b.the_file.replace(f, filename="doc_b")
doc_b.save()
assert doc_b.the_file.grid_id is not None
# Test it matches
doc_c = GridDocument.objects.with_id(doc_b.id)
assert doc_b.the_file.grid_id == doc_c.the_file.grid_id
# Test with default
doc_d = GridDocument(the_file=b"")
doc_d.save()
doc_e = GridDocument.objects.with_id(doc_d.id)
assert doc_d.the_file.grid_id == doc_e.the_file.grid_id
doc_e.the_file.replace(f, filename="doc_e")
doc_e.save()
doc_f = GridDocument.objects.with_id(doc_e.id)
assert doc_e.the_file.grid_id == doc_f.the_file.grid_id
db = GridDocument._get_db()
grid_fs = gridfs.GridFS(db)
assert ["doc_b", "doc_e"] == grid_fs.list()
def test_file_uniqueness(self):
"""Ensure that each instance of a FileField is unique"""
class TestFile(Document):
name = StringField()
the_file = FileField()
# First instance
test_file = TestFile()
test_file.name = "Hello, World!"
test_file.the_file.put(b"Hello, World!")
test_file.save()
# Second instance
test_file_dupe = TestFile()
data = test_file_dupe.the_file.read() # Should be None
assert test_file.name != test_file_dupe.name
assert test_file.the_file.read() != data
TestFile.drop_collection()
def test_file_saving(self):
"""Ensure you can add meta data to file"""
class Animal(Document):
genus = StringField()
family = StringField()
photo = FileField()
Animal.drop_collection()
marmot = Animal(genus="Marmota", family="Sciuridae")
marmot_photo_content = get_file(TEST_IMAGE_PATH) # Retrieve a photo from disk
marmot.photo.put(marmot_photo_content, content_type="image/jpeg", foo="bar")
marmot.photo.close()
marmot.save()
marmot = Animal.objects.get()
assert marmot.photo.content_type == "image/jpeg"
assert marmot.photo.foo == "bar"
def test_file_reassigning(self):
class TestFile(Document):
the_file = FileField()
TestFile.drop_collection()
test_file = TestFile(the_file=get_file(TEST_IMAGE_PATH)).save()
assert test_file.the_file.get().length == 8313
test_file = TestFile.objects.first()
test_file.the_file = get_file(TEST_IMAGE2_PATH)
test_file.save()
assert test_file.the_file.get().length == 4971
def test_file_boolean(self):
"""Ensure that a boolean test of a FileField indicates its presence"""
class TestFile(Document):
the_file = FileField()
TestFile.drop_collection()
test_file = TestFile()
assert not bool(test_file.the_file)
test_file.the_file.put(b"Hello, World!", content_type="text/plain")
test_file.save()
assert bool(test_file.the_file)
test_file = TestFile.objects.first()
assert test_file.the_file.content_type == "text/plain"
def test_file_cmp(self):
"""Test comparing against other types"""
class TestFile(Document):
the_file = FileField()
test_file = TestFile()
assert test_file.the_file not in [{"test": 1}]
def test_file_disk_space(self):
"""Test disk space usage when we delete/replace a file"""
class TestFile(Document):
the_file = FileField()
text = b"Hello, World!"
content_type = "text/plain"
testfile = TestFile()
testfile.the_file.put(text, content_type=content_type, filename="hello")
testfile.save()
# Now check fs.files and fs.chunks
db = TestFile._get_db()
files = db.fs.files.find()
chunks = db.fs.chunks.find()
assert len(list(files)) == 1
assert len(list(chunks)) == 1
# Deleting the document should delete the files
testfile.delete()
files = db.fs.files.find()
chunks = db.fs.chunks.find()
assert len(list(files)) == 0
assert len(list(chunks)) == 0
# Test case where we don't store a file in the first place
testfile = TestFile()
testfile.save()
files = db.fs.files.find()
chunks = db.fs.chunks.find()
assert len(list(files)) == 0
assert len(list(chunks)) == 0
testfile.delete()
files = db.fs.files.find()
chunks = db.fs.chunks.find()
assert len(list(files)) == 0
assert len(list(chunks)) == 0
# Test case where we overwrite the file
testfile = TestFile()
testfile.the_file.put(text, content_type=content_type, filename="hello")
testfile.save()
text = b"Bonjour, World!"
testfile.the_file.replace(text, content_type=content_type, filename="hello")
testfile.save()
files = db.fs.files.find()
chunks = db.fs.chunks.find()
assert len(list(files)) == 1
assert len(list(chunks)) == 1
testfile.delete()
files = db.fs.files.find()
chunks = db.fs.chunks.find()
assert len(list(files)) == 0
assert len(list(chunks)) == 0
@require_pil
def test_image_field(self):
class TestImage(Document):
image = ImageField()
TestImage.drop_collection()
with tempfile.TemporaryFile() as f:
f.write(b"Hello World!")
f.flush()
t = TestImage()
try:
t.image.put(f)
self.fail("Should have raised an invalidation error")
except ValidationError as e:
assert "%s" % e == "Invalid image: cannot identify image file %s" % f
t = TestImage()
t.image.put(get_file(TEST_IMAGE_PATH))
t.save()
t = TestImage.objects.first()
assert t.image.format == "PNG"
w, h = t.image.size
assert w == 371
assert h == 76
t.image.delete()
@require_pil
def test_image_field_reassigning(self):
class TestFile(Document):
the_file = ImageField()
TestFile.drop_collection()
test_file = TestFile(the_file=get_file(TEST_IMAGE_PATH)).save()
assert test_file.the_file.size == (371, 76)
test_file = TestFile.objects.first()
test_file.the_file = get_file(TEST_IMAGE2_PATH)
test_file.save()
assert test_file.the_file.size == (45, 101)
@require_pil
def test_image_field_resize(self):
class TestImage(Document):
image = ImageField(size=(185, 37, True))
TestImage.drop_collection()
t = TestImage()
t.image.put(get_file(TEST_IMAGE_PATH))
t.save()
t = TestImage.objects.first()
assert t.image.format == "PNG"
w, h = t.image.size
assert w == 185
assert h == 37
t.image.delete()
@require_pil
def test_image_field_resize_force(self):
class TestImage(Document):
image = ImageField(size=(185, 37, True))
TestImage.drop_collection()
t = TestImage()
t.image.put(get_file(TEST_IMAGE_PATH))
t.save()
t = TestImage.objects.first()
assert t.image.format == "PNG"
w, h = t.image.size
assert w == 185
assert h == 37
t.image.delete()
@require_pil
def test_image_field_thumbnail(self):
class TestImage(Document):
image = ImageField(thumbnail_size=(92, 18, True))
TestImage.drop_collection()
t = TestImage()
t.image.put(get_file(TEST_IMAGE_PATH))
t.save()
t = TestImage.objects.first()
assert t.image.thumbnail.format == "PNG"
assert t.image.thumbnail.width == 92
assert t.image.thumbnail.height == 18
t.image.delete()
def test_file_multidb(self):
register_connection("test_files", "test_files")
class TestFile(Document):
name = StringField()
the_file = FileField(db_alias="test_files", collection_name="macumba")
TestFile.drop_collection()
# delete old filesystem
get_db("test_files").macumba.files.drop()
get_db("test_files").macumba.chunks.drop()
# First instance
test_file = TestFile()
test_file.name = "Hello, World!"
test_file.the_file.put(b"Hello, World!", name="hello.txt")
test_file.save()
data = get_db("test_files").macumba.files.find_one()
assert data.get("name") == "hello.txt"
test_file = TestFile.objects.first()
assert test_file.the_file.read() == b"Hello, World!"
test_file = TestFile.objects.first()
test_file.the_file = b"Hello, World!"
test_file.save()
test_file = TestFile.objects.first()
assert test_file.the_file.read() == b"Hello, World!"
def test_copyable(self):
class PutFile(Document):
the_file = FileField()
PutFile.drop_collection()
text = b"Hello, World!"
content_type = "text/plain"
putfile = PutFile()
putfile.the_file.put(text, content_type=content_type)
putfile.save()
class TestFile(Document):
name = StringField()
assert putfile == copy.copy(putfile)
assert putfile == copy.deepcopy(putfile)
@require_pil
def test_get_image_by_grid_id(self):
class TestImage(Document):
image1 = ImageField()
image2 = ImageField()
TestImage.drop_collection()
t = TestImage()
t.image1.put(get_file(TEST_IMAGE_PATH))
t.image2.put(get_file(TEST_IMAGE2_PATH))
t.save()
test = TestImage.objects.first()
grid_id = test.image1.grid_id
assert 1 == TestImage.objects(Q(image1=grid_id) or Q(image2=grid_id)).count()
def test_complex_field_filefield(self):
"""Ensure you can add meta data to file"""
class Animal(Document):
genus = StringField()
family = StringField()
photos = ListField(FileField())
Animal.drop_collection()
marmot = Animal(genus="Marmota", family="Sciuridae")
with open(TEST_IMAGE_PATH, "rb") as marmot_photo: # Retrieve a photo from disk
photos_field = marmot._fields["photos"].field
new_proxy = photos_field.get_proxy_obj("photos", marmot)
new_proxy.put(marmot_photo, content_type="image/jpeg", foo="bar")
marmot.photos.append(new_proxy)
marmot.save()
marmot = Animal.objects.get()
assert marmot.photos[0].content_type == "image/jpeg"
assert marmot.photos[0].foo == "bar"
assert marmot.photos[0].get().length == 8313
if __name__ == "__main__":
unittest.main()
| TestFileField |
python | falconry__falcon | tests/test_request_media.py | {
"start": 1340,
"end": 1619
} | class ____:
def __init__(self, expected_error):
self._expected_error = expected_error
def on_post(self, req, resp, **kwargs):
with pytest.raises(self._expected_error) as error:
req.media
self.captured_error = error
| ResourceInvalidMedia |
python | scrapy__scrapy | tests/test_pipeline_files.py | {
"start": 1999,
"end": 9353
} | class ____:
def setup_method(self):
self.tempdir = mkdtemp()
settings_dict = {"FILES_STORE": self.tempdir}
crawler = get_crawler(DefaultSpider, settings_dict=settings_dict)
crawler.spider = crawler._create_spider()
self.pipeline = FilesPipeline.from_crawler(crawler)
self.pipeline.download_func = _mocked_download_func
self.pipeline.open_spider()
def teardown_method(self):
rmtree(self.tempdir)
def test_file_path(self):
file_path = self.pipeline.file_path
assert (
file_path(Request("https://dev.mydeco.com/mydeco.pdf"))
== "full/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf"
)
assert (
file_path(
Request(
"http://www.maddiebrown.co.uk///catalogue-items//image_54642_12175_95307.txt"
)
)
== "full/4ce274dd83db0368bafd7e406f382ae088e39219.txt"
)
assert (
file_path(
Request("https://dev.mydeco.com/two/dirs/with%20spaces%2Bsigns.doc")
)
== "full/94ccc495a17b9ac5d40e3eabf3afcb8c2c9b9e1a.doc"
)
assert (
file_path(
Request(
"http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg"
)
)
== "full/4507be485f38b0da8a0be9eb2e1dfab8a19223f2.jpg"
)
assert (
file_path(Request("http://www.dorma.co.uk/images/product_details/2532/"))
== "full/97ee6f8a46cbbb418ea91502fd24176865cf39b2"
)
assert (
file_path(Request("http://www.dorma.co.uk/images/product_details/2532"))
== "full/244e0dd7d96a3b7b01f54eded250c9e272577aa1"
)
assert (
file_path(
Request("http://www.dorma.co.uk/images/product_details/2532"),
response=Response("http://www.dorma.co.uk/images/product_details/2532"),
info=object(),
)
== "full/244e0dd7d96a3b7b01f54eded250c9e272577aa1"
)
assert (
file_path(
Request(
"http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg.bohaha"
)
)
== "full/76c00cef2ef669ae65052661f68d451162829507"
)
assert (
file_path(
Request(
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAR0AAACxCAMAAADOHZloAAACClBMVEX/\
//+F0tzCwMK76ZKQ21AMqr7oAAC96JvD5aWM2kvZ78J0N7fmAAC46Y4Ap7y"
)
)
== "full/178059cbeba2e34120a67f2dc1afc3ecc09b61cb.png"
)
def test_fs_store(self):
assert isinstance(self.pipeline.store, FSFilesStore)
assert self.pipeline.store.basedir == self.tempdir
path = "some/image/key.jpg"
fullpath = Path(self.tempdir, "some", "image", "key.jpg")
assert self.pipeline.store._get_filesystem_path(path) == fullpath
@inlineCallbacks
def test_file_not_expired(self):
item_url = "http://example.com/file.pdf"
item = _create_item_with_files(item_url)
patchers = [
mock.patch.object(FilesPipeline, "inc_stats", return_value=True),
mock.patch.object(
FSFilesStore,
"stat_file",
return_value={"checksum": "abc", "last_modified": time.time()},
),
mock.patch.object(
FilesPipeline,
"get_media_requests",
return_value=[_prepare_request_object(item_url)],
),
]
for p in patchers:
p.start()
result = yield self.pipeline.process_item(item)
assert result["files"][0]["checksum"] == "abc"
assert result["files"][0]["status"] == "uptodate"
for p in patchers:
p.stop()
@inlineCallbacks
def test_file_expired(self):
item_url = "http://example.com/file2.pdf"
item = _create_item_with_files(item_url)
patchers = [
mock.patch.object(
FSFilesStore,
"stat_file",
return_value={
"checksum": "abc",
"last_modified": time.time()
- (self.pipeline.expires * 60 * 60 * 24 * 2),
},
),
mock.patch.object(
FilesPipeline,
"get_media_requests",
return_value=[_prepare_request_object(item_url)],
),
mock.patch.object(FilesPipeline, "inc_stats", return_value=True),
]
for p in patchers:
p.start()
result = yield self.pipeline.process_item(item)
assert result["files"][0]["checksum"] != "abc"
assert result["files"][0]["status"] == "downloaded"
for p in patchers:
p.stop()
@inlineCallbacks
def test_file_cached(self):
item_url = "http://example.com/file3.pdf"
item = _create_item_with_files(item_url)
patchers = [
mock.patch.object(FilesPipeline, "inc_stats", return_value=True),
mock.patch.object(
FSFilesStore,
"stat_file",
return_value={
"checksum": "abc",
"last_modified": time.time()
- (self.pipeline.expires * 60 * 60 * 24 * 2),
},
),
mock.patch.object(
FilesPipeline,
"get_media_requests",
return_value=[_prepare_request_object(item_url, flags=["cached"])],
),
]
for p in patchers:
p.start()
result = yield self.pipeline.process_item(item)
assert result["files"][0]["checksum"] != "abc"
assert result["files"][0]["status"] == "cached"
for p in patchers:
p.stop()
def test_file_path_from_item(self):
"""
Custom file path based on item data, overriding default implementation
"""
class CustomFilesPipeline(FilesPipeline):
def file_path(self, request, response=None, info=None, item=None):
return f"full/{item.get('path')}"
file_path = CustomFilesPipeline.from_crawler(
get_crawler(None, {"FILES_STORE": self.tempdir})
).file_path
item = {"path": "path-to-store-file"}
request = Request("http://example.com")
assert file_path(request, item=item) == "full/path-to-store-file"
@pytest.mark.parametrize(
"bad_type",
[
"http://example.com/file.pdf",
("http://example.com/file.pdf",),
{"url": "http://example.com/file.pdf"},
123,
None,
],
)
def test_rejects_non_list_file_urls(self, tmp_path, bad_type):
pipeline = FilesPipeline.from_crawler(
get_crawler(None, {"FILES_STORE": str(tmp_path)})
)
item = ItemWithFiles()
item["file_urls"] = bad_type
with pytest.raises(TypeError, match="file_urls must be a list of URLs"):
list(pipeline.get_media_requests(item, None))
| TestFilesPipeline |
python | huggingface__transformers | src/transformers/models/pop2piano/configuration_pop2piano.py | {
"start": 786,
"end": 6005
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Pop2PianoForConditionalGeneration`]. It is used
to instantiate a Pop2PianoForConditionalGeneration model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
Pop2Piano [sweetcocoa/pop2piano](https://huggingface.co/sweetcocoa/pop2piano) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 2400):
Vocabulary size of the `Pop2PianoForConditionalGeneration` model. Defines the number of different tokens
that can be represented by the `inputs_ids` passed when calling [`Pop2PianoForConditionalGeneration`].
composer_vocab_size (`int`, *optional*, defaults to 21):
Denotes the number of composers.
d_model (`int`, *optional*, defaults to 512):
Size of the encoder layers and the pooler layer.
d_kv (`int`, *optional*, defaults to 64):
Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will
be defined as `num_heads * d_kv`.
d_ff (`int`, *optional*, defaults to 2048):
Size of the intermediate feed forward layer in each `Pop2PianoBlock`.
num_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
num_decoder_layers (`int`, *optional*):
Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
num_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
The number of buckets to use for each attention layer.
relative_attention_max_distance (`int`, *optional*, defaults to 128):
The maximum distance of the longer sequences for the bucket separation.
dropout_rate (`float`, *optional*, defaults to 0.1):
The ratio for all dropout layers.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
The epsilon used by the layer normalization layers.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
testing).
feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`):
Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
dense_act_fn (`string`, *optional*, defaults to `"relu"`):
Type of Activation Function to be used in `Pop2PianoDenseActDense` and in `Pop2PianoDenseGatedActDense`.
"""
model_type = "pop2piano"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=2400,
composer_vocab_size=21,
d_model=512,
d_kv=64,
d_ff=2048,
num_layers=6,
num_decoder_layers=None,
num_heads=8,
relative_attention_num_buckets=32,
relative_attention_max_distance=128,
dropout_rate=0.1,
layer_norm_epsilon=1e-6,
initializer_factor=1.0,
feed_forward_proj="gated-gelu",
is_encoder_decoder=True,
use_cache=True,
pad_token_id=0,
eos_token_id=1,
dense_act_fn="relu",
**kwargs,
):
self.vocab_size = vocab_size
self.composer_vocab_size = composer_vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
self.dense_act_fn = dense_act_fn
self.is_gated_act = self.feed_forward_proj.split("-")[0] == "gated"
self.hidden_size = self.d_model
self.num_attention_heads = num_heads
self.num_hidden_layers = num_layers
super().__init__(
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
**kwargs,
)
self.tie_encoder_decoder = True # forcing it
__all__ = ["Pop2PianoConfig"]
| Pop2PianoConfig |
python | pytorch__pytorch | torch/_inductor/codecache.py | {
"start": 173260,
"end": 174474
} | class ____(CodeCacheFuture):
"""
A statically launchable CachingAutotuner, loaded from TritonBundler
"""
def __init__(self, static_autotuner: CachingAutotuner) -> None:
# Pickled version of CachingAutotuner
self.static_autotuner = static_autotuner
# This needs to be set in AsyncCompile.triton, in case
# we need to reload the CachingAutotuner from its source code
# We don't store the source code on the CachingAutotuner itself
# since it can be very large.
self.reload_kernel_from_src: Callable[[], Any] | None = None
def result(self) -> CachingAutotuner:
assert self.reload_kernel_from_src is not None
with dynamo_timed("StaticAutotunerFuture.warm_precompile"):
self.static_autotuner.recheck_autotune_cache(
reload_kernel_from_src=self.reload_kernel_from_src
)
self.static_autotuner.precompile( # type: ignore[union-attr]
warm_cache_only=False,
reload_kernel=self.reload_kernel_from_src,
static_triton_bundle_key=None, # no need to save again
)
return self.static_autotuner
| StaticAutotunerFuture |
python | pytorch__pytorch | torch/onnx/_internal/exporter/_registration.py | {
"start": 1243,
"end": 6066
} | class ____:
"""A wrapper of onnx-script function with additional metadata.
onnx_function: The onnx-script function from torchlib.
fx_target: The PyTorch node callable target.
signature: The ONNX signature of the function. When None, the signature is inferred.
is_custom: Whether the function is a custom function.
is_complex: Whether the function is a function that handles complex valued inputs.
opset_introduced:
The ONNX opset version in which the function was introduced.
Its specifies the minimum ONNX opset version required to use the function.
device: The device the function is registered to. If None, it is registered to all devices.
skip_signature_inference: Whether to skip signature inference for the function.
"""
onnx_function: Callable
fx_target: TorchOp
signature: _schemas.OpSignature | None
is_custom: bool = False
is_complex: bool = False
opset_introduced: int = 18
device: Literal["cuda", "cpu"] | str | None = None # noqa: PYI051
skip_signature_inference: bool = False
def __post_init__(self) -> None:
if self.signature is None and not self.skip_signature_inference:
try:
if isinstance(self.onnx_function, onnxscript.OnnxFunction):
signature = _schemas.OpSignature.from_function( # type: ignore[attr-defined]
self.onnx_function,
# pyrefly: ignore [missing-attribute]
self.onnx_function.function_ir.domain,
# pyrefly: ignore [missing-attribute]
self.onnx_function.name,
# pyrefly: ignore [missing-attribute]
opset_version=self.onnx_function.opset.version,
)
else:
signature = _schemas.OpSignature.from_function(
self.onnx_function, "__traced", self.onnx_function.__name__
)
except Exception as e:
# Log an warning if the op is custom. Raise exception for builtin ops.
if not self.is_custom:
raise
else:
# When the function is targeting an HOP, for example, it will accept
# functions as arguments and fail to generate an ONNX signature.
# In this case we set signature to None and dispatch to this function always.
logger.warning( # noqa: G200
"Failed to infer the signature for function '%s' because '%s'"
"All nodes targeting `%s` will be dispatched to this function",
self.onnx_function,
e,
self.fx_target,
)
else:
self.signature = signature
self.onnx_function._pt_onnx_signature = signature # type: ignore[attr-defined]
def _get_overload(qualified_name: str) -> torch._ops.OpOverload | None:
"""Obtain the torch op from <namespace>::<op_name>[.<overload>]"""
# TODO(justinchuby): Handle arbitrary custom ops
namespace, opname_overload = qualified_name.split("::")
op_name, *maybe_overload = opname_overload.split(".", 1)
if namespace == "_operator":
# Builtin functions
return getattr(operator, op_name)
if namespace == "math":
return getattr(math, op_name)
if namespace == "torchvision":
if importlib.util.find_spec("torchvision") is None:
logger.warning("torchvision is not installed. Skipping %s", qualified_name)
return None
try:
op_packet = getattr(getattr(torch.ops, namespace), op_name)
if maybe_overload:
overload = maybe_overload[0]
elif "default" in op_packet._overload_names or "" in op_packet._overload_names:
# Has a default overload
overload = "default"
else:
logger.warning(
"'%s' does not have a 'default' overload. This could be an error in specifying the op name. Ignoring.",
qualified_name,
stacklevel=1,
)
return None
return getattr(op_packet, overload) # type: ignore[call-overload]
except AttributeError:
if qualified_name.endswith("getitem"):
# This is a special case where we registered the function incorrectly,
# but for BC reasons (pt<=2.4) we need to keep it.
return None
logger.info("'%s' is not found in this version of PyTorch.", qualified_name)
return None
except Exception:
logger.exception("Failed to find torch op '%s'", qualified_name)
return None
| OnnxDecompMeta |
python | tensorflow__tensorflow | tensorflow/python/keras/utils/generic_utils.py | {
"start": 5175,
"end": 5497
} | class ____(object):
"""The default shared object loading scope. It does nothing.
Created to simplify serialization code that doesn't care about shared objects
(e.g. when serializing a single object).
"""
def get(self, unused_object_id):
return None
def set(self, object_id, obj):
pass
| NoopLoadingScope |
python | bokeh__bokeh | tests/unit/bokeh/models/test_plots.py | {
"start": 12017,
"end": 17639
} | class ____(BaseTwinAxis):
"""Test whether extra x and y ranges can be Range1d"""
@staticmethod
def get_range_instance():
return Range1d(0, 42)
def test_plot_with_no_title_specified_creates_an_empty_title() -> None:
plot = Plot()
assert plot.title.text == ""
def test_plot_if_title_is_converted_from_string_to_Title() -> None:
plot = Plot()
plot.title = "A title"
plot.title.text_color = "olive"
assert isinstance(plot.title, Title)
assert plot.title.text == "A title"
assert plot.title.text_color == "olive"
def test__check_required_scale_has_scales() -> None:
plot = Plot()
check = plot._check_required_scale()
assert check == []
def test__check_required_scale_missing_scales() -> None:
with pytest.raises(ValueError):
Plot(x_scale=None, y_scale=None)
def test__check_compatible_scale_and_ranges_compat_numeric() -> None:
plot = Plot(x_scale=LinearScale(), x_range=Range1d())
check = plot._check_compatible_scale_and_ranges()
assert check == []
plot = Plot(y_scale=LogScale(), y_range=DataRange1d())
check = plot._check_compatible_scale_and_ranges()
assert check == []
def test__check_compatible_scale_and_ranges_compat_factor() -> None:
plot = Plot(x_scale=CategoricalScale(), x_range=FactorRange())
check = plot._check_compatible_scale_and_ranges()
assert check == []
def test__check_compatible_scale_and_ranges_incompat_numeric_scale_and_factor_range() -> None:
plot = Plot(x_scale=LinearScale(), x_range=FactorRange())
check = plot._check_compatible_scale_and_ranges()
assert check != []
def test__check_compatible_scale_and_ranges_incompat_factor_scale_and_numeric_range() -> None:
plot = Plot(x_scale=CategoricalScale(), x_range=DataRange1d())
check = plot._check_compatible_scale_and_ranges()
assert check != []
@pytest.mark.parametrize("test_input, provider", [
("OpenStreetMap Mapnik", xyz.OpenStreetMap.Mapnik),
("OSM", xyz.OpenStreetMap.Mapnik),
("CARTODBPOSITRON", xyz.CartoDB.Positron),
("CARTODBPOSITRON_RETINA", xyz.CartoDB.Positron),
("STAMEN_TERRAIN", xyz.Stadia.StamenTerrain),
("STAMEN_TERRAIN_RETINA", xyz.Stadia.StamenTerrain),
("STAMEN_TONER", xyz.Stadia.StamenToner),
("STAMEN_TONER_BACKGROUND", xyz.Stadia.StamenTonerBackground),
("STAMEN_TONER_LABELS", xyz.Stadia.StamenTonerLabels),
("ESRI_IMAGERY", xyz.Esri.WorldImagery),
(xyz.Stadia.StamenTerrain, xyz.Stadia.StamenTerrain),
])
def test_add_tile(test_input, provider):
plot = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000),
x_axis_type="mercator", y_axis_type="mercator")
plot.add_tile(test_input)
tile_source = plot.renderers[0].tile_source
sf = "@2x" if "RETINA" in test_input else None
assert tile_source.url == provider.build_url(scale_factor=sf)
assert tile_source.attribution == provider.html_attribution
if hasattr(provider, "max_zoom"):
assert tile_source.max_zoom == provider.max_zoom
# test retina keyword
if "RETINA" not in test_input and "{r}" in provider.url:
plot2 = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000),
x_axis_type="mercator", y_axis_type="mercator")
plot2.add_tile(test_input, retina=True)
tile_source2 = plot2.renderers[0].tile_source
assert tile_source2.url == provider.build_url(scale_factor="@2x")
def test_add_tile_tilesource():
mapnik = xyz.OpenStreetMap.Mapnik
tilesource = WMTSTileSource(
url=mapnik.build_url(),
attribution=mapnik.html_attribution,
min_zoom=mapnik.get("min_zoom", 0),
max_zoom=mapnik.get("max_zoom", 30),
)
plot = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000),
x_axis_type="mercator", y_axis_type="mercator")
plot.add_tile(tilesource)
tile_source = plot.renderers[0].tile_source
assert tile_source.url == mapnik.build_url()
assert tile_source.attribution == mapnik.html_attribution
def test_Plot_add_tools() -> None:
plot = Plot()
assert len(plot.tools) == 0
pan = PanTool()
plot.add_tools(pan)
assert plot.tools == [pan]
zoom_in = ZoomInTool()
zoom_out = ZoomOutTool()
plot.add_tools("reset", zoom_in, zoom_out)
assert plot.tools[0] == pan
assert isinstance(plot.tools[1], ResetTool)
assert plot.tools[2:] == [zoom_in, zoom_out]
with pytest.raises(ValueError):
plot.add_tools("foobar")
with pytest.raises(ValueError):
plot.add_tools(0)
def test_remove_tools_single():
pan = PanTool()
reset = ResetTool()
plot = Plot(tools=[pan, reset])
plot.remove_tools(pan)
assert len(plot.tools) == 1
assert plot.tools[0] == reset
def test_remove_tools_multiple():
pan = PanTool()
reset = ResetTool()
plot = Plot(tools=[pan, reset])
plot.remove_tools(pan, reset)
assert len(plot.tools) == 0
def test_remove_tools_invalid():
pan = PanTool()
reset = ResetTool()
plot = Plot(tools=[pan, reset])
zoom_in = ZoomInTool()
with pytest.raises(ValueError) as e:
plot.remove_tools(zoom_in)
assert str(e.value).startswith("ValueError: Invalid tool ZoomInTool")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
| TestLinearTwinAxis |
python | vyperlang__vyper | vyper/warnings.py | {
"start": 1579,
"end": 1670
} | class ____(VyperWarning):
"""
General deprecation warning
"""
pass
| Deprecation |
python | encode__django-rest-framework | tests/test_relations_slug.py | {
"start": 7258,
"end": 11817
} | class ____(TestCase):
def setUp(self):
target = ForeignKeyTarget(name='target-1')
target.save()
for idx in range(1, 4):
if idx == 3:
target = None
source = NullableForeignKeySource(name='source-%d' % idx, target=target)
source.save()
def test_foreign_key_retrieve_with_null(self):
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 'target-1'},
{'id': 2, 'name': 'source-2', 'target': 'target-1'},
{'id': 3, 'name': 'source-3', 'target': None},
]
assert serializer.data == expected
def test_foreign_key_create_with_valid_null(self):
data = {'id': 4, 'name': 'source-4', 'target': None}
serializer = NullableForeignKeySourceSerializer(data=data)
assert serializer.is_valid()
obj = serializer.save()
assert serializer.data == data
assert obj.name == 'source-4'
# Ensure source 4 is created, and everything else is as expected
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 'target-1'},
{'id': 2, 'name': 'source-2', 'target': 'target-1'},
{'id': 3, 'name': 'source-3', 'target': None},
{'id': 4, 'name': 'source-4', 'target': None}
]
assert serializer.data == expected
def test_foreign_key_create_with_valid_emptystring(self):
"""
The emptystring should be interpreted as null in the context
of relationships.
"""
data = {'id': 4, 'name': 'source-4', 'target': ''}
expected_data = {'id': 4, 'name': 'source-4', 'target': None}
serializer = NullableForeignKeySourceSerializer(data=data)
assert serializer.is_valid()
obj = serializer.save()
assert serializer.data == expected_data
assert obj.name == 'source-4'
# Ensure source 4 is created, and everything else is as expected
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 'target-1'},
{'id': 2, 'name': 'source-2', 'target': 'target-1'},
{'id': 3, 'name': 'source-3', 'target': None},
{'id': 4, 'name': 'source-4', 'target': None}
]
assert serializer.data == expected
def test_foreign_key_update_with_valid_null(self):
data = {'id': 1, 'name': 'source-1', 'target': None}
instance = NullableForeignKeySource.objects.get(pk=1)
serializer = NullableForeignKeySourceSerializer(instance, data=data)
assert serializer.is_valid()
serializer.save()
assert serializer.data == data
# Ensure source 1 is updated, and everything else is as expected
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': None},
{'id': 2, 'name': 'source-2', 'target': 'target-1'},
{'id': 3, 'name': 'source-3', 'target': None}
]
assert serializer.data == expected
def test_foreign_key_update_with_valid_emptystring(self):
"""
The emptystring should be interpreted as null in the context
of relationships.
"""
data = {'id': 1, 'name': 'source-1', 'target': ''}
expected_data = {'id': 1, 'name': 'source-1', 'target': None}
instance = NullableForeignKeySource.objects.get(pk=1)
serializer = NullableForeignKeySourceSerializer(instance, data=data)
assert serializer.is_valid()
serializer.save()
assert serializer.data == expected_data
# Ensure source 1 is updated, and everything else is as expected
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': None},
{'id': 2, 'name': 'source-2', 'target': 'target-1'},
{'id': 3, 'name': 'source-3', 'target': None}
]
assert serializer.data == expected
| SlugNullableForeignKeyTests |
python | boto__boto3 | boto3/docs/subresource.py | {
"start": 937,
"end": 5766
} | class ____(NestedDocumenter):
def document_sub_resources(self, section):
add_resource_type_overview(
section=section,
resource_type='Sub-resources',
description=(
'Sub-resources are methods that create a new instance of a'
' child resource. This resource\'s identifiers get passed'
' along to the child.'
),
intro_link='subresources_intro',
)
sub_resources = sorted(
self._resource.meta.resource_model.subresources,
key=lambda sub_resource: sub_resource.name,
)
sub_resources_list = []
self.member_map['sub-resources'] = sub_resources_list
for sub_resource in sub_resources:
sub_resources_list.append(sub_resource.name)
# Create a new DocumentStructure for each sub_resource and add contents.
sub_resource_doc = DocumentStructure(
sub_resource.name, target='html'
)
breadcrumb_section = sub_resource_doc.add_new_section('breadcrumb')
breadcrumb_section.style.ref(self._resource_class_name, 'index')
breadcrumb_section.write(f' / Sub-Resource / {sub_resource.name}')
sub_resource_doc.add_title_section(sub_resource.name)
sub_resource_section = sub_resource_doc.add_new_section(
sub_resource.name,
context={'qualifier': f'{self.class_name}.'},
)
document_sub_resource(
section=sub_resource_section,
resource_name=self._resource_name,
sub_resource_model=sub_resource,
service_model=self._service_model,
)
# Write sub_resources in individual/nested files.
# Path: <root>/reference/services/<service>/<resource_name>/<sub_resource_name>.rst
sub_resources_dir_path = os.path.join(
self._root_docs_path,
f'{self._service_name}',
f'{self._resource_sub_path}',
)
sub_resource_doc.write_to_file(
sub_resources_dir_path, sub_resource.name
)
def document_sub_resource(
section,
resource_name,
sub_resource_model,
service_model,
include_signature=True,
):
"""Documents a resource action
:param section: The section to write to
:param resource_name: The name of the resource
:param sub_resource_model: The model of the subresource
:param service_model: The model of the service
:param include_signature: Whether or not to include the signature.
It is useful for generating docstrings.
"""
identifiers_needed = []
for identifier in sub_resource_model.resource.identifiers:
if identifier.source == 'input':
identifiers_needed.append(xform_name(identifier.target))
if include_signature:
signature_args = get_identifier_args_for_signature(identifiers_needed)
full_sub_resource_name = (
f"{section.context.get('qualifier', '')}{sub_resource_model.name}"
)
section.style.start_sphinx_py_method(
full_sub_resource_name, signature_args
)
method_intro_section = section.add_new_section('method-intro')
description = f'Creates a {sub_resource_model.resource.type} resource.'
method_intro_section.include_doc_string(description)
example_section = section.add_new_section('example')
example_values = get_identifier_values_for_example(identifiers_needed)
example_resource_name = xform_name(resource_name)
if service_model.service_name == resource_name:
example_resource_name = resource_name
example = f'{xform_name(sub_resource_model.resource.type)} = {example_resource_name}.{sub_resource_model.name}({example_values})'
example_section.style.start_codeblock()
example_section.write(example)
example_section.style.end_codeblock()
param_section = section.add_new_section('params')
for identifier in identifiers_needed:
description = get_identifier_description(
sub_resource_model.name, identifier
)
param_section.write(f':type {identifier}: string')
param_section.style.new_line()
param_section.write(f':param {identifier}: {description}')
param_section.style.new_line()
return_section = section.add_new_section('return')
return_section.style.new_line()
return_section.write(
f':rtype: :py:class:`{get_service_module_name(service_model)}.{sub_resource_model.resource.type}`'
)
return_section.style.new_line()
return_section.write(
f':returns: A {sub_resource_model.resource.type} resource'
)
return_section.style.new_line()
| SubResourceDocumenter |
python | ray-project__ray | rllib/evaluation/sample_batch_builder.py | {
"start": 2215,
"end": 10039
} | class ____:
    """Util to build SampleBatches for each policy in a multi-agent env.
    Input data is per-agent, while output data is per-policy. There is an M:N
    mapping between agents and policies. We retain one local batch builder
    per agent. When an agent is done, then its local batch is appended into the
    corresponding policy batch for the agent's policy.
    """
    def __init__(
        self,
        policy_map: Dict[PolicyID, Policy],
        clip_rewards: bool,
        callbacks: "RLlibCallback",
    ):
        """Initialize a MultiAgentSampleBatchBuilder.
        Args:
            policy_map (Dict[str,Policy]): Maps policy ids to policy instances.
            clip_rewards (Union[bool,float]): Whether to clip rewards before
                postprocessing (at +/-1.0) or the actual value to +/- clip.
            callbacks: RLlib callbacks.
        """
        # This class is deprecated; warn once per process but keep working.
        if log_once("MultiAgentSampleBatchBuilder"):
            deprecation_warning(old="MultiAgentSampleBatchBuilder", error=False)
        self.policy_map = policy_map
        self.clip_rewards = clip_rewards
        # Build the Policies' SampleBatchBuilders.
        self.policy_builders = {k: SampleBatchBuilder() for k in policy_map.keys()}
        # Whenever we observe a new agent, add a new SampleBatchBuilder for
        # this agent.
        self.agent_builders = {}
        # Internal agent-to-policy map.
        self.agent_to_policy = {}
        self.callbacks = callbacks
        # Number of "inference" steps taken in the environment.
        # Regardless of the number of agents involved in each of these steps.
        self.count = 0
    def total(self) -> int:
        """Returns the total number of steps taken in the env (all agents).
        Returns:
            int: The number of steps taken in total in the environment over all
            agents.
        """
        return sum(a.count for a in self.agent_builders.values())
    def has_pending_agent_data(self) -> bool:
        """Returns whether there is pending unprocessed data.
        Returns:
            bool: True if there is at least one per-agent builder (with data
            in it).
        """
        return len(self.agent_builders) > 0
    def add_values(self, agent_id: AgentID, policy_id: AgentID, **values: Any) -> None:
        """Add the given dictionary (row) of values to this batch.
        Args:
            agent_id: Unique id for the agent we are adding values for.
            policy_id: Unique id for policy controlling the agent.
            values: Row of values to add for this agent.
        """
        # Lazily create a builder (and record the controlling policy) the
        # first time we see a given agent.
        if agent_id not in self.agent_builders:
            self.agent_builders[agent_id] = SampleBatchBuilder()
            self.agent_to_policy[agent_id] = policy_id
        # Include the current agent id for multi-agent algorithms.
        if agent_id != _DUMMY_AGENT_ID:
            values["agent_id"] = agent_id
        self.agent_builders[agent_id].add_values(**values)
    def postprocess_batch_so_far(self, episode=None) -> None:
        """Apply policy postprocessors to any unprocessed rows.
        This pushes the postprocessed per-agent batches onto the per-policy
        builders, clearing per-agent state.
        Args:
            episode (Optional[Episode]): The Episode object that
                holds this MultiAgentBatchBuilder object.
        """
        # Materialize the batches so far.
        pre_batches = {}
        for agent_id, builder in self.agent_builders.items():
            pre_batches[agent_id] = (
                self.policy_map[self.agent_to_policy[agent_id]],
                builder.build_and_reset(),
            )
        # Apply postprocessor.
        post_batches = {}
        # clip_rewards=True -> keep only the sign; a float -> clip to +/- it.
        if self.clip_rewards is True:
            for _, (_, pre_batch) in pre_batches.items():
                pre_batch["rewards"] = np.sign(pre_batch["rewards"])
        elif self.clip_rewards:
            for _, (_, pre_batch) in pre_batches.items():
                pre_batch["rewards"] = np.clip(
                    pre_batch["rewards"],
                    a_min=-self.clip_rewards,
                    a_max=self.clip_rewards,
                )
        for agent_id, (_, pre_batch) in pre_batches.items():
            # Give the postprocessor access to all *other* agents' batches
            # (e.g. for algorithms that share information across agents).
            other_batches = pre_batches.copy()
            del other_batches[agent_id]
            policy = self.policy_map[self.agent_to_policy[agent_id]]
            if (
                not pre_batch.is_single_trajectory()
                or len(set(pre_batch[SampleBatch.EPS_ID])) > 1
            ):
                raise ValueError(
                    "Batches sent to postprocessing must only contain steps "
                    "from a single trajectory.",
                    pre_batch,
                )
            # Call the Policy's Exploration's postprocess method.
            post_batches[agent_id] = pre_batch
            if getattr(policy, "exploration", None) is not None:
                policy.exploration.postprocess_trajectory(
                    policy, post_batches[agent_id], policy.get_session()
                )
            post_batches[agent_id] = policy.postprocess_trajectory(
                post_batches[agent_id], other_batches, episode
            )
        if log_once("after_post"):
            logger.info(
                "Trajectory fragment after postprocess_trajectory():\n\n{}\n".format(
                    summarize(post_batches)
                )
            )
        # Append into policy batches and reset
        from ray.rllib.evaluation.rollout_worker import get_global_worker
        # Sorted iteration gives a deterministic callback/append order.
        for agent_id, post_batch in sorted(post_batches.items()):
            self.callbacks.on_postprocess_trajectory(
                worker=get_global_worker(),
                episode=episode,
                agent_id=agent_id,
                policy_id=self.agent_to_policy[agent_id],
                policies=self.policy_map,
                postprocessed_batch=post_batch,
                original_batches=pre_batches,
            )
            self.policy_builders[self.agent_to_policy[agent_id]].add_batch(post_batch)
        self.agent_builders.clear()
        self.agent_to_policy.clear()
    def check_missing_dones(self) -> None:
        """Raise if any agent is missing a final (terminated/truncated) row.
        Raises:
            ValueError: If the env signalled done for all agents but some
                per-agent builder's last row is neither terminated nor
                truncated.
        """
        for agent_id, builder in self.agent_builders.items():
            if not builder.buffers.is_terminated_or_truncated():
                raise ValueError(
                    "The environment terminated for all agents, but we still "
                    "don't have a last observation for "
                    "agent {} (policy {}). ".format(
                        agent_id, self.agent_to_policy[agent_id]
                    )
                    + "Please ensure that you include the last observations "
                    "of all live agents when setting '__all__' terminated|truncated "
                    "to True. "
                )
    def build_and_reset(self, episode=None) -> MultiAgentBatch:
        """Returns the accumulated sample batches for each policy.
        Any unprocessed rows will be first postprocessed with a policy
        postprocessor. The internal state of this builder will be reset.
        Args:
            episode (Optional[Episode]): The Episode object that
                holds this MultiAgentBatchBuilder object or None.
        Returns:
            MultiAgentBatch: Returns the accumulated sample batches for each
            policy.
        """
        self.postprocess_batch_so_far(episode)
        policy_batches = {}
        for policy_id, builder in self.policy_builders.items():
            # Skip policies that collected no data during this period.
            if builder.count > 0:
                policy_batches[policy_id] = builder.build_and_reset()
        old_count = self.count
        self.count = 0
        return MultiAgentBatch.wrap_as_needed(policy_batches, old_count)
| MultiAgentSampleBatchBuilder |
python | pypa__setuptools | setuptools/_vendor/more_itertools/more.py | {
"start": 114664,
"end": 128554
} | class ____:
    """Convert a function that uses callbacks to an iterator.
    Let *func* be a function that takes a `callback` keyword argument.
    For example:
    >>> def func(callback=None):
    ...     for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
    ...         if callback:
    ...             callback(i, c)
    ...     return 4
    Use ``with callback_iter(func)`` to get an iterator over the parameters
    that are delivered to the callback.
    >>> with callback_iter(func) as it:
    ...     for args, kwargs in it:
    ...         print(args)
    (1, 'a')
    (2, 'b')
    (3, 'c')
    The function will be called in a background thread. The ``done`` property
    indicates whether it has completed execution.
    >>> it.done
    True
    If it completes successfully, its return value will be available
    in the ``result`` property.
    >>> it.result
    4
    Notes:
    * If the function uses some keyword argument besides ``callback``, supply
      *callback_kwd*.
    * If it finished executing, but raised an exception, accessing the
      ``result`` property will raise the same exception.
    * If it hasn't finished executing, accessing the ``result``
      property from within the ``with`` block will raise ``RuntimeError``.
    * If it hasn't finished executing, accessing the ``result`` property from
      outside the ``with`` block will raise a
      ``more_itertools.AbortThread`` exception.
    * Provide *wait_seconds* to adjust how frequently the it is polled for
      output.
    """
    def __init__(self, func, callback_kwd='callback', wait_seconds=0.1):
        self._func = func
        self._callback_kwd = callback_kwd
        self._aborted = False
        self._future = None
        self._wait_seconds = wait_seconds
        # Lazily import concurrent.future
        self._executor = __import__(
            'concurrent.futures'
        ).futures.ThreadPoolExecutor(max_workers=1)
        self._iterator = self._reader()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Flag the worker so its next callback invocation raises
        # AbortThread, then wait for the executor to wind down.
        self._aborted = True
        self._executor.shutdown()
    def __iter__(self):
        return self
    def __next__(self):
        return next(self._iterator)
    @property
    def done(self):
        # False until the background call has actually been submitted
        # (submission happens on first iteration, inside _reader).
        if self._future is None:
            return False
        return self._future.done()
    @property
    def result(self):
        if not self.done:
            raise RuntimeError('Function has not yet completed')
        return self._future.result()
    def _reader(self):
        q = Queue()
        def callback(*args, **kwargs):
            # Runs on the worker thread: forward each callback invocation
            # into the queue, or abort if the context manager has exited.
            if self._aborted:
                raise AbortThread('canceled by user')
            q.put((args, kwargs))
        self._future = self._executor.submit(
            self._func, **{self._callback_kwd: callback}
        )
        # Poll the queue, yielding items as they arrive, until the
        # background call finishes.
        while True:
            try:
                item = q.get(timeout=self._wait_seconds)
            except Empty:
                pass
            else:
                q.task_done()
                yield item
            if self._future.done():
                break
        # Drain anything enqueued between the last poll and completion.
        remaining = []
        while True:
            try:
                item = q.get_nowait()
            except Empty:
                break
            else:
                q.task_done()
                remaining.append(item)
        q.join()
        yield from remaining
def windowed_complete(iterable, n):
    """Yield ``(beginning, middle, end)`` triples for each placement of an
    *n*-wide window over *iterable*.

    ``middle`` holds the *n* windowed items; ``beginning`` and ``end`` hold
    everything before and after the window, respectively.

    >>> for b, m, e in windowed_complete(range(7), 3):
    ...     print(b, m, e)
    () (0, 1, 2) (3, 4, 5, 6)
    (0,) (1, 2, 3) (4, 5, 6)
    (0, 1) (2, 3, 4) (5, 6)
    (0, 1, 2) (3, 4, 5) (6,)
    (0, 1, 2, 3) (4, 5, 6) ()

    *n* must be at least 0 and at most the length of *iterable*.  The
    iterable is fully materialized, so this may require significant storage.
    """
    if n < 0:
        raise ValueError('n must be >= 0')
    items = tuple(iterable)
    total = len(items)
    if n > total:
        raise ValueError('n must be <= len(seq)')
    for start in range(total - n + 1):
        stop = start + n
        yield items[:start], items[start:stop], items[stop:]
def all_unique(iterable, key=None):
    """Return ``True`` if no two elements of *iterable* compare equal.

    >>> all_unique('ABCB')
    False

    If a *key* function is given, elements are compared by their keys.

    >>> all_unique('ABCb')
    True
    >>> all_unique('ABCb', str.lower)
    False

    Returns as soon as a duplicate is found.  Unhashable elements are
    supported, but checking them is slower (linear scan).
    """
    hashable_seen = set()
    unhashable_seen = []
    values = map(key, iterable) if key else iterable
    for value in values:
        try:
            # Fast path: O(1) membership for hashable values.
            if value in hashable_seen:
                return False
            hashable_seen.add(value)
        except TypeError:
            # Unhashable values fall back to a list scan.
            if value in unhashable_seen:
                return False
            unhashable_seen.append(value)
    return True
def nth_product(index, *args):
    """Equivalent to ``list(product(*args))[index]``.

    The Cartesian products of *args* are ordered lexicographically; this
    computes the product at sort position *index* without generating any
    of the preceding ones.

    >>> nth_product(8, range(2), range(2), range(2), range(2))
    (1, 0, 0, 0)

    ``IndexError`` will be raised if the given *index* is invalid.
    """
    # Work from the least-significant (rightmost) pool outward.
    pools = [tuple(pool) for pool in reversed(args)]
    sizes = [len(pool) for pool in pools]
    total = reduce(mul, sizes)
    if index < 0:
        index += total
    if not 0 <= index < total:
        raise IndexError
    picks = []
    for pool, size in zip(pools, sizes):
        # Peel off one mixed-radix "digit" per pool.
        index, digit = divmod(index, size)
        picks.append(pool[digit])
    return tuple(reversed(picks))
def nth_permutation(iterable, r, index):
    """Equivalent to ``list(permutations(iterable, r))[index]``.

    The length-*r* permutations of *iterable* are ordered
    lexicographically; this computes the one at sort position *index*
    without generating any of the preceding ones.

    >>> nth_permutation('ghijk', 2, 5)
    ('h', 'i')

    ``ValueError`` will be raised if *r* is negative or greater than the
    length of *iterable*; ``IndexError`` if *index* is invalid.
    """
    items = list(iterable)
    n = len(items)
    if r is None or r == n:
        r = n
        total = factorial(n)
    elif 0 <= r < n:
        total = perm(n, r)
    else:
        raise ValueError
    assert total > 0  # factorial(n) > 0, and r < n so perm(n, r) is never zero
    if index < 0:
        index += total
    if not 0 <= index < total:
        raise IndexError
    positions = [0] * r
    # Scale partial permutations up to full-permutation ordering.
    quotient = index if r == n else index * factorial(n) // total
    for radix in range(1, n + 1):
        # Decompose into factorial-number-system digits.
        quotient, digit = divmod(quotient, radix)
        slot = n - radix
        if 0 <= slot < r:
            positions[slot] = digit
        if quotient == 0:
            break
    # pop() shrinks the pool, so later digits index the remaining items.
    return tuple(map(items.pop, positions))
def nth_combination_with_replacement(iterable, r, index):
    """Equivalent to
    ``list(combinations_with_replacement(iterable, r))[index]``.

    The length-*r* combinations with replacement of *iterable* are ordered
    lexicographically; this computes the one at sort position *index*
    without generating any of the preceding ones.

    >>> nth_combination_with_replacement(range(5), 3, 5)
    (0, 1, 1)

    ``ValueError`` will be raised if *r* is negative or greater than the
    length of *iterable*; ``IndexError`` if *index* is invalid.
    """
    pool = tuple(iterable)
    n = len(pool)
    if not 0 <= r <= n:
        raise ValueError
    total = comb(n + r - 1, r)
    if index < 0:
        index += total
    if not 0 <= index < total:
        raise IndexError
    chosen = []
    position = 0
    while r:
        r -= 1
        # Skip over the blocks of combinations starting with smaller items.
        while n >= 0:
            block = comb(n + r - 1, r)
            if index < block:
                break
            n -= 1
            position += 1
            index -= block
        chosen.append(pool[position])
    return tuple(chosen)
def value_chain(*args):
    """Yield each argument in order, flattening one level of iterables.

    >>> list(value_chain(1, 2, 3, [4, 5, 6]))
    [1, 2, 3, 4, 5, 6]

    Binary and text strings are treated as atoms, not iterables:

    >>> list(value_chain('12', '34', ['56', '78']))
    ['12', '34', '56', '78']

    Useful for pre- or post-pending single elements to an iterable.
    Multiple levels of nesting are not flattened.
    """
    for item in args:
        # Strings/bytes are iterable but should be emitted whole.
        if isinstance(item, (str, bytes)):
            yield item
        else:
            try:
                yield from item
            except TypeError:
                # Not iterable at all: emit the value itself.
                yield item
def product_index(element, *args):
    """Equivalent to ``list(product(*args)).index(element)``.

    The Cartesian products of *args* are ordered lexicographically; this
    computes the sort position of *element* without generating any of the
    preceding products.

    >>> product_index([8, 2], range(10), range(5))
    42

    ``ValueError`` will be raised if the given *element* isn't in the
    product of *args*.
    """
    position = 0
    # Walk element and pools in lockstep; hitting the sentinel on either
    # side means the lengths disagree, so element can't be a product.
    for value, pool in zip_longest(element, args, fillvalue=_marker):
        if value is _marker or pool is _marker:
            raise ValueError('element is not a product of args')
        pool = tuple(pool)
        # Accumulate mixed-radix digits, most significant first.
        position = position * len(pool) + pool.index(value)
    return position
def combination_index(element, iterable):
    """Equivalent to ``list(combinations(iterable, r)).index(element)``
    The subsequences of *iterable* that are of length *r* can be ordered
    lexicographically. :func:`combination_index` computes the index of the
    first *element*, without computing the previous combinations.
    >>> combination_index('adf', 'abcdefg')
    10
    ``ValueError`` will be raised if the given *element* isn't one of the
    combinations of *iterable*.
    """
    element = enumerate(element)
    k, y = next(element, (None, None))
    # An empty element is the (single) first combination: index 0.
    if k is None:
        return 0
    # Collect the position in iterable of each member of element, in order.
    indexes = []
    pool = enumerate(iterable)
    for n, x in pool:
        if x == y:
            indexes.append(n)
            tmp, y = next(element, (None, None))
            if tmp is None:
                break
            else:
                k = tmp
    else:
        raise ValueError('element is not a combination of iterable')
    # Exhaust the pool so n becomes the index of the last pool item.
    n, _ = last(pool, default=(n, None))
    # Count (off by one, hence starting at 1) the combinations that sort
    # at or after element, then subtract from the total comb(n+1, k+1).
    index = 1
    for i, j in enumerate(reversed(indexes), start=1):
        j = n - j
        if i <= j:
            index += comb(j, i)
    return comb(n + 1, k + 1) - index
def combination_with_replacement_index(element, iterable):
    """Equivalent to
    ``list(combinations_with_replacement(iterable, r)).index(element)``.

    The length-*r* combinations with replacement of *iterable* are ordered
    lexicographically; this computes the sort position of the first
    *element* without generating any of the preceding ones.

    >>> combination_with_replacement_index('adf', 'abcdefg')
    20

    ``ValueError`` will be raised if the given *element* isn't one of the
    combinations with replacement of *iterable*.
    """
    element = tuple(element)
    length = len(element)
    cursor = enumerate(element)
    first, needle = next(cursor, (None, None))
    if first is None:
        # The empty combination is always first.
        return 0
    positions = []
    pool = tuple(iterable)
    # Record the pool position of each element member (repeats allowed).
    for position, candidate in enumerate(pool):
        while candidate == needle:
            positions.append(position)
            step, needle = next(cursor, (None, None))
            if step is None:
                break
        if needle is None:
            break
    else:
        raise ValueError(
            'element is not a combination with replacement of iterable'
        )
    pool_size = len(pool)
    # How many times each pool item was used.
    occupations = [0] * pool_size
    for position in positions:
        occupations[position] += 1
    # Count the combinations that precede element combinatorially.
    index = 0
    running = 0
    for k in range(1, pool_size):
        running += occupations[k - 1]
        j = length + pool_size - 1 - k - running
        i = pool_size - k
        if i <= j:
            index += comb(j, i)
    return index
def permutation_index(element, iterable):
    """Equivalent to ``list(permutations(iterable, r)).index(element)``.

    The length-*r* permutations of *iterable* are ordered
    lexicographically; this computes the sort position of the first
    *element* without generating any of the preceding permutations.

    >>> permutation_index([1, 3, 2], range(5))
    19

    ``ValueError`` will be raised if the given *element* isn't one of the
    permutations of *iterable*.
    """
    result = 0
    remaining = list(iterable)
    # Factorial-number-system digits: each chosen item's rank among the
    # items still available, weighted by a decreasing radix.
    for radix, value in zip(range(len(remaining), -1, -1), element):
        rank = remaining.index(value)
        result = result * radix + rank
        del remaining[rank]
    return result
| callback_iter |
python | django__django | django/tasks/base.py | {
"start": 801,
"end": 1367
} | class ____(TextChoices):
    """Lifecycle states of a background Task's result."""
    # The Task has just been enqueued, or is ready to be executed again.
    READY = ("READY", pgettext_lazy("Task", "Ready"))
    # The Task is currently running.
    RUNNING = ("RUNNING", pgettext_lazy("Task", "Running"))
    # The Task raised an exception during execution, or was unable to start.
    FAILED = ("FAILED", pgettext_lazy("Task", "Failed"))
    # The Task has finished running successfully.
    SUCCESSFUL = ("SUCCESSFUL", pgettext_lazy("Task", "Successful"))
@dataclass(frozen=True, slots=True, kw_only=True)
| TaskResultStatus |
python | streamlit__streamlit | lib/streamlit/testing/v1/element_tree.py | {
"start": 9712,
"end": 9901
} | class ____(AlertBase):
    """AlertBase specialization whose ``type`` is fixed to ``"success"``."""
    def __init__(self, proto: AlertProto, root: ElementTree) -> None:
        """Wrap the alert proto and tag this element as a success alert."""
        super().__init__(proto, root)
        self.type = "success"
@dataclass(repr=False)
| Success |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/elements.py | {
"start": 111327,
"end": 113710
} | class ____(ClauseList, ColumnElement[TupleAny]):
    """Represent a SQL tuple, i.e. a parenthesized list of column
    expressions such as ``(a, b, c)``."""
    __visit_name__ = "tuple"
    _traverse_internals: _TraverseInternalsType = (
        ClauseList._traverse_internals + []
    )
    # Composite type built from the element types (set in __init__).
    type: TupleType
    @util.preload_module("sqlalchemy.sql.sqltypes")
    def __init__(
        self,
        *clauses: _ColumnExpressionArgument[Any],
        types: Optional[Sequence[_TypeEngineArgument[Any]]] = None,
    ):
        """Coerce *clauses* into column expressions; *types*, if given,
        must supply one type per clause and overrides inferred types.

        :raises exc.ArgumentError: if ``len(types) != len(clauses)``.
        """
        sqltypes = util.preloaded.sql_sqltypes
        if types is None:
            init_clauses: List[ColumnElement[Any]] = [
                coercions.expect(roles.ExpressionElementRole, c)
                for c in clauses
            ]
        else:
            if len(types) != len(clauses):
                raise exc.ArgumentError(
                    "Wrong number of elements for %d-tuple: %r "
                    % (len(types), clauses)
                )
            init_clauses = [
                coercions.expect(
                    roles.ExpressionElementRole,
                    c,
                    # A "null" type means "not specified"; let coercion infer.
                    type_=typ if not typ._isnull else None,
                )
                for typ, c in zip(types, clauses)
            ]
        # The tuple's type is the composite of its elements' types.
        self.type = sqltypes.TupleType(*[arg.type for arg in init_clauses])
        super().__init__(*init_clauses)
    @property
    def _select_iterable(self) -> _SelectIterable:
        # A tuple contributes itself (not its elements) to a SELECT list.
        return (self,)
    def _bind_param(self, operator, obj, type_=None, expanding=False):
        if expanding:
            # Expanding bind: a single placeholder expanded at execution
            # time (e.g. for "IN" against a list of tuples).
            return BindParameter(
                None,
                value=obj,
                _compared_to_operator=operator,
                unique=True,
                expanding=True,
                type_=type_,
                _compared_to_type=self.type,
            )
        else:
            # Element-wise binding: one BindParameter per tuple member,
            # each typed against the corresponding element type.
            return Tuple(
                *[
                    BindParameter(
                        None,
                        o,
                        _compared_to_operator=operator,
                        _compared_to_type=compared_to_type,
                        unique=True,
                        type_=type_,
                    )
                    for o, compared_to_type in zip(obj, self.type.types)
                ]
            )
    def self_group(self, against: Optional[OperatorType] = None) -> Self:
        # Tuple is parenthesized by definition.
        return self
python | cython__cython | Cython/Compiler/TreeFragment.py | {
"start": 3824,
"end": 7548
} | class ____(VisitorTransform):
    """
    Makes a copy of a template tree while doing substitutions.
    A dictionary "substitutions" should be passed in when calling
    the transform; mapping names to replacement nodes. Then replacement
    happens like this:
    - If an ExprStatNode contains a single NameNode, whose name is
      a key in the substitutions dictionary, the ExprStatNode is
      replaced with a copy of the tree given in the dictionary.
      It is the responsibility of the caller that the replacement
      node is a valid statement.
    - If a single NameNode is otherwise encountered, it is replaced
      if its name is listed in the substitutions dictionary in the
      same way. It is the responsibility of the caller to make sure
      that the replacement nodes is a valid expression.
    Also a list "temps" should be passed. Any names listed will
    be transformed into anonymous, temporary names.
    Currently supported for tempnames is:
    NameNode
    (various function and class definition nodes etc. should be added to this)
    Each replacement node gets the position of the substituted node
    recursively applied to every member node.
    """
    # Class-level counter so temp names stay unique across instances.
    temp_name_counter = 0
    def __call__(self, node, substitutions, temps, pos):
        """Run the transform over *node*, applying *substitutions* and
        turning each name in *temps* into an anonymous temp handle."""
        self.substitutions = substitutions
        self.pos = pos
        tempmap = {}
        temphandles = []
        for temp in temps:
            TemplateTransform.temp_name_counter += 1
            handle = UtilNodes.TempHandle(PyrexTypes.py_object_type)
            tempmap[temp] = handle
            temphandles.append(handle)
        self.tempmap = tempmap
        result = super().__call__(node)
        # Scope the allocated temps to the transformed tree.
        if temps:
            result = UtilNodes.TempsBlockNode(self.get_pos(node),
                                              temps=temphandles,
                                              body=result)
        return result
    def get_pos(self, node):
        # An explicit pos (passed to __call__) wins over the node's own.
        if self.pos:
            return self.pos
        else:
            return node.pos
    def visit_Node(self, node):
        # Default visitor: clone, stamp position, recurse into children.
        if node is None:
            return None
        else:
            c = node.clone_node()
            if self.pos is not None:
                c.pos = self.pos
            self.visitchildren(c)
            return c
    def try_substitution(self, node, key):
        """Replace *node* with a positioned copy of the substitution
        registered under *key*, or fall back to a plain copy."""
        sub = self.substitutions.get(key)
        if sub is not None:
            pos = self.pos
            if pos is None: pos = node.pos
            # Copy so the same substitution tree can be used repeatedly.
            return ApplyPositionAndCopy(pos)(sub)
        else:
            return self.visit_Node(node) # make copy as usual
    def visit_NameNode(self, node):
        temphandle = self.tempmap.get(node.name)
        if temphandle:
            # Replace name with temporary
            return temphandle.ref(self.get_pos(node))
        else:
            return self.try_substitution(node, node.name)
    def visit_ExprStatNode(self, node):
        # If an expression-as-statement consists of only a replaceable
        # NameNode, we replace the entire statement, not only the NameNode
        if isinstance(node.expr, NameNode):
            return self.try_substitution(node, node.expr.name)
        else:
            return self.visit_Node(node)
def copy_code_tree(node):
    """Return a copy of *node*'s tree, produced by a plain TreeCopier pass."""
    return TreeCopier()(node)
_match_indent = re.compile("^ *").match
def strip_common_indent(lines):
    """Return *lines* with blank lines dropped and the indentation shared
    by all remaining lines stripped from each."""
    # TODO: Facilitate textwrap.indent instead
    nonblank = [line for line in lines if line.strip() != ""]
    if not nonblank:
        return nonblank
    common = min(len(_match_indent(line).group(0)) for line in nonblank)
    return [line[common:] for line in nonblank]
| TemplateTransform |
python | RaRe-Technologies__gensim | gensim/interfaces.py | {
"start": 672,
"end": 4896
} | class ____(utils.SaveLoad):
    """Interface for corpus classes from :mod:`gensim.corpora`.
    Corpus is simply an iterable object, where each iteration step yields one document:
    .. sourcecode:: pycon
        >>> from gensim.corpora import MmCorpus # inherits from the CorpusABC class
        >>> from gensim.test.utils import datapath
        >>>
        >>> corpus = MmCorpus(datapath("testcorpus.mm"))
        >>> for doc in corpus:
        ...     pass # do something with the doc...
    A document represented in the bag-of-word (BoW) format, i.e. list of (attr_id, attr_value),
    like ``[(1, 0.2), (4, 0.6), ...]``.
    .. sourcecode:: pycon
        >>> from gensim.corpora import MmCorpus # inherits from the CorpusABC class
        >>> from gensim.test.utils import datapath
        >>>
        >>> corpus = MmCorpus(datapath("testcorpus.mm"))
        >>> doc = next(iter(corpus))
        >>> print(doc)
        [(0, 1.0), (1, 1.0), (2, 1.0)]
    Remember that the save/load methods only pickle the corpus object, not
    the (streamed) corpus data itself!
    To save the corpus data, please use this pattern :
    .. sourcecode:: pycon
        >>> from gensim.corpora import MmCorpus # MmCorpus inherits from CorpusABC
        >>> from gensim.test.utils import datapath, get_tmpfile
        >>>
        >>> corpus = MmCorpus(datapath("testcorpus.mm"))
        >>> tmp_path = get_tmpfile("temp_corpus.mm")
        >>>
        >>> MmCorpus.serialize(tmp_path, corpus) # serialize corpus to disk in the MmCorpus format
        >>> loaded_corpus = MmCorpus(tmp_path) # load corpus through constructor
        >>> for (doc_1, doc_2) in zip(corpus, loaded_corpus):
        ...     assert doc_1 == doc_2 # no change between the original and loaded corpus
    See Also
    --------
    :mod:`gensim.corpora`
        Corpora in different formats.
    """
    def __iter__(self):
        """Iterate all over corpus."""
        raise NotImplementedError('cannot instantiate abstract base class')
    def save(self, *args, **kwargs):
        """Saves the in-memory state of the corpus (pickles the object).
        Warnings
        --------
        This saves only the "internal state" of the corpus object, not the corpus data!
        To save the corpus data, use the `serialize` method of your desired output format
        instead, e.g. :meth:`gensim.corpora.mmcorpus.MmCorpus.serialize`.
        """
        # Warn loudly: users usually want serialize(), which persists the
        # actual corpus content, not just this (tiny) iterator object.
        import warnings
        warnings.warn(
            "corpus.save() stores only the (tiny) iteration object in memory; "
            "to serialize the actual corpus content, use e.g. MmCorpus.serialize(corpus)"
        )
        super(CorpusABC, self).save(*args, **kwargs)
    def __len__(self):
        """Get the corpus size = the total number of documents in it."""
        raise NotImplementedError("must override __len__() before calling len(corpus)")
    @staticmethod
    def save_corpus(fname, corpus, id2word=None, metadata=False):
        """Save `corpus` to disk.
        Some formats support saving the dictionary (`feature_id -> word` mapping),
        which can be provided by the optional `id2word` parameter.
        Notes
        -----
        Some corpora also support random access via document indexing, so that the documents on disk
        can be accessed in O(1) time (see the :class:`gensim.corpora.indexedcorpus.IndexedCorpus` base class).
        In this case, :meth:`~gensim.interfaces.CorpusABC.save_corpus` is automatically called internally by
        :func:`serialize`, which does :meth:`~gensim.interfaces.CorpusABC.save_corpus` plus saves the index
        at the same time.
        Calling :func:`serialize() is preferred to calling :meth:`gensim.interfaces.CorpusABC.save_corpus`.
        Parameters
        ----------
        fname : str
            Path to output file.
        corpus : iterable of list of (int, number)
            Corpus in BoW format.
        id2word : :class:`~gensim.corpora.Dictionary`, optional
            Dictionary of corpus.
        metadata : bool, optional
            Write additional metadata to a separate too?
        """
        raise NotImplementedError('cannot instantiate abstract base class')
| CorpusABC |
python | google__jax | jax/experimental/mosaic/gpu/fragmented_array.py | {
"start": 1520,
"end": 10593
} | class ____:
"""A tiling expression describing a permutation of elements of an nd-array.
To apply one level of tiling to an array, each of the trailing dimensions (up
to the rank of the tile) is unfolded into two dimensions: first equal to the
ratio of the dimension size and the tile size, and second equal to the tile
size. Then, all newly unfolded minor dimensions are transposed to appear at
the end.
This expression describes multi-level tiling, by applying each element of
`tiles` in sequence to the array.
See https://openxla.org/xla/tiled_layout for a more detailed explanation.
"""
tiles: tuple[tuple[int, ...], ...]
def __post_init__(self):
if not self.tiles:
return
last_tile_rank = len(self.tiles[0])
for tile in self.tiles:
if len(tile) > last_tile_rank:
raise ValueError("Tiles must have a decreasing rank")
if not tile:
raise ValueError("Tiles must not be empty")
if any(d <= 0 for d in tile):
raise ValueError(f"Tile shape must only have positive sizes, got: {self.tiles}")
last_tile_rank = len(tile)
def __str__(self):
return f"Tiling({''.join(map(str, self.tiles))})"
def tile_shape(self, shape: tuple[int, ...]) -> tuple[int, ...]:
"""Computes the shape of an array after tiling."""
orig_shape = shape
def fail():
raise ValueError(f"Tiling {self.tiles} does not apply to shape {orig_shape}")
for tile in self.tiles:
if len(tile) > len(shape):
fail()
untiled_dims, tiled_dims = shape[:-len(tile)], shape[-len(tile):]
if any(s % t != 0 for s, t in zip(tiled_dims, tile)):
fail()
shape = (*untiled_dims, *(d // t for d, t in zip(tiled_dims, tile)), *tile)
return shape
def untile_shape(self, shape: tuple[int, ...]) -> tuple[int, ...]:
"""Computes the shape of an array before tiling from its tiled shape."""
orig_shape = shape
def fail():
raise ValueError(
f"shape {orig_shape} is not a valid result of applying tiling {self}."
)
for tile in reversed(self.tiles):
if len(tile) > len(shape):
fail()
untiled_dims = shape[:-2 * len(tile)]
tiled_dims = shape[-2 * len(tile):-len(tile)]
tiling_dims = shape[-len(tile):]
if tiling_dims != tile:
fail()
shape = (*untiled_dims, *(d * t for d, t in zip(tiled_dims, tile)))
return shape
def canonicalize(self) -> Tiling:
"""Returns a canonicalized version of the tiling.
We define a tiling to be canonical if, at each step (except the first one,
which defines the base tile shape):
1. The tiling partitions at least one dimension in more than 1 tile. For
example, the tiling `(8, 8)(8, 8)` is not canonical, as applying it
yields a shape `(1, 1, 8, 8)`. We canonicalize it to `(8, 8)`, which
allows getting rid of the unnecessary `1` dimensions.
2. The leading dimensions of each tile are not `1`. If canonicalizing a
tile in this way leads to an empty tile, then the tile is given shape
`(1,)`---which is still a meaningful (final) tile. For example, the
tiling `(8, 8)(1, 4)` is not canonical, as applying it yields a shape
`(8, 2, 1, 4)`. We canonicalize it to `(8, 8)(4,)`, which allows
getting rid of the unnecessary `1` dimension, and yields a shape
`(8, 2, 4)`.
"""
if len(self.tiles) <= 1:
return self
shape = self.tiles[0]
new_tiling = [self.tiles[0]]
for tile in self.tiles[1:]:
for i, d in enumerate(tile):
if d != 1:
canonical_tile = tile[i:]
break
else:
canonical_tile = (1,)
tiled_dims = shape[-len(canonical_tile):]
if tiled_dims == canonical_tile:
continue
shape = canonical_tile
new_tiling.append(canonical_tile)
return Tiling(tuple(new_tiling))
def tile_strides(self, strides: tuple[int, ...]) -> tuple[int, ...]:
"""Computes the strides of an array after tiling."""
for tile in self.tiles:
untiled, tiled = strides[:-len(tile)], strides[-len(tile):]
strides = (*untiled, *(s * t for s, t in zip(tiled, tile)), *tiled)
return strides
def tile_dimension(self, dim: int) -> tuple[bool, ...]:
"""Result is True whenever the tiled dim originated from the given input dim."""
tiling_rank = len(self.tiles[0])
if dim < 0 or dim >= tiling_rank:
raise ValueError(f"Invalid dimension {dim} for tiling {self}")
strides = [1] * tiling_rank
strides[dim] = 0
return tuple(s == 0 for s in self.tile_strides(tuple(strides)))
def remove_dimension(self, dim: int) -> Tiling:
"""Returns a tiling with the given dimension removed."""
tiling_rank = len(self.tiles[0])
if dim < 0 or dim >= tiling_rank:
raise ValueError(f"Invalid dimension {dim} for tiling {self}")
dim_in_tile = dim
tiles = []
last_tile_rank = len(self.tiles[0])
for t in self.tiles:
assert last_tile_rank >= len(t)
dim_in_tile -= last_tile_rank - len(t)
last_tile_rank = len(t)
if dim_in_tile >= 0:
t = t[:dim_in_tile] + t[dim_in_tile + 1:]
if not t: # If this tile is empty, all other tiles will be empty too.
break
tiles.append(t)
return Tiling(tuple(tiles))
def tile_nested_shape_strides(
    self,
    shape: tuple[tuple[int, ...], ...],
    strides: tuple[tuple[int, ...], ...],
) -> tuple[tuple[tuple[int, ...], ...], tuple[tuple[int, ...], ...]]:
  """A fused version of `tile_shape` and `tile_strides` for nested shapes.

  By nested shape we mean that each logical dimension (i.e. each element of
  shape/strides) is actually composed out of multiple physical dimensions.
  For example, a row-major array of logical shape (128, 128) that is tiled
  into (64, 64) tiles would have a nested shape ((2, 64), (2, 64)) (i.e. each
  dim is split into two sub-dims) and nested strides of
  ((2 * 64 * 64, 64), (64 * 64, 1)).

  Raises:
    ValueError: if shape/strides disagree in length, or if the tiling does
      not evenly apply to the given nested shape.
  """
  if len(shape) != len(strides):
    raise ValueError(
        f"Shape {shape} and strides {strides} must have the same length"
    )
  def fail_if(cond, shape=shape):  # Capture shape now.
    if cond:
      raise ValueError(f"Tiling {self.tiles} does not apply to shape {shape}")
  for tile in self.tiles:
    fail_if(len(tile) > len(shape))
    untiled_shape, tiled_shape = shape[:-len(tile)], shape[-len(tile):]
    untiled_strides, tiled_strides = strides[:-len(tile)], strides[-len(tile):]
    major_dim_shapes, major_dim_strides = [], []
    minor_dim_shapes, minor_dim_strides = [], []
    for t, dim_shape, dim_strides in zip(tile, tiled_shape, tiled_strides):
      major_dim_shape_rev, major_dim_stride_rev = [], []
      minor_dim_shape_rev, minor_dim_stride_rev = [], []
      # Walk this logical dim's sub-dims from minor to major, peeling off
      # sub-dims until the tile size `t` has been fully consumed.
      for d, s in zip(reversed(dim_shape), reversed(dim_strides), strict=True):
        if d < t:  # We will need to tile more dims
          fail_if(t % d != 0)
          t //= d
          minor_dim_shape_rev.append(d)
          minor_dim_stride_rev.append(s)
        elif t != 1:  # Last dim to tile!
          fail_if(d % t != 0)
          minor_dim_shape_rev.append(t)
          minor_dim_stride_rev.append(s)
          if d != t:  # No need to insert singleton dims.
            major_dim_shape_rev.append(d // t)
            major_dim_stride_rev.append(s * t)
          t = 1
        else:  # Done tiling!
          major_dim_shape_rev.append(d)
          major_dim_stride_rev.append(s)
      fail_if(t != 1)
      # The _rev lists were built minor-to-major; flip back to major-to-minor.
      major_dim_shapes.append(major_dim_shape_rev[::-1])
      minor_dim_shapes.append(minor_dim_shape_rev[::-1])
      major_dim_strides.append(major_dim_stride_rev[::-1])
      minor_dim_strides.append(minor_dim_stride_rev[::-1])
    shape = (*untiled_shape, *major_dim_shapes, *minor_dim_shapes)  # type: ignore[arg-type]
    strides = (*untiled_strides, *major_dim_strides, *minor_dim_strides)  # type: ignore[arg-type]
  return (
      # Dims fully consumed by tiling become empty; report them as (1,).
      tuple(tuple(d) if d else (1,) for d in shape),
      tuple(tuple(d) if d else (1,) for d in strides),
  )
def tile_indices(self, indices: tuple[int, ...]) -> tuple[int, ...]:
  """Map logical indices to tiled indices (outer tile index, then offset)."""
  for tile in self.tiles:
    rank = len(tile)
    head = indices[:-rank]
    tail = indices[-rank:]
    outer = tuple(idx // size for idx, size in zip(tail, tile))
    inner = tuple(idx % size for idx, size in zip(tail, tile))
    indices = head + outer + inner
  return indices
def untile_indices(self, indices: tuple[int, ...]) -> tuple[int, ...]:
  """Inverse of `tile_indices`: merge outer/inner pairs into logical indices."""
  for tile in reversed(self.tiles):
    rank = len(tile)
    head = indices[:-2 * rank]
    outer = indices[-2 * rank:-rank]
    inner = indices[-rank:]
    merged = tuple(o * size + i for o, i, size in zip(outer, inner, tile))
    indices = head + merged
  return indices
def enumerate_negative(elems: Sequence[T]) -> Iterable[tuple[int, T]]:
  """Yield (index, element) pairs with indices counted back from the end.

  For a sequence of length n, indices run from -n to -1, so each index is a
  valid negative index into `elems` for its element.
  """
  for neg_idx, elem in enumerate(elems, start=-len(elems)):
    yield neg_idx, elem
@dataclasses.dataclass(frozen=True)
| Tiling |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-google-genai/tests/test_llms_google_genai.py | {
"start": 1445,
"end": 68985
class Schema(BaseModel):
    """A model of a schema in a database."""

    # NOTE(review): the class name was blanked out (`____`) in this file, but
    # the tests below reference `Schema` throughout (e.g. structured predict
    # and function-declaration conversion tests), so it must be named Schema.
    schema_name: str = Field(description="Schema name")
    tables: List[Table] = Field(description="List of random Table objects")
# Define the models to test against.
# Each entry pairs a model name with per-model constructor overrides; the
# second entry exercises Gemini 2.5 "thinking" via an explicit budget.
# When SKIP_GEMINI is set (no API key), the list is empty so the `llm`
# fixture produces no params and live tests are skipped entirely.
GEMINI_MODELS_TO_TEST = (
    [
        {"model": "models/gemini-2.5-flash-lite", "config": {}},
        {
            "model": "models/gemini-2.5-flash",
            "config": {
                "generation_config": GenerateContentConfig(
                    thinking_config=ThinkingConfig(thinking_budget=512)
                )
            },
        },
    ]
    if not SKIP_GEMINI
    else []
)


@pytest.fixture(params=GEMINI_MODELS_TO_TEST)
def llm(request) -> GoogleGenAI:
    """Fixture to create a GoogleGenAI instance for each model."""
    return GoogleGenAI(
        model=request.param["model"],
        api_key=os.environ["GOOGLE_API_KEY"],
        **request.param.get("config", {}),
    )
# --- Live-API round trips: complete/chat in sync, async, and streaming ---


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_complete(llm: GoogleGenAI) -> None:
    """Test both sync and async complete methods."""
    prompt = "Write a poem about a magic backpack"

    # Test synchronous complete
    sync_response = llm.complete(prompt)
    assert sync_response is not None
    assert len(sync_response.text) > 0


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
@pytest.mark.asyncio
async def test_acomplete(llm: GoogleGenAI) -> None:
    """Test both sync and async complete methods."""
    prompt = "Write a poem about a magic backpack"

    # Test async complete
    async_response = await llm.acomplete(prompt)
    assert async_response is not None
    assert len(async_response.text) > 0


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_chat(llm: GoogleGenAI) -> None:
    """Test both sync and async chat methods."""
    message = ChatMessage(content="Write a poem about a magic backpack")

    # Test synchronous chat
    sync_response = llm.chat(messages=[message])
    assert sync_response is not None
    assert sync_response.message.content and len(sync_response.message.content) > 0


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
@pytest.mark.asyncio
async def test_achat(llm: GoogleGenAI) -> None:
    """Test both sync and async chat methods."""
    message = ChatMessage(content="Write a poem about a magic backpack")

    # Test async chat
    async_response = await llm.achat(messages=[message])
    assert async_response is not None
    assert async_response.message.content and len(async_response.message.content) > 0


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_stream_chat(llm: GoogleGenAI) -> None:
    """Test both sync and async stream chat methods."""
    message = ChatMessage(content="Write a poem about a magic backpack")

    # Test synchronous stream chat: all chunks carry string content
    sync_chunks = list(llm.stream_chat(messages=[message]))
    assert len(sync_chunks) > 0
    assert all(isinstance(chunk.message.content, str) for chunk in sync_chunks)


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
@pytest.mark.asyncio
async def test_astream_chat(llm: GoogleGenAI) -> None:
    """Test both sync and async stream chat methods."""
    message = ChatMessage(content="Write a poem about a magic backpack")

    # Test async stream chat
    response_gen = await llm.astream_chat(messages=[message])
    chunks = []
    async for chunk in response_gen:
        chunks.append(chunk)
    assert len(chunks) > 0
    assert all(isinstance(chunk.message.content, str) for chunk in chunks)


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_stream_complete(llm: GoogleGenAI) -> None:
    """Test both sync and async stream complete methods."""
    prompt = "Write a poem about a magic backpack"

    # Test synchronous stream complete
    sync_chunks = list(llm.stream_complete(prompt))
    assert len(sync_chunks) > 0
    assert all(isinstance(chunk.text, str) for chunk in sync_chunks)


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
@pytest.mark.asyncio
async def test_astream_complete(llm: GoogleGenAI) -> None:
    """Test both sync and async stream complete methods."""
    prompt = "Write a poem about a magic backpack"

    # Test async stream complete
    response_gen = await llm.astream_complete(prompt)
    chunks = []
    async for chunk in response_gen:
        chunks.append(chunk)
    assert len(chunks) > 0
    assert all(isinstance(chunk.text, str) for chunk in chunks)
# --- Structured prediction (Pydantic output classes) against the live API ---


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
@pytest.mark.asyncio
async def test_astructured_predict(llm: GoogleGenAI) -> None:
    """Test async structured prediction with a simple schema."""
    response = await llm.astructured_predict(
        output_cls=Poem,
        prompt=PromptTemplate("Write a poem about a magic backpack"),
    )
    assert response is not None
    assert isinstance(response, Poem)
    assert isinstance(response.content, str)
    assert len(response.content) > 0


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_simple_stream_structured_predict(llm: GoogleGenAI) -> None:
    """Test stream structured prediction with a simple schema."""
    response = llm.stream_structured_predict(
        output_cls=Poem,
        prompt=PromptTemplate("Write a poem about a magic backpack"),
    )

    # Every partial must already expose the schema field; keep the last one.
    result = None
    for partial_response in response:
        assert hasattr(partial_response, "content")
        result = partial_response

    assert result is not None
    assert isinstance(result, Poem)
    assert len(result.content) > 0


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
@pytest.mark.asyncio
async def test_simple_astream_structured_predict(llm: GoogleGenAI) -> None:
    """Test async stream structured prediction with a simple schema."""
    response = await llm.astream_structured_predict(
        output_cls=Poem,
        prompt=PromptTemplate("Write a poem about a magic backpack"),
    )

    result = None
    async for partial_response in response:
        result = partial_response
        assert hasattr(result, "content")

    assert result is not None
    assert isinstance(result, Poem)
    assert isinstance(result.content, str)


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_simple_structured_predict(llm: GoogleGenAI) -> None:
    """Test structured prediction with a simple schema."""
    response = llm.structured_predict(
        output_cls=Poem,
        prompt=PromptTemplate("Write a poem about a magic backpack"),
    )

    assert response is not None
    assert isinstance(response, Poem)
    assert isinstance(response.content, str)
    assert len(response.content) > 0


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_complex_structured_predict(llm: GoogleGenAI) -> None:
    """Test structured prediction with a complex nested schema."""
    prompt = PromptTemplate("Generate a simple database structure")
    response = llm.structured_predict(output_cls=Schema, prompt=prompt)

    assert response is not None
    assert isinstance(response, Schema)
    assert isinstance(response.schema_name, str)
    assert len(response.schema_name) > 0
    # Nested models must be populated, not just the top level.
    assert len(response.tables) > 0
    assert all(isinstance(table, Table) for table in response.tables)
    assert all(len(table.columns) > 0 for table in response.tables)
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_anyof_optional_structured_predict(llm: GoogleGenAI) -> None:
    """Optional fields become anyOf schemas; both None and str must be accepted."""

    class Person(BaseModel):
        last_name: str = Field(description="Last name")
        first_name: Optional[str] = Field(None, description="Optional first name")

    prompt = PromptTemplate("Create a fake person ")
    response = llm.structured_predict(output_cls=Person, prompt=prompt)

    assert response is not None
    assert isinstance(response, Person)
    assert isinstance(response.last_name, str)
    assert isinstance(response.first_name, None | str)


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_as_structured_llm_native_genai(llm: GoogleGenAI) -> None:
    """Baseline: the raw google-genai client can parse the same Pydantic schema."""
    schema_response = llm._client.models.generate_content(
        model=llm.model,
        contents="Generate a simple database structure with at least one table called 'experiments'",
        config=GenerateContentConfig(
            response_mime_type="application/json",
            response_schema=Schema,
        ),
    ).parsed
    assert isinstance(schema_response, Schema)
    assert len(schema_response.schema_name) > 0
    assert len(schema_response.tables) > 0


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_as_structured_llm(llm: GoogleGenAI) -> None:
    """as_structured_llm().complete() wraps the parsed object in `.raw`."""
    prompt = PromptTemplate("Generate content")

    # Test with simple schema
    poem_response = llm.as_structured_llm(output_cls=Poem, prompt=prompt).complete(
        "Write a poem about a magic backpack"
    )
    assert isinstance(poem_response.raw, Poem)
    assert len(poem_response.raw.content) > 0

    # Test with complex schema
    schema_response = llm.as_structured_llm(output_cls=Schema, prompt=prompt).complete(
        "Generate a simple database structure with at least one table called 'experiments'"
    )
    assert isinstance(schema_response.raw, Schema)
    assert len(schema_response.raw.schema_name) > 0
    assert len(schema_response.raw.tables) > 0


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
@pytest.mark.asyncio
async def test_as_structured_llm_async(llm: GoogleGenAI) -> None:
    """Async variant of the as_structured_llm round trip."""
    prompt = PromptTemplate("Generate content")

    # Test with simple schema
    poem_response = await llm.as_structured_llm(
        output_cls=Poem, prompt=prompt
    ).acomplete("Write a poem about a magic backpack")
    assert isinstance(poem_response.raw, Poem)
    assert len(poem_response.raw.content) > 0

    # Test with complex schema
    schema_response = await llm.as_structured_llm(
        output_cls=Schema, prompt=prompt
    ).acomplete("Generate a simple database structure")
    assert isinstance(schema_response.raw, Schema)
    assert len(schema_response.raw.schema_name) > 0
    assert len(schema_response.raw.tables) > 0


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_as_structure_llm_with_config(llm: GoogleGenAI) -> None:
    """A per-call config override must merge with, not replace, the base config."""
    response = (
        llm.as_structured_llm(output_cls=Poem)
        .complete(
            prompt="Write a poem about a magic backpack",
            # here we want to change the temperature, but it must not override the whole config
            config={"temperature": 0.1},
        )
        .raw
    )
    assert isinstance(response, Poem)


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
@pytest.mark.asyncio
async def test_as_structured_llm_async_with_config(llm: GoogleGenAI) -> None:
    """Async variant: per-call config override must merge with the base config."""
    response = await llm.as_structured_llm(output_cls=Poem).acomplete(
        prompt="Write a poem about a magic backpack",
        # here we want to change the temperature, but it must not override the whole config
        config={"temperature": 0.1},
    )
    assert isinstance(response.raw, Poem)
# --- Multimodal inputs: image and video blocks ---


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_structured_predict_multiple_block(llm: GoogleGenAI) -> None:
    """Structured predict over a multi-block (text + image) user message."""
    chat_messages = [
        ChatMessage(
            content=[
                TextBlock(text="which logo is this?"),
                ImageBlock(
                    url="https://upload.wikimedia.org/wikipedia/commons/7/7a/Nohat-wiki-logo.png"
                ),
            ],
            role=MessageRole.USER,
        ),
    ]

    class Response(BaseModel):
        answer: str

    support = llm.structured_predict(
        output_cls=Response, prompt=ChatPromptTemplate(message_templates=chat_messages)
    )
    assert isinstance(support, Response)
    assert "wiki" in support.answer.lower()


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_predict_with_video(llm: GoogleGenAI) -> None:
    """Small videos are sent inline; the model should describe the scene."""
    chat_messages = [
        ChatMessage(
            content=[
                TextBlock(text="where is this scene happening?"),
                VideoBlock(
                    url="https://upload.wikimedia.org/wikipedia/commons/transcoded/2/28/"
                    "TikTok_and_YouTube_Shorts_example.webm/TikTok_and_YouTube_Shorts_example.webm.720p.vp9.webm"
                ),
            ],
            role=MessageRole.USER,
        ),
    ]
    answer = llm.predict(prompt=ChatPromptTemplate(message_templates=chat_messages))
    assert "space" in answer.lower()


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_predict_with_large_video(llm: GoogleGenAI) -> None:
    """Large videos go through the Files API and must be cleaned up afterwards."""
    client = llm._client
    # Snapshot uploaded files so we can verify no residue is left behind.
    before_file_names = {file.name for file in client.files.list()}

    chat_messages = [
        ChatMessage(
            content=[
                TextBlock(text="what is this video about?"),
                VideoBlock(
                    url="https://upload.wikimedia.org/wikipedia/commons/transcoded/f/f0/Die_Franz%C3%B6sische_"
                    "Revolution_und_Napoleon_-_Planet_Wissen.webm/Die_Franz%C3%B6sische_Revolution_und_Napoleon"
                    "_-_Planet_Wissen.webm.720p.vp9.webm"
                ),
            ],
            role=MessageRole.USER,
        ),
    ]
    answer = llm.predict(prompt=ChatPromptTemplate(message_templates=chat_messages))
    assert "revolution" in answer.lower()

    # Unsure the file has been deleted
    after_file_names = {file.name for file in client.files.list()}
    assert before_file_names == after_file_names
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_get_tool_calls_from_response(llm: GoogleGenAI) -> None:
    """A tool-enabled chat should yield a parseable ToolSelection list."""

    def add(a: int, b: int) -> int:
        """Add two integers and returns the result integer."""
        return a + b

    add_tool = FunctionTool.from_defaults(fn=add)

    msg = ChatMessage("What is the result of adding 2 and 3?")
    response = llm.chat_with_tools(
        user_msg=msg,
        tools=[add_tool],
    )

    tool_calls: List[ToolSelection] = llm.get_tool_calls_from_response(response)
    assert len(tool_calls) == 1
    assert tool_calls[0].tool_name == "add"
    assert tool_calls[0].tool_kwargs == {"a": 2, "b": 3}


# Module-level tool shared by the tool_required tests below.
def search(query: str) -> str:
    """Search for information about a query."""
    return f"Results for {query}"


search_tool = FunctionTool.from_defaults(
    fn=search, name="search_tool", description="A tool for searching information"
)
@pytest.fixture
def mock_google_genai() -> GoogleGenAI:
    """Fixture to create a mocked GoogleGenAI instance for unit testing."""
    with patch("google.genai.Client") as mock_client_class:
        # Mock the client and its methods; models.get is called during
        # construction to discover the model's context-window limits.
        mock_client = MagicMock()
        mock_client.models.get.return_value = MagicMock(
            input_token_limit=200000, output_token_limit=8192
        )
        mock_client_class.return_value = mock_client

        return GoogleGenAI(model="models/gemini-2.0-flash-001", api_key="test-key")
# --- Unit tests (mocked client): tool_required -> FunctionCallingConfigMode ---


def test_prepare_chat_with_tools_tool_required(mock_google_genai: GoogleGenAI) -> None:
    """Test that tool_required is correctly passed to the API request when True."""
    # Test with tool_required=True: mode ANY forces the model to call a tool.
    result = mock_google_genai._prepare_chat_with_tools(
        tools=[search_tool], tool_required=True
    )

    assert (
        result["tool_config"].function_calling_config.mode
        == types.FunctionCallingConfigMode.ANY
    )
    assert len(result["tools"]) == 1
    assert result["tools"][0].function_declarations[0].name == "search_tool"


def test_prepare_chat_with_tools_tool_not_required(
    mock_google_genai: GoogleGenAI,
) -> None:
    """Test that tool_required is correctly passed to the API request when False."""
    # Test with tool_required=False (default): mode AUTO lets the model decide.
    result = mock_google_genai._prepare_chat_with_tools(
        tools=[search_tool], tool_required=False
    )

    assert (
        result["tool_config"].function_calling_config.mode
        == types.FunctionCallingConfigMode.AUTO
    )
    assert len(result["tools"]) == 1
    assert result["tools"][0].function_declarations[0].name == "search_tool"


def test_prepare_chat_with_tools_default_behavior(
    mock_google_genai: GoogleGenAI,
) -> None:
    """Test that tool_required defaults to False."""
    # Test default behavior (should be equivalent to tool_required=False)
    result = mock_google_genai._prepare_chat_with_tools(tools=[search_tool])

    assert (
        result["tool_config"].function_calling_config.mode
        == types.FunctionCallingConfigMode.AUTO
    )
    assert len(result["tools"]) == 1
    assert result["tools"][0].function_declarations[0].name == "search_tool"


def test_prepare_chat_with_tools_explicit_tool_choice_overrides_tool_required(
    mock_google_genai: GoogleGenAI,
) -> None:
    """Test that explicit tool_choice overrides tool_required parameter."""
    # Test with tool_required=True but explicit tool_choice="auto"
    result = mock_google_genai._prepare_chat_with_tools(
        tools=[search_tool], tool_required=True, tool_choice="auto"
    )

    assert (
        result["tool_config"].function_calling_config.mode
        == types.FunctionCallingConfigMode.AUTO
    )
    assert len(result["tools"]) == 1
    assert result["tools"][0].function_declarations[0].name == "search_tool"

    # Test with tool_required=False but explicit tool_choice="any"
    result = mock_google_genai._prepare_chat_with_tools(
        tools=[search_tool], tool_required=False, tool_choice="any"
    )

    assert (
        result["tool_config"].function_calling_config.mode
        == types.FunctionCallingConfigMode.ANY
    )
    assert len(result["tools"]) == 1
    assert result["tools"][0].function_declarations[0].name == "search_tool"
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_tool_required_integration(llm: GoogleGenAI) -> None:
    """Test tool_required parameter in actual chat_with_tools calls."""
    # Test with tool_required=True: the response must contain a tool call block.
    response = llm.chat_with_tools(
        user_msg="What is the weather in Paris?",
        tools=[search_tool],
        tool_required=True,
    )
    assert (
        len(
            [
                block
                for block in response.message.blocks
                if isinstance(block, ToolCallBlock)
            ]
        )
        > 0
    )

    # Test with tool_required=False
    response = llm.chat_with_tools(
        user_msg="Say hello!",
        tools=[search_tool],
        tool_required=False,
    )
    # Should not use tools for a simple greeting when tool_required=False
    # Note: This might still use tools depending on the model's behavior,
    # but the important thing is that the tool_config is set correctly
    assert response is not None
# --- Pydantic fn_schema -> Gemini FunctionDeclaration conversion ---


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_convert_llama_index_schema_to_gemini_function_declaration(
    llm: GoogleGenAI,
) -> None:
    """Test conversion of a llama_index schema to a gemini function declaration."""
    function_tool = get_function_tool(Poem)
    # this is our baseline, which is not working because:
    # 1. the descriptions are missing
    google_openai_function = types.FunctionDeclaration.from_callable(
        client=llm._client,
        callable=function_tool.metadata.fn_schema,  # type: ignore
    )

    assert google_openai_function.description == "A simple poem."

    # this is our custom conversion that can take a llama index: fn_schema and convert it to a gemini compatible
    # function declaration (subset of OpenAPI v3)
    converted = convert_schema_to_function_declaration(llm._client, function_tool)

    assert converted.name == "Poem"
    assert converted.description == "A simple poem."
    assert converted.parameters.required is not None

    assert converted.parameters


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_convert_llama_index_schema_to_gemini_function_declaration_nested_case(
    llm: GoogleGenAI,
) -> None:
    """Test conversion of a llama_index fn_schema to a gemini function declaration."""
    function_tool = get_function_tool(Schema)

    llama_index_model_json_schema = function_tool.metadata.fn_schema.model_json_schema()
    # check that the model_json_schema contains a $defs key, which is not supported by Gemini
    assert "$defs" in llama_index_model_json_schema

    converted = convert_schema_to_function_declaration(llm._client, function_tool)

    assert converted.name == "Schema"
    assert converted.description is not None
    assert converted.parameters.required is not None

    assert converted.parameters
    assert list(converted.parameters.properties) == [
        "schema_name",
        "tables",
    ]

    assert converted.parameters.required == ["schema_name", "tables"]


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_optional_value_gemini(llm: GoogleGenAI) -> None:
    """Optional/`| None` fields must convert to nullable properties with None default."""

    class OptionalContent(BaseModel):
        content: Optional[str] = Field(default=None)
        content2: str | None

    function_tool = get_function_tool(OptionalContent)
    decl = convert_schema_to_function_declaration(llm._client, function_tool)

    assert decl.parameters.properties["content"].nullable
    assert decl.parameters.properties["content"].default is None

    assert decl.parameters.properties["content2"].nullable
    assert decl.parameters.properties["content2"].default is None
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_optional_lists_nested_gemini(llm: GoogleGenAI) -> None:
    """Deeply nested models with optional sub-models must flatten $defs correctly."""

    class TextContent(BaseModel):
        """A piece of text content."""

        text: str
        language: str

    class ImageContent(BaseModel):
        """A piece of image content."""

        url: str
        alt_text: Optional[str]
        width: Optional[int]
        height: Optional[int]

    class VideoContent(BaseModel):
        """A piece of video content."""

        url: str
        duration_seconds: int
        thumbnail: Optional[str]

    class Content(BaseModel):
        """Content of a blog post."""

        title: str
        created_at: str
        text: Optional[TextContent] = None
        image: Optional[ImageContent]
        video: Optional[VideoContent]
        tags: List[str]

    class BlogPost(BaseModel):
        """A blog post."""

        id: str
        author: str
        published: bool
        contents: List[Content]
        category: Optional[str]

    function_tool = get_function_tool(BlogPost)
    llama_index_model_json_schema = function_tool.metadata.fn_schema.model_json_schema()
    # Pydantic emits $defs for the nested models; Gemini cannot consume those.
    assert "$defs" in llama_index_model_json_schema

    converted = convert_schema_to_function_declaration(llm._client, function_tool)
    assert converted.name == "BlogPost"

    contents_property = converted.parameters.properties["contents"]
    assert contents_property.type == types.Type.ARRAY

    content_items = contents_property.items
    assert "text" in content_items.properties
    assert "image" in content_items.properties
    assert "video" in content_items.properties

    # End-to-end: the converted schema must actually drive structured output.
    blogpost = (
        llm.as_structured_llm(output_cls=BlogPost)
        .complete(prompt="Write a blog post with at least 3 contents")
        .raw
    )
    assert isinstance(blogpost, BlogPost)
    assert len(blogpost.contents) >= 3
@pytest.mark.asyncio
async def test_prepare_chat_params_more_than_2_tool_calls():
    """prepare_chat_params must merge consecutive assistant blocks (thinking,
    tool calls, text) into one MODEL Content, batch all tool responses into one
    USER Content, and return the final message separately from the history."""
    expected_generation_config = types.GenerateContentConfig()
    expected_model_name = "models/gemini-foo"

    test_messages = [
        ChatMessage(content="Find me a puppy.", role=MessageRole.USER),
        ChatMessage(
            role=MessageRole.ASSISTANT,
            blocks=[
                ThinkingBlock(
                    content="The user is asking me for a puppy, so I should search for puppies using the available tools."
                )
            ],
        ),
        ChatMessage(
            blocks=[ToolCallBlock(tool_name="get_available_tools", tool_kwargs={})],
            role=MessageRole.ASSISTANT,
        ),
        ChatMessage(
            content="Let me search for puppies.",
            role=MessageRole.ASSISTANT,
            additional_kwargs={
                "tool_calls": [
                    {"name": "tool_1"},
                    {"name": "tool_2"},
                    {"name": "tool_3"},
                ]
            },
        ),
        ChatMessage(
            content="Tool 1 Response",
            role=MessageRole.TOOL,
            additional_kwargs={"tool_call_id": "tool_1"},
        ),
        ChatMessage(
            content="Tool 2 Response",
            role=MessageRole.TOOL,
            additional_kwargs={"tool_call_id": "tool_2"},
        ),
        ChatMessage(
            content="Tool 3 Response",
            role=MessageRole.TOOL,
            additional_kwargs={"tool_call_id": "tool_3"},
        ),
        ChatMessage(content="Here is a list of puppies.", role=MessageRole.ASSISTANT),
    ]

    next_msg, chat_kwargs = await prepare_chat_params(
        expected_model_name, test_messages
    )

    assert chat_kwargs["model"] == expected_model_name
    assert chat_kwargs["config"] == expected_generation_config
    # The trailing assistant message becomes the "next" message, not history.
    assert next_msg == types.Content(
        parts=[types.Part(text="Here is a list of puppies.")], role=MessageRole.MODEL
    )
    assert chat_kwargs["history"] == [
        types.Content(
            parts=[types.Part(text="Find me a puppy.")], role=MessageRole.USER
        ),
        types.Content(
            parts=[
                types.Part(
                    text="The user is asking me for a puppy, so I should search for puppies using the available tools.",
                    thought=True,
                ),
                types.Part.from_function_call(name="get_available_tools", args={}),
                types.Part(text="Let me search for puppies."),
                types.Part.from_function_call(name="tool_1", args={}),
                types.Part.from_function_call(name="tool_2", args={}),
                types.Part.from_function_call(name="tool_3", args={}),
            ],
            role=MessageRole.MODEL,
        ),
        types.Content(
            parts=[
                types.Part.from_function_response(
                    name="tool_1", response={"result": "Tool 1 Response"}
                ),
                types.Part.from_function_response(
                    name="tool_2", response={"result": "Tool 2 Response"}
                ),
                types.Part.from_function_response(
                    name="tool_3", response={"result": "Tool 3 Response"}
                ),
            ],
            role=MessageRole.USER,
        ),
    ]
@pytest.mark.asyncio
async def test_prepare_chat_params_with_system_message():
    """A leading SYSTEM message must become system_instruction, not history."""
    # Setup a conversation starting with a SYSTEM message
    model_name = "models/gemini-test"
    system_prompt = "You are a test system."
    user_message_1 = "Hello from user 1."
    assistant_message_1 = "Hello from assistant 1."
    user_message_2 = "Hello from user 2."

    messages = [
        ChatMessage(content=system_prompt, role=MessageRole.SYSTEM),
        ChatMessage(content=user_message_1, role=MessageRole.USER),
        ChatMessage(content=assistant_message_1, role=MessageRole.ASSISTANT),
        ChatMessage(content=user_message_2, role=MessageRole.USER),
    ]

    # Execute prepare_chat_params
    next_msg, chat_kwargs = await prepare_chat_params(model_name, messages)

    # Verify system_prompt is forwarded to system_instruction
    cfg = chat_kwargs["config"]
    assert isinstance(cfg, GenerateContentConfig)
    assert cfg.system_instruction == system_prompt

    # Verify history only contains the user messages and the assistant message
    assert chat_kwargs["history"] == [
        types.Content(
            parts=[types.Part(text=user_message_1)],
            role=MessageRole.USER,
        ),
        types.Content(
            parts=[types.Part(text=assistant_message_1)],
            role=MessageRole.MODEL,
        ),
    ]

    # Verify next_msg is the user message
    assert next_msg == types.Content(
        parts=[types.Part(text=user_message_2)],
        role=MessageRole.USER,
    )
# --- cached_content plumbing (constructor, responses, config merging) ---


@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_cached_content_initialization() -> None:
    """Test GoogleGenAI initialization with cached_content parameter."""
    cached_content_value = "projects/test-project/locations/us-central1/cachedContents/cached-content-id-123"

    llm = GoogleGenAI(
        model="models/gemini-2.0-flash-001",
        api_key=os.environ["GOOGLE_API_KEY"],
        cached_content=cached_content_value,
    )

    # Verify cached_content is stored in the instance
    assert llm.cached_content == cached_content_value

    # Verify cached_content is stored in generation config
    assert llm._generation_config["cached_content"] == cached_content_value
def test_cached_content_in_response() -> None:
    """Test that cached_content is extracted from Gemini responses."""
    # Mock a minimal single-candidate response carrying cached_content.
    mock_response = MagicMock()
    mock_response.candidates = [MagicMock()]
    mock_response.candidates[0].finish_reason = types.FinishReason.STOP
    mock_response.candidates[0].content.role = "model"
    mock_response.candidates[0].content.parts = [MagicMock()]
    mock_response.candidates[0].content.parts[0].text = "Test response"
    mock_response.candidates[0].content.parts[0].thought = False
    mock_response.candidates[0].content.parts[0].inline_data = None
    mock_response.candidates[0].content.parts[0].function_call.id = ""
    mock_response.candidates[0].content.parts[0].function_call.name = "hello"
    mock_response.candidates[0].content.parts[0].function_call.args = {}
    mock_response.candidates[0].content.parts[0].function_response = None
    mock_response.prompt_feedback = None
    mock_response.usage_metadata = None
    mock_response.function_calls = None
    mock_response.cached_content = "projects/test-project/locations/us-central1/cachedContents/cached-content-id-123"

    # Convert response
    chat_response = chat_from_gemini_response(mock_response, [])

    # Verify cached_content is in raw response
    assert "cached_content" in chat_response.raw
    assert (
        chat_response.raw["cached_content"]
        == "projects/test-project/locations/us-central1/cachedContents/cached-content-id-123"
    )


def test_cached_content_without_cached_content() -> None:
    """Test response processing when cached_content is not present."""
    # Mock response without cached_content
    mock_response = MagicMock()
    mock_response.candidates = [MagicMock()]
    mock_response.candidates[0].finish_reason = types.FinishReason.STOP
    mock_response.candidates[0].content.role = "model"
    mock_response.candidates[0].content.parts = [MagicMock()]
    mock_response.candidates[0].content.parts[0].text = "Test response"
    mock_response.candidates[0].content.parts[0].thought = False
    mock_response.candidates[0].content.parts[0].inline_data = None
    mock_response.candidates[0].content.parts[0].function_call.id = ""
    mock_response.candidates[0].content.parts[0].function_call.name = "hello"
    mock_response.candidates[0].content.parts[0].function_call.args = {}
    mock_response.candidates[0].content.parts[0].function_response = None
    mock_response.prompt_feedback = None
    mock_response.usage_metadata = None
    mock_response.function_calls = None
    # No cached_content attribute: deleting it makes the MagicMock raise
    # AttributeError on access, like a real response lacking the field.
    del mock_response.cached_content

    # Convert response
    chat_response = chat_from_gemini_response(mock_response, [])

    # Verify no cached_content key in raw response
    assert "cached_content" not in chat_response.raw
def test_thoughts_in_response() -> None:
"""Test response processing when thought summaries are present."""
# Mock response without cached_content
mock_response = MagicMock()
mock_response.candidates = [MagicMock()]
mock_response.candidates[0].finish_reason = types.FinishReason.STOP
mock_response.candidates[0].content.role = "model"
mock_response.candidates[0].content.parts = [MagicMock(), MagicMock()]
mock_response.candidates[0].content.parts[0].text = "This is a thought."
mock_response.candidates[0].content.parts[0].inline_data = None
mock_response.candidates[0].content.parts[0].thought = True
mock_response.candidates[0].content.parts[0].function_call.id = ""
mock_response.candidates[0].content.parts[0].function_call.name = "hello"
mock_response.candidates[0].content.parts[0].function_call.args = {}
mock_response.candidates[0].content.parts[1].text = "This is not a thought."
mock_response.candidates[0].content.parts[1].inline_data = None
mock_response.candidates[0].content.parts[1].thought = None
mock_response.candidates[0].content.parts[1].function_call = None
mock_response.candidates[0].content.parts[1].function_response = None
mock_response.candidates[0].content.parts[0].function_response = None
mock_response.candidates[0].content.parts[0].model_dump = MagicMock(return_value={})
mock_response.candidates[0].content.parts[1].model_dump = MagicMock(return_value={})
mock_response.prompt_feedback = None
mock_response.usage_metadata = None
mock_response.function_calls = None
# No cached_content attribute
del mock_response.cached_content
# Convert response
chat_response = chat_from_gemini_response(mock_response, [])
# Verify thoughts in raw response
assert (
len(
[
block
for block in chat_response.message.blocks
if isinstance(block, ThinkingBlock)
]
)
== 1
)
assert [ # noqa: RUF015
block
for block in chat_response.message.blocks
if isinstance(block, ThinkingBlock)
][0].content == "This is a thought."
assert chat_response.message.content == "This is not a thought."
def test_thoughts_without_thought_response() -> None:
    """Test response processing when thought summaries are not present.

    A single plain-text part (``thought=None``) must yield no ThinkingBlock
    and become the message content unchanged.
    """
    # Fake Gemini response with one ordinary text part
    mock_response = MagicMock()
    mock_response.candidates = [MagicMock()]
    mock_response.candidates[0].finish_reason = types.FinishReason.STOP
    mock_response.candidates[0].content.role = "model"
    mock_response.candidates[0].content.parts = [MagicMock()]
    mock_response.candidates[0].content.parts[0].text = "This is not a thought."
    mock_response.candidates[0].content.parts[0].inline_data = None
    mock_response.candidates[0].content.parts[0].thought = None
    mock_response.candidates[0].content.parts[0].function_call = None
    mock_response.candidates[0].content.parts[0].function_response = None
    mock_response.prompt_feedback = None
    mock_response.usage_metadata = None
    mock_response.function_calls = None
    mock_response.candidates[0].content.parts[0].model_dump = MagicMock(return_value={})
    # No cached_content attribute on the response object
    del mock_response.cached_content
    # Convert response
    chat_response = chat_from_gemini_response(mock_response, [])
    # No ThinkingBlock should be present in the converted message
    assert (
        len(
            [
                block
                for block in chat_response.message.blocks
                if isinstance(block, ThinkingBlock)
            ]
        )
        == 0
    )
    assert chat_response.message.content == "This is not a thought."
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_cached_content_with_generation_config() -> None:
    """Test that cached_content works with custom generation_config.

    Passing ``cached_content`` inside a user-supplied GenerateContentConfig
    must survive into the merged ``_generation_config`` alongside the other
    custom settings.
    """
    cache_name = "projects/test-project/locations/us-central1/cachedContents/cached-content-id-456"
    user_config = GenerateContentConfig(
        temperature=0.5,
        cached_content=cache_name,
    )
    llm = GoogleGenAI(
        model="models/gemini-2.0-flash-001",
        api_key=os.environ["GOOGLE_API_KEY"],
        generation_config=user_config,
    )
    # Both the cache pointer and the custom temperature must be preserved.
    merged = llm._generation_config
    assert merged["cached_content"] == cache_name
    assert merged["temperature"] == 0.5
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
@pytest.mark.asyncio
async def test_cached_content_in_chat_params() -> None:
    """Test that cached_content is properly included in generation config.

    The cache pointer given at construction time must appear in the LLM's
    generation config and be carried through ``prepare_chat_params`` into
    the per-call chat kwargs.
    """
    cache_name = (
        "projects/test-project/locations/us-central1/cachedContents/test-cache"
    )
    llm = GoogleGenAI(
        model="models/gemini-2.0-flash-001",
        api_key=os.environ["GOOGLE_API_KEY"],
        cached_content=cache_name,
    )
    # Constructor kwarg lands in the merged generation config.
    assert llm._generation_config["cached_content"] == cache_name
    history = [ChatMessage(content="Test message", role=MessageRole.USER)]
    # prepare_chat_params must not drop the cache pointer.
    _next_msg, chat_kwargs = await prepare_chat_params(
        llm.model, history, generation_config=llm._generation_config
    )
    assert chat_kwargs["config"].cached_content == cache_name
def test_built_in_tool_initialization() -> None:
    """Test GoogleGenAI initialization with built_in_tool parameter.

    Uses a fully mocked google.genai client; verifies the tool is stored on
    the instance and injected (as a dict) into the generation config's tools.
    """
    grounding_tool = types.Tool(google_search=types.GoogleSearch())
    # Mock the client so no network/API key is needed
    with patch("google.genai.Client") as mock_client_class:
        mock_client = MagicMock()
        mock_client_class.return_value = mock_client
        # Mock the model metadata response queried at construction time
        mock_model = MagicMock()
        mock_model.supported_generation_methods = ["generateContent"]
        mock_client.models.get.return_value = mock_model
        llm = GoogleGenAI(
            model="gemini-2.0-flash-001",
            built_in_tool=grounding_tool,
        )
        # Verify built_in_tool is stored in the instance
        assert llm.built_in_tool == grounding_tool
        # Verify built_in_tool is included in generation config tools
        assert "tools" in llm._generation_config
        assert len(llm._generation_config["tools"]) == 1
        # The tool gets converted to dict format in generation config
        tool_dict = llm._generation_config["tools"][0]
        assert isinstance(tool_dict, dict)
        assert "google_search" in tool_dict
def test_built_in_tool_in_response() -> None:
    """Test that built_in_tool information is extracted from Gemini responses.

    Builds a fake response whose candidate carries ``grounding_metadata``
    (Google Search grounding) and checks that the converter copies it into
    ``chat_response.raw``.
    """
    # Fake response with grounding (search) metadata on the candidate
    mock_response = MagicMock()
    mock_response.candidates = [MagicMock()]
    mock_response.candidates[0].finish_reason = types.FinishReason.STOP
    mock_response.candidates[0].content.role = "model"
    mock_response.candidates[0].content.parts = [MagicMock()]
    mock_response.candidates[0].content.parts[
        0
    ].text = "Test response with search results"
    mock_response.candidates[0].content.parts[0].inline_data = None
    mock_response.candidates[0].content.parts[0].thought = None
    mock_response.candidates[0].content.parts[0].function_call = None
    mock_response.candidates[0].content.parts[0].function_response = None
    mock_response.prompt_feedback = None
    mock_response.usage_metadata = MagicMock()
    mock_response.usage_metadata.model_dump.return_value = {
        "prompt_token_count": 10,
        "candidates_token_count": 20,
        "total_token_count": 30,
    }
    mock_response.function_calls = None
    # Grounding metadata as the SDK would report it for a search-grounded answer
    grounding_metadata = {
        "web_search_queries": ["test query"],
        "search_entry_point": {"rendered_content": "search results"},
        "grounding_supports": [
            {
                "segment": {"start_index": 0, "end_index": 10, "text": "Test"},
                "grounding_chunk_indices": [0],
            }
        ],
        "grounding_chunks": [
            {"web": {"uri": "https://example.com", "title": "Example"}}
        ],
    }
    mock_response.candidates[0].grounding_metadata = grounding_metadata
    # model_dump is what the converter reads, so it must include the metadata
    mock_response.candidates[0].model_dump.return_value = {
        "finish_reason": types.FinishReason.STOP,
        "content": {
            "role": "model",
            "parts": [{"text": "Test response with search results"}],
        },
        "grounding_metadata": grounding_metadata,
    }
    # Convert response
    chat_response = chat_from_gemini_response(mock_response, [])
    # Verify response is processed correctly
    assert chat_response.message.role == MessageRole.ASSISTANT
    assert len(chat_response.message.blocks) == 1
    assert chat_response.message.blocks[0].text == "Test response with search results"
    # Verify grounding metadata is surfaced in the raw response
    assert "grounding_metadata" in chat_response.raw
    assert chat_response.raw["grounding_metadata"]["web_search_queries"] == [
        "test query"
    ]
def test_built_in_tool_with_generation_config() -> None:
    """Test that built_in_tool works with custom generation_config.

    With a user-supplied GenerateContentConfig, the built-in tool must be
    merged into the config's tools while the custom parameters are kept.
    Note: unlike the no-config path (where the tool appears as a dict), here
    the original ``types.Tool`` object is preserved in the merged config.
    """
    grounding_tool = types.Tool(google_search=types.GoogleSearch())
    # Mock the client so no network/API key is needed
    with patch("google.genai.Client") as mock_client_class:
        mock_client = MagicMock()
        mock_client_class.return_value = mock_client
        # Mock the model metadata response queried at construction time
        mock_model = MagicMock()
        mock_model.supported_generation_methods = ["generateContent"]
        mock_client.models.get.return_value = mock_model
        llm = GoogleGenAI(
            model="gemini-2.0-flash-001",
            built_in_tool=grounding_tool,
            generation_config=types.GenerateContentConfig(
                temperature=0.5,
                max_output_tokens=1000,
            ),
        )
        # Verify built_in_tool is stored even with a custom generation config
        assert llm.built_in_tool == grounding_tool
        # Verify custom config parameters are preserved
        assert llm._generation_config["temperature"] == 0.5
        assert llm._generation_config["max_output_tokens"] == 1000
        # Verify built_in_tool is added to the generation config
        assert "tools" in llm._generation_config
        assert len(llm._generation_config["tools"]) == 1
        # The tool should be preserved as the original Tool object
        tool_obj = llm._generation_config["tools"][0]
        assert isinstance(tool_obj, types.Tool)
        assert tool_obj == grounding_tool
@pytest.mark.asyncio
async def test_built_in_tool_in_chat_params() -> None:
    """Test that built_in_tool is properly included in chat parameters.

    After construction with a built-in tool (mocked client), the tool must
    flow through ``prepare_chat_params`` into the per-call chat config.
    """
    grounding_tool = types.Tool(google_search=types.GoogleSearch())
    messages = [
        ChatMessage(role=MessageRole.USER, content="What is the weather today?")
    ]
    # Mock the client so no network/API key is needed
    with patch("google.genai.Client") as mock_client_class:
        mock_client = MagicMock()
        mock_client_class.return_value = mock_client
        # Mock the model metadata response queried at construction time
        mock_model = MagicMock()
        mock_model.supported_generation_methods = ["generateContent"]
        mock_client.models.get.return_value = mock_model
        llm = GoogleGenAI(
            model="gemini-2.0-flash-001",
            built_in_tool=grounding_tool,
        )
        # Prepare chat params from the LLM's merged generation config
        next_msg, chat_kwargs = await prepare_chat_params(
            llm.model, messages, generation_config=llm._generation_config
        )
        # Verify built_in_tool reached the chat config unchanged
        assert hasattr(chat_kwargs["config"], "tools")
        assert chat_kwargs["config"].tools is not None
        assert len(chat_kwargs["config"].tools) == 1
        # The tool should be preserved as the original Tool object
        assert chat_kwargs["config"].tools[0] == grounding_tool
def test_built_in_tool_with_invalid_tool() -> None:
    """Test error handling when built_in_tool is invalid or malformed.

    The google.genai client is fully mocked here, so the test requires no
    API key; the previous ``@pytest.mark.skipif(SKIP_GEMINI, ...)`` marker
    needlessly skipped it in environments without GOOGLE_API_KEY and has
    been removed.
    """
    # Mock the client so no network/API key is needed
    with patch("google.genai.Client") as mock_client_class:
        mock_client = MagicMock()
        mock_client_class.return_value = mock_client
        # Mock the model metadata response queried at construction time
        mock_model = MagicMock()
        mock_model.supported_generation_methods = ["generateContent"]
        mock_client.models.get.return_value = mock_model
        # None is a valid "no tool" value
        llm = GoogleGenAI(
            model="gemini-2.0-flash-001",
            built_in_tool=None,
        )
        # Should initialize successfully without tools
        assert llm.built_in_tool is None
        # "tools" key exists but is empty/falsy
        assert "tools" in llm._generation_config and not llm._generation_config.get(
            "tools"
        )
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_built_in_tool_with_streaming() -> None:
    """Test that built_in_tool works correctly with streaming responses."""
    search_tool = types.Tool(google_search=types.GoogleSearch())
    llm = GoogleGenAI(
        model="gemini-2.0-flash-001",
        api_key=os.environ["GOOGLE_API_KEY"],
        built_in_tool=search_tool,
    )
    # Stream a chat and drain every chunk; the last chunk carries the
    # accumulated final message.
    messages = [ChatMessage(content="Who won the Euro 2024?", role=MessageRole.USER)]
    chunks = list(llm.stream_chat(messages))
    assert len(chunks) > 0
    final_response = chunks[-1]
    assert final_response is not None
    assert final_response.message is not None
    assert len(final_response.message.content) > 0
    # Grounding metadata appears only when the model actually used search,
    # so it is validated opportunistically.
    if hasattr(final_response, "raw") and final_response.raw:
        raw_response = final_response.raw
        if "grounding_metadata" in raw_response:
            assert isinstance(raw_response["grounding_metadata"], dict)
def test_built_in_tool_config_merge_edge_cases() -> None:
    """Test edge cases in merging built_in_tool with generation_config.

    Two scenarios (mocked client):
    1. generation_config with an empty ``tools`` list -> the built-in tool is
       appended and other settings survive.
    2. generation_config that already contains a tool -> constructing with
       another built_in_tool must raise ValueError (mixing is unsupported).
    """
    grounding_tool = types.Tool(google_search=types.GoogleSearch())
    # Scenario 1: config that already has an (empty) tools list
    empty_tools_config = types.GenerateContentConfig(temperature=0.7, tools=[])
    # Mock the client so no network/API key is needed
    with patch("google.genai.Client") as mock_client_class:
        mock_client = MagicMock()
        mock_client_class.return_value = mock_client
        # Mock the model metadata response queried at construction time
        mock_model = MagicMock()
        mock_model.supported_generation_methods = ["generateContent"]
        mock_client.models.get.return_value = mock_model
        llm = GoogleGenAI(
            model="gemini-2.0-flash-001",
            built_in_tool=grounding_tool,
            generation_config=empty_tools_config,
        )
        # Tool should be added to the empty tools list; temperature kept
        assert "tools" in llm._generation_config
        assert len(llm._generation_config["tools"]) == 1
        assert llm._generation_config["temperature"] == 0.7
        # Scenario 2: config that already carries a tool
        existing_tool = types.Tool(google_search=types.GoogleSearch())
        existing_tools_config = types.GenerateContentConfig(
            temperature=0.3, tools=[existing_tool]
        )
        # Adding another built_in_tool on top must be rejected
        with pytest.raises(
            ValueError,
            match="Providing multiple Google GenAI tools or mixing with custom tools is not supported.",
        ):
            GoogleGenAI(
                model="gemini-2.0-flash-001",
                built_in_tool=grounding_tool,
                generation_config=existing_tools_config,
            )
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_built_in_tool_error_recovery() -> None:
    """Test error recovery when built_in_tool encounters issues."""
    search_tool = types.Tool(google_search=types.GoogleSearch())
    llm = GoogleGenAI(
        model="gemini-2.0-flash-001",
        api_key=os.environ["GOOGLE_API_KEY"],
        built_in_tool=search_tool,
    )
    # A plain greeting is unlikely to trigger the search tool; the model
    # must still answer normally even when the tool goes unused.
    completion = llm.complete("Hello, how are you?")
    assert completion is not None
    assert completion.text is not None
    assert len(completion.text) > 0
    assert isinstance(completion.raw, dict)
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
@pytest.mark.asyncio
async def test_built_in_tool_async_compatibility() -> None:
    """Test that built_in_tool works with async methods."""
    search_tool = types.Tool(google_search=types.GoogleSearch())
    llm = GoogleGenAI(
        model="gemini-2.0-flash-001",
        api_key=os.environ["GOOGLE_API_KEY"],
        built_in_tool=search_tool,
    )
    # Async completion path
    completion = await llm.acomplete("What is machine learning?")
    assert completion is not None
    assert completion.text is not None
    assert len(completion.text) > 0
    # Async chat path
    chat_result = await llm.achat(
        [ChatMessage(content="Explain quantum computing", role=MessageRole.USER)]
    )
    assert chat_result is not None
    assert chat_result.message is not None
    assert len(chat_result.message.content) > 0
    # The tool configuration must persist across async calls
    assert llm.built_in_tool == search_tool
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_built_in_tool_google_search() -> None:
    """Test Google Search functionality with built_in_tool."""
    search_tool = types.Tool(google_search=types.GoogleSearch())
    llm = GoogleGenAI(
        model="gemini-2.0-flash-001",
        api_key=os.environ["GOOGLE_API_KEY"],
        built_in_tool=search_tool,
    )
    completion = llm.complete("What is the current weather in San Francisco?")
    assert completion is not None
    assert completion.text is not None
    assert len(completion.text) > 0
    # Raw payload must be available and dict-shaped
    assert "raw" in completion.__dict__
    raw = completion.raw
    assert isinstance(raw, dict)
    # The model only sometimes chooses to ground via search, so grounding
    # metadata is validated opportunistically when present.
    if "grounding_metadata" in raw:
        assert isinstance(raw["grounding_metadata"], dict)
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_google_search_grounding_metadata(llm: GoogleGenAI) -> None:
    """Test that Google Search returns comprehensive grounding metadata in response.

    Every grounding-metadata field is validated opportunistically: the model
    only emits it when it actually grounded the answer via search, so each
    check is guarded by an existence test rather than required outright.
    """
    grounding_tool = types.Tool(google_search=types.GoogleSearch())
    # Create a new LLM instance with the grounding tool (reuse fixture's model)
    llm_with_search = GoogleGenAI(
        model=llm.model,
        api_key=os.environ["GOOGLE_API_KEY"],
        built_in_tool=grounding_tool,
    )
    response = llm_with_search.complete("What is the capital of Japan?")
    assert response is not None
    assert response.text is not None
    assert len(response.text) > 0
    raw_response = response.raw
    assert raw_response is not None
    assert isinstance(raw_response, dict)
    # Grounding metadata is only present when the model used search
    if "grounding_metadata" in raw_response:
        grounding_metadata = raw_response["grounding_metadata"]
        assert isinstance(grounding_metadata, dict)
        # Queries the model issued to Google Search
        if "web_search_queries" in grounding_metadata:
            assert grounding_metadata["web_search_queries"] is not None
            assert isinstance(grounding_metadata["web_search_queries"], list)
            assert len(grounding_metadata["web_search_queries"]) > 0
            # Each query must be a non-blank string
            for query in grounding_metadata["web_search_queries"]:
                assert isinstance(query, str)
                assert len(query.strip()) > 0
        # Rendered "search suggestions" widget content
        if "search_entry_point" in grounding_metadata:
            search_entry_point = grounding_metadata["search_entry_point"]
            assert isinstance(search_entry_point, dict)
            if "rendered_content" in search_entry_point:
                assert search_entry_point["rendered_content"] is not None
                assert isinstance(search_entry_point["rendered_content"], str)
                assert len(search_entry_point["rendered_content"].strip()) > 0
        # Per-segment attribution of answer text to grounding chunks
        if "grounding_supports" in grounding_metadata:
            assert grounding_metadata["grounding_supports"] is not None
            assert isinstance(grounding_metadata["grounding_supports"], list)
            for support in grounding_metadata["grounding_supports"]:
                assert isinstance(support, dict)
                if "segment" in support:
                    segment = support["segment"]
                    assert isinstance(segment, dict)
        # Source documents (web pages) the answer was grounded on
        if "grounding_chunks" in grounding_metadata:
            assert grounding_metadata["grounding_chunks"] is not None
            assert isinstance(grounding_metadata["grounding_chunks"], list)
            for chunk in grounding_metadata["grounding_chunks"]:
                assert isinstance(chunk, dict)
                if "web" in chunk:
                    web_chunk = chunk["web"]
                    assert isinstance(web_chunk, dict)
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_built_in_tool_code_execution() -> None:
    """Test Code Execution functionality with built_in_tool."""
    exec_tool = types.Tool(code_execution=types.ToolCodeExecution())
    llm = GoogleGenAI(
        model="gemini-2.0-flash-001",
        api_key=os.environ["GOOGLE_API_KEY"],
        built_in_tool=exec_tool,
    )
    completion = llm.complete("Calculate 20th fibonacci number.")
    assert completion is not None
    assert completion.text is not None
    assert len(completion.text) > 0
    # fib(20) == 6765 and should appear in the computed answer
    assert "6765" in completion.text
    # Raw payload must be available and dict-shaped
    assert "raw" in completion.__dict__
    assert isinstance(completion.raw, dict)
def test_code_execution_response_parts() -> None:
    """Test that code execution response contains executable_code, code_execution_result, and text parts.

    Builds a fully mocked chat exchange where the candidate's parts are:
    intro text -> executable_code -> code_execution_result -> final text,
    then validates each part's structure in the raw response and that the
    final numeric answer survives into the message content.
    """
    code_execution_tool = types.Tool(code_execution=types.ToolCodeExecution())
    # Fake response whose candidate mixes text and code-execution parts
    mock_response = MagicMock()
    mock_candidate = MagicMock()
    mock_candidate.finish_reason = types.FinishReason.STOP
    mock_candidate.content.role = "model"
    mock_response.candidates = [mock_candidate]
    # Part 1: introductory text
    mock_text_part = MagicMock()
    mock_text_part.text = (
        "I'll calculate the sum of the first 50 prime numbers for you."
    )
    mock_text_part.inline_data = None
    mock_text_part.thought = None
    mock_text_part.function_call = None
    mock_text_part.function_response = None
    # Part 2: the Python code the model "executed"
    mock_code_part = MagicMock()
    mock_code_part.text = None
    mock_code_part.inline_data = None
    mock_code_part.thought = None
    mock_code_part.executable_code = {
        "code": "def is_prime(n):\n    if n < 2:\n        return False\n    for i in range(2, int(n**0.5) + 1):\n        if n % i == 0:\n            return False\n    return True\n\nprimes = []\nn = 2\nwhile len(primes) < 50:\n    if is_prime(n):\n        primes.append(n)\n    n += 1\n\nprint(f'Sum of first 50 primes: {sum(primes)}')",
        "language": types.Language.PYTHON,
    }
    mock_code_part.function_call = None
    mock_code_part.function_response = None
    # Part 3: the execution outcome/output
    mock_result_part = MagicMock()
    mock_result_part.text = None
    mock_result_part.inline_data = None
    mock_result_part.thought = None
    mock_result_part.code_execution_result = {
        "outcome": types.Outcome.OUTCOME_OK,
        "output": "Sum of first 50 primes: 5117",
    }
    mock_result_part.function_call = None
    mock_result_part.function_response = None
    # Part 4: final natural-language answer
    mock_final_text_part = MagicMock()
    mock_final_text_part.text = "The sum of the first 50 prime numbers is 5117."
    mock_final_text_part.inline_data = None
    mock_final_text_part.thought = None
    mock_final_text_part.function_call = None
    mock_final_text_part.function_response = None
    mock_candidate.content.parts = [
        mock_text_part,
        mock_code_part,
        mock_result_part,
        mock_final_text_part,
    ]
    mock_response.prompt_feedback = None
    mock_response.usage_metadata = None
    mock_response.function_calls = None
    # model_dump is what ends up in chat_response.raw; mirror the parts above
    mock_candidate.model_dump.return_value = {
        "finish_reason": types.FinishReason.STOP,
        "content": {
            "role": "model",
            "parts": [
                {
                    "text": "I'll calculate the sum of the first 50 prime numbers for you."
                },
                {
                    "executable_code": {
                        "code": "def is_prime(n):\n    if n < 2:\n        return False\n    for i in range(2, int(n**0.5) + 1):\n        if n % i == 0:\n            return False\n    return True\n\nprimes = []\nn = 2\nwhile len(primes) < 50:\n    if is_prime(n):\n        primes.append(n)\n    n += 1\n\nprint(f'Sum of first 50 primes: {sum(primes)}')",
                        "language": types.Language.PYTHON,
                    }
                },
                {
                    "code_execution_result": {
                        "outcome": types.Outcome.OUTCOME_OK,
                        "output": "Sum of first 50 primes: 5117",
                    }
                },
                {"text": "The sum of the first 50 prime numbers is 5117."},
            ],
        },
    }
    # Mock the client and the chat round-trip
    with patch("google.genai.Client") as mock_client_class:
        mock_client = MagicMock()
        mock_client_class.return_value = mock_client
        # Mock the model metadata response queried at construction time
        mock_model = MagicMock()
        mock_model.supported_generation_methods = ["generateContent"]
        mock_client.models.get.return_value = mock_model
        # Mock the chat creation and send_message method
        mock_chat = MagicMock()
        mock_client.chats.create.return_value = mock_chat
        mock_chat.send_message.return_value = mock_response
        llm = GoogleGenAI(
            model="gemini-2.0-flash-001",
            built_in_tool=code_execution_tool,
        )
        messages = [
            ChatMessage(
                role="user", content="What is the sum of the first 50 prime numbers?"
            )
        ]
        response = llm.chat(messages)
        assert response is not None
        assert response.message is not None
        assert len(response.message.content) > 0
        # The raw response must expose the candidate's parts list
        raw_response = response.raw
        assert isinstance(raw_response, dict)
        assert "content" in raw_response
        content = raw_response["content"]
        assert "parts" in content
        assert isinstance(content["parts"], list)
        assert len(content["parts"]) > 0
        # Validate each part by its kind (text / executable_code / result)
        for part in content["parts"]:
            assert isinstance(part, dict)
            # Check for text parts
            if part.get("text") is not None:
                assert isinstance(part["text"], str)
                assert len(part["text"].strip()) > 0
            # Check for executable code parts
            if part.get("executable_code") is not None:
                executable_code_content = part["executable_code"]
                # Validate executable code structure
                assert isinstance(executable_code_content, dict)
                assert "code" in executable_code_content
                assert "language" in executable_code_content
                # Validate the code content
                code = executable_code_content["code"]
                assert isinstance(code, str)
                assert len(code.strip()) > 0
                # Validate language
                assert executable_code_content["language"] == types.Language.PYTHON
            # Check for code execution result parts
            if part.get("code_execution_result") is not None:
                code_execution_result = part["code_execution_result"]
                # Validate code execution result structure
                assert isinstance(code_execution_result, dict)
                assert "outcome" in code_execution_result
                assert "output" in code_execution_result
                # Validate the execution outcome
                assert code_execution_result["outcome"] == types.Outcome.OUTCOME_OK
                # Validate the output
                output = code_execution_result["output"]
                assert isinstance(output, str)
                assert len(output.strip()) > 0
        # The final answer must mention the computed sum
        response_content = response.message.content
        assert "5117" in response_content
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_thoughts_with_streaming() -> None:
    """Test that thought summaries work correctly with streaming responses.

    Streams a chat with ``include_thoughts=True`` and checks that the final
    accumulated chunk contains at least one non-empty ThinkingBlock.
    (A stray debug ``print(final_response)`` left in the test body has been
    removed.)
    """
    llm = GoogleGenAI(
        model="gemini-2.5-flash",
        api_key=os.environ["GOOGLE_API_KEY"],
        generation_config=GenerateContentConfig(
            thinking_config=ThinkingConfig(
                include_thoughts=True,
            ),
        ),
    )
    # Stream a chat and drain all chunks; the last chunk carries the final message
    messages = [ChatMessage(content="What is your name?", role=MessageRole.USER)]
    stream_response = llm.stream_chat(messages)
    chunks = []
    final_response = None
    for chunk in stream_response:
        chunks.append(chunk)
        final_response = chunk
    assert len(chunks) > 0
    assert final_response is not None
    assert final_response.message is not None
    assert len(final_response.message.content) != 0
    # At least one ThinkingBlock with non-empty content must be present
    assert any(
        isinstance(block, ThinkingBlock) for block in final_response.message.blocks
    )
    assert (
        len(
            "".join(
                [
                    block.content or ""
                    for block in final_response.message.blocks
                    if isinstance(block, ThinkingBlock)
                ]
            )
        )
        != 0
    )
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
@pytest.mark.asyncio
async def test_thoughts_with_async_streaming() -> None:
    """Test that thought summaries work correctly with streaming responses."""
    llm = GoogleGenAI(
        model="gemini-2.5-flash",
        api_key=os.environ["GOOGLE_API_KEY"],
        generation_config=GenerateContentConfig(
            thinking_config=ThinkingConfig(
                include_thoughts=True,
            ),
        ),
    )
    # Stream asynchronously and drain all chunks; the last chunk carries
    # the final accumulated message.
    messages = [ChatMessage(content="What is your name?", role=MessageRole.USER)]
    stream = await llm.astream_chat(messages)
    chunks = [chunk async for chunk in stream]
    assert len(chunks) > 0
    final_response = chunks[-1]
    assert final_response is not None
    assert final_response.message is not None
    assert len(final_response.message.content) != 0
    # At least one ThinkingBlock with non-empty content must be present
    thinking_blocks = [
        block
        for block in final_response.message.blocks
        if isinstance(block, ThinkingBlock)
    ]
    assert thinking_blocks
    assert "".join(block.content or "" for block in thinking_blocks)
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
def test_thoughts_with_chat() -> None:
    """Test that thought summaries work correctly with chat responses."""
    llm = GoogleGenAI(
        model="gemini-2.5-flash",
        api_key=os.environ["GOOGLE_API_KEY"],
        generation_config=GenerateContentConfig(
            thinking_config=ThinkingConfig(
                include_thoughts=True,
            ),
        ),
    )
    # Single (non-streaming) chat round-trip
    response = llm.chat(
        [ChatMessage(content="What is your name?", role=MessageRole.USER)]
    )
    assert response is not None
    assert response.message is not None
    assert len(response.message.content) != 0
    # At least one ThinkingBlock with non-empty content must be present
    thinking_blocks = [
        block for block in response.message.blocks if isinstance(block, ThinkingBlock)
    ]
    assert thinking_blocks
    assert "".join(block.content or "" for block in thinking_blocks)
@pytest.mark.skipif(SKIP_GEMINI, reason="GOOGLE_API_KEY not set")
@pytest.mark.asyncio
async def test_thoughts_with_async_chat() -> None:
    """Test that thought summaries work correctly with chat responses."""
    llm = GoogleGenAI(
        model="gemini-2.5-flash",
        api_key=os.environ["GOOGLE_API_KEY"],
        generation_config=GenerateContentConfig(
            thinking_config=ThinkingConfig(
                include_thoughts=True,
            ),
        ),
    )
    # Single (non-streaming) async chat round-trip
    response = await llm.achat(
        [ChatMessage(content="What is your name?", role=MessageRole.USER)]
    )
    assert response is not None
    assert response.message is not None
    assert len(response.message.content) != 0
    # At least one ThinkingBlock with non-empty content must be present
    thinking_blocks = [
        block for block in response.message.blocks if isinstance(block, ThinkingBlock)
    ]
    assert thinking_blocks
    assert "".join(block.content or "" for block in thinking_blocks)
| Schema |
python | django__django | tests/admin_views/admin.py | {
"start": 30724,
"end": 30793
} | class ____(admin.ModelAdmin):
search_fields = ["name"]
| CountryAdmin |
python | getsentry__sentry | src/sentry/sentry_apps/installations.py | {
"start": 7347,
"end": 8702
} | class ____:
sentry_app_installation: SentryAppInstallation
user: User | RpcUser
action: str
def run(self) -> None:
if self.action not in VALID_ACTIONS:
raise SentryAppSentryError(
f"Invalid action '{self.action} for installation notifier for {self.sentry_app}"
)
send_and_save_webhook_request(self.sentry_app, self.request)
@property
def request(self) -> AppPlatformEvent[SentryAppInstallationWebhookData]:
data = serialize(
self.sentry_app_installation,
user=self.user,
serializer=SentryAppInstallationSerializer(),
is_webhook=True,
)
return AppPlatformEvent[SentryAppInstallationWebhookData](
resource=SentryAppResourceType.INSTALLATION,
action=InstallationActionType(self.action),
install=self.sentry_app_installation,
data=SentryAppInstallationWebhookData(installation=data),
actor=self.user,
)
@cached_property
def sentry_app(self) -> SentryApp:
return self.sentry_app_installation.sentry_app
@cached_property
def api_grant(self) -> ApiGrant | None:
return self.sentry_app_installation.api_grant_id and self.sentry_app_installation.api_grant
@dataclasses.dataclass
| SentryAppInstallationNotifier |
python | apache__airflow | airflow-core/tests/unit/dags/test_parsing_context.py | {
"start": 1147,
"end": 1908
} | class ____(EmptyOperator):
def execute(self, context: Context):
import os
parsing_context_file = Path("/tmp/airflow_parsing_context")
self.log.info("Executing")
# signal to the test that we've started
parsing_context = (
f"{_AIRFLOW_PARSING_CONTEXT_DAG_ID}={os.environ.get(_AIRFLOW_PARSING_CONTEXT_DAG_ID)}\n"
f"{_AIRFLOW_PARSING_CONTEXT_TASK_ID}={os.environ.get(_AIRFLOW_PARSING_CONTEXT_TASK_ID)}\n"
)
parsing_context_file.write_text(parsing_context)
self.log.info("Executed")
dag1 = DAG(dag_id="test_parsing_context", schedule=None, start_date=datetime(2015, 1, 1))
dag1_task1 = DagWithParsingContext(task_id="task1", dag=dag1, owner="airflow")
| DagWithParsingContext |
python | pydantic__pydantic | tests/test_forward_ref.py | {
"start": 10254,
"end": 11947
} | class ____(BaseModel):
name: str
owner: Owner
subaccounts: list[Account] = []
"""
)
Account = module.Account
assert Account.model_json_schema() == {
'$ref': '#/$defs/Account',
'$defs': {
'Account': {
'title': 'Account',
'type': 'object',
'properties': {
'name': {'title': 'Name', 'type': 'string'},
'owner': {'$ref': '#/$defs/Owner'},
'subaccounts': {
'title': 'Subaccounts',
'default': [],
'type': 'array',
'items': {'$ref': '#/$defs/Account'},
},
},
'required': ['name', 'owner'],
},
'Owner': {
'title': 'Owner',
'type': 'object',
'properties': {'account': {'$ref': '#/$defs/Account'}},
'required': ['account'],
},
},
}
def test_forward_ref_with_field(create_module):
@create_module
def module():
import re
from typing import ForwardRef
import pytest
from pydantic import BaseModel, Field
Foo = ForwardRef('Foo')
class Foo(BaseModel):
c: list[Foo] = Field(gt=0)
with pytest.raises(TypeError, match=re.escape("Unable to apply constraint 'gt' to supplied value []")):
Foo(c=[Foo(c=[])])
def test_forward_ref_optional(create_module):
module = create_module(
# language=Python
"""
from __future__ import annotations
from pydantic import BaseModel, Field
| Account |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.