| language (stringclasses 1) | repo (stringclasses 346) | path (stringlengths 6–201) | class_span (dict) | source (stringlengths 21–2.38M) | target (stringlengths 1–96) |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/minimum-deletions-to-make-string-balanced.py | {
"start": 29,
"end": 353
} | class ____(object):
def minimumDeletions(self, s):
    """
    :type s: str
    :rtype: int
    """
    # Greedy pairing: an 'a' that appears after at least one unmatched 'b'
    # forces exactly one deletion (remove that 'a' or one earlier 'b').
    # Counting such pairs yields the minimum number of deletions.
    deletions = 0
    unmatched_bs = 0
    for ch in s:
        if ch == 'b':
            unmatched_bs += 1
        else:
            if unmatched_bs:
                unmatched_bs -= 1
                deletions += 1
    return deletions
| Solution |
python | ray-project__ray | python/ray/train/trainer.py | {
"start": 894,
"end": 6812
} | class ____:
"""An iterator over Train results. Returned by ``trainer.run_iterator``."""
def __init__(
self,
backend_executor: Union[BackendExecutor, ActorWrapper],
backend_config: BackendConfig,
train_func: Union[Callable[[], T], Callable[[Dict[str, Any]], T]],
datasets: Dict[str, Dataset],
metadata: Dict[str, Any],
data_config: DataConfig,
checkpoint: Optional[Union[Dict, str, Path, Checkpoint]],
):
"""Start training on the worker group and prepare to iterate results.

Training is launched eagerly here via ``_start_training``; iterating
the object afterwards only consumes results reported by the workers.
"""
self._backend_executor = backend_executor
# Instantiate the backend implementation from its configured class.
self._backend = backend_config.backend_cls()
self._train_func = train_func
self._datasets = datasets
self._metadata = metadata
self._data_config = data_config
self._start_training(
train_func=train_func,
datasets=self._datasets,
metadata=self._metadata,
data_config=self._data_config,
checkpoint=checkpoint,
)
# Set to True once __next__ observes the end of training.
self._finished_training = False
def __iter__(self):
# This object is its own iterator (see __next__).
return self
def _start_training(
self,
train_func,
datasets,
metadata,
data_config,
checkpoint: Optional[Checkpoint] = None,
):
"""Kick off ``train_func`` on all workers through the backend executor."""
tune_session: _TrainSession = get_session()
# The storage context comes from the surrounding Tune session, so this
# must be called from within Tune.
assert tune_session, "`_start_training` should only be called from within Tune"
storage = tune_session.storage
self._run_with_error_handling(
lambda: self._backend_executor.start_training(
train_func=train_func,
datasets=datasets,
metadata=metadata,
data_config=data_config,
storage=storage,
checkpoint=checkpoint,
)
)
def _run_with_error_handling(self, func: Callable):
"""Run ``func``, translating Train-internal errors into user-facing ones.

Worker failures trigger a restart-and-retry; lifecycle and backend
errors are re-raised as ``RuntimeError`` with their context suppressed.
"""
try:
return func()
except TrainingWorkerError:
# TODO(ml-team): This Train fault-tolerance code doesn't get used
# since max_retries=0
# Workers have already been restarted.
logger.info(
"Workers have been successfully restarted. Resuming "
"training from latest checkpoint."
)
# NOTE(review): the retry restarts training without passing a
# checkpoint argument — confirm resume-from-latest happens elsewhere.
self._start_training(
self._train_func,
self._datasets,
self._metadata,
self._data_config,
)
# Recurse so the retried call gets the same error handling.
return self._run_with_error_handling(func)
except InactiveWorkerGroupError:
raise RuntimeError(
"This Trainer is not active. It is either shutdown "
"already or never started in the first place. "
"Either create a new Trainer or start this one."
) from None
except TrainBackendError:
raise RuntimeError(
"Training failed. You should not be seeing "
"this error and this is a bug. Please create "
"a new issue at "
"https://github.com/ray-project/ray."
) from None
def __next__(self):
"""Return the next batch of intermediate results from the workers.

Raises StopIteration when training has completed, and reports the
final run status (success or failure) to the backend executor.
"""
if self.is_finished():
self._backend_executor.report_final_run_status(errored=False)
raise StopIteration
try:
next_results = self._run_with_error_handling(self._fetch_next_result)
if next_results is None:
# No more intermediate results: wait for train_func to return on
# all workers, then end iteration.
self._backend_executor.report_final_run_status(errored=False)
self._run_with_error_handling(self._finish_training)
self._finished_training = True
raise StopIteration
else:
return next_results
except StartTraceback as e:
# If this is a StartTraceback, then this is a user error.
# We raise it directly
if isinstance(e, StartTracebackWithWorkerRank):
failed_rank = e.worker_rank
else:
failed_rank = None
# Extract the stack trace from the exception
e = skip_exceptions(e)
stack_trace = "".join(
traceback.format_exception(type(e), e, e.__traceback__)
)
self._backend_executor.report_final_run_status(
errored=True, stack_trace=stack_trace, failed_rank=failed_rank
)
try:
# Exception raised in at least one training worker. Immediately raise
# this error to the user and do not attempt to terminate gracefully.
self._backend_executor.shutdown(graceful_termination=False)
self._finished_training = True
except Exception:
# Best-effort shutdown; the original user error below matters more.
pass
raise
def _fetch_next_result(self) -> Optional[List[Dict]]:
"""Fetch next results produced by ``session.report()`` from each worker.
Assumes ``start_training`` has already been called.
Returns:
A list of dictionaries of values passed to ``session.report()`` from
each worker. Each item corresponds to an intermediate result
a single worker. If there are no more items to fetch,
returns None.
"""
results = self._backend_executor.get_next_results()
if results is None:
return None
# Sanity check: the executor should only hand back _TrainingResult items.
assert all(isinstance(result, _TrainingResult) for result in results)
return results
def _finish_training(self):
"""Finish training and return final results. Propagate any exceptions.
Blocks until training is finished on all workers.
Assumes `start_training` has already been called.
Returns:
A list of return values from calling ``train_func`` on each worker.
Each item corresponds to the return value from a single worker.
"""
return self._backend_executor.finish_training()
def is_finished(self) -> bool:
# True once __next__ has observed the end of training.
return self._finished_training
| TrainingIterator |
python | marshmallow-code__apispec | tests/test_ext_marshmallow_common.py | {
"start": 2017,
"end": 3205
} | class ____:
@pytest.mark.parametrize("exclude_type", (tuple, list))
@pytest.mark.parametrize("dump_only_type", (tuple, list))
def test_get_fields_meta_exclude_dump_only_as_list_and_tuple(
self, exclude_type, dump_only_type
):
# Meta.exclude / Meta.dump_only must behave identically whether declared
# as tuples or lists; the parametrization covers all four combinations.
class ExcludeSchema(Schema):
field1 = fields.Int()
field2 = fields.Int()
field3 = fields.Int()
field4 = fields.Int()
field5 = fields.Int()
class Meta:
exclude = exclude_type(("field1", "field2"))
dump_only = dump_only_type(("field3", "field4"))
# Excluded fields are always dropped.
assert list(get_fields(ExcludeSchema).keys()) == ["field3", "field4", "field5"]
# dump_only fields are additionally dropped when exclude_dump_only=True.
assert list(get_fields(ExcludeSchema, exclude_dump_only=True).keys()) == [
"field5"
]
# regression test for https://github.com/marshmallow-code/apispec/issues/673
def test_schema_with_field_named_fields(self):
# A field literally named "fields" must still be returned intact.
class TestSchema(Schema):
fields = fields.Int()
schema_fields = get_fields(TestSchema)
assert list(schema_fields.keys()) == ["fields"]
assert isinstance(schema_fields["fields"], fields.Int)
| TestGetFields |
python | lepture__authlib | authlib/jose/rfc7518/jwe_algs.py | {
"start": 4144,
"end": 6333
} | class ____(JWEAlgorithm):
EXTRA_HEADERS = frozenset(["iv", "tag"])
def __init__(self, key_size):
# key_size is the AES key length in bits; the registered JWE "alg" name
# follows RFC 7518, e.g. "A128GCMKW".
self.name = f"A{key_size}GCMKW"
self.description = f"Key wrapping with AES GCM using {key_size}-bit key"
self.key_size = key_size
def prepare_key(self, raw_data):
# GCM key wrapping uses a symmetric ("oct") key.
return OctKey.import_key(raw_data)
def generate_preset(self, enc_alg, key):
# Pre-generate the content encryption key; wrap() will reuse it.
cek = enc_alg.generate_cek()
return {"cek": cek}
def _check_key(self, key):
if len(key) * 8 != self.key_size:
raise ValueError(f"A key of size {self.key_size} bits is required.")
def wrap(self, enc_alg, headers, key, preset=None):
"""Encrypt the content encryption key (CEK) with AES-GCM.

Returns the encrypted key (``ek``), the plaintext ``cek``, and header
parameters carrying the base64url-encoded IV and GCM authentication tag.
"""
if preset and "cek" in preset:
cek = preset["cek"]
else:
cek = enc_alg.generate_cek()
op_key = key.get_op_key("wrapKey")
self._check_key(op_key)
#: https://tools.ietf.org/html/rfc7518#section-4.7.1.1
#: The "iv" (initialization vector) Header Parameter value is the
#: base64url-encoded representation of the 96-bit IV value
iv_size = 96
iv = os.urandom(iv_size // 8)
cipher = Cipher(AES(op_key), GCM(iv), backend=default_backend())
enc = cipher.encryptor()
# ek is the GCM ciphertext of the CEK; the tag is published separately.
ek = enc.update(cek) + enc.finalize()
h = {
"iv": to_native(urlsafe_b64encode(iv)),
"tag": to_native(urlsafe_b64encode(enc.tag)),
}
return {"ek": ek, "cek": cek, "header": h}
def unwrap(self, enc_alg, ek, headers, key):
"""Decrypt the CEK produced by :meth:`wrap`.

Requires the "iv" and "tag" header parameters; GCM decryption
authenticates the ciphertext against the tag.
"""
op_key = key.get_op_key("unwrapKey")
self._check_key(op_key)
iv = headers.get("iv")
if not iv:
raise ValueError('Missing "iv" in headers')
tag = headers.get("tag")
if not tag:
raise ValueError('Missing "tag" in headers')
iv = urlsafe_b64decode(to_bytes(iv))
tag = urlsafe_b64decode(to_bytes(tag))
cipher = Cipher(AES(op_key), GCM(iv, tag), backend=default_backend())
d = cipher.decryptor()
cek = d.update(ek) + d.finalize()
# The recovered CEK must match the content-encryption algorithm's size.
if len(cek) * 8 != enc_alg.CEK_SIZE:
raise ValueError('Invalid "cek" length')
return cek
| AESGCMAlgorithm |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/decorators/config_mapping_decorator.py | {
"start": 310,
"end": 4953
} | class ____:
def __init__(
self,
config_schema: Optional[UserConfigSchema] = None,
receive_processed_config_values: Optional[bool] = None,
):
# Decorator state only; the actual ConfigMapping is built in __call__.
self.config_schema = config_schema
self.receive_processed_config_values = check.opt_bool_param(
receive_processed_config_values, "receive_processed_config_values"
)
def __call__(self, fn: Callable[..., Any]) -> ConfigMapping:
"""Build a ConfigMapping from the decorated single-parameter function.

If the parameter is annotated with a pythonic ``Config`` subclass, the
schema is inferred from the annotation and the function is wrapped so
it receives a constructed Config object instead of a raw dict.
"""
check.callable_param(fn, "fn")
# Imported locally — presumably to avoid import cycles; confirm before hoisting.
from dagster._config.pythonic_config import (
Config,
infer_schema_from_config_annotation,
safe_is_subclass,
)
from dagster._core.definitions.run_config import RunConfig
config_fn_params = get_function_params(fn)
check.invariant(
len(config_fn_params) == 1, "Config mapping should have exactly one parameter"
)
param = config_fn_params[0]
# If the parameter is a subclass of Config, we can infer the config schema from the
# type annotation. We'll also wrap the config mapping function to convert the config
# dictionary into the appropriate Config object.
if safe_is_subclass(param.annotation, Config):
check.invariant(
self.config_schema is None,
"Cannot provide config_schema to config mapping with Config-annotated param",
)
config_schema = infer_schema_from_config_annotation(param.annotation, param.default)
config_cls = cast("type[Config]", param.annotation)
param_name = param.name
def wrapped_fn(config_as_dict) -> Any:
config_input = config_cls(**config_as_dict)
output = fn(**{param_name: config_input})
# Normalize a RunConfig return value into a plain config dict.
if isinstance(output, RunConfig):
return output.to_config_dict()
else:
return output
return ConfigMapping(
config_fn=wrapped_fn,
config_schema=config_schema,
receive_processed_config_values=None,
)
# Plain (non-Config-annotated) function: pass it through unchanged.
return ConfigMapping(
config_fn=fn,
config_schema=self.config_schema,
receive_processed_config_values=self.receive_processed_config_values,
)
# Overload 1: bare decorator usage — @config_mapping
@overload
def config_mapping(
config_fn: ConfigMappingFn,
) -> ConfigMapping: ...
# Overload 2: decorator factory usage — @config_mapping(config_schema=...)
@overload
def config_mapping(
*,
config_schema: UserConfigSchema = ...,
receive_processed_config_values: Optional[bool] = ...,
) -> Callable[[ConfigMappingFn], ConfigMapping]: ...
def config_mapping(
config_fn: Optional[Callable[..., Any]] = None,
*,
config_schema: Optional[UserConfigSchema] = None,
receive_processed_config_values: Optional[bool] = None,
) -> Union[Callable[[ConfigMappingFn], ConfigMapping], ConfigMapping]:
"""Create a config mapping with the specified parameters from the decorated function.
The config schema will be inferred from the type signature of the decorated function if not explicitly provided.
Args:
config_schema (ConfigSchema): The schema of the composite config.
receive_processed_config_values (Optional[bool]): If true, config values provided to the config_fn
will be converted to their dagster types before being passed in. For example, if this
value is true, enum config passed to config_fn will be actual enums, while if false,
then enum config passed to config_fn will be strings.
Examples:
.. code-block:: python
@op
def my_op(context):
return context.op_config["foo"]
@graph
def my_graph():
my_op()
@config_mapping
def my_config_mapping(val):
return {"ops": {"my_op": {"config": {"foo": val["foo"]}}}}
@config_mapping(config_schema={"foo": str})
def my_config_mapping(val):
return {"ops": {"my_op": {"config": {"foo": val["foo"]}}}}
result = my_graph.to_job(config=my_config_mapping).execute_in_process()
"""
# This case is for when decorator is used bare, without arguments. e.g. @config_mapping versus @config_mapping()
if config_fn is not None:
# Bare usage cannot also carry keyword configuration.
check.invariant(config_schema is None)
check.invariant(receive_processed_config_values is None)
return _ConfigMapping()(config_fn)
check.invariant(config_fn is None)
# Factory usage: return a _ConfigMapping instance to act as the decorator.
return _ConfigMapping(
config_schema=config_schema,
receive_processed_config_values=receive_processed_config_values,
)
| _ConfigMapping |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 104206,
"end": 107526
} | class ____(system_info):
# Name of the site.cfg section this info class corresponds to (set by subclasses).
section = None
# Environment variable that overrides which pkg-config executable is used.
config_env_var = 'PKG_CONFIG'
default_config_exe = 'pkg-config'
# Extra arguments appended to every invocation (e.g. the package/module name).
append_config_exe = ''
# Optional macro names emitted with the detected version/release embedded.
version_macro_name = None
release_macro_name = None
# pkg-config flags used to query version and compile flags.
version_flag = '--modversion'
cflags_flag = '--cflags'
def get_config_exe(self):
    """Return the pkg-config executable: the env override if set, else the default."""
    # The environment variable (e.g. PKG_CONFIG) takes precedence over the
    # class-level default executable name.
    return os.environ.get(self.config_env_var, self.default_config_exe)
def get_config_output(self, config_exe, option):
"""Run ``config_exe <append_config_exe> <option>`` and return its output.

NOTE(review): if the subprocess fails, the exception is swallowed and the
function implicitly returns None — callers appear to rely on the
falsiness of that result; confirm before changing.
"""
cmd = config_exe + ' ' + self.append_config_exe + ' ' + option
try:
o = subprocess.check_output(cmd)
except (OSError, subprocess.CalledProcessError):
pass
else:
# Normalize/decode the raw subprocess bytes into a usable path string.
o = filepath_from_subprocess_output(o)
return o
def calc_info(self):
"""Query pkg-config and populate this instance's build info.

Parses ``--modversion``, ``--libs`` and cflags output into macros,
libraries, library/include dirs and extra compile/link args.
"""
config_exe = find_executable(self.get_config_exe())
if not config_exe:
# NOTE(review): config_exe is falsy (likely None) here, so the message
# prints "File not found: None"; the searched name would be clearer.
log.warn('File not found: %s. Cannot determine %s info.' \
% (config_exe, self.section))
return
info = {}
macros = []
libraries = []
library_dirs = []
include_dirs = []
extra_link_args = []
extra_compile_args = []
version = self.get_config_output(config_exe, self.version_flag)
if version:
# Define e.g. FOO_INFO="<version>" plus an optional versioned macro.
macros.append((self.__class__.__name__.split('.')[-1].upper(),
_c_string_literal(version)))
if self.version_macro_name:
macros.append((self.version_macro_name + '_%s'
% (version.replace('.', '_')), None))
if self.release_macro_name:
release = self.get_config_output(config_exe, '--release')
if release:
macros.append((self.release_macro_name + '_%s'
% (release.replace('.', '_')), None))
opts = self.get_config_output(config_exe, '--libs')
if opts:
# Split linker output into -l libraries, -L dirs, and the rest.
for opt in opts.split():
if opt[:2] == '-l':
libraries.append(opt[2:])
elif opt[:2] == '-L':
library_dirs.append(opt[2:])
else:
extra_link_args.append(opt)
opts = self.get_config_output(config_exe, self.cflags_flag)
if opts:
# Split compiler output into -I dirs, -D macros, and the rest.
for opt in opts.split():
if opt[:2] == '-I':
include_dirs.append(opt[2:])
elif opt[:2] == '-D':
if '=' in opt:
n, v = opt[2:].split('=')
macros.append((n, v))
else:
macros.append((opt[2:], None))
else:
extra_compile_args.append(opt)
# Only record the keys that actually found something.
if macros:
dict_append(info, define_macros=macros)
if libraries:
dict_append(info, libraries=libraries)
if library_dirs:
dict_append(info, library_dirs=library_dirs)
if include_dirs:
dict_append(info, include_dirs=include_dirs)
if extra_link_args:
dict_append(info, extra_link_args=extra_link_args)
if extra_compile_args:
dict_append(info, extra_compile_args=extra_compile_args)
if info:
self.set_info(**info)
return
| _pkg_config_info |
python | run-llama__llama_index | llama-index-core/llama_index/core/schema.py | {
"start": 6860,
"end": 6993
} | class ____(str, Enum):
# NOTE: because of the ``str`` mixin, ``auto()`` stringifies the enum counter,
# so the member values are "1".."5" rather than the member names.
TEXT = auto()
IMAGE = auto()
INDEX = auto()
DOCUMENT = auto()
MULTIMODAL = auto()
| ObjectType |
python | pyparsing__pyparsing | examples/tiny/tiny_ast.py | {
"start": 6270,
"end": 8055
} | class ____(TinyNode):
"""Node representing a function declaration/definition.
This node accepts parser groups tagged with type 'func_decl'. It will
initialize its `statements` from an associated function body group if
available. The body is expected under either `parsed.Function_Body.stmts`
or `parsed.body.stmts`, depending on how the upstream parser groups were
provided by the caller.
"""
statement_type: ClassVar[str] = "func_decl"
# Prebuilt function body statements (if a body was provided)
name: str
statements: list[TinyNode] = field(default_factory=list)
@classmethod
def from_parsed(cls, parsed: pp.ParseResults) -> FunctionDeclStmtNode:
fn_name = parsed.decl.name
# Locate a function body group in common shapes
body_group: pp.ParseResults = parsed.body
built: list[TinyNode] = []
if body_group:
raw_stmts = body_group.stmts or []
for stmt in raw_stmts:
node_cls = TinyNode.from_statement_type(stmt["type"]) # type: ignore[index]
if node_cls is not None:
built.append(node_cls.from_parsed(stmt)) # type: ignore[arg-type]
return cls(name=fn_name, statements=built)
def execute(self, engine: "TinyEngine") -> object | None: # noqa: F821 - forward ref
# Execute the function body in a new local frame. If no body is present,
# this is effectively a no-op that returns None.
# Caller should have already created a frame and populated parameters as vars
try:
for node in self.statements:
node.execute(engine)
return None
except ReturnPropagate as rp:
return rp.value
@dataclass
| FunctionDeclStmtNode |
python | kubernetes-client__python | kubernetes/client/models/v1_horizontal_pod_autoscaler_spec.py | {
"start": 383,
"end": 8320
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'max_replicas': 'int',
'min_replicas': 'int',
'scale_target_ref': 'V1CrossVersionObjectReference',
'target_cpu_utilization_percentage': 'int'
}
attribute_map = {
'max_replicas': 'maxReplicas',
'min_replicas': 'minReplicas',
'scale_target_ref': 'scaleTargetRef',
'target_cpu_utilization_percentage': 'targetCPUUtilizationPercentage'
}
def __init__(self, max_replicas=None, min_replicas=None, scale_target_ref=None, target_cpu_utilization_percentage=None, local_vars_configuration=None): # noqa: E501
"""V1HorizontalPodAutoscalerSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._max_replicas = None
self._min_replicas = None
self._scale_target_ref = None
self._target_cpu_utilization_percentage = None
self.discriminator = None
# Required fields go through their setters unconditionally (which may
# raise on None when client-side validation is enabled)...
self.max_replicas = max_replicas
# ...while optional fields are only assigned when provided.
if min_replicas is not None:
self.min_replicas = min_replicas
self.scale_target_ref = scale_target_ref
if target_cpu_utilization_percentage is not None:
self.target_cpu_utilization_percentage = target_cpu_utilization_percentage
@property
def max_replicas(self):
"""Gets the max_replicas of this V1HorizontalPodAutoscalerSpec. # noqa: E501
maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. # noqa: E501
:return: The max_replicas of this V1HorizontalPodAutoscalerSpec. # noqa: E501
:rtype: int
"""
return self._max_replicas
@max_replicas.setter
def max_replicas(self, max_replicas):
"""Sets the max_replicas of this V1HorizontalPodAutoscalerSpec.
maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. # noqa: E501
:param max_replicas: The max_replicas of this V1HorizontalPodAutoscalerSpec. # noqa: E501
:type: int
"""
# Required field: reject None when client-side validation is enabled.
if self.local_vars_configuration.client_side_validation and max_replicas is None: # noqa: E501
raise ValueError("Invalid value for `max_replicas`, must not be `None`") # noqa: E501
self._max_replicas = max_replicas
@property
def min_replicas(self):
"""Gets the min_replicas of this V1HorizontalPodAutoscalerSpec. # noqa: E501
minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available. # noqa: E501
:return: The min_replicas of this V1HorizontalPodAutoscalerSpec. # noqa: E501
:rtype: int
"""
return self._min_replicas
@min_replicas.setter
def min_replicas(self, min_replicas):
"""Sets the min_replicas of this V1HorizontalPodAutoscalerSpec.
minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available. # noqa: E501
:param min_replicas: The min_replicas of this V1HorizontalPodAutoscalerSpec. # noqa: E501
:type: int
"""
# Optional field: no client-side validation.
self._min_replicas = min_replicas
@property
def scale_target_ref(self):
"""Gets the scale_target_ref of this V1HorizontalPodAutoscalerSpec. # noqa: E501
:return: The scale_target_ref of this V1HorizontalPodAutoscalerSpec. # noqa: E501
:rtype: V1CrossVersionObjectReference
"""
return self._scale_target_ref
@scale_target_ref.setter
def scale_target_ref(self, scale_target_ref):
"""Sets the scale_target_ref of this V1HorizontalPodAutoscalerSpec.
:param scale_target_ref: The scale_target_ref of this V1HorizontalPodAutoscalerSpec. # noqa: E501
:type: V1CrossVersionObjectReference
"""
# Required field: reject None when client-side validation is enabled.
if self.local_vars_configuration.client_side_validation and scale_target_ref is None: # noqa: E501
raise ValueError("Invalid value for `scale_target_ref`, must not be `None`") # noqa: E501
self._scale_target_ref = scale_target_ref
@property
def target_cpu_utilization_percentage(self):
"""Gets the target_cpu_utilization_percentage of this V1HorizontalPodAutoscalerSpec. # noqa: E501
targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used. # noqa: E501
:return: The target_cpu_utilization_percentage of this V1HorizontalPodAutoscalerSpec. # noqa: E501
:rtype: int
"""
return self._target_cpu_utilization_percentage
@target_cpu_utilization_percentage.setter
def target_cpu_utilization_percentage(self, target_cpu_utilization_percentage):
"""Sets the target_cpu_utilization_percentage of this V1HorizontalPodAutoscalerSpec.
targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used. # noqa: E501
:param target_cpu_utilization_percentage: The target_cpu_utilization_percentage of this V1HorizontalPodAutoscalerSpec. # noqa: E501
:type: int
"""
# Optional field: no client-side validation.
self._target_cpu_utilization_percentage = target_cpu_utilization_percentage
def to_dict(self):
    """Return the model's properties as a plain ``dict``.

    Nested models (anything exposing ``to_dict``) are serialized
    recursively, including models stored inside list and dict values.
    """
    result = {}
    # six.iteritems was a Python-2 leftover; dict.items()/plain iteration is
    # equivalent on Python 3 and drops the third-party dependency here.
    for attr in self.openapi_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [
                x.to_dict() if hasattr(x, "to_dict") else x for x in value
            ]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {
                k: v.to_dict() if hasattr(v, "to_dict") else v
                for k, v in value.items()
            }
        else:
            result[attr] = value
    return result
def to_str(self):
"""Returns the string representation of the model"""
# pprint gives stable, readable output for the nested dict form.
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
# Equality is structural: compare the serialized dict forms.
if not isinstance(other, V1HorizontalPodAutoscalerSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1HorizontalPodAutoscalerSpec):
return True
return self.to_dict() != other.to_dict()
| V1HorizontalPodAutoscalerSpec |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Operators.py | {
"start": 2216,
"end": 2397
} | class ____(BinOpNode):
"""Returns A + B. Does not check input types."""
nodeName = 'Add'
def __init__(self, name):
BinOpNode.__init__(self, name, '__add__')
| AddNode |
python | django-extensions__django-extensions | django_extensions/templatetags/highlighting.py | {
"start": 1450,
"end": 3079
} | class ____(Node):
def __init__(self, language, nodelist, name=""):
self.language = Variable(language)
self.nodelist = nodelist
if name:
self.name = Variable(name)
else:
self.name = None
def render(self, context):
code = self.nodelist.render(context).strip()
lexer = get_lexer_by_name(self.language.resolve(context))
formatter = HtmlFormatter(linenos=False)
html = ""
if self.name:
name = self.name.resolve(context)
html = '<div class="predesc"><span>%s</span></div>' % name
return html + pyghighlight(code, lexer, formatter)
@register.tag
def highlight(parser, token):
"""
Tag to put a highlighted source code <pre> block in your code.
This takes two arguments, the language and a little explaination message
that will be generated before the code. The second argument is optional.
Your code will be fed through pygments so you can use any language it
supports.
Usage::
{% load highlighting %}
{% highlight 'python' 'Excerpt: blah.py' %}
def need_food(self):
print("Love is colder than death")
{% endhighlight %}
"""
if not HAS_PYGMENTS: # pragma: no cover
raise ImportError("Please install 'pygments' library to use highlighting.")
nodelist = parser.parse(("endhighlight",))
parser.delete_first_token()
bits = token.split_contents()[1:]
if len(bits) < 1:
raise TemplateSyntaxError("'highlight' statement requires an argument")
return CodeNode(bits[0], nodelist, *bits[1:])
| CodeNode |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_block_types.py | {
"start": 5031,
"end": 5974
} | class ____:
async def test_read_block_type_by_id(self, client, block_type_x):
# Lookup by primary key should round-trip name and id.
response = await client.get(f"/block_types/{block_type_x.id}")
assert response.status_code == status.HTTP_200_OK
result = BlockType.model_validate(response.json())
assert result.name == block_type_x.name
assert result.id == block_type_x.id
async def test_read_block_type_by_slug(self, client, block_type_x):
# Lookup by slug should resolve to the same block type.
response = await client.get(f"/block_types/slug/{block_type_x.slug}")
assert response.status_code == status.HTTP_200_OK
result = BlockType.model_validate(response.json())
assert result.name == block_type_x.name
assert result.id == block_type_x.id
async def test_read_missing_block_type_by_name(self, client):
# Unknown slugs yield 404 rather than an empty body.
response = await client.get("/block_types/slug/not-a-real-block")
assert response.status_code == status.HTTP_404_NOT_FOUND
| TestReadBlockType |
python | apache__airflow | airflow-core/src/airflow/models/dag.py | {
"start": 10897,
"end": 11867
} | class ____(Base):
"""
Table defining different owner attributes.
For example, a link for an owner that will be passed as a hyperlink to the "DAGs" view.
"""
__tablename__ = "dag_owner_attributes"
dag_id: Mapped[str] = mapped_column(
StringID(),
ForeignKey("dag.dag_id", name="dag.dag_id", ondelete="CASCADE"),
nullable=False,
primary_key=True,
)
owner: Mapped[str] = mapped_column(String(500), primary_key=True, nullable=False)
link: Mapped[str] = mapped_column(String(500), nullable=False)
def __repr__(self):
return f"<DagOwnerAttributes: dag_id={self.dag_id}, owner={self.owner}, link={self.link}>"
@classmethod
def get_all(cls, session: Session) -> dict[str, dict[str, str]]:
    """Return every owner link, keyed as ``{dag_id: {owner: link}}``."""
    owner_links: dict[str, dict[str, str]] = defaultdict(dict)
    for attrs in session.scalars(select(cls)):
        owner_links[attrs.dag_id][attrs.owner] = attrs.link
    return owner_links
| DagOwnerAttributes |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/contexts/test_workflow_event_context.py | {
"start": 713,
"end": 1132
} | class ____(WorkflowEventContextTestCase):
def test_usage_in_contextual_class(self) -> None:
detector = self.create_detector()
ctx_data = WorkflowEventContextData(
detector=detector,
)
# Keep the token on the test case so the context can be reset in teardown.
self.ctx_token = WorkflowEventContext.set(ctx_data)
mock_cls = MockContextualClass()
result = mock_cls.run()
# run() is expected to retrieve the detector from the ambient context.
assert result == detector
| TestWorkflowEventContextUsage |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 11030,
"end": 11557
} | class ____(PointerModel):
def get_data_type(self):
# As "data", an ephemeral pointer is stored as the pointee itself.
return self._pointee_be_type
def as_data(self, builder, value):
# Dereference the pointer, then serialize the pointee via its model.
value = builder.load(value)
return self._pointee_model.as_data(builder, value)
def from_data(self, builder, value):
# Reconstructing needs a data *pointer*, not a value — see below.
raise NotImplementedError("use load_from_data_pointer() instead")
def load_from_data_pointer(self, builder, ptr, align=None):
# The data pointer itself serves as the value; only a bitcast is needed.
return builder.bitcast(ptr, self.get_value_type())
@register_default(types.EphemeralArray)
| EphemeralPointerModel |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_scatter07.py | {
"start": 315,
"end": 1743
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_scatter07.xlsx")
self.ignore_elements = {"xl/workbook.xml": ["<fileVersion", "<calcPr"]}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "scatter"})
chart.axis_ids = [63597952, 63616128]
chart.axis2_ids = [63617664, 63619456]
data = [
[27, 33, 44, 12, 1],
[6, 8, 6, 4, 2],
[20, 10, 30, 50, 40],
[0, 27, 23, 30, 40],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.write_column("D1", data[3])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$C$1:$C$5",
"values": "=Sheet1!$D$1:$D$5",
"y2_axis": 1,
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | test/distributed/test_symmetric_memory.py | {
"start": 10713,
"end": 23292
} | class ____(MultiProcContinuousTest):
@property
def device(self) -> torch.device:
return torch.device(device_type, self.rank)
def _init_process(self):
torch.cuda.set_device(self.device)
torch.manual_seed(42 + self.rank)
torch.use_deterministic_algorithms(True)
torch.set_deterministic_debug_mode("warn")
torch.utils.deterministic.fill_uninitialized_memory = True
@skipIf(
not PLATFORM_SUPPORTS_SYMM_MEM, "SymmMem is not supported on this ROCm arch"
)
@skip_if_lt_x_gpu(2)
@parametrize("gather_dim", [0, 1, 2])
def test_fused_all_gather_matmul(self, gather_dim: int) -> None:
self._init_process()
BATCH = 8
M = 64
N = 16
K = 32
group = dist.group.WORLD
rank = self.rank
torch.manual_seed(42 + rank)
A_shard_shape = [BATCH, M, K]
A_shard_shape[gather_dim] //= self.world_size
A_shard = torch.rand(A_shard_shape, device="cuda")
Bs = [torch.rand(K, N, device="cuda") for _ in range(3)]
ag_output_0, mm_outputs_0 = _fused_all_gather_matmul_fallback(
A_shard, Bs, gather_dim=gather_dim, group_name=group.group_name
)
ag_output_1, mm_outputs_1 = torch.ops.symm_mem.fused_all_gather_matmul(
A_shard, Bs, gather_dim=gather_dim, group_name=group.group_name
)
assert torch.allclose(ag_output_0, ag_output_1)
assert ag_output_0.stride() == ag_output_1.stride()
for mm_output_0, mm_output_1 in zip(mm_outputs_0, mm_outputs_1):
assert torch.allclose(mm_output_0, mm_output_1)
assert mm_output_0.stride(), mm_output_1.stride()
@skip_if_rocm_multiprocess # this requires async_input_mm support
@skipIf(
not SM90OrLater,
"_fused_all_gather_matmul_native currently only supports sm>=90",
)
@skip_if_lt_x_gpu(2)
@parametrize("symm_mem_input", [True, False])
@parametrize("is_b_row_major", [True, False])
@skipIf(
SM100OrLater,
"https://github.com/pytorch/pytorch/issues/162917",
)
def test_fused_all_gather_matmul_native(
self, symm_mem_input: bool, is_b_row_major: bool
) -> None:
os.environ["TORCH_SYMM_MEM_ENABLE_NATIVE_ASYNC_TP"] = "1"
self._init_process()
# See _should_use_fused_all_gather_matmul_native() for the algo
# selection criteria of _fused_all_gather_matmul_native().
M = 4096
N = 1024
K = 1024
group_name = dist.group.WORLD.group_name
torch.manual_seed(42 + self.rank)
if symm_mem_input:
A_shard = symm_mem.empty(
M // self.world_size,
K,
dtype=torch.bfloat16,
device=self.device,
).normal_()
else:
A_shard = torch.rand(
M // self.world_size, K, dtype=torch.bfloat16, device="cuda"
)
if is_b_row_major:
B = torch.rand(K, N, dtype=torch.bfloat16, device="cuda")
else:
B = torch.rand(N, K, dtype=torch.bfloat16, device="cuda").t()
ag_baseline, mm_baseline = _fused_all_gather_matmul_fallback(
A_shard, [B], gather_dim=0, group_name=group_name
)
with torch.profiler.profile(
activities=[
torch.profiler.ProfilerActivity.CUDA,
],
) as prof:
ag_target, mm_target = torch.ops.symm_mem.fused_all_gather_matmul(
A_shard, [B], gather_dim=0, group_name=group_name
)
self.assertTrue(
any("PersistentAsyncInputScheduler" in event.key for event in prof.events())
)
torch.testing.assert_close(ag_target, ag_baseline)
torch.testing.assert_close(mm_target[0], mm_baseline[0])
os.environ["TORCH_SYMM_MEM_ENABLE_NATIVE_ASYNC_TP"] = "0"
@skip_if_lt_x_gpu(2)
@requires_multicast_support()
def test_multimem_all_gather_matmul(self) -> None:
self._init_process()
# See _should_use_multimem_all_gather_matmul() for the algo
# selection criteria of _multimem_gather_matmul().
M = 1024
N = 1024
K = 1024
group_name = dist.group.WORLD.group_name
torch.manual_seed(42 + self.rank)
A_shard = torch.rand(
M // self.world_size, K, dtype=torch.bfloat16, device="cuda"
)
B = torch.rand(K, N, dtype=torch.bfloat16, device="cuda")
ag_baseline, mm_baseline = _fused_all_gather_matmul_fallback(
A_shard, [B], gather_dim=0, group_name=group_name, return_A=False
)
with torch.profiler.profile(
activities=[
torch.profiler.ProfilerActivity.CUDA,
],
) as prof:
ag_target, mm_target = torch.ops.symm_mem.fused_all_gather_matmul(
A_shard, [B], gather_dim=0, group_name=group_name, return_A=False
)
self.assertTrue(
any("multimem_all_gather_kernel" in event.key for event in prof.events())
)
torch.testing.assert_close(ag_target, ag_baseline)
torch.testing.assert_close(mm_target[0], mm_baseline[0])
@skipIf(
not PLATFORM_SUPPORTS_SYMM_MEM, "SymmMem is not supported on this ROCm arch"
)
@skip_if_lt_x_gpu(2)
@skipUnless(SM89OrLater, "Requires compute capability >= 8.9")
@parametrize("gather_dim", [0, 1])
@parametrize(
"scale_mode", ["tensor-wise", "row-wise-replicated", "row-wise-sharded"]
)
def test_fused_all_gather_scaled_matmul(
self, gather_dim: int, scale_mode: str
) -> None:
self._init_process()
BATCH = 8
M = 64
N = 16
K = 32
group = dist.group.WORLD
rank = self.rank
if gather_dim == 0:
leading_dims = (BATCH // self.world_size, M)
elif gather_dim == 1:
leading_dims = (BATCH, M // self.world_size)
else:
raise AssertionError(f"Invalid scale_mode: {scale_mode}")
torch.manual_seed(42 + rank)
A_shard = torch.rand(*leading_dims, K, device="cuda").to(e4m3_type)
Bs = [torch.rand(N, K, device="cuda").to(e4m3_type).T for _ in range(3)]
if scale_mode == "tensor-wise":
A_scale = torch.tensor(0.1, device="cuda")
B_scales = [torch.tensor(0.1, device="cuda") for _ in range(3)]
out_dtypes = [None, torch.bfloat16, torch.float32]
elif scale_mode == "row-wise-sharded":
A_scale = torch.full((*leading_dims, 1), 0.1, device="cuda")
B_scales = [torch.full((1, N), 0.1, device="cuda") for _ in range(3)]
out_dtypes = [torch.bfloat16] * 3
elif scale_mode == "row-wise-replicated":
A_scale = torch.full((BATCH, M, 1), 0.1, device="cuda")
B_scales = [torch.full((1, N), 0.1, device="cuda") for _ in range(3)]
out_dtypes = [torch.bfloat16] * 3
else:
raise AssertionError(f"Invalid scale_mode: {scale_mode}")
ag_output_0, mm_outputs_0 = _fused_all_gather_scaled_matmul_fallback(
A_shard,
Bs,
A_scale,
B_scales,
gather_dim=gather_dim,
group_name=group.group_name,
biases=[None] * len(Bs),
result_scales=[None] * len(Bs),
out_dtypes=out_dtypes,
use_fast_accum=[None] * len(Bs),
)
ag_output_1, mm_outputs_1 = torch.ops.symm_mem.fused_all_gather_scaled_matmul(
A_shard,
Bs,
A_scale,
B_scales,
gather_dim=gather_dim,
group_name=group.group_name,
biases=[None] * len(Bs),
result_scales=[None] * len(Bs),
out_dtypes=out_dtypes,
use_fast_accum=[None] * len(Bs),
)
self.assertTrue(
torch.allclose(
ag_output_0.to(torch.float32),
ag_output_1.to(torch.float32),
)
)
self.assertEqual(ag_output_0.stride(), ag_output_1.stride())
for mm_output_0, mm_output_1 in zip(mm_outputs_0, mm_outputs_1):
self.assertTrue(
torch.allclose(
mm_output_0.to(torch.float32), mm_output_1.to(torch.float32)
)
)
self.assertEqual(mm_output_0.stride(), mm_output_1.stride())
self.assertEqual(mm_output_0.dtype, mm_output_1.dtype)
@skipIf(
not PLATFORM_SUPPORTS_SYMM_MEM, "SymmMem is not supported on this ROCm arch"
)
@skip_if_lt_x_gpu(2)
@parametrize("scatter_dim", [0, 1, 2])
def test_fused_matmul_reduce_scatter(self, scatter_dim: int) -> None:
self._init_process()
BATCH = 8
M = 64
N = 16
K = 1024
group = dist.group.WORLD
rank = self.rank
torch.manual_seed(42 + rank)
A = torch.rand(BATCH, M, K, device="cuda")
B = torch.rand(K, N, device="cuda")
output_0 = _fused_matmul_reduce_scatter_fallback(
A, B, "avg", scatter_dim=scatter_dim, group_name=group.group_name
)
output_1 = torch.ops.symm_mem.fused_matmul_reduce_scatter(
A, B, "avg", scatter_dim=scatter_dim, group_name=group.group_name
)
assert torch.allclose(output_0, output_1)
assert output_0.stride() == output_1.stride()
@skip_if_rocm_multiprocess # AsyncTP support changed _fused_scaled_matmul_reduce_scatter_fallback API, need more changes
@skip_if_lt_x_gpu(2)
@skipUnless(SM89OrLater, "Requires compute capability >= 8.9")
@parametrize("scatter_dim", [0, 1])
@parametrize("rowwise", [True, False])
@skipIf(
SM100OrLater,
"https://github.com/pytorch/pytorch/issues/162940",
)
def test_fused_scaled_matmul_reduce_scatter(
self, scatter_dim: int, rowwise: bool
) -> None:
self._init_process()
BATCH = 8
M = 64
N = 16
K = 32
group = dist.group.WORLD
rank = self.rank
torch.manual_seed(42 + rank)
A = torch.rand(BATCH, M, K, device="cuda").to(e4m3_type)
B = torch.rand(N, K, device="cuda").to(e4m3_type).T
if rowwise:
A_scale = torch.full((BATCH, M, 1), 0.1, device="cuda")
B_scale = torch.full((1, N), 0.1, device="cuda")
else:
A_scale = torch.tensor(0.1, device="cuda")
B_scale = torch.tensor(0.1, device="cuda")
output_shape = [*A.shape[:-1], B.shape[1]]
outputs = []
for context in test_contexts:
with context():
outputs.append(
torch.ops.symm_mem.fused_scaled_matmul_reduce_scatter(
A,
B,
A_scale,
B_scale,
"avg",
scatter_dim,
scatter_dim,
group.group_name,
output_shape,
out_dtype=torch.bfloat16,
)
)
assert outputs[0].stride() == outputs[1].stride()
self.assertEqual(outputs[0], outputs[1])
@skipIf(
not PLATFORM_SUPPORTS_SYMM_MEM, "SymmMem is not supported on this ROCm arch"
)
@parametrize("dim", [0, 1, 2])
def test_optimal_layout(self, dim: int) -> None:
t = torch.rand(8, 64, 32, 16)
x = restride_A_shard_for_fused_all_gather_matmul(t, dim)
self.assertTrue(x.movedim(dim, 0).is_contiguous())
self.assertTrue(torch.allclose(x, t))
x = restride_A_for_fused_matmul_reduce_scatter(t, dim)
self.assertTrue(x.movedim(dim, 0).is_contiguous())
self.assertTrue(torch.allclose(x, t))
# [READ ME FIRST]
# The `SymmMemEmptySetDeviceTest` suite parameterizes whether user sets the
# device before calling symm_mem.empty. Either way should work.
# However, since `set_device` is persistent, we cannot use the
# `MultiProcContinuousTest` template because the next function will be
# "contaminated", leading to flaky tests (e.g. hang). Therefore, we use
# `MultiProcessTestCase` which spawns new processes for each test function.
# Please limit the number of tests you want to add under this test
# suite as respawning processes and `init_process_group` is expensive.
@instantiate_parametrized_tests
@requires_cuda_p2p_access()
| AsyncTPTest |
python | pandas-dev__pandas | pandas/tests/test_algos.py | {
"start": 885,
"end": 21569
} | class ____:
def test_factorize_complex(self):
# GH#17927
array = np.array([1, 2, 2 + 1j], dtype=complex)
labels, uniques = algos.factorize(array)
expected_labels = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, expected_labels)
expected_uniques = np.array([(1 + 0j), (2 + 0j), (2 + 1j)], dtype=complex)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_factorize(self, index_or_series_obj, sort):
obj = index_or_series_obj
result_codes, result_uniques = obj.factorize(sort=sort)
constructor = Index
if isinstance(obj, MultiIndex):
constructor = MultiIndex.from_tuples
expected_arr = obj.unique()
if expected_arr.dtype == np.float16:
expected_arr = expected_arr.astype(np.float32)
expected_uniques = constructor(expected_arr)
if (
isinstance(obj, Index)
and expected_uniques.dtype == bool
and obj.dtype == object
):
expected_uniques = expected_uniques.astype(object)
if sort:
expected_uniques = expected_uniques.sort_values()
# construct an integer ndarray so that
# `expected_uniques.take(expected_codes)` is equal to `obj`
expected_uniques_list = list(expected_uniques)
expected_codes = [expected_uniques_list.index(val) for val in obj]
expected_codes = np.asarray(expected_codes, dtype=np.intp)
tm.assert_numpy_array_equal(result_codes, expected_codes)
tm.assert_index_equal(result_uniques, expected_uniques, exact=True)
def test_series_factorize_use_na_sentinel_false(self):
# GH#35667
values = np.array([1, 2, 1, np.nan])
ser = Series(values)
codes, uniques = ser.factorize(use_na_sentinel=False)
expected_codes = np.array([0, 1, 0, 2], dtype=np.intp)
expected_uniques = Index([1.0, 2.0, np.nan])
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_index_equal(uniques, expected_uniques)
def test_basic(self):
items = np.array(["a", "b", "b", "a", "a", "c", "c", "c"], dtype=object)
codes, uniques = algos.factorize(items)
tm.assert_numpy_array_equal(uniques, np.array(["a", "b", "c"], dtype=object))
codes, uniques = algos.factorize(items, sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = np.array(["a", "b", "c"], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
arr = np.arange(5, dtype=np.intp)[::-1]
codes, uniques = algos.factorize(arr)
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=arr.dtype)
tm.assert_numpy_array_equal(uniques, exp)
codes, uniques = algos.factorize(arr, sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=arr.dtype)
tm.assert_numpy_array_equal(uniques, exp)
arr = np.arange(5.0)[::-1]
codes, uniques = algos.factorize(arr)
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = np.array([4.0, 3.0, 2.0, 1.0, 0.0], dtype=arr.dtype)
tm.assert_numpy_array_equal(uniques, exp)
codes, uniques = algos.factorize(arr, sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=arr.dtype)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(["A", "A", np.nan, "B", 3.14, np.inf])
codes, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = Index(["A", "B", 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
codes, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = Index([3.14, np.inf, "A", "B"])
tm.assert_index_equal(uniques, exp)
def test_factorize_datetime64(self):
# M8
v1 = Timestamp("20130101 09:00:00.00004")
v2 = Timestamp("20130101")
x = Series([v1, v1, v1, v2, v2, v1])
codes, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
codes, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
def test_factorize_period(self):
# period
v1 = Period("201302", freq="M")
v2 = Period("201303", freq="M")
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
codes, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
tm.assert_index_equal(uniques, PeriodIndex([v1, v2]))
codes, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
tm.assert_index_equal(uniques, PeriodIndex([v1, v2]))
def test_factorize_timedelta(self):
# GH 5986
v1 = to_timedelta("1 day 1 min")
v2 = to_timedelta("1 day")
x = Series([v1, v2, v1, v1, v2, v2, v1])
codes, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
tm.assert_index_equal(uniques, to_timedelta([v1, v2]))
codes, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(codes, exp)
tm.assert_index_equal(uniques, to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype="O")
rizer = ht.ObjectFactorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype=np.intp)
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
tm.assert_numpy_array_equal(ids, expected)
def test_factorizer_with_mask(self):
# GH#49549
data = np.array([1, 2, 3, 1, 1, 0], dtype="int64")
mask = np.array([False, False, False, False, False, True])
rizer = ht.Int64Factorizer(len(data))
result = rizer.factorize(data, mask=mask)
expected = np.array([0, 1, 2, 0, 0, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
expected_uniques = np.array([1, 2, 3], dtype="int64")
tm.assert_numpy_array_equal(rizer.uniques.to_array(), expected_uniques)
def test_factorizer_object_with_nan(self):
# GH#49549
data = np.array([1, 2, 3, 1, np.nan])
rizer = ht.ObjectFactorizer(len(data))
result = rizer.factorize(data.astype(object))
expected = np.array([0, 1, 2, 0, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
expected_uniques = np.array([1, 2, 3], dtype=object)
tm.assert_numpy_array_equal(rizer.uniques.to_array(), expected_uniques)
@pytest.mark.parametrize(
"data, expected_codes, expected_uniques",
[
(
[(1, 1), (1, 2), (0, 0), (1, 2), "nonsense"],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), "nonsense"],
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)],
),
([(1, 1), (1, 2), (0, 0), (1, 2)], [0, 1, 2, 1], [(1, 1), (1, 2), (0, 0)]),
],
)
def test_factorize_tuple_list(self, data, expected_codes, expected_uniques):
# GH9454
data = com.asarray_tuplesafe(data, dtype=object)
codes, uniques = pd.factorize(data)
tm.assert_numpy_array_equal(codes, np.array(expected_codes, dtype=np.intp))
expected_uniques_array = com.asarray_tuplesafe(expected_uniques, dtype=object)
tm.assert_numpy_array_equal(uniques, expected_uniques_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
x17 = np.array([complex(i) for i in range(17)], dtype=object)
msg = "'[<>]' not supported between instances of .*"
with pytest.raises(TypeError, match=msg):
algos.factorize(x17[::-1], sort=True)
def test_numeric_dtype_factorize(self, any_real_numpy_dtype):
# GH41132
dtype = any_real_numpy_dtype
data = np.array([1, 2, 2, 1], dtype=dtype)
expected_codes = np.array([0, 1, 1, 0], dtype=np.intp)
expected_uniques = np.array([1, 2], dtype=dtype)
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_float64_factorize(self, writable):
data = np.array([1.0, 1e8, 1.0, 1e-8, 1e8, 1.0], dtype=np.float64)
data.setflags(write=writable)
expected_codes = np.array([0, 1, 0, 2, 1, 0], dtype=np.intp)
expected_uniques = np.array([1.0, 1e8, 1e-8], dtype=np.float64)
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_uint64_factorize(self, writable):
data = np.array([2**64 - 1, 1, 2**64 - 1], dtype=np.uint64)
data.setflags(write=writable)
expected_codes = np.array([0, 1, 0], dtype=np.intp)
expected_uniques = np.array([2**64 - 1, 1], dtype=np.uint64)
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_int64_factorize(self, writable):
data = np.array([2**63 - 1, -(2**63), 2**63 - 1], dtype=np.int64)
data.setflags(write=writable)
expected_codes = np.array([0, 1, 0], dtype=np.intp)
expected_uniques = np.array([2**63 - 1, -(2**63)], dtype=np.int64)
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_string_factorize(self, writable):
data = np.array(["a", "c", "a", "b", "c"], dtype=object)
data.setflags(write=writable)
expected_codes = np.array([0, 1, 0, 2, 1], dtype=np.intp)
expected_uniques = np.array(["a", "c", "b"], dtype=object)
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_object_factorize(self, writable):
data = np.array(["a", "c", None, np.nan, "a", "b", NaT, "c"], dtype=object)
data.setflags(write=writable)
expected_codes = np.array([0, 1, -1, -1, 0, 2, -1, 1], dtype=np.intp)
expected_uniques = np.array(["a", "c", "b"], dtype=object)
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_datetime64_factorize(self, writable):
# GH35650 Verify whether read-only datetime64 array can be factorized
data = np.array([np.datetime64("2020-01-01T00:00:00.000")], dtype="M8[ns]")
data.setflags(write=writable)
expected_codes = np.array([0], dtype=np.intp)
expected_uniques = np.array(
["2020-01-01T00:00:00.000000000"], dtype="datetime64[ns]"
)
codes, uniques = pd.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
def test_factorize_rangeindex(self, sort):
# increasing -> sort doesn't matter
ri = pd.RangeIndex.from_range(range(10))
expected = np.arange(10, dtype=np.intp), ri
result = algos.factorize(ri, sort=sort)
tm.assert_numpy_array_equal(result[0], expected[0])
tm.assert_index_equal(result[1], expected[1], exact=True)
result = ri.factorize(sort=sort)
tm.assert_numpy_array_equal(result[0], expected[0])
tm.assert_index_equal(result[1], expected[1], exact=True)
def test_factorize_rangeindex_decreasing(self, sort):
# decreasing -> sort matters
ri = pd.RangeIndex.from_range(range(10))
expected = np.arange(10, dtype=np.intp), ri
ri2 = ri[::-1]
expected = expected[0], ri2
if sort:
expected = expected[0][::-1], expected[1][::-1]
result = algos.factorize(ri2, sort=sort)
tm.assert_numpy_array_equal(result[0], expected[0])
tm.assert_index_equal(result[1], expected[1], exact=True)
result = ri2.factorize(sort=sort)
tm.assert_numpy_array_equal(result[0], expected[0])
tm.assert_index_equal(result[1], expected[1], exact=True)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with pytest.raises(TypeError, match="got an unexpected keyword"):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize(
"data",
[
np.array([0, 1, 0], dtype="u8"),
np.array([-(2**63), 1, -(2**63)], dtype="i8"),
np.array(["__nan__", "foo", "__nan__"], dtype="object"),
],
)
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
codes, uniques = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_codes = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
@pytest.mark.parametrize(
"data, na_value",
[
(np.array([0, 1, 0, 2], dtype="u8"), 0),
(np.array([1, 0, 1, 2], dtype="u8"), 1),
(np.array([-(2**63), 1, -(2**63), 0], dtype="i8"), -(2**63)),
(np.array([1, -(2**63), 1, 0], dtype="i8"), 1),
(np.array(["a", "", "a", "b"], dtype=object), "a"),
(np.array([(), ("a", 1), (), ("a", 2)], dtype=object), ()),
(np.array([("a", 1), (), ("a", 1), ("a", 2)], dtype=object), ("a", 1)),
],
)
def test_parametrized_factorize_na_value(self, data, na_value):
codes, uniques = algos.factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_codes = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_numpy_array_equal(uniques, expected_uniques)
@pytest.mark.parametrize(
"data, uniques",
[
(
np.array(["b", "a", None, "b"], dtype=object),
np.array(["b", "a"], dtype=object),
),
(
pd.array([2, 1, pd.NA, 2], dtype="Int64"),
pd.array([2, 1], dtype="Int64"),
),
],
ids=["numpy_array", "extension_array"],
)
def test_factorize_use_na_sentinel(self, sort, data, uniques):
codes, uniques = algos.factorize(data, sort=sort, use_na_sentinel=True)
if sort:
expected_codes = np.array([1, 0, -1, 1], dtype=np.intp)
expected_uniques = algos.safe_sort(uniques)
else:
expected_codes = np.array([0, 1, -1, 0], dtype=np.intp)
expected_uniques = uniques
tm.assert_numpy_array_equal(codes, expected_codes)
if isinstance(data, np.ndarray):
tm.assert_numpy_array_equal(uniques, expected_uniques)
else:
tm.assert_extension_array_equal(uniques, expected_uniques)
@pytest.mark.parametrize(
"data, expected_codes, expected_uniques",
[
(
["a", None, "b", "a"],
np.array([0, 1, 2, 0], dtype=np.dtype("intp")),
np.array(["a", np.nan, "b"], dtype=object),
),
(
["a", np.nan, "b", "a"],
np.array([0, 1, 2, 0], dtype=np.dtype("intp")),
np.array(["a", np.nan, "b"], dtype=object),
),
],
)
def test_object_factorize_use_na_sentinel_false(
self, data, expected_codes, expected_uniques
):
codes, uniques = algos.factorize(
np.array(data, dtype=object), use_na_sentinel=False
)
tm.assert_numpy_array_equal(uniques, expected_uniques, strict_nan=True)
tm.assert_numpy_array_equal(codes, expected_codes, strict_nan=True)
@pytest.mark.parametrize(
"data, expected_codes, expected_uniques",
[
(
np.array([1, None, 1, 2], dtype=object),
np.array([0, 1, 0, 2], dtype=np.dtype("intp")),
np.array([1, np.nan, 2], dtype="O"),
),
(
np.array([1, np.nan, 1, 2], dtype=np.float64),
np.array([0, 1, 0, 2], dtype=np.dtype("intp")),
np.array([1, np.nan, 2], dtype=np.float64),
),
],
)
def test_int_factorize_use_na_sentinel_false(
self, data, expected_codes, expected_uniques
):
codes, uniques = algos.factorize(data, use_na_sentinel=False)
tm.assert_numpy_array_equal(uniques, expected_uniques, strict_nan=True)
tm.assert_numpy_array_equal(codes, expected_codes, strict_nan=True)
@pytest.mark.parametrize(
"data, expected_codes, expected_uniques",
[
(
Index(Categorical(["a", "a", "b"])),
np.array([0, 0, 1], dtype=np.intp),
CategoricalIndex(["a", "b"], categories=["a", "b"], dtype="category"),
),
(
Series(Categorical(["a", "a", "b"])),
np.array([0, 0, 1], dtype=np.intp),
CategoricalIndex(["a", "b"], categories=["a", "b"], dtype="category"),
),
(
Series(DatetimeIndex(["2017", "2017"], tz="US/Eastern")),
np.array([0, 0], dtype=np.intp),
DatetimeIndex(["2017"], tz="US/Eastern"),
),
],
)
def test_factorize_mixed_values(self, data, expected_codes, expected_uniques):
# GH 19721
codes, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(codes, expected_codes)
tm.assert_index_equal(uniques, expected_uniques)
def test_factorize_interval_non_nano(self, unit):
# GH#56099
left = DatetimeIndex(["2016-01-01", np.nan, "2015-10-11"]).as_unit(unit)
right = DatetimeIndex(["2016-01-02", np.nan, "2015-10-15"]).as_unit(unit)
idx = IntervalIndex.from_arrays(left, right)
codes, cats = idx.factorize()
assert cats.dtype == f"interval[datetime64[{unit}], right]"
ts = Timestamp(0).as_unit(unit)
idx2 = IntervalIndex.from_arrays(left - ts, right - ts)
codes2, cats2 = idx2.factorize()
assert cats2.dtype == f"interval[timedelta64[{unit}], right]"
idx3 = IntervalIndex.from_arrays(
left.tz_localize("US/Pacific"), right.tz_localize("US/Pacific")
)
codes3, cats3 = idx3.factorize()
assert cats3.dtype == f"interval[datetime64[{unit}, US/Pacific], right]"
| TestFactorize |
python | google__pytype | pytype/rewrite/overlays/special_builtins.py | {
"start": 364,
"end": 1129
} | class ____(abstract.PytdFunction):
"""assert_type implementation."""
def call_with_mapped_args(
self, mapped_args: abstract.MappedArgs[abstract.FrameType],
) -> abstract.SimpleReturn:
var = mapped_args.argdict['val']
typ = mapped_args.argdict['typ']
pp = self._ctx.errorlog.pretty_printer
actual = pp.print_var_type(var, node=None)
try:
expected = abstract.get_atomic_constant(typ, str)
except ValueError:
expected = pp.print_type_of_instance(typ.get_atomic_value())
if actual != expected:
stack = _stack(mapped_args.frame)
self._ctx.errorlog.assert_type(stack, actual, expected)
return abstract.SimpleReturn(self._ctx.consts[None])
@overlays.register_function('builtins', 'reveal_type')
| AssertType |
python | doocs__leetcode | solution/0500-0599/0559.Maximum Depth of N-ary Tree/Solution.py | {
"start": 195,
"end": 422
} | class ____:
def maxDepth(self, root: "Node") -> int:
if root is None:
return 0
mx = 0
for child in root.children:
mx = max(mx, self.maxDepth(child))
return 1 + mx
| Solution |
python | huggingface__transformers | tests/quantization/quanto_integration/test_quanto.py | {
"start": 16158,
"end": 16545
} | class ____(QuantoQuantizationOffloadTest):
EXPECTED_OUTPUTS = [
"Hello my name is John, I am a professional photographer, I", # CUDA output
"Hello my name is Nils, I am a student of the University", # XPU output
]
weights = "int4"
@unittest.skip(reason="Skipping test class because serialization is not supported yet")
| QuantoQuantizationQBitsTensorOffloadTest |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 40132,
"end": 42289
} | class ____(GeneratedAirbyteSource):
class AuthenticateViaGoogleOAuth:
@public
def __init__(self, client_id: str, client_secret: str, refresh_token: str):
self.auth_type = "Client"
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.refresh_token = check.str_param(refresh_token, "refresh_token")
class ServiceAccountKeyAuthentication:
@public
def __init__(self, service_account_info: str):
self.auth_type = "Service"
self.service_account_info = check.str_param(
service_account_info, "service_account_info"
)
@public
def __init__(
self,
name: str,
spreadsheet_id: str,
credentials: Union[
"GoogleSheetsSource.AuthenticateViaGoogleOAuth",
"GoogleSheetsSource.ServiceAccountKeyAuthentication",
],
row_batch_size: Optional[int] = None,
):
"""Airbyte Source for Google Sheets.
Documentation can be found at https://docs.airbyte.com/integrations/sources/google-sheets
Args:
name (str): The name of the destination.
spreadsheet_id (str): Enter the link to the Google spreadsheet you want to sync
row_batch_size (Optional[int]): Number of rows fetched when making a Google Sheet API call. Defaults to 200.
credentials (Union[GoogleSheetsSource.AuthenticateViaGoogleOAuth, GoogleSheetsSource.ServiceAccountKeyAuthentication]): Credentials for connecting to the Google Sheets API
"""
self.spreadsheet_id = check.str_param(spreadsheet_id, "spreadsheet_id")
self.row_batch_size = check.opt_int_param(row_batch_size, "row_batch_size")
self.credentials = check.inst_param(
credentials,
"credentials",
(
GoogleSheetsSource.AuthenticateViaGoogleOAuth,
GoogleSheetsSource.ServiceAccountKeyAuthentication,
),
)
super().__init__("Google Sheets", name)
| GoogleSheetsSource |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 44135,
"end": 45248
} | class ____(TypedDict, total=False):
type: Required[Literal['literal']]
expected: Required[list[Any]]
ref: str
metadata: dict[str, Any]
serialization: SerSchema
def literal_schema(
expected: list[Any],
*,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> LiteralSchema:
"""
Returns a schema that matches a literal value, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
schema = core_schema.literal_schema(['hello', 'world'])
v = SchemaValidator(schema)
assert v.validate_python('hello') == 'hello'
```
Args:
expected: The value must be one of these values
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
return _dict_not_none(type='literal', expected=expected, ref=ref, metadata=metadata, serialization=serialization)
| LiteralSchema |
python | scipy__scipy | scipy/special/tests/test_basic.py | {
"start": 80956,
"end": 83409
} | class ____:
def test_exp2(self):
ex = special.exp2(2)
exrl = 2**2
assert_equal(ex,exrl)
def test_exp2more(self):
exm = special.exp2(2.5)
exmrl = 2**(2.5)
assert_allclose(exm, exmrl, atol=1.5e-8, rtol=0)
def test_exp10(self):
ex = special.exp10(2)
exrl = 10**2
assert_allclose(ex, exrl, atol=1e-6, rtol=0)
def test_exp10more(self):
exm = special.exp10(2.5)
exmrl = 10**(2.5)
assert_allclose(exm, exmrl, atol=1.5e-8, rtol=0)
def test_expm1(self):
ex = (special.expm1(2), special.expm1(3), special.expm1(4))
exrl = (exp(2) - 1, exp(3) - 1, exp(4) - 1)
assert_allclose(ex, exrl, atol=1.5e-8, rtol=0)
def test_expm1more(self):
ex1 = (special.expm1(2), special.expm1(2.1), special.expm1(2.2))
exrl1 = (exp(2) - 1, exp(2.1) - 1, exp(2.2) - 1)
assert_allclose(ex1, exrl1, atol=1.5e-8, rtol=0)
def assert_really_equal(x, y, rtol=None):
"""
Sharper assertion function that is stricter about matching types, not just values
This is useful/necessary in some cases:
* dtypes for arrays that have the same _values_ (e.g. element 1.0 vs 1)
* distinguishing complex from real NaN
* result types for scalars
We still want to be able to allow a relative tolerance for the values though.
The main logic comparison logic is handled by the xp_assert_* functions.
"""
def assert_func(x, y):
xp_assert_equal(x, y) if rtol is None else xp_assert_close(x, y, rtol=rtol)
def assert_complex_nan(x):
assert np.isnan(x.real) and np.isnan(x.imag)
assert type(x) is type(y), f"types not equal: {type(x)}, {type(y)}"
# ensure we also compare the values _within_ an array appropriately,
# e.g. assert_equal does not distinguish different complex nans in arrays
if isinstance(x, np.ndarray):
# assert_equal does not compare (all) types, only values
assert x.dtype == y.dtype
# for empty arrays resp. to ensure shapes match
assert_func(x, y)
for elem_x, elem_y in zip(x.ravel(), y.ravel()):
assert_really_equal(elem_x, elem_y, rtol=rtol)
elif np.isnan(x) and np.isnan(y) and _is_subdtype(type(x), "c"):
assert_complex_nan(x) and assert_complex_nan(y)
# no need to consider complex infinities due to numpy/numpy#25493
else:
assert_func(x, y)
| TestExp |
python | ray-project__ray | python/ray/data/_internal/datasource/range_datasource.py | {
"start": 323,
"end": 5124
} | class ____(Datasource):
"""An example datasource that generates ranges of numbers from [0..n)."""
def __init__(
self,
n: int,
block_format: str = "arrow",
tensor_shape: Tuple = (1,),
column_name: Optional[str] = None,
):
self._n = int(n)
self._block_format = block_format
self._tensor_shape = tensor_shape
self._column_name = column_name
def estimate_inmemory_data_size(self) -> Optional[int]:
if self._block_format == "tensor":
element_size = int(np.prod(self._tensor_shape))
else:
element_size = 1
return 8 * self._n * element_size
def get_read_tasks(
self,
parallelism: int,
per_task_row_limit: Optional[int] = None,
) -> List[ReadTask]:
if self._n == 0:
return []
read_tasks: List[ReadTask] = []
n = self._n
block_format = self._block_format
tensor_shape = self._tensor_shape
block_size = max(1, n // parallelism)
# TODO(swang): This target block size may not match the driver's
# context if it was overridden. Set target max block size during
# optimizer stage to fix this.
ctx = DataContext.get_current()
if ctx.target_max_block_size is None:
# If target_max_block_size is ``None``, treat it as unlimited and
# avoid further splitting.
target_rows_per_block = n # whole block in one shot
else:
row_size_bytes = self.estimate_inmemory_data_size() // self._n
row_size_bytes = max(row_size_bytes, 1)
target_rows_per_block = max(1, ctx.target_max_block_size // row_size_bytes)
# Example of a read task. In a real datasource, this would pull data
# from an external system instead of generating dummy data.
def make_block(start: int, count: int) -> Block:
if block_format == "arrow":
import pyarrow as pa
return pa.Table.from_arrays(
[np.arange(start, start + count)],
names=[self._column_name or "value"],
)
elif block_format == "tensor":
import pyarrow as pa
tensor = np.ones(tensor_shape, dtype=np.int64) * np.expand_dims(
np.arange(start, start + count),
tuple(range(1, 1 + len(tensor_shape))),
)
return BlockAccessor.batch_to_block(
{self._column_name: tensor} if self._column_name else tensor
)
else:
return list(builtins.range(start, start + count))
def make_blocks(
start: int, count: int, target_rows_per_block: int
) -> Iterable[Block]:
while count > 0:
num_rows = min(count, target_rows_per_block)
yield make_block(start, num_rows)
start += num_rows
count -= num_rows
if block_format == "tensor":
element_size = int(np.prod(tensor_shape))
else:
element_size = 1
i = 0
while i < n:
count = min(block_size, n - i)
meta = BlockMetadata(
num_rows=count,
size_bytes=8 * count * element_size,
input_files=None,
exec_stats=None,
)
read_tasks.append(
ReadTask(
lambda i=i, count=count: make_blocks(
i, count, target_rows_per_block
),
meta,
schema=self._schema,
per_task_row_limit=per_task_row_limit,
)
)
i += block_size
return read_tasks
@functools.cached_property
def _schema(self):
if self._n == 0:
return None
if self._block_format == "arrow":
_check_pyarrow_version()
import pyarrow as pa
schema = pa.Table.from_pydict({self._column_name or "value": [0]}).schema
elif self._block_format == "tensor":
_check_pyarrow_version()
import pyarrow as pa
tensor = np.ones(self._tensor_shape, dtype=np.int64) * np.expand_dims(
np.arange(0, 10), tuple(range(1, 1 + len(self._tensor_shape)))
)
schema = BlockAccessor.batch_to_block(
{self._column_name: tensor} if self._column_name else tensor
).schema
elif self._block_format == "list":
schema = int
else:
raise ValueError("Unsupported block type", self._block_format)
return schema
| RangeDatasource |
python | apache__airflow | airflow-core/tests/unit/utils/test_db_manager.py | {
"start": 1213,
"end": 1677
} | class ____(BaseDBManager):
metadata = Base.metadata
version_table_name = "custom_alembic_version"
migration_dir = "custom_migration_dir"
alembic_file = "custom_alembic.ini"
def downgrade(self, to_revision, from_revision=None, show_sql_only=False):
from alembic import command as alembic_command
config = self.get_alembic_config()
alembic_command.downgrade(config, revision=to_revision, sql=show_sql_only)
| CustomDBManager |
python | doocs__leetcode | solution/0500-0599/0566.Reshape the Matrix/Solution.py | {
"start": 0,
"end": 333
} | class ____:
def matrixReshape(self, mat: List[List[int]], r: int, c: int) -> List[List[int]]:
m, n = len(mat), len(mat[0])
if m * n != r * c:
return mat
ans = [[0] * c for _ in range(r)]
for i in range(m * n):
ans[i // c][i % c] = mat[i // n][i % n]
return ans
| Solution |
python | bokeh__bokeh | tests/unit/bokeh/document/test_events__document.py | {
"start": 14084,
"end": 16530
} | class ____:
def test_init(self) -> None:
doc = Document()
e = bde.TitleChangedEvent(doc, "title", "setter", "invoker")
assert e.document == doc
assert e.title == "title"
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_kind(self) -> None:
assert bde.TitleChangedEvent.kind == "TitleChanged"
def test_to_serializable(self) -> None:
doc = Document()
e = bde.TitleChangedEvent(doc, "title", "setter", "invoker")
s = Serializer()
r = s.encode(e)
assert r == dict(kind=e.kind, title="title")
assert s.buffers == []
def test_dispatch(self) -> None:
doc = Document()
e = bde.TitleChangedEvent(doc, "title", "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched']
def test_combine_ignores_except_title_changd_event(self) -> None:
doc = Document()
e = bde.TitleChangedEvent(doc, "title", "setter", "invoker")
e2 = bde.DocumentPatchedEvent(doc, "setter", "invoker")
assert e.combine(e2) is False
assert e.title == "title"
assert e.callback_invoker == "invoker"
def test_combine_ignores_different_setter(self) -> None:
doc = Document()
e = bde.TitleChangedEvent(doc, "title", "setter", "invoker")
e2 = bde.TitleChangedEvent(doc, "title2", "setter2", "invoker2")
assert e.combine(e2) is False
assert e.title == "title"
assert e.callback_invoker == "invoker"
def test_combine_ignores_different_doc(self) -> None:
doc = Document()
e = bde.TitleChangedEvent(doc, "title", "setter", "invoker")
e2 = bde.TitleChangedEvent("doc2", "title2", "setter2", "invoker2")
assert e.combine(e2) is False
assert e.title == "title"
assert e.callback_invoker == "invoker"
def test_combine_with_title_changed_event(self) -> None:
doc = Document()
e = bde.TitleChangedEvent(doc, "title", "setter", "invoker")
e2 = bde.TitleChangedEvent(doc, "title2", "setter", "invoker2")
assert e.combine(e2) is True
assert e.title == "title2"
assert e.callback_invoker == "invoker2"
# RootAddedEvent --------------------------------------------------------------
| TestTitleChangedEvent |
python | falconry__falcon | falcon/errors.py | {
"start": 74325,
"end": 77406
} | class ____(HTTPError):
"""503 Service Unavailable.
The server is currently unable to handle the request due to a
temporary overload or scheduled maintenance, which will likely be
alleviated after some delay.
The server MAY send a Retry-After header field to suggest an
appropriate amount of time for the client to wait before retrying
the request.
Note: The existence of the 503 status code does not imply that a
server has to use it when becoming overloaded. Some servers might
simply refuse the connection.
(See also: RFC 7231, Section 6.6.4)
All the arguments are defined as keyword-only.
Keyword Args:
title (str): Error title (default '503 Service Unavailable').
description (str): Human-friendly description of the error, along with
a helpful suggestion or two.
headers (dict or list): A ``dict`` of header names and values
to set, or a ``list`` of (*name*, *value*) tuples. Both *name* and
*value* must be of type ``str`` or ``StringType``, and only
character values 0x00 through 0xFF may be used on platforms that
use wide characters.
Note:
The Content-Type header, if present, will be overridden. If
you wish to return custom error messages, you can create
your own HTTP error class, and install an error handler
to convert it into an appropriate HTTP response for the
client
Note:
Falcon can process a list of ``tuple`` slightly faster
than a ``dict``.
retry_after (datetime or int): Value for the Retry-After header. If a
``datetime`` object, will serialize as an HTTP date. Otherwise,
a non-negative ``int`` is expected, representing the number of
seconds to wait.
Note:
The existing value of the Retry-After in headers will be
overridden by this value
href (str): A URL someone can visit to find out more information
(default ``None``). Unicode characters are percent-encoded.
href_text (str): If href is given, use this as the friendly
title/description for the link (default 'API documentation
for this error').
code (int): An internal code that customers can reference in their
support request or to help them when searching for knowledge
base articles related to this error (default ``None``).
"""
def __init__(
self,
*,
title: str | None = None,
description: str | None = None,
headers: HeaderArg | None = None,
retry_after: RetryAfter = None,
**kwargs: HTTPErrorKeywordArguments,
):
super().__init__(
status.HTTP_503,
title=title,
description=description,
headers=_parse_retry_after(headers, retry_after),
**kwargs, # type: ignore[arg-type]
)
| HTTPServiceUnavailable |
python | huggingface__transformers | src/transformers/models/vaultgemma/modeling_vaultgemma.py | {
"start": 20410,
"end": 23757
} | class ____(VaultGemmaPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = VaultGemmaModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, VaultGemmaForCausalLM
>>> model = VaultGemmaForCausalLM.from_pretrained("google/gemma-2-9b")
>>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")
>>> prompt = "What is your favorite condiment?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"What is your favorite condiment?"
```"""
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
if self.config.final_logit_softcapping is not None:
logits = logits / self.config.final_logit_softcapping
logits = torch.tanh(logits)
logits = logits * self.config.final_logit_softcapping
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["VaultGemmaForCausalLM", "VaultGemmaModel", "VaultGemmaPreTrainedModel"]
| VaultGemmaForCausalLM |
python | sympy__sympy | sympy/polys/domains/finitefield.py | {
"start": 2938,
"end": 10571
} | class ____(Field, SimpleDomain):
r"""Finite field of prime order :ref:`GF(p)`
A :ref:`GF(p)` domain represents a `finite field`_ `\mathbb{F}_p` of prime
order as :py:class:`~.Domain` in the domain system (see
:ref:`polys-domainsintro`).
A :py:class:`~.Poly` created from an expression with integer
coefficients will have the domain :ref:`ZZ`. However, if the ``modulus=p``
option is given then the domain will be a finite field instead.
>>> from sympy import Poly, Symbol
>>> x = Symbol('x')
>>> p = Poly(x**2 + 1)
>>> p
Poly(x**2 + 1, x, domain='ZZ')
>>> p.domain
ZZ
>>> p2 = Poly(x**2 + 1, modulus=2)
>>> p2
Poly(x**2 + 1, x, modulus=2)
>>> p2.domain
GF(2)
It is possible to factorise a polynomial over :ref:`GF(p)` using the
modulus argument to :py:func:`~.factor` or by specifying the domain
explicitly. The domain can also be given as a string.
>>> from sympy import factor, GF
>>> factor(x**2 + 1)
x**2 + 1
>>> factor(x**2 + 1, modulus=2)
(x + 1)**2
>>> factor(x**2 + 1, domain=GF(2))
(x + 1)**2
>>> factor(x**2 + 1, domain='GF(2)')
(x + 1)**2
It is also possible to use :ref:`GF(p)` with the :py:func:`~.cancel`
and :py:func:`~.gcd` functions.
>>> from sympy import cancel, gcd
>>> cancel((x**2 + 1)/(x + 1))
(x**2 + 1)/(x + 1)
>>> cancel((x**2 + 1)/(x + 1), domain=GF(2))
x + 1
>>> gcd(x**2 + 1, x + 1)
1
>>> gcd(x**2 + 1, x + 1, domain=GF(2))
x + 1
When using the domain directly :ref:`GF(p)` can be used as a constructor
to create instances which then support the operations ``+,-,*,**,/``
>>> from sympy import GF
>>> K = GF(5)
>>> K
GF(5)
>>> x = K(3)
>>> y = K(2)
>>> x
3 mod 5
>>> y
2 mod 5
>>> x * y
1 mod 5
>>> x / y
4 mod 5
Notes
=====
It is also possible to create a :ref:`GF(p)` domain of **non-prime**
order but the resulting ring is **not** a field: it is just the ring of
the integers modulo ``n``.
>>> K = GF(9)
>>> z = K(3)
>>> z
3 mod 9
>>> z**2
0 mod 9
It would be good to have a proper implementation of prime power fields
(``GF(p**n)``) but these are not yet implemented in SymPY.
.. _finite field: https://en.wikipedia.org/wiki/Finite_field
"""
rep = 'FF'
alias = 'FF'
is_FiniteField = is_FF = True
is_Numerical = True
has_assoc_Ring = False
has_assoc_Field = True
dom = None
mod = None
def __init__(self, mod, symmetric=True):
from sympy.polys.domains import ZZ
dom = ZZ
if mod <= 0:
raise ValueError('modulus must be a positive integer, got %s' % mod)
ctx, poly_ctx, is_flint = _modular_int_factory(mod, dom, symmetric, self)
self.dtype = ctx
self._poly_ctx = poly_ctx
self._is_flint = is_flint
self.zero = self.dtype(0)
self.one = self.dtype(1)
self.dom = dom
self.mod = mod
self.sym = symmetric
self._tp = type(self.zero)
@property
def tp(self):
return self._tp
@property
def is_Field(self):
is_field = getattr(self, '_is_field', None)
if is_field is None:
from sympy.ntheory.primetest import isprime
self._is_field = is_field = isprime(self.mod)
return is_field
def __str__(self):
return 'GF(%s)' % self.mod
def __hash__(self):
return hash((self.__class__.__name__, self.dtype, self.mod, self.dom))
def __eq__(self, other):
"""Returns ``True`` if two domains are equivalent. """
return isinstance(other, FiniteField) and \
self.mod == other.mod and self.dom == other.dom
def characteristic(self):
"""Return the characteristic of this domain. """
return self.mod
def get_field(self):
"""Returns a field associated with ``self``. """
return self
def to_sympy(self, a):
"""Convert ``a`` to a SymPy object. """
return SymPyInteger(self.to_int(a))
def from_sympy(self, a):
"""Convert SymPy's Integer to SymPy's ``Integer``. """
if a.is_Integer or int_valued(a):
return self.dtype(self.dom.dtype(int(a)))
raise CoercionFailed("expected an integer, got %s" % a)
def to_int(self, a):
"""Convert ``val`` to a Python ``int`` object. """
aval = int(a)
if self.sym and aval > self.mod // 2:
aval -= self.mod
return aval
def is_positive(self, a):
"""Returns True if ``a`` is positive. """
return bool(a)
def is_nonnegative(self, a):
"""Returns True if ``a`` is non-negative. """
return True
def is_negative(self, a):
"""Returns True if ``a`` is negative. """
return False
def is_nonpositive(self, a):
"""Returns True if ``a`` is non-positive. """
return not a
def from_FF(K1, a, K0=None):
"""Convert ``ModularInteger(int)`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ(int(a), K0.dom))
def from_FF_python(K1, a, K0=None):
"""Convert ``ModularInteger(int)`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ_python(int(a), K0.dom))
def from_ZZ(K1, a, K0=None):
"""Convert Python's ``int`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ_python(a, K0))
def from_ZZ_python(K1, a, K0=None):
"""Convert Python's ``int`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ_python(a, K0))
def from_QQ(K1, a, K0=None):
"""Convert Python's ``Fraction`` to ``dtype``. """
if a.denominator == 1:
return K1.from_ZZ_python(a.numerator)
def from_QQ_python(K1, a, K0=None):
"""Convert Python's ``Fraction`` to ``dtype``. """
if a.denominator == 1:
return K1.from_ZZ_python(a.numerator)
def from_FF_gmpy(K1, a, K0=None):
"""Convert ``ModularInteger(mpz)`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ_gmpy(a.val, K0.dom))
def from_ZZ_gmpy(K1, a, K0=None):
"""Convert GMPY's ``mpz`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ_gmpy(a, K0))
def from_QQ_gmpy(K1, a, K0=None):
"""Convert GMPY's ``mpq`` to ``dtype``. """
if a.denominator == 1:
return K1.from_ZZ_gmpy(a.numerator)
def from_RealField(K1, a, K0):
"""Convert mpmath's ``mpf`` to ``dtype``. """
p, q = K0.to_rational(a)
if q == 1:
return K1.dtype(K1.dom.dtype(p))
def is_square(self, a):
"""Returns True if ``a`` is a quadratic residue modulo p. """
# a is not a square <=> x**2-a is irreducible
poly = [int(x) for x in [self.one, self.zero, -a]]
return not gf_irred_p_rabin(poly, self.mod, self.dom)
def exsqrt(self, a):
"""Square root modulo p of ``a`` if it is a quadratic residue.
Explanation
===========
Always returns the square root that is no larger than ``p // 2``.
"""
# x**2-a is not square-free if a=0 or the field is characteristic 2
if self.mod == 2 or a == 0:
return a
# Otherwise, use square-free factorization routine to factorize x**2-a
poly = [int(x) for x in [self.one, self.zero, -a]]
for factor in gf_zassenhaus(poly, self.mod, self.dom):
if len(factor) == 2 and factor[1] <= self.mod // 2:
return self.dtype(factor[1])
return None
FF = GF = FiniteField
| FiniteField |
python | joke2k__faker | faker/providers/company/es_ES/__init__.py | {
"start": 82,
"end": 3453
} | class ____(CompanyProvider):
"""
Provider for company names for es_ES locale
Company naming scheme and probabilities are inspired by and/or based on existing companies in Spain.
Sources:
- https://en.wikipedia.org/wiki/List_of_legal_entity_types_by_country
- https://ranking-empresas.eleconomista.es/ranking_empresas_nacional.html
"""
formats = (
"{{company_prefix}} {{last_name}} {{company_suffix}}",
"{{company_type}} {{random_company_acronym}} {{company_suffix}}",
"{{company_type}} {{last_name}} {{company_suffix}}",
"{{company_type}} {{random_company_adjective}} {{company_suffix}}",
"{{company_type}} {{last_name}} {{random_name_complements}} {{company_suffix}}",
"{{last_name}} {{random_name_complements}} {{company_suffix}}",
"{{last_name}} y {{last_name}} {{company_suffix}}",
"{{first_name}} {{last_name}} {{last_name}} {{company_suffix}}",
)
company_suffixes = OrderedDict(
[
("S.A.", 0.19860906),
("S.A.D", 0.01020618),
("S.A.T.", 0.02307813),
("S.A.U", 0.01506562),
("S.C.P", 0.04465719),
("S.Com.", 0.15636432),
("S.Coop.", 0.17394866),
("S.L.", 0.18325857),
("S.L.L.", 0.05800693),
("S.L.N.E", 0.11496705),
("S.L.U.", 0.02183831),
]
)
company_prefixes = (
"Familia",
"Grupo",
"Hermanos",
"Hnos",
)
company_types = (
"Alimentación",
"Banca Privada",
"Banco",
"Comercial",
"Comercializadora",
"Compañía",
"Construcción",
"Consultoría",
"Desarrollo",
"Despacho",
"Distribuciones",
"Farmaceútica",
"Finanzas",
"Fábrica",
"Hotel",
"Industrias",
"Infraestructuras",
"Inmobiliaria",
"Instalaciones",
"Inversiones",
"Logística",
"Manufacturas",
"Minería",
"Promociones",
"Restauración",
"Servicios",
"Soluciones",
"Suministros",
"Supermercados",
"Talleres",
"Tecnologías",
"Transportes",
)
name_complements = (
"& Asociados",
"y asociados",
)
company_adjectives = (
"Avanzadas",
"Castellana",
"Española",
"Españolas",
"Globales",
"Iberia",
"Ibérica",
"Ibéricos",
"Integrales",
"Inteligentes",
"Internacionales",
"del Levante",
"del Mediterráneo",
"del Noroeste",
"del Norte",
"del Sur",
)
def company_type(self) -> str:
return self.random_element(self.company_types)
def company_suffix(self) -> str:
return self.random_element(self.company_suffixes)
def random_name_complements(self) -> str:
return self.random_element(self.name_complements)
def random_company_adjective(self) -> str:
return self.random_element(self.company_adjectives)
def random_company_acronym(self) -> str:
letters = self.random_letters(self.random_int(2, 4))
return "".join(letters).upper()
def company_prefix(self) -> str:
return self.random_element(self.company_prefixes)
| Provider |
python | coleifer__peewee | examples/twitter/app.py | {
"start": 1226,
"end": 2733
} | class ____(BaseModel):
username = CharField(unique=True)
password = CharField()
email = CharField()
join_date = DateTimeField()
# it often makes sense to put convenience methods on model instances, for
# example, "give me all the users this user is following":
def following(self):
# query other users through the "relationship" table
return (User
.select()
.join(Relationship, on=Relationship.to_user)
.where(Relationship.from_user == self)
.order_by(User.username))
def followers(self):
return (User
.select()
.join(Relationship, on=Relationship.from_user)
.where(Relationship.to_user == self)
.order_by(User.username))
def is_following(self, user):
return (Relationship
.select()
.where(
(Relationship.from_user == self) &
(Relationship.to_user == user))
.exists())
def gravatar_url(self, size=80):
return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \
(md5(self.email.strip().lower().encode('utf-8')).hexdigest(), size)
# this model contains two foreign keys to user -- it essentially allows us to
# model a "many-to-many" relationship between users. by querying and joining
# on different columns we can expose who a user is "related to" and who is
# "related to" a given user
| User |
python | google__jax | jax/_src/pallas/mosaic_gpu/lowering.py | {
"start": 20938,
"end": 132655
} | class ____(Exception): # pylint: disable=g-bad-exception-name
pass
def _eval_index_map(
module_ctx: ModuleContext,
launch_ctx: mgpu.LaunchContext,
idx: Sequence[ir.Value],
block_mapping: pallas_core.BlockMapping,
) -> Sequence[ir.Value]:
block_indices = lower_jaxpr_to_mosaic_gpu(
module_ctx, launch_ctx, block_mapping.index_map_jaxpr.jaxpr, idx
)
result = []
for i, b in zip(block_indices, block_mapping.block_shape):
match b:
case pallas_core.Squeezed() | pallas_core.Element():
result.append(i)
case pallas_core.Blocked():
result.append(arith_dialect.muli(_as_index(i), _as_index(b)))
case _:
raise ValueError(f"Unsupported block dim type: {b}")
return tuple(result)
def _check_block_mappings(
block_mappings: Sequence[pallas_core.BlockMapping],
debug_info: jax_core.DebugInfo,
) -> None:
def err_details(bm: pallas_core.BlockMapping) -> str:
return (
f"Block spec for {bm.origin} in pallas_call {debug_info.func_src_info}"
f" has block shape {bm.block_shape}, array shape"
f" {bm.array_aval.shape},"
# TODO(necula): add index_map source location info
f" and index_map {bm.index_map_jaxpr.jaxpr} in"
f" memory space {bm.transformed_block_aval.memory_space}."
" See details at"
" https://docs.jax.dev/en/latest/pallas/grid_blockspec.html#pallas-blockspec."
)
for bm in block_mappings:
if (
bm.transformed_block_aval.memory_space == gpu_core.GMEM
and not bm.has_trivial_window()
):
raise NotImplementedError(
"Mosaic GPU lowering currently requires blocks in GMEM memory space "
"to have same block shape as the array shape "
"and a trivial index_map (returning all 0s).\n\n"
+ err_details(bm)
)
if any(isinstance(b, pallas_core.Element) for b in bm.block_shape):
raise NotImplementedError(
"Only Blocked indexing mode is supported in Mosaic GPU lowering.\n\n"
+ err_details(bm)
)
if bm.pipeline_mode is not None:
raise NotImplementedError(
"Pipeline mode is not supported in Mosaic GPU lowering.\n\n"
+ err_details(bm)
)
def _block_spec_from_block_mapping(
bm: pallas_core.BlockMapping,
which_parallel: Sequence[bool],
) -> pallas_core.BlockSpec:
eval_index_map = functools.partial(
jax.core.eval_jaxpr,
bm.index_map_jaxpr.jaxpr,
bm.index_map_jaxpr.consts,
)
def index_map(*indices):
# Inject the parallel indices into the sequential ones coming from
# `emit_pipeline`.
new_indices = util.merge_lists(
which_parallel,
indices,
[
primitives.program_id(axis - 1)
for axis, is_parallel in zip(
itertools.accumulate(which_parallel), which_parallel
)
if is_parallel
],
)
return eval_index_map(*new_indices)
return gpu_core.BlockSpec(
bm.block_shape,
index_map,
memory_space=bm.transformed_block_aval.memory_space,
transforms=cast(Sequence[gpu_core.MemoryRefTransform], bm.transforms),
)
def lower_pipelined_jaxpr_to_module(
grid_mapping: pallas_core.GridMapping,
gpu_mesh: pallas_core.Mesh | None,
jax_mesh: mesh_lib.Mesh | None,
jaxpr: jax_core.Jaxpr,
params: gpu_core.CompilerParams,
cost_estimate: pallas_core.CostEstimate | None,
) -> LoweringResult:
del cost_estimate # Unused.
assert len(jaxpr.outvars) == 0
assert not grid_mapping.vmapped_dims
if grid_mapping.num_dynamic_grid_bounds:
raise NotImplementedError(
"Dynamic grid bounds not supported in the Mosaic GPU lowering."
)
if grid_mapping.num_index_operands:
raise NotImplementedError(
"Scalar prefetch not supported in Mosaic GPU lowering."
)
block_mappings = grid_mapping.block_mappings
_check_block_mappings(block_mappings, jaxpr.debug_info)
in_block_mappings, out_block_mappings = util.split_list(
block_mappings, [grid_mapping.num_inputs]
)
if gpu_mesh:
assert isinstance(gpu_mesh, gpu_core.Mesh)
block = (128 * (gpu_mesh.num_threads or 1), 1, 1)
grid = gpu_mesh.grid
thread_axis = (
gpu_mesh.thread_name if gpu_mesh.thread_name is not None else ()
)
else:
block = (128, 1, 1)
grid = grid_mapping.grid
thread_axis = ()
if params.dimension_semantics is None:
which_parallel = [True] * len(grid)
else:
assert len(params.dimension_semantics) == len(grid)
which_parallel = [ds == "parallel" for ds in params.dimension_semantics]
sequential_grid = tuple(
d for axis, d in enumerate(grid) if not which_parallel[axis]
)
parallel_grid = tuple(
d for axis, d in enumerate(grid) if which_parallel[axis]
)
from jax._src.pallas.mosaic_gpu import pipeline # pytype: disable=import-error
from jax._src.pallas.mosaic_gpu import primitives as gpu_primitives # pytype: disable=import-error
def ref_for_aval(aval: ShapedAbstractValue):
if isinstance(aval, gpu_core.WGMMAAbstractAccumulatorRef):
return gpu_core.WGMMAAccumulatorRef(aval.shape, aval.dtype)
elif isinstance(aval, gpu_core.AbstractTMEMRef):
return gpu_core.GPUMemoryRef(
jax_core.ShapedArray(aval.shape, aval.dtype), gpu_core.TMEM,
transforms=(), layout=aval.layout, collective=aval.collective,
)
elif isinstance(aval, state_types.AbstractRef):
return pallas_core.MemoryRef(jax_core.ShapedArray(aval.shape, aval.dtype),
aval.memory_space)
else:
return gpu_core.SMEM(aval.shape, aval.dtype)
def pipeline_fn(*refs):
primitives.run_scoped(
functools.partial(scoped_pipeline_fn, *refs),
scratch_refs=[
ref_for_aval(cast(ShapedAbstractValue, v.aval))
for v in jaxpr.invars[grid_mapping.slice_scratch_ops]
],
collective_axes=thread_axis, # scratch_refs are shared across threads
)
return () # ``wrap_init`` does not support functions returning None.
def scoped_pipeline_fn(*refs, scratch_refs):
def body_fn(indices, *refs):
program_ids_template = util.merge_lists(
which_parallel, indices, [None] * sum(which_parallel)
)
assert len(refs) + len(scratch_refs) == len(jaxpr.invars)
return gpu_primitives.jaxpr_call(
jaxpr, *refs, *scratch_refs, program_ids=program_ids_template
)
return pipeline.emit_pipeline(
body_fn,
grid=sequential_grid,
in_specs=[
_block_spec_from_block_mapping(bm, which_parallel)
for bm in in_block_mappings
],
out_specs=[
_block_spec_from_block_mapping(bm, which_parallel)
for bm in out_block_mappings
],
max_concurrent_steps=params.max_concurrent_steps,
)(*refs)
with grid_mapping.trace_env():
new_jaxpr, _, new_consts = pe.trace_to_jaxpr_dynamic(
lu.wrap_init(pipeline_fn, debug_info=jaxpr.debug_info.with_unknown_names()),
[
gpu_core.GMEM(
bm.array_aval.shape, bm.array_aval.dtype
).get_ref_aval()
for bm in block_mappings
],
)
assert not new_consts
axis_names = (
_AxisNames(gpu_mesh.grid_names, gpu_mesh.cluster_names, gpu_mesh.thread_name)
if gpu_mesh is not None
else _AxisNames(grid_mapping.grid_names or ())
)
with grid_mapping.trace_env():
return lower_jaxpr_to_module(
jax_mesh,
axis_names,
parallel_grid,
block,
gpu_mesh.cluster if gpu_mesh is not None else (),
[bm.array_aval for bm in in_block_mappings],
[bm.array_aval for bm in out_block_mappings],
new_jaxpr,
params,
new_consts,
)
def lower_jaxpr_to_module(
    jax_mesh: mesh_lib.Mesh | None,
    axis_names: _AxisNames,
    grid: tuple[int, ...],
    block: tuple[int, ...],
    cluster: tuple[int, ...],
    in_shapes: Sequence[jax_core.ShapedArray],
    out_shapes: Sequence[jax_core.ShapedArray],
    jaxpr: jax_core.Jaxpr,
    params: gpu_core.CompilerParams,
    consts=(),
) -> LoweringResult:
  """Lowers ``jaxpr`` into a Mosaic GPU kernel module.

  Args:
    jax_mesh: the JAX device mesh the kernel runs under, if any.
    axis_names: names of the grid/cluster/thread axes.
    grid: the (possibly >3D) parallel grid requested by the user.
    block: the CUDA block dimensions.
    cluster: the CUDA cluster dimensions (padded to rank 3 below).
    in_shapes: avals of the kernel inputs (GMEM buffers).
    out_shapes: avals of the kernel outputs (GMEM buffers).
    jaxpr: the jaxpr lowered as the kernel body.
    params: Mosaic GPU compiler parameters.
    consts: constants closed over by ``jaxpr``.

  Returns:
    A ``LoweringResult`` with the MLIR module, the CUDA grid and block, the
    extended output shapes (see NOTE below), the optional profiler spec and
    the shapes of the scoped GMEM semaphore buffers.
  """
  debug_info = jaxpr.debug_info
  approx_math = params.approx_math
  lowering_semantics = params.lowering_semantics
  # CUDA clusters are always rank 3: pad with unit dimensions on the left.
  if len(cluster) < 3:
    cluster = (1,) * (3 - len(cluster)) + cluster
  else:
    assert len(cluster) == 3
  if len(grid) <= 3:
    squashed_dims = ()
    parallel_grid = (1,) * (3 - len(grid)) + grid
  else:
    # If we have >3 parallel dimensions, we flatten all but the minormost 2 dims.
    # Ex: (2, 3, 4, 5) -> (6, 4, 5)
    squashed_dims = grid[:-2]
    parallel_grid = (math.prod(grid[:-2]), *grid[-2:])
  # We reverse the order because Pallas prefers row-major iteration while the
  # CUDA runtime prefers column-major iteration.
  parallel_grid = parallel_grid[::-1]
  cluster = cluster[::-1]
  squashed_dims = squashed_dims[::-1]
  axis_names = axis_names.reverse()

  # Estimate SMEM/TMEM/barrier/semaphore requirements of the body up front.
  rs = _estimate_resources(
      ResourceEstimatorContext(
          axis_names=axis_names, lowering_semantics=lowering_semantics
      ),
      jaxpr,
  )

  def body(launch_ctx: mgpu.LaunchContext, *buffers: ir.Value):
    # Kernel body: receives all GMEM buffers plus the runtime-managed scratch
    # (SMEM bytes, barriers, TMEM) and lowers the jaxpr into the module.
    *buffers_gmem, (
        runtime_smem,
        runtime_barriers,
        runtime_tmem,
    ) = buffers
    # The user inputs are followed by one aliased semaphore buffer per
    # collective scope (they are appended to both in/out shapes below).
    num_input_buffers = (len(in_shapes) +
                         len(rs.scoped_gmem_semaphores))
    input_buffers_gmem = buffers_gmem[:num_input_buffers]
    output_buffers_gmem = buffers_gmem[num_input_buffers:]
    scoped_gmem_semaphores = {}
    # Reverse sorted order so that [-1] pops buffers in the order matching the
    # (sorted) append order used for scoped_semaphores_shape below.
    for collective_axes in sorted(
        rs.scoped_gmem_semaphores.keys(), reverse=True):
      num_sems = rs.scoped_gmem_semaphores[collective_axes]
      # Extract the semaphores local to the current scope.
      index = ir.IndexType.get()
      # TODO(justinfu): Compute scope_idx for general collective_axes.
      # scope_idx computes axis_index(all_axes - collective_axes)
      if _is_block_local_scope(collective_axes, axis_names):
        scope_idx = arith_dialect.index_castui(index, mgpu_utils.block_idx())
      elif _is_global_scope(collective_axes, axis_names):
        scope_idx = _as_index(0)
      else:
        raise NotImplementedError(
            f"Unimplemented scope for semaphores: {collective_axes=}")
      scoped_gmem_semaphores[collective_axes] = mgpu.memref_slice(
          output_buffers_gmem[-1],
          mgpu.ds(
              arith_dialect.muli(
                  scope_idx, arith_dialect.constant(index, num_sems)
              ),
              num_sems,
          ),
      )
      # The semaphore buffer is an aliased input/output, so we need to skip it
      # in both the inputs and outputs.
      input_buffers_gmem = input_buffers_gmem[:-1]
      output_buffers_gmem = output_buffers_gmem[:-1]
    buffers_gmem = [*input_buffers_gmem, *output_buffers_gmem]
    # Group the runtime barrier refs by the logical barrier they implement.
    grouped_barriers = collections.defaultdict(list)
    for barrier, barrier_ref in zip(rs.barriers, runtime_barriers):
      grouped_barriers[barrier].append(barrier_ref)
    if runtime_tmem is not None:
      if lowering_semantics == mgpu.LoweringSemantics.Lane:
        tmem_cols = math.prod(runtime_tmem.shape) // tcgen05.TMEM_ROWS
        tmem_base = runtime_tmem.address
      else:
        tmem_cols = math.prod(runtime_tmem.type.shape) // tcgen05.TMEM_ROWS
        tmem_base = runtime_tmem
    else:
      tmem_cols = 0
      tmem_base = None
    if lowering_semantics == mgpu.LoweringSemantics.Lane:
      single_wg_lane_predicate = mgpu.single_thread_predicate(
          scope=mgpu.ThreadSubset.WARPGROUP)
      single_warp_lane_predicate = mgpu.single_thread_predicate(
          scope=mgpu.ThreadSubset.WARP)
    else:  # Warpgroup semantics do not have a single lane predicate.
      single_wg_lane_predicate = None
      single_warp_lane_predicate = None
    module_ctx = ModuleContext(
        mlir.sanitize_name(debug_info.func_name),
        axis_names,
        [
            _program_id(axis, squashed_dims, len(grid))
            for axis in range(len(grid))
        ],
        approx_math,
        single_wg_lane_predicate,
        single_warp_lane_predicate,
        smem_requested_bytes=math.prod(ir.MemRefType(runtime_smem.type).shape),
        smem_used_bytes=0,
        tmem_requested_cols=tmem_cols,
        tmem_used_cols=0,
        tmem_base=tmem_base,
        scoped_gmem_used_semaphores={k: 0 for k in scoped_gmem_semaphores},
        scoped_gmem_semaphore_base_ptr=scoped_gmem_semaphores,
        runtime_barriers=grouped_barriers,
        name_stack=source_info_util.NameStack(),
        traceback_caches=mlir.TracebackCaches(),
        squashed_dims=squashed_dims,
        lowering_semantics=lowering_semantics,
        primitive_semantics=gpu_core.PrimitiveSemantics.Warpgroup,
        mesh_info=pallas_utils.MeshInfo.from_mesh(jax_mesh)
        if jax_mesh is not None
        else None,
        auto_barriers=not params.unsafe_no_auto_barriers,
    )
    del runtime_smem, grouped_barriers, runtime_barriers
    _ = lower_jaxpr_to_mosaic_gpu(
        module_ctx, launch_ctx, jaxpr, buffers_gmem, consts
    )

  scratch_buffers = [
      jax.ShapeDtypeStruct(shape=[rs.smem_scratch_bytes], dtype=np.int8),
      rs.barriers,
  ]
  if rs.tmem_scratch_cols > 0 and rs.tmem_collective_scratch_cols > 0:
    raise ValueError(
        "Can't mix collective and non-collective TMEM allocations within the"
        " same kernel."
    )
  tmem_scratch_cols = rs.tmem_scratch_cols + rs.tmem_collective_scratch_cols
  if tmem_scratch_cols > 0:
    scratch_buffers.append(
        mgpu.TMEM(
            shape=(tcgen05.TMEM_ROWS, tmem_scratch_cols),
            dtype=np.int32,
            collective=rs.tmem_collective_scratch_cols > 0,
        ),
    )
  else:
    scratch_buffers.append(None)
  prof_spec = None
  if params.profile_space:
    # Each range is 2 events, each event is 4 bytes.
    prof_spec = mgpu_profiler.ProfilerSpec(
        params.profile_space * 2 * 4, dump_path=params.profile_dir
    )
  cuda_grid = tuple(map(operator.mul, parallel_grid, cluster))
  # One GMEM semaphore buffer per collective scope, sized by the number of
  # instances of that scope in the launch.
  scoped_semaphores_shape = []
  for collective_axes in sorted(rs.scoped_gmem_semaphores.keys()):
    num_sems = rs.scoped_gmem_semaphores[collective_axes]
    # TODO(justinfu): Compute axis_size for general collective_axes.
    # axis_size computes axis_size(all_axes - collective_axes)
    if _is_block_local_scope(collective_axes, axis_names):
      axis_size = math.prod(cuda_grid)
    elif _is_global_scope(collective_axes, axis_names):
      axis_size = 1
    else:
      raise NotImplementedError(
          f"Unimplemented scope for semaphores: {collective_axes=}")
    scoped_semaphores_shape.append(
        jax.ShapeDtypeStruct(
            shape=(axis_size * num_sems,), dtype=np.int32
        ),
    )
  scoped_semaphores_shape = tuple(scoped_semaphores_shape)
  # NOTE: new_out_shapes has out_shapes, then semaphores_shape and
  # optionally the profiler buffer.
  module, new_out_shapes, _, launch_ctx = (
      mgpu_core._lower_as_gpu_kernel(
          body,
          grid=cuda_grid,
          cluster=cluster,
          block=block,
          in_shapes=(*in_shapes, *scoped_semaphores_shape),
          out_shape=(*out_shapes, *scoped_semaphores_shape),
          inout_shape=(),
          smem_scratch_shape=scratch_buffers,
          lowering_semantics=lowering_semantics,
          module_name=mlir.sanitize_name(debug_info.func_name),
          kernel_name=mlir.sanitize_name(debug_info.func_name),
          prof_spec=prof_spec,
      )
  )
  if lowering_semantics == mgpu.LoweringSemantics.Warpgroup:
    # We need to run a pass that removes dead-code for which layout inference
    # does not work.
    pm = mlir.passmanager.PassManager.parse("builtin.module(canonicalize)", module.context)
    pm.run(module.operation)
    # Run Python lowering passes. The remaining passes will be run in C++ in
    # jax/jaxlib/mosaic/gpu/custom_call.cc
    mgpu.infer_layout(module)  # pytype: disable=attribute-error
    mgpu.lower_mgpu_dialect(
        module, launch_ctx, auto_barriers=not params.unsafe_no_auto_barriers
    )
  launch_ctx.scratch.finalize_size()
  return LoweringResult(
      module, cuda_grid, block, new_out_shapes, prof_spec,
      scoped_semaphores_shape,
  )
# Registry of lowering rules, keyed by a (lowering semantics, primitive
# semantics) pair and populated via the `register_lowering_rule` decorator.
mosaic_lowering_rules = {
    # Rules applied when lowering with Mosaic GPU lane semantics.
    (mgpu.LoweringSemantics.Lane, gpu_core.PrimitiveSemantics.Warpgroup): {},
    gpu_core.LANExWARP_SEMANTICS: {},
    # Rules applied when lowering with Mosaic GPU warpgroup semantics.
    (mgpu.LoweringSemantics.Warpgroup,
     gpu_core.PrimitiveSemantics.Warpgroup): {},
}
def register_lowering_rule(
    primitive: jax_core.Primitive,
    lowering_semantics: mgpu.LoweringSemantics,
    primitive_semantics: gpu_core.PrimitiveSemantics = gpu_core.PrimitiveSemantics.Warpgroup,
):
  """Returns a decorator registering a lowering rule for ``primitive``.

  The decorated function is recorded in ``mosaic_lowering_rules`` under the
  given (lowering semantics, primitive semantics) pair and returned unchanged.
  """
  key = (lowering_semantics, primitive_semantics)

  def deco(fn):
    mosaic_lowering_rules[key][primitive] = fn
    return fn

  return deco
def _compute_name_stack_updates(
old_name_stack: list[str],
new_name_stack: list[str]
) -> tuple[list[str], list[str]]:
common_prefix_idx = 0
for i, (old, new) in enumerate(unsafe_zip(old_name_stack, new_name_stack)):
if old == new:
common_prefix_idx = i+1
else:
break
return old_name_stack[common_prefix_idx:], new_name_stack[common_prefix_idx:]
def lower_jaxpr_to_mosaic_gpu(
    module_ctx: ModuleContext,
    launch_ctx: mgpu.LaunchContext,
    jaxpr: jax_core.Jaxpr,
    args: Sequence[ir.Value],
    consts=(),
) -> Sequence[ir.Value]:
  """Lowers ``jaxpr`` equation by equation into Mosaic GPU IR.

  Args:
    module_ctx: per-module lowering state (semantics, predicates, scratch).
    launch_ctx: the Mosaic GPU launch context.
    jaxpr: the jaxpr to lower.
    args: lowered values bound to ``jaxpr.invars``.
    consts: lowered values bound to ``jaxpr.constvars``.

  Returns:
    The lowered values of ``jaxpr.outvars``.
  """
  env = {}

  def read_env(atom: jax_core.Atom):
    # Literals carry their value inline; everything else is in the env.
    return atom.val if isinstance(atom, jax_core.Literal) else env[atom]

  def write_env(var: jax_core.Var, val, require_value: bool = True):
    env[var] = val
    # TODO(apaszke): Handle other avals (refs, etc.).
    if isinstance(aval := var.aval, jax_core.ShapedArray):
      # TODO(apaszke): Clarify the type invariants for lane semantics?
      if module_ctx.lowering_semantics == mgpu.LoweringSemantics.Warpgroup:
        # Shaped arrays must be vectors if and only if their shape is non-empty.
        # Those with empty shapes should be represented by their scalar type.
        mlir_dtype = mgpu_utils.dtype_to_ir_type(aval.dtype)
        if not isinstance(val, ir.Value):
          if require_value:
            raise AssertionError(f"Shaped arrays must be represented by ir.Values, got: {val}")
          else:
            if aval.shape:
              raise AssertionError("Only scalars can be represented by non-ir.Values")
            return  # Skip following checks.
        if aval.shape:
          if not ir.VectorType.isinstance(val.type):
            raise AssertionError(f"Non-scalar arrays must be represented by vectors, got: {val.type}")
          vty = ir.VectorType(val.type)
          if vty.element_type != mlir_dtype:
            raise AssertionError(f"Vector element type must match ShapedArray dtype, got: {val.type} != {mlir_dtype}")
          if tuple(vty.shape) != aval.shape:
            raise AssertionError(f"Vector shape must match ShapedArray shape, got: {vty.shape} != {aval.shape}")
        else:
          if ir.VectorType.isinstance(val.type):
            raise AssertionError(f"Scalars must be represented by non-vector types, got: {val.type}")
          if val.type != mlir_dtype:
            raise AssertionError(f"Scalar type must match ShapedArray dtype, got: {val.type} != {mlir_dtype}")

  # Consts/args may be represented by non-ir.Value scalars, hence
  # require_value=False here.
  foreach(
      functools.partial(write_env, require_value=False), jaxpr.constvars, consts
  )
  foreach(functools.partial(write_env, require_value=False), jaxpr.invars, args)
  # TODO(justinfu): Handle transform scopes.
  last_local_name_stack: list[str] = []
  named_regions = []
  for i, eqn in enumerate(jaxpr.eqns):
    invals = map(read_env, eqn.invars)
    eqn_name_stack = module_ctx.name_stack + eqn.source_info.name_stack
    loc = mlir.source_info_to_location(  # pytype: disable=wrong-arg-types
        module_ctx, eqn.primitive, eqn_name_stack, eqn.source_info.traceback
    )
    with source_info_util.user_context(eqn.source_info.traceback), loc:
      if eqn.primitive not in mosaic_lowering_rules[
          (module_ctx.lowering_semantics, module_ctx.primitive_semantics)]:
        raise NotImplementedError(
            "Unimplemented primitive in Pallas Mosaic GPU lowering: "
            f"{eqn.primitive.name} for lowering semantics "
            f"{module_ctx.lowering_semantics} and user thread semantics "
            f"{module_ctx.primitive_semantics}. "
            "Please file an issue on https://github.com/jax-ml/jax/issues."
        )
      # Keep the profiler's named regions in sync with the eqn's name stack:
      # close regions we left and open regions we entered.
      new_local_name_stack = [scope.name for scope in eqn.source_info.name_stack.stack]
      popped, pushed = _compute_name_stack_updates(last_local_name_stack, new_local_name_stack)
      last_local_name_stack = new_local_name_stack
      for _ in popped:
        named_regions.pop().close()
      for name in pushed:
        wrapper_stack = contextlib.ExitStack()
        wrapper_stack.enter_context(launch_ctx.named_region(name))
        named_regions.append(wrapper_stack)
      rule = mosaic_lowering_rules[
          (module_ctx.lowering_semantics, module_ctx.primitive_semantics)
      ][eqn.primitive]
      # If the equation is immediately followed by a layout cast on its output,
      # we provide the layout as a hint to the rule.
      out_layout_hint = None
      if i + 1 < len(jaxpr.eqns):
        lookahead_eqn = jaxpr.eqns[i + 1]
        is_layout_cast = lookahead_eqn.primitive == gpu_core.layout_cast_p
        uses_eqn_output = lookahead_eqn.invars == eqn.outvars
        if is_layout_cast and uses_eqn_output:
          out_layout_hint = lookahead_eqn.params["new_layout"].to_mgpu()
      rule_ctx = LoweringRuleContext(
          module_ctx,
          launch_ctx,
          avals_in=[cast(jax_core.ShapedArray, v.aval) for v in eqn.invars],
          avals_out=[cast(jax_core.ShapedArray, v.aval) for v in eqn.outvars],
          prim=eqn.primitive,
          out_layout_hint=out_layout_hint,
      )
      try:
        outvals = rule(rule_ctx, *invals, **eqn.params)
      except LoweringError:
        raise  # We only add the extra info to the innermost exception.
      except Exception as e:
        if not config.jax_pallas_verbose_errors.value:
          raise
        inval_types = map(lambda t: getattr(t, "type", None), invals)
        raise LoweringError(
            f"Exception while lowering eqn:\n {eqn}\nWith context:\n "
            f" {rule_ctx}\nWith inval types={inval_types}\nIn jaxpr:\n{jaxpr}"
        ) from e
      if eqn.primitive.multiple_results:
        foreach(write_env, eqn.outvars, outvals)
      else:
        write_env(eqn.outvars[0], outvals)
  while named_regions:  # Drain the name stack.
    named_regions.pop().close()
  return map(read_env, jaxpr.outvars)
@register_lowering_rule(primitives.program_id_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(
    primitives.program_id_p, mgpu.LoweringSemantics.Warpgroup)
def _program_id_lowering_rule(ctx: LoweringRuleContext, axis):
  """Returns the precomputed program id for ``axis`` from the module context."""
  program_ids = ctx.module_ctx.program_ids
  if program_ids is None:
    raise NotImplementedError("pl.program_id() is not supported in this context")
  return program_ids[axis]
def _unravel_program_id(
    block_id: ir.Value,
    axis: int,
    dimensions: tuple[int, ...],
    row_major: bool = False
) -> ir.Value:
  """Computes the program ID for axes compressed into one block dimension."""
  # Divide the flat id by the product of all dimensions that vary faster
  # than `axis` (which dimensions those are depends on the layout), then
  # take the remainder modulo the size of `axis` itself.
  faster_dims = dimensions[axis + 1:] if row_major else dimensions[:axis]
  divisor = _as_index(_i32_constant(math.prod(faster_dims)))
  quotient = arith_dialect.divui(block_id, divisor)
  axis_extent = _as_index(_i32_constant(dimensions[axis]))
  pid = arith_dialect.remui(quotient, axis_extent)
  return arith_dialect.index_cast(ir.IntegerType.get_signless(32), pid)
def _program_id(
    parallel_axis: int, squashed_dims: tuple[int, ...], grid_size: int
) -> ir.Value:
  """Returns the id of the current kernel instance along the given axis in the original Pallas grid."""
  num_squashed = len(squashed_dims)
  if parallel_axis < num_squashed:
    # All squashed dimensions are mapped to Dimension.z; recover the axis id
    # by unraveling the flat z block id.
    flat_id = gpu_dialect.block_id(gpu_dialect.Dimension.z)
    return _unravel_program_id(
        flat_id, num_squashed - 1 - parallel_axis, squashed_dims)
  # Non-squashed axes map directly onto the (reversed) CUDA grid dimensions.
  dim_idx = grid_size - 1 - parallel_axis
  assert dim_idx in (0, 1, 2)
  return arith_dialect.index_cast(
      ir.IntegerType.get_signless(32),
      gpu_dialect.block_id(gpu_dialect.Dimension(dim_idx)))
def _lower_fun(
    fun: Callable[..., Any], *, multiple_results: bool
) -> Callable[..., Any]:
  """Builds a lowering rule that traces ``fun`` to a jaxpr and lowers that.

  When ``multiple_results`` is False, the single output of ``fun`` is wrapped
  in a tuple for tracing and unwrapped again before being returned.
  """
  def lowering_rule(ctx: LoweringRuleContext, *args, **params):
    if multiple_results:
      target = fun
    else:
      target = lambda *args, **params: (fun(*args, **params),)
    wrapped_fun = lu.wrap_init(
        target,
        params,
        debug_info=api_util.debug_info(
            "Pallas Mosaic GPU lower_fun", fun, args, params
        ),
    )
    jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrapped_fun, ctx.avals_in)
    outs = lower_jaxpr_to_mosaic_gpu(
        ctx.module_ctx, ctx.launch_ctx, jaxpr, args, consts
    )
    return outs if multiple_results else outs[0]

  return lowering_rule
def _handle_dtype_bitcast(
    ref: ir.Value, src_dtype: ir.Type, dst_dtype: ir.Type
) -> ir.Value:
  """Allows bitcasting a SMEM ref from one element type to another.

  Only contiguous 1D i8 refs in workgroup (SMEM) memory are supported as the
  source; the length is recomputed in units of the destination element type.

  Args:
    ref: the reference to bitcast.
    src_dtype: the source element type.
    dst_dtype: the destination element type.

  Returns:
    A bitcasted version of `ref` with element type `dst_dtype`.

  Raises:
    ValueError: if the source ref is not in SMEM.
  """
  if src_dtype == dst_dtype:
    return ref
  if src_dtype != ir.IntegerType.get_signless(8):
    raise NotImplementedError(
        "Data type bitcast is only supported from i8 to other types."
    )
  ref_ty = ir.MemRefType(ref.type)
  if not mgpu_utils.is_smem_ref(ref_ty):
    raise ValueError(f"Only workgroup memory is supported but got {ref}.")
  if len(ref_ty.shape) != 1:
    raise NotImplementedError(
        "Data type bitcast is only supported for 1D arrays."
    )
  [stride], _ = ref_ty.get_strides_and_offset()
  if stride != 1:
    raise ValueError(
        "Data type bitcast is only supported for contiguous 1D arrays, but got "
        f"stride={stride}."
    )
  [shape_bytes] = ref_ty.shape
  # Recompute the buffer length in units of the destination element type.
  shape_bitwidth = shape_bytes * 8
  target_bitwidth = mgpu_utils.bitwidth(dst_dtype)
  if shape_bitwidth % target_bitwidth:
    raise ValueError(
        f"Can not bitcast memory region of size {shape_bitwidth} bits to dtype "
        f"with {target_bitwidth} bits."
    )
  result_type = ir.MemRefType.get(
      shape=(shape_bitwidth // target_bitwidth,),
      element_type=dst_dtype,
      memory_space=ref_ty.memory_space,
  )
  # Do a memref_ptr/ptr_as_memref roundtrip instead of using `memref.view`,
  # which refuses to take in our source ref. This is because `memref.view` only
  # works on a super restricted set of `memref`s. E.g., it does not work if an
  # offset is specified, which can be the case for our SMEM refs.
  smem = mgpu_utils.WORKGROUP_NVPTX_ADDRESS_SPACE
  ref = mgpu_utils.memref_ptr(ref, memory_space=smem)
  return mgpu_utils.ptr_as_memref(ref, result_type, ptr_memory_space=smem)
def _extract_aliased_ref(
    ref: RefOrTmemType, transforms: Sequence[state_types.Transform]
) -> tuple[RefOrTmemType, Sequence[state_types.Transform]]:
  """Resolves a leading ``ExtractAliasedRef`` transform, if one is present.

  For TMEM refs the aliased ref is materialized by offsetting the TMEM
  address; for SMEM refs by slicing the underlying byte buffer, bitcasting
  it to the target dtype and reshaping it. If the first transform is not an
  ``ExtractAliasedRef``, the inputs are returned unchanged.

  Returns:
    The (possibly rewritten) ref and the remaining transforms.
  """
  match transforms:
    case (
        gpu_core.ExtractAliasedRef(
            dtype, transformed_shape, offset, layout
        ),
        *other_transforms,
    ):
      mlir_dtype = mgpu_utils.dtype_to_ir_type(dtype)
      if isinstance(ref, tcgen05.TMEMRef):
        # TMEM aliasing: rebase the TMEM address by the requested offset.
        assert layout is not None
        if ref.shape[0] != transformed_shape[0]:
          raise ValueError(
              "TMEM aliasing only supported for Refs with the same first"
              f" dimension, got {ref.shape[0]} != {transformed_shape[0]}."
          )
        address = arith_dialect.addi(ref.address, _i32_constant(offset))
        ref = tcgen05.TMEMRef(
            address=address,
            shape=transformed_shape,
            dtype=mgpu_utils.dtype_to_ir_type(dtype),
            layout=layout)
      else:
        # SMEM aliasing: slice the byte buffer, then bitcast and reshape.
        assert layout is None
        ref_bits = math.prod(transformed_shape) * mgpu_utils.bitwidth(mlir_dtype)
        if ref_bits % 8:
          raise NotImplementedError("Only byte-aligned bitcasts are supported.")
        assert offset % gpu_core.SMEM_ALIGNMENT == 0
        ref_bytes = ref_bits // 8
        ref = mgpu.memref_slice(ref, slice(offset, offset + ref_bytes))
        ref = _handle_dtype_bitcast(
            ref,
            ir.MemRefType(ref.type).element_type,
            mgpu_utils.dtype_to_ir_type(dtype),
        )
        ref = mgpu.memref_reshape(ref, transformed_shape)
      return ref, tuple(other_transforms)
    case _:
      return ref, transforms
def _transform_dtype(
    dtype: "dtypes.DType",
    transforms: "Sequence[state_types.Transform]",
) -> "dtypes.DType":
  """Applies `t.transform_dtype` for `t` in `transforms` sequentially on `dtype`."""
  result = dtype
  for transform in transforms:
    result = transform.transform_dtype(result)
    assert result is not None
  return result  # pytype: disable=bad-return-type
def _handle_transforms(
    ctx: LoweringRuleContext,
    ref: RefOrTmemType,
    transforms: Sequence[state_types.Transform],
    *,
    handle_transposes=True,
    handle_reshapes=True,
    allow_peer_refs=False,
    allow_multicast_refs=False,
) -> tuple[RefOrTmemType, Sequence[state_types.Transform]]:
  """Applies the given transforms to ``ref``.

  Indexing is always applied eagerly; transposes and reshapes are applied
  eagerly only when the respective ``handle_*`` flag is set, and are
  otherwise accumulated and returned. Peer/multicast ref transforms are
  resolved at the end, gated by the ``allow_*`` flags.

  Returns:
    The transformed ref and the transforms that were not applied.
  """
  # Before we handle other transforms, we resolve any possible leading
  # aliasing transform.
  ref, transforms = _extract_aliased_ref(ref, transforms)
  if isinstance(ref, tcgen05.TMEMRef):
    mlir_dtype = ref.dtype
  else:
    mlir_dtype = ir.MemRefType(ref.type).element_type
  transformed_ref = ref
  new_transforms = []

  def _bubble_up(untransform_fn, data):
    # Pushes `data` (indices/permutations/shapes) backwards through the
    # transforms accumulated so far, rewriting them in the process.
    nonlocal new_transforms
    new_transforms_rev = []
    for t in reversed(new_transforms):
      data, new_t = untransform_fn(t, data)
      new_transforms_rev.append(new_t)
    new_transforms = list(reversed(new_transforms_rev))
    return data

  peer_device_id = None
  is_multicast = False
  for t in transforms:
    match t:
      case indexing.NDIndexer():
        indexer = cast(indexing.NDIndexer, t)
        if indexer.int_indexer_shape:
          raise NotImplementedError("int_indexer_shape non-empty")
        indices = _ndindexer_indices(indexer)
        indices = _bubble_up(
            lambda t, idxs: t.untransform_index(mlir_dtype, idxs), indices
        )
        if (
            isinstance(transformed_ref, tcgen05.TMEMRef)
            and ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Lane
        ):
          transformed_ref = transformed_ref.slice(*indices)
        else:
          transformed_ref = mgpu.memref_slice(transformed_ref, indices)
      case RefTransposer(perm):
        if handle_transposes:
          perm = _bubble_up(lambda t, p: t.untransform_transpose(p), perm)
          if isinstance(transformed_ref, tcgen05.TMEMRef):
            raise ValueError("TMEM transpose not allowed.")
          transformed_ref = mgpu.memref_transpose(transformed_ref, perm)
        else:
          if not isinstance(t, gpu_core.TransposeRef):
            t = gpu_core.TransposeRef(perm)
          new_transforms.append(t)
      case RefReshaper(dtype=dtype, shape=shape) if handle_reshapes:
        shape = _bubble_up(
            lambda t, p: t.untransform_reshape(dtype, p),  # pylint: disable=cell-var-from-loop
            shape)
        if isinstance(transformed_ref, tcgen05.TMEMRef):
          raise ValueError("TMEM reshape not allowed.")
        transformed_ref = mgpu.memref_reshape(transformed_ref, shape)
      case gpu_core.PeerMemRef(device_id, device_id_type):
        # Remember the peer target; the actual remote-ref rewrite happens
        # after the loop so it applies to the fully transformed ref.
        peer_device_id, other_axes = primitives.device_id_to_logical(
            ctx.module_ctx.mesh_info,
            _ensure_ir_value_device_id(device_id),
            device_id_type,
            lambda name: _axis_index_rule(ctx, axis_name=name),
        )
        if other_axes:
          raise ValueError(
              "Only JAX mesh axes can be used to obtain peer references, but"
              f" got {other_axes}"
          )
      case gpu_core.MulticastRef(_, _, _):
        if not allow_multicast_refs:
          raise NotImplementedError(
              "Multicast references are not allowed in the lowering of this"
              " primitive."
          )
        is_multicast = True
      case _:
        new_transforms.append(t)
  if peer_device_id is not None:
    assert not is_multicast
    if not allow_peer_refs:
      raise NotImplementedError(
          "Peer device references are not allowed in the lowering of this"
          " primitive."
      )
    transformed_ref = ctx.launch_ctx.to_remote(
        transformed_ref, _ensure_ir_value(peer_device_id, jnp.int32)
    )
  if is_multicast:
    transformed_ref = ctx.launch_ctx.to_remote_multicast(transformed_ref)
  return transformed_ref, new_transforms
def _ndindexer_indices(
    indexer: indexing.NDIndexer, allow_arrays: bool = False
) -> tuple[gpu_core.Index | mgpu.FragmentedArray, ...]:
  """Translates an ``NDIndexer`` into Mosaic GPU-compatible indices.

  Scalar indices become MLIR index values, fully static slices become Python
  slices, dynamic unit-stride slices become ``mgpu.DynamicSlice``s, and
  array indices pass through unchanged (only if ``allow_arrays``).
  """
  result = []
  for idx in indexer.indices:
    if isinstance(idx, mgpu.FragmentedArray) and idx.shape:
      if not allow_arrays:
        raise ValueError("Arrays are not supported as indices.")
      result.append(idx)
      continue
    if not isinstance(idx, indexing.Slice):
      result.append(_as_index(idx))
    elif not idx.is_dynamic_start and not idx.is_dynamic_size:
      result.append(slice(idx.start, idx.start + idx.size, idx.stride))
    elif idx.stride == 1:
      start = _as_index(idx.start) if idx.is_dynamic_start else idx.start
      size = _as_index(idx.size) if idx.is_dynamic_size else idx.size
      result.append(mgpu.DynamicSlice(start, size))
    else:
      raise NotImplementedError(f"Unsupported slice: {idx}")
  return tuple(result)
@register_lowering_rule(sp.get_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(
    sp.get_p, mgpu.LoweringSemantics.Lane, gpu_core.PrimitiveSemantics.Warp
)
def _get_lowering_rule(
    ctx: LoweringRuleContext, x_ref, *leaves, tree, optimized=True
):
  """Lowers a ref read (sp.get_p) under lane semantics.

  ``leaves``/``tree`` encode the flattened ref transforms. Scalar reads
  become a splat; swizzled+tiled SMEM refs use ``load_tiled``; otherwise a
  strided or untiled load is emitted, honoring ``ctx.out_layout_hint``.
  """
  if isinstance(x_ref, tcgen05.TMEMRef):
    raise RuntimeError(
        "Loads from TMEM are asynchronous operations and cannot be performed"
        " using the usual syntax. Please use plgpu.async_load_tmem instead."
    )
  if (
      ctx.avals_out[0].shape
      and ctx.module_ctx.primitive_semantics == gpu_core.PrimitiveSemantics.Warp
  ):
    raise ValueError("Can only load scalars in warp-level code.")
  # NOTE(review): this guard looks inverted — `ir.MemRefType.isinstance`
  # elsewhere in this file is called on `x.type`, not on the value itself
  # (cf. the warpgroup swap rule), so as written this may never raise as
  # intended; confirm upstream.
  if not isinstance(x_ref, ir.Value) and ir.MemRefType.isinstance(x_ref):
    raise TypeError(f"Can only load from references (got {x_ref}).")
  dtype = ctx.avals_out[0].dtype
  transforms = jax.tree.unflatten(tree, leaves)
  x_smem, transforms = _handle_transforms(
      ctx, x_ref, transforms, allow_peer_refs=True
  )
  del x_ref  # Don't use x_ref anymore. Use x_smem instead!
  is_signed = mgpu_utils.is_signed(dtype)
  if not ctx.avals_out[0].shape:  # The scalar case is simple.
    val = memref_dialect.load(x_smem, [])
    return mgpu.FragmentedArray.splat(val, shape=(), is_signed=is_signed)
  match transforms:
    case (gpu_core.UnswizzleRef(swizzle), gpu_core.UntileRef(tiling)):
      if len(tiling) != 2:
        raise NotImplementedError(f"Only 2D tiling is supported, got: {tiling}")
      # The minor tile must span exactly one swizzle's worth of elements.
      expected_minor_tiling = swizzle * 8 // dtypes.itemsize_bits(dtype)
      if tiling[-1] != expected_minor_tiling:
        raise NotImplementedError(
            "Minor tiling dimension does not fit swizzle: "
            f" expected {expected_minor_tiling}, got {tiling[-1]}"
        )
      layout = ctx.out_layout_hint or mgpu.WGMMA_LAYOUT
      return mgpu.FragmentedArray.load_tiled(
          x_smem, is_signed=is_signed, swizzle=swizzle, layout=layout, optimized=optimized
      )
    case ():
      match ctx.out_layout_hint:
        case mgpu.WGStridedFragLayout(shape=shape, vec_size=vec_size):
          ref_ty = ir.MemRefType(x_smem.type)
          if shape != tuple(ref_ty.shape):
            raise ValueError(
                f"Unsupported shape {shape}, (expected {tuple(ref_ty.shape)})"
            )
          return mgpu.FragmentedArray.load_strided(
              x_smem,
              is_signed=is_signed,
              vec_size=vec_size,
          )
        case None:
          return mgpu.FragmentedArray.load_strided(x_smem, is_signed=is_signed)
        case _:
          return mgpu.FragmentedArray.load_untiled(
              x_smem,
              is_signed=is_signed,
              layout=ctx.out_layout_hint,
              swizzle=16,
              optimized=optimized,
          )
    case _:
      raise NotImplementedError(f"Unsupported transforms: {transforms}")
@register_lowering_rule(sp.get_p, mgpu.LoweringSemantics.Warpgroup)
def _get_lowering_rule_wg(
    ctx: LoweringRuleContext, x_ref, *leaves, tree, optimized=True
):
  """Lowers a ref read to a vector load (non-scalar) or memref load (scalar)."""
  if not isinstance(x_ref, ir.Value) and ir.MemRefType.isinstance(x_ref):
    raise TypeError(f"Can only load from references (got {x_ref}).")
  transforms = jax.tree.unflatten(tree, leaves)
  x_ref, transforms = _handle_transforms(
      ctx, x_ref, transforms, allow_peer_refs=True
  )
  if transforms:
    raise NotImplementedError(
        "Transforms are not yet implemented for warpgroup semantics"
    )
  assert isinstance(x_ref, ir.Value)
  if ctx.avals_out[0].shape:
    return mgpu.dialect.vector_load(x_ref, optimized=optimized)
  return memref_dialect.load(x_ref, [])
@register_lowering_rule(sp.swap_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(
    sp.swap_p, mgpu.LoweringSemantics.Lane, gpu_core.PrimitiveSemantics.Warp
)
def _swap_lowering_rule(
    ctx: LoweringRuleContext, x_ref, value, *leaves, tree
):
  """Lowers a ref swap (store returning the old value) under lane semantics.

  Loads the current contents of the (transformed) SMEM ref, stores ``value``
  in its place and returns the old contents. When ``auto_barriers`` is set,
  barriers are emitted before and after the store to order it with respect
  to other threads' accesses.
  """
  if isinstance(x_ref, tcgen05.TMEMRef):
    raise RuntimeError(
        "Stores to TMEM are asynchronous operations and cannot be performed"
        " using the usual syntax. Please use plgpu.async_store_tmem instead."
    )
  barrier = mgpu.warpgroup_barrier
  if ctx.module_ctx.primitive_semantics == gpu_core.PrimitiveSemantics.Warp:
    if ctx.avals_out[0].shape:
      raise NotImplementedError("Can only store scalars in warp-level lowering.")
    i32 = ir.IntegerType.get_signless(32)
    # Warp-level code synchronizes with a warp sync rather than a warpgroup
    # barrier.
    barrier = functools.partial(
        nvvm_dialect.bar_warp_sync, arith_dialect.constant(i32, -1)
    )
  value = _ensure_fa(value, ctx.avals_in[1].dtype)
  # NOTE(review): this guard looks inverted — `ir.MemRefType.isinstance`
  # elsewhere in this file is called on `x.type`, not on the value itself
  # (cf. the warpgroup swap rule), so as written this may never raise as
  # intended; confirm upstream.
  if not isinstance(x_ref, ir.Value) and ir.MemRefType.isinstance(x_ref):
    raise TypeError(f"Can only store to references (got {x_ref}).")
  v_aval = ctx.avals_in[1]
  transforms = jax.tree.unflatten(tree, leaves)
  transposed_value = value.layout in (
      mgpu.WGMMA_TRANSPOSED_LAYOUT,
      mgpu.TCGEN05_TRANSPOSED_LAYOUT,
  )
  x_smem, transforms = _handle_transforms(
      ctx, x_ref, transforms, handle_transposes=not transposed_value,
      allow_peer_refs=True
  )
  del x_ref  # Don't use x_ref anymore. Use x_smem instead!
  if ctx.module_ctx.auto_barriers:
    barrier()  # Make sure reads have completed before we write.
  match transforms:
    case _ if not ctx.avals_out[0].shape:  # Scalar case.
      old_value = mgpu.FragmentedArray.splat(
          memref_dialect.load(x_smem, []),
          shape=(),
          is_signed=mgpu_utils.is_signed(v_aval.dtype),
      )
      memref_dialect.store(
          _ensure_ir_value(value, ctx.avals_out[0].dtype), x_smem, []
      )
    case (
        gpu_core.UnswizzleRef(swizzle),
        gpu_core.UntileRef(tiling),
        *maybe_transpose,
    ):
      if len(tiling) != 2:
        raise NotImplementedError(f"Only 2D tiling is supported, got: {tiling}")
      # The minor tile must span exactly one swizzle's worth of elements.
      bw = dtypes.itemsize_bits(v_aval.dtype)
      expected_minor_tiling = swizzle * 8 // bw
      if tiling[-1] != expected_minor_tiling:
        raise NotImplementedError(
            "Minor tiling dimension does not fit swizzle: "
            f" expected {expected_minor_tiling}, got {tiling[-1]}"
        )
      if transposed_value != bool(maybe_transpose):
        raise ValueError(
            "Either both the ref and the value are transposed or neither is."
        )
      if maybe_transpose:
        if maybe_transpose != [gpu_core.TransposeRef((1, 0))]:
          raise NotImplementedError(
              f"Unsupported transforms: {transforms} ({maybe_transpose})"
          )
        # Swap the tile dims (and intra-tile dims) to match the transposed
        # value layout.
        x_smem = mgpu.memref_transpose(x_smem, (1, 0, 3, 2))
      old_value = mgpu.FragmentedArray.load_tiled(
          x_smem,
          is_signed=mgpu_utils.is_signed(v_aval.dtype),
          swizzle=swizzle,
          layout=value.layout,
      )
      value.store_tiled(x_smem, swizzle=swizzle)
    case ():
      match value.layout:
        case mgpu.TiledLayout():
          old_value = mgpu.FragmentedArray.load_untiled(
              x_smem,
              layout=value.layout,
              is_signed=mgpu_utils.is_signed(v_aval.dtype),
              optimized=False,
          )
          value.store_untiled(x_smem, optimized=False)
        case _:
          old_value = mgpu.FragmentedArray.load_strided(
              x_smem, is_signed=mgpu_utils.is_signed(v_aval.dtype)
          )
          value.store_untiled(x_smem)
    case _:
      raise NotImplementedError(f"Unsupported transforms: {transforms}")
  if ctx.module_ctx.auto_barriers:
    barrier()  # Make sure the writes have completed.
  return old_value
@register_lowering_rule(sp.swap_p, mgpu.LoweringSemantics.Warpgroup)
def _swap_lowering_rule_wg(
    ctx: LoweringRuleContext, x_smem, value, *leaves, tree
):
  """Stores ``value`` into the ref and returns its previous contents."""
  out_shape = ctx.avals_out[0].shape
  if out_shape and not ir.VectorType.isinstance(value.type):
    raise TypeError(f"Can only store scalars or vectors (got {value}).")
  is_memref = (
      isinstance(x_smem, ir.Value) and ir.MemRefType.isinstance(x_smem.type)
  )
  if not is_memref:
    raise TypeError(f"Can only store to references (got {x_smem}).")
  transforms = jax.tree.unflatten(tree, leaves)
  x_smem, transforms = _handle_transforms(
      ctx, x_smem, transforms, allow_peer_refs=True)
  if transforms:
    raise NotImplementedError(
        "Transforms are not yet implemented for warpgroup semantics"
    )
  assert isinstance(x_smem, ir.Value)
  if not out_shape:
    previous = memref_dialect.load(x_smem, [])
    memref_dialect.store(value, x_smem, [])
    return previous
  previous = mgpu.dialect.vector_load(x_smem)
  mgpu.dialect.vector_store(value, x_smem)
  return previous
@register_lowering_rule(pjit.jit_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(pjit.jit_p, mgpu.LoweringSemantics.Warpgroup)
@register_lowering_rule(
    pjit.jit_p, mgpu.LoweringSemantics.Lane, gpu_core.PrimitiveSemantics.Warp
)
def _pjit_lowering_rule(ctx: LoweringRuleContext, *args, jaxpr, **kwargs):
  """Inlines a jitted computation by lowering its jaxpr in place."""
  del kwargs  # Unused.
  if jaxpr.consts:
    raise NotImplementedError
  return lower_jaxpr_to_mosaic_gpu(
      ctx.module_ctx, ctx.launch_ctx, jaxpr.jaxpr, args,
  )
@register_lowering_rule(lax.slice_p, mgpu.LoweringSemantics.Lane)
def _slice_lowering_rule(
    ctx: LoweringRuleContext, x, limit_indices, start_indices, strides
):
  """Lowers lax.slice by indexing the fragmented array with basic slices."""
  if strides is not None:
    raise NotImplementedError("Strides are not supported.")
  index = tuple(
      slice(start, stop) for start, stop in zip(start_indices, limit_indices)
  )
  return x[index]
@register_lowering_rule(lax.slice_p, mgpu.LoweringSemantics.Warpgroup)
def _slice_lowering_rule_wg(
    ctx: LoweringRuleContext, x, limit_indices, start_indices, strides
):
  """Lowers lax.slice to vector.extract_strided_slice (unit strides only)."""
  del limit_indices  # Implied by the output aval's shape.
  assert ir.VectorType.isinstance(x.type)
  if strides is not None:
    raise NotImplementedError("Strides are not supported.")
  out_shape = ctx.avals_out[0].shape
  result_type = ir.VectorType.get(
      out_shape, ir.VectorType(x.type).element_type
  )
  unit_strides = [1] * len(start_indices)
  return vector_dialect.extract_strided_slice(
      result_type, x, start_indices, out_shape, unit_strides
  )
@register_lowering_rule(lax.select_n_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(lax.select_n_p, mgpu.LoweringSemantics.Lane,
                        gpu_core.PrimitiveSemantics.Warp)
@register_lowering_rule(lax.select_n_p, mgpu.LoweringSemantics.Warpgroup)
def _select_n_lowering_rule(ctx: LoweringRuleContext, pred, *cases):
  """Lowers lax.select_n (restricted to exactly two cases) to a select."""
  if len(cases) != 2:
    raise NotImplementedError(
        "Mosaic GPU lowering only supports select_n with 2 cases, got"
        f" {len(cases)}"
    )
  pred_aval, *cases_avals = ctx.avals_in
  if ctx.module_ctx.primitive_semantics == gpu_core.PrimitiveSemantics.Warp:
    if not all(aval.shape == () for aval in ctx.avals_in):
      raise NotImplementedError(
          "Can only select on scalars in warp-level lowering.")
  [out_aval] = ctx.avals_out
  if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Lane:
    pred = _ensure_fa(pred, pred_aval.dtype)
    cases = _bcast(*cases, *cases_avals, out_aval)
    # ``select`` expects the first case to be the true branch, but ``select_n``
    # orders the cases in reverse.
    return pred.select(*reversed(cases))
  else:
    pred = _ensure_ir_value(pred, pred_aval.dtype)
    cases = [_ensure_ir_value(c, c_aval.dtype) for c, c_aval in zip(cases, cases_avals)]
    # TODO(bchetioui): support implicit broadcast.
    if any(a.shape != out_aval.shape for a in ctx.avals_in):
      raise NotImplementedError(
          "Implicit broadcast not implemented with warpgroup semantics")
    # ``select`` expects the first case to be the true branch, but ``select_n``
    # orders the cases in reverse.
    return arith_dialect.select(pred, *reversed(cases))
@register_lowering_rule(lax.broadcast_in_dim_p, mgpu.LoweringSemantics.Lane)
def _broadcast_in_dim_lowering_rule(
    ctx: LoweringRuleContext,
    x: mgpu.FragmentedArray,
    *,
    broadcast_dimensions,
    shape,
    sharding,
):
  """Lowers lax.broadcast_in_dim for fragmented arrays.

  Splat-layout inputs broadcast trivially. For tiled layouts the output
  layout is taken from ``ctx.out_layout_hint`` when given; otherwise it is
  searched among known layouts whose reduction over the new dimensions
  equals the input layout. No match or an ambiguous match raises and asks
  the user for an explicit ``plgpu.layout_cast`` on the output.
  """
  del sharding
  [x_aval] = ctx.avals_in
  [y_aval] = ctx.avals_out
  x = _ensure_fa(x, x_aval.dtype)
  rank_diff = y_aval.ndim - x_aval.ndim
  if (isinstance(x.layout, mgpu.WGSplatFragLayout) and
      broadcast_dimensions == tuple(range(rank_diff, rank_diff + x_aval.ndim))):
    return x.broadcast(shape)
  if not isinstance(layout := x.layout, mgpu.TiledLayout):
    raise NotImplementedError(f"Unsupported layout: {x.layout}")
  if any(d1 >= d2 for d1, d2 in zip(broadcast_dimensions[:-1], broadcast_dimensions[1:])):
    raise NotImplementedError("broadcast_dimensions must be strictly increasing")
  # The output dimensions that the input does not map to.
  new_dims = [d for d in range(y_aval.ndim) if d not in broadcast_dimensions]
  if (new_layout := ctx.out_layout_hint) is None:
    candidates = [
        mgpu.WGMMA_LAYOUT,
        mgpu.WGMMA_TRANSPOSED_LAYOUT,
        mgpu.TCGEN05_LAYOUT,
        mgpu.TCGEN05_TRANSPOSED_LAYOUT,
        tcgen05.TMEM_NATIVE_LAYOUT,
    ]
    if y_aval.shape[-1] % 16 == 0:
      candidates.append(tcgen05.fa_m64_collective_layout(y_aval.shape[-1]))
    # A candidate is compatible when reducing it over the new dimensions
    # yields the input layout.
    for candidate in candidates:
      if len(candidate.base_tile_shape) != len(shape):
        continue
      if candidate.reduce(new_dims) == layout:
        if new_layout is None:
          new_layout = candidate
        elif candidate == mgpu.TCGEN05_LAYOUT and new_layout == mgpu.WGMMA_LAYOUT:
          continue  # Choosing WGMMA_LAYOUT for backwards compatibility.
        else:
          raise NotImplementedError(
              "Multiple options for the layout of the broadcast result (found"
              f" at least {new_layout} and {candidate}). Use plgpu.layout_cast"
              " on the output to suggest the desired output layout."
          )
  if new_layout is None:
    raise NotImplementedError(
        "No compatible layout found for the broadcast result. Use"
        " plgpu.layout_cast on the output to suggest the desired output layout."
    )
  return x.broadcast_in_dim(y_aval.shape, broadcast_dimensions, new_layout)
@register_lowering_rule(
    lax.broadcast_in_dim_p, mgpu.LoweringSemantics.Warpgroup)
def _broadcast_in_dim_lowering_rule_wg(
    ctx: LoweringRuleContext,
    x,
    *,
    broadcast_dimensions,
    shape,
    sharding,
):
  """Lowers ``lax.broadcast_in_dim`` under warpgroup semantics.

  Scalar inputs (no broadcast dimensions) use ``vector.broadcast``; everything
  else is delegated to the Mosaic GPU dialect op.
  """
  del sharding
  [x_aval] = ctx.avals_in
  mlir_type = mgpu_utils.dtype_to_ir_type(x_aval.dtype)
  result_ty = ir.VectorType.get(shape, mlir_type)
  if not broadcast_dimensions:
    # Even though we could implement this case by passing a 0D vector as input
    # to mgpu.dialect.BroadcastInDimOp we don't want that. 0D vectors are
    # generally problematic and so we avoid them by specializing that case
    # directly here.
    x = _ensure_ir_value(x, x_aval.dtype)
    return vector_dialect.broadcast(result_ty, x)
  return mgpu.dialect.broadcast_in_dim(result_ty, x, broadcast_dimensions)
@register_lowering_rule(lax.convert_element_type_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(lax.convert_element_type_p,
    mgpu.LoweringSemantics.Lane, gpu_core.PrimitiveSemantics.Warp)
def _convert_element_type_lowering_rule(
    ctx: LoweringRuleContext, x, *, new_dtype, weak_type, sharding
):
  """Lowers ``convert_element_type`` at lane level via FragmentedArray.astype.

  Warp-level lowering additionally restricts the input to scalars.
  """
  del weak_type, sharding
  [x_aval] = ctx.avals_in
  if ctx.module_ctx.primitive_semantics == gpu_core.PrimitiveSemantics.Warp:
    if x_aval.shape != ():
      raise NotImplementedError(
          "Non-scalar arithmetic is not supported in warp-level lowering.")
  return _ensure_fa(x, x_aval.dtype).astype(
      mgpu_utils.dtype_to_ir_type(new_dtype), is_signed=mgpu_utils.is_signed(new_dtype)
  )
@register_lowering_rule(
    lax.convert_element_type_p, mgpu.LoweringSemantics.Warpgroup)
def _convert_element_type_lowering_rule_wg(
    ctx: LoweringRuleContext, x, *, new_dtype, weak_type, sharding
):
  """Lowers ``convert_element_type`` under warpgroup semantics.

  Picks the appropriate MLIR ``arith`` conversion op based on the source and
  destination element types. Float-to-int conversions are clamped to the
  destination's representable range to match JAX/XLA semantics.
  """
  del weak_type, sharding
  [x_aval] = ctx.avals_in
  [y_aval] = ctx.avals_out
  x = _ensure_ir_value(x, x_aval.dtype)
  cur_dtype = mgpu_utils.dtype_to_ir_type(x_aval.dtype)
  new_dtype = mgpu_utils.dtype_to_ir_type(new_dtype)
  if cur_dtype == new_dtype:
    # No-op conversion.
    return x
  if 1 < mgpu_utils.bitwidth(cur_dtype) < 8 or 1 < mgpu_utils.bitwidth(new_dtype) < 8:
    raise NotImplementedError("Conversion involving sub-byte types unsupported")
  from_float = ir.FloatType.isinstance(cur_dtype)
  to_float = ir.FloatType.isinstance(new_dtype)
  from_integer = ir.IntegerType.isinstance(cur_dtype)
  to_integer = ir.IntegerType.isinstance(new_dtype)
  if from_float and to_float:
    cur_ty_width = ir.FloatType(cur_dtype).width
    new_ty_width = ir.FloatType(new_dtype).width
    if cur_ty_width == new_ty_width:
      # There is no instruction to perform conversions between two float types
      # of the same width. Go through the next-larger standard type.
      # TODO(bchetioui): support conversions between float types of width 8.
      # Which larger type to pick will depend on the number of bits in the
      # smallest exponent.
      if cur_ty_width != 16:
        raise NotImplementedError(
            "Conversion between float types of width other than 16 not"
            " supported"
        )
      larger_ty = ir.F32Type.get()
      if x_aval.shape:
        upcast_ty = ir.VectorType.get(x_aval.shape, larger_ty)
      else:
        upcast_ty = larger_ty
      def convert(ty, x):
        # Round-trip through f32: extend, then truncate to the target type.
        return arith_dialect.truncf(ty, arith_dialect.extf(upcast_ty, x))
    elif ir.FloatType(cur_dtype).width > ir.FloatType(new_dtype).width:
      convert = arith_dialect.truncf
    else:
      convert = arith_dialect.extf
  elif from_integer and to_integer:
    if ir.IntegerType(cur_dtype).width > ir.IntegerType(new_dtype).width:
      convert = arith_dialect.trunci
    elif ir.IntegerType(cur_dtype).width < ir.IntegerType(new_dtype).width:
      if mgpu_utils.is_signed(x_aval.dtype):
        convert = arith_dialect.extsi
      else:
        convert = arith_dialect.extui
    else:
      convert = lambda _, x: x  # signed <-> unsigned conversions
  elif from_integer and to_float:
    if mgpu_utils.is_signed(x_aval.dtype):
      convert = arith_dialect.sitofp
    else:
      convert = arith_dialect.uitofp
  elif from_float and to_integer:
    dst_width = mgpu_utils.bitwidth(new_dtype)
    # We clamp the float value to the min/max integer destination value
    # in order to match JAX/XLA casting behavior. Note that this differs
    # from numpy casting behavior.
    if mgpu_utils.is_signed(y_aval.dtype):
      maxint = 2 ** (dst_width - 1) - 1
      minint = -(2 ** (dst_width - 1))
      convert = arith_dialect.fptosi
    else:
      maxint = 2**dst_width - 1
      minint = 0
      convert = arith_dialect.fptoui
    maxint = _ir_constant(maxint, cur_dtype)
    minint = _ir_constant(minint, cur_dtype)
    if x_aval.shape:
      # Vector input: broadcast the clamp bounds to the input's vector type.
      maxint = vector_dialect.broadcast(x.type, maxint)
      minint = vector_dialect.broadcast(x.type, minint)
    x = arith_dialect.minimumf(x, maxint)
    x = arith_dialect.maximumf(x, minint)
  else:
    raise NotImplementedError(f"Unsupported conversion {cur_dtype} -> {new_dtype}")
  ty = ir.VectorType.get(x_aval.shape, new_dtype) if x_aval.shape else new_dtype
  return convert(ty, x)
# Trivial unary rules for lane-level lowering: FragmentedArray overloads the
# corresponding Python operators.
mosaic_lowering_rules[gpu_core.LANExWG_SEMANTICS].update({
    lax.neg_p: lambda ctx, x: -x,
    lax.not_p: lambda ctx, x: ~x,
})
def _unary_warp_lowering_rule(impl):
def _lowering_rule(ctx: LoweringRuleContext, x):
if not all(aval_in.shape == () for aval_in in ctx.avals_in):
raise NotImplementedError(
"Non-scalar arithmetic is not supported in warp-level lowering.")
return impl(x)
return _lowering_rule
# Warp-level variants of the unary rules: same operators, but restricted to
# scalars by ``_unary_warp_lowering_rule``.
mosaic_lowering_rules[gpu_core.LANExWARP_SEMANTICS].update({
    lax.neg_p: _unary_warp_lowering_rule(lambda x: -x),
    lax.not_p: _unary_warp_lowering_rule(lambda x: ~x)
})
# Warpgroup-semantics variants are expressed as JAX ops and lowered
# recursively via ``_lower_fun``.
mosaic_lowering_rules[gpu_core.WGxWG_SEMANTICS].update({
    lax.neg_p: _lower_fun(lambda x: jnp.subtract(0, x), multiple_results=False),
    lax.not_p: _lower_fun(
        lambda x: jnp.astype(jnp.bitwise_xor(jnp.astype(x, int), -1), jnp.dtype(x)), multiple_results=False,
    ),
})
def _binary_op_lowering_rule(ctx: LoweringRuleContext, x, y, *, impl):
  """Generic lane-level binary-op rule: broadcasts operands, applies *impl*.

  Warp-level lowering additionally restricts the operands to scalars.
  """
  if ctx.module_ctx.primitive_semantics == gpu_core.PrimitiveSemantics.Warp:
    if not all(aval_in.shape == () for aval_in in ctx.avals_in):
      raise NotImplementedError(
          "Non-scalar arithmetic is not supported in warp-level lowering.")
  x, y = _bcast(x, y, *ctx.avals_in, *ctx.avals_out)
  return impl(x, y)
def _div(x, y):
  """True division for float operands, floor division for everything else."""
  if ir.FloatType.isinstance(x.mlir_dtype):
    return x / y
  return x // y
# Register the generic binary rule for every arithmetic/comparison primitive,
# for both whole-warpgroup and per-warp lane-level semantics. FragmentedArray
# overloads the corresponding Python operators.
for semantics in [gpu_core.LANExWG_SEMANTICS, gpu_core.LANExWARP_SEMANTICS]:
  mosaic_lowering_rules[semantics].update({
      lax.add_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x + y),
      lax.sub_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x - y),
      lax.mul_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x * y),
      lax.div_p: partial(_binary_op_lowering_rule, impl=_div),
      lax.rem_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x % y),
      lax.and_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x & y),
      lax.or_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x | y),
      lax.xor_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x ^ y),
      lax.gt_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x > y),
      lax.lt_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x < y),
      lax.ge_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x >= y),
      lax.le_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x <= y),
      lax.eq_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x == y),
      lax.ne_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x != y),
      lax.max_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x.max(y)),
      lax.min_p: partial(_binary_op_lowering_rule, impl=lambda x, y: x.min(y)),
  })
def _binary_op_lowering_rule_wg(
    ctx: LoweringRuleContext, x, y, *, ui_impl, si_impl, f_impl=None
):
  """Generic warpgroup binary-op rule.

  Broadcasts the operands, then dispatches on the output dtype to the
  signed-int, unsigned-int, or (optional) float implementation.
  """
  x_aval, y_aval = ctx.avals_in
  [out_aval] = ctx.avals_out
  x, y = _bcast_wg(x, y, *ctx.avals_in, *ctx.avals_out)
  if jnp.issubdtype(out_aval, jnp.signedinteger):
    return si_impl(x, y)
  elif jnp.issubdtype(out_aval, jnp.integer):
    return ui_impl(x, y)
  elif f_impl is not None and jnp.issubdtype(out_aval, jnp.floating):
    return f_impl(x, y)
  else:
    raise NotImplementedError(
        f"{ctx.prim} does not support {x_aval.dtype} and {y_aval.dtype}"
    )
# Register the arith-dialect implementations of each arithmetic primitive
# under warpgroup semantics.
for op, si_impl, ui_impl, f_impl in [
    (lax.add_p, arith_dialect.addi, arith_dialect.addi, arith_dialect.addf),
    (lax.sub_p, arith_dialect.subi, arith_dialect.subi, arith_dialect.subf),
    (lax.mul_p, arith_dialect.muli, arith_dialect.muli, arith_dialect.mulf),
    (
        lax.div_p,
        arith_dialect.floordivsi,
        arith_dialect.divui,
        arith_dialect.divf,
    ),
    (lax.rem_p, arith_dialect.remsi, arith_dialect.remui, arith_dialect.remf),
    (
        lax.max_p,
        arith_dialect.maxsi,
        arith_dialect.maxui,
        arith_dialect.maximumf,
    ),
    (
        lax.min_p,
        arith_dialect.minsi,
        arith_dialect.minui,
        arith_dialect.minimumf,
    ),
]:
  mosaic_lowering_rules[gpu_core.WGxWG_SEMANTICS][op] = partial(
      _binary_op_lowering_rule_wg,
      si_impl=si_impl,
      ui_impl=ui_impl,
      f_impl=f_impl,
  )
def _binary_boolean_op_lowering_rule_wg(
    ctx: LoweringRuleContext, x, y, *, impl
):
  """Generic warpgroup rule for bitwise/boolean binary ops."""
  x, y = _bcast_wg(x, y, *ctx.avals_in, *ctx.avals_out)
  return impl(x, y)
# Bitwise ops map directly to the corresponding arith-dialect integer ops.
for op, impl in [
    (lax.and_p, arith_dialect.andi),
    (lax.or_p, arith_dialect.ori),
    (lax.xor_p, arith_dialect.xori),
]:
  mosaic_lowering_rules[gpu_core.WGxWG_SEMANTICS][op] = partial(
      _binary_boolean_op_lowering_rule_wg,
      impl=impl,
  )
# Short aliases for the MLIR arith comparison predicate enums.
CmpIPred = arith_dialect.CmpIPredicate
CmpFPred = arith_dialect.CmpFPredicate
def _comparison_lowering_rule_wg(
    ctx: LoweringRuleContext, x, y, *, si_pred, ui_pred, f_pred
):
  """Generic warpgroup comparison rule.

  Picks the ``arith.cmpi``/``arith.cmpf`` predicate based on the operand
  dtype; booleans compare with the unsigned-integer predicate.
  """
  x_aval, y_aval = ctx.avals_in
  x, y = _bcast_wg(x, y, *ctx.avals_in, *ctx.avals_out)
  if jnp.issubdtype(x_aval, jnp.signedinteger):
    return arith_dialect.cmpi(si_pred, x, y)
  elif jnp.issubdtype(x_aval, jnp.unsignedinteger) or jnp.issubdtype(x_aval, jnp.bool):
    return arith_dialect.cmpi(ui_pred, x, y)
  elif jnp.issubdtype(x_aval, jnp.floating):
    return arith_dialect.cmpf(f_pred, x, y)
  else:
    raise NotImplementedError(
        f"{ctx.prim} does not support {x_aval.dtype} and {y_aval.dtype}"
    )
# Comparison registrations. Float predicates are ordered (O*) except ne,
# which uses the unordered UNE predicate (true when either operand is NaN).
for op, si_pred, ui_pred, f_pred in [
    (lax.eq_p, CmpIPred.eq, CmpIPred.eq, CmpFPred.OEQ),
    (lax.ne_p, CmpIPred.ne, CmpIPred.ne, CmpFPred.UNE),
    (lax.lt_p, CmpIPred.slt, CmpIPred.ult, CmpFPred.OLT),
    (lax.le_p, CmpIPred.sle, CmpIPred.ule, CmpFPred.OLE),
    (lax.gt_p, CmpIPred.sgt, CmpIPred.ugt, CmpFPred.OGT),
    (lax.ge_p, CmpIPred.sge, CmpIPred.uge, CmpFPred.OGE),
]:
  mosaic_lowering_rules[gpu_core.WGxWG_SEMANTICS][op] = partial(
      _comparison_lowering_rule_wg,
      si_pred=si_pred,
      ui_pred=ui_pred,
      f_pred=f_pred,
  )
@register_lowering_rule(lax.integer_pow_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(lax.integer_pow_p, mgpu.LoweringSemantics.Warpgroup)
def _integer_pow_lowering_rule(ctx: LoweringRuleContext, x, y):
  """Lowers ``x ** y`` for a static integer exponent ``y > 1``.

  Uses left-to-right binary exponentiation (square, then conditionally
  multiply per exponent bit), so only O(log y) multiplies are emitted.
  """
  [x_aval] = ctx.avals_in
  if y <= 1:
    raise NotImplementedError
  if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Lane:
    # FragmentedArray overloads ``*``.
    mul_op = operator.mul
  elif jnp.issubdtype(x_aval.dtype, jnp.integer):
    mul_op = arith_dialect.muli
  elif jnp.issubdtype(x_aval.dtype, jnp.floating):
    mul_op = arith_dialect.mulf
  else:
    raise NotImplementedError(f"Unsupported dtype {x_aval.dtype}")
  # Y is an integer. Here we start with res = x so the range is y-1
  res = x
  # Repeated doubling algorithm.
  for i in reversed(range(y.bit_length() - 1)):
    res = mul_op(res, res)
    if (y >> i) & 1:
      res = mul_op(res, x)
  return res
@register_lowering_rule(lax.square_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(lax.square_p, mgpu.LoweringSemantics.Warpgroup)
def _square_lowering_rule(ctx: LoweringRuleContext, x):
  """Lowers ``lax.square`` as ``x * x`` for both lowering semantics."""
  [x_aval] = ctx.avals_in
  if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Lane:
    x = _ensure_fa(x, x_aval.dtype)
    return x * x
  if jnp.issubdtype(x_aval.dtype, jnp.integer):
    return arith_dialect.muli(x, x)
  if jnp.issubdtype(x_aval.dtype, jnp.floating):
    return arith_dialect.mulf(x, x)
  raise NotImplementedError(f"Unsupported dtype {x_aval.dtype}")
@register_lowering_rule(lax.rsqrt_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(lax.rsqrt_p, mgpu.LoweringSemantics.Warpgroup)
def _rsqrt_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
  """Lowers ``lax.rsqrt``; approximate math is driven by the module context."""
  if accuracy is not None:
    raise NotImplementedError("Not implemented: accuracy")
  [x_aval] = ctx.avals_in
  if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Lane:
    return _ensure_fa(x, x_aval.dtype).rsqrt(approx=ctx.module_ctx.approx_math)
  fastmath = (
      arith_dialect.FastMathFlags.afn if ctx.module_ctx.approx_math else None
  )
  return math_dialect.rsqrt(
      _ensure_ir_value(x, x_aval.dtype), fastmath=fastmath
  )
@register_lowering_rule(lax.tanh_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(lax.tanh_p, mgpu.LoweringSemantics.Warpgroup)
def _tanh_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
  """Lowers ``lax.tanh``; approximate math is driven by the module context."""
  if accuracy is not None:
    raise NotImplementedError("Not implemented: accuracy")
  [x_aval] = ctx.avals_in
  if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Lane:
    return _ensure_fa(x, x_aval.dtype).tanh(approx=ctx.module_ctx.approx_math)
  fastmath = (
      arith_dialect.FastMathFlags.afn if ctx.module_ctx.approx_math else None
  )
  return math_dialect.tanh(_ensure_ir_value(x, x_aval.dtype), fastmath=fastmath)
def _logistic(x, accuracy):
if accuracy is not None:
raise NotImplementedError("Not implemented: accuracy")
return 1.0 / (1 + lax.exp(-x))
# ``logistic`` has no dedicated rule; lower it through its JAX definition for
# both lane- and warpgroup-level semantics.
mosaic_lowering_rules[gpu_core.LANExWG_SEMANTICS][lax.logistic_p] = _lower_fun(
    _logistic, multiple_results=False
)
mosaic_lowering_rules[gpu_core.WGxWG_SEMANTICS][lax.logistic_p] = (
    _lower_fun(_logistic, multiple_results=False)
)
@register_lowering_rule(lax.exp_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(lax.exp_p, mgpu.LoweringSemantics.Warpgroup)
def _exp_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
  """Lowers ``lax.exp``; approximate math is driven by the module context."""
  if accuracy is not None:
    raise NotImplementedError("Not implemented: accuracy")
  [x_aval] = ctx.avals_in
  if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Lane:
    return _ensure_fa(x, x_aval.dtype).exp(approx=ctx.module_ctx.approx_math)
  fastmath = (
      arith_dialect.FastMathFlags.afn if ctx.module_ctx.approx_math else None
  )
  return math_dialect.exp(_ensure_ir_value(x, x_aval.dtype), fastmath=fastmath)
@register_lowering_rule(lax.exp2_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(lax.exp2_p, mgpu.LoweringSemantics.Warpgroup)
def _exp2_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
  """Lowers ``lax.exp2``; approximate math is driven by the module context."""
  if accuracy is not None:
    raise NotImplementedError("Not implemented: accuracy")
  [x_aval] = ctx.avals_in
  if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Lane:
    return _ensure_fa(x, x_aval.dtype).exp2(approx=ctx.module_ctx.approx_math)
  fastmath = (
      arith_dialect.FastMathFlags.afn if ctx.module_ctx.approx_math else None
  )
  return math_dialect.exp2(_ensure_ir_value(x, x_aval.dtype), fastmath=fastmath)
@register_lowering_rule(lax.log_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(lax.log_p, mgpu.LoweringSemantics.Warpgroup)
def _log_lowering_rule(ctx: LoweringRuleContext, x, accuracy):
  """Lowers ``lax.log``; approximate math is driven by the module context."""
  if accuracy is not None:
    raise NotImplementedError("Not implemented: accuracy")
  [x_aval] = ctx.avals_in
  if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Lane:
    return _ensure_fa(x, x_aval.dtype).log(approx=ctx.module_ctx.approx_math)
  fastmath = (
      arith_dialect.FastMathFlags.afn if ctx.module_ctx.approx_math else None
  )
  return math_dialect.log(_ensure_ir_value(x, x_aval.dtype), fastmath=fastmath)
def _reduce_lowering_rule(op, ctx: LoweringRuleContext, x, *, axes, **kwargs):
[x_aval] = ctx.avals_in
match x.layout:
case mgpu.WGStridedFragLayout():
if set(axes) != set(range(x_aval.ndim)):
raise NotImplementedError("No support for axes yet")
# To relax the restriction below, you need to ensure sufficient
# synchronization with other places that use `scratch_view` (which at the
# time of writing is only `run_scoped`).
if ctx.module_ctx.axis_names.wg is not None:
raise NotImplementedError(
"No support for reduce_sum over all axes and multiple Pallas"
" threads"
)
scratch_ty = jax.ShapeDtypeStruct(shape=(4,), dtype=x_aval.dtype)
with ctx.module_ctx.scratch_view(scratch_ty) as scratch:
return x.reduce(op, axes, scratch)
case mgpu.TiledLayout():
if len(axes) != 1:
raise NotImplementedError("Multi-axis reductions not supported")
reduced_dim = x.layout.tiling.tile_dimension(axes[0])
if any(reduced_dim[d] for d in x.layout.partitioned_warp_dims):
scratch_ty = jax.ShapeDtypeStruct(shape=(REDUCE_SCRATCH_ELEMS,), dtype=x_aval.dtype)
ctx = ctx.module_ctx.scratch_view(scratch_ty)
else:
ctx = contextlib.nullcontext(None)
with ctx as scratch:
return x.reduce(op, axes[0], scratch=scratch)
case _:
raise NotImplementedError(f"Unsupported layout {x.layout}")
# Lane-level reductions share one implementation, parameterized by the
# FragmentedArray reduction op name.
register_lowering_rule(lax.reduce_sum_p, mgpu.LoweringSemantics.Lane)(
    functools.partial(_reduce_lowering_rule, "add")
)
register_lowering_rule(lax.reduce_max_p, mgpu.LoweringSemantics.Lane)(
    functools.partial(_reduce_lowering_rule, "max")
)
def _reduce_lowering_rule_wg(
    kind: vector_dialect.CombiningKind,
    acc: object,
    ctx: LoweringRuleContext,
    x,
    *,
    axes,
) -> ir.OpView:
  """Builds a vector-dialect reduction op for warpgroup semantics.

  *acc* is the initial accumulator value combined into the result; callers
  must pass the identity of *kind* so the reduction is unaffected by it.
  Reductions to a scalar use ``vector.reduction``; everything else uses
  ``vector.multi_reduction``.
  """
  [x_aval] = ctx.avals_in
  [out_aval] = ctx.avals_out
  x = _ensure_ir_value(x, x_aval.dtype)
  out_type = mgpu_utils.dtype_to_ir_type(out_aval.dtype)
  if not out_aval.shape:
    # Special-case: reducing to a scalar.
    if x_aval.ndim != 1:
      # Flatten to 1D, since vector.reduction only supports 1D inputs.
      x = vector_dialect.shape_cast(
          ir.VectorType.get([x_aval.size], out_type), x
      )
    return vector_dialect.ReductionOp(out_type, kind, x)
  acc = vector_dialect.broadcast(
      ir.VectorType.get(out_aval.shape, out_type),
      _ensure_ir_value(acc, out_aval.dtype),
  )
  return vector_dialect.MultiDimReductionOp(kind, x, acc, axes)
@register_lowering_rule(lax.reduce_sum_p, mgpu.LoweringSemantics.Warpgroup)
def _reduce_sum_lowering_rule_wg(ctx: LoweringRuleContext, x, *, axes,
                                 out_sharding):
  """Warpgroup lowering of ``reduce_sum`` via a vector ADD reduction."""
  op = _reduce_lowering_rule_wg(
      vector_dialect.CombiningKind.ADD, 0, ctx, x, axes=axes
  )
  # NOTE(review): the "offset" attribute records the currently-used SMEM byte
  # count on the reduction op — presumably so a later dialect pass can place
  # scratch memory after it; confirm against the consumer of this attribute.
  op.attributes["offset"] = ir.IntegerAttr.get(
      ir.IntegerType.get_signless(32), ctx.module_ctx.smem_used_bytes
  )
  return op.result
@register_lowering_rule(lax.reduce_max_p, mgpu.LoweringSemantics.Warpgroup)
def _reduce_max_lowering_rule_wg(ctx: LoweringRuleContext, x, *, axes):
  """Warpgroup lowering of ``reduce_max`` via a vector MAX reduction.

  The accumulator passed to the reduction must be the identity of max (the
  smallest representable value), mirroring the ``-inf`` used for floats.
  The previous code used ``np.iinfo(...).max`` for the integer cases, which
  is the absorbing element: every integer reduce_max would have returned the
  dtype's maximum regardless of the data.
  """
  [x_aval] = ctx.avals_in
  if jnp.issubdtype(x_aval.dtype, jnp.floating):
    kind = vector_dialect.CombiningKind.MAXIMUMF
    acc = float("-inf")
  elif jnp.issubdtype(x_aval.dtype, jnp.signedinteger):
    kind = vector_dialect.CombiningKind.MAXSI
    # Identity for signed max: the most negative representable value.
    acc = np.iinfo(x_aval.dtype).min
  elif jnp.issubdtype(x_aval.dtype, jnp.unsignedinteger):
    kind = vector_dialect.CombiningKind.MAXUI
    # Identity for unsigned max: 0 (== iinfo.min for unsigned dtypes).
    acc = np.iinfo(x_aval.dtype).min
  else:
    raise NotImplementedError(f"Unsupported dtype {x_aval.dtype}")
  return _reduce_lowering_rule_wg(kind, acc, ctx, x, axes=axes).result
def _block_id(ctx: LoweringRuleContext, dim: gpu_dialect.Dimension) -> ir.Value:
  """Returns the block ID along *dim*, undoing any cluster-induced scaling."""
  result = gpu_dialect.block_id(dim)
  cluster_size = ctx.launch_ctx.cluster_size
  if math.prod(cluster_size) == 1 or cluster_size[dim.value] == 1:
    return result
  # We scale the grid in the presence of clusters, so we need to scale the
  # block ID back here.
  return arith_dialect.divui(result, _as_index(cluster_size[dim.value]))
def _resolve_cluster_axis(axis_names: _AxisNames | None, axis_name: str):
if not axis_names:
raise LookupError(
"No axis names are available. Make sure you are using `pl.core_map`"
" with a `plgpu.Mesh`."
)
if not axis_names or axis_name not in axis_names.cluster:
raise LookupError(
f"Unknown cluster axis {axis_name}, available axes:"
f" {[*axis_names.cluster]}"
)
return gpu_dialect.Dimension(axis_names.cluster.index(axis_name))
def _is_block_local_scope(collective_axes: CollectiveAxesType,
axis_names: _AxisNames):
"""Returns whether the collective axes represents a block scope."""
if axis_names.wg is None:
return not collective_axes
else:
return collective_axes == (axis_names.wg,)
def _is_global_scope(collective_axes: CollectiveAxesType,
axis_names: _AxisNames):
"""Returns whether the collective axes represents a GPU global scope."""
return set(collective_axes) == set(axis_names)
def block_id_to_grid_id(ctx: LoweringRuleContext,
                        block_ids: Sequence[ir.Value],
                        axis_name: Hashable):
  """Maps a named logical grid axis to its index value for this block.

  With no squashed dims the axis maps directly to a hardware grid dimension.
  With squashed dims, the first two grid axes map to hardware dimensions and
  the remaining axes are unraveled out of the single ``Dimension.z`` ID.

  Note: the previous version tested ``if squashed_dims:`` twice and kept dead
  placeholder assignments for type checkers; the branches are merged here.
  """
  squashed_dims = ctx.module_ctx.squashed_dims
  axis_names = ctx.module_ctx.axis_names
  if squashed_dims:
    # We reversed the grid and cluster axes.
    # e.g. for the grid (a, b, c, d, wg)
    # squashed = (a, b) Mapped to Dimension.z (2)
    # unsquashed = (c, d) Mapped to Dimension.y (1) and Dimension.x (0)
    unsquashed_names = axis_names.grid[:2]
    squashed_names = axis_names.grid[2:]
    if axis_name in unsquashed_names:
      idx = unsquashed_names.index(axis_name)
      return block_ids[gpu_dialect.Dimension(idx)]
    assert axis_name in squashed_names
    # All squashed dimensions are mapped to Dimension.z.
    axis = squashed_names.index(axis_name)
    return _unravel_program_id(
        _as_index(block_ids[gpu_dialect.Dimension.z]), axis, squashed_dims
    )
  assert axis_name in axis_names.grid
  idx = axis_names.grid.index(axis_name)
  return block_ids[gpu_dialect.Dimension(idx)]
@register_lowering_rule(lax.axis_index_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(lax.axis_index_p, mgpu.LoweringSemantics.Lane, gpu_core.PrimitiveSemantics.Warp)
@register_lowering_rule(lax.axis_index_p, mgpu.LoweringSemantics.Warpgroup)
def _axis_index_rule(ctx: LoweringRuleContext, *, axis_name: Hashable):
  """Lowers ``lax.axis_index`` for warp, JAX mesh, warpgroup, cluster, and
  grid axes (resolved in that order)."""
  if ctx.module_ctx.primitive_semantics == gpu_core.PrimitiveSemantics.Warp:
    if axis_name == ctx.module_ctx.warp_axis_name:
      return mgpu.warp_idx(sync=True)
    raise ValueError(
        "Named axes can only refer to the warp axis name inside of core_map."
    )
  gpu_axis_names = ctx.module_ctx.axis_names
  jax_axis_names = getattr(ctx.module_ctx.mesh_info, "axis_names", ())
  if gpu_axis_names is None and not jax_axis_names:
    raise LookupError(
        "No axis names are available. Make sure you are using `pl.core_map`"
        " with a `plgpu.Mesh` or an appropriate JAX device mesh."
    )
  if axis_name not in itertools.chain(gpu_axis_names or (), jax_axis_names):
    # ``gpu_axis_names`` may be None here (only a JAX mesh in scope); guard
    # the message expansion so we raise LookupError instead of crashing on
    # ``[*None]``.
    gpu_axis_list = [*gpu_axis_names] if gpu_axis_names is not None else []
    raise LookupError(
        f"Axis {axis_name} does not refer to a GPU mesh axis (available axes:"
        f" {gpu_axis_list}) or a JAX mesh axis (available axes:"
        f" {[*jax_axis_names]})"
    )
  if axis_name in jax_axis_names:
    jax_mesh = ctx.module_ctx.mesh_info
    assert jax_mesh is not None
    device_id = ctx.launch_ctx.device_id()
    jax_mesh_shape = jax_mesh.mesh_shape
    axis_index = jax_axis_names.index(axis_name)
    i32 = ir.IntegerType.get_signless(32)
    axis_size = _ir_constant(jax_mesh_shape[axis_index], i32)
    minor_divisor = _ir_constant(
        np.prod(jax_mesh_shape[axis_index + 1 :], dtype=np.int32), i32
    )
    # (device_id // prod(minor dims)) % axis_size is the coordinate along
    # this mesh axis.
    return arith_dialect.remsi(arith_dialect.divsi(device_id, minor_divisor), axis_size)
  # We already checked that the axis is in scope and it wasn't a JAX mesh axis.
  assert gpu_axis_names is not None
  # We only deal with GPU axes from now on.
  axis_names = gpu_axis_names
  if axis_names.wg is not None and axis_name == axis_names.wg:
    return mgpu.warpgroup_idx(sync=True)
  if axis_name in axis_names.cluster:
    return arith_dialect.index_cast(
        ir.IntegerType.get_signless(32),
        gpu_dialect.cluster_block_id(
            gpu_dialect.Dimension(axis_names.cluster.index(axis_name))
        ),
    )
  block_ids = tuple(arith_dialect.index_cast(
      ir.IntegerType.get_signless(32),
      _block_id(ctx, dimension),
  ) for dimension in gpu_dialect.Dimension)
  return block_id_to_grid_id(ctx, block_ids, axis_name)
@register_lowering_rule(debugging.debug_print_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(
    debugging.debug_print_p,
    mgpu.LoweringSemantics.Lane,
    gpu_core.PrimitiveSemantics.Warp,
)
@register_lowering_rule(
    debugging.debug_print_p, mgpu.LoweringSemantics.Warpgroup
)
def _debug_print_lowering_rule(
    ctx: LoweringRuleContext,
    *args,
    fmt,
    ordered,
    partitioned,
    in_tree,
    static_args,
    np_printoptions,
    has_placeholders,
    logging_record,
):
  """Lowers ``debug_print``: supports all-scalar arguments or exactly one
  array argument; ordered prints and keyword arguments are rejected."""
  del partitioned, np_printoptions, has_placeholders
  if ordered:
    raise NotImplementedError("Ordered debug_print is not supported on Pallas.")
  args, kwargs = debugging.merge_callback_args(in_tree, args, static_args)
  if kwargs:
    raise ValueError(
        "Only positional arguments are supported by debug_print on Pallas."
    )
  primitives.check_debug_print_format(fmt, *args)
  # Warp-level lowering prints from a single warp rather than the warpgroup.
  scope = mgpu.ThreadSubset.WARPGROUP
  if ctx.module_ctx.primitive_semantics == gpu_core.PrimitiveSemantics.Warp:
    scope = mgpu.ThreadSubset.WARP
  if not any(aval.shape for aval in ctx.avals_in):
    # All-scalar case.
    mgpu.debug_print(
        fmt,
        *(
            _ensure_ir_value(arg, aval.dtype)
            for arg, aval in zip(args, ctx.avals_in)
        ),
        scope=scope
    )
  elif len(ctx.avals_in) == 1:
    # Single array argument.
    [arg] = args
    if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Warpgroup:
      mgpu.dialect.debug_print(fmt, arg)
    else:
      arg.debug_print(fmt)
  else:
    raise NotImplementedError(
        "debug_print only supports printing of scalar values, or a single array"
        " value when using the Mosaic GPU backend."
    )
  return ()
@register_lowering_rule(primitives.run_scoped_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(primitives.run_scoped_p, mgpu.LoweringSemantics.Warpgroup)
def _run_scoped_lowering_rule(
    ctx: LoweringRuleContext, *consts, jaxpr: jax_core.Jaxpr, collective_axes
):
  """Lowers ``run_scoped``: allocates each scoped resource (WGMMA
  accumulators, GMEM semaphores, (cluster) barriers, SMEM/TMEM scratch),
  lowers the body jaxpr with them, and releases the allocations on exit.

  Accumulator refs are discharged into values; all other allocations are
  passed to the body as refs. In multithreaded kernels, allocations other
  than accumulators and semaphores must be thread-collective.
  """
  input_refs = []
  should_discharge = []
  wg_axis = ctx.module_ctx.axis_names.wg
  is_multithreaded = wg_axis is not None
  is_thread_collective = is_multithreaded and collective_axes == (wg_axis,)
  # Make sure everyone has exited previous scoped allocations. Note that we
  # don't synchronize when we exit the allocation, but only when we might want
  # to reuse its memory again.
  if collective_axes and collective_axes != (wg_axis,):
    raise ValueError(
        "Only thread-collective allocations are supported in run_scoped."
    )
  if is_multithreaded and is_thread_collective:
    gpu_dialect.barrier()
  with contextlib.ExitStack() as alloc_stack:
    for v in jaxpr.invars:
      aval = cast(ShapedAbstractValue, v.aval)
      if isinstance(aval, gpu_core.WGMMAAbstractAccumulatorRef):
        if collective_axes:
          raise ValueError(
              "WGMMA accumulators can only be allocated non-collectively. Hint:"
              " remove collective_axes from run_scoped. If other allocations"
              " are performed as well, split the run_scoped into two."
          )
        is_signed = mgpu_utils.is_signed(aval.dtype)
        if is_signed is not None and not is_signed:
          raise ValueError(
              "Invalid WGMMA accumulator dtype for s8/i8 WGMMA. "
              f"Expected signed integer, but got {aval.dtype}."
          )
        dtype = mlir.dtype_to_ir_type(aval.dtype)
        if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Lane:
          input_refs.append(
              mgpu.WGMMAAccumulator.zero(*aval.shape, dtype, is_signed=is_signed)
          )
        else:
          # Warpgroup semantics: materialize a zero vector as the accumulator.
          if ir.IntegerType.isinstance(dtype):
            zero = arith_dialect.constant(dtype, 0)
          else:
            zero = arith_dialect.constant(dtype, 0.0)
          acc = vector_dialect.broadcast(
              ir.VectorType.get(aval.shape, dtype), zero
          )
          acc = mgpu.dialect.optimization_barrier([acc])
          nvvm_dialect.wgmma_fence_aligned()
          input_refs.append(acc)
        should_discharge.append(True)
        continue
      if (
          isinstance(aval, state_types.AbstractRef)
          and aval.memory_space == gpu_core.GMEM
          and jnp.issubdtype(aval.dtype, pallas_core.semaphore)
      ):
        # GMEM semaphore allocation.
        input_ref = alloc_stack.enter_context(
            ctx.module_ctx.reserve_semaphores(
                aval.shape, collective_axes=collective_axes
            )
        )
        input_refs.append(input_ref)
        should_discharge.append(False)
        continue
      # All other allocations must be made collectively across all threads.
      if is_multithreaded and not is_thread_collective:
        raise NotImplementedError(
            "Only thread-collective allocations are supported in multithreaded"
            " kernels. Hint: add"
            f" collective_axes={ctx.module_ctx.axis_names.wg} to your"
            " run_scoped if you intend all threads to share the same"
            f" allocation (currently collective_axes={collective_axes})."
        )
      if isinstance(aval.dtype, gpu_core.BarrierType):
        multiplier = (1 if aval.dtype.orders_tensor_core else
                      ctx.estimator_ctx.arrival_multiplier)
        barrier_ref = alloc_stack.enter_context(
            ctx.module_ctx.reserve_barrier(
                mgpu.Barrier(
                    aval.dtype.num_arrivals * multiplier,
                    *aval.shape,
                )
            )
        )
        input_refs.append(barrier_ref)
        should_discharge.append(False)
        continue
      if isinstance(aval.dtype, gpu_core.ClusterBarrierType):
        # Resolve the named cluster axes to hardware dimensions.
        collective_dims = jax.tree.map(
            lambda axis: _resolve_cluster_axis(ctx.module_ctx.axis_names, axis),
            aval.dtype.collective_axes,
        )
        barrier_ref = alloc_stack.enter_context(
            ctx.module_ctx.reserve_barrier(
                mgpu.ClusterBarrier(collective_dims, aval.dtype.num_arrivals, *aval.shape)
            )
        )
        input_refs.append(barrier_ref)
        should_discharge.append(False)
        continue
      if not isinstance(aval, state_types.AbstractRef):
        raise ValueError(f"Can't convert to ref: {aval}")
      if aval.memory_space == gpu_core.SMEM:
        input_ref = alloc_stack.enter_context(
            ctx.module_ctx.scratch_view(
                jax.ShapeDtypeStruct(shape=aval.shape, dtype=aval.dtype)
            )
        )
        input_refs.append(input_ref)
        should_discharge.append(False)
      elif aval.memory_space == gpu_core.TMEM:
        input_ref = alloc_stack.enter_context(
            ctx.module_ctx.alloc_tmem(
                jax.ShapeDtypeStruct(shape=aval.shape, dtype=aval.dtype),
                layout=aval.layout,
            )
        )
        input_refs.append(input_ref)
        should_discharge.append(False)
    if any(should_discharge):
      # We convert consts to args, because we only have ir.Values and
      # not JAX values during lowering. discharge_state() produces JAX
      # values for the arguments but expects them to be provided for the
      # consts. We also don't want to wrap the values in refs.
      no_const_jaxpr = pe.convert_constvars_jaxpr(jaxpr)
      should_discharge = [False] * len(consts) + should_discharge
      discharged_jaxpr, _ = discharge.discharge_state(no_const_jaxpr, (), should_discharge=should_discharge)
      new_input_vals = (*consts, *input_refs)
      outs = lower_jaxpr_to_mosaic_gpu(
          ctx.module_ctx,
          ctx.launch_ctx,
          discharged_jaxpr,
          new_input_vals,
          (),
      )
      # Discharge appends to the output the refs that got discharged.
      outs = outs[:-sum(should_discharge)]
    else:
      outs = lower_jaxpr_to_mosaic_gpu(
          ctx.module_ctx,
          ctx.launch_ctx,
          jaxpr,
          input_refs,
          consts,
      )
  assert len(outs) == len(jaxpr.outvars), (jaxpr, outs)
  return outs
@_register_resource_estimator(primitives.get_global_p)
def _get_global_resource_estimator(
    ctx: ResourceEstimatorContext, *, what
) -> Resources:
  """Resource estimate for ``get_global``: GMEM semaphores reserved
  collectively across all mesh axes. Only semaphores are supported."""
  if what.memory_space == gpu_core.GMEM and jnp.issubdtype(
      what.dtype, pallas_core.semaphore
  ):
    collective_axes = tuple(ctx.axis_names)
    return Resources(scoped_gmem_semaphores={collective_axes: what.size})
  raise NotImplementedError(f"get_global only supports semaphores, got {what}")
@register_lowering_rule(primitives.get_global_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(
    primitives.get_global_p, mgpu.LoweringSemantics.Warpgroup
)
def _get_global_lowering_rule(ctx: LoweringRuleContext, *, what):
  """Lowers ``get_global`` by reserving GMEM semaphores over all axes."""
  if what.memory_space == gpu_core.GMEM and jnp.issubdtype(
      what.dtype, pallas_core.semaphore
  ):
    collective_axes = tuple(ctx.module_ctx.axis_names)
    # NOTE(review): the reservation context manager is entered but never
    # exited here — presumably the reservation is meant to last for the rest
    # of the kernel; confirm against reserve_semaphores' contract.
    return ctx.module_ctx.reserve_semaphores(
        what.shape, collective_axes=collective_axes
    ).__enter__()
  raise NotImplementedError(f"get_global only supports semaphores, got {what}")
@register_lowering_rule(discharge.run_state_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(discharge.run_state_p, mgpu.LoweringSemantics.Warpgroup)
def _run_state_lowering_rule(
    ctx: LoweringRuleContext,
    *args,
    jaxpr: jax_core.Jaxpr,
    which_linear: tuple[bool, ...],
    is_initialized: tuple[bool, ...],
):
  """Lowers ``run_state`` by discharging WGMMA accumulator refs (and any ref
  whose output aval is not a ref) before lowering the body jaxpr.

  Returns one output per input: the discharged value for discharged refs,
  and the original ref otherwise. Requires all refs to be initialized and at
  least one ref to be discharged.
  """
  del which_linear
  # TODO(apaszke): This should be unified with run_scoped.
  if not all(is_initialized):
    raise NotImplementedError("Uninitialized Refs are not supported in lowering of run_state.")
  should_discharge = []
  new_input_vals = []
  for arg, v, out_aval in zip(args, jaxpr.invars, ctx.avals_out):
    aval = v.aval
    if isinstance(aval, gpu_core.WGMMAAbstractAccumulatorRef):
      if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Warpgroup:
        arg = mgpu.dialect.optimization_barrier([arg])
        nvvm_dialect.wgmma_fence_aligned()
        new_input_vals.append(arg)
      else:
        new_input_vals.append(mgpu.WGMMAAccumulator.from_registers(arg))
      should_discharge.append(True)
      assert isinstance(out_aval, jax_core.ShapedArray)
    else:
      new_input_vals.append(arg)
      should_discharge.append(not isinstance(out_aval, state_types.AbstractRef))
  if not any(should_discharge):
    # (Error-message typo fixed: was "accumulator to in run_state".)
    raise NotImplementedError(
        "Expected at least one accumulator in run_state."
    )
  discharged_jaxpr, new_consts = discharge.discharge_state(
      jaxpr, (), should_discharge=should_discharge
  )
  assert not new_consts
  outs = lower_jaxpr_to_mosaic_gpu(
      ctx.module_ctx, ctx.launch_ctx, discharged_jaxpr, new_input_vals, ()
  )
  # Await the accumulators and extract their final values.
  nvvm_dialect.wgmma_wait_group_sync_aligned(0)
  outs = [
      out.value if isinstance(out, mgpu.WGMMAAccumulator) else out
      for out in outs
  ]
  # Blend the discharge results with refs we closed over. I don't fully
  # understand the reasons behind this calling convention, but sharadmv@ has
  # assured me that this is ok.
  outs_it = iter(outs)
  return [next(outs_it) if d else a for d, a in zip(should_discharge, args)]
def _lower_jaxpr_to_for_loop(
ctx: LoweringRuleContext,
jaxpr: jax_core.Jaxpr,
start: ir.Value,
length: int | ir.Value,
consts,
*args,
has_loop_index: bool,
unroll: int | None = None,
):
_consts_avals, arg_avals = util.split_list(ctx.avals_in, [len(consts)])
arg_avals = arg_avals[has_loop_index:]
out_avals = []
if arg_avals:
out_avals = ctx.avals_out[-len(arg_avals):]
is_acc = [isinstance(v, mgpu.WGMMAAccumulator) for v in args]
def as_values(vals, avals):
if is_acc != [isinstance(v, mgpu.WGMMAAccumulator) for v in vals]:
raise ValueError("Unexpected loop carry w.r.t. accumulators.")
_ensure = (
_ensure_fa
if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Lane
else _ensure_ir_value
)
return [
v if a else _ensure(v, av.dtype)
for a, v, av in zip(is_acc, vals, avals)
]
def loop(base_loop_index, body_args):
outs = body_args
if unroll is not None:
base_loop_index = arith_dialect.muli(
base_loop_index, _ir_constant(unroll, start.type)
)
base_loop_index = arith_dialect.addi(base_loop_index, start)
for step in range(unroll or 1):
if has_loop_index:
loop_index = arith_dialect.addi(
base_loop_index, _ir_constant(step, start.type)
)
jaxpr_args = [*consts, loop_index, *outs]
else:
jaxpr_args = [*consts, *outs]
outs = lower_jaxpr_to_mosaic_gpu(
ctx.module_ctx, ctx.launch_ctx, jaxpr, jaxpr_args
)
return as_values(outs, out_avals)
if unroll is not None:
if not isinstance(length, int):
raise NotImplementedError(
"``length`` must be an integer when ``unroll` is specified, got"
f" {length}"
)
if length % unroll:
# TODO(slebedev): Emit an epilogue taking care of the remaining steps.
raise NotImplementedError(
f"``unroll`` must divide ``length``, got {unroll=} and {length=}"
)
if unroll == length:
# Special-case: the loop is fully unrolled.
return loop(_ir_constant(0, start.type), as_values(args, arg_avals))
return mgpu.fori(
_ir_constant(length // unroll, start.type), as_values(args, arg_avals)
)(loop).results
else:
if not isinstance(length, ir.Value):
length = _ir_constant(length, start.type)
return mgpu.fori(length, as_values(args, arg_avals))(loop).results
@register_lowering_rule(lax.scan_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(lax.scan_p, mgpu.LoweringSemantics.Warpgroup)
@register_lowering_rule(lax.scan_p, mgpu.LoweringSemantics.Lane,
gpu_core.PrimitiveSemantics.Warp)
def _scan_lowering_rule(
ctx: LoweringRuleContext,
*args,
jaxpr: jax_core.ClosedJaxpr,
linear: tuple[bool, ...],
length: int,
reverse: bool,
unroll: bool | int,
num_consts: int,
num_carry: int,
_split_transpose: bool,
):
# Can only handle fori_loop-like scans.
if (num_extensive := len(args) - num_consts - num_carry) or reverse:
raise NotImplementedError
del linear, num_extensive, reverse
jaxpr, jaxpr_consts = jaxpr.jaxpr, jaxpr.consts
if jaxpr_consts:
raise NotImplementedError
del jaxpr_consts
jaxpr, has_loop_index = pallas_utils.pattern_match_scan_to_fori_loop(
jaxpr, num_consts, num_carry
)
consts, args = util.split_list(args, [num_consts])
_consts_avals, arg_avals = util.split_list(ctx.avals_in, [num_consts])
if has_loop_index:
start, *args = args
index_aval, *_ = arg_avals
start: ir.Value = _ensure_ir_value(start, index_aval.dtype)
else:
start = _i32_constant(0)
for_out = _lower_jaxpr_to_for_loop(
ctx,
jaxpr,
start,
length,
consts,
*args,
has_loop_index=has_loop_index,
unroll=unroll,
)
if has_loop_index:
# Need to return the final loop index value if the outer scan expects
# it as an output.
loop_index = arith_dialect.addi(start, _ir_constant(length, start.type))
return [loop_index, *for_out]
return for_out
def _lower_while_via_fori(
ctx: LoweringRuleContext,
*args,
fori_jaxpr,
cond_nconsts,
body_nconsts,
):
assert not fori_jaxpr.constvars
# The pattern matcher looks for conditions with no constants.
assert cond_nconsts == 0
# Reflect the changes of the pattern matcher to the context.
lb_aval, ub_aval, *_ = ctx.avals_in[cond_nconsts + body_nconsts:]
ctx = ctx.replace(
avals_in=(
*ctx.avals_in[cond_nconsts:body_nconsts],
ctx.avals_in[body_nconsts], # the index
*ctx.avals_in[body_nconsts + 2 :],
),
avals_out=tuple(ctx.avals_out[2:]),
)
_, consts, (lb, ub, *args) = util.split_list(
args, [cond_nconsts, body_nconsts]
)
lb = _ensure_ir_value(lb, lb_aval.dtype)
ub = _ensure_ir_value(ub, ub_aval.dtype)
for_out = _lower_jaxpr_to_for_loop(
ctx,
fori_jaxpr,
lb,
arith_dialect.subi(ub, lb),
consts,
*args,
has_loop_index=True,
)
return ub, ub, *for_out
@register_lowering_rule(lax.while_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(lax.while_p, *gpu_core.LANExWARP_SEMANTICS)
@register_lowering_rule(lax.while_p, mgpu.LoweringSemantics.Warpgroup)
def _while_lowering_rule(
ctx: LoweringRuleContext,
*args,
cond_jaxpr,
body_jaxpr,
cond_nconsts,
body_nconsts,
):
# First try to lower via a simpler fori loop, which may optimize better.
fori_jaxpr, _ = pallas_utils.pattern_match_while_to_fori_loop(
cond_jaxpr, cond_nconsts, body_jaxpr, body_nconsts
)
if fori_jaxpr is not None:
return _lower_while_via_fori(
ctx,
*args,
fori_jaxpr=fori_jaxpr,
cond_nconsts=cond_nconsts,
body_nconsts=body_nconsts,
)
_is_acc = lambda x: isinstance(x, mgpu.WGMMAAccumulator)
_ensure = _ensure_ir_value
if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Lane:
_ensure = lambda v, aval: v if _is_acc(v) else _ensure_fa(v, aval.dtype)
# If we fail conversion to fori, fallback to an ordinary while loop.
cond_consts, body_consts, carry = util.split_list(
args, [cond_nconsts, body_nconsts]
)
_cond_avals, _body_avals, carry_avals = util.split_list(
ctx.avals_in, [cond_nconsts, body_nconsts]
)
carry = [*map(_ensure, carry, carry_avals)]
# Flatten the carry to get a concatenated list of registers from each FA.
# Note that the treedef is also used below to unflatten the body results.
flat_carry, carry_treedef = jax.tree.flatten(carry)
flat_carry_types = [a.type for a in flat_carry]
while_op = scf_dialect.WhileOp(flat_carry_types, flat_carry)
before_block = while_op.before.blocks.append(*flat_carry_types)
with ir.InsertionPoint.at_block_begin(before_block):
cond_args = [*cond_consts, *carry_treedef.unflatten(before_block.arguments)]
[cond] = lower_jaxpr_to_mosaic_gpu(
ctx.module_ctx, ctx.launch_ctx, cond_jaxpr.jaxpr, cond_args
)
scf_dialect.condition(
_ensure_ir_value(cond, *cond_jaxpr.out_avals), before_block.arguments
)
after_block = while_op.after.blocks.append(*flat_carry_types)
with ir.InsertionPoint.at_block_begin(after_block):
body_args = [*body_consts, *carry_treedef.unflatten(after_block.arguments)]
loop_out = lower_jaxpr_to_mosaic_gpu(
ctx.module_ctx, ctx.launch_ctx, body_jaxpr.jaxpr, body_args
)
loop_out = [*map(_ensure, loop_out, carry_avals)]
if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Lane:
for idx, (carry_fa, out_fa) in enumerate(zip(carry, loop_out)):
if _is_acc(carry_fa) != _is_acc(out_fa):
raise ValueError(
f"The loop body output has unexpected accumulator type:"
f" output[{idx}] is {out_fa}, when it should be {carry_fa}."
)
if not _is_acc(out_fa) and carry_fa.layout != out_fa.layout:
raise ValueError(
f"The loop body output has unexpected layout: output[{idx}] has"
f" layout {out_fa.layout}, when it should be {carry_fa.layout}."
)
scf_dialect.yield_(
carry_treedef.flatten_up_to(loop_out) if loop_out else []
)
return carry_treedef.unflatten(list(while_op.results))
@register_lowering_rule(lax.cond_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(lax.cond_p,
mgpu.LoweringSemantics.Lane, gpu_core.PrimitiveSemantics.Warp)
@register_lowering_rule(lax.cond_p, mgpu.LoweringSemantics.Warpgroup)
def _cond_lowering_rule(ctx: LoweringRuleContext, index, *args, branches,
**params):
if params:
raise NotImplementedError("platform_dependent cond")
index_aval, *_arg_avals = ctx.avals_in
def _yielded_values(outs, avals):
ret = []
for out, aval in zip(outs, avals):
if isinstance(out, (mgpu.WGMMAAccumulator, mgpu.FragmentedArray)):
ret.append(out)
else:
ret.append(_ensure_ir_value(out, aval.dtype))
return ret
# We need to know the result types ahead of time to construct the switch
# operation. Below we lower the first branch in a throw-away module to
# extract them.
with ir.InsertionPoint(ir.Module.create().body):
outs = lower_jaxpr_to_mosaic_gpu(
ctx.module_ctx, ctx.launch_ctx, branches[0].jaxpr, args
)
yielded_types = [
v.type for v in jax.tree.leaves(_yielded_values(outs, ctx.avals_out))
]
del outs
# TODO(apaszke): Remove once minimal jaxlib is 0.8.2
idx_switch_params = inspect.signature(scf_dialect.IndexSwitchOp).parameters
if (mlir_compat := "num_caseRegions" in idx_switch_params):
switch_op = scf_dialect.IndexSwitchOp(
yielded_types,
_as_index(_ensure_ir_value(index, index_aval.dtype)),
ir.DenseI64ArrayAttr.get(range(len(branches) - 1)),
num_caseRegions=len(branches) - 1,
)
else:
switch_op = scf_dialect.IndexSwitchOp(
yielded_types,
_as_index(_ensure_ir_value(index, index_aval.dtype)),
range(len(branches) - 1),
)
# ``RegionSequence`` in MLIR does not support slicing, so the
# auto-generated Python bindings for ``caseRegions`` fail at runtime!
# We convert it to a list to work around that.
regions = list(switch_op.regions)
# Move the default region to the back.
regions = regions[1:] + regions[:1]
treedef = None
for branch, region in zip(branches, regions):
block = region.blocks.append() if mlir_compat else region.blocks[0]
with ir.InsertionPoint(block):
outs = lower_jaxpr_to_mosaic_gpu(
ctx.module_ctx, ctx.launch_ctx, branch.jaxpr, args, consts=branch.consts
)
yielded_leaves, yielded_treedef = jax.tree.flatten(_yielded_values(outs, ctx.avals_out))
if treedef is None:
treedef = yielded_treedef
else:
assert treedef == yielded_treedef
scf_dialect.yield_(yielded_leaves)
assert treedef is not None
return treedef.unflatten(list(switch_op.results))
@register_lowering_rule(lax.bitcast_convert_type_p, mgpu.LoweringSemantics.Lane)
@register_lowering_rule(
lax.bitcast_convert_type_p, mgpu.LoweringSemantics.Warpgroup
)
def _bitcast_convert_type_lowering_rule(
ctx: LoweringRuleContext, x, *, new_dtype
):
[x_aval] = ctx.avals_in
src_elem_type = mgpu_utils.dtype_to_ir_type(x_aval.dtype)
dst_elem_type = mgpu_utils.dtype_to_ir_type(new_dtype)
assert isinstance(src_elem_type, (ir.IntegerType, ir.FloatType))
assert isinstance(dst_elem_type, (ir.IntegerType, ir.FloatType))
if src_elem_type.width != dst_elem_type.width:
raise NotImplementedError(
f"Cannot bitcast from {x_aval.dtype} to {new_dtype} because they"
" have different widths"
)
if ctx.module_ctx.lowering_semantics == mgpu.LoweringSemantics.Warpgroup:
x = _ensure_ir_value(x, x_aval.dtype)
return arith_dialect.bitcast(
ir.VectorType.get(x_aval.shape, dst_elem_type), x
)
x = _ensure_fa(x, x_aval.dtype)
output_is_signed = mgpu_utils.is_signed(new_dtype)
return mgpu.FragmentedArray.bitcast(
x, dst_elem_type, output_is_signed=output_is_signed
)
@register_lowering_rule(lax.optimization_barrier_p, mgpu.LoweringSemantics.Lane)
def _optimization_barrier_lowering(ctx: LoweringRuleContext, *args):
result = mgpu.optimization_barrier(
*(_ensure_fa(arg, aval.dtype) for arg, aval in zip(args, ctx.avals_in))
)
return (result,) if len(ctx.avals_in) == 1 else result
@register_lowering_rule(
lax.optimization_barrier_p, mgpu.LoweringSemantics.Warpgroup
)
def _optimization_barrier_lowering_wg(ctx: LoweringRuleContext, *args):
result = mgpu.dialect.optimization_barrier([
_ensure_ir_value(arg, aval.dtype) for arg, aval in zip(args, ctx.avals_in)
])
return (result,) if len(ctx.avals_in) == 1 else result
@register_lowering_rule(pallas_core.core_map_p, mgpu.LoweringSemantics.Lane)
def _core_map_lowering_rule(
ctx: LoweringRuleContext,
*args,
jaxpr,
mesh,
**_,
):
if isinstance(mesh, gpu_core.WarpMesh):
# A core_map over a WarpMesh represents a fork/join over individual
# warps in a warpgroup.
if (ctx.module_ctx.warp_axis_name or
ctx.module_ctx.primitive_semantics == gpu_core.PrimitiveSemantics.Warp):
raise LoweringError(
"Cannot nest core_maps. Already under core_map with warp_axis_name "
f"{ctx.module_ctx.warp_axis_name}.")
module_ctx = dataclasses.replace(
ctx.module_ctx,
warp_axis_name=mesh.axis_name,
primitive_semantics=gpu_core.PrimitiveSemantics.Warp,
)
for aval_in in ctx.avals_in:
if isinstance(aval_in, jax_core.ShapedArray) and aval_in.shape:
raise LoweringError(
"Can only close over scalars and Refs when using core_map with "
f"WarpMesh. Found array of shape {aval_in}."
)
# We allow the warps to schedule async copies without synchronizing with
# other warps, so we need to add a barrier here to make sure all reads and
# writes have completed.
if ctx.module_ctx.auto_barriers:
mgpu.warpgroup_barrier()
_ = lower_jaxpr_to_mosaic_gpu(
module_ctx,
ctx.launch_ctx,
jaxpr,
args=(),
consts=args,
)
if ctx.module_ctx.auto_barriers:
# We need to ensure that any effects produced by one warp
# (e.g. async copies) are observable by all other warps.
mgpu.warpgroup_barrier()
return []
raise ValueError(f"Unsupported mesh: {mesh}")
def _bcast(
x: Any,
y: Any,
x_aval: ShapedAbstractValue,
y_aval: ShapedAbstractValue,
out_aval: ShapedAbstractValue,
) -> tuple[mgpu.FragmentedArray, mgpu.FragmentedArray]:
if not isinstance(x, mgpu.FragmentedArray):
x_dtype = x_aval.dtype
if x_aval.weak_type:
x_dtype = y_aval.dtype
x = _ensure_fa(x, x_dtype)
if not isinstance(y, mgpu.FragmentedArray):
y_dtype = y_aval.dtype
if y_aval.weak_type:
y_dtype = x_aval.dtype
y = _ensure_fa(y, y_dtype)
if x_aval.shape != out_aval.shape:
x = x.broadcast(out_aval.shape)
if y_aval.shape != out_aval.shape:
y = y.broadcast(out_aval.shape)
return x, y
def _ensure_fa(x: object, dtype: jnp.dtype) -> mgpu.FragmentedArray:
if isinstance(x, mgpu.FragmentedArray):
assert x.mlir_dtype == mgpu_utils.dtype_to_ir_type(dtype)
return x
return mgpu.FragmentedArray.splat(
_ensure_ir_value(x, dtype), (), is_signed=mgpu_utils.is_signed(dtype)
)
def _bcast_wg(
x: Any,
y: Any,
x_aval: ShapedAbstractValue,
y_aval: ShapedAbstractValue,
out_aval: ShapedAbstractValue,
) -> tuple[ir.Value, ir.Value]:
"""Ensures that ``x`` and ``y`` have the expected shapes and dtypes.
More specifically, the inputs are converted to vectors of the same dtype
as ``x_aval`` and ``y_aval``, and broadcasted to the output shape
if necessary.
"""
if not out_aval.shape:
return _ensure_ir_value(x, x_aval.dtype), _ensure_ir_value(y, y_aval.dtype)
x_dtype = x_aval.dtype
if not isinstance(x, ir.Value):
if x_aval.weak_type:
x_dtype = y_aval.dtype
x = _ensure_ir_value(x, x_dtype)
y_dtype = y_aval.dtype
if not isinstance(y, ir.Value):
if y_aval.weak_type:
y_dtype = x_aval.dtype
y = _ensure_ir_value(y, y_dtype)
if not ir.VectorType.isinstance(x.type):
assert not x_aval.shape
x = vector_dialect.broadcast(
ir.VectorType.get(out_aval.shape, mgpu_utils.dtype_to_ir_type(x_dtype)),
x,
)
elif x_aval.shape != out_aval.shape:
raise NotImplementedError("Unsupported broadcast")
if not ir.VectorType.isinstance(y.type):
assert not y_aval.shape
y = vector_dialect.broadcast(
ir.VectorType.get(out_aval.shape, mgpu_utils.dtype_to_ir_type(y_dtype)),
y,
)
elif y_aval.shape != out_aval.shape:
raise NotImplementedError("Unsupported broadcast")
return x, y
def _ensure_ir_value(x: Any, dtype: jnp.dtype) -> ir.Value:
if isinstance(x, ir.Value):
mlir_dtype = mgpu_utils.dtype_to_ir_type(dtype)
if ir.VectorType.isinstance(x.type):
assert ir.VectorType(x.type).element_type == mlir_dtype
else:
assert x.type == mlir_dtype, (x.type, mlir_dtype)
return x
elif isinstance(x, mgpu.FragmentedArray):
assert x.mlir_dtype == mgpu_utils.dtype_to_ir_type(dtype)
if isinstance(x.layout, mgpu.WGSplatFragLayout):
return x.registers.item()
raise NotImplementedError(f"Unsupported layout: {x.layout}")
return _ir_constant(x, mgpu_utils.dtype_to_ir_type(dtype))
def _ensure_ir_value_device_id(device_id: Any) -> ir.Value:
ensure_i32 = functools.partial(_ensure_ir_value, dtype=jnp.int32)
if isinstance(device_id, tuple):
return tuple(map(ensure_i32, device_id))
if isinstance(device_id, dict):
return {k: ensure_i32(v) for k, v in device_id.items()}
return ensure_i32(device_id)
def _ir_constant(v: object, t: ir.Type) -> ir.Value:
if isinstance(
v, (np.number, np.ndarray, int, float, literals.TypedNdArray)
):
if isinstance(t, (ir.IntegerType, ir.IndexType)):
v = int(v)
else:
assert isinstance(t, ir.FloatType)
v = float(v)
return arith_dialect.constant(t, v)
raise NotImplementedError(f"Unsupported constant: {v!r}")
def _i32_constant(v: int) -> ir.Value:
if v < jnp.iinfo(jnp.int32).min or v > jnp.iinfo(jnp.int32).max:
raise ValueError(f"Integer constant out of range for i32: {v}")
return arith_dialect.constant(ir.IntegerType.get_signless(32), v)
def _i64_constant(v: int) -> ir.Value:
if v < jnp.iinfo(jnp.int64).min or v > jnp.iinfo(jnp.int64).max:
raise ValueError(f"Integer constant out of range for i64: {v}")
return arith_dialect.constant(ir.IntegerType.get_signless(64), v)
def _as_index(v: object) -> ir.Value:
match v:
case int():
return arith_dialect.constant(ir.IndexType.get(), v)
case ir.Value() if ir.IndexType.isinstance(v.type):
return v
case ir.Value() if ir.IntegerType.isinstance(v.type):
return arith_dialect.index_cast(ir.IndexType.get(), v)
case mgpu.FragmentedArray(layout=mgpu.WGSplatFragLayout()):
return _as_index(v.registers.item())
case literals.TypedNdArray() if (
np.issubdtype(v.dtype, np.integer) and v.ndim == 0
):
return arith_dialect.constant(ir.IndexType.get(), int(v))
case _:
raise ValueError(f"Unsupported index: {v} of type {type(v)}")
def merge_indexers(
indexers: Sequence[indexing.NDIndexer]) -> indexing.NDIndexer:
"""Merges multiple indexers into a single indexer.
This function computes a new indexer such that applying the
new indexer produces the same result as applying the sequence
of input indexers in order from first-to-last.
"""
if len(indexers) == 0:
raise ValueError("Cannot merge empty list of indexers")
if len(indexers) == 1:
return indexers[0]
root_shape = indexers[0].shape
current_indices = [indexing.Slice(0, size, 1) for size in root_shape]
removed_dimensions = set()
for indexer in indexers:
if indexer.int_indexer_shape:
raise NotImplementedError()
def _ensure_idx_fa(x: Any) -> mgpu.FragmentedArray:
i32 = ir.IntegerType.get_signless(32)
if isinstance(x, ir.Value):
# TODO(cperivol): We assume all indices are signed. We should
# look at the JAX avals to see if the integers are signed or
# not to figure out is_signed.
is_signed = False if ir.IntegerType.isinstance(x.type) else None
return mgpu.FragmentedArray.splat(x, (), is_signed=is_signed).astype(
i32, is_signed=False
)
if isinstance(x, mgpu.FragmentedArray):
return x.astype(i32, is_signed=False)
if isinstance(x, int):
return mgpu.FragmentedArray.splat(mgpu.c(x, i32), (), is_signed=False)
if (
isinstance(x, literals.TypedNdArray)
and x.ndim == 0
and np.issubdtype(x.dtype, np.signedinteger)
):
return mgpu.FragmentedArray.splat(
mgpu.c(int(x), i32), (), is_signed=False
)
raise NotImplementedError(x)
num_skipped = 0
for i in range(len(current_indices)):
# Integer indexers remove dimensions which should be
# skipped by following indexers.
if i in removed_dimensions:
num_skipped += 1
continue
dim_indexer = indexer.indices[i - num_skipped]
current_index = current_indices[i]
assert isinstance(current_index, indexing.Slice)
current_start_index = _ensure_idx_fa(current_index.start)
if isinstance(dim_indexer, indexing.Slice):
if dim_indexer.stride != 1:
raise NotImplementedError("Non-unit strides not implemented.")
current_indices[i] = indexing.Slice(
current_start_index + _ensure_idx_fa(dim_indexer.start),
dim_indexer.size,
1,
)
else:
current_indices[i] = current_start_index + _ensure_idx_fa(dim_indexer)
removed_dimensions.add(i)
return indexing.NDIndexer(
indices=tuple(current_indices),
shape=root_shape,
int_indexer_shape=(),
)
@register_lowering_rule(primitives.semaphore_read_p, mgpu.LoweringSemantics.Lane)
def _semaphore_read_lowering_rule(ctx: LoweringRuleContext, *args, args_tree):
sem, transforms = tree_util.tree_unflatten(args_tree, args)
sem, transforms = _handle_transforms(ctx, sem, transforms)
if transforms:
raise NotImplementedError(f"Unhandled transforms for semaphore_read: {transforms}")
sem_ptr = mgpu.utils.memref_ptr(sem)
i32_ty = ir.IntegerType.get_signless(32)
result = llvm_dialect.inline_asm(
i32_ty,
[sem_ptr],
"ld.acquire.sys.u32 $0,[$1];",
"=r,l",
has_side_effects=True,
)
return _ensure_fa(result, jnp.int32)
@register_lowering_rule(primitives.semaphore_signal_p, mgpu.LoweringSemantics.Lane)
def _semaphore_signal_lowering_rule(
ctx: LoweringRuleContext,
*args,
args_tree,
device_id_type,
):
i32 = ir.IntegerType.get_signless(32)
sem, transforms, value, device_id, core_index = tree_util.tree_unflatten(
args_tree, args
)
if core_index is not None:
raise NotImplementedError(
"Mosaic GPU backend does not support the concept of cores, but"
" core_index is specified"
)
sem, transforms = _handle_transforms(ctx, sem, transforms)
if transforms:
raise NotImplementedError(f"Unhandled transforms for semaphore_signal: {transforms}")
sem_ptr = mgpu.utils.memref_ptr(sem)
if device_id is not None:
device_id, other_axes = primitives.device_id_to_logical(
ctx.module_ctx.mesh_info,
_ensure_ir_value_device_id(device_id),
device_id_type,
lambda name: _axis_index_rule(ctx, axis_name=name),
)
if other_axes:
raise NotImplementedError(
f"Only JAX mesh axes can be used in device_id, but found {other_axes}"
)
sem_ptr = ctx.launch_ctx.to_remote(sem_ptr, device_id)
# TODO(apaszke): Narrow the scope from .sys to .gpu when the semaphore is local.
val = _ir_constant(value, i32)
# We only signal the semaphore from a single lane, which does not guarantee
# anything about the state of the other three warps in the warpgroup (they
# might still be e.g. reading memory that someone will overwrite once they
# receive a signal).
if ctx.module_ctx.auto_barriers:
mgpu.utils.warpgroup_barrier()
mgpu_utils.SemaphoreRef(sem_ptr).signal(
val, predicate=ctx.module_ctx.single_wg_lane_predicate
)
return ()
@register_lowering_rule(primitives.semaphore_wait_p, mgpu.LoweringSemantics.Lane)
def _semaphore_wait_lowering_rule(ctx: LoweringRuleContext, *args, args_tree):
sem, transforms, value, decrement = tree_util.tree_unflatten(args_tree, args)
sem, transforms = _handle_transforms(ctx, sem, transforms)
if transforms:
raise NotImplementedError(
f"Unhandled transforms for semaphore_wait: {transforms}"
)
mgpu_utils.SemaphoreRef(mgpu.utils.memref_ptr(sem)).wait(
_ensure_ir_value(value, jnp.int32), decrement=decrement
)
return ()
@register_lowering_rule(checkify.check_p, mgpu.LoweringSemantics.Lane)
def _check_lowering_rule(ctx: LoweringRuleContext, *err_args, err_tree, debug):
del ctx # Unused.
if not debug:
raise NotImplementedError(
"Non-debug checks are not supported by the Mosaic GPU backend."
" Functionalize them via `jax.experimental.checkify`."
)
if not pallas_helpers.debug_checks_enabled():
return []
error = jax.tree.unflatten(err_tree, err_args)
[pred] = error._pred.values()
[exception_tree] = error._metadata.values()
[payload] = error._payload.values()
exception = jax.tree.unflatten(exception_tree, payload)
assert isinstance(exception, checkify.FailedCheckError)
# check_p has an inverted predicate compared to assert, so we need to compute
# ``not pred`` here.
minus_one = _ir_constant(-1, mgpu_utils.dtype_to_ir_type(jnp.bool))
not_pred = arith_dialect.xori(pred.registers.item(), minus_one)
cf_dialect.assert_(not_pred, exception.fmt_string)
return []
@register_lowering_rule(gpu_core.layout_cast_p, mgpu.LoweringSemantics.Lane)
def _layout_cast_lowering(ctx: LoweringRuleContext, x, *, new_layout):
del ctx # Unused.
return x.to_layout(new_layout.to_mgpu())
@register_lowering_rule(gpu_core.layout_cast_p, mgpu.LoweringSemantics.Warpgroup)
def _layout_cast_lowering_wg(
ctx: LoweringRuleContext, x, *, new_layout
):
del ctx # Unused.
return mgpu.dialect.layout_cast(x, mgpu.to_layout_attr(new_layout.to_mgpu()))
@register_lowering_rule(lax.iota_p, mgpu.LoweringSemantics.Lane)
def _iota_lowering(
ctx: LoweringRuleContext, dtype, shape, dimension, sharding
):
del sharding # Unused.
if ctx.out_layout_hint is None:
raise RuntimeError(
"Failed to infer the output layout of the iota. Please apply"
" plgpu.layout_cast to its output right after its creation."
)
mlir_dtype = mgpu_utils.dtype_to_ir_type(dtype)
is_signed = mgpu_utils.is_signed(dtype)
return mgpu.FragmentedArray.broadcasted_iota(
mlir_dtype, shape, dimension, ctx.out_layout_hint, is_signed=is_signed
)
@register_lowering_rule(lax.iota_p, mgpu.LoweringSemantics.Warpgroup)
def _iota_lowering_wg(
ctx: LoweringRuleContext, dtype, shape, dimension, sharding
):
del ctx, sharding
result_type = ir.VectorType.get(shape, mgpu_utils.dtype_to_ir_type(dtype))
return mgpu.dialect.broadcasted_iota(result_type, dimension)
@register_lowering_rule(primitives.delay_p, mgpu.LoweringSemantics.Lane)
def _delay_lowering(ctx: LoweringRuleContext, nanos):
del ctx # Unused.
if not isinstance(nanos, ir.Value):
nanos = _i32_constant(nanos)
mgpu.nanosleep(nanos)
return []
| LoweringError |
python | jpadilla__pyjwt | jwt/exceptions.py | {
"start": 914,
"end": 1034
} | class ____(InvalidTokenError):
"""Raised when a token's ``iat`` claim is non-numeric"""
pass
| InvalidIssuedAtError |
python | MongoEngine__mongoengine | tests/fields/test_enum_field.py | {
"start": 237,
"end": 293
} | class ____(Enum):
NEW = "new"
DONE = "done"
| Status |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 6563,
"end": 6777
} | class ____(EllipticCurve):
name = "sect409r1"
key_size = 409
group_order = 0x10000000000000000000000000000000000000000000000000001E2AAD6A612F33307BE5FA47C3C9E052F838164CD37D9A21173 # noqa: E501
| SECT409R1 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methods1.py | {
"start": 1046,
"end": 1181
} | class ____:
def __init__(self, a: str, b: float) -> None:
pass
def dummyFunc(a: str, b: float) -> None:
pass
| DummyClass |
python | pikepdf__pikepdf | tests/test_image_access.py | {
"start": 32553,
"end": 34442
} | class ____(NamedTuple):
width: int
height: int
imbytes: bytes
def to_pdf(self):
pdf = pikepdf.new()
pdfw, pdfh = 36 * self.width, 36 * self.height
pdf.add_blank_page(page_size=(pdfw, pdfh))
imobj = Stream(
pdf,
self.imbytes,
Width=self.width,
Height=self.height,
Type=Name.XObject,
Subtype=Name.Image,
ImageMask=True,
)
pdf.pages[0].Contents = Stream(
pdf, b'%f 0 0 %f 0 0 cm 0.5 0.75 1.0 rg /Im0 Do' % (pdfw, pdfh)
)
pdf.pages[0].Resources = Dictionary(XObject=Dictionary(Im0=imobj))
pdf.pages[0].MediaBox = Array([0, 0, pdfw, pdfh])
return pdf
@st.composite
def valid_random_stencil_mask_spec(
draw,
widths=st.integers(min_value=1, max_value=16),
heights=st.integers(min_value=1, max_value=16),
):
width = draw(widths)
height = draw(heights)
min_imbytes = _next_multiple(width, 8) * height // 8
imbytes = draw(st.binary(min_size=min_imbytes, max_size=min_imbytes))
return StencilMaskSpec(width, height, imbytes)
@given(spec=valid_random_stencil_mask_spec())
def test_extract_stencil_mask(spec):
pdf = spec.to_pdf()
pim = PdfImage(pdf.pages[0].Resources.XObject.Im0)
bio = BytesIO()
pim.extract_to(stream=bio)
im = Image.open(bio)
assert im.mode == '1'
def test_repr_when_mode_not_impl():
pdf = Pdf.new()
pim = PdfImage(
Stream(
pdf,
b'',
BitsPerComponent=1,
ColorSpace=Name.InvalidColorSpace,
Width=1,
Height=1,
Type=Name.XObject,
Subtype=Name.Image,
)
)
assert repr(pim).startswith('<pikepdf.PdfImage image mode=? size=1x1')
with pytest.raises(NotImplementedError):
pim.mode
| StencilMaskSpec |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/model_service.py | {
"start": 1584,
"end": 4762
} | class ____(GoogleCloudBaseOperator):
"""
Deletes a Model.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param model_id: Required. The ID of the Model resource to be deleted.
Could be in format `projects/{project}/locations/{location}/models/{model_id}@{version_id}` or
`projects/{project}/locations/{location}/models/{model_id}@{version_alias}` if model
has several versions.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "model_id", "project_id", "impersonation_chain")
def __init__(
self,
*,
region: str,
project_id: str,
model_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.model_id = model_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = ModelServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.model_id = self.model_id.rpartition("@")[0] if "@" in self.model_id else self.model_id
try:
self.log.info("Deleting model: %s", self.model_id)
operation = hook.delete_model(
project_id=self.project_id,
region=self.region,
model=self.model_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Model was deleted.")
except NotFound:
self.log.info("The Model ID %s does not exist.", self.model_id)
| DeleteModelOperator |
python | apache__airflow | providers/fab/src/airflow/providers/fab/auth_manager/views/user.py | {
"start": 3106,
"end": 3902
} | class ____(MultiResourceUserMixin, UserLDAPModelView):
"""Customize permission names for FAB's builtin UserLDAPModelView."""
_class_permission_name = permissions.RESOURCE_USER
class_permission_name_mapping = {
"userinfoedit": permissions.RESOURCE_MY_PROFILE,
"userinfo": permissions.RESOURCE_MY_PROFILE,
}
method_permission_name = {
"add": "create",
"userinfo": "read",
"download": "read",
"show": "read",
"list": "read",
"edit": "edit",
"userinfoedit": "edit",
"delete": "delete",
}
base_permissions = [
permissions.ACTION_CAN_CREATE,
permissions.ACTION_CAN_READ,
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
]
| CustomUserLDAPModelView |
python | mlflow__mlflow | dev/clint/tests/rules/test_redundant_test_docstring.py | {
"start": 1792,
"end": 1859
} | class ____:
"""Test feature."""
pass
| TestFeatureImplementation |
python | realpython__materials | python-dicts/values.py | {
"start": 0,
"end": 264
} | class ____:
def __init__(self, x, y):
self.x = x
self.y = y
print(
{
"colors": ["red", "green", "blue"],
"plugins": {"py_code", "dev_sugar", "fasting_py"},
"timeout": 3,
"position": Point(42, 21),
}
)
| Point |
python | doocs__leetcode | solution/2300-2399/2302.Count Subarrays With Score Less Than K/Solution2.py | {
"start": 0,
"end": 298
} | class ____:
def countSubarrays(self, nums: List[int], k: int) -> int:
ans = s = j = 0
for i, x in enumerate(nums):
s += x
while s * (i - j + 1) >= k:
s -= nums[j]
j += 1
ans += i - j + 1
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/granitemoe/modular_granitemoe.py | {
"start": 3621,
"end": 3857
} | class ____(LlamaAttention):
def __init__(self, config: GraniteMoeConfig, layer_idx: int):
super().__init__(self, config, layer_idx)
self.scaling = config.attention_multiplier # Only diff with llama
| GraniteMoeAttention |
python | huggingface__transformers | src/transformers/models/blip/modeling_blip_text.py | {
"start": 9997,
"end": 11244
} | class ____(nn.Module):
def __init__(self, config, is_cross_attention=False, layer_idx=None):
super().__init__()
self.self = BlipTextSelfAttention(config, is_cross_attention, layer_idx=layer_idx)
self.output = BlipTextSelfOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor]:
self_outputs = self.self(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert -> BlipText
| BlipTextAttention |
python | pandas-dev__pandas | pandas/errors/__init__.py | {
"start": 6671,
"end": 7445
} | class ____(ValueError):
"""
Exception raised when attempting to call a unsupported numpy function.
For example, ``np.cumsum(groupby_object)``.
See Also
--------
DataFrame.groupby : Group DataFrame using a mapper or by a Series of columns.
Series.groupby : Group Series using a mapper or by a Series of columns.
core.groupby.GroupBy.cumsum : Compute cumulative sum for each group.
Examples
--------
>>> df = pd.DataFrame(
... {"A": [0, 0, 1, 1], "B": ["x", "x", "z", "y"], "C": [1, 2, 3, 4]}
... )
>>> np.cumsum(df.groupby(["A"]))
Traceback (most recent call last):
UnsupportedFunctionCall: numpy operations are not valid with groupby.
Use .groupby(...).cumsum() instead
"""
| UnsupportedFunctionCall |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_feed.py | {
"start": 30632,
"end": 39306
} | class ____(InfeedQueue):
"""A helper object to build a device infeed queue with input partition.
Args:
number_of_tuple_elements: the number of Tensors fed atomically through the
queue, must be present unless it can be inferred from other arguments.
device_assignment: A TPU `DeviceAssignment` which is used to place all the
partitions to different TPU infeed queues.
host_id: The id of the host machine.
input_partition_dims: A nested list/tuple of integers. Each inner
list/tuple describes how to partition the corresponding input tensor.
tuple_types: If not None, a list of types of the elements of the queue.
tuple_shapes: If not None, a list of shapes of the elements of the queue.
name: The name of the queue.
"""
def __init__(self,
number_of_tuple_elements,
device_assignment,
host_id,
input_partition_dims=None,
tuple_types=None,
tuple_shapes=None,
name=None):
super(_PartitionedInfeedQueue, self).__init__(
number_of_tuple_elements=number_of_tuple_elements,
tuple_types=tuple_types,
tuple_shapes=None,
shard_dimensions=None,
name="PartitionedInfeedQueue" if name is None else name)
self._input_partition_dims = input_partition_dims
self._host_id = host_id
self._device_assignment = device_assignment
def generate_dequeue_op(self, tpu_device=0):
"""Generate TPU dequeue ops.
Args:
tpu_device: The TPU device ordinal where the infeed instruction should be
placed.
Returns:
A list of Outputs corresponding to a partition of infeed dequeued
into XLA, suitable for use within a replicated block.
Raises:
ValueError: if the types or shapes of the tuple elements have not been
set; or if a dequeue op has already been generated.
"""
self.freeze()
if self._generated_dequeue_op and not ops.inside_function():
raise ValueError("Can't generate two dequeue Ops from the same queue")
self._generated_dequeue_op = True
full_name = "%s/dequeue" % self._name
sharded_shapes = [
policy.get_sharded_shape(shape)
for (shape, policy) in zip(self._tuple_shapes, self._sharding_policies)
]
with ops.device(tpu_name_util.core(tpu_device)):
values = tpu_ops.infeed_dequeue_tuple(
dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)
return tag_sharding_attribute_for_dequeued_tensors(
values, self._input_partition_dims)
def generate_enqueue_ops(self, sharded_inputs): # pytype: disable=signature-mismatch # overriding-parameter-count-checks
"""Generates the host-side Ops to enqueue the partitioned inputs.
sharded_inputs is a list, one for each replica, of lists of
Tensors. sharded_inputs[i] is the tuple of Tensors to use to feed
replica i.
sharded_inputs[i][j] is partitioned by self._input_partition_dims[j].
For example, if sharded_inputs[i][j] is a 2-D Tensor:
[[A, B, C, D],
[E ,F, G, H]]
self._input_partition_dims[j] is [2, 4].
sharded_inputs[i][j] will be partitioned and flattened into:
[A, B, C, D, E, F, G, H] and fed into the logical core ids:
[0, 1, 2, 3, 4, 5, 6, 7] respectively.
Args:
sharded_inputs: a list of lists of Tensors. The length of the
outer list determines the number of shards. Each inner list indicates
the types and shapes of the tuples in the corresponding shard.
Returns:
A list of host-side Ops, one for each shard, that when executed together
will enqueue a full-size element of infeed.
Raises:
ValueError: if the queue configuration has previously been frozen and the
shapes of the elements of sharded_inputs are not compatible with the
frozen configuration; or if the shapes of the elements of sharded_inputs
don't form a consistent unsharded tuple; or if the elements of a tuple
have different device constraints; or if the partition dims are invalid.
TypeError: if the queue configuration has previously been frozen and the
types of the elements of sharded_inputs are not compatible with the
frozen configuration; or if the types of the elements of sharded_inputs
don't form a consistent unsharded tuple.
"""
self.set_configuration_from_sharded_input_tensors(sharded_inputs)
number_of_replicas = len(sharded_inputs)
number_of_tuple_elements = len(sharded_inputs[0])
assert len(self._input_partition_dims) == number_of_tuple_elements
enqueue_ops = []
for replica_index in range(number_of_replicas):
flattened_inputs = sharded_inputs[replica_index]
inputs_part_dims_flat = nest.flatten_up_to(flattened_inputs,
self._input_partition_dims)
inputs_parted_iters = [
iter(self._check_dims_and_partition_or_replicate_on_host(x, dims))
for x, dims in zip(sharded_inputs[replica_index],
inputs_part_dims_flat)
]
# Find the replica_id of the host's logical core 0.
# The self._host_id is guaranteed to contain the logical core 0,
# even when num_cores_per_replica > num_cores_per_host -- the function
# caller makes sure that this host_id will must be receiving data (calls
# input_fn).
replica_id = self._device_assignment.lookup_replicas(
task_id=self._host_id, logical_core=0)[replica_index]
for logical_core in range(self._device_assignment.num_cores_per_replica):
# Places different partitions to different logic cores.
# Since there can be multiple hosts per replica, we need to find
# the actual host (device) of this logical core.
device = self._device_assignment.host_device(
replica=replica_id, logical_core=logical_core)
with ops.device(device):
ordinal = self._device_assignment.tpu_ordinal(
replica=replica_id, logical_core=logical_core)
infeed_inputs = []
for it in inputs_parted_iters:
input_for_device = next(it, None)
if input_for_device is not None:
infeed_inputs.append(input_for_device)
if infeed_inputs:
enqueue_ops.append(
tpu_ops.infeed_enqueue_tuple(
inputs=infeed_inputs,
shapes=[x.shape for x in infeed_inputs],
name="enqueue/replica_{0}/input_{1}".format(
replica_index, logical_core),
device_ordinal=ordinal))
return enqueue_ops
def _check_input_partition_dims(self, tensor, dims):
"""Checks that input partition dims are valid for the `Tensor`.
Args:
tensor: Input tensor for partitioning.
dims: A list of integer describes how to partition the input tensor.
Raises:
ValueError: If the tensor can't be partitioned by dims or the
num_cores_per_replica doesn't match the number of
partitions(dims.prod()).
"""
# No partitioning specified, so don't perform further checks.
if dims is None:
return
dims = np.array(dims)
if (dims < 1).any():
raise ValueError("All input partition dims must be >= 1.")
# No partitioning, so don't perform further checks.
if dims.prod() == 1:
return
if dims.prod() != self._device_assignment.num_cores_per_replica:
raise ValueError(
"The product of each input partition dim should equal to "
"num_cores_per_replica. (dim = {}, num_cores_per_replica "
"= {})".format(dims, self._device_assignment.num_cores_per_replica))
if dims.shape[0] != tensor.shape.ndims:
raise ValueError(
"Input partition dims must have the same number of dimensions "
"as the `Tensor` to be partitioned. (tensor shape = {}, input "
"partition dims = {}).".format(tensor.shape.as_list(), dims))
tensor.shape.assert_is_fully_defined()
def _check_dims_and_partition_or_replicate_on_host(self, tensor, dims):
"""Checks dims and partitions or replicates the input tensor.
The ops inside this function are placed on the host side.
Args:
tensor: The input tensor which will be partitioned or replicated.
dims: A list of integer describes how to partition the input tensor.
Returns:
An iterator of `Tensor`s or a list of partitioned tensors.
"""
self._check_input_partition_dims(tensor, dims)
return partition_or_replicate_on_host(tensor, dims)
| _PartitionedInfeedQueue |
python | ray-project__ray | python/ray/dashboard/modules/aggregator/multi_consumer_event_buffer.py | {
"start": 489,
"end": 597
} | class ____:
# Index of the next event to be consumed by this consumer
cursor_index: int
| _ConsumerState |
python | joke2k__faker | tests/providers/test_passport.py | {
"start": 40,
"end": 303
} | class ____:
"""Test passport provider methods"""
def test_passport_number(self, faker, num_samples):
for _ in range(num_samples):
passport_number = faker.passport_number()
assert isinstance(passport_number, str)
| TestPassport |
python | keras-team__keras | keras/src/random/seed_generator_test.py | {
"start": 166,
"end": 3348
} | class ____(testing.TestCase):
def test_seed_generator_initialization(self):
gen = seed_generator.SeedGenerator()
self.assertIsNotNone(gen.state)
seed = 12345
gen = seed_generator.SeedGenerator(seed=seed)
self.assertEqual(ops.convert_to_numpy(gen.state)[0], seed)
with self.assertRaisesRegex(
ValueError, "Argument `seed` must be an integer"
):
seed_generator.SeedGenerator(seed="invalid_seed")
def test_seed_generator_next(self):
gen = seed_generator.SeedGenerator(seed=42)
seed1 = ops.convert_to_numpy(gen.next())
seed2 = ops.convert_to_numpy(gen.next())
self.assertFalse(np.array_equal(seed1, seed2))
def test_global_seed_generator(self):
gen1 = seed_generator.global_seed_generator()
gen2 = seed_generator.global_seed_generator()
self.assertEqual(gen1, gen2)
def test_make_default_seed(self):
seed1 = seed_generator.make_default_seed()
seed2 = seed_generator.make_default_seed()
self.assertNotEqual(seed1, seed2)
def test_seed_generator_dtype(self):
gen = seed_generator.SeedGenerator(seed=42)
self.assertEqual(gen.state.dtype, backend.random_seed_dtype())
seed = gen.next()
self.assertEqual(gen.state.dtype, backend.random_seed_dtype())
self.assertEqual(
backend.standardize_dtype(seed.dtype), backend.random_seed_dtype()
)
def test_draw_seed_from_seed_generator(self):
gen = seed_generator.SeedGenerator(seed=42)
seed1 = seed_generator.draw_seed(gen)
self.assertTrue(backend.is_tensor(seed1))
def test_draw_seed_from_integer(self):
seed2 = seed_generator.draw_seed(12345)
self.assertTrue(backend.is_tensor(seed2))
self.assertEqual(
backend.standardize_dtype(seed2.dtype), backend.random_seed_dtype()
)
def test_draw_seed_from_none(self):
seed3 = seed_generator.draw_seed(None)
self.assertTrue(backend.is_tensor(seed3))
def test_draw_seed_invalid(self):
with self.assertRaisesRegex(
ValueError, "Argument `seed` must be either an integer"
):
seed_generator.draw_seed("invalid_seed")
def test_seed_generator_unexpected_kwargs(self):
with self.assertRaisesRegex(
ValueError, "Unrecognized keyword arguments"
):
seed_generator.SeedGenerator(invalid_arg="unexpected_value")
@pytest.mark.skipif(
backend.backend() != "jax", reason="This test requires the JAX backend"
)
def test_jax_tracing_with_global_seed_generator(self):
import jax
@jax.jit
def traced_function():
return seed_generator.global_seed_generator().next()
with self.assertRaisesRegex(
ValueError,
"When tracing a JAX function, you should only use seeded random",
):
traced_function()
def test_seed_generator_serialization(self):
random_generator = seed_generator.SeedGenerator(seed=42)
self.run_class_serialization_test(random_generator)
| SeedGeneratorTest |
python | kamyu104__LeetCode-Solutions | Python/concatenation-of-consecutive-binary-numbers.py | {
"start": 29,
"end": 352
} | class ____(object):
def concatenatedBinary(self, n):
"""
:type n: int
:rtype: int
"""
MOD = 10**9+7
result = l = 0
for i in xrange(1, n+1):
if i&(i-1) == 0:
l += 1
result = ((result<<l)%MOD+i)%MOD
return result
| Solution |
python | kamyu104__LeetCode-Solutions | Python/find-leaves-of-binary-tree.py | {
"start": 29,
"end": 623
} | class ____(object):
def findLeaves(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
def findLeavesHelper(node, result):
if not node:
return -1
level = 1 + max(findLeavesHelper(node.left, result), \
findLeavesHelper(node.right, result))
if len(result) < level + 1:
result.append([])
result[level].append(node.val)
return level
result = []
findLeavesHelper(root, result)
return result
| Solution |
python | pytorch__pytorch | torch/_dynamo/variables/builder.py | {
"start": 11782,
"end": 13123
} | class ____(GraphArg):
def __init__(self) -> None:
super().__init__(
source=None,
_example=BackwardState(),
pass_arg_as_tensor=False,
fake_tensor=None,
is_tensor=False,
)
def reconstruct(self, codegen: "PyCodegen"):
assert codegen.tx.output.backward_state_var
codegen.add_push_null(
lambda: codegen.load_import_from(BackwardState.__module__, "BackwardState")
)
codegen.call_function(0, False)
codegen.dup_top()
codegen.store(codegen.tx.output.backward_state_var)
# All class-based iterators in itertools
# NOTE: use id() because some objects are not hashable, it will raise error during lookup
ITERTOOLS_TYPE_IDS: frozenset[int] = frozenset(
id(member)
for name, member in vars(itertools).items()
if not name.startswith("_") and inspect.isclass(member)
)
# Will be updated later in substitute_in_graph in torch/_dynamo/polyfills/itertools.py
ITERTOOLS_POLYFILLED_TYPE_IDS: set[int] = set()
# Capture fn pointer at import time
# This is to guard against trying to mark the iterated tensors
# as static in case user overrides fn ptr
og_module_named_buffers_fn_ptr = torch.nn.Module.named_buffers
og_module_named_parameters_fn_ptr = torch.nn.Module.named_parameters
| BackwardStateGraphArg |
python | joke2k__faker | faker/providers/ssn/sk_SK/__init__.py | {
"start": 65,
"end": 1366
} | class ____(BaseProvider):
"""
A Faker provider for the Slovakian VAT IDs
"""
vat_id_formats = ("SK##########",)
national_id_months = ["%.2d" % i for i in range(1, 13)] + ["%.2d" % i for i in range(51, 63)]
def vat_id(self) -> str:
"""
http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
:return: a random Slovakian VAT ID
"""
return self.bothify(self.random_element(self.vat_id_formats))
def birth_number(self) -> str:
"""
Birth Number (Czech/Slovak: rodné číslo (RČ))
https://en.wikipedia.org/wiki/National_identification_number#Czech_Republic_and_Slovakia
"""
birthdate = self.generator.date_of_birth()
year = f"{birthdate:%y}"
month: str = self.random_element(self.national_id_months)
day = f"{birthdate:%d}"
if birthdate.year > 1953:
sn = self.random_number(4, True)
else:
sn = self.random_number(3, True)
number = int(f"{year}{month}{day}{sn}")
birth_number = str(ceil(number / 11) * 11)
if year == "00":
birth_number = "00" + birth_number
elif year[0] == "0":
birth_number = "0" + birth_number
return f"{birth_number[:6]}/{birth_number[6::]}"
| Provider |
python | pytorch__pytorch | tools/code_coverage/package/util/setting.py | {
"start": 681,
"end": 1115
} | class ____:
name: str
target_pattern: str
test_set: str # like __aten__
test_type: TestType
def __init__(
self, name: str, target_pattern: str, test_set: str, test_type: TestType
) -> None:
self.name = name
self.target_pattern = target_pattern
self.test_set = test_set
self.test_type = test_type
TestList = list[Test]
TestStatusType = dict[str, set[str]]
# option
| Test |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_import_error.py | {
"start": 4876,
"end": 9223
} | class ____:
@pytest.mark.parametrize(
("prepared_import_error_idx", "expected_status_code", "expected_body"),
[
(
0,
200,
{
"timestamp": from_datetime_to_zulu_without_ms(TIMESTAMP1),
"filename": FILENAME1,
"stack_trace": STACKTRACE1,
"bundle_name": BUNDLE_NAME,
},
),
(
1,
200,
{
"timestamp": from_datetime_to_zulu_without_ms(TIMESTAMP2),
"filename": FILENAME2,
"stack_trace": STACKTRACE2,
"bundle_name": BUNDLE_NAME,
},
),
(
2,
200,
{
"timestamp": from_datetime_to_zulu_without_ms(TIMESTAMP3),
"filename": FILENAME3,
"stack_trace": STACKTRACE3,
"bundle_name": None,
},
),
(None, 404, {}),
],
)
def test_get_import_error(
self, prepared_import_error_idx, expected_status_code, expected_body, test_client, import_errors
):
import_error: ParseImportError | None = (
import_errors[prepared_import_error_idx] if prepared_import_error_idx is not None else None
)
import_error_id = import_error.id if import_error else IMPORT_ERROR_NON_EXISTED_ID
response = test_client.get(f"/importErrors/{import_error_id}")
assert response.status_code == expected_status_code
if expected_status_code != 200:
return
expected_body.update({"import_error_id": import_error_id})
assert response.json() == expected_body
def test_should_raises_401_unauthenticated(self, unauthenticated_test_client, import_errors):
import_error_id = import_errors[0].id
response = unauthenticated_test_client.get(f"/importErrors/{import_error_id}")
assert response.status_code == 401
def test_should_raises_403_unauthorized(self, unauthorized_test_client, import_errors):
import_error_id = import_errors[0].id
response = unauthorized_test_client.get(f"/importErrors/{import_error_id}")
assert response.status_code == 403
@mock.patch("airflow.api_fastapi.core_api.routes.public.import_error.get_auth_manager")
def test_should_raises_403_unauthorized__user_can_not_read_any_dags_in_file(
self, mock_get_auth_manager, test_client, import_errors
):
import_error_id = import_errors[0].id
# Mock auth_manager
mock_is_authorized_dag = set_mock_auth_manager__is_authorized_dag(mock_get_auth_manager)
mock_get_authorized_dag_ids = set_mock_auth_manager__get_authorized_dag_ids(mock_get_auth_manager)
# Act
response = test_client.get(f"/importErrors/{import_error_id}")
# Assert
mock_is_authorized_dag.assert_called_once_with(method="GET", user=mock.ANY)
mock_get_authorized_dag_ids.assert_called_once_with(user=mock.ANY)
assert response.status_code == 403
assert response.json() == {"detail": "You do not have read permission on any of the DAGs in the file"}
@mock.patch("airflow.api_fastapi.core_api.routes.public.import_error.get_auth_manager")
def test_get_import_error__user_dont_have_read_permission_to_read_all_dags_in_file(
self, mock_get_auth_manager, test_client, permitted_dag_model, not_permitted_dag_model, import_errors
):
import_error_id = import_errors[0].id
set_mock_auth_manager__is_authorized_dag(mock_get_auth_manager)
set_mock_auth_manager__get_authorized_dag_ids(mock_get_auth_manager, {permitted_dag_model.dag_id})
# Act
response = test_client.get(f"/importErrors/{import_error_id}")
# Assert
assert response.status_code == 200
assert response.json() == {
"import_error_id": import_error_id,
"timestamp": from_datetime_to_zulu_without_ms(TIMESTAMP1),
"filename": FILENAME1,
"stack_trace": "REDACTED - you do not have read permission on all DAGs in the file",
"bundle_name": BUNDLE_NAME,
}
| TestGetImportError |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/core.py | {
"start": 28274,
"end": 35054
} | class ____:
selfy: Any
args: tuple
kwargs: dict
given_kwargs: dict
def process_arguments_to_given(
wrapped_test: Any,
arguments: Sequence[object],
kwargs: dict[str, object],
given_kwargs: dict[str, SearchStrategy],
params: dict[str, Parameter],
) -> tuple[Sequence[object], dict[str, object], Stuff]:
selfy = None
arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)
# If the test function is a method of some kind, the bound object
# will be the first named argument if there are any, otherwise the
# first vararg (if any).
posargs = [p.name for p in params.values() if p.kind is p.POSITIONAL_OR_KEYWORD]
if posargs:
selfy = kwargs.get(posargs[0])
elif arguments:
selfy = arguments[0]
# Ensure that we don't mistake mocks for self here.
# This can cause the mock to be used as the test runner.
if is_mock(selfy):
selfy = None
arguments = tuple(arguments)
with ensure_free_stackframes():
for k, s in given_kwargs.items():
check_strategy(s, name=k)
s.validate()
stuff = Stuff(selfy=selfy, args=arguments, kwargs=kwargs, given_kwargs=given_kwargs)
return arguments, kwargs, stuff
def skip_exceptions_to_reraise():
"""Return a tuple of exceptions meaning 'skip this test', to re-raise.
This is intended to cover most common test runners; if you would
like another to be added please open an issue or pull request adding
it to this function and to tests/cover/test_lazy_import.py
"""
# This is a set in case any library simply re-exports another's Skip exception
exceptions = set()
# We use this sys.modules trick to avoid importing libraries -
# you can't be an instance of a type from an unimported module!
# This is fast enough that we don't need to cache the result,
# and more importantly it avoids possible side-effects :-)
if "unittest" in sys.modules:
exceptions.add(sys.modules["unittest"].SkipTest)
if "_pytest.outcomes" in sys.modules:
exceptions.add(sys.modules["_pytest.outcomes"].Skipped)
return tuple(sorted(exceptions, key=str))
def failure_exceptions_to_catch() -> tuple[type[BaseException], ...]:
"""Return a tuple of exceptions meaning 'this test has failed', to catch.
This is intended to cover most common test runners; if you would
like another to be added please open an issue or pull request.
"""
# While SystemExit and GeneratorExit are instances of BaseException, we also
# expect them to be deterministic - unlike KeyboardInterrupt - and so we treat
# them as standard exceptions, check for flakiness, etc.
# See https://github.com/HypothesisWorks/hypothesis/issues/2223 for details.
exceptions = [Exception, SystemExit, GeneratorExit]
if "_pytest.outcomes" in sys.modules:
exceptions.append(sys.modules["_pytest.outcomes"].Failed)
return tuple(exceptions)
def new_given_signature(original_sig, given_kwargs):
"""Make an updated signature for the wrapped test."""
return original_sig.replace(
parameters=[
p
for p in original_sig.parameters.values()
if not (
p.name in given_kwargs
and p.kind in (p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY)
)
],
return_annotation=None,
)
def default_executor(data, function):
return function(data)
def get_executor(runner):
try:
execute_example = runner.execute_example
except AttributeError:
pass
else:
return lambda data, function: execute_example(partial(function, data))
if hasattr(runner, "setup_example") or hasattr(runner, "teardown_example"):
setup = getattr(runner, "setup_example", None) or (lambda: None)
teardown = getattr(runner, "teardown_example", None) or (lambda ex: None)
def execute(data, function):
token = None
try:
token = setup()
return function(data)
finally:
teardown(token)
return execute
return default_executor
# This function is a crude solution, a better way of resolving it would probably
# be to rewrite a bunch of exception handlers to use except*.
T = TypeVar("T", bound=BaseException)
def _flatten_group(excgroup: BaseExceptionGroup[T]) -> list[T]:
found_exceptions: list[T] = []
for exc in excgroup.exceptions:
if isinstance(exc, BaseExceptionGroup):
found_exceptions.extend(_flatten_group(exc))
else:
found_exceptions.append(exc)
return found_exceptions
@contextlib.contextmanager
def unwrap_markers_from_group() -> Generator[None, None, None]:
try:
yield
except BaseExceptionGroup as excgroup:
_frozen_exceptions, non_frozen_exceptions = excgroup.split(Frozen)
# group only contains Frozen, reraise the group
# it doesn't matter what we raise, since any exceptions get disregarded
# and reraised as StopTest if data got frozen.
if non_frozen_exceptions is None:
raise
# in all other cases they are discarded
# Can RewindRecursive end up in this group?
_, user_exceptions = non_frozen_exceptions.split(
lambda e: isinstance(e, (StopTest, HypothesisException))
)
# this might contain marker exceptions, or internal errors, but not frozen.
if user_exceptions is not None:
raise
# single marker exception - reraise it
flattened_non_frozen_exceptions: list[BaseException] = _flatten_group(
non_frozen_exceptions
)
if len(flattened_non_frozen_exceptions) == 1:
e = flattened_non_frozen_exceptions[0]
# preserve the cause of the original exception to not hinder debugging
# note that __context__ is still lost though
raise e from e.__cause__
# multiple marker exceptions. If we re-raise the whole group we break
# a bunch of logic so ....?
stoptests, non_stoptests = non_frozen_exceptions.split(StopTest)
# TODO: stoptest+hypothesisexception ...? Is it possible? If so, what do?
if non_stoptests:
# TODO: multiple marker exceptions is easy to produce, but the logic in the
# engine does not handle it... so we just reraise the first one for now.
e = _flatten_group(non_stoptests)[0]
raise e from e.__cause__
assert stoptests is not None
# multiple stoptests: raising the one with the lowest testcounter
raise min(_flatten_group(stoptests), key=lambda s_e: s_e.testcounter)
| Stuff |
python | getsentry__sentry | tests/sentry/utils/email/test_message_builder.py | {
"start": 658,
"end": 13324
} | class ____(TestCase):
def test_raw_content(self) -> None:
msg = MessageBuilder(
subject="Test",
body="hello world",
html_body="<b>hello world</b>",
headers={"X-Test": "foo"},
)
msg.send(["foo@example.com"])
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert isinstance(out, EmailMultiAlternatives)
assert out.to == ["foo@example.com"]
assert out.subject == "Test"
assert out.extra_headers["X-Test"] == "foo"
assert out.body == "hello world"
assert len(out.alternatives) == 1
assert out.alternatives[0] == (
"<!DOCTYPE html>\n<html><body><b>hello world</b></body></html>",
"text/html",
)
def test_inline_css(self) -> None:
msg = MessageBuilder(
subject="Test",
body="hello world",
html_body="<head><style type='text/css'>h1 { color: red; }</style></head><h1>foobar</h1><h2><b>hello world</b></h2>",
headers={"X-Test": "foo"},
)
msg.send(["foo@example.com"])
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert isinstance(out, EmailMultiAlternatives)
assert out.to == ["foo@example.com"]
assert out.subject == "Test"
assert out.extra_headers["X-Test"] == "foo"
assert out.body == "hello world"
assert len(out.alternatives) == 1
assert out.alternatives[0] == (
'<!DOCTYPE html>\n<html><head></head><body><h1 style="color: red">foobar</h1><h2><b>hello world</b></h2></body></html>',
"text/html",
)
def test_explicit_reply_to(self) -> None:
msg = MessageBuilder(
subject="Test",
body="hello world",
html_body="<b>hello world</b>",
headers={"X-Sentry-Reply-To": "bar@example.com"},
)
msg.send(["foo@example.com"])
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert isinstance(out, EmailMultiAlternatives)
assert out.to == ["foo@example.com"]
assert out.subject == "Test"
assert out.extra_headers["Reply-To"] == "bar@example.com"
assert out.body == "hello world"
assert len(out.alternatives) == 1
assert out.alternatives[0] == (
"<!DOCTYPE html>\n<html><body><b>hello world</b></body></html>",
"text/html",
)
def test_with_users(self) -> None:
project = self.project
assert len(mail.outbox) == 0
with assume_test_silo_mode(SiloMode.CONTROL):
user_a = User.objects.create(email="foo@example.com")
user_b = User.objects.create(email="bar@example.com")
user_c = User.objects.create(email="baz@example.com")
alternate_email = "bazzer@example.com"
UserEmail.objects.create(user=user_c, email=alternate_email)
UserOption.objects.create(
user=user_c, project_id=project.id, key="mail:email", value=alternate_email
)
msg = MessageBuilder(
subject="Test", body="hello world", html_body="<!DOCTYPE html>\n<b>hello world</b>"
)
msg.add_users([user_a.id, user_b.id, user_c.id], project=project)
msg.send()
assert len(mail.outbox) == 3
assert sorted(out.to[0] for out in mail.outbox) == [
"bar@example.com",
"bazzer@example.com",
"foo@example.com",
]
def test_fake_dont_send(self) -> None:
project = self.project
with assume_test_silo_mode(SiloMode.CONTROL):
user_a = User.objects.create(email=create_fake_email("foo", "fake"))
user_b = User.objects.create(email=create_fake_email("bar", "fake"))
user_c = User.objects.create(email=create_fake_email("baz", "fake"))
UserOption.objects.create(
user=user_c,
project_id=project.id,
key="mail:email",
value=create_fake_email("bazzer", "fake"),
)
msg = MessageBuilder(
subject="Test", body="hello world", html_body="<!DOCTYPE html>\n<b>hello world</b>"
)
msg.add_users([user_a.id, user_b.id, user_c.id], project=project)
msg.send()
assert len(mail.outbox) == 0
@patch("sentry.utils.email.message_builder.make_msgid")
def test_message_id(self, make_msgid: MagicMock) -> None:
make_msgid.return_value = "abc123"
msg = MessageBuilder(
subject="Test",
body="hello world",
html_body="<b>hello world</b>",
)
msg.send(["foo@example.com"])
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert isinstance(out, EmailMultiAlternatives)
assert out.to == ["foo@example.com"]
assert out.subject == "Test"
assert out.extra_headers["Message-Id"] == "abc123"
assert out.body == "hello world"
assert len(out.alternatives) == 1
assert out.alternatives[0] == (
"<!DOCTYPE html>\n<html><body><b>hello world</b></body></html>",
"text/html",
)
@patch("sentry.utils.email.message_builder.make_msgid")
def test_add_groupemailthread(self, make_msgid: MagicMock) -> None:
make_msgid.return_value = "abc123"
msg = MessageBuilder(
subject="Test", body="hello world", html_body="<b>hello world</b>", reference=self.group
)
msg.send(["foo@example.com"])
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert isinstance(out, EmailMultiAlternatives)
assert out.to == ["foo@example.com"]
assert out.subject == "Test", "First message should not have Re: prefix"
assert out.extra_headers["Message-Id"] == "abc123"
assert "In-Reply-To" not in out.extra_headers
assert "References" not in out.extra_headers
assert out.body == "hello world"
assert len(out.alternatives) == 1
assert out.alternatives[0] == (
"<!DOCTYPE html>\n<html><body><b>hello world</b></body></html>",
"text/html",
)
# Our new EmailThread row was added
assert GroupEmailThread.objects.count() == 1
thread = GroupEmailThread.objects.all()[0]
assert thread.msgid == "abc123"
assert thread.email == "foo@example.com"
assert thread.group == self.group
@patch("sentry.utils.email.message_builder.make_msgid")
def test_reply_reference(self, make_msgid: MagicMock) -> None:
make_msgid.return_value = "abc123"
msg = MessageBuilder(
subject="Test",
body="hello world",
html_body="<b>hello world</b>",
reference=self.activity,
)
msg.send(["foo@example.com"])
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert isinstance(out, EmailMultiAlternatives)
assert out.to == ["foo@example.com"]
assert out.subject == "Re: Test"
assert out.extra_headers["Message-Id"] == "abc123"
assert "In-Reply-To" not in out.extra_headers
assert "References" not in out.extra_headers
assert out.body == "hello world"
assert len(out.alternatives) == 1
assert out.alternatives[0] == (
"<!DOCTYPE html>\n<html><body><b>hello world</b></body></html>",
"text/html",
)
# Our new EmailThread row was added
assert GroupEmailThread.objects.count() == 1
thread = GroupEmailThread.objects.all()[0]
assert thread.msgid == "abc123"
assert thread.email == "foo@example.com"
assert thread.group == self.group
# new msgid for the next message
make_msgid.return_value = "321cba"
msg.send(["foo@example.com"])
assert len(mail.outbox) == 2
out = mail.outbox[1]
assert isinstance(out, EmailMultiAlternatives)
assert out.to == ["foo@example.com"]
assert out.subject == "Re: Test"
assert out.extra_headers["Message-Id"] == "321cba"
assert out.extra_headers["In-Reply-To"] == "abc123"
assert out.extra_headers["References"] == "abc123"
assert out.body == "hello world"
assert len(out.alternatives) == 1
assert out.alternatives[0] == (
"<!DOCTYPE html>\n<html><body><b>hello world</b></body></html>",
"text/html",
)
# Our new GroupEmailThread row was added
assert GroupEmailThread.objects.count() == 1, "Should not have added a new row"
assert GroupEmailThread.objects.all()[0].msgid == "abc123", "msgid should not have changed"
def test_get_built_messages(self) -> None:
msg = MessageBuilder(
subject="Test",
body="hello world",
html_body="<b>hello world</b>",
reference=self.activity,
)
results = msg.get_built_messages(["foo@example.com"])
assert len(results) == 1
assert results[0].message()["Reply-To"] is None
def test_get_built_messages_reply_to(self) -> None:
msg = MessageBuilder(
subject="Test",
body="hello world",
html_body="<b>hello world</b>",
reference=self.activity,
)
results = msg.get_built_messages(
to=["foo@example.com", "bar@example.com"], reply_to=["abc123@sentry.io"]
)
assert len(results) == 2
assert results[0].message()["Reply-To"] == "abc123@sentry.io"
assert results[1].message()["Reply-To"] == "abc123@sentry.io"
def test_bcc_on_send(self) -> None:
msg = MessageBuilder(subject="Test", body="hello world")
msg.send(["foo@example.com"], bcc=["bar@example.com"])
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert out.to == ["foo@example.com"]
assert out.bcc == ["bar@example.com"]
def test_generates_list_ids_for_registered_types(self) -> None:
build_message = functools.partial(
MessageBuilder, subject="Test", body="hello world", html_body="<b>hello world</b>"
)
expected = "<{event.project.slug}.{event.organization.slug}.{namespace}>".format(
event=self.event, namespace=options.get("mail.list-namespace")
)
references = (self.event.group, self.event.project, self.activity)
for reference in references:
(message,) = build_message(reference=reference).get_built_messages(["foo@example.com"])
assert message.message()["List-Id"] == expected
def test_does_not_generates_list_ids_for_unregistered_types(self) -> None:
message = (
MessageBuilder(
subject="Test",
body="hello world",
html_body="<b>hello world</b>",
reference=self.user,
)
.get_built_messages(["foo@example.com"])[0]
.message()
)
assert "List-Id" not in message
def test_stripped_newline(self) -> None:
msg = MessageBuilder(
subject="Foo\r\nBar", body="hello world", html_body="<b>hello world</b"
)
msg.send(["foo@example.com"])
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == "Foo"
def test_adds_type_to_headers(self) -> None:
msg = MessageBuilder(
subject="Test",
body="hello world",
html_body="<b>hello world</b>",
headers={"X-Test": "foo"},
type="test_email.type",
)
msg.send(["foo@example.com"])
assert len(mail.outbox) == 1
out = mail.outbox[0]
assert out.to == ["foo@example.com"]
assert out.subject == "Test"
assert out.extra_headers["X-Test"] == "foo"
json_xsmtpapi_data = json.loads(out.extra_headers["X-SMTPAPI"])
assert json_xsmtpapi_data["category"] == "test_email.type"
def test_send_async_reply_to(self) -> None:
msg = MessageBuilder(
subject="Test",
body="hello world",
html_body="<b>hello world</b>",
from_email="from@sentry.io",
)
with self.tasks():
msg.send_async(["foo@example.com", "bar@example.com"], reply_to=["reply@sentry.io"])
outbox = mail.outbox
assert len(outbox) == 2
for email in outbox:
assert email.message()["Reply-To"] == "reply@sentry.io"
assert email.from_email == "from@sentry.io"
| MessageBuilderTest |
python | gevent__gevent | src/gevent/tests/test__pywsgi.py | {
"start": 71087,
"end": 74589
} | class ____(TestCase):
# The wsgiref validator asserts type(environ) is dict.
# https://mail.python.org/pipermail/web-sig/2016-March/005455.html
validator = None
def init_server(self, application):
super(TestEnviron, self).init_server(application)
self.server.environ_class = pywsgi.SecureEnviron
def application(self, env, start_response):
self.assertIsInstance(env, pywsgi.SecureEnviron)
start_response('200 OK', [('Content-Type', 'text/plain')])
return []
def test_environ_is_secure_by_default(self):
self.urlopen()
def test_default_secure_repr(self):
environ = pywsgi.SecureEnviron()
self.assertIn('<pywsgi.SecureEnviron dict (keys: 0) at', repr(environ))
self.assertIn('<pywsgi.SecureEnviron dict (keys: 0) at', str(environ))
environ['key'] = 'value'
self.assertIn('<pywsgi.SecureEnviron dict (keys: 1) at', repr(environ))
self.assertIn('<pywsgi.SecureEnviron dict (keys: 1) at', str(environ))
environ.secure_repr = False
self.assertEqual(str({'key': 'value'}), str(environ))
self.assertEqual(repr({'key': 'value'}), repr(environ))
del environ.secure_repr
environ.whitelist_keys = ('missing value',)
self.assertEqual(str({'key': "<MASKED>"}), str(environ))
self.assertEqual(repr({'key': "<MASKED>"}), repr(environ))
environ.whitelist_keys = ('key',)
self.assertEqual(str({'key': 'value'}), str(environ))
self.assertEqual(repr({'key': 'value'}), repr(environ))
del environ.whitelist_keys
def test_override_class_defaults(self):
class EnvironClass(pywsgi.SecureEnviron):
__slots__ = ()
environ = EnvironClass()
self.assertTrue(environ.secure_repr)
EnvironClass.default_secure_repr = False
self.assertFalse(environ.secure_repr)
self.assertEqual(str({}), str(environ))
self.assertEqual(repr({}), repr(environ))
EnvironClass.default_secure_repr = True
EnvironClass.default_whitelist_keys = ('key',)
environ['key'] = 1
self.assertEqual(str({'key': 1}), str(environ))
self.assertEqual(repr({'key': 1}), repr(environ))
# Clean up for leaktests
del environ
del EnvironClass
import gc; gc.collect()
def test_copy_still_secure(self):
for cls in (pywsgi.Environ, pywsgi.SecureEnviron):
self.assertIsInstance(cls().copy(), cls)
def test_pickle_copy_returns_dict(self):
# Anything going through copy.copy/pickle should
# return the same pickle that a dict would.
import pickle
import json
for cls in (pywsgi.Environ, pywsgi.SecureEnviron):
bltin = {'key': 'value'}
env = cls(bltin)
self.assertIsInstance(env, cls)
self.assertEqual(bltin, env)
self.assertEqual(env, bltin)
for protocol in range(0, pickle.HIGHEST_PROTOCOL + 1):
# It's impossible to get a subclass of dict to pickle
# identically, but it can restore identically
env_dump = pickle.dumps(env, protocol)
self.assertNotIn(b'Environ', env_dump)
loaded = pickle.loads(env_dump)
self.assertEqual(type(loaded), dict)
self.assertEqual(json.dumps(bltin), json.dumps(env))
if __name__ == '__main__':
greentest.main()
| TestEnviron |
python | keras-team__keras | keras/src/metrics/confusion_metrics.py | {
"start": 2938,
"end": 4609
} | class ____(_ConfusionMatrixConditionCount):
"""Calculates the number of false positives.
If `sample_weight` is given, calculates the sum of the weights of
false positives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of false positives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
list/tuple of float threshold values in `[0, 1]`. A threshold is
compared with prediction values to determine the truth value of
predictions (i.e., above the threshold is `True`, below is `False`).
If used with a loss function that sets `from_logits=True` (i.e. no
sigmoid applied to predictions), `thresholds` should be set to 0.
One metric value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Examples:
>>> m = keras.metrics.FalsePositives()
>>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1])
>>> m.result()
2.0
>>> m.reset_state()
>>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1], sample_weight=[0, 0, 1, 0])
>>> m.result()
1.0
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super().__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_POSITIVES,
thresholds=thresholds,
name=name,
dtype=dtype,
)
@keras_export("keras.metrics.FalseNegatives")
| FalsePositives |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/utils.py | {
"start": 130,
"end": 571
} | class ____(BaseModel):
model_config = ConfigDict(
alias_generator=to_camel,
populate_by_name=True,
from_attributes=True,
)
def load_spec(spec_name: str, as_model: type[BaseModel]) -> list[BaseModel]:
with (Path(__file__).parent / "specifications" / f"{spec_name}.json").open(
"r", encoding="utf-8"
) as f:
data = json.load(f)
return [as_model(**item) for item in data]
| BaseSchema |
python | matplotlib__matplotlib | lib/matplotlib/testing/jpl_units/Duration.py | {
"start": 88,
"end": 3966
} | class ____:
"""Class Duration in development."""
allowed = ["ET", "UTC"]
def __init__(self, frame, seconds):
"""
Create a new Duration object.
= ERROR CONDITIONS
- If the input frame is not in the allowed list, an error is thrown.
= INPUT VARIABLES
- frame The frame of the duration. Must be 'ET' or 'UTC'
- seconds The number of seconds in the Duration.
"""
_api.check_in_list(self.allowed, frame=frame)
self._frame = frame
self._seconds = seconds
def frame(self):
"""Return the frame the duration is in."""
return self._frame
def __abs__(self):
"""Return the absolute value of the duration."""
return Duration(self._frame, abs(self._seconds))
def __neg__(self):
"""Return the negative value of this Duration."""
return Duration(self._frame, -self._seconds)
def seconds(self):
"""Return the number of seconds in the Duration."""
return self._seconds
def __bool__(self):
return self._seconds != 0
def _cmp(self, op, rhs):
"""
Check that *self* and *rhs* share frames; compare them using *op*.
"""
self.checkSameFrame(rhs, "compare")
return op(self._seconds, rhs._seconds)
__eq__ = functools.partialmethod(_cmp, operator.eq)
__ne__ = functools.partialmethod(_cmp, operator.ne)
__lt__ = functools.partialmethod(_cmp, operator.lt)
__le__ = functools.partialmethod(_cmp, operator.le)
__gt__ = functools.partialmethod(_cmp, operator.gt)
__ge__ = functools.partialmethod(_cmp, operator.ge)
def __add__(self, rhs):
"""
Add two Durations.
= ERROR CONDITIONS
- If the input rhs is not in the same frame, an error is thrown.
= INPUT VARIABLES
- rhs The Duration to add.
= RETURN VALUE
- Returns the sum of ourselves and the input Duration.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
if isinstance(rhs, U.Epoch):
return rhs + self
self.checkSameFrame(rhs, "add")
return Duration(self._frame, self._seconds + rhs._seconds)
def __sub__(self, rhs):
"""
Subtract two Durations.
= ERROR CONDITIONS
- If the input rhs is not in the same frame, an error is thrown.
= INPUT VARIABLES
- rhs The Duration to subtract.
= RETURN VALUE
- Returns the difference of ourselves and the input Duration.
"""
self.checkSameFrame(rhs, "sub")
return Duration(self._frame, self._seconds - rhs._seconds)
def __mul__(self, rhs):
"""
Scale a UnitDbl by a value.
= INPUT VARIABLES
- rhs The scalar to multiply by.
= RETURN VALUE
- Returns the scaled Duration.
"""
return Duration(self._frame, self._seconds * float(rhs))
__rmul__ = __mul__
def __str__(self):
"""Print the Duration."""
return f"{self._seconds:g} {self._frame}"
def __repr__(self):
"""Print the Duration."""
return f"Duration('{self._frame}', {self._seconds:g})"
def checkSameFrame(self, rhs, func):
"""
Check to see if frames are the same.
= ERROR CONDITIONS
- If the frame of the rhs Duration is not the same as our frame,
an error is thrown.
= INPUT VARIABLES
- rhs The Duration to check for the same frame
- func The name of the function doing the check.
"""
if self._frame != rhs._frame:
raise ValueError(
f"Cannot {func} Durations with different frames.\n"
f"LHS: {self._frame}\n"
f"RHS: {rhs._frame}")
| Duration |
python | huggingface__transformers | src/transformers/models/llava_next_video/configuration_llava_next_video.py | {
"start": 1282,
"end": 7978
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LlavaNextVideoForConditionalGeneration`]. It is used to instantiate an
Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the [llava-hf/LLaVA-NeXT-Video-7B-hf](https://huggingface.co/llava-hf/LLaVA-NeXT-Video-7B-hf)
model.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
The config object or dictionary of the text backbone.
image_token_index (`int`, *optional*, defaults to 32001):
The image token index to encode the image prompt.
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The activation function used by the multimodal projector.
multimodal_projector_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the multimodal projector.
vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
If `"full"`, the full vision features are used.
vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -2):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`):
A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list
of the form `(height, width)`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
video_token_index (`int`, *optional*, defaults to 32000):
The video token index to encode the image prompt.
spatial_pool_mode (`str`, *optional*, defaults to `"average"`):
Pooling mode to use for videos. Can be "average", "max" or "conv".
spatial_pool_stride (`int`, *optional*, defaults to 2):
Stride used in the pooling layer for videos.
image_seq_length (`int`, *optional*, defaults to 576):
Sequence length of one image embedding.
video_seq_length (`int`, *optional*, defaults to 288):
Sequence length of one video embedding.
Example:
```python
>>> from transformers import LlavaNextVideoForConditionalGeneration, LlavaNextVideoConfig, CLIPVisionConfig, LlamaConfig
>>> # Initializing a CLIP-vision config
>>> vision_config = CLIPVisionConfig()
>>> # Initializing a Llama config
>>> text_config = LlamaConfig()
>>> configuration = LlavaNextVideoConfig(vision_config, text_config)
>>> model = LlavaNextVideoForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "llava_next_video"
attribute_map = {
"image_token_id": "image_token_index",
"video_token_id": "video_token_index",
}
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
vision_config=None,
text_config=None,
image_token_index=32001,
projector_hidden_act="gelu",
multimodal_projector_bias=True,
vision_feature_select_strategy="default",
vision_feature_layer=-2,
image_grid_pinpoints=None,
tie_word_embeddings=False,
video_token_index=32000,
spatial_pool_mode="average",
spatial_pool_stride=2,
image_seq_length=576,
video_seq_length=288,
**kwargs,
):
self.video_token_index = video_token_index
self.spatial_pool_mode = spatial_pool_mode
self.spatial_pool_stride = spatial_pool_stride
self.image_seq_length = image_seq_length
self.video_seq_length = video_seq_length
self.image_token_index = image_token_index
self.projector_hidden_act = projector_hidden_act
self.multimodal_projector_bias = multimodal_projector_bias
if vision_feature_select_strategy not in ["default", "full"]:
raise ValueError(
"vision_feature_select_strategy should be one of 'default', 'full'."
f"Got: {vision_feature_select_strategy}"
)
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
image_grid_pinpoints = (
image_grid_pinpoints
if image_grid_pinpoints is not None
else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
)
self.image_grid_pinpoints = image_grid_pinpoints
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "clip_vision_model")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["clip_vision_model"](
intermediate_size=4096,
hidden_size=1024,
patch_size=14,
image_size=336,
num_hidden_layers=24,
num_attention_heads=16,
vocab_size=32000,
projection_dim=768,
)
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "llama")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["llama"]()
self.text_config = text_config
super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
__all__ = ["LlavaNextVideoConfig"]
| LlavaNextVideoConfig |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1147367,
"end": 1148018
} | class ____(ScaleInvalidDataShowAstheta):
"""
ScaleInvalidDataShowAsValuetheta schema wrapper.
Parameters
----------
value : float
* For arc marks, the arc length in radians if theta2 is not specified, otherwise the
start arc angle. (A value of 0 indicates up or “north”, increasing values proceed
clockwise.)
* For text marks, polar coordinate angle in radians.
"""
_schema = {"$ref": '#/definitions/ScaleInvalidDataShowAsValue<"theta">'}
def __init__(self, value: Optional[float] = Undefined, **kwds):
super().__init__(value=value, **kwds)
| ScaleInvalidDataShowAsValuetheta |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 928403,
"end": 928913
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of RemoveOutsideCollaborator"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "removed_user")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
removed_user = sgqlc.types.Field("User", graphql_name="removedUser")
"""The user that was removed as an outside collaborator."""
| RemoveOutsideCollaboratorPayload |
python | PrefectHQ__prefect | src/prefect/client/orchestration/_logs/client.py | {
"start": 458,
"end": 1654
} | class ____(BaseClient):
def create_logs(self, logs: Iterable[Union["LogCreate", dict[str, Any]]]) -> None:
"""
Create logs for a flow or task run
"""
from prefect.client.schemas.actions import LogCreate
serialized_logs = [
log.model_dump(mode="json") if isinstance(log, LogCreate) else log
for log in logs
]
self.request("POST", "/logs/", json=serialized_logs)
def read_logs(
self,
log_filter: "LogFilter | None" = None,
limit: int | None = None,
offset: int | None = None,
sort: "LogSort | None" = None,
) -> list["Log"]:
"""
Read flow and task run logs.
"""
from prefect.client.schemas.sorting import LogSort
body: dict[str, Any] = {
"logs": log_filter.model_dump(mode="json") if log_filter else None,
"limit": limit,
"offset": offset,
"sort": sort or LogSort.TIMESTAMP_ASC,
}
response = self.request("POST", "/logs/filter", json=body)
from prefect.client.schemas.objects import Log
return Log.model_validate_list(response.json())
| LogClient |
python | PyCQA__bandit | tests/unit/cli/test_config_generator.py | {
"start": 1226,
"end": 3192
} | class ____(testtools.TestCase):
@mock.patch("sys.argv", ["bandit-config-generator"])
def test_parse_args_no_defaults(self):
# Without arguments, the generator should just show help and exit
self.assertRaises(SystemExit, config_generator.parse_args)
@mock.patch("sys.argv", ["bandit-config-generator", "--show-defaults"])
def test_parse_args_show_defaults(self):
# Test that the config generator does show default plugin settings
return_value = config_generator.parse_args()
self.assertTrue(return_value.show_defaults)
@mock.patch("sys.argv", ["bandit-config-generator", "--out", "dummyfile"])
def test_parse_args_out_file(self):
# Test config generator get proper output file when specified
return_value = config_generator.parse_args()
self.assertEqual("dummyfile", return_value.output_file)
def test_get_config_settings(self):
config = {}
for plugin in extension_loader.MANAGER.plugins:
function = plugin.plugin
if hasattr(plugin.plugin, "_takes_config"):
module = importlib.import_module(function.__module__)
config[plugin.name] = module.gen_config(function._takes_config)
settings = config_generator.get_config_settings()
self.assertEqual(
yaml.safe_dump(config, default_flow_style=False), settings
)
@mock.patch("sys.argv", ["bandit-config-generator", "--show-defaults"])
def test_main_show_defaults(self):
# Test that the config generator does show defaults and returns 0
with mock.patch(
"bandit.cli.config_generator.get_config_settings"
) as mock_config_settings:
return_value = config_generator.main()
# The get_config_settings function should have been called
self.assertTrue(mock_config_settings.called)
self.assertEqual(0, return_value)
| BanditConfigGeneratorTests |
python | streamlit__streamlit | lib/streamlit/elements/lib/column_types.py | {
"start": 6008,
"end": 97203
} | class ____(TypedDict, total=False):
"""Configuration options for columns in ``st.dataframe`` and ``st.data_editor``.
Parameters
----------
label : str or None
The label shown at the top of the column. If this is ``None``
(default), the column name is used.
width : "small", "medium", "large", int, or None
The display width of the column. If this is ``None`` (default), the
column will be sized to fit the cell contents. Otherwise, this can be
one of the following:
- ``"small"``: 75px wide
- ``"medium"``: 200px wide
- ``"large"``: 400px wide
- An integer specifying the width in pixels
If the total width of all columns is less than the width of the
dataframe, the remaining space will be distributed evenly among all
columns.
help : str or None
A tooltip that gets displayed when hovering over the column label. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown, including
the Markdown directives described in the ``body`` parameter of
``st.markdown``.
disabled : bool or None
Whether editing should be disabled for this column. If this is ``None``
(default), Streamlit will enable editing wherever possible.
If a column has mixed types, it may become uneditable regardless of
``disabled``.
required : bool or None
Whether edited cells in the column need to have a value. If this is
``False`` (default), the user can submit empty values for this column.
If this is ``True``, an edited cell in this column can only be
submitted if its value is not ``None``, and a new row will only be
submitted after the user fills in this column.
pinned : bool or None
Whether the column is pinned. A pinned column will stay visible on the
left side no matter where the user scrolls. If this is ``None``
(default), Streamlit will decide: index columns are pinned, and data
columns are not pinned.
default : str, bool, int, float, or None
Specifies the default value in this column when a new row is added by
the user. This defaults to ``None``.
hidden : bool or None
Whether to hide the column. This defaults to ``False``.
type_config : dict or str or None
Configure a column type and type specific options.
"""
label: str | None
width: ColumnWidth | None
help: str | None
hidden: bool | None
disabled: bool | None
required: bool | None
pinned: bool | None
default: str | bool | int | float | list[str] | None
alignment: Literal["left", "center", "right"] | None
type_config: (
NumberColumnConfig
| TextColumnConfig
| CheckboxColumnConfig
| SelectboxColumnConfig
| LinkColumnConfig
| ListColumnConfig
| DatetimeColumnConfig
| DateColumnConfig
| TimeColumnConfig
| ProgressColumnConfig
| LineChartColumnConfig
| BarChartColumnConfig
| AreaChartColumnConfig
| ImageColumnConfig
| MultiselectColumnConfig
| JsonColumnConfig
| None
)
@gather_metrics("column_config.Column")
def Column(
label: str | None = None,
*,
width: ColumnWidth | None = None,
help: str | None = None,
disabled: bool | None = None,
required: bool | None = None,
pinned: bool | None = None,
) -> ColumnConfig:
"""Configure a generic column in ``st.dataframe`` or ``st.data_editor``.
The type of the column will be automatically inferred from the data type.
This command needs to be used in the ``column_config`` parameter of ``st.dataframe``
or ``st.data_editor``.
To change the type of the column and enable type-specific configuration options,
use one of the column types in the ``st.column_config`` namespace,
e.g. ``st.column_config.NumberColumn``.
Parameters
----------
label : str or None
The label shown at the top of the column. If this is ``None``
(default), the column name is used.
width : "small", "medium", "large", int, or None
The display width of the column. If this is ``None`` (default), the
column will be sized to fit the cell contents. Otherwise, this can be
one of the following:
- ``"small"``: 75px wide
- ``"medium"``: 200px wide
- ``"large"``: 400px wide
- An integer specifying the width in pixels
If the total width of all columns is less than the width of the
dataframe, the remaining space will be distributed evenly among all
columns.
help : str or None
A tooltip that gets displayed when hovering over the column label. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown, including
the Markdown directives described in the ``body`` parameter of
``st.markdown``.
disabled : bool or None
Whether editing should be disabled for this column. If this is ``None``
(default), Streamlit will enable editing wherever possible.
If a column has mixed types, it may become uneditable regardless of
``disabled``.
required : bool or None
Whether edited cells in the column need to have a value. If this is
``False`` (default), the user can submit empty values for this column.
If this is ``True``, an edited cell in this column can only be
submitted if its value is not ``None``, and a new row will only be
submitted after the user fills in this column.
pinned : bool or None
Whether the column is pinned. A pinned column will stay visible on the
left side no matter where the user scrolls. If this is ``None``
(default), Streamlit will decide: index columns are pinned, and data
columns are not pinned.
Examples
--------
>>> import pandas as pd
>>> import streamlit as st
>>>
>>> data_df = pd.DataFrame(
>>> {
>>> "widgets": ["st.selectbox", "st.number_input", "st.text_area", "st.button"],
>>> }
>>> )
>>>
>>> st.data_editor(
>>> data_df,
>>> column_config={
>>> "widgets": st.column_config.Column(
>>> "Streamlit Widgets",
>>> help="Streamlit **widget** commands 🎈",
>>> width="medium",
>>> required=True,
>>> )
>>> },
>>> hide_index=True,
>>> num_rows="dynamic",
>>> )
.. output::
https://doc-column.streamlit.app/
height: 300px
"""
return ColumnConfig(
label=label,
width=width,
help=help,
disabled=disabled,
required=required,
pinned=pinned,
)
@gather_metrics("column_config.NumberColumn")
def NumberColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    pinned: bool | None = None,
    default: int | float | None = None,
    format: str | NumberFormat | None = None,
    min_value: int | float | None = None,
    max_value: int | float | None = None,
    step: int | float | None = None,
) -> ColumnConfig:
    """Configure a number column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for integer and float values. This command needs to
    be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``.
    When used with ``st.data_editor``, editing will be enabled with a numeric input widget.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If this is ``None``
        (default), the column name is used.

    width : "small", "medium", "large", int, or None
        The display width of the column. If this is ``None`` (default), the
        column will be sized to fit the cell contents. Otherwise, this can be
        one of the following:

        - ``"small"``: 75px wide
        - ``"medium"``: 200px wide
        - ``"large"``: 400px wide
        - An integer specifying the width in pixels

        If the total width of all columns is less than the width of the
        dataframe, the remaining space will be distributed evenly among all
        columns.

    help : str or None
        A tooltip that gets displayed when hovering over the column label. If
        this is ``None`` (default), no tooltip is displayed.

        The tooltip can optionally contain GitHub-flavored Markdown, including
        the Markdown directives described in the ``body`` parameter of
        ``st.markdown``.

    disabled : bool or None
        Whether editing should be disabled for this column. If this is ``None``
        (default), Streamlit will enable editing wherever possible.

        If a column has mixed types, it may become uneditable regardless of
        ``disabled``.

    required : bool or None
        Whether edited cells in the column need to have a value. If this is
        ``False`` (default), the user can submit empty values for this column.
        If this is ``True``, an edited cell in this column can only be
        submitted if its value is not ``None``, and a new row will only be
        submitted after the user fills in this column.

    pinned : bool or None
        Whether the column is pinned. A pinned column will stay visible on the
        left side no matter where the user scrolls. If this is ``None``
        (default), Streamlit will decide: index columns are pinned, and data
        columns are not pinned.

    default : int, float, or None
        Specifies the default value in this column when a new row is added by
        the user. This defaults to ``None``.

    format : str, "plain", "localized", "percent", "dollar", "euro", "yen", "accounting", "bytes", "compact", "scientific", "engineering", or None
        A format string controlling how numbers are displayed.
        This can be one of the following values:

        - ``None`` (default): Streamlit infers the formatting from the data.
        - ``"plain"``: Show the full number without any formatting (e.g. "1234.567").
        - ``"localized"``: Show the number in the default locale format (e.g. "1,234.567").
        - ``"percent"``: Show the number as a percentage (e.g. "123456.70%").
        - ``"dollar"``: Show the number as a dollar amount (e.g. "$1,234.57").
        - ``"euro"``: Show the number as a euro amount (e.g. "€1,234.57").
        - ``"yen"``: Show the number as a yen amount (e.g. "¥1,235").
        - ``"accounting"``: Show the number in an accounting format (e.g. "1,234.00").
        - ``"bytes"``: Show the number in a byte format (e.g. "1.2KB").
        - ``"compact"``: Show the number in a compact format (e.g. "1.2K").
        - ``"scientific"``: Show the number in scientific notation (e.g. "1.235E3").
        - ``"engineering"``: Show the number in engineering notation (e.g. "1.235E3").
        - printf-style format string: Format the number with a printf
          specifier, like ``"%d"`` to show a signed integer (e.g. "1234") or
          ``"%X"`` to show an unsigned hexadecimal integer (e.g. "4D2"). You
          can also add prefixes and suffixes. To show British pounds, use
          ``"£ %.2f"`` (e.g. "£ 1234.57"). For more information, see `sprintf.js
          <https://github.com/alexei/sprintf.js?tab=readme-ov-file#format-specification>`_.

        Formatting from ``column_config`` always takes precedence over
        formatting from ``pandas.Styler``. The formatting does not impact the
        return value when used in ``st.data_editor``.

    min_value : int, float, or None
        The minimum value that can be entered. If this is ``None`` (default),
        there will be no minimum.

    max_value : int, float, or None
        The maximum value that can be entered. If this is ``None`` (default),
        there will be no maximum.

    step : int, float, or None
        The precision of numbers that can be entered. If this ``None``
        (default), integer columns will have a step of 1 and float columns will
        have unrestricted precision. In this case, some floats may display like
        integers. Setting ``step`` for float columns will ensure a consistent
        number of digits after the decimal are displayed.

        If ``format`` is a predefined format like ``"dollar"``, ``step``
        overrides the display precision. If ``format`` is a printf-style format
        string, ``step`` will not change the display precision.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "price": [20, 950, 250, 500],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "price": st.column_config.NumberColumn(
    >>>             "Price (in USD)",
    >>>             help="The price of the product in USD",
    >>>             min_value=0,
    >>>             max_value=1000,
    >>>             step=1,
    >>>             format="$%d",
    >>>         )
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-number-column.streamlit.app/
        height: 300px
    """  # noqa: E501
    # All number-specific options live in the nested type_config; the shared
    # options (label, width, ...) are stored on the top-level ColumnConfig.
    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        pinned=pinned,
        default=default,
        type_config=NumberColumnConfig(
            type="number",
            min_value=min_value,
            max_value=max_value,
            format=format,
            step=step,
        ),
    )
@gather_metrics("column_config.TextColumn")
def TextColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    pinned: bool | None = None,
    default: str | None = None,
    max_chars: int | None = None,
    validate: str | None = None,
) -> ColumnConfig:
    r"""Configure a text column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for string values. This command needs to be used in the
    ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. When used with
    ``st.data_editor``, editing will be enabled with a text input widget.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If this is ``None``
        (default), the column name is used.

    width : "small", "medium", "large", int, or None
        The display width of the column. If this is ``None`` (default), the
        column will be sized to fit the cell contents. Otherwise, this can be
        one of the following:

        - ``"small"``: 75px wide
        - ``"medium"``: 200px wide
        - ``"large"``: 400px wide
        - An integer specifying the width in pixels

        If the total width of all columns is less than the width of the
        dataframe, the remaining space will be distributed evenly among all
        columns.

    help : str or None
        A tooltip that gets displayed when hovering over the column label. If
        this is ``None`` (default), no tooltip is displayed.

        The tooltip can optionally contain GitHub-flavored Markdown, including
        the Markdown directives described in the ``body`` parameter of
        ``st.markdown``.

    disabled : bool or None
        Whether editing should be disabled for this column. If this is ``None``
        (default), Streamlit will enable editing wherever possible.

        If a column has mixed types, it may become uneditable regardless of
        ``disabled``.

    required : bool or None
        Whether edited cells in the column need to have a value. If this is
        ``False`` (default), the user can submit empty values for this column.
        If this is ``True``, an edited cell in this column can only be
        submitted if its value is not ``None``, and a new row will only be
        submitted after the user fills in this column.

    pinned : bool or None
        Whether the column is pinned. A pinned column will stay visible on the
        left side no matter where the user scrolls. If this is ``None``
        (default), Streamlit will decide: index columns are pinned, and data
        columns are not pinned.

    default : str or None
        Specifies the default value in this column when a new row is added by
        the user. This defaults to ``None``.

    max_chars : int or None
        The maximum number of characters that can be entered. If this is
        ``None`` (default), there will be no maximum.

    validate : str or None
        A JS-flavored regular expression (e.g. ``"^[a-z]+$"``) that edited
        values are validated against. If the user input is invalid, it will not
        be submitted.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "widgets": ["st.selectbox", "st.number_input", "st.text_area", "st.button"],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "widgets": st.column_config.TextColumn(
    >>>             "Widgets",
    >>>             help="Streamlit **widget** commands 🎈",
    >>>             default="st.",
    >>>             max_chars=50,
    >>>             validate=r"^st\.[a-z_]+$",
    >>>         )
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-text-column.streamlit.app/
        height: 300px
    """
    # max_chars and the JS-flavored validate regex are text-specific, so they
    # go into the nested TextColumnConfig rather than the shared options.
    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        pinned=pinned,
        default=default,
        type_config=TextColumnConfig(
            type="text", max_chars=max_chars, validate=validate
        ),
    )
@gather_metrics("column_config.LinkColumn")
def LinkColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    pinned: bool | None = None,
    default: str | None = None,
    max_chars: int | None = None,
    validate: str | None = None,
    display_text: str | None = None,
) -> ColumnConfig:
    r"""Configure a link column in ``st.dataframe`` or ``st.data_editor``.

    The cell values need to be string and will be shown as clickable links.
    This command needs to be used in the column_config parameter of ``st.dataframe``
    or ``st.data_editor``. When used with ``st.data_editor``, editing will be enabled
    with a text input widget.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If this is ``None``
        (default), the column name is used.

    width : "small", "medium", "large", int, or None
        The display width of the column. If this is ``None`` (default), the
        column will be sized to fit the cell contents. Otherwise, this can be
        one of the following:

        - ``"small"``: 75px wide
        - ``"medium"``: 200px wide
        - ``"large"``: 400px wide
        - An integer specifying the width in pixels

        If the total width of all columns is less than the width of the
        dataframe, the remaining space will be distributed evenly among all
        columns.

    help : str or None
        A tooltip that gets displayed when hovering over the column label. If
        this is ``None`` (default), no tooltip is displayed.

        The tooltip can optionally contain GitHub-flavored Markdown, including
        the Markdown directives described in the ``body`` parameter of
        ``st.markdown``.

    disabled : bool or None
        Whether editing should be disabled for this column. If this is ``None``
        (default), Streamlit will enable editing wherever possible.

        If a column has mixed types, it may become uneditable regardless of
        ``disabled``.

    required : bool or None
        Whether edited cells in the column need to have a value. If this is
        ``False`` (default), the user can submit empty values for this column.
        If this is ``True``, an edited cell in this column can only be
        submitted if its value is not ``None``, and a new row will only be
        submitted after the user fills in this column.

    pinned : bool or None
        Whether the column is pinned. A pinned column will stay visible on the
        left side no matter where the user scrolls. If this is ``None``
        (default), Streamlit will decide: index columns are pinned, and data
        columns are not pinned.

    default : str or None
        Specifies the default value in this column when a new row is added by
        the user. This defaults to ``None``.

    max_chars : int or None
        The maximum number of characters that can be entered. If this is
        ``None`` (default), there will be no maximum.

    validate : str or None
        A JS-flavored regular expression (e.g. ``"^https://.+$"``) that edited
        values are validated against. If the user input is invalid, it will not
        be submitted.

    display_text : str or None
        The text that is displayed in the cell. This can be one of the
        following:

        - ``None`` (default) to display the URL itself.
        - A string that is displayed in every cell, e.g. ``"Open link"``.
        - A Material icon that is displayed in every cell, e.g. ``":material/open_in_new:"``.
        - A JS-flavored regular expression (detected by usage of parentheses)
          to extract a part of the URL via a capture group. For example, use
          ``"https://(.*?)\.example\.com"`` to extract the display text
          "foo" from the URL "\https://foo.example.com".

          .. Comment: The backslash in front of foo.example.com prevents a hyperlink in docs.

        For more complex cases, you may use `Pandas Styler's format
        <https://pandas.pydata.org/docs/reference/api/pandas.io.formats.style.Styler.format.html>`_
        function on the underlying dataframe. Note that this makes the app slow,
        doesn't work with editable columns, and might be removed in the future.
        Text formatting from ``column_config`` always takes precedence over
        text formatting from ``pandas.Styler``.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "apps": [
    >>>             "https://roadmap.streamlit.app",
    >>>             "https://extras.streamlit.app",
    >>>             "https://issues.streamlit.app",
    >>>             "https://30days.streamlit.app",
    >>>         ],
    >>>         "creator": [
    >>>             "https://github.com/streamlit",
    >>>             "https://github.com/arnaudmiribel",
    >>>             "https://github.com/streamlit",
    >>>             "https://github.com/streamlit",
    >>>         ],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "apps": st.column_config.LinkColumn(
    >>>             "Trending apps",
    >>>             help="The top trending Streamlit apps",
    >>>             validate=r"^https://[a-z]+\.streamlit\.app$",
    >>>             max_chars=100,
    >>>             display_text=r"https://(.*?)\.streamlit\.app"
    >>>         ),
    >>>         "creator": st.column_config.LinkColumn(
    >>>             "App Creator", display_text="Open profile"
    >>>         ),
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-link-column.streamlit.app/
        height: 300px
    """
    # If display_text is a Material icon shorthand, validate/normalize it
    # before storing it in the column config.
    if display_text and display_text.startswith(":material/"):
        display_text = validate_material_icon(display_text)

    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        pinned=pinned,
        default=default,
        type_config=LinkColumnConfig(
            type="link",
            max_chars=max_chars,
            validate=validate,
            display_text=display_text,
        ),
    )
@gather_metrics("column_config.CheckboxColumn")
def CheckboxColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    pinned: bool | None = None,
    default: bool | None = None,
) -> ColumnConfig:
    """Configure a checkbox column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for boolean values. This command needs to be used in
    the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``.
    When used with ``st.data_editor``, editing will be enabled with a checkbox widget.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If this is ``None``
        (default), the column name is used.

    width : "small", "medium", "large", int, or None
        The display width of the column. If this is ``None`` (default), the
        column will be sized to fit the cell contents. Otherwise, this can be
        one of the following:

        - ``"small"``: 75px wide
        - ``"medium"``: 200px wide
        - ``"large"``: 400px wide
        - An integer specifying the width in pixels

        If the total width of all columns is less than the width of the
        dataframe, the remaining space will be distributed evenly among all
        columns.

    help : str or None
        A tooltip that gets displayed when hovering over the column label. If
        this is ``None`` (default), no tooltip is displayed.

        The tooltip can optionally contain GitHub-flavored Markdown, including
        the Markdown directives described in the ``body`` parameter of
        ``st.markdown``.

    disabled : bool or None
        Whether editing should be disabled for this column. If this is ``None``
        (default), Streamlit will enable editing wherever possible.

        If a column has mixed types, it may become uneditable regardless of
        ``disabled``.

    required : bool or None
        Whether edited cells in the column need to have a value. If this is
        ``False`` (default), the user can submit empty values for this column.
        If this is ``True``, an edited cell in this column can only be
        submitted if its value is not ``None``, and a new row will only be
        submitted after the user fills in this column.

    pinned : bool or None
        Whether the column is pinned. A pinned column will stay visible on the
        left side no matter where the user scrolls. If this is ``None``
        (default), Streamlit will decide: index columns are pinned, and data
        columns are not pinned.

    default : bool or None
        Specifies the default value in this column when a new row is added by
        the user. This defaults to ``None``.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "widgets": ["st.selectbox", "st.number_input", "st.text_area", "st.button"],
    >>>         "favorite": [True, False, False, True],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "favorite": st.column_config.CheckboxColumn(
    >>>             "Your favorite?",
    >>>             help="Select your **favorite** widgets",
    >>>             default=False,
    >>>         )
    >>>     },
    >>>     disabled=["widgets"],
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-checkbox-column.streamlit.app/
        height: 300px
    """
    # Checkbox columns have no type-specific options beyond the shared ones,
    # so the nested config only carries the type discriminator.
    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        pinned=pinned,
        default=default,
        type_config=CheckboxColumnConfig(type="checkbox"),
    )
@gather_metrics("column_config.SelectboxColumn")
def SelectboxColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    pinned: bool | None = None,
    default: SelectboxOptionValue | None = None,
    options: Iterable[SelectboxOptionValue] | None = None,
    format_func: Callable[[SelectboxOptionValue], str] | None = None,
) -> ColumnConfig:
    """Configure a selectbox column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for Pandas categorical values. This command needs to
    be used in the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``.
    When used with ``st.data_editor``, editing will be enabled with a selectbox widget.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If this is ``None``
        (default), the column name is used.

    width : "small", "medium", "large", int, or None
        The display width of the column. If this is ``None`` (default), the
        column will be sized to fit the cell contents. Otherwise, this can be
        one of the following:

        - ``"small"``: 75px wide
        - ``"medium"``: 200px wide
        - ``"large"``: 400px wide
        - An integer specifying the width in pixels

        If the total width of all columns is less than the width of the
        dataframe, the remaining space will be distributed evenly among all
        columns.

    help : str or None
        A tooltip that gets displayed when hovering over the column label. If
        this is ``None`` (default), no tooltip is displayed.

        The tooltip can optionally contain GitHub-flavored Markdown, including
        the Markdown directives described in the ``body`` parameter of
        ``st.markdown``.

    disabled : bool or None
        Whether editing should be disabled for this column. If this is ``None``
        (default), Streamlit will enable editing wherever possible.

        If a column has mixed types, it may become uneditable regardless of
        ``disabled``.

    required : bool or None
        Whether edited cells in the column need to have a value. If this is
        ``False`` (default), the user can submit empty values for this column.
        If this is ``True``, an edited cell in this column can only be
        submitted if its value is not ``None``, and a new row will only be
        submitted after the user fills in this column.

    pinned : bool or None
        Whether the column is pinned. A pinned column will stay visible on the
        left side no matter where the user scrolls. If this is ``None``
        (default), Streamlit will decide: index columns are pinned, and data
        columns are not pinned.

    default : str, int, float, bool, or None
        Specifies the default value in this column when a new row is added by
        the user. This defaults to ``None``.

    options : Iterable[str, int, float, bool] or None
        The options that can be selected during editing. If this is ``None``
        (default), the options will be inferred from the underlying dataframe
        column if its dtype is "category". For more information, see `Pandas docs
        <https://pandas.pydata.org/docs/user_guide/categorical.html>`_).

    format_func : function or None
        Function to modify the display of the options. It receives
        the raw option defined in ``options`` as an argument and should output
        the label to be shown for that option. If this is ``None`` (default),
        the raw option is used as the label.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "category": [
    >>>             "📊 Data Exploration",
    >>>             "📈 Data Visualization",
    >>>             "🤖 LLM",
    >>>             "📊 Data Exploration",
    >>>         ],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "category": st.column_config.SelectboxColumn(
    >>>             "App Category",
    >>>             help="The category of the app",
    >>>             width="medium",
    >>>             options=[
    >>>                 "📊 Data Exploration",
    >>>                 "📈 Data Visualization",
    >>>                 "🤖 LLM",
    >>>             ],
    >>>             required=True,
    >>>         )
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-selectbox-column.streamlit.app/
        height: 300px
    """
    # When a format_func is provided, wrap each raw option together with its
    # display label; otherwise pass the raw options through unchanged.
    processed_options: Iterable[str | int | float | SelectboxOption] | None = options
    if options and format_func is not None:
        processed_options = [
            SelectboxOption(value=option, label=format_func(option))
            for option in options
        ]

    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        pinned=pinned,
        default=default,
        type_config=SelectboxColumnConfig(
            type="selectbox",
            options=list(processed_options) if processed_options is not None else None,
        ),
    )
@gather_metrics("column_config.BarChartColumn")
def BarChartColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    pinned: bool | None = None,
    y_min: int | float | None = None,
    y_max: int | float | None = None,
    color: ChartColor | None = None,
) -> ColumnConfig:
    """Configure a bar chart column in ``st.dataframe`` or ``st.data_editor``.

    Cells need to contain a list of numbers. Chart columns are not editable
    at the moment. This command needs to be used in the ``column_config`` parameter
    of ``st.dataframe`` or ``st.data_editor``.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If this is ``None``
        (default), the column name is used.

    width : "small", "medium", "large", int, or None
        The display width of the column. If this is ``None`` (default), the
        column will be sized to fit the cell contents. Otherwise, this can be
        one of the following:

        - ``"small"``: 75px wide
        - ``"medium"``: 200px wide
        - ``"large"``: 400px wide
        - An integer specifying the width in pixels

        If the total width of all columns is less than the width of the
        dataframe, the remaining space will be distributed evenly among all
        columns.

    help : str or None
        A tooltip that gets displayed when hovering over the column label. If
        this is ``None`` (default), no tooltip is displayed.

        The tooltip can optionally contain GitHub-flavored Markdown, including
        the Markdown directives described in the ``body`` parameter of
        ``st.markdown``.

    pinned : bool or None
        Whether the column is pinned. A pinned column will stay visible on the
        left side no matter where the user scrolls. If this is ``None``
        (default), Streamlit will decide: index columns are pinned, and data
        columns are not pinned.

    y_min : int, float, or None
        The minimum value on the y-axis for all cells in the column. If this is
        ``None`` (default), every cell will use the minimum of its data.

    y_max : int, float, or None
        The maximum value on the y-axis for all cells in the column. If this is
        ``None`` (default), every cell will use the maximum of its data.

    color : "auto", "auto-inverse", str, or None
        The color to use for the chart. This can be one of the following:

        - ``None`` (default): The primary color is used.
        - ``"auto"``: If the data is increasing, the chart is green; if the
          data is decreasing, the chart is red.
        - ``"auto-inverse"``: If the data is increasing, the chart is red; if
          the data is decreasing, the chart is green.
        - A single color value that is applied to all charts in the column.
          In addition to the basic color palette (red, orange, yellow, green,
          blue, violet, gray/grey, and primary), this supports hex codes like
          ``"#483d8b"``.

        The basic color palette can be configured in the theme settings.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "sales": [
    >>>             [0, 4, 26, 80, 100, 40],
    >>>             [80, 20, 80, 35, 40, 100],
    >>>             [10, 20, 80, 80, 70, 0],
    >>>             [10, 100, 20, 100, 30, 100],
    >>>         ],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "sales": st.column_config.BarChartColumn(
    >>>             "Sales (last 6 months)",
    >>>             help="The sales volume in the last 6 months",
    >>>             y_min=0,
    >>>             y_max=100,
    >>>         ),
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-barchart-column.streamlit.app/
        height: 300px
    """
    # Validate the chart color value before building the config.
    if color is not None:
        _validate_chart_color(color)

    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        pinned=pinned,
        type_config=BarChartColumnConfig(
            type="bar_chart", y_min=y_min, y_max=y_max, color=color
        ),
    )
@gather_metrics("column_config.LineChartColumn")
def LineChartColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    pinned: bool | None = None,
    y_min: int | float | None = None,
    y_max: int | float | None = None,
    color: ChartColor | None = None,
) -> ColumnConfig:
    """Configure a line chart column in ``st.dataframe`` or ``st.data_editor``.

    Cells need to contain a list of numbers. Chart columns are not editable
    at the moment. This command needs to be used in the ``column_config`` parameter
    of ``st.dataframe`` or ``st.data_editor``.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If this is ``None``
        (default), the column name is used.

    width : "small", "medium", "large", int, or None
        The display width of the column. If this is ``None`` (default), the
        column will be sized to fit the cell contents. Otherwise, this can be
        one of the following:

        - ``"small"``: 75px wide
        - ``"medium"``: 200px wide
        - ``"large"``: 400px wide
        - An integer specifying the width in pixels

        If the total width of all columns is less than the width of the
        dataframe, the remaining space will be distributed evenly among all
        columns.

    help : str or None
        A tooltip that gets displayed when hovering over the column label. If
        this is ``None`` (default), no tooltip is displayed.

        The tooltip can optionally contain GitHub-flavored Markdown, including
        the Markdown directives described in the ``body`` parameter of
        ``st.markdown``.

    pinned : bool or None
        Whether the column is pinned. A pinned column will stay visible on the
        left side no matter where the user scrolls. If this is ``None``
        (default), Streamlit will decide: index columns are pinned, and data
        columns are not pinned.

    y_min : int, float, or None
        The minimum value on the y-axis for all cells in the column. If this is
        ``None`` (default), every cell will use the minimum of its data.

    y_max : int, float, or None
        The maximum value on the y-axis for all cells in the column. If this is
        ``None`` (default), every cell will use the maximum of its data.

    color : "auto", "auto-inverse", str, or None
        The color to use for the chart. This can be one of the following:

        - ``None`` (default): The primary color is used.
        - ``"auto"``: If the data is increasing, the chart is green; if the
          data is decreasing, the chart is red.
        - ``"auto-inverse"``: If the data is increasing, the chart is red; if
          the data is decreasing, the chart is green.
        - A single color value that is applied to all charts in the column.
          In addition to the basic color palette (red, orange, yellow, green,
          blue, violet, gray/grey, and primary), this supports hex codes like
          ``"#483d8b"``.

        The basic color palette can be configured in the theme settings.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "sales": [
    >>>             [0, 4, 26, 80, 100, 40],
    >>>             [80, 20, 80, 35, 40, 100],
    >>>             [10, 20, 80, 80, 70, 0],
    >>>             [10, 100, 20, 100, 30, 100],
    >>>         ],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "sales": st.column_config.LineChartColumn(
    >>>             "Sales (last 6 months)",
    >>>             width="medium",
    >>>             help="The sales volume in the last 6 months",
    >>>             y_min=0,
    >>>             y_max=100,
    >>>         ),
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-linechart-column.streamlit.app/
        height: 300px
    """
    # Validate the chart color value before building the config.
    if color is not None:
        _validate_chart_color(color)

    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        pinned=pinned,
        type_config=LineChartColumnConfig(
            type="line_chart", y_min=y_min, y_max=y_max, color=color
        ),
    )
@gather_metrics("column_config.AreaChartColumn")
def AreaChartColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    pinned: bool | None = None,
    y_min: int | float | None = None,
    y_max: int | float | None = None,
    color: ChartColor | None = None,
) -> ColumnConfig:
    """Configure an area chart column in ``st.dataframe`` or ``st.data_editor``.

    Cells need to contain a list of numbers. Chart columns are not editable
    at the moment. This command needs to be used in the ``column_config`` parameter
    of ``st.dataframe`` or ``st.data_editor``.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If this is ``None``
        (default), the column name is used.

    width : "small", "medium", "large", int, or None
        The display width of the column. If this is ``None`` (default), the
        column will be sized to fit the cell contents. Otherwise, this can be
        one of the following:

        - ``"small"``: 75px wide
        - ``"medium"``: 200px wide
        - ``"large"``: 400px wide
        - An integer specifying the width in pixels

        If the total width of all columns is less than the width of the
        dataframe, the remaining space will be distributed evenly among all
        columns.

    help : str or None
        A tooltip that gets displayed when hovering over the column label. If
        this is ``None`` (default), no tooltip is displayed.

        The tooltip can optionally contain GitHub-flavored Markdown, including
        the Markdown directives described in the ``body`` parameter of
        ``st.markdown``.

    pinned : bool or None
        Whether the column is pinned. A pinned column will stay visible on the
        left side no matter where the user scrolls. If this is ``None``
        (default), Streamlit will decide: index columns are pinned, and data
        columns are not pinned.

    y_min : int, float, or None
        The minimum value on the y-axis for all cells in the column. If this is
        ``None`` (default), every cell will use the minimum of its data.

    y_max : int, float, or None
        The maximum value on the y-axis for all cells in the column. If this is
        ``None`` (default), every cell will use the maximum of its data.

    color : "auto", "auto-inverse", str, or None
        The color to use for the chart. This can be one of the following:

        - ``None`` (default): The primary color is used.
        - ``"auto"``: If the data is increasing, the chart is green; if the
          data is decreasing, the chart is red.
        - ``"auto-inverse"``: If the data is increasing, the chart is red; if
          the data is decreasing, the chart is green.
        - A single color value that is applied to all charts in the column.
          In addition to the basic color palette (red, orange, yellow, green,
          blue, violet, gray/grey, and primary), this supports hex codes like
          ``"#483d8b"``.

        The basic color palette can be configured in the theme settings.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "sales": [
    >>>             [0, 4, 26, 80, 100, 40],
    >>>             [80, 20, 80, 35, 40, 100],
    >>>             [10, 20, 80, 80, 70, 0],
    >>>             [10, 100, 20, 100, 30, 100],
    >>>         ],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "sales": st.column_config.AreaChartColumn(
    >>>             "Sales (last 6 months)",
    >>>             width="medium",
    >>>             help="The sales volume in the last 6 months",
    >>>             y_min=0,
    >>>             y_max=100,
    >>>         ),
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-areachart-column.streamlit.app/
        height: 300px
    """
    # Validate the chart color value before building the config.
    if color is not None:
        _validate_chart_color(color)

    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        pinned=pinned,
        type_config=AreaChartColumnConfig(
            type="area_chart", y_min=y_min, y_max=y_max, color=color
        ),
    )
@gather_metrics("column_config.ImageColumn")
def ImageColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    pinned: bool | None = None,
) -> ColumnConfig:
    """Configure an image column in ``st.dataframe`` or ``st.data_editor``.

    The cell values need to be one of:

    * A URL to fetch the image from. This can also be a relative URL of an image
      deployed via `static file serving
      <https://docs.streamlit.io/develop/concepts/configuration/serving-static-files>`_.
      Note that you can NOT use an arbitrary local image if it is not available
      through a public URL.
    * A data URL containing an SVG XML like ``data:image/svg+xml;utf8,<svg xmlns=...</svg>``.
    * A data URL containing a Base64 encoded image like ``data:image/png;base64,iVBO...``.

    Image columns are not editable at the moment. This command needs to be used in
    the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If this is ``None``
        (default), the column name is used.

    width : "small", "medium", "large", int, or None
        The display width of the column. If this is ``None`` (default), the
        column will be sized to fit the cell contents. Otherwise, this can be
        one of the following:

        - ``"small"``: 75px wide
        - ``"medium"``: 200px wide
        - ``"large"``: 400px wide
        - An integer specifying the width in pixels

        If the total width of all columns is less than the width of the
        dataframe, the remaining space will be distributed evenly among all
        columns.

    help : str or None
        A tooltip that gets displayed when hovering over the column label. If
        this is ``None`` (default), no tooltip is displayed.

        The tooltip can optionally contain GitHub-flavored Markdown, including
        the Markdown directives described in the ``body`` parameter of
        ``st.markdown``.

    pinned : bool or None
        Whether the column is pinned. A pinned column will stay visible on the
        left side no matter where the user scrolls. If this is ``None``
        (default), Streamlit will decide: index columns are pinned, and data
        columns are not pinned.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "apps": [
    >>>             "https://storage.googleapis.com/s4a-prod-share-preview/default/st_app_screenshot_image/5435b8cb-6c6c-490b-9608-799b543655d3/Home_Page.png",
    >>>             "https://storage.googleapis.com/s4a-prod-share-preview/default/st_app_screenshot_image/ef9a7627-13f2-47e5-8f65-3f69bb38a5c2/Home_Page.png",
    >>>             "https://storage.googleapis.com/s4a-prod-share-preview/default/st_app_screenshot_image/31b99099-8eae-4ff8-aa89-042895ed3843/Home_Page.png",
    >>>             "https://storage.googleapis.com/s4a-prod-share-preview/default/st_app_screenshot_image/6a399b09-241e-4ae7-a31f-7640dc1d181e/Home_Page.png",
    >>>         ],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "apps": st.column_config.ImageColumn(
    >>>             "Preview Image", help="Streamlit app preview screenshots"
    >>>         )
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-image-column.streamlit.app/
        height: 300px
    """
    # Image cells are read-only, so only the shared display options apply here.
    column_type = ImageColumnConfig(type="image")
    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        pinned=pinned,
        type_config=column_type,
    )
@gather_metrics("column_config.ListColumn")
def ListColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    pinned: bool | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    default: Iterable[str] | None = None,
) -> ColumnConfig:
    """Configure a list column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for list-like values. This command needs to
    be used in the ``column_config`` parameter of ``st.dataframe`` or
    ``st.data_editor``. When used with ``st.data_editor``, users can freely
    type in new options and remove existing ones.

    .. Note::
        Editing for non-string or mixed type lists can cause issues with Arrow
        serialization. We recommend that you disable editing for these columns
        or convert all list values to strings.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If this is ``None``
        (default), the column name is used.

    width : "small", "medium", "large", int, or None
        The display width of the column. If this is ``None`` (default), the
        column will be sized to fit the cell contents. Otherwise, this can be
        one of the following:

        - ``"small"``: 75px wide
        - ``"medium"``: 200px wide
        - ``"large"``: 400px wide
        - An integer specifying the width in pixels

        If the total width of all columns is less than the width of the
        dataframe, the remaining space will be distributed evenly among all
        columns.

    help : str or None
        A tooltip that gets displayed when hovering over the column label. If
        this is ``None`` (default), no tooltip is displayed.

        The tooltip can optionally contain GitHub-flavored Markdown, including
        the Markdown directives described in the ``body`` parameter of
        ``st.markdown``.

    pinned : bool or None
        Whether the column is pinned. A pinned column will stay visible on the
        left side no matter where the user scrolls. If this is ``None``
        (default), Streamlit will decide: index columns are pinned, and data
        columns are not pinned.

    disabled : bool or None
        Whether editing should be disabled for this column. If this is ``None``
        (default), Streamlit will enable editing wherever possible.

        If a column has mixed types, it may become uneditable regardless of
        ``disabled``.

    required : bool or None
        Whether edited cells in the column need to have a value. If this is
        ``False`` (default), the user can submit empty values for this column.
        If this is ``True``, an edited cell in this column can only be
        submitted if its value is not ``None``, and a new row will only be
        submitted after the user fills in this column.

    default : Iterable of str or None
        Specifies the default value in this column when a new row is added by
        the user. This defaults to ``None``.

    Examples
    --------
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "sales": [
    >>>             [0, 4, 26, 80, 100, 40],
    >>>             [80, 20, 80, 35, 40, 100],
    >>>             [10, 20, 80, 80, 70, 0],
    >>>             [10, 100, 20, 100, 30, 100],
    >>>         ],
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "sales": st.column_config.ListColumn(
    >>>             "Sales (last 6 months)",
    >>>             help="The sales volume in the last 6 months",
    >>>             width="medium",
    >>>         ),
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-list-column.streamlit.app/
        height: 300px
    """
    # Materialize the default into a plain list so it serializes cleanly.
    default_value = list(default) if default is not None else None
    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        pinned=pinned,
        disabled=disabled,
        required=required,
        default=default_value,
        type_config=ListColumnConfig(type="list"),
    )
@gather_metrics("column_config.MultiselectColumn")
def MultiselectColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    pinned: bool | None = None,
    default: Iterable[str] | None = None,
    options: Iterable[str] | None = None,
    accept_new_options: bool | None = None,
    color: str
    | Literal["auto"]
    | ThemeColor
    | Iterable[str | ThemeColor]
    | None = None,
    format_func: Callable[[str], str] | None = None,
) -> ColumnConfig:
    """Configure a multiselect column in ``st.dataframe`` or ``st.data_editor``.

    This command needs to be used in the ``column_config`` parameter of
    ``st.dataframe`` or ``st.data_editor``. When used with ``st.data_editor``,
    users can select options from a dropdown menu. You can configure the
    column to allow freely typed options, too.

    You can also use this column type to display colored labels in a read-only
    ``st.dataframe``.

    .. Note::
        Editing for non-string or mixed type lists can cause issues with Arrow
        serialization. We recommend that you disable editing for these columns
        or convert all list values to strings.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If None (default),
        the column name is used.

    width : "small", "medium", "large", int, or None
        The display width of the column. If this is ``None`` (default), the
        column will be sized to fit the cell contents. Otherwise, this can be
        one of the following:

        - ``"small"``: 75px wide
        - ``"medium"``: 200px wide
        - ``"large"``: 400px wide
        - An integer specifying the width in pixels

        If the total width of all columns is less than the width of the
        dataframe, the remaining space will be distributed evenly among all
        columns.

    help : str or None
        A tooltip that gets displayed when hovering over the column label. If
        this is ``None`` (default), no tooltip is displayed.

        The tooltip can optionally contain GitHub-flavored Markdown, including
        the Markdown directives described in the ``body`` parameter of
        ``st.markdown``.

    disabled : bool or None
        Whether editing should be disabled for this column. Defaults to False.

    required : bool or None
        Whether edited cells in the column need to have a value. If True, an edited cell
        can only be submitted if it has a value other than None. Defaults to False.

    pinned : bool or None
        Whether the column is pinned. A pinned column will stay visible on the
        left side no matter where the user scrolls. If this is ``None``
        (default), Streamlit will decide: index columns are pinned, and data
        columns are not pinned.

    default : Iterable of str or None
        Specifies the default value in this column when a new row is added by the user.

    options : Iterable of str or None
        The options that can be selected during editing.

    accept_new_options : bool or None
        Whether the user can add selections that aren't included in ``options``.
        If this is ``False`` (default), the user can only select from the
        items in ``options``. If this is ``True``, the user can enter new
        items that don't exist in ``options``.

        When a user enters and selects a new item, it is included in the
        returned cell list value as a string. The new item is not added to
        the options drop-down menu.

    color : str, Iterable of str, or None
        The color to use for different options. This can be:

        - None (default): The options are displayed without color.
        - ``"auto"``: The options are colored based on the configured categorical chart colors.
        - A single color value that is used for all options. This can be one of
          the following strings:

          - ``"primary"`` to use the primary theme color.
          - A CSS named color name like ``"darkBlue"`` or ``"maroon"``.
          - A hex color code like ``"#483d8b"`` or ``"#6A5ACD80"``.
          - An RGB or RGBA color code like ``"rgb(255,0,0)"`` or
            ``"RGB(70, 130, 180, .7)"``.
          - An HSL or HSLA color code like ``"hsl(248, 53%, 58%)"``
            or ``"HSL(147, 50%, 47%, .3)"``.

        - An iterable of color values that are mapped to the options. The colors
          are applied in sequence, cycling through the iterable if there are
          more options than colors.

    format_func : function or None
        Function to modify the display of the options. It receives
        the raw option defined in ``options`` as an argument and should output
        the label to be shown for that option. When used in ``st.data_editor``,
        this has no impact on the returned value. If this is ``None``
        (default), the raw option is used as the label.

    Examples
    --------
    **Example 1: Editable multiselect column**

    To customize the label colors, provide a list of colors to the ``color``
    parameter. You can also format the option labels with the ``format_func``
    parameter.

    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    ...     {
    ...         "category": [
    ...             ["exploration", "visualization"],
    ...             ["llm", "visualization"],
    ...             ["exploration"],
    ...         ],
    ...     }
    ... )
    >>>
    >>> st.data_editor(
    ...     data_df,
    ...     column_config={
    ...         "category": st.column_config.MultiselectColumn(
    ...             "App Categories",
    ...             help="The categories of the app",
    ...             options=[
    ...                 "exploration",
    ...                 "visualization",
    ...                 "llm",
    ...             ],
    ...             color=["#ffa421", "#803df5", "#00c0f2"],
    ...             format_func=lambda x: x.capitalize(),
    ...         ),
    ...     },
    ... )

    .. output::
        https://doc-multiselect-column-1.streamlit.app/
        height: 300px

    **Example 2: Colored tags for st.dataframe**

    When using ``st.dataframe``, the multiselect column is read-only
    and can be used to display colored tags. In this example, the dataframe
    uses the primary theme color for all tags.

    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    ...     {
    ...         "category": [
    ...             ["exploration", "visualization"],
    ...             ["llm", "visualization"],
    ...             ["exploration"],
    ...         ],
    ...     }
    ... )
    >>>
    >>> st.dataframe(
    ...     data_df,
    ...     column_config={
    ...         "category": st.column_config.MultiselectColumn(
    ...             "App Categories",
    ...             options=["exploration", "visualization", "llm"],
    ...             color="primary",
    ...             format_func=lambda x: x.capitalize(),
    ...         ),
    ...     },
    ... )

    .. output::
        https://doc-multiselect-column-2.streamlit.app/
        height: 300px
    """
    # Bake color and format_func into the per-option config entries, since the
    # frontend consumes them per option rather than as column-level settings.
    processed_options: list[MultiselectOption] | None = None
    if options is not None:
        # Turn the color setting into an iterator yielding one color per option:
        # a single string repeats for every option; an iterable of colors cycles.
        color_iter: Iterator[str] | None
        if color is None:
            color_iter = None
        elif isinstance(color, str):
            color_iter = itertools.repeat(color)
        else:
            color_iter = itertools.cycle(color)

        processed_options = []
        for raw_option in options:
            entry = MultiselectOption(value=raw_option)
            if format_func is not None:
                # The display label is derived from the raw option value.
                entry["label"] = format_func(raw_option)
            if color_iter is not None and "color" not in entry:
                entry["color"] = next(color_iter)
            processed_options.append(entry)

    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        pinned=pinned,
        default=list(default) if default is not None else None,
        type_config=MultiselectColumnConfig(
            type="multiselect",
            options=processed_options,
            accept_new_options=accept_new_options,
        ),
    )
@gather_metrics("column_config.DatetimeColumn")
def DatetimeColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    pinned: bool | None = None,
    default: datetime.datetime | None = None,
    format: str | Literal["localized", "distance", "calendar", "iso8601"] | None = None,
    min_value: datetime.datetime | None = None,
    max_value: datetime.datetime | None = None,
    step: int | float | datetime.timedelta | None = None,
    timezone: str | None = None,
) -> ColumnConfig:
    """Configure a datetime column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for datetime values. This command needs to be
    used in the ``column_config`` parameter of ``st.dataframe`` or
    ``st.data_editor``. When used with ``st.data_editor``, editing will be enabled
    with a datetime picker widget.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If this is ``None``
        (default), the column name is used.

    width : "small", "medium", "large", int, or None
        The display width of the column. If this is ``None`` (default), the
        column will be sized to fit the cell contents. Otherwise, this can be
        one of the following:

        - ``"small"``: 75px wide
        - ``"medium"``: 200px wide
        - ``"large"``: 400px wide
        - An integer specifying the width in pixels

        If the total width of all columns is less than the width of the
        dataframe, the remaining space will be distributed evenly among all
        columns.

    help : str or None
        A tooltip that gets displayed when hovering over the column label. If
        this is ``None`` (default), no tooltip is displayed.

        The tooltip can optionally contain GitHub-flavored Markdown, including
        the Markdown directives described in the ``body`` parameter of
        ``st.markdown``.

    disabled : bool or None
        Whether editing should be disabled for this column. If this is ``None``
        (default), Streamlit will enable editing wherever possible.

        If a column has mixed types, it may become uneditable regardless of
        ``disabled``.

    required : bool or None
        Whether edited cells in the column need to have a value. If this is
        ``False`` (default), the user can submit empty values for this column.
        If this is ``True``, an edited cell in this column can only be
        submitted if its value is not ``None``, and a new row will only be
        submitted after the user fills in this column.

    pinned : bool or None
        Whether the column is pinned. A pinned column will stay visible on the
        left side no matter where the user scrolls. If this is ``None``
        (default), Streamlit will decide: index columns are pinned, and data
        columns are not pinned.

    default : datetime.datetime or None
        Specifies the default value in this column when a new row is added by
        the user. This defaults to ``None``.

    format : str, "localized", "distance", "calendar", "iso8601", or None
        A format string controlling how datetimes are displayed.

        This can be one of the following values:

        - ``None`` (default): Show the datetime in ``"YYYY-MM-DD HH:mm:ss"``
          format (e.g. "2025-03-04 20:00:00").
        - ``"localized"``: Show the datetime in the default locale format (e.g.
          "Mar 4, 2025, 12:00:00 PM" in the America/Los_Angeles timezone).
        - ``"distance"``: Show the datetime in a relative format (e.g.
          "a few seconds ago").
        - ``"calendar"``: Show the datetime in a calendar format (e.g.
          "Today at 8:00 PM").
        - ``"iso8601"``: Show the datetime in ISO 8601 format (e.g.
          "2025-03-04T20:00:00.000Z").
        - A momentJS format string: Format the datetime with a string, like
          ``"ddd ha"`` to show "Tue 8pm". For available formats, see
          `momentJS <https://momentjs.com/docs/#/displaying/format/>`_.

        Formatting from ``column_config`` always takes precedence over
        formatting from ``pandas.Styler``. The formatting does not impact the
        return value when used in ``st.data_editor``.

    min_value : datetime.datetime or None
        The minimum datetime that can be entered. If this is ``None``
        (default), there will be no minimum.

    max_value : datetime.datetime or None
        The maximum datetime that can be entered. If this is ``None``
        (default), there will be no maximum.

    step : int, float, datetime.timedelta, or None
        The stepping interval in seconds. If this is ``None`` (default), the
        step will be 1 second.

    timezone : str or None
        The timezone of this column. If this is ``None`` (default), the
        timezone is inferred from the underlying data.

    Examples
    --------
    >>> from datetime import datetime
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "appointment": [
    >>>             datetime(2024, 2, 5, 12, 30),
    >>>             datetime(2023, 11, 10, 18, 0),
    >>>             datetime(2024, 3, 11, 20, 10),
    >>>             datetime(2023, 9, 12, 3, 0),
    >>>         ]
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "appointment": st.column_config.DatetimeColumn(
    >>>             "Appointment",
    >>>             min_value=datetime(2023, 6, 1),
    >>>             max_value=datetime(2025, 1, 1),
    >>>             format="D MMM YYYY, h:mm a",
    >>>             step=60,
    >>>         ),
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-datetime-column.streamlit.app/
        height: 300px
    """

    def to_iso(value: datetime.datetime | None) -> str | None:
        # Datetimes are sent to the frontend as ISO 8601 strings.
        return value.isoformat() if value is not None else None

    # Normalize a timedelta step to its equivalent number of seconds.
    step_seconds = step.total_seconds() if isinstance(step, datetime.timedelta) else step

    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        pinned=pinned,
        default=to_iso(default),
        type_config=DatetimeColumnConfig(
            type="datetime",
            format=format,
            min_value=to_iso(min_value),
            max_value=to_iso(max_value),
            step=step_seconds,
            timezone=timezone,
        ),
    )
@gather_metrics("column_config.TimeColumn")
def TimeColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    pinned: bool | None = None,
    default: datetime.time | None = None,
    format: str | Literal["localized", "iso8601"] | None = None,
    min_value: datetime.time | None = None,
    max_value: datetime.time | None = None,
    step: int | float | datetime.timedelta | None = None,
) -> ColumnConfig:
    """Configure a time column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for time values. This command needs to be used in
    the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. When
    used with ``st.data_editor``, editing will be enabled with a time picker widget.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If this is ``None``
        (default), the column name is used.

    width : "small", "medium", "large", int, or None
        The display width of the column. If this is ``None`` (default), the
        column will be sized to fit the cell contents. Otherwise, this can be
        one of the following:

        - ``"small"``: 75px wide
        - ``"medium"``: 200px wide
        - ``"large"``: 400px wide
        - An integer specifying the width in pixels

        If the total width of all columns is less than the width of the
        dataframe, the remaining space will be distributed evenly among all
        columns.

    help : str or None
        A tooltip that gets displayed when hovering over the column label. If
        this is ``None`` (default), no tooltip is displayed.

        The tooltip can optionally contain GitHub-flavored Markdown, including
        the Markdown directives described in the ``body`` parameter of
        ``st.markdown``.

    disabled : bool or None
        Whether editing should be disabled for this column. If this is ``None``
        (default), Streamlit will enable editing wherever possible.

        If a column has mixed types, it may become uneditable regardless of
        ``disabled``.

    required : bool or None
        Whether edited cells in the column need to have a value. If this is
        ``False`` (default), the user can submit empty values for this column.
        If this is ``True``, an edited cell in this column can only be
        submitted if its value is not ``None``, and a new row will only be
        submitted after the user fills in this column.

    pinned : bool or None
        Whether the column is pinned. A pinned column will stay visible on the
        left side no matter where the user scrolls. If this is ``None``
        (default), Streamlit will decide: index columns are pinned, and data
        columns are not pinned.

    default : datetime.time or None
        Specifies the default value in this column when a new row is added by
        the user. This defaults to ``None``.

    format : str, "localized", "iso8601", or None
        A format string controlling how times are displayed.

        This can be one of the following values:

        - ``None`` (default): Show the time in ``"HH:mm:ss"`` format (e.g.
          "20:00:00").
        - ``"localized"``: Show the time in the default locale format (e.g.
          "12:00:00 PM" in the America/Los_Angeles timezone).
        - ``"iso8601"``: Show the time in ISO 8601 format (e.g.
          "20:00:00.000Z").
        - A momentJS format string: Format the time with a string, like
          ``"ha"`` to show "8pm". For available formats, see
          `momentJS <https://momentjs.com/docs/#/displaying/format/>`_.

        Formatting from ``column_config`` always takes precedence over
        formatting from ``pandas.Styler``. The formatting does not impact the
        return value when used in ``st.data_editor``.

    min_value : datetime.time or None
        The minimum time that can be entered. If this is ``None`` (default),
        there will be no minimum.

    max_value : datetime.time or None
        The maximum time that can be entered. If this is ``None`` (default),
        there will be no maximum.

    step : int, float, datetime.timedelta, or None
        The stepping interval in seconds. If this is ``None`` (default), the
        step will be 1 second.

    Examples
    --------
    >>> from datetime import time
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "appointment": [
    >>>             time(12, 30),
    >>>             time(18, 0),
    >>>             time(9, 10),
    >>>             time(16, 25),
    >>>         ]
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "appointment": st.column_config.TimeColumn(
    >>>             "Appointment",
    >>>             min_value=time(8, 0, 0),
    >>>             max_value=time(19, 0, 0),
    >>>             format="hh:mm a",
    >>>             step=60,
    >>>         ),
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-time-column.streamlit.app/
        height: 300px
    """

    def to_iso(value: datetime.time | None) -> str | None:
        # Times are sent to the frontend as ISO 8601 strings.
        return value.isoformat() if value is not None else None

    # Normalize a timedelta step to its equivalent number of seconds.
    step_seconds = step.total_seconds() if isinstance(step, datetime.timedelta) else step

    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        pinned=pinned,
        default=to_iso(default),
        type_config=TimeColumnConfig(
            type="time",
            format=format,
            min_value=to_iso(min_value),
            max_value=to_iso(max_value),
            step=step_seconds,
        ),
    )
@gather_metrics("column_config.DateColumn")
def DateColumn(
    label: str | None = None,
    *,
    width: ColumnWidth | None = None,
    help: str | None = None,
    disabled: bool | None = None,
    required: bool | None = None,
    pinned: bool | None = None,
    default: datetime.date | None = None,
    format: str | Literal["localized", "distance", "iso8601"] | None = None,
    min_value: datetime.date | None = None,
    max_value: datetime.date | None = None,
    step: int | None = None,
) -> ColumnConfig:
    """Configure a date column in ``st.dataframe`` or ``st.data_editor``.

    This is the default column type for date values. This command needs to be used in
    the ``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``. When used
    with ``st.data_editor``, editing will be enabled with a date picker widget.

    Parameters
    ----------
    label : str or None
        The label shown at the top of the column. If this is ``None``
        (default), the column name is used.

    width : "small", "medium", "large", int, or None
        The display width of the column. If this is ``None`` (default), the
        column will be sized to fit the cell contents. Otherwise, this can be
        one of the following:

        - ``"small"``: 75px wide
        - ``"medium"``: 200px wide
        - ``"large"``: 400px wide
        - An integer specifying the width in pixels

        If the total width of all columns is less than the width of the
        dataframe, the remaining space will be distributed evenly among all
        columns.

    help : str or None
        A tooltip that gets displayed when hovering over the column label. If
        this is ``None`` (default), no tooltip is displayed.

        The tooltip can optionally contain GitHub-flavored Markdown, including
        the Markdown directives described in the ``body`` parameter of
        ``st.markdown``.

    disabled : bool or None
        Whether editing should be disabled for this column. If this is ``None``
        (default), Streamlit will enable editing wherever possible.

        If a column has mixed types, it may become uneditable regardless of
        ``disabled``.

    required : bool or None
        Whether edited cells in the column need to have a value. If this is
        ``False`` (default), the user can submit empty values for this column.
        If this is ``True``, an edited cell in this column can only be
        submitted if its value is not ``None``, and a new row will only be
        submitted after the user fills in this column.

    pinned : bool or None
        Whether the column is pinned. A pinned column will stay visible on the
        left side no matter where the user scrolls. If this is ``None``
        (default), Streamlit will decide: index columns are pinned, and data
        columns are not pinned.

    default : datetime.date or None
        Specifies the default value in this column when a new row is added by
        the user. This defaults to ``None``.

    format : str, "localized", "distance", "iso8601", or None
        A format string controlling how dates are displayed.

        This can be one of the following values:

        - ``None`` (default): Show the date in ``"YYYY-MM-DD"`` format (e.g.
          "2025-03-04").
        - ``"localized"``: Show the date in the default locale format (e.g.
          "Mar 4, 2025" in the America/Los_Angeles timezone).
        - ``"distance"``: Show the date in a relative format (e.g.
          "a few seconds ago").
        - ``"iso8601"``: Show the date in ISO 8601 format (e.g.
          "2025-03-04").
        - A momentJS format string: Format the date with a string, like
          ``"ddd, MMM Do"`` to show "Tue, Mar 4th". For available formats, see
          `momentJS <https://momentjs.com/docs/#/displaying/format/>`_.

        Formatting from ``column_config`` always takes precedence over
        formatting from ``pandas.Styler``. The formatting does not impact the
        return value when used in ``st.data_editor``.

    min_value : datetime.date or None
        The minimum date that can be entered. If this is ``None`` (default),
        there will be no minimum.

    max_value : datetime.date or None
        The maximum date that can be entered. If this is ``None`` (default),
        there will be no maximum.

    step : int or None
        The stepping interval in days. If this is ``None`` (default), the step
        will be 1 day.

    Examples
    --------
    >>> from datetime import date
    >>> import pandas as pd
    >>> import streamlit as st
    >>>
    >>> data_df = pd.DataFrame(
    >>>     {
    >>>         "birthday": [
    >>>             date(1980, 1, 1),
    >>>             date(1990, 5, 3),
    >>>             date(1974, 5, 19),
    >>>             date(2001, 8, 17),
    >>>         ]
    >>>     }
    >>> )
    >>>
    >>> st.data_editor(
    >>>     data_df,
    >>>     column_config={
    >>>         "birthday": st.column_config.DateColumn(
    >>>             "Birthday",
    >>>             min_value=date(1900, 1, 1),
    >>>             max_value=date(2005, 1, 1),
    >>>             format="DD.MM.YYYY",
    >>>             step=1,
    >>>         ),
    >>>     },
    >>>     hide_index=True,
    >>> )

    .. output::
        https://doc-date-column.streamlit.app/
        height: 300px
    """

    def to_iso(value: datetime.date | None) -> str | None:
        # Dates are sent to the frontend as ISO 8601 strings.
        return value.isoformat() if value is not None else None

    return ColumnConfig(
        label=label,
        width=width,
        help=help,
        disabled=disabled,
        required=required,
        pinned=pinned,
        default=to_iso(default),
        type_config=DateColumnConfig(
            type="date",
            format=format,
            min_value=to_iso(min_value),
            max_value=to_iso(max_value),
            step=step,
        ),
    )
@gather_metrics("column_config.ProgressColumn")
def ProgressColumn(
label: str | None = None,
*,
width: ColumnWidth | None = None,
help: str | None = None,
pinned: bool | None = None,
format: str | NumberFormat | None = None,
min_value: int | float | None = None,
max_value: int | float | None = None,
step: int | float | None = None,
color: ChartColor | None = None,
) -> ColumnConfig:
"""Configure a progress column in ``st.dataframe`` or ``st.data_editor``.
Cells need to contain a number. Progress columns are not editable at the moment.
This command needs to be used in the ``column_config`` parameter of ``st.dataframe``
or ``st.data_editor``.
Parameters
----------
label : str or None
The label shown at the top of the column. If this is ``None``
(default), the column name is used.
width : "small", "medium", "large", int, or None
The display width of the column. If this is ``None`` (default), the
column will be sized to fit the cell contents. Otherwise, this can be
one of the following:
- ``"small"``: 75px wide
- ``"medium"``: 200px wide
- ``"large"``: 400px wide
- An integer specifying the width in pixels
If the total width of all columns is less than the width of the
dataframe, the remaining space will be distributed evenly among all
columns.
help : str or None
A tooltip that gets displayed when hovering over the column label. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown, including
the Markdown directives described in the ``body`` parameter of
``st.markdown``.
format : str, "plain", "localized", "percent", "dollar", "euro", "yen", "accounting", "compact", "scientific", "engineering", or None
A format string controlling how the numbers are displayed.
This can be one of the following values:
- ``None`` (default): Streamlit infers the formatting from the data.
- ``"plain"``: Show the full number without any formatting (e.g. "1234.567").
- ``"localized"``: Show the number in the default locale format (e.g. "1,234.567").
- ``"percent"``: Show the number as a percentage (e.g. "123456.70%").
- ``"dollar"``: Show the number as a dollar amount (e.g. "$1,234.57").
- ``"euro"``: Show the number as a euro amount (e.g. "€1,234.57").
- ``"yen"``: Show the number as a yen amount (e.g. "¥1,235").
- ``"accounting"``: Show the number in an accounting format (e.g. "1,234.00").
- ``"bytes"``: Show the number in a byte format (e.g. "1.2KB").
- ``"compact"``: Show the number in a compact format (e.g. "1.2K").
- ``"scientific"``: Show the number in scientific notation (e.g. "1.235E3").
- ``"engineering"``: Show the number in engineering notation (e.g. "1.235E3").
- printf-style format string: Format the number with a printf
specifier, like ``"%d"`` to show a signed integer (e.g. "1234") or
``"%X"`` to show an unsigned hexadecimal integer (e.g. "4D2"). You
can also add prefixes and suffixes. To show British pounds, use
``"£ %.2f"`` (e.g. "£ 1234.57"). For more information, see `sprint-js
<https://github.com/alexei/sprintf.js?tab=readme-ov-file#format-specification>`_.
Number formatting from ``column_config`` always takes precedence over
number formatting from ``pandas.Styler``. The number formatting does
not impact the return value when used in ``st.data_editor``.
pinned : bool or None
Whether the column is pinned. A pinned column will stay visible on the
left side no matter where the user scrolls. If this is ``None``
(default), Streamlit will decide: index columns are pinned, and data
columns are not pinned.
min_value : int, float, or None
The minimum value of the progress bar. If this is ``None`` (default),
the minimum will be 0.
max_value : int, float, or None
The maximum value of the progress bar. If this is ``None`` (default),
the maximum will be 100 for integer values and 1.0 for float values.
step : int, float, or None
The precision of numbers. If this is ``None`` (default), integer columns
will have a step of 1 and float columns will have a step of 0.01.
Setting ``step`` for float columns will ensure a consistent number of
digits after the decimal are displayed.
color : "auto", "auto-inverse", str, or None
The color to use for the chart. This can be one of the following:
- ``None`` (default): The primary color is used.
- ``"auto"``: If the value is more than half, the bar is green; if the
value is less than half, the bar is red.
- ``"auto-inverse"``: If the value is more than half, the bar is red;
if the value is less than half, the bar is green.
- A single color value that is applied to all charts in the column.
In addition to the basic color palette (red, orange, yellow, green,
blue, violet, gray/grey, and primary), this supports hex codes like
``"#483d8b"``.
Examples
--------
>>> import pandas as pd
>>> import streamlit as st
>>>
>>> data_df = pd.DataFrame(
>>> {
>>> "sales": [200, 550, 1000, 80],
>>> }
>>> )
>>>
>>> st.data_editor(
>>> data_df,
>>> column_config={
>>> "sales": st.column_config.ProgressColumn(
>>> "Sales volume",
>>> help="The sales volume in USD",
>>> format="$%f",
>>> min_value=0,
>>> max_value=1000,
>>> ),
>>> },
>>> hide_index=True,
>>> )
.. output::
https://doc-progress-column.streamlit.app/
height: 300px
""" # noqa: E501
if color is not None:
_validate_chart_color(color)
return ColumnConfig(
label=label,
width=width,
help=help,
pinned=pinned,
type_config=ProgressColumnConfig(
type="progress",
format=format,
min_value=min_value,
max_value=max_value,
step=step,
color=color,
),
)
@gather_metrics("column_config.JsonColumn")
def JsonColumn(
label: str | None = None,
*,
width: ColumnWidth | None = None,
help: str | None = None,
pinned: bool | None = None,
) -> ColumnConfig:
"""Configure a JSON column in ``st.dataframe`` or ``st.data_editor``.
Cells need to contain JSON strings or JSON-compatible objects. JSON columns
are not editable at the moment. This command needs to be used in the
``column_config`` parameter of ``st.dataframe`` or ``st.data_editor``.
Parameters
----------
label : str or None
The label shown at the top of the column. If this is ``None``
(default), the column name is used.
width : "small", "medium", "large", int, or None
The display width of the column. If this is ``None`` (default), the
column will be sized to fit the cell contents. Otherwise, this can be
one of the following:
- ``"small"``: 75px wide
- ``"medium"``: 200px wide
- ``"large"``: 400px wide
- An integer specifying the width in pixels
If the total width of all columns is less than the width of the
dataframe, the remaining space will be distributed evenly among all
columns.
help : str or None
A tooltip that gets displayed when hovering over the column label. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown, including
the Markdown directives described in the ``body`` parameter of
``st.markdown``.
pinned : bool or None
Whether the column is pinned. A pinned column will stay visible on the
left side no matter where the user scrolls. If this is ``None``
(default), Streamlit will decide: index columns are pinned, and data
columns are not pinned.
Examples
--------
>>> import pandas as pd
>>> import streamlit as st
>>>
>>> data_df = pd.DataFrame(
>>> {
>>> "json": [
>>> {"foo": "bar", "bar": "baz"},
>>> {"foo": "baz", "bar": "qux"},
>>> {"foo": "qux", "bar": "foo"},
>>> None,
>>> ],
>>> }
>>> )
>>>
>>> st.dataframe(
>>> data_df,
>>> column_config={
>>> "json": st.column_config.JsonColumn(
>>> "JSON Data",
>>> help="JSON strings or objects",
>>> width="large",
>>> ),
>>> },
>>> hide_index=True,
>>> )
.. output::
https://doc-json-column.streamlit.app/
height: 300px
"""
return ColumnConfig(
label=label,
width=width,
help=help,
pinned=pinned,
type_config=JsonColumnConfig(type="json"),
)
| ColumnConfig |
python | scipy__scipy | benchmarks/benchmarks/sparse.py | {
"start": 14137,
"end": 15065
} | class ____(Benchmark):
param_names = ['sparse_type', 'density', 'format']
params = [
['spmatrix', 'sparray'],
[0.01, 0.1, 0.5],
['csr', 'csc', 'coo', 'lil', 'dok', 'dia'],
]
def setup(self, sparse_type, density, format):
n = 1000
if format == 'dok' and n * density >= 500:
raise NotImplementedError()
warnings.simplefilter('ignore', sparse.SparseEfficiencyWarning)
if sparse_type == "sparray":
self.X = sparse.random_array((n, n), format=format, density=density)
else:
self.X = sparse.random(n, n, format=format, density=density)
def time_diagonal(self, sparse_type, density, format):
self.X.diagonal()
# Retain old benchmark results (remove this if changing the benchmark)
time_diagonal.version = (
"d84f53fdc6abc208136c8ce48ca156370f6803562f6908eb6bd1424f50310cf1"
)
| Diagonal |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_C.py | {
"start": 1652,
"end": 3282
} | class ____(Benchmark):
r"""
Chichinadze objective function.
This class defines the Chichinadze [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Chichinadze}}(x) = x_{1}^{2} - 12 x_{1}
+ 8 \sin\left(\frac{5}{2} \pi x_{1}\right)
+ 10 \cos\left(\frac{1}{2} \pi x_{1}\right) + 11
- 0.2 \frac{\sqrt{5}}{e^{\frac{1}{2} \left(x_{2} -0.5 \right)^{2}}}
with :math:`x_i \in [-30, 30]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -42.94438701899098` for :math:`x =
[6.189866586965680, 0.5]`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO: Jamil#33 has a dividing factor of 2 in the sin term. However, f(x)
for the given solution does not give the global minimum. i.e. the equation
is at odds with the solution.
Only by removing the dividing factor of 2, i.e. `8 * sin(5 * pi * x[0])`
does the given solution result in the given global minimum.
Do we keep the result or equation?
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-30.0] * self.N, [30.0] * self.N))
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [[6.189866586965680, 0.5]]
self.fglob = -42.94438701899098
def fun(self, x, *args):
self.nfev += 1
return (x[0] ** 2 - 12 * x[0] + 11 + 10 * cos(pi * x[0] / 2)
+ 8 * sin(5 * pi * x[0] / 2)
- 1.0 / sqrt(5) * exp(-((x[1] - 0.5) ** 2) / 2))
| Chichinadze |
python | python-poetry__poetry | src/poetry/mixology/result.py | {
"start": 212,
"end": 676
} | class ____:
def __init__(
self,
root: ProjectPackage,
packages: list[Package],
attempted_solutions: int,
) -> None:
self._root = root
self._packages = packages
self._attempted_solutions = attempted_solutions
@property
def packages(self) -> list[Package]:
return self._packages
@property
def attempted_solutions(self) -> int:
return self._attempted_solutions
| SolverResult |
python | plotly__plotly.py | plotly/graph_objs/icicle/hoverlabel/_font.py | {
"start": 233,
"end": 17138
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "icicle.hoverlabel"
_path_str = "icicle.hoverlabel.font"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.icicle.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.icicle.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.icicle.hoverlabel.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | fluentpython__example-code | 14-it-generator/aritprog_v0.py | {
"start": 129,
"end": 529
} | class ____:
def __init__(self, begin, step, end=None):
self.begin = begin
self.step = step
self.end = end # None -> "infinite" series
def __iter__(self):
result = type(self.begin + self.step)(self.begin)
forever = self.end is None
while forever or result < self.end:
yield result
result += self.step
| ArithmeticProgression |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 217435,
"end": 219251
} | class ____:
def spcreator(self, D, *args, sorted_indices=False, **kwargs):
"""Replace D with a non-canonical equivalent: containing
duplicate elements and explicit zeros"""
construct = super().spcreator
M = construct(D, *args, **kwargs)
zero_pos = (M.toarray() == 0).nonzero()
has_zeros = (zero_pos[0].size > 0)
if has_zeros:
k = zero_pos[0].size//2
with warnings.catch_warnings():
warnings.filterwarnings("ignore", WMSG, SparseEfficiencyWarning)
M = self._insert_explicit_zero(M, zero_pos[0][k], zero_pos[1][k])
arg1 = self._arg1_for_noncanonical(M, sorted_indices)
if 'shape' not in kwargs:
kwargs['shape'] = M.shape
NC = construct(arg1, **kwargs)
# check that result is valid
if NC.dtype in [np.float32, np.complex64]:
# For single-precision floats, the differences between M and NC
# that are introduced by the extra operations involved in the
# construction of NC necessitate a more lenient tolerance level
# than the default.
rtol = 1e-05
else:
rtol = 1e-07
assert_allclose(NC.toarray(), M.toarray(), rtol=rtol)
# check that at least one explicit zero
if has_zeros:
assert_((NC.data == 0).any())
# TODO check that NC has duplicates (which are not explicit zeros)
return NC
@pytest.mark.skip(reason='bool(matrix) counts explicit zeros')
def test_bool(self):
pass
@pytest.mark.skip(reason='getnnz-axis counts explicit zeros')
def test_getnnz_axis(self):
pass
@pytest.mark.skip(reason='nnz counts explicit zeros')
def test_empty(self):
pass
| _NonCanonicalMixin |
python | tensorflow__tensorflow | tensorflow/python/distribute/sharded_variable_test.py | {
"start": 3802,
"end": 5134
} | class ____(test.TestCase):
def test_fixed_shards_partitioner(self):
partitioner = sharded_variable.FixedShardsPartitioner(num_shards=2)
got = partitioner(tensor_shape.TensorShape([10, 3]), dtypes.float32)
self.assertAllEqual(got, [2, 1])
def test_min_size_partitioner(self):
partitioner = sharded_variable.MinSizePartitioner(
min_shard_bytes=4, max_shards=2)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [2, 1])
partitioner = sharded_variable.MinSizePartitioner(
min_shard_bytes=4, max_shards=10)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [6, 1])
def test_max_size_partitioner(self):
partitioner = sharded_variable.MaxSizePartitioner(max_shard_bytes=4)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [6, 1])
partitioner = sharded_variable.MaxSizePartitioner(
max_shard_bytes=4, max_shards=2)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [2, 1])
partitioner = sharded_variable.MaxSizePartitioner(max_shard_bytes=1024)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [1, 1])
| PartitionerTest |
python | pytorch__pytorch | torch/testing/_internal/common_utils.py | {
"start": 118573,
"end": 118655
} | class ____(UnittestPair):
CLS = (str, bytes)
TYPE_NAME = "string"
| StringPair |
python | django-haystack__django-haystack | test_haystack/elasticsearch5_tests/test_backend.py | {
"start": 57034,
"end": 58220
} | class ____(TestCase):
fixtures = ["bulk_data.json"]
def setUp(self):
super().setUp()
# Wipe it clean.
clear_elasticsearch_index()
# Stow.
self.old_ui = connections["elasticsearch"].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = Elasticsearch5MockModelSearchIndex()
self.sammi = Elasticsearch5AnotherMockModelSearchIndex()
self.ui.build(indexes=[self.smmi, self.sammi])
connections["elasticsearch"]._index = self.ui
self.sqs = SearchQuerySet("elasticsearch")
self.smmi.update(using="elasticsearch")
self.sammi.update(using="elasticsearch")
def tearDown(self):
# Restore.
connections["elasticsearch"]._index = self.old_ui
super().tearDown()
def test_pickling(self):
results = self.sqs.all()
for res in results:
# Make sure the cache is full.
pass
in_a_pickle = pickle.dumps(results)
like_a_cuke = pickle.loads(in_a_pickle)
self.assertEqual(len(like_a_cuke), len(results))
self.assertEqual(like_a_cuke[0].id, results[0].id)
| LiveElasticsearch5PickleTestCase |
python | google__pytype | pytype/pytd/type_match.py | {
"start": 2416,
"end": 21702
} | class ____(pytd_utils.TypeMatcher):
"""Class for matching types against other types."""
def __init__(self, direct_subclasses=None, any_also_is_bottom=True):
"""Construct.
Args:
direct_subclasses: A dictionary, mapping pytd.Type to lists of pytd.Type.
any_also_is_bottom: Whether we should, (if True) consider
pytd.AnythingType() to also be at the bottom of the type hierarchy, thus
making it a subclass of everything, or (if False) to be only at the top.
"""
self.direct_subclasses = direct_subclasses or {}
self.any_also_is_bottom = any_also_is_bottom
self.solver = booleq.Solver()
self._implications = {}
def default_match(self, t1, t2, *unused_args, **unused_kwargs):
# Don't allow pytd_utils.TypeMatcher to do default matching.
raise AssertionError(
f"Can't compare {type(t1).__name__} and {type(t2).__name__}"
)
def get_superclasses(self, t):
"""Get all base classes of this type.
Args:
t: A pytd.Type
Returns:
A list of pytd.Type.
"""
if isinstance(t, pytd.ClassType):
return sum((self.get_superclasses(c) for c in t.cls.bases), [t])
elif isinstance(t, pytd.AnythingType):
# All types, even "?", inherit from object.
return [pytd.NamedType("builtins.object")]
elif isinstance(t, pytd.GenericType):
return self.get_superclasses(t.base_type)
else:
log.warning("Can't extract superclasses from %s", type(t))
return [pytd.NamedType("builtins.object")]
def get_subclasses(self, t):
"""Get all classes derived from this type.
Args:
t: A pytd.Type
Returns:
A list of pytd.Type.
"""
if isinstance(t, pytd.ClassType):
subclasses = self.direct_subclasses.get(t, [])
return sum(
(self.get_subclasses(pytd.ClassType(c.name, c)) for c in subclasses),
[t],
)
else:
raise NotImplementedError(f"Can't extract subclasses from {type(t)}")
def type_parameter(
self,
unknown: _UnknownType,
base_class: pytd.Class,
item: pytd.TemplateItem,
) -> StrictType:
"""This generates the type parameter when matching against a generic type.
For example, when we match ~unknown1 against list[T], we need an additional
type to model the T in "~unknown1[T]". This type would have the name
"~unknown1.list.T".
Args:
unknown: An unknown type. This is the type that's matched against
base_class[T].
base_class: The base class of the generic we're matching the unknown
against. E.g. "list".
item: The actual type parameter. ("T" in the examples above).
Returns:
A type (pytd.Node) to represent this type parameter.
"""
assert is_unknown(unknown)
name = unknown.name + "." + base_class.name + "." + item.type_param.name
# We do *not* consider subclasses or superclasses when matching type
# parameters.
# So for example, if we pass list[int] to f(x: list[T]), we assume that
# T can only be "int", not "int + object". This might be considered
# incorrect, but typically gives us more intuitive results.
# Note that this only happens if we match ~unknown against generic types,
# not for matching of "known" types against each other.
return StrictType(name)
def _get_parameters(self, t1, t2):
if isinstance(t1, pytd.TupleType) and isinstance(t2, pytd.TupleType):
# No change needed; the parameters will be compared element-wise.
return t1.parameters, t2.parameters
elif isinstance(t2, pytd.TupleType):
# Since we call _get_parameters after confirming that t1 and t2 have
# compatible base types, t1 is a homogeneous tuple here.
return (t1.element_type,) * len(t2.parameters), t2.parameters
elif isinstance(t1, pytd.TupleType):
return (pytd_utils.JoinTypes(t1.parameters),), t2.parameters
elif isinstance(t1, pytd.CallableType) and isinstance(
t2, pytd.CallableType
):
# Flip the arguments, since argument types are contravariant.
return t2.args + (t1.ret,), t1.args + (t2.ret,)
elif (
t1.base_type.cls.name == "builtins.type"
and t2.base_type.cls.name == "typing.Callable"
):
# We'll only check the return type, since getting the argument types for
# initializing a class is tricky.
return t1.parameters, (t2.parameters[-1],)
elif (
t1.base_type.cls.name == "typing.Callable"
and t2.base_type.cls.name == "builtins.type"
):
return (t1.parameters[-1],), t2.parameters
elif isinstance(t1, pytd.CallableType):
# We're matching against GenericType(Callable, (Any, _RET)), so we don't
# need the argument types.
return (pytd.AnythingType(), t1.ret), t2.parameters
elif isinstance(t2, pytd.CallableType):
return t1.parameters, (pytd.AnythingType(), t2.ret)
else:
num_extra_params = len(t1.parameters) - len(t2.parameters)
# Matching, e.g., Dict[str, int] against Iterable[K] is legitimate.
assert num_extra_params >= 0, (
t1.base_type.cls.name,
t2.base_type.cls.name,
)
t2_parameters = t2.parameters + (pytd.AnythingType(),) * num_extra_params
return t1.parameters, t2_parameters
def match_Generic_against_Generic( # pylint: disable=invalid-name
self,
t1: pytd.GenericType,
t2: pytd.GenericType,
subst: _SubstType,
) -> booleq.BooleanTerm:
"""Match a pytd.GenericType against another pytd.GenericType."""
assert isinstance(t1.base_type, pytd.ClassType), type(t1.base_type)
assert isinstance(t2.base_type, pytd.ClassType), type(t2.base_type)
base1 = pytd.ClassType(t1.base_type.cls.name, t1.base_type.cls)
base2 = pytd.ClassType(t2.base_type.cls.name, t2.base_type.cls)
base_type_cmp = self.match_type_against_type(base1, base2, subst)
if base_type_cmp is booleq.FALSE:
return booleq.FALSE
t1_parameters, t2_parameters = self._get_parameters(t1, t2)
if len(t1_parameters) != len(t2_parameters):
return booleq.FALSE
# Type parameters are covariant:
# E.g. passing list[int] as argument for list[object] succeeds.
param_cmp = [
self.match_type_against_type(p1, p2, subst)
for p1, p2 in zip(t1_parameters, t2_parameters)
]
return booleq.And([base_type_cmp] + param_cmp)
def match_Unknown_against_Generic( # pylint: disable=invalid-name
self, t1: _UnknownType, t2: pytd.GenericType, subst: _SubstType
) -> booleq.BooleanTerm:
assert isinstance(t2.base_type, pytd.ClassType)
# No inheritance for base classes - you can only inherit from an
# instantiated template, but not from a template itself.
base_match = booleq.Eq(t1.name, t2.base_type.cls.name)
type_params = [
self.type_parameter(t1, t2.base_type.cls, item)
for item in t2.base_type.cls.template
]
for type_param in type_params:
self.solver.register_variable(type_param.name)
if isinstance(t2, pytd.TupleType):
t2_parameters = (pytd_utils.JoinTypes(t2.parameters),)
else:
t2_parameters = t2.parameters
params = [
self.match_type_against_type(p1, p2, subst)
for p1, p2 in zip(type_params, t2_parameters)
]
return booleq.And([base_match] + params)
def match_Generic_against_Unknown(self, t1, t2, subst): # pylint: disable=invalid-name
# Note: This flips p1 and p2 above.
return self.match_Unknown_against_Generic(t2, t1, subst) # pylint: disable=arguments-out-of-order
def maybe_lookup_type_param(self, t, subst):
while isinstance(t, pytd.TypeParameter):
# We can only have type parameters in a class, and if so, we should have
# added them to the type parameter substitution map (subst) beforehand:
assert t in subst
if subst[t] is None:
# Function type parameter. Can be anything.
t = pytd.AnythingType()
else:
assert subst[t] != t, "Cyclic type parameter."
t = subst[t]
return t
def unclass(self, t):
"""Prevent further subclass or superclass expansion for this type."""
if isinstance(t, pytd.ClassType):
# When t.name and t.cls.name differ (e.g., int vs. builtins.int), the
# latter is the complete name.
return pytd.NamedType(t.cls.name)
else:
return t
def expand_superclasses(self, t):
class_and_superclasses = self.get_superclasses(t)
return [self.unclass(t) for t in class_and_superclasses]
def expand_subclasses(self, t):
class_and_subclasses = self.get_subclasses(t)
return [self.unclass(t) for t in class_and_subclasses]
def match_type_against_type(self, t1, t2, subst):
types = (t1, t2, frozenset(subst.items()))
if types in self._implications:
return self._implications[types]
implication = self._implications[types] = self._match_type_against_type(
t1, t2, subst
)
return implication
def _full_name(self, t):
return t.name
def _match_type_against_type(self, t1, t2, subst):
"""Match a pytd.Type against another pytd.Type."""
t1 = self.maybe_lookup_type_param(t1, subst)
t2 = self.maybe_lookup_type_param(t2, subst)
# TODO(b/159058933): Use utils:TypeMatcher to simplify this?
if isinstance(t2, pytd.AnythingType):
# We can match anything against AnythingType. (It's like top)
return booleq.TRUE
elif isinstance(t1, pytd.AnythingType):
if self.any_also_is_bottom:
# We can match AnythingType against everything. (It's like bottom)
return booleq.TRUE
else:
return booleq.FALSE
elif isinstance(t1, pytd.NothingType):
# nothing as an actual type matches against everything, since it
# represents an empty value.
return booleq.TRUE
elif isinstance(t2, pytd.NothingType):
# We can't match anything against nothing as an expected type (except
# nothing itself, above).
return booleq.FALSE
elif isinstance(t1, pytd.UnionType):
return booleq.And(
self.match_type_against_type(u, t2, subst) for u in t1.type_list
)
elif isinstance(t2, pytd.UnionType):
return booleq.Or(
self.match_type_against_type(t1, u, subst) for u in t2.type_list
)
elif (
isinstance(t1, pytd.ClassType)
and isinstance(t2, StrictType)
or isinstance(t1, StrictType)
and isinstance(t2, pytd.ClassType)
):
# For strict types, avoid subclasses of the left side.
return booleq.Eq(self._full_name(t1), self._full_name(t2))
elif isinstance(t1, pytd.ClassType) and t2.name == "builtins.object":
return booleq.TRUE
elif t1.name in ("builtins.type", "typing.Callable") and t2.name in (
"builtins.type",
"typing.Callable",
):
return booleq.TRUE
elif isinstance(t1, pytd.ClassType):
# ClassTypes are similar to Unions, except they're disjunctions: We can
# match the type or any of its base classes against the formal parameter.
return booleq.Or(
self.match_type_against_type(t, t2, subst)
for t in self.expand_superclasses(t1)
)
elif isinstance(t2, pytd.ClassType):
# ClassTypes on the right are exactly like Unions: We can match against
# this type or any of its subclasses.
return booleq.Or(
self.match_type_against_type(t1, t, subst)
for t in self.expand_subclasses(t2)
)
assert not isinstance(t1, pytd.ClassType)
assert not isinstance(t2, pytd.ClassType)
if is_unknown(t1) and isinstance(t2, pytd.GenericType):
return self.match_Unknown_against_Generic(t1, t2, subst)
elif isinstance(t1, pytd.GenericType) and is_unknown(t2):
return self.match_Generic_against_Unknown(t1, t2, subst)
elif isinstance(t1, pytd.GenericType) and isinstance(t2, pytd.GenericType):
return self.match_Generic_against_Generic(t1, t2, subst)
elif isinstance(t1, pytd.GenericType):
# E.g. list[...] matches against list, or even object.
return self.match_type_against_type(t1.base_type, t2, subst)
elif isinstance(t2, pytd.GenericType):
if self.any_also_is_bottom:
# E.g. list (a.k.a. list[Any]) matches against list[str]
return self.match_type_against_type(t1, t2.base_type, subst)
else:
return booleq.FALSE
elif is_unknown(t1) and is_unknown(t2):
return booleq.Eq(t1.name, t2.name)
elif isinstance(t1, (pytd.NamedType, StrictType)) and isinstance(
t2, (pytd.NamedType, StrictType)
):
if is_complete(t1) and is_complete(t2) and t1.name != t2.name:
# Optimization: If we know these two can never be equal, just return
# false right away.
return booleq.FALSE
else:
return booleq.Eq(t1.name, t2.name)
elif isinstance(t1, pytd.NamedType) and isinstance(t2, pytd.Literal):
return booleq.FALSE
elif isinstance(t1, pytd.LateType) or isinstance(t2, pytd.LateType):
# Unresolved types never match against anything.
return booleq.FALSE
elif isinstance(t1, pytd.Literal) and isinstance(t2, pytd.Literal):
return booleq.TRUE if t1.value == t2.value else booleq.FALSE
else:
raise AssertionError(
f"Don't know how to match {type(t1)} against {type(t2)}"
)
# pylint: disable=invalid-name
def match_Signature_against_Signature(
self, sig1, sig2, subst, skip_self=False
):
"""Match a pytd.Signature against another pytd.Signature.
Args:
sig1: The caller
sig2: The callee
subst: Current type parameters.
skip_self: If True, doesn't compare the first parameter, which is
considered (and verified) to be "self".
Returns:
An instance of booleq.BooleanTerm, i.e. a boolean formula.
"""
# Signatures have type parameters, too. We ignore them, since they can
# be anything. (See maybe_lookup_type_param())
subst.update({p.type_param: None for p in sig1.template + sig2.template})
params1 = sig1.params
params2 = sig2.params
if skip_self:
# Methods in an ~unknown need to declare their methods with "self"
assert params1 and params1[0].name == "self"
params1 = params1[1:]
if params2 and params2[0].name == "self":
params2 = params2[1:]
equalities = []
if len(params1) > len(params2) and not sig2.has_optional:
return booleq.FALSE # extra parameters
if sig1.starargs is not None and sig2.starargs is not None:
equalities.append(
self.match_type_against_type(
sig1.starargs.type, sig2.starargs.type, subst
)
)
if sig1.starstarargs is not None and sig2.starstarargs is not None:
equalities.append(
self.match_type_against_type(
sig1.starstarargs.type, sig2.starstarargs.type, subst
)
)
# TODO(b/159058933): Handle kwonly parameters (on either side). Presumably,
# a kwonly on the left side means that it was a keyword param.
for p1, p2 in zip(params1, params2):
if p1.optional and not p2.optional:
return booleq.FALSE
for i, p2 in enumerate(params2):
if i >= len(params1):
if not p2.optional:
return booleq.FALSE # missing parameter
else:
pass
else:
p1 = params1[i]
if p1.name != p2.name and not (
pytd_utils.ANON_PARAM.match(p1.name)
or pytd_utils.ANON_PARAM.match(p2.name)
):
return booleq.FALSE
equalities.append(self.match_type_against_type(p1.type, p2.type, subst))
equalities.append(
self.match_type_against_type(sig1.return_type, sig2.return_type, subst)
)
return booleq.And(equalities)
def match_Signature_against_Function(self, sig, f, subst, skip_self=False): # pylint: disable=invalid-name
def make_or(inner):
return booleq.Or(
self.match_Signature_against_Signature(inner, s, subst, skip_self)
for s in f.signatures
)
return booleq.And(make_or(inner) for inner in visitors.ExpandSignature(sig))
def match_Function_against_Function(self, f1, f2, subst, skip_self=False): # pylint: disable=invalid-name
return booleq.And(
self.match_Signature_against_Function(s1, f2, subst, skip_self)
for s1 in f1.signatures
)
def match_Function_against_Class(self, f1, cls2, subst, cache):
cls2_methods = cache.get(id(cls2))
if cls2_methods is None:
cls2_methods = cache[id(cls2)] = {f.name: f for f in cls2.methods}
if f1.name not in cls2_methods:
# The class itself doesn't have this method, but base classes might.
# TODO(b/159058933): This should do MRO order, not depth-first.
for base in cls2.bases:
if isinstance(base, pytd.AnythingType):
# AnythingType can contain any method. However, that would mean that
# a class that inherits from AnythingType contains any method
# imaginable, and hence is a match for anything. To prevent the bad
# results caused by that, return FALSE here.
return booleq.FALSE
elif isinstance(base, (pytd.ClassType, pytd.GenericType)):
if isinstance(base, pytd.ClassType):
cls = base.cls
values = tuple(pytd.AnythingType() for _ in cls.template)
elif isinstance(base, pytd.TupleType):
cls = base.base_type.cls
values = (pytd_utils.JoinTypes(base.parameters),)
else:
cls = base.base_type.cls
values = base.parameters
if values:
subst = subst.copy()
for param, value in zip(cls.template, values):
subst[param.type_param] = value
implication = self.match_Function_against_Class(f1, cls, subst, cache)
if implication is not booleq.FALSE:
return implication
else:
# Funky types like UnionType are hard to match against (and shouldn't
# appear as a base class) so we treat them as catch-all.
log.warning(
"Assuming that %s has method %s", pytd_utils.Print(base), f1.name
)
return booleq.TRUE
return booleq.FALSE
else:
f2 = cls2_methods[f1.name]
return self.match_Function_against_Function(f1, f2, subst, skip_self=True)
def match_Class_against_Class(self, cls1, cls2, subst): # pylint: disable=invalid-name
"""Match a pytd.Class against another pytd.Class."""
return self.match_Functions_against_Class(cls1.methods, cls2, subst)
def match_Protocol_against_Unknown(self, protocol, unknown, subst): # pylint: disable=invalid-name
"""Match a typing.Protocol against an unknown class."""
filtered_methods = [f for f in protocol.methods if f.is_abstract]
return self.match_Functions_against_Class(filtered_methods, unknown, subst)
def match_Functions_against_Class(self, methods, cls2, subst):
implications = []
cache = {}
for f1 in methods:
implication = self.match_Function_against_Class(f1, cls2, subst, cache)
implications.append(implication)
if implication is booleq.FALSE:
break
# TODO(b/159058933): class attributes
return booleq.And(implications)
| TypeMatch |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_doc_building.py | {
"start": 8073,
"end": 12769
} | class ____(TestCase):
"""Test build command creation."""
def test_command_env(self):
"""Test build command env vars."""
env = {"FOOBAR": "foobar", "BIN_PATH": "foobar"}
cmd = BuildCommand("echo", environment=env)
for key in list(env.keys()):
self.assertEqual(cmd._environment[key], env[key])
def test_result(self):
"""Test result of output using unix true/false commands."""
cmd = BuildCommand("true")
cmd.run()
self.assertTrue(cmd.successful)
cmd = BuildCommand("false")
cmd.run()
self.assertTrue(cmd.failed)
def test_missing_command(self):
"""Test missing command."""
path = os.path.join("non-existant", str(uuid.uuid4()))
self.assertFalse(os.path.exists(path))
cmd = BuildCommand(path)
cmd.run()
self.assertEqual(cmd.exit_code, -1)
# There is no stacktrace here.
self.assertIsNone(cmd.output)
self.assertIsNone(cmd.error)
def test_output(self):
"""Test output command."""
project = APIProject(**get(Project).__dict__)
api_client = mock.MagicMock()
build_env = LocalBuildEnvironment(
project=project,
build={
"id": 1,
},
api_client=api_client,
)
cmd = BuildCommand(["/bin/bash", "-c", "echo -n FOOBAR"], build_env=build_env)
# Mock BuildCommand.sanitized_output just to count the amount of calls,
# but use the original method to behaves as real
original_sanitized_output = cmd.sanitize_output
with patch(
"readthedocs.doc_builder.environments.BuildCommand.sanitize_output"
) as sanitize_output: # noqa
sanitize_output.side_effect = original_sanitized_output
cmd.run()
cmd.save(api_client=api_client)
self.assertEqual(cmd.output, "FOOBAR")
api_client.command.post.assert_called_once_with(
{
"build": mock.ANY,
"command": "/bin/bash -c echo -n FOOBAR",
"output": "FOOBAR",
"exit_code": 0,
"start_time": mock.ANY,
"end_time": mock.ANY,
}
)
# Check that we sanitize the output
self.assertEqual(sanitize_output.call_count, 1)
def test_error_output(self):
"""Test error output from command."""
cmd = BuildCommand(["/bin/bash", "-c", "echo -n FOOBAR 1>&2"])
cmd.run()
self.assertEqual(cmd.output, "FOOBAR")
self.assertEqual(cmd.error, "")
def test_sanitize_output(self):
cmd = BuildCommand(["/bin/bash", "-c", "echo"])
checks = (
("Hola", "Hola"),
("H\x00i", "Hi"),
("H\x00i \x00\x00\x00You!\x00", "Hi You!"),
)
for output, sanitized in checks:
self.assertEqual(cmd.sanitize_output(output), sanitized)
def test_obfuscate_output_private_variables(self):
build_env = mock.MagicMock()
build_env.project = mock.MagicMock()
build_env.project._environment_variables = mock.MagicMock()
build_env.project._environment_variables.items.return_value = [
(
"PUBLIC",
{
"public": True,
"value": "public-value",
},
),
(
"PRIVATE",
{
"public": False,
"value": "private-value",
},
),
]
cmd = BuildCommand(["/bin/bash", "-c", "echo"], build_env=build_env)
checks = (
("public-value", "public-value"),
("private-value", "priv****"),
)
for output, sanitized in checks:
self.assertEqual(cmd.sanitize_output(output), sanitized)
@patch("subprocess.Popen")
def test_unicode_output(self, mock_subprocess):
"""Unicode output from command."""
mock_process = Mock(
**{
"communicate.return_value": (SAMPLE_UTF8_BYTES, b""),
}
)
mock_subprocess.return_value = mock_process
cmd = BuildCommand(["echo", "test"], cwd="/tmp/foobar")
cmd.run()
self.assertEqual(
cmd.output,
"H\xe9r\xc9 \xee\xdf s\xf6m\xea \xfcn\xef\xe7\xf3\u2202\xe9",
)
# TODO: translate this tests once we have DockerBuildEnvironment properly
# mocked. These can be done together with `TestDockerBuildEnvironment`.
@pytest.mark.skip
| TestBuildCommand |
python | tensorflow__tensorflow | tensorflow/python/training/monitored_session_test.py | {
"start": 26673,
"end": 31845
} | class ____(test.TestCase):
"""_CoordinatedSession tests."""
@test_util.run_deprecated_v1
def test_properties(self):
with self.cached_session() as sess:
constant_op.constant(0.0)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEqual(sess.graph, coord_sess.graph)
self.assertEqual(sess.sess_str, coord_sess.sess_str)
@test_util.run_deprecated_v1
def test_run(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))
@test_util.run_deprecated_v1
def test_should_stop_on_close(self):
with self.cached_session() as sess:
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord_sess.close()
self.assertTrue(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_should_stop_on_coord_stop(self):
with self.cached_session() as sess:
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord.request_stop()
self.assertTrue(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_dont_request_stop_on_exception_in_main_thread(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
self.assertEqual(0, coord_sess.run(c))
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
with self.assertRaisesRegex(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
self.assertFalse(coord.should_stop())
self.assertFalse(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_stop_threads_on_close_after_exception(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
threads = [
threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(0, coord_sess.run(c))
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
for t in threads:
self.assertTrue(t.is_alive())
with self.assertRaisesRegex(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
def test_stop_threads_on_close(self):
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = [
threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_propagates_exception_trace(self):
assertion = control_flow_assert.Assert(False, ['This should fail.'])
with self.cached_session() as sess:
coord = coordinator.Coordinator(clean_stop_exception_types=())
coord_sess = monitored_session._CoordinatedSession(sess, coord)
try:
coord_sess.run([assertion])
self.fail('No exception was raised by assertion.')
except errors_impl.InvalidArgumentError:
# Extract the name of the file where the exception was first raised.
_, _, exc_traceback = sys.exc_info()
tb = traceback.extract_tb(exc_traceback)
exc_source_file = tb[-1][0]
exc_source_basename = os.path.basename(exc_source_file)
# If it's monitored_session.py then the original stack trace was not
# correctly propagated.
self.assertIn(
exc_source_basename, ['session.py', 'monitored_session.py'],
'The exception was raised from an unrecognized file. This unit '
'test probably needs to be updated. Traceback:\n%s\n' % tb)
self.assertEqual(
exc_source_basename, 'session.py',
'Original stack trace was not propagated by MonitoredSession. '
'Traceback:\n%s' % tb)
| CoordinatedSessionTest |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-bedrock/llama_index/llms/bedrock/utils.py | {
"start": 5796,
"end": 6387
} | class ____(ABC):
@property
@abstractmethod
def max_tokens_key(self) -> str: ...
@abstractmethod
def get_text_from_response(self, response: dict) -> str: ...
def get_text_from_stream_response(self, response: dict) -> str:
return self.get_text_from_response(response)
def get_request_body(self, prompt: str, inference_parameters: dict) -> dict:
return {"prompt": prompt, **inference_parameters}
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None
completion_to_prompt: Optional[Callable[[str], str]] = None
| Provider |
python | pytorch__pytorch | torch/nn/modules/pooling.py | {
"start": 58131,
"end": 59634
} | class ____(_AdaptiveAvgPoolNd):
r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes.
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H.
H and W can be either a ``int``, or ``None`` which means the size will
be the same as that of the input.
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
- Output: :math:`(N, C, S_{0}, S_{1})` or :math:`(C, S_{0}, S_{1})`, where
:math:`S=\text{output\_size}`.
Examples:
>>> # target output size of 5x7
>>> m = nn.AdaptiveAvgPool2d((5, 7))
>>> input = torch.randn(1, 64, 8, 9)
>>> output = m(input)
>>> # target output size of 7x7 (square)
>>> m = nn.AdaptiveAvgPool2d(7)
>>> input = torch.randn(1, 64, 10, 9)
>>> output = m(input)
>>> # target output size of 10x7
>>> m = nn.AdaptiveAvgPool2d((None, 7))
>>> input = torch.randn(1, 64, 10, 9)
>>> output = m(input)
"""
output_size: _size_2_opt_t
def forward(self, input: Tensor) -> Tensor:
"""Runs the forward pass."""
return F.adaptive_avg_pool2d(input, self.output_size)
| AdaptiveAvgPool2d |
python | scikit-learn__scikit-learn | sklearn/externals/array_api_compat/common/_linalg.py | {
"start": 930,
"end": 1001
} | class ____(NamedTuple):
sign: Array
logabsdet: Array
| SlogdetResult |
python | pypa__pipenv | pipenv/patched/pip/_internal/models/direct_url.py | {
"start": 426,
"end": 1804
} | class ____(Exception):
pass
def _get(
d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None
) -> Optional[T]:
"""Get value from dictionary and verify expected type."""
if key not in d:
return default
value = d[key]
if not isinstance(value, expected_type):
raise DirectUrlValidationError(
f"{value!r} has unexpected type for {key} (expected {expected_type})"
)
return value
def _get_required(
d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None
) -> T:
value = _get(d, expected_type, key, default)
if value is None:
raise DirectUrlValidationError(f"{key} must have a value")
return value
def _exactly_one_of(infos: Iterable[Optional["InfoType"]]) -> "InfoType":
infos = [info for info in infos if info is not None]
if not infos:
raise DirectUrlValidationError(
"missing one of archive_info, dir_info, vcs_info"
)
if len(infos) > 1:
raise DirectUrlValidationError(
"more than one of archive_info, dir_info, vcs_info"
)
assert infos[0] is not None
return infos[0]
def _filter_none(**kwargs: Any) -> Dict[str, Any]:
"""Make dict excluding None values."""
return {k: v for k, v in kwargs.items() if v is not None}
@dataclass
| DirectUrlValidationError |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_default_format12.py | {
"start": 315,
"end": 2268
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("default_format12.xlsx")
def test_create_file(self):
"""Test the creation of a file with user defined default format"""
workbook = Workbook(
self.got_filename,
{
"default_format_properties": {"font_name": "Arial", "font_size": 14},
"default_row_height": 24,
"default_column_width": 96,
},
)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.png", {"x_offset": 32})
# Set user column width and row height to test positioning calculation.
# The column width is the default in this font.
worksheet.set_row_pixels(8, 32)
# Set column to text column width less than 1 character.
worksheet.set_column_pixels(6, 6, 10)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_character_units(self):
"""Test the creation of a file with user defined default format"""
# Same as
workbook = Workbook(
self.got_filename,
{
"default_format_properties": {"font_name": "Arial", "font_size": 14},
"default_row_height": 24,
"default_column_width": 96,
},
)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.png", {"x_offset": 32})
# Set user column width and row height to test positioning calculation.
# The column width is the default in this font.
worksheet.set_row(8, 24.0)
# Set column to text column width less than 1 character.
worksheet.set_column(6, 6, 0.56)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | ansible__ansible | test/integration/targets/task-esoterica/action_plugins/echo.py | {
"start": 84,
"end": 210
} | class ____(ActionBase):
def run(self, tmp=None, task_vars=None):
return dict(action_args=self._task.args)
| ActionModule |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-oci-data-science/llama_index/llms/oci_data_science/client.py | {
"start": 12081,
"end": 20230
} | class ____(BaseClient):
"""
Synchronous HTTP client for invoking models with support for request and streaming APIs.
"""
def __init__(self, *args, **kwargs) -> None:
"""
Initialize the Client.
Args:
*args: Positional arguments forwarded to BaseClient.
**kwargs: Keyword arguments forwarded to BaseClient.
"""
super().__init__(*args, **kwargs)
self._client = httpx.Client(timeout=self.timeout)
def is_closed(self) -> bool:
return self._client.is_closed
def close(self) -> None:
"""Close the underlying HTTPX client."""
self._client.close()
def __enter__(self: _T) -> _T: # noqa: PYI019
return self
def __exit__(
self,
exc_type: Optional[type[BaseException]] = None,
exc: Optional[BaseException] = None,
exc_tb: Optional[TracebackType] = None,
) -> None:
self.close()
def __del__(self) -> None:
try:
self.close()
except Exception:
pass
@_retry_decorator
def _request(
self, payload: Dict[str, Any], headers: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
"""
Send a POST request to the configured endpoint with retry and error handling.
Args:
payload (Dict[str, Any]): Parameters for the request payload.
headers (Optional[Dict[str, str]]): HTTP headers to include in the request.
Returns:
Dict[str, Any]: Decoded JSON response.
Raises:
ExtendedRequestException: Raised when the request fails.
"""
logger.debug(f"Starting synchronous request with payload: {payload}")
try:
response = self._client.post(
self.endpoint,
headers=self._prepare_headers(stream=False, headers=headers),
auth=self.auth,
json=payload,
)
logger.debug(f"Received response with status code: {response.status_code}")
response.raise_for_status()
json_response = response.json()
logger.debug(f"Response JSON: {json_response}")
return json_response
except Exception as e:
last_exception_text = (
e.response.text if hasattr(e, "response") and e.response else str(e)
)
logger.error(
f"Request failed. Error: {e!s}. Details: {last_exception_text}"
)
raise ExtendedRequestException(
f"Request failed: {e!s}. Details: {last_exception_text}",
e,
last_exception_text,
) from e
def _stream(
self, payload: Dict[str, Any], headers: Optional[Dict[str, str]] = None
) -> Iterator[Mapping[str, Any]]:
"""
Send a POST request expecting a streaming response.
Args:
payload (Dict[str, Any]): Parameters for the request payload.
headers (Optional[Dict[str, str]]): HTTP headers to include in the request.
Yields:
Mapping[str, Any]: Decoded JSON response line-by-line.
Raises:
ExtendedRequestException: Raised when the request fails.
"""
logger.debug(f"Starting synchronous streaming request with payload: {payload}")
last_exception_text = None
for attempt in range(1, self.retries + 2): # retries + initial attempt
logger.debug(f"Attempt {attempt} for synchronous streaming request.")
try:
with self._client.stream(
"POST",
self.endpoint,
headers=self._prepare_headers(stream=True, headers=headers),
auth=self.auth,
json={**payload, "stream": True},
) as response:
try:
logger.debug(
f"Received streaming response with status code: {response.status_code}"
)
response.raise_for_status()
for line in response.iter_lines():
if not line: # Skip empty lines
continue
parsed_line = self._parse_streaming_line(line)
if parsed_line:
logger.debug(f"Yielding parsed line: {parsed_line}")
yield parsed_line
return
except Exception as e:
last_exception_text = (
e.response.read().decode(
e.response.encoding or DEFAULT_ENCODING
)
if hasattr(e, "response") and e.response
else str(e)
)
raise
except Exception as e:
if attempt <= self.retries and _should_retry_exception(e):
delay = self.backoff_factor * (2 ** (attempt - 1))
logger.warning(
f"Streaming attempt {attempt} failed: {e}. Retrying in {delay} seconds..."
)
time.sleep(delay)
else:
logger.error(
f"Streaming request failed. Error: {e!s}. Details: {last_exception_text}"
)
raise ExtendedRequestException(
f"Streaming request failed: {e!s}. Details: {last_exception_text}",
e,
last_exception_text,
) from e
def generate(
self,
prompt: str,
payload: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
stream: bool = True,
) -> Union[Dict[str, Any], Iterator[Mapping[str, Any]]]:
"""
Generate text completion for the given prompt.
Args:
prompt (str): Input text prompt for the model.
payload (Optional[Dict[str, Any]]): Additional parameters for the request payload.
headers (Optional[Dict[str, str]]): HTTP headers to include in the request.
stream (bool): Whether to use streaming for the response.
Returns:
Union[Dict[str, Any], Iterator[Mapping[str, Any]]]: A full JSON response or an iterator for streaming responses.
"""
logger.debug(f"Generating text with prompt: {prompt}, stream: {stream}")
payload = {**(payload or {}), "prompt": prompt}
headers = {"route": "/v1/completions", **(headers or {})}
if stream:
return self._stream(payload=payload, headers=headers)
return self._request(payload=payload, headers=headers)
def chat(
self,
messages: List[Dict[str, Any]],
payload: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
stream: bool = True,
) -> Union[Dict[str, Any], Iterator[Mapping[str, Any]]]:
"""
Perform a chat interaction with the model.
Args:
messages (List[Dict[str, Any]]): List of message dictionaries for chat interaction.
payload (Optional[Dict[str, Any]]): Additional parameters for the request payload.
headers (Optional[Dict[str, str]]): HTTP headers to include in the request.
stream (bool): Whether to use streaming for the response.
Returns:
Union[Dict[str, Any], Iterator[Mapping[str, Any]]]: A full JSON response or an iterator for streaming responses.
"""
logger.debug(f"Starting chat with messages: {messages}, stream: {stream}")
payload = {**(payload or {}), "messages": messages}
headers = {"route": "/v1/chat/completions", **(headers or {})}
if stream:
return self._stream(payload=payload, headers=headers)
return self._request(payload=payload, headers=headers)
| Client |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_sum_to_be.py | {
"start": 494,
"end": 2878
} | class ____(ColumnAggregateExpectation):
"""Expect the sum of a column to be exactly a value."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {"a": [1, 2, 3, 4, 5]},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "a", "sum_total": 15},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "a", "sum_total": 14},
"out": {"success": False},
},
],
}
]
# This is a tuple consisting of all Metrics necessary to evaluate the Expectation.
metric_dependencies = ("column.sum",)
# This a tuple of parameter names that can affect whether the Expectation evaluates to True or False.
success_keys = ("sum_total",)
# This dictionary contains default values for any parameters that should have default values.
default_kwarg_values = {}
# This method performs a validation of your metrics against your success keys, returning a dict indicating the success or failure of the Expectation.
def _validate(
self,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
actual_value = metrics["column.sum"]
predicted_value = self._get_success_kwargs().get("sum_total")
success = actual_value == predicted_value
return {"success": success, "result": {"observed_value": actual_value}}
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": [
"column aggregate expectation",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@joshua-stauffer", # Don't forget to add your github handle here!
],
}
if __name__ == "__main__":
ExpectColumnSumToBe().print_diagnostic_checklist()
| ExpectColumnSumToBe |
python | huggingface__transformers | src/transformers/models/aimv2/modular_aimv2.py | {
"start": 17058,
"end": 17104
} | class ____(SiglipEncoder):
pass
| Aimv2Encoder |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor21.py | {
"start": 140,
"end": 594
} | class ____:
def __init__(self, a: int, b: str):
pass
T_A = TypeVar("T_A", bound=ClassA)
def func1(cls: type[T_A]) -> T_A:
# This should generate an error.
y = cls()
x = cls(1, "")
reveal_type(x, expected_text="T_A@func1")
return x
_T = TypeVar("_T")
def func2(cls: type[_T]) -> _T:
# This should generate an error.
y = cls(1, "")
x = cls()
reveal_type(x, expected_text="_T@func2")
return x
| ClassA |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-upstage/llama_index/llms/upstage/base.py | {
"start": 1312,
"end": 12018
} | class ____(OpenAI):
"""
Upstage LLM.
Examples:
`pip install llama-index-llms-upstage`
```python
from llama_index.llms.upstage import Upstage
import os
os.environ["UPSTAGE_API_KEY"] = "YOUR_API_KEY"
llm = Upstage()
stream = llm.stream("Hello, how are you?")
for response in stream:
print(response.delta, end="")
```
"""
model_config = ConfigDict(arbitrary_types_allowed=True, populate_by_name=True)
model: str = Field(
default=DEFAULT_UPSTAGE_MODEL, description="The Upstage model to use."
)
temperature: float = Field(
default=DEFAULT_TEMPERATURE,
description="The temperature to use during generation.",
gte=0.0,
lte=1.0,
)
max_tokens: Optional[int] = Field(
description="The maximum number of tokens to generate."
)
logprobs: Optional[bool] = Field(
description="Whether to return logprobs per token."
)
top_logprobs: int = Field(
description="The number of top token logprobs to return.",
default=0,
gte=0,
lte=20,
)
additional_kwargs: Dict[str, Any] = Field(
description="Additional kwargs for the Upstage API.", default_factory=dict
)
max_retries: int = Field(
description="The maximum number of API retries.", default=3, gte=0
)
timeout: float = Field(
description="The timeout, in seconds, for API requests.", default=60.0, gte=0.0
)
reuse_client: bool = Field(
description=(
"Reuse the OpenAI client between requests. When doing anything with large "
"volumes of async API calls, setting this to false can improve stability."
),
default=True,
)
tokenizer_name: str = Field(
description=(
"Huggingface pretrained tokenizer name "
"upstage opened solar tokenizer in Huggingface. https://huggingface.co/upstage/solar-1-mini-tokenizer"
),
default=SOLAR_TOKENIZERS[DEFAULT_UPSTAGE_MODEL],
)
api_key: str = Field(
default=None, alias="upstage_api_key", description="The Upstage API key."
)
api_base: str = Field(
default="https://api.upstage.ai/v1/solar",
description="The Upstage API base URL.",
)
top_p: Optional[float] = Field(
default=1,
gte=0,
lte=1,
description="An optional parameter to trigger nucleus sampling.",
)
frequency_penalty: Optional[float] = Field(
default=0,
gte=-2,
lte=2,
description="An optional parameter that controls the model’s tendency to repeat tokens.",
)
presence_penalty: Optional[float] = Field(
default=0,
gte=-2,
lte=2,
description="An optional parameter that adjusts the model’s tendency to include tokens already present in the input or generated text.",
)
response_format: Optional[dict] = Field(
default=None,
description="An object specifying the format that the model must generate.",
)
_client: Optional[SyncOpenAI] = PrivateAttr()
_aclient: Optional[AsyncOpenAI] = PrivateAttr()
_http_client: Optional[httpx.Client] = PrivateAttr()
def __init__(
self,
model: str = DEFAULT_UPSTAGE_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: Optional[int] = None,
logprobs: Optional[bool] = None,
top_logprobs: int = 0,
additional_kwargs: Dict[str, Any] = None,
max_retries: int = 3,
timeout: float = 60.0,
reuse_client: bool = True,
tokenizer_name: str = "upstage/solar-1-mini-tokenizer",
api_key: Optional[str] = None,
api_base: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None, # from base class
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
reasoning_effort: Optional[Literal["low", "medium", "high"]] = None,
top_p: Optional[float] = None,
frequency_penalty: Optional[float] = None,
presence_penalty: Optional[float] = None,
response_format: Optional[dict] = None,
**kwargs: Any,
) -> None:
if "upstage_api_key" in kwargs:
api_key = kwargs.pop("upstage_api_key")
additional_kwargs = additional_kwargs or {}
api_key, api_base = resolve_upstage_credentials(
api_key=api_key, api_base=api_base
)
default_headers = (default_headers or {}) | {"x-upstage-client": "llamaindex"}
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
logprobs=logprobs,
top_logprobs=top_logprobs,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
timeout=timeout,
reuse_client=reuse_client,
api_key=api_key,
api_base=api_base,
callback_manager=callback_manager,
default_headers=default_headers,
http_client=http_client,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
**kwargs,
)
self.tokenizer_name = tokenizer_name
self._client = None
self._aclient = None
self._http_client = http_client
self.reasoning_effort = reasoning_effort
self.top_p = top_p
self.frequency_penalty = frequency_penalty
self.presence_penalty = presence_penalty
self.response_format = response_format
def _get_model_name(self) -> str:
return self.model
@classmethod
def class_name(cls) -> str:
return "upstage_llm"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=upstage_modelname_to_contextsize(
modelname=self._get_model_name()
),
num_output=self.max_tokens or -1,
is_chat_model=is_chat_model(model=self._get_model_name()),
is_function_calling_model=is_function_calling_model(
model=self._get_model_name()
),
model_name=self.model,
)
@property
def _tokenizer(self) -> Optional[Tokenizer]:
"""
Get a Huggingface tokenizer for solar models.
"""
if SOLAR_TOKENIZERS.get(self.model) != self.tokenizer_name:
warnings.warn(
f"You are using a different tokenizer than the one specified in the model. This may cause issues with token counting. Please use {SOLAR_TOKENIZERS[self.model]} as the tokenizer name."
)
return Tokenizer.from_pretrained(self.tokenizer_name)
def get_num_tokens_from_message(self, messages: Sequence[ChatMessage]) -> int:
tokens_per_message = 5 # <|im_start|>{role}\n{message}<|im_end|>
tokens_prefix = 1 # <|startoftext|>
tokens_suffix = 3 # <|im_start|>assistant\n
num_tokens = 0
num_tokens += tokens_prefix
message_dicts = to_openai_message_dicts(messages)
for message in message_dicts:
num_tokens += tokens_per_message
for value in message.values():
num_tokens += len(
self._tokenizer.encode(str(value), add_special_tokens=False)
)
num_tokens += tokens_suffix
return num_tokens
@llm_retry_decorator
def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
if is_doc_parsing_model(self.model, kwargs):
document_contents = self._parse_documents(kwargs.pop("file_path"))
messages.append(ChatMessage(role="user", content=document_contents))
return super()._chat(messages, **kwargs)
@llm_retry_decorator
def _achat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
if is_doc_parsing_model(self.model, kwargs):
document_contents = self._parse_documents(kwargs.pop("file_path"))
messages.append(ChatMessage(role="user", content=document_contents))
return super()._achat(messages, **kwargs)
@llm_retry_decorator
def _stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
if is_doc_parsing_model(self.model, kwargs):
document_contents = self._parse_documents(kwargs.pop("file_path"))
messages.append(ChatMessage(role="user", content=document_contents))
return super()._stream_chat(messages, **kwargs)
@llm_retry_decorator
def _astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
if is_doc_parsing_model(self.model, kwargs):
document_contents = self._parse_documents(kwargs.pop("file_path"))
messages.append(ChatMessage(role="user", content=document_contents))
return super()._astream_chat(messages, **kwargs)
def _parse_documents(
self, file_path: Union[str, Path, List[str], List[Path]]
) -> str:
document_contents = "Documents:\n"
loader = UpstageDocumentParseReader(
api_key=self.api_key, output_format="text", coordinates=False
)
docs = loader.load_data(file_path)
if isinstance(file_path, list):
file_titles = [os.path.basename(path) for path in file_path]
else:
file_titles = [os.path.basename(file_path)]
for i, doc in enumerate(docs):
file_title = file_titles[min(i, len(file_titles) - 1)]
document_contents += f"{file_title}:\n{doc.text}\n\n"
return document_contents
def _get_model_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
all_kwargs = super()._get_model_kwargs(**kwargs)
return all_kwargs | {
"reasoning_effort": self.reasoning_effort,
"top_p": self.top_p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"response_format": self.response_format,
}
| Upstage |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 58338,
"end": 61606
} | class ____(Response):
"""
Response of events.get_multi_task_plots endpoint.
:param plots: Plots mapping (keyed by task name)
:type plots: dict
:param returned: Number of results returned
:type returned: int
:param total: Total number of results available for this query
:type total: float
:param scroll_id: Scroll ID for getting more results
:type scroll_id: str
"""
_service = "events"
_action = "get_multi_task_plots"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"plots": {
"description": "Plots mapping (keyed by task name)",
"type": ["object", "null"],
},
"returned": {
"description": "Number of results returned",
"type": ["integer", "null"],
},
"scroll_id": {
"description": "Scroll ID for getting more results",
"type": ["string", "null"],
},
"total": {
"description": "Total number of results available for this query",
"type": ["number", "null"],
},
},
"type": "object",
}
def __init__(
self,
plots: Optional[dict] = None,
returned: Optional[int] = None,
total: Optional[float] = None,
scroll_id: Optional[str] = None,
**kwargs: Any
) -> None:
super(GetMultiTaskPlotsResponse, self).__init__(**kwargs)
self.plots = plots
self.returned = returned
self.total = total
self.scroll_id = scroll_id
@schema_property("plots")
def plots(self) -> Optional[dict]:
return self._property_plots
@plots.setter
def plots(self, value: Optional[dict]) -> None:
if value is None:
self._property_plots = None
return
self.assert_isinstance(value, "plots", (dict,))
self._property_plots = value
@schema_property("returned")
def returned(self) -> Optional[int]:
return self._property_returned
@returned.setter
def returned(self, value: Optional[int]) -> None:
if value is None:
self._property_returned = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "returned", six.integer_types)
self._property_returned = value
@schema_property("total")
def total(self) -> Optional[float]:
return self._property_total
@total.setter
def total(self, value: Optional[float]) -> None:
if value is None:
self._property_total = None
return
self.assert_isinstance(value, "total", six.integer_types + (float,))
self._property_total = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| GetMultiTaskPlotsResponse |
python | doocs__leetcode | solution/2000-2099/2044.Count Number of Maximum Bitwise-OR Subsets/Solution.py | {
"start": 0,
"end": 398
} | class ____:
def countMaxOrSubsets(self, nums: List[int]) -> int:
def dfs(i, t):
nonlocal ans, mx
if i == len(nums):
if t == mx:
ans += 1
return
dfs(i + 1, t)
dfs(i + 1, t | nums[i])
ans = 0
mx = reduce(lambda x, y: x | y, nums)
dfs(0, 0)
return ans
| Solution |
python | pytorch__pytorch | torch/_numpy/_util.py | {
"start": 462,
"end": 7725
} | class ____(TypeError, RuntimeError):
pass
def cast_if_needed(tensor, dtype):
# NB: no casting if dtype=None
if dtype is not None and tensor.dtype != dtype:
tensor = tensor.to(dtype)
return tensor
def cast_int_to_float(x):
# cast integers and bools to the default float dtype
if _dtypes_impl._category(x.dtype) < 2:
x = x.to(_dtypes_impl.default_dtypes().float_dtype)
return x
# a replica of the version in ./numpy/numpy/core/src/multiarray/common.h
def normalize_axis_index(ax, ndim, argname=None):
if not (-ndim <= ax < ndim):
raise AxisError(f"axis {ax} is out of bounds for array of dimension {ndim}")
if ax < 0:
ax += ndim
return ax
# from https://github.com/numpy/numpy/blob/main/numpy/core/numeric.py#L1378
def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
"""
Normalizes an axis argument into a tuple of non-negative integer axes.
This handles shorthands such as ``1`` and converts them to ``(1,)``,
as well as performing the handling of negative indices covered by
`normalize_axis_index`.
By default, this forbids axes from being specified multiple times.
Used internally by multi-axis-checking logic.
Parameters
----------
axis : int, iterable of int
The un-normalized index or indices of the axis.
ndim : int
The number of dimensions of the array that `axis` should be normalized
against.
argname : str, optional
A prefix to put before the error message, typically the name of the
argument.
allow_duplicate : bool, optional
If False, the default, disallow an axis from being specified twice.
Returns
-------
normalized_axes : tuple of int
The normalized axis index, such that `0 <= normalized_axis < ndim`
"""
# Optimization to speed-up the most common cases.
if type(axis) not in (tuple, list):
try:
axis = [operator.index(axis)]
except TypeError:
pass
# Going via an iterator directly is slower than via list comprehension.
axis = tuple(normalize_axis_index(ax, ndim, argname) for ax in axis)
if not allow_duplicate and len(set(map(int, axis))) != len(axis):
if argname:
raise ValueError(f"repeated axis in `{argname}` argument")
else:
raise ValueError("repeated axis")
return axis
def allow_only_single_axis(axis):
if axis is None:
return axis
if len(axis) != 1:
raise NotImplementedError("does not handle tuple axis")
return axis[0]
def expand_shape(arr_shape, axis):
# taken from numpy 1.23.x, expand_dims function
if type(axis) not in (list, tuple):
axis = (axis,)
out_ndim = len(axis) + len(arr_shape)
axis = normalize_axis_tuple(axis, out_ndim)
shape_it = iter(arr_shape)
shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
return shape
def apply_keepdims(tensor, axis, ndim):
if axis is None:
# tensor was a scalar
shape = (1,) * ndim
tensor = tensor.expand(shape).contiguous()
else:
shape = expand_shape(tensor.shape, axis)
tensor = tensor.reshape(shape)
return tensor
def axis_none_flatten(*tensors, axis=None):
"""Flatten the arrays if axis is None."""
if axis is None:
tensors = tuple(ar.flatten() for ar in tensors)
return tensors, 0
else:
return tensors, axis
def typecast_tensor(t, target_dtype, casting):
"""Dtype-cast tensor to target_dtype.
Parameters
----------
t : torch.Tensor
The tensor to cast
target_dtype : torch dtype object
The array dtype to cast all tensors to
casting : str
The casting mode, see `np.can_cast`
Returns
-------
`torch.Tensor` of the `target_dtype` dtype
Raises
------
ValueError
if the argument cannot be cast according to the `casting` rule
"""
can_cast = _dtypes_impl.can_cast_impl
if not can_cast(t.dtype, target_dtype, casting=casting):
raise TypeError(
f"Cannot cast array data from {t.dtype} to"
f" {target_dtype} according to the rule '{casting}'"
)
return cast_if_needed(t, target_dtype)
def typecast_tensors(tensors, target_dtype, casting):
return tuple(typecast_tensor(t, target_dtype, casting) for t in tensors)
def _try_convert_to_tensor(obj):
try:
tensor = torch.as_tensor(obj)
except Exception as e:
mesg = f"failed to convert {obj} to ndarray. \nInternal error is: {str(e)}."
raise NotImplementedError(mesg) # noqa: B904
return tensor
def _coerce_to_tensor(obj, dtype=None, copy=False, ndmin=0):
"""The core logic of the array(...) function.
Parameters
----------
obj : tensor_like
The thing to coerce
dtype : torch.dtype object or None
Coerce to this torch dtype
copy : bool
Copy or not
ndmin : int
The results as least this many dimensions
is_weak : bool
Whether obj is a weakly typed python scalar.
Returns
-------
tensor : torch.Tensor
a tensor object with requested dtype, ndim and copy semantics.
Notes
-----
This is almost a "tensor_like" coercive function. Does not handle wrapper
ndarrays (those should be handled in the ndarray-aware layer prior to
invoking this function).
"""
if isinstance(obj, torch.Tensor):
tensor = obj
else:
# tensor.dtype is the pytorch default, typically float32. If obj's elements
# are not exactly representable in float32, we've lost precision:
# >>> torch.as_tensor(1e12).item() - 1e12
# -4096.0
default_dtype = torch.get_default_dtype()
torch.set_default_dtype(_dtypes_impl.get_default_dtype_for(torch.float32))
try:
tensor = _try_convert_to_tensor(obj)
finally:
torch.set_default_dtype(default_dtype)
# type cast if requested
tensor = cast_if_needed(tensor, dtype)
# adjust ndim if needed
ndim_extra = ndmin - tensor.ndim
if ndim_extra > 0:
tensor = tensor.view((1,) * ndim_extra + tensor.shape)
# special handling for np._CopyMode
try:
copy = bool(copy)
except ValueError:
# TODO handle _CopyMode.IF_NEEDED correctly
copy = False
# copy if requested
if copy:
tensor = tensor.clone()
return tensor
def ndarrays_to_tensors(*inputs):
"""Convert all ndarrays from `inputs` to tensors. (other things are intact)"""
from ._ndarray import ndarray
if len(inputs) == 0:
return ValueError()
elif len(inputs) == 1:
input_ = inputs[0]
if isinstance(input_, ndarray):
return input_.tensor
elif isinstance(input_, tuple):
result = []
for sub_input in input_:
sub_result = ndarrays_to_tensors(sub_input)
result.append(sub_result)
return tuple(result)
else:
return input_
else:
assert isinstance(inputs, tuple) # sanity check
return ndarrays_to_tensors(inputs)
| UFuncTypeError |
python | numba__numba | numba/core/rewrites/static_getitem.py | {
"start": 1788,
"end": 3624
} | class ____(Rewrite):
"""
Rewrite IR expressions of the kind `getitem(value=arr, index=$XX)`
where `$XX` is a StringLiteral value as
`static_getitem(value=arr, index=<literal value>)`.
"""
def match(self, func_ir, block, typemap, calltypes):
"""
Detect all getitem expressions and find which ones have
string literal indexes
"""
self.getitems = getitems = {}
self.block = block
self.calltypes = calltypes
for expr in block.find_exprs(op='getitem'):
if expr.op == 'getitem':
index_ty = typemap[expr.index.name]
if isinstance(index_ty, types.StringLiteral):
getitems[expr] = (expr.index, index_ty.literal_value)
return len(getitems) > 0
def apply(self):
"""
Rewrite all matching getitems as static_getitems where the index
is the literal value of the string.
"""
new_block = ir.Block(self.block.scope, self.block.loc)
for inst in self.block.body:
if isinstance(inst, ir.Assign):
expr = inst.value
if expr in self.getitems:
const, lit_val = self.getitems[expr]
new_expr = ir.Expr.static_getitem(value=expr.value,
index=lit_val,
index_var=expr.index,
loc=expr.loc)
self.calltypes[new_expr] = self.calltypes[expr]
inst = ir.Assign(value=new_expr, target=inst.target,
loc=inst.loc)
new_block.append(inst)
return new_block
@register_rewrite('after-inference')
| RewriteStringLiteralGetitems |
python | apache__airflow | task-sdk/src/airflow/sdk/api/datamodels/_generated.py | {
"start": 16042,
"end": 16429
} | class ____(BaseModel):
"""
Schema for updating TaskInstance to a terminal state except SUCCESS state.
"""
model_config = ConfigDict(
extra="forbid",
)
state: TerminalStateNonSuccess
end_date: Annotated[AwareDatetime, Field(title="End Date")]
rendered_map_index: Annotated[str | None, Field(title="Rendered Map Index")] = None
| TITerminalStatePayload |
python | apache__airflow | airflow-core/src/airflow/jobs/triggerer_job_runner.py | {
"start": 9105,
"end": 10397
} | class ____:
log_path: str
ti: RuntimeTI = attrs.field(repr=False)
bound_logger: WrappedLogger = attrs.field(init=False, repr=False)
def __call__(self, processors: Iterable[structlog.typing.Processor]) -> WrappedLogger:
if hasattr(self, "bound_logger"):
return self.bound_logger
from airflow.sdk.log import init_log_file
log_file = init_log_file(self.log_path)
pretty_logs = False
if pretty_logs:
underlying_logger: WrappedLogger = structlog.WriteLogger(log_file.open("w", buffering=1))
else:
underlying_logger = structlog.BytesLogger(log_file.open("wb"))
logger = structlog.wrap_logger(underlying_logger, processors=processors).bind()
self.bound_logger = logger
return logger
def upload_to_remote(self):
from airflow.sdk.log import upload_to_remote
if not hasattr(self, "bound_logger"):
# Never actually called, nothing to do
return
upload_to_remote(self.bound_logger, self.ti)
def in_process_api_server() -> InProcessExecutionAPI:
from airflow.api_fastapi.execution_api.app import InProcessExecutionAPI
api = InProcessExecutionAPI()
return api
@attrs.define(kw_only=True)
| TriggerLoggingFactory |
python | numpy__numpy | benchmarks/benchmarks/bench_shape_base.py | {
"start": 52,
"end": 1938
} | class ____(Benchmark):
params = [1, 10, 100]
param_names = ['size']
def setup(self, n):
self.a_2d = np.ones((2 * n, 2 * n))
self.b_1d = np.ones(2 * n)
self.b_2d = 2 * self.a_2d
self.a = np.ones(3 * n)
self.b = np.ones(3 * n)
self.one_2d = np.ones((1 * n, 3 * n))
self.two_2d = np.ones((1 * n, 3 * n))
self.three_2d = np.ones((1 * n, 6 * n))
self.four_1d = np.ones(6 * n)
self.five_0d = np.ones(1 * n)
self.six_1d = np.ones(5 * n)
# avoid np.zeros's lazy allocation that might cause
# page faults during benchmark
self.zero_2d = np.full((2 * n, 6 * n), 0)
self.one = np.ones(3 * n)
self.two = 2 * np.ones((3, 3 * n))
self.three = 3 * np.ones(3 * n)
self.four = 4 * np.ones(3 * n)
self.five = 5 * np.ones(1 * n)
self.six = 6 * np.ones(5 * n)
# avoid np.zeros's lazy allocation that might cause
# page faults during benchmark
self.zero = np.full((2 * n, 6 * n), 0)
def time_block_simple_row_wise(self, n):
np.block([self.a_2d, self.b_2d])
def time_block_simple_column_wise(self, n):
np.block([[self.a_2d], [self.b_2d]])
def time_block_complicated(self, n):
np.block([[self.one_2d, self.two_2d],
[self.three_2d],
[self.four_1d],
[self.five_0d, self.six_1d],
[self.zero_2d]])
def time_nested(self, n):
np.block([
[
np.block([
[self.one],
[self.three],
[self.four]
]),
self.two
],
[self.five, self.six],
[self.zero]
])
def time_no_lists(self, n):
np.block(1)
np.block(np.eye(3 * n))
| Block |
python | pallets__flask | src/flask/sessions.py | {
"start": 3177,
"end": 3785
} | class ____(SecureCookieSession):
"""Class used to generate nicer error messages if sessions are not
available. Will still allow read-only access to the empty session
but fail on setting.
"""
def _fail(self, *args: t.Any, **kwargs: t.Any) -> t.NoReturn:
raise RuntimeError(
"The session is unavailable because no secret "
"key was set. Set the secret_key on the "
"application to something unique and secret."
)
__setitem__ = __delitem__ = clear = pop = popitem = update = setdefault = _fail # noqa: B950
del _fail
| NullSession |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.