language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | dabeaz-course__practical-python | Solutions/8_1/test_stock.py | {
"start": 47,
"end": 714
} | class ____(unittest.TestCase):
def test_create(self):
s = stock.Stock('GOOG', 100, 490.1)
self.assertEqual(s.name, 'GOOG')
self.assertEqual(s.shares, 100)
self.assertEqual(s.price, 490.1)
def test_cost(self):
s = stock.Stock('GOOG', 100, 490.1)
self.assertEqual(s.cost, 49010.0)
def test_sell(self):
s = stock.Stock('GOOG', 100, 490.1)
s.sell(25)
self.assertEqual(s.shares, 75)
def test_shares_check(self):
s = stock.Stock('GOOG', 100, 490.1)
with self.assertRaises(TypeError):
s.shares = '100'
if __name__ == '__main__':
unittest.main()
| TestStock |
python | ray-project__ray | python/ray/exceptions.py | {
"start": 22390,
"end": 22892
} | class ____(ObjectLostError):
"""Indicates that an object fetch timed out.
Args:
object_ref_hex: Hex ID of the object.
"""
def __str__(self):
return (
self._base_str()
+ "\n\n"
+ (
f"Fetch for object {self.object_ref_hex} timed out because no "
"locations were found for the object. This may indicate a "
"system-level bug."
)
)
@DeveloperAPI
| ObjectFetchTimedOutError |
python | apache__airflow | airflow-core/tests/unit/utils/test_operator_helpers.py | {
"start": 940,
"end": 4083
} | class ____:
def setup_method(self):
self.dag_id = "dag_id"
self.task_id = "task_id"
self.try_number = 1
self.logical_date = "2017-05-21T00:00:00"
self.dag_run_id = "dag_run_id"
self.owner = ["owner1", "owner2"]
self.email = ["email1@test.com"]
self.context = {
"dag_run": mock.MagicMock(
name="dag_run",
run_id=self.dag_run_id,
logical_date=datetime.strptime(self.logical_date, "%Y-%m-%dT%H:%M:%S"),
),
"task_instance": mock.MagicMock(
name="task_instance",
task_id=self.task_id,
dag_id=self.dag_id,
try_number=self.try_number,
logical_date=datetime.strptime(self.logical_date, "%Y-%m-%dT%H:%M:%S"),
),
"task": mock.MagicMock(name="task", owner=self.owner, email=self.email),
}
def callable1(ds_nodash):
return (ds_nodash,)
def callable3(ds_nodash, *args, **kwargs):
return (ds_nodash, args, kwargs)
def callable4(ds_nodash, **kwargs):
return (ds_nodash, kwargs)
def callable5(**kwargs):
return (kwargs,)
def callable6(arg1, ds_nodash):
return (arg1, ds_nodash)
def callable7(arg1, **kwargs):
return (arg1, kwargs)
def callable8(arg1, *args, **kwargs):
return (arg1, args, kwargs)
def callable9(*args, **kwargs):
return (args, kwargs)
def callable10(arg1, *, ds_nodash="20200201"):
return (arg1, ds_nodash)
def callable11(*, ds_nodash, **kwargs):
return (
ds_nodash,
kwargs,
)
KWARGS = {
"ds_nodash": "20200101",
}
@pytest.mark.parametrize(
("func", "args", "kwargs", "expected"),
[
(callable1, (), KWARGS, ("20200101",)),
(
callable5,
(),
KWARGS,
(KWARGS,),
),
(callable6, (1,), KWARGS, (1, "20200101")),
(callable7, (1,), KWARGS, (1, KWARGS)),
(callable8, (1, 2), KWARGS, (1, (2,), KWARGS)),
(callable9, (1, 2), KWARGS, ((1, 2), KWARGS)),
(callable10, (1,), KWARGS, (1, "20200101")),
],
)
def test_make_kwargs_callable(func, args, kwargs, expected):
kwargs_callable = operator_helpers.make_kwargs_callable(func)
ret = kwargs_callable(*args, **kwargs)
assert ret == expected
def test_make_kwargs_callable_conflict():
def func(ds_nodash):
pytest.fail(f"Should not reach here: {ds_nodash}")
kwargs_callable = operator_helpers.make_kwargs_callable(func)
args = ["20200101"]
kwargs = {"ds_nodash": "20200101"}
with pytest.raises(ValueError, match="ds_nodash"):
kwargs_callable(*args, **kwargs)
@pytest.mark.parametrize(
("func", "args", "kwargs", "expected"),
[
(callable10, (1, 2), {"ds_nodash": 1}, {"ds_nodash": 1}),
(callable11, (1, 2), {"ds_nodash": 1}, {"ds_nodash": 1}),
],
)
def test_args_and_kwargs_conflicts(func, args, kwargs, expected):
kwargs_result = operator_helpers.determine_kwargs(func, args=args, kwargs=kwargs)
assert expected == kwargs_result
| TestOperatorHelpers |
python | squidfunk__mkdocs-material | material/plugins/optimize/plugin.py | {
"start": 1931,
"end": 14977
} | class ____(BasePlugin[OptimizeConfig]):
supports_multiple_instances = True
# Manifest
manifest: dict[str, str] = {}
# Initialize plugin
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Initialize incremental builds
self.is_serve = False
# Determine whether we're serving the site
def on_startup(self, *, command, dirty):
self.is_serve = command == "serve"
# Initialize thread pool
self.pool = ThreadPoolExecutor(self.config.concurrency)
self.pool_jobs: dict[str, Future] = {}
# Resolve and load manifest
def on_config(self, config):
if not self.config.enabled:
return
# Resolve cache directory (once) - this is necessary, so the cache is
# always relative to the configuration file, and thus project, and not
# relative to the current working directory, or it would not work with
# the projects plugin.
path = os.path.abspath(self.config.cache_dir)
if path != self.config.cache_dir:
self.config.cache_dir = os.path.join(
os.path.dirname(config.config_file_path),
os.path.normpath(self.config.cache_dir)
)
# Ensure cache directory exists
os.makedirs(self.config.cache_dir, exist_ok = True)
# Initialize manifest
self.manifest_file = os.path.join(
self.config.cache_dir, "manifest.json"
)
# Load manifest if it exists and the cache should be used
if os.path.isfile(self.manifest_file) and self.config.cache:
try:
with open(self.manifest_file) as f:
self.manifest = json.load(f)
except:
pass
# Initialize optimization pipeline
def on_env(self, env, *, config, files):
if not self.config.enabled:
return
# Skip if media files should not be optimized
if not self.config.optimize:
return
# Filter all optimizable media files and steal reponsibility from MkDocs
# by removing them from the files collection. Then, start a concurrent
# job that checks if an image was already optimized and can be returned
# from the cache, or optimize it accordingly.
for file in files.media_files():
if self._is_excluded(file):
continue
# Spawn concurrent job to optimize the given image and add future
# to job dictionary, as it returns the file we need to copy later
path = os.path.join(self.config.cache_dir, file.src_path)
self.pool_jobs[file.abs_src_path] = self.pool.submit(
self._optimize_image, file, path, config
)
# Steal responsibility from MkDocs
files.remove(file)
# Finish optimization pipeline
def on_post_build(self, *, config):
if not self.config.enabled:
return
# Skip if media files should not be optimized
if not self.config.optimize:
return
# Reconcile concurrent jobs - we need to wait for all jobs to finish
# before we can copy the optimized files to the output directory. If an
# exception occurred in one of the jobs, we raise it here, so the build
# fails and the author can fix the issue.
for path, future in self.pool_jobs.items():
if future.exception():
raise future.exception()
else:
file: File = future.result()
file.copy_file()
# Save manifest if cache should be used
if self.config.cache:
with open(self.manifest_file, "w") as f:
f.write(json.dumps(self.manifest, indent = 2, sort_keys = True))
# Compute and print gains through optimization
if self.config.print_gain_summary:
print(Style.NORMAL)
print(f" Optimizations:")
# Print summary for file extension
for seek in [".png", ".jpg"]:
size = size_opt = 0
for path, future in self.pool_jobs.items():
file: File = future.result()
# Skip files that are not of the given type
_, extension = os.path.splitext(path)
extension = ".jpg" if extension == ".jpeg" else extension
if extension != seek:
continue
# Compute size before and after optimization
size += os.path.getsize(path)
size_opt += os.path.getsize(file.abs_dest_path)
# Compute absolute and relative gain
if size and size_opt:
gain_abs = size - size_opt
gain_rel = (1 - size_opt / size) * 100
# Print summary for files
print(
f" *{seek} {Fore.GREEN}{_size(size_opt)}"
f"{Fore.WHITE}{Style.DIM} ↓ "
f"{_size(gain_abs)} [{gain_rel:3.1f}%]"
f"{Style.RESET_ALL}"
)
# Reset all styles
print(Style.RESET_ALL)
# Save manifest on shutdown
def on_shutdown(self):
if not self.config.enabled:
return
# Shutdown thread pool - if we're on Python 3.9 and above, cancel all
# pending futures that have not yet been scheduled
if sys.version_info >= (3, 9):
self.pool.shutdown(cancel_futures = True)
else:
self.pool.shutdown()
# Save manifest if cache should be used
if self.manifest and self.config.cache:
with open(self.manifest_file, "w") as f:
f.write(json.dumps(self.manifest, indent = 2, sort_keys = True))
# -------------------------------------------------------------------------
# Check if a file can be optimized
def _is_optimizable(self, file: File):
# Check if PNG images should be optimized
if file.url.endswith((".png")):
return self.config.optimize_png
# Check if JPG images should be optimized
if file.url.endswith((".jpg", ".jpeg")):
return self.config.optimize_jpg
# File can not be optimized by the plugin
return False
# Check if the given file is excluded
def _is_excluded(self, file: File):
if not self._is_optimizable(file):
return True
# Check if file matches one of the inclusion patterns
path = file.src_path
if self.config.optimize_include:
for pattern in self.config.optimize_include:
if fnmatch(file.src_uri, pattern):
return False
# File is not included
log.debug(f"Excluding file '{path}' due to inclusion patterns")
return True
# Check if file matches one of the exclusion patterns
for pattern in self.config.optimize_exclude:
if fnmatch(file.src_uri, pattern):
log.debug(f"Excluding file '{path}' due to exclusion patterns")
return True
# File is not excluded
return False
# Optimize image and write to cache
def _optimize_image(self, file: File, path: str, config: MkDocsConfig):
with open(file.abs_src_path, "rb") as f:
data = f.read()
hash = sha1(data).hexdigest()
# Check if file hash changed, so we need to optimize again
prev = self.manifest.get(file.url, "")
if hash != prev or not os.path.isfile(path):
os.makedirs(os.path.dirname(path), exist_ok = True)
# Optimize PNG image using pngquant
if file.url.endswith((".png")):
self._optimize_image_png(file, path, config)
# Optimize JPG image using pillow
if file.url.endswith((".jpg", ".jpeg")):
self._optimize_image_jpg(file, path, config)
# Compute size before and after optimization
size = len(data)
size_opt = os.path.getsize(path)
# Compute absolute and relative gain
gain_abs = size - size_opt
gain_rel = (1 - size_opt / size) * 100
# Print how much we gained, if we did and desired
gain = ""
if gain_abs and self.config.print_gain:
gain += " ↓ "
gain += " ".join([_size(gain_abs), f"[{gain_rel:3.1f}%]"])
# Print summary for file
log.info(
f"Optimized media file: {file.src_uri} "
f"{Fore.GREEN}{_size(size_opt)}"
f"{Fore.WHITE}{Style.DIM}{gain}"
f"{Style.RESET_ALL}"
)
# Update manifest by associating file with hash
self.manifest[file.url] = hash
# Compute project root
root = os.path.dirname(config.config_file_path)
# Compute source file system path
file.abs_src_path = path
file.src_path = os.path.relpath(path, root)
# Return file to be copied from cache
return file
# Optimize PNG image - we first tried to use libimagequant, but encountered
# the occassional segmentation fault, which means it's probably not a good
# choice. Instead, we just rely on pngquant which seems much more stable.
def _optimize_image_png(self, file: File, path: str, config: MkDocsConfig):
# Check if the required dependencies for optimizing are available, which
# is, at the absolute minimum, the 'pngquant' binary, and raise an error
# to the caller, so he can decide what to do with the error. The caller
# can treat this as a warning or an error to abort the build.
if not which("pngquant"):
docs = os.path.relpath(config.docs_dir)
path = os.path.relpath(file.abs_src_path, docs)
raise PluginError(
f"Couldn't optimize image '{path}' in '{docs}': 'pngquant' "
f"not found. Make sure 'pngquant' is installed and in your path"
)
# Build command line arguments
args = ["pngquant",
"--force", "--skip-if-larger",
"--output", path,
"--speed", f"{self.config.optimize_png_speed}"
]
# Add flag to remove optional metadata
if self.config.optimize_png_strip:
args.append("--strip")
# Set input file and run, then check if pngquant actually wrote a file,
# as we instruct it not to if the size of the optimized file is larger.
# This can happen if files are already compressed and optimized by
# the author. In that case, just copy the original file.
subprocess.run([*args, file.abs_src_path])
if not os.path.isfile(path):
utils.copy_file(file.abs_src_path, path)
# Optimize JPG image
def _optimize_image_jpg(self, file: File, path: str, config: MkDocsConfig):
# Check if the required dependencies for optimizing are available, which
# is, at the absolute minimum, the 'pillow' package, and raise an error
# to the caller, so he can decide what to do with the error. The caller
# can treat this as a warning or an error to abort the build.
if not _supports("Image"):
docs = os.path.relpath(config.docs_dir)
path = os.path.relpath(file.abs_src_path, docs)
raise PluginError(
f"Couldn't optimize image '{path}' in '{docs}': install "
f"required dependencies – pip install 'mkdocs-material[imaging]'"
)
# Open and save optimized image
image = Image.open(file.abs_src_path)
image.save(path, "jpeg",
quality = self.config.optimize_jpg_quality,
progressive = self.config.optimize_jpg_progressive
)
# -----------------------------------------------------------------------------
# Helper functions
# -----------------------------------------------------------------------------
# Check for presence of optional imports
@functools.lru_cache(maxsize = None)
def _supports(name: str):
return name in globals()
# -----------------------------------------------------------------------------
# Print human-readable size
def _size(value):
for unit in ["B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB"]:
if abs(value) < 1000.0:
return f"{value:3.1f} {unit}"
value /= 1000.0
# -----------------------------------------------------------------------------
# Data
# -----------------------------------------------------------------------------
# Set up logging
log = logging.getLogger("mkdocs.material.optimize")
| OptimizePlugin |
python | pydantic__pydantic | pydantic/mypy.py | {
"start": 15625,
"end": 50001
} | class ____:
"""Transform the BaseModel subclass according to the plugin settings.
Attributes:
tracked_config_fields: A set of field configs that the plugin has to track their value.
"""
tracked_config_fields: set[str] = {
'extra',
'frozen',
'from_attributes',
'populate_by_name',
'validate_by_alias',
'validate_by_name',
'alias_generator',
'strict',
}
def __init__(
self,
cls: ClassDef,
reason: Expression | Statement,
api: SemanticAnalyzerPluginInterface,
plugin_config: PydanticPluginConfig,
) -> None:
self._cls = cls
self._reason = reason
self._api = api
self.plugin_config = plugin_config
def transform(self) -> bool:
"""Configures the BaseModel subclass according to the plugin settings.
In particular:
* determines the model config and fields,
* adds a fields-aware signature for the initializer and construct methods
* freezes the class if frozen = True
* stores the fields, config, and if the class is settings in the mypy metadata for access by subclasses
"""
info = self._cls.info
is_a_root_model = is_root_model(info)
config = self.collect_config()
fields, class_vars = self.collect_fields_and_class_vars(config, is_a_root_model)
if fields is None or class_vars is None:
# Some definitions are not ready. We need another pass.
return False
for field in fields:
if field.type is None:
return False
is_settings = info.has_base(BASESETTINGS_FULLNAME)
self.add_initializer(fields, config, is_settings, is_a_root_model)
self.add_model_construct_method(fields, config, is_settings, is_a_root_model)
self.set_frozen(fields, self._api, frozen=config.frozen is True)
self.adjust_decorator_signatures()
info.metadata[METADATA_KEY] = {
'fields': {field.name: field.serialize() for field in fields},
'class_vars': {class_var.name: class_var.serialize() for class_var in class_vars},
'config': config.get_values_dict(),
}
return True
def adjust_decorator_signatures(self) -> None:
"""When we decorate a function `f` with `pydantic.validator(...)`, `pydantic.field_validator`
or `pydantic.serializer(...)`, mypy sees `f` as a regular method taking a `self` instance,
even though pydantic internally wraps `f` with `classmethod` if necessary.
Teach mypy this by marking any function whose outermost decorator is a `validator()`,
`field_validator()` or `serializer()` call as a `classmethod`.
"""
for sym in self._cls.info.names.values():
if isinstance(sym.node, Decorator):
first_dec = sym.node.original_decorators[0]
if (
isinstance(first_dec, CallExpr)
and isinstance(first_dec.callee, NameExpr)
and first_dec.callee.fullname in IMPLICIT_CLASSMETHOD_DECORATOR_FULLNAMES
# @model_validator(mode="after") is an exception, it expects a regular method
and not (
first_dec.callee.fullname == MODEL_VALIDATOR_FULLNAME
and any(
first_dec.arg_names[i] == 'mode' and isinstance(arg, StrExpr) and arg.value == 'after'
for i, arg in enumerate(first_dec.args)
)
)
):
# TODO: Only do this if the first argument of the decorated function is `cls`
sym.node.func.is_class = True
def collect_config(self) -> ModelConfigData: # noqa: C901 (ignore complexity)
"""Collects the values of the config attributes that are used by the plugin, accounting for parent classes."""
cls = self._cls
config = ModelConfigData()
has_config_kwargs = False
has_config_from_namespace = False
# Handle `class MyModel(BaseModel, <name>=<expr>, ...):`
for name, expr in cls.keywords.items():
config_data = self.get_config_update(name, expr)
if config_data:
has_config_kwargs = True
config.update(config_data)
# Handle `model_config`
stmt: Statement | None = None
for stmt in cls.defs.body:
if not isinstance(stmt, (AssignmentStmt, ClassDef)):
continue
if isinstance(stmt, AssignmentStmt):
lhs = stmt.lvalues[0]
if not isinstance(lhs, NameExpr) or lhs.name != 'model_config':
continue
if isinstance(stmt.rvalue, CallExpr): # calls to `dict` or `ConfigDict`
for arg_name, arg in zip(stmt.rvalue.arg_names, stmt.rvalue.args):
if arg_name is None:
continue
config.update(self.get_config_update(arg_name, arg, lax_extra=True))
elif isinstance(stmt.rvalue, DictExpr): # dict literals
for key_expr, value_expr in stmt.rvalue.items:
if not isinstance(key_expr, StrExpr):
continue
config.update(self.get_config_update(key_expr.value, value_expr))
elif isinstance(stmt, ClassDef):
if stmt.name != 'Config': # 'deprecated' Config-class
continue
for substmt in stmt.defs.body:
if not isinstance(substmt, AssignmentStmt):
continue
lhs = substmt.lvalues[0]
if not isinstance(lhs, NameExpr):
continue
config.update(self.get_config_update(lhs.name, substmt.rvalue))
if has_config_kwargs:
self._api.fail(
'Specifying config in two places is ambiguous, use either Config attribute or class kwargs',
cls,
)
break
has_config_from_namespace = True
if has_config_kwargs or has_config_from_namespace:
if (
stmt
and config.has_alias_generator
and not (config.validate_by_name or config.populate_by_name)
and self.plugin_config.warn_required_dynamic_aliases
):
error_required_dynamic_aliases(self._api, stmt)
for info in cls.info.mro[1:]: # 0 is the current class
if METADATA_KEY not in info.metadata:
continue
# Each class depends on the set of fields in its ancestors
self._api.add_plugin_dependency(make_wildcard_trigger(info.fullname))
for name, value in info.metadata[METADATA_KEY]['config'].items():
config.setdefault(name, value)
return config
def collect_fields_and_class_vars(
self, model_config: ModelConfigData, is_root_model: bool
) -> tuple[list[PydanticModelField] | None, list[PydanticModelClassVar] | None]:
"""Collects the fields for the model, accounting for parent classes."""
cls = self._cls
# First, collect fields and ClassVars belonging to any class in the MRO, ignoring duplicates.
#
# We iterate through the MRO in reverse because attrs defined in the parent must appear
# earlier in the attributes list than attrs defined in the child. See:
# https://docs.python.org/3/library/dataclasses.html#inheritance
#
# However, we also want fields defined in the subtype to override ones defined
# in the parent. We can implement this via a dict without disrupting the attr order
# because dicts preserve insertion order in Python 3.7+.
found_fields: dict[str, PydanticModelField] = {}
found_class_vars: dict[str, PydanticModelClassVar] = {}
for info in reversed(cls.info.mro[1:-1]): # 0 is the current class, -2 is BaseModel, -1 is object
# if BASEMODEL_METADATA_TAG_KEY in info.metadata and BASEMODEL_METADATA_KEY not in info.metadata:
# # We haven't processed the base class yet. Need another pass.
# return None, None
if METADATA_KEY not in info.metadata:
continue
# Each class depends on the set of attributes in its dataclass ancestors.
self._api.add_plugin_dependency(make_wildcard_trigger(info.fullname))
for name, data in info.metadata[METADATA_KEY]['fields'].items():
field = PydanticModelField.deserialize(info, data, self._api)
# (The following comment comes directly from the dataclasses plugin)
# TODO: We shouldn't be performing type operations during the main
# semantic analysis pass, since some TypeInfo attributes might
# still be in flux. This should be performed in a later phase.
field.expand_typevar_from_subtype(cls.info, self._api)
found_fields[name] = field
sym_node = cls.info.names.get(name)
if sym_node and sym_node.node and not isinstance(sym_node.node, Var):
self._api.fail(
'BaseModel field may only be overridden by another field',
sym_node.node,
)
# Collect ClassVars
for name, data in info.metadata[METADATA_KEY]['class_vars'].items():
found_class_vars[name] = PydanticModelClassVar.deserialize(data)
# Second, collect fields and ClassVars belonging to the current class.
current_field_names: set[str] = set()
current_class_vars_names: set[str] = set()
for stmt in self._get_assignment_statements_from_block(cls.defs):
maybe_field = self.collect_field_or_class_var_from_stmt(stmt, model_config, found_class_vars)
if maybe_field is None:
continue
lhs = stmt.lvalues[0]
assert isinstance(lhs, NameExpr) # collect_field_or_class_var_from_stmt guarantees this
if isinstance(maybe_field, PydanticModelField):
if is_root_model and lhs.name != 'root':
error_extra_fields_on_root_model(self._api, stmt)
else:
current_field_names.add(lhs.name)
found_fields[lhs.name] = maybe_field
elif isinstance(maybe_field, PydanticModelClassVar):
current_class_vars_names.add(lhs.name)
found_class_vars[lhs.name] = maybe_field
return list(found_fields.values()), list(found_class_vars.values())
def _get_assignment_statements_from_if_statement(self, stmt: IfStmt) -> Iterator[AssignmentStmt]:
for body in stmt.body:
if not body.is_unreachable:
yield from self._get_assignment_statements_from_block(body)
if stmt.else_body is not None and not stmt.else_body.is_unreachable:
yield from self._get_assignment_statements_from_block(stmt.else_body)
def _get_assignment_statements_from_block(self, block: Block) -> Iterator[AssignmentStmt]:
for stmt in block.body:
if isinstance(stmt, AssignmentStmt):
yield stmt
elif isinstance(stmt, IfStmt):
yield from self._get_assignment_statements_from_if_statement(stmt)
def collect_field_or_class_var_from_stmt( # noqa C901
self, stmt: AssignmentStmt, model_config: ModelConfigData, class_vars: dict[str, PydanticModelClassVar]
) -> PydanticModelField | PydanticModelClassVar | None:
"""Get pydantic model field from statement.
Args:
stmt: The statement.
model_config: Configuration settings for the model.
class_vars: ClassVars already known to be defined on the model.
Returns:
A pydantic model field if it could find the field in statement. Otherwise, `None`.
"""
cls = self._cls
lhs = stmt.lvalues[0]
if not isinstance(lhs, NameExpr) or not _fields.is_valid_field_name(lhs.name) or lhs.name == 'model_config':
return None
if not stmt.new_syntax:
if (
isinstance(stmt.rvalue, CallExpr)
and isinstance(stmt.rvalue.callee, CallExpr)
and isinstance(stmt.rvalue.callee.callee, NameExpr)
and stmt.rvalue.callee.callee.fullname in DECORATOR_FULLNAMES
):
# This is a (possibly-reused) validator or serializer, not a field
# In particular, it looks something like: my_validator = validator('my_field')(f)
# Eventually, we may want to attempt to respect model_config['ignored_types']
return None
if lhs.name in class_vars:
# Class vars are not fields and are not required to be annotated
return None
# The assignment does not have an annotation, and it's not anything else we recognize
error_untyped_fields(self._api, stmt)
return None
lhs = stmt.lvalues[0]
if not isinstance(lhs, NameExpr):
return None
if not _fields.is_valid_field_name(lhs.name) or lhs.name == 'model_config':
return None
sym = cls.info.names.get(lhs.name)
if sym is None: # pragma: no cover
# This is likely due to a star import (see the dataclasses plugin for a more detailed explanation)
# This is the same logic used in the dataclasses plugin
return None
node = sym.node
if isinstance(node, PlaceholderNode): # pragma: no cover
# See the PlaceholderNode docstring for more detail about how this can occur
# Basically, it is an edge case when dealing with complex import logic
# The dataclasses plugin now asserts this cannot happen, but I'd rather not error if it does..
return None
if isinstance(node, TypeAlias):
self._api.fail(
'Type aliases inside BaseModel definitions are not supported at runtime',
node,
)
# Skip processing this node. This doesn't match the runtime behaviour,
# but the only alternative would be to modify the SymbolTable,
# and it's a little hairy to do that in a plugin.
return None
if not isinstance(node, Var): # pragma: no cover
# Don't know if this edge case still happens with the `is_valid_field` check above
# but better safe than sorry
# The dataclasses plugin now asserts this cannot happen, but I'd rather not error if it does..
return None
# x: ClassVar[int] is not a field
if node.is_classvar:
return PydanticModelClassVar(lhs.name)
# x: InitVar[int] is not supported in BaseModel
node_type = get_proper_type(node.type)
if isinstance(node_type, Instance) and node_type.type.fullname == 'dataclasses.InitVar':
self._api.fail(
'InitVar is not supported in BaseModel',
node,
)
has_default = self.get_has_default(stmt)
strict = self.get_strict(stmt)
if sym.type is None and node.is_final and node.is_inferred:
# This follows the logic from the dataclasses plugin. The following comment is taken verbatim:
#
# This is a special case, assignment like x: Final = 42 is classified
# annotated above, but mypy strips the `Final` turning it into x = 42.
# We do not support inferred types in dataclasses, so we can try inferring
# type for simple literals, and otherwise require an explicit type
# argument for Final[...].
typ = self._api.analyze_simple_literal_type(stmt.rvalue, is_final=True)
if typ:
node.type = typ
else:
self._api.fail(
'Need type argument for Final[...] with non-literal default in BaseModel',
stmt,
)
node.type = AnyType(TypeOfAny.from_error)
if node.is_final and has_default:
# TODO this path should be removed (see https://github.com/pydantic/pydantic/issues/11119)
return PydanticModelClassVar(lhs.name)
alias, has_dynamic_alias = self.get_alias_info(stmt)
if (
has_dynamic_alias
and not (model_config.validate_by_name or model_config.populate_by_name)
and self.plugin_config.warn_required_dynamic_aliases
):
error_required_dynamic_aliases(self._api, stmt)
is_frozen = self.is_field_frozen(stmt)
init_type = self._infer_dataclass_attr_init_type(sym, lhs.name, stmt)
return PydanticModelField(
name=lhs.name,
has_dynamic_alias=has_dynamic_alias,
has_default=has_default,
strict=strict,
alias=alias,
is_frozen=is_frozen,
line=stmt.line,
column=stmt.column,
type=init_type,
info=cls.info,
)
def _infer_dataclass_attr_init_type(self, sym: SymbolTableNode, name: str, context: Context) -> Type | None:
"""Infer __init__ argument type for an attribute.
In particular, possibly use the signature of __set__.
"""
default = sym.type
if sym.implicit:
return default
t = get_proper_type(sym.type)
# Perform a simple-minded inference from the signature of __set__, if present.
# We can't use mypy.checkmember here, since this plugin runs before type checking.
# We only support some basic scanerios here, which is hopefully sufficient for
# the vast majority of use cases.
if not isinstance(t, Instance):
return default
setter = t.type.get('__set__')
if setter:
if isinstance(setter.node, FuncDef):
super_info = t.type.get_containing_type_info('__set__')
assert super_info
if setter.type:
setter_type = get_proper_type(map_type_from_supertype(setter.type, t.type, super_info))
else:
return AnyType(TypeOfAny.unannotated)
if isinstance(setter_type, CallableType) and setter_type.arg_kinds == [
ARG_POS,
ARG_POS,
ARG_POS,
]:
return expand_type_by_instance(setter_type.arg_types[2], t)
else:
self._api.fail(f'Unsupported signature for "__set__" in "{t.type.name}"', context)
else:
self._api.fail(f'Unsupported "__set__" in "{t.type.name}"', context)
return default
def add_initializer(
self, fields: list[PydanticModelField], config: ModelConfigData, is_settings: bool, is_root_model: bool
) -> None:
"""Adds a fields-aware `__init__` method to the class.
The added `__init__` will be annotated with types vs. all `Any` depending on the plugin settings.
"""
if '__init__' in self._cls.info.names and not self._cls.info.names['__init__'].plugin_generated:
return # Don't generate an __init__ if one already exists
typed = self.plugin_config.init_typed
model_strict = bool(config.strict)
use_alias = not (config.validate_by_name or config.populate_by_name) and config.validate_by_alias is not False
requires_dynamic_aliases = bool(config.has_alias_generator and not config.validate_by_name)
args = self.get_field_arguments(
fields,
typed=typed,
model_strict=model_strict,
requires_dynamic_aliases=requires_dynamic_aliases,
use_alias=use_alias,
is_settings=is_settings,
is_root_model=is_root_model,
force_typevars_invariant=True,
)
if is_settings:
base_settings_node = self._api.lookup_fully_qualified(BASESETTINGS_FULLNAME).node
assert isinstance(base_settings_node, TypeInfo)
if '__init__' in base_settings_node.names:
base_settings_init_node = base_settings_node.names['__init__'].node
assert isinstance(base_settings_init_node, FuncDef)
if base_settings_init_node is not None and base_settings_init_node.type is not None:
func_type = base_settings_init_node.type
assert isinstance(func_type, CallableType)
for arg_idx, arg_name in enumerate(func_type.arg_names):
if arg_name is None or arg_name.startswith('__') or not arg_name.startswith('_'):
continue
analyzed_variable_type = self._api.anal_type(func_type.arg_types[arg_idx])
if analyzed_variable_type is not None and arg_name == '_cli_settings_source':
# _cli_settings_source is defined as CliSettingsSource[Any], and as such
# the Any causes issues with --disallow-any-explicit. As a workaround, change
# the Any type (as if CliSettingsSource was left unparameterized):
analyzed_variable_type = analyzed_variable_type.accept(
ChangeExplicitTypeOfAny(TypeOfAny.from_omitted_generics)
)
variable = Var(arg_name, analyzed_variable_type)
args.append(Argument(variable, analyzed_variable_type, None, ARG_OPT))
if not self.should_init_forbid_extra(fields, config):
var = Var('kwargs')
args.append(Argument(var, AnyType(TypeOfAny.explicit), None, ARG_STAR2))
add_method(self._api, self._cls, '__init__', args=args, return_type=NoneType())
def add_model_construct_method(
self,
fields: list[PydanticModelField],
config: ModelConfigData,
is_settings: bool,
is_root_model: bool,
) -> None:
"""Adds a fully typed `model_construct` classmethod to the class.
Similar to the fields-aware __init__ method, but always uses the field names (not aliases),
and does not treat settings fields as optional.
"""
set_str = self._api.named_type(f'{BUILTINS_NAME}.set', [self._api.named_type(f'{BUILTINS_NAME}.str')])
optional_set_str = UnionType([set_str, NoneType()])
fields_set_argument = Argument(Var('_fields_set', optional_set_str), optional_set_str, None, ARG_OPT)
with state.strict_optional_set(self._api.options.strict_optional):
args = self.get_field_arguments(
fields,
typed=True,
model_strict=bool(config.strict),
requires_dynamic_aliases=False,
use_alias=False,
is_settings=is_settings,
is_root_model=is_root_model,
)
if not self.should_init_forbid_extra(fields, config):
var = Var('kwargs')
args.append(Argument(var, AnyType(TypeOfAny.explicit), None, ARG_STAR2))
args = args + [fields_set_argument] if is_root_model else [fields_set_argument] + args
add_method(
self._api,
self._cls,
'model_construct',
args=args,
return_type=fill_typevars(self._cls.info),
is_classmethod=True,
)
def set_frozen(self, fields: list[PydanticModelField], api: SemanticAnalyzerPluginInterface, frozen: bool) -> None:
"""Marks all fields as properties so that attempts to set them trigger mypy errors.
This is the same approach used by the attrs and dataclasses plugins.
"""
info = self._cls.info
for field in fields:
sym_node = info.names.get(field.name)
if sym_node is not None:
var = sym_node.node
if isinstance(var, Var):
var.is_property = frozen or field.is_frozen
elif isinstance(var, PlaceholderNode) and not self._api.final_iteration:
# See https://github.com/pydantic/pydantic/issues/5191 to hit this branch for test coverage
self._api.defer()
# `var` can also be a FuncDef or Decorator node (e.g. when overriding a field with a function or property).
# In that case, we don't want to do anything. Mypy will already raise an error that a field was not properly
# overridden.
else:
var = field.to_var(info, api, use_alias=False)
var.info = info
var.is_property = frozen
var._fullname = info.fullname + '.' + var.name
info.names[var.name] = SymbolTableNode(MDEF, var)
def get_config_update(self, name: str, arg: Expression, lax_extra: bool = False) -> ModelConfigData | None:
"""Determines the config update due to a single kwarg in the ConfigDict definition.
Warns if a tracked config attribute is set to a value the plugin doesn't know how to interpret (e.g., an int)
"""
if name not in self.tracked_config_fields:
return None
if name == 'extra':
if isinstance(arg, StrExpr):
forbid_extra = arg.value == 'forbid'
elif isinstance(arg, MemberExpr):
forbid_extra = arg.name == 'forbid'
else:
if not lax_extra:
# Only emit an error for other types of `arg` (e.g., `NameExpr`, `ConditionalExpr`, etc.) when
# reading from a config class, etc. If a ConfigDict is used, then we don't want to emit an error
# because you'll get type checking from the ConfigDict itself.
#
# It would be nice if we could introspect the types better otherwise, but I don't know what the API
# is to evaluate an expr into its type and then check if that type is compatible with the expected
# type. Note that you can still get proper type checking via: `model_config = ConfigDict(...)`, just
# if you don't use an explicit string, the plugin won't be able to infer whether extra is forbidden.
error_invalid_config_value(name, self._api, arg)
return None
return ModelConfigData(forbid_extra=forbid_extra)
if name == 'alias_generator':
has_alias_generator = True
if isinstance(arg, NameExpr) and arg.fullname == 'builtins.None':
has_alias_generator = False
return ModelConfigData(has_alias_generator=has_alias_generator)
if isinstance(arg, NameExpr) and arg.fullname in ('builtins.True', 'builtins.False'):
return ModelConfigData(**{name: arg.fullname == 'builtins.True'})
error_invalid_config_value(name, self._api, arg)
return None
@staticmethod
def get_has_default(stmt: AssignmentStmt) -> bool:
"""Returns a boolean indicating whether the field defined in `stmt` is a required field."""
expr = stmt.rvalue
if isinstance(expr, TempNode):
# TempNode means annotation-only, so has no default
return False
if isinstance(expr, CallExpr) and isinstance(expr.callee, RefExpr) and expr.callee.fullname == FIELD_FULLNAME:
# The "default value" is a call to `Field`; at this point, the field has a default if and only if:
# * there is a positional argument that is not `...`
# * there is a keyword argument named "default" that is not `...`
# * there is a "default_factory" that is not `None`
for arg, name in zip(expr.args, expr.arg_names):
# If name is None, then this arg is the default because it is the only positional argument.
if name is None or name == 'default':
return arg.__class__ is not EllipsisExpr
if name == 'default_factory':
return not (isinstance(arg, NameExpr) and arg.fullname == 'builtins.None')
return False
# Has no default if the "default value" is Ellipsis (i.e., `field_name: Annotation = ...`)
return not isinstance(expr, EllipsisExpr)
@staticmethod
def get_strict(stmt: AssignmentStmt) -> bool | None:
"""Returns a the `strict` value of a field if defined, otherwise `None`."""
expr = stmt.rvalue
if isinstance(expr, CallExpr) and isinstance(expr.callee, RefExpr) and expr.callee.fullname == FIELD_FULLNAME:
for arg, name in zip(expr.args, expr.arg_names):
if name != 'strict':
continue
if isinstance(arg, NameExpr):
if arg.fullname == 'builtins.True':
return True
elif arg.fullname == 'builtins.False':
return False
return None
return None
@staticmethod
def get_alias_info(stmt: AssignmentStmt) -> tuple[str | None, bool]:
"""Returns a pair (alias, has_dynamic_alias), extracted from the declaration of the field defined in `stmt`.
`has_dynamic_alias` is True if and only if an alias is provided, but not as a string literal.
If `has_dynamic_alias` is True, `alias` will be None.
"""
expr = stmt.rvalue
if isinstance(expr, TempNode):
# TempNode means annotation-only
return None, False
if not (
isinstance(expr, CallExpr) and isinstance(expr.callee, RefExpr) and expr.callee.fullname == FIELD_FULLNAME
):
# Assigned value is not a call to pydantic.fields.Field
return None, False
if 'validation_alias' in expr.arg_names:
arg = expr.args[expr.arg_names.index('validation_alias')]
elif 'alias' in expr.arg_names:
arg = expr.args[expr.arg_names.index('alias')]
else:
return None, False
if isinstance(arg, StrExpr):
return arg.value, False
else:
return None, True
@staticmethod
def is_field_frozen(stmt: AssignmentStmt) -> bool:
"""Returns whether the field is frozen, extracted from the declaration of the field defined in `stmt`.
Note that this is only whether the field was declared to be frozen in a `<field_name> = Field(frozen=True)`
sense; this does not determine whether the field is frozen because the entire model is frozen; that is
handled separately.
"""
expr = stmt.rvalue
if isinstance(expr, TempNode):
# TempNode means annotation-only
return False
if not (
isinstance(expr, CallExpr) and isinstance(expr.callee, RefExpr) and expr.callee.fullname == FIELD_FULLNAME
):
# Assigned value is not a call to pydantic.fields.Field
return False
for i, arg_name in enumerate(expr.arg_names):
if arg_name == 'frozen':
arg = expr.args[i]
return isinstance(arg, NameExpr) and arg.fullname == 'builtins.True'
return False
def get_field_arguments(
self,
fields: list[PydanticModelField],
typed: bool,
model_strict: bool,
use_alias: bool,
requires_dynamic_aliases: bool,
is_settings: bool,
is_root_model: bool,
force_typevars_invariant: bool = False,
) -> list[Argument]:
"""Helper function used during the construction of the `__init__` and `model_construct` method signatures.
Returns a list of mypy Argument instances for use in the generated signatures.
"""
info = self._cls.info
arguments = [
field.to_argument(
info,
typed=typed,
model_strict=model_strict,
force_optional=requires_dynamic_aliases or is_settings,
use_alias=use_alias,
api=self._api,
force_typevars_invariant=force_typevars_invariant,
is_root_model_root=is_root_model and field.name == 'root',
)
for field in fields
if not (use_alias and field.has_dynamic_alias)
]
return arguments
def should_init_forbid_extra(self, fields: list[PydanticModelField], config: ModelConfigData) -> bool:
"""Indicates whether the generated `__init__` should get a `**kwargs` at the end of its signature.
We disallow arbitrary kwargs if the extra config setting is "forbid", or if the plugin config says to,
*unless* a required dynamic alias is present (since then we can't determine a valid signature).
"""
if not (config.validate_by_name or config.populate_by_name):
if self.is_dynamic_alias_present(fields, bool(config.has_alias_generator)):
return False
if config.forbid_extra:
return True
return self.plugin_config.init_forbid_extra
@staticmethod
def is_dynamic_alias_present(fields: list[PydanticModelField], has_alias_generator: bool) -> bool:
"""Returns whether any fields on the model have a "dynamic alias", i.e., an alias that cannot be
determined during static analysis.
"""
for field in fields:
if field.has_dynamic_alias:
return True
if has_alias_generator:
for field in fields:
if field.alias is None:
return True
return False
| PydanticModelTransformer |
python | allegroai__clearml | clearml/backend_api/services/v2_23/dataviews.py | {
"start": 47290,
"end": 50413
} | class ____(Response):
"""
Response of dataviews.archive_many endpoint.
:param succeeded:
:type succeeded: Sequence[dict]
:param failed:
:type failed: Sequence[dict]
"""
_service = "dataviews"
_action = "archive_many"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"failed": {
"items": {
"properties": {
"error": {
"description": "Error info",
"properties": {
"codes": {
"items": {"type": "integer"},
"type": "array",
},
"data": {
"additionalProperties": True,
"type": "object",
},
"msg": {"type": "string"},
},
"type": "object",
},
"id": {
"description": "ID of the failed entity",
"type": "string",
},
},
"type": "object",
},
"type": ["array", "null"],
},
"succeeded": {
"items": {
"properties": {
"archived": {
"description": "Indicates whether the dataview was archived",
"type": "boolean",
},
"id": {
"description": "ID of the succeeded entity",
"type": "string",
},
},
"type": "object",
},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(self, succeeded=None, failed=None, **kwargs):
super(ArchiveManyResponse, self).__init__(**kwargs)
self.succeeded = succeeded
self.failed = failed
@schema_property("succeeded")
def succeeded(self):
return self._property_succeeded
@succeeded.setter
def succeeded(self, value):
if value is None:
self._property_succeeded = None
return
self.assert_isinstance(value, "succeeded", (list, tuple))
self.assert_isinstance(value, "succeeded", (dict,), is_array=True)
self._property_succeeded = value
@schema_property("failed")
def failed(self):
return self._property_failed
@failed.setter
def failed(self, value):
if value is None:
self._property_failed = None
return
self.assert_isinstance(value, "failed", (list, tuple))
self.assert_isinstance(value, "failed", (dict,), is_array=True)
self._property_failed = value
| ArchiveManyResponse |
python | tiangolo__fastapi | fastapi/openapi/models.py | {
"start": 3561,
"end": 8597
} | class ____(BaseModelWithConfig):
# Ref: JSON Schema 2020-12: https://json-schema.org/draft/2020-12/json-schema-core.html#name-the-json-schema-core-vocabu
# Core Vocabulary
schema_: Optional[str] = Field(default=None, alias="$schema")
vocabulary: Optional[str] = Field(default=None, alias="$vocabulary")
id: Optional[str] = Field(default=None, alias="$id")
anchor: Optional[str] = Field(default=None, alias="$anchor")
dynamicAnchor: Optional[str] = Field(default=None, alias="$dynamicAnchor")
ref: Optional[str] = Field(default=None, alias="$ref")
dynamicRef: Optional[str] = Field(default=None, alias="$dynamicRef")
defs: Optional[Dict[str, "SchemaOrBool"]] = Field(default=None, alias="$defs")
comment: Optional[str] = Field(default=None, alias="$comment")
# Ref: JSON Schema 2020-12: https://json-schema.org/draft/2020-12/json-schema-core.html#name-a-vocabulary-for-applying-s
# A Vocabulary for Applying Subschemas
allOf: Optional[List["SchemaOrBool"]] = None
anyOf: Optional[List["SchemaOrBool"]] = None
oneOf: Optional[List["SchemaOrBool"]] = None
not_: Optional["SchemaOrBool"] = Field(default=None, alias="not")
if_: Optional["SchemaOrBool"] = Field(default=None, alias="if")
then: Optional["SchemaOrBool"] = None
else_: Optional["SchemaOrBool"] = Field(default=None, alias="else")
dependentSchemas: Optional[Dict[str, "SchemaOrBool"]] = None
prefixItems: Optional[List["SchemaOrBool"]] = None
# TODO: uncomment and remove below when deprecating Pydantic v1
# It generates a list of schemas for tuples, before prefixItems was available
# items: Optional["SchemaOrBool"] = None
items: Optional[Union["SchemaOrBool", List["SchemaOrBool"]]] = None
contains: Optional["SchemaOrBool"] = None
properties: Optional[Dict[str, "SchemaOrBool"]] = None
patternProperties: Optional[Dict[str, "SchemaOrBool"]] = None
additionalProperties: Optional["SchemaOrBool"] = None
propertyNames: Optional["SchemaOrBool"] = None
unevaluatedItems: Optional["SchemaOrBool"] = None
unevaluatedProperties: Optional["SchemaOrBool"] = None
# Ref: JSON Schema Validation 2020-12: https://json-schema.org/draft/2020-12/json-schema-validation.html#name-a-vocabulary-for-structural
# A Vocabulary for Structural Validation
type: Optional[Union[SchemaType, List[SchemaType]]] = None
enum: Optional[List[Any]] = None
const: Optional[Any] = None
multipleOf: Optional[float] = Field(default=None, gt=0)
maximum: Optional[float] = None
exclusiveMaximum: Optional[float] = None
minimum: Optional[float] = None
exclusiveMinimum: Optional[float] = None
maxLength: Optional[int] = Field(default=None, ge=0)
minLength: Optional[int] = Field(default=None, ge=0)
pattern: Optional[str] = None
maxItems: Optional[int] = Field(default=None, ge=0)
minItems: Optional[int] = Field(default=None, ge=0)
uniqueItems: Optional[bool] = None
maxContains: Optional[int] = Field(default=None, ge=0)
minContains: Optional[int] = Field(default=None, ge=0)
maxProperties: Optional[int] = Field(default=None, ge=0)
minProperties: Optional[int] = Field(default=None, ge=0)
required: Optional[List[str]] = None
dependentRequired: Optional[Dict[str, Set[str]]] = None
# Ref: JSON Schema Validation 2020-12: https://json-schema.org/draft/2020-12/json-schema-validation.html#name-vocabularies-for-semantic-c
# Vocabularies for Semantic Content With "format"
format: Optional[str] = None
# Ref: JSON Schema Validation 2020-12: https://json-schema.org/draft/2020-12/json-schema-validation.html#name-a-vocabulary-for-the-conten
# A Vocabulary for the Contents of String-Encoded Data
contentEncoding: Optional[str] = None
contentMediaType: Optional[str] = None
contentSchema: Optional["SchemaOrBool"] = None
# Ref: JSON Schema Validation 2020-12: https://json-schema.org/draft/2020-12/json-schema-validation.html#name-a-vocabulary-for-basic-meta
# A Vocabulary for Basic Meta-Data Annotations
title: Optional[str] = None
description: Optional[str] = None
default: Optional[Any] = None
deprecated: Optional[bool] = None
readOnly: Optional[bool] = None
writeOnly: Optional[bool] = None
examples: Optional[List[Any]] = None
# Ref: OpenAPI 3.1.0: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#schema-object
# Schema Object
discriminator: Optional[Discriminator] = None
xml: Optional[XML] = None
externalDocs: Optional[ExternalDocumentation] = None
example: Annotated[
Optional[Any],
typing_deprecated(
"Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, "
"although still supported. Use examples instead."
),
] = None
# Ref: https://json-schema.org/draft/2020-12/json-schema-core.html#name-json-schema-documents
# A JSON Schema MUST be an object or a boolean.
SchemaOrBool = Union[Schema, bool]
| Schema |
python | pytorch__pytorch | test/inductor/test_block_analysis.py | {
"start": 579,
"end": 4528
} | class ____(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# Create a GraphLowering, so we can access V.graph.
cls.graph = dummy_graph()
@parametrize(
"stride,symbol,expr",
[
(5, x, Identity(5 * x)),
(4, y, 4 * Identity(y)),
(3, x, Identity(3) * x),
],
)
def test_affine_identity(self, stride: int, symbol: sympy.Symbol, expr: sympy.Expr):
# Test that we can handle an identity expression in affine indexing.
matched_stride = BlockPatternMatcher.match_affine_block_expr(expr, symbol)
self.assertEqual(matched_stride, stride)
@parametrize(
"dims,strides,symbol,expr",
[
(
(2, 4),
(4, 1),
x,
4 * FloorDiv(Identity(x), 4) + ModularIndexing(x, 1, 4),
),
(
(3, 9),
(5, 2),
x,
5 * FloorDiv(x, 9) + 2 * ModularIndexing(Identity(x), 1, 9),
),
((2, 7), (1, 1), x, Identity(FloorDiv(x, 7) + ModularIndexing(x, 1, 7))),
],
)
def test_mod_div_identity(
self,
dims: tuple[int],
strides: tuple[int],
symbol: sympy.Symbol,
expr: sympy.Expr,
):
# Test that we can handle an identity expression in modular indexing.
numel = int(torch.prod(torch.Tensor(dims)))
num_dims = len(dims)
with V.set_graph_handler(self.graph):
match_result = BlockPatternMatcher.match_mod_div_block_expr(
expr, symbol, numel, num_dims
)
# Check the matched block dimensions.
self.assertNotEqual(match_result, None)
matched_dims, matched_strides, matched_block_index_exprs = match_result
self.assertEqual(matched_dims, dims)
self.assertEqual(matched_strides, strides)
@parametrize(
"symbol,expr,subexpr",
[
(x, Identity(x), x),
(x, Identity(x + 5), x),
(y, Identity(x + 2 * y) + 5, 2 * y),
],
)
def test_subexpr_identity(
self,
symbol: sympy.Symbol,
expr: sympy.Expr,
subexpr: sympy.Expr,
):
matched_subexpr = BlockPatternMatcher.get_subexpr_involving_symbol(expr, symbol)
self.assertEqual(matched_subexpr, subexpr)
def test_index_with_dynamic_shapes(self):
s0 = sympy.var("s0", integer=True)
s1 = sympy.var("s1", integer=True)
dims = [s1, sympy.Integer(3)]
num_dims = len(dims)
numel = dims[0] * dims[1]
strides = [sympy.Integer(3) * s0, sympy.Integer(1)]
block_index_exprs = [
FloorDiv(y, sympy.Integer(3)),
ModularIndexing(y, sympy.Integer(1), sympy.Integer(3)),
]
index = sympy_dot(strides, block_index_exprs)
with V.set_graph_handler(self.graph):
match = BlockPatternMatcher.match_mod_div_block_expr(
index, y, numel, num_dims
)
sizevars = V.graph.sizevars
for expected, actual in zip((dims, strides, block_index_exprs), match):
assert isinstance(expected, (list, tuple)) and isinstance(
actual, (list, tuple)
)
for expected_expr, actual_expr in zip(expected, actual):
assert isinstance(expected_expr, sympy.Expr) and isinstance(
actual_expr, sympy.Expr
)
self.assertTrue(
sizevars.statically_known_equals(
sizevars.remove_precomputed_replacements(expected_expr),
sizevars.remove_precomputed_replacements(actual_expr),
)
)
if __name__ == "__main__":
run_tests()
| BlockAnalysisTest |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 44621,
"end": 44771
} | class ____(models.Model):
target = models.OneToOneField(OneToOneTargetTestModel, primary_key=True, on_delete=models.CASCADE)
| OneToOneSourceTestModel |
python | mlflow__mlflow | dev/clint/src/clint/comments.py | {
"start": 309,
"end": 1234
} | class ____:
start: "Position"
end: "Position"
rules: set[str]
@classmethod
def from_token(cls, token: tokenize.TokenInfo) -> Self | None:
# Import here to avoid circular dependency
from clint.linter import Position
if match := NOQA_REGEX.match(token.string):
rules = set(match.group(1).upper().split(","))
start = Position(token.start[0], token.start[1])
end = Position(token.end[0], token.end[1])
return cls(start=start, end=end, rules=rules)
return None
def iter_comments(code: str) -> Iterator[tokenize.TokenInfo]:
readline = io.StringIO(code).readline
try:
tokens = tokenize.generate_tokens(readline)
for token in tokens:
if token.type == tokenize.COMMENT:
yield token
except tokenize.TokenError:
# Handle incomplete tokens at end of file
pass
| Noqa |
python | wandb__wandb | wandb/vendor/pygments/lexers/rebol.py | {
"start": 10519,
"end": 18617
} | class ____(RegexLexer):
"""
A `Red-language <http://www.red-lang.org/>`_ lexer.
.. versionadded:: 2.0
"""
name = 'Red'
aliases = ['red', 'red/system']
filenames = ['*.red', '*.reds']
mimetypes = ['text/x-red', 'text/x-red-system']
flags = re.IGNORECASE | re.MULTILINE
escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'
def word_callback(lexer, match):
word = match.group()
if re.match(".*:$", word):
yield match.start(), Generic.Subheading, word
elif re.match(r'(if|unless|either|any|all|while|until|loop|repeat|'
r'foreach|forall|func|function|does|has|switch|'
r'case|reduce|compose|get|set|print|prin|equal\?|'
r'not-equal\?|strict-equal\?|lesser\?|greater\?|lesser-or-equal\?|'
r'greater-or-equal\?|same\?|not|type\?|stats|'
r'bind|union|replace|charset|routine)$', word):
yield match.start(), Name.Builtin, word
elif re.match(r'(make|random|reflect|to|form|mold|absolute|add|divide|multiply|negate|'
r'power|remainder|round|subtract|even\?|odd\?|and~|complement|or~|xor~|'
r'append|at|back|change|clear|copy|find|head|head\?|index\?|insert|'
r'length\?|next|pick|poke|remove|reverse|select|sort|skip|swap|tail|tail\?|'
r'take|trim|create|close|delete|modify|open|open\?|query|read|rename|'
r'update|write)$', word):
yield match.start(), Name.Function, word
elif re.match(r'(yes|on|no|off|true|false|tab|cr|lf|newline|escape|slash|sp|space|null|'
r'none|crlf|dot|null-byte)$', word):
yield match.start(), Name.Builtin.Pseudo, word
elif re.match(r'(#system-global|#include|#enum|#define|#either|#if|#import|#export|'
r'#switch|#default|#get-definition)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match(r'(system|halt|quit|quit-return|do|load|q|recycle|call|run|ask|parse|'
r'raise-error|return|exit|break|alias|push|pop|probe|\?\?|spec-of|body-of|'
r'quote|forever)$', word):
yield match.start(), Name.Exception, word
elif re.match(r'(action\?|block\?|char\?|datatype\?|file\?|function\?|get-path\?|zero\?|'
r'get-word\?|integer\?|issue\?|lit-path\?|lit-word\?|logic\?|native\?|'
r'op\?|paren\?|path\?|refinement\?|set-path\?|set-word\?|string\?|unset\?|'
r'any-struct\?|none\?|word\?|any-series\?)$', word):
yield match.start(), Keyword, word
elif re.match(r'(JNICALL|stdcall|cdecl|infix)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match("to-.*", word):
yield match.start(), Keyword, word
elif re.match('(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|'
'<<<|>>>|<<|>>|<|>%)$', word):
yield match.start(), Operator, word
elif re.match(".*\!$", word):
yield match.start(), Keyword.Type, word
elif re.match("'.*", word):
yield match.start(), Name.Variable.Instance, word # lit-word
elif re.match("#.*", word):
yield match.start(), Name.Label, word # issue
elif re.match("%.*", word):
yield match.start(), Name.Decorator, word # file
elif re.match(":.*", word):
yield match.start(), Generic.Subheading, word # get-word
else:
yield match.start(), Name.Variable, word
tokens = {
'root': [
(r'[^R]+', Comment),
(r'Red/System\s+\[', Generic.Strong, 'script'),
(r'Red\s+\[', Generic.Strong, 'script'),
(r'R', Comment)
],
'script': [
(r'\s+', Text),
(r'#"', String.Char, 'char'),
(r'#\{[0-9a-f\s]*\}', Number.Hex),
(r'2#\{', Number.Hex, 'bin2'),
(r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
(r'([0-9a-f]+)(h)((\s)|(?=[\[\]{}"()]))',
bygroups(Number.Hex, Name.Variable, Whitespace)),
(r'"', String, 'string'),
(r'\{', String, 'string2'),
(r';#+.*\n', Comment.Special),
(r';\*+.*\n', Comment.Preproc),
(r';.*\n', Comment),
(r'%"', Name.Decorator, 'stringFile'),
(r'%[^(^{")\s\[\]]+', Name.Decorator),
(r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
(r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
(r'\d+[\-/][0-9a-z]+[\-/]\d+(/\d+:\d+((:\d+)?'
r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date
(r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
(r'\d+X\d+', Keyword.Constant), # pair
(r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
(r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
(r'[+-]?\d+(\'\d+)?', Number),
(r'[\[\]()]', Generic.Strong),
(r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url
(r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url
(r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email
(r'comment\s"', Comment, 'commentString1'),
(r'comment\s\{', Comment, 'commentString2'),
(r'comment\s\[', Comment, 'commentBlock'),
(r'comment\s[^(\s{"\[]+', Comment),
(r'/[^(^{^")\s/[\]]*', Name.Attribute),
(r'([^(^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
(r'<[\w:.-]*>', Name.Tag),
(r'<[^(<>\s")]+', Name.Tag, 'tag'),
(r'([^(^{")\s]+)', Text),
],
'string': [
(r'[^(^")]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'"', String, '#pop'),
],
'string2': [
(r'[^(^{})]+', String),
(escape_re, String.Escape),
(r'[(|)]+', String),
(r'\^.', String.Escape),
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
],
'stringFile': [
(r'[^(^")]+', Name.Decorator),
(escape_re, Name.Decorator),
(r'\^.', Name.Decorator),
(r'"', Name.Decorator, '#pop'),
],
'char': [
(escape_re + '"', String.Char, '#pop'),
(r'\^."', String.Char, '#pop'),
(r'."', String.Char, '#pop'),
],
'tag': [
(escape_re, Name.Tag),
(r'"', Name.Tag, 'tagString'),
(r'[^(<>\r\n")]+', Name.Tag),
(r'>', Name.Tag, '#pop'),
],
'tagString': [
(r'[^(^")]+', Name.Tag),
(escape_re, Name.Tag),
(r'[(|)]+', Name.Tag),
(r'\^.', Name.Tag),
(r'"', Name.Tag, '#pop'),
],
'tuple': [
(r'(\d+\.)+', Keyword.Constant),
(r'\d+', Keyword.Constant, '#pop'),
],
'bin2': [
(r'\s+', Number.Hex),
(r'([01]\s*){8}', Number.Hex),
(r'\}', Number.Hex, '#pop'),
],
'commentString1': [
(r'[^(^")]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'"', Comment, '#pop'),
],
'commentString2': [
(r'[^(^{})]+', Comment),
(escape_re, Comment),
(r'[(|)]+', Comment),
(r'\^.', Comment),
(r'\{', Comment, '#push'),
(r'\}', Comment, '#pop'),
],
'commentBlock': [
(r'\[', Comment, '#push'),
(r'\]', Comment, '#pop'),
(r'"', Comment, "commentString1"),
(r'\{', Comment, "commentString2"),
(r'[^(\[\]"{)]+', Comment),
],
}
| RedLexer |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 729392,
"end": 729838
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("actor", "created_at", "discussion")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
discussion = sgqlc.types.Field("Discussion", graphql_name="discussion")
| ConvertedToDiscussionEvent |
python | scipy__scipy | scipy/optimize/_trustregion_ncg.py | {
"start": 1431,
"end": 4580
} | class ____(BaseQuadraticSubproblem):
"""Quadratic subproblem solved by a conjugate gradient method"""
def solve(self, trust_radius):
"""
Solve the subproblem using a conjugate gradient method.
Parameters
----------
trust_radius : float
We are allowed to wander only this far away from the origin.
Returns
-------
p : ndarray
The proposed step.
hits_boundary : bool
True if the proposed step is on the boundary of the trust region.
Notes
-----
This is algorithm (7.2) of Nocedal and Wright 2nd edition.
Only the function that computes the Hessian-vector product is required.
The Hessian itself is not required, and the Hessian does
not need to be positive semidefinite.
"""
# get the norm of jacobian and define the origin
p_origin = np.zeros_like(self.jac)
# define a default tolerance
tolerance = min(0.5, math.sqrt(self.jac_mag)) * self.jac_mag
# Stop the method if the search direction
# is a direction of nonpositive curvature.
if self.jac_mag < tolerance:
hits_boundary = False
return p_origin, hits_boundary
# init the state for the first iteration
z = p_origin
r = self.jac
d = -r
# Search for the min of the approximation of the objective function.
while True:
# do an iteration
Bd = self.hessp(d)
dBd = np.dot(d, Bd)
if dBd <= 0:
# Look at the two boundary points.
# Find both values of t to get the boundary points such that
# ||z + t d|| == trust_radius
# and then choose the one with the predicted min value.
ta, tb = self.get_boundaries_intersections(z, d, trust_radius)
pa = z + ta * d
pb = z + tb * d
if self(pa) < self(pb):
p_boundary = pa
else:
p_boundary = pb
hits_boundary = True
return p_boundary, hits_boundary
r_squared = np.dot(r, r)
alpha = r_squared / dBd
z_next = z + alpha * d
if scipy.linalg.norm(z_next) >= trust_radius:
# Find t >= 0 to get the boundary point such that
# ||z + t d|| == trust_radius
ta, tb = self.get_boundaries_intersections(z, d, trust_radius)
p_boundary = z + tb * d
hits_boundary = True
return p_boundary, hits_boundary
r_next = r + alpha * Bd
r_next_squared = np.dot(r_next, r_next)
if math.sqrt(r_next_squared) < tolerance:
hits_boundary = False
return z_next, hits_boundary
beta_next = r_next_squared / r_squared
d_next = -r_next + beta_next * d
# update the state for the next iteration
z = z_next
r = r_next
d = d_next
| CGSteihaugSubproblem |
python | realpython__materials | python-unittest/test_identity.py | {
"start": 18,
"end": 366
} | class ____(unittest.TestCase):
def test_list_aliases(self):
a = ["Python", "unittest"]
b = a
self.assertIs(a, b)
def test_list_objects(self):
a = ["Python", "unittest"]
b = ["Python", "unittest"]
self.assertIsNot(a, b)
if __name__ == "__main__":
unittest.main(verbosity=2)
| TestListIdentity |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-jira/integration_tests/fixtures/data_generator/streams.py | {
"start": 565,
"end": 1167
} | class ____:
def get_generate_headers(self):
headers = {"Accept": "application/json", "Content-Type": "application/json", **self._session.auth.get_auth_header()}
return headers
def generate_record(
self,
payload: Any,
stream_slice: Optional[Mapping[str, Any]] = None,
):
headers = self.get_generate_headers()
args = {"method": "POST", "url": self.url_base + self.path(stream_slice=stream_slice), "headers": headers, "data": payload}
request = requests.Request(**args).prepare()
self._send_request(request)
| GeneratorMixin |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 17479,
"end": 17592
} | class ____(ApeException):
"""
Raised when a problem occurs from the configuration file.
"""
| ConfigError |
python | pypa__pipenv | pipenv/exceptions.py | {
"start": 1965,
"end": 2666
} | class ____(ClickException):
message = "[bold][red]ERROR[/red][/bold]: {}"
def __init__(self, message=None, **kwargs):
if not message:
message = "Pipenv encountered a problem and had to exit."
extra = kwargs.pop("extra", [])
self.message = self.message.format(message)
self.extra = extra
def show(self, file=None):
if file is None:
file = sys.stderr
console = Console(file=file)
if self.extra:
if isinstance(self.extra, str):
self.extra = [self.extra]
for extra in self.extra:
console.print(extra)
console.print(f"{self.message}")
| PipenvException |
python | PyCQA__pylint | tests/checkers/unittest_format.py | {
"start": 556,
"end": 4819
} | class ____(CheckerTestCase):
CHECKER_CLASS = FormatChecker
def testCheckKeywordParensHandlesValidCases(self) -> None:
cases = [
"if foo:",
"if foo():",
"if (x and y) or z:",
"assert foo()",
"assert ()",
"if (1, 2) in (3, 4):",
"if (a or b) in c:",
"return (x for x in x)",
"if (x for x in x):",
"for x in (x for x in x):",
"not (foo or bar)",
"not (foo or bar) and baz",
"return [x for x in (3 if 1 else [4])]",
"return (x for x in ((3, 4) if 2 > 1 else (5, 6)))",
]
with self.assertNoMessages():
for code in cases:
self.checker._check_keyword_parentheses(_tokenize_str(code), 0)
def testCheckKeywordParensHandlesUnnecessaryParens(self) -> None:
cases = [
(MessageTest("superfluous-parens", line=1, args="if"), "if (foo):", 0),
(
MessageTest("superfluous-parens", line=1, args="if"),
"if ((foo, bar)):",
0,
),
(
MessageTest("superfluous-parens", line=1, args="if"),
"if (foo(bar)):",
0,
),
(MessageTest("superfluous-parens", line=1, args="not"), "not (foo)", 0),
(
MessageTest("superfluous-parens", line=1, args="not"),
"if not (foo):",
1,
),
(
MessageTest("superfluous-parens", line=1, args="if"),
"if (not (foo)):",
0,
),
(
MessageTest("superfluous-parens", line=1, args="not"),
"if (not (foo)):",
2,
),
(
MessageTest("superfluous-parens", line=1, args="for"),
"for (x) in (1, 2, 3):",
0,
),
(
MessageTest("superfluous-parens", line=1, args="if"),
"if (1) in (1, 2, 3):",
0,
),
]
for msg, code, offset in cases:
with self.assertAddsMessages(msg):
self.checker._check_keyword_parentheses(_tokenize_str(code), offset)
def testNoSuperfluousParensWalrusOperatorIf(self) -> None:
"""Parenthesis change the meaning of assignment in the walrus operator
and so are not always superfluous:
"""
cases = [
("if (odd := is_odd(i))\n"),
("not (foo := 5)\n"),
]
for code in cases:
with self.assertNoMessages():
self.checker.process_tokens(_tokenize_str(code))
def testPositiveSuperfluousParensWalrusOperatorIf(self) -> None:
"""Test positive superfluous parens cases with the walrus operator."""
cases = [
(
MessageTest("superfluous-parens", line=1, args="if"),
"if ((x := y)):\n",
),
(
MessageTest("superfluous-parens", line=1, args="not"),
"if not ((x := y)):\n",
),
]
for msg, code in cases:
with self.assertAddsMessages(msg):
self.checker.process_tokens(_tokenize_str(code))
def testCheckIfArgsAreNotUnicode(self) -> None:
cases = [("if (foo):", 0), ("assert (1 == 1)", 0)]
for code, offset in cases:
self.checker._check_keyword_parentheses(_tokenize_str(code), offset)
got = self.linter.release_messages()
assert isinstance(got[-1].args, str)
def testFuturePrintStatementWithoutParensWarning(self) -> None:
code = """from __future__ import print_function
print('Hello world!')
"""
tree = astroid.parse(code)
with self.assertNoMessages():
self.checker.process_module(tree)
self.checker.process_tokens(_tokenize_str(code))
def testKeywordParensFalsePositive(self) -> None:
code = "if 'bar' in (DICT or {}):"
with self.assertNoMessages():
self.checker._check_keyword_parentheses(_tokenize_str(code), start=2)
| TestSuperfluousParentheses |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/side_channel/float_properties_channel.py | {
"start": 139,
"end": 2227
} | class ____(SideChannel):
"""
This is the SideChannel for float properties shared with Unity.
You can modify the float properties of an environment with the commands
set_property, get_property and list_properties.
"""
def __init__(self, channel_id: uuid.UUID = None) -> None:
self._float_properties: Dict[str, float] = {}
if channel_id is None:
channel_id = uuid.UUID("60ccf7d0-4f7e-11ea-b238-784f4387d1f7")
super().__init__(channel_id)
def on_message_received(self, msg: IncomingMessage) -> None:
"""
Is called by the environment to the side channel. Can be called
multiple times per step if multiple messages are meant for that
SideChannel.
"""
k = msg.read_string()
v = msg.read_float32()
self._float_properties[k] = v
def set_property(self, key: str, value: float) -> None:
"""
Sets a property in the Unity Environment.
:param key: The string identifier of the property.
:param value: The float value of the property.
"""
self._float_properties[key] = value
msg = OutgoingMessage()
msg.write_string(key)
msg.write_float32(value)
super().queue_message_to_send(msg)
def get_property(self, key: str) -> Optional[float]:
"""
Gets a property in the Unity Environment. If the property was not
found, will return None.
:param key: The string identifier of the property.
:return: The float value of the property or None.
"""
return self._float_properties.get(key)
def list_properties(self) -> List[str]:
"""
Returns a list of all the string identifiers of the properties
currently present in the Unity Environment.
"""
return list(self._float_properties.keys())
def get_property_dict_copy(self) -> Dict[str, float]:
"""
Returns a copy of the float properties.
:return:
"""
return dict(self._float_properties)
| FloatPropertiesChannel |
python | openai__openai-python | src/openai/types/beta/chatkit/chatkit_thread_item_list.py | {
"start": 1567,
"end": 2322
} | class ____(BaseModel):
id: str
"""Identifier of the thread item."""
created_at: int
"""Unix timestamp (in seconds) for when the item was created."""
heading: Optional[str] = None
"""Optional heading for the task. Defaults to null when not provided."""
object: Literal["chatkit.thread_item"]
"""Type discriminator that is always `chatkit.thread_item`."""
summary: Optional[str] = None
"""Optional summary that describes the task. Defaults to null when omitted."""
task_type: Literal["custom", "thought"]
"""Subtype for the task."""
thread_id: str
"""Identifier of the parent thread."""
type: Literal["chatkit.task"]
"""Type discriminator that is always `chatkit.task`."""
| DataChatKitTask |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_tool_emulator.py | {
"start": 2587,
"end": 3751
} | class ____(BaseChatModel):
"""Fake model for emulating tool responses."""
responses: list[str] = ["Emulated response"]
response_index: int = 0
def _generate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: Any = None,
**kwargs: Any,
) -> Any:
from langchain_core.outputs import ChatGeneration, ChatResult
response = self.responses[self.response_index % len(self.responses)]
self.response_index += 1
return ChatResult(generations=[ChatGeneration(message=AIMessage(content=response))])
async def _agenerate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: Any = None,
**kwargs: Any,
) -> Any:
from langchain_core.outputs import ChatGeneration, ChatResult
response = self.responses[self.response_index % len(self.responses)]
self.response_index += 1
return ChatResult(generations=[ChatGeneration(message=AIMessage(content=response))])
@property
def _llm_type(self) -> str:
return "fake_emulator"
| FakeEmulatorModel |
python | sympy__sympy | sympy/assumptions/predicates/sets.py | {
"start": 7608,
"end": 8588
} | class ____(Predicate):
"""
Antihermitian predicate.
Explanation
===========
``Q.antihermitian(x)`` is true iff ``x`` belongs to the field of
antihermitian operators, i.e., operators in the form ``x*I``, where
``x`` is Hermitian.
Examples
========
>>> from sympy import Q, ask, Matrix, I
>>> ask(Q.antihermitian(I))
True
>>> ask(Q.antihermitian(2))
False
>>> A = Matrix([[0, -2 - I, 0], [2 - I, 0, -I], [0, -I, 0]])
>>> ask(Q.antihermitian(A))
True
>>> ask(Q.antihermitian(0))
True
References
==========
.. [1] https://mathworld.wolfram.com/HermitianOperator.html
"""
name = 'antihermitian'
handler = Dispatcher(
"AntiHermitianHandler",
doc=("Handler for Q.antihermitian.\n\n"
"Test that an expression belongs to the field of anti-Hermitian\n"
"operators, that is, operators in the form x*I, where x is Hermitian.")
)
| AntihermitianPredicate |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-toggl/llama_index/readers/toggl/dto.py | {
"start": 110,
"end": 189
} | class ____(enum.Enum):
json = "json"
markdown = "markdown"
| TogglOutFormat |
python | joke2k__faker | faker/providers/date_time/zh_CN/__init__.py | {
"start": 46,
"end": 714
} | class ____(DateTimeProvider):
MONTH_NAMES = {
"01": "一月",
"02": "二月",
"03": "三月",
"04": "四月",
"05": "五月",
"06": "六月",
"07": "七月",
"08": "八月",
"09": "九月",
"10": "十月",
"11": "十一月",
"12": "十二月",
}
DAY_NAMES = {
"0": "星期日",
"1": "星期一",
"2": "星期二",
"3": "星期三",
"4": "星期四",
"5": "星期五",
"6": "星期六",
}
def day_of_week(self) -> str:
day = self.date("%w")
return self.DAY_NAMES[day]
def month_name(self) -> str:
month = self.month()
return self.MONTH_NAMES[month]
| Provider |
python | getsentry__sentry | src/sentry/utils/circuit_breaker.py | {
"start": 588,
"end": 2703
} | class ____(TypedDict, total=False):
# The number of consecutive failures within a given window required to trigger the circuit breaker
error_limit: int
# The window of time in which those errors must happen
error_limit_window: int
# Allow a configurable subset of function calls to bypass the circuit breaker, for the purposes
# of determining when the service is healthy again and requests to it can resume.
allow_passthrough: bool
# The number of function calls allowed to bypass the circuit breaker every
# `passthrough_interval` seconds
passthrough_attempts_per_interval: int
# The window of time, in seconds, during which to allow `passthrough_attempts_per_interval`
# calls to bypass the circuit breaker
passthrough_interval: int
CIRCUIT_BREAKER_DEFAULTS = CircuitBreakerConfig(
error_limit=DEFAULT_ERROR_LIMIT,
error_limit_window=3600, # 1 hour
allow_passthrough=False,
passthrough_interval=15, # 15 sec
passthrough_attempts_per_interval=1,
)
def circuit_breaker_activated(
key: str,
error_limit: int = DEFAULT_ERROR_LIMIT,
passthrough_data: CircuitBreakerPassthrough | None = None,
) -> bool:
"""
Activates the circuit breaker if the error count for a cache key exceeds the error limit.
The circuit breaker can allow a certain number of requests to pass through per minute, defined by
the passthrough limit if provided.
"""
failure_count = cache.get_or_set(ERROR_COUNT_CACHE_KEY(key), default=0, timeout=60 * 60) or 0
if failure_count < error_limit:
return False # not blocked
# Limit has been exceeded, check if we should allow any requests to pass through
if passthrough_data:
if not ratelimiter.backend.is_limited(
PASSTHROUGH_RATELIMIT_KEY(key),
limit=passthrough_data["limit"],
window=passthrough_data["window"],
):
metrics.incr(f"circuit_breaker.{key}.bypassed")
return False # not blocked
metrics.incr(f"circuit_breaker.{key}.throttled")
return True # blocked
| CircuitBreakerConfig |
python | ethereum__web3.py | web3/geth.py | {
"start": 3432,
"end": 4215
} | class ____(Module):
"""
https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-txpool
"""
is_async = True
_content: Method[Callable[[], Awaitable[TxPoolContent]]] = Method(
RPC.txpool_content,
is_property=True,
)
async def content(self) -> TxPoolContent:
return await self._content()
_inspect: Method[Callable[[], Awaitable[TxPoolInspect]]] = Method(
RPC.txpool_inspect,
is_property=True,
)
async def inspect(self) -> TxPoolInspect:
return await self._inspect()
_status: Method[Callable[[], Awaitable[TxPoolStatus]]] = Method(
RPC.txpool_status,
is_property=True,
)
async def status(self) -> TxPoolStatus:
return await self._status()
| AsyncGethTxPool |
python | great-expectations__great_expectations | tests/datasource/fluent/test_snowflake_datasource.py | {
"start": 44408,
"end": 45150
} | class ____:
"""Test deprecation warnings for SnowflakeDatasource."""
def test_private_key_in_kwargs_connect_args_deprecated_warning(self):
"""Warn when private_key is in kwargs['connect_args']."""
with pytest.warns(DeprecationWarning, match="private_key.*deprecated"):
SnowflakeDatasource(
name="test_ds",
user="my_user",
password="<PLACEHOLDER PASSWORD>",
account="my_account",
schema="S_PUBLIC",
database="D_PUBLIC",
role="my_role",
warehouse="my_wh",
kwargs={"connect_args": {"private_key": b"test_key"}},
)
| TestSnowflakeDatasourceDeprecationWarnings |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes8.py | {
"start": 397,
"end": 1226
} | class ____(
Iterator[DirEntry[AnyStr]], ContextManager["_ScandirIterator[AnyStr]"]
):
def __iter__(self) -> Self: ...
def __next__(self) -> DirEntry[AnyStr]: ...
def close(self) -> None: ...
def __enter__(self) -> Self: ...
def __exit__(
self,
__exc_type: type[BaseException] | None,
__exc_value: BaseException | None,
__traceback: TracebackType | None,
) -> bool | None: ...
def scandir(path: AnyStr) -> _ScandirIterator[AnyStr]: ...
def thing(value: AnyStr):
with scandir(value) as it:
for file in it:
if isinstance(file.name, str):
if file.name.endswith(".xml"):
...
elif isinstance(file.name, bytes):
if file.name.endswith(b".xml"):
...
| _ScandirIterator |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 33621,
"end": 35507
} | class ____:
def __init__(self,string=None):
if string:
self.text = list(string)
else:
self.text = []
def setLength(self,sz):
if not sz :
self.text = []
return
assert sz>0
if sz >= self.length():
return
### just reset to empty buffer
self.text = self.text[0:sz]
def length(self):
return len(self.text)
def append(self,c):
self.text.append(c)
### return buffer as string. Arg 'a' is used as index
## into the buffer and 2nd argument shall be the length.
## If 2nd args is absent, we return chars till end of
## buffer starting with 'a'.
def getString(self,a=None,length=None):
if not a :
a = 0
assert a>=0
if a>= len(self.text) :
return ""
if not length:
## no second argument
L = self.text[a:]
else:
assert (a+length) <= len(self.text)
b = a + length
L = self.text[a:b]
s = ""
for x in L : s += x
return s
toString = getString ## alias
def __str__(self):
return str(self.text)
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### Reader ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
## When reading Japanese chars, it happens that a stream returns a
## 'char' of length 2. This looks like a bug in the appropriate
## codecs - but I'm rather unsure about this. Anyway, if this is
## the case, I'm going to split this string into a list of chars
## and put them on hold, ie. on a buffer. Next time when called
## we read from buffer until buffer is empty.
## wh: nov, 25th -> problem does not appear in Python 2.4.0.c1.
| StringBuffer |
python | tensorflow__tensorflow | tensorflow/python/eager/wrap_function_test.py | {
"start": 13853,
"end": 20932
} | class ____(test.TestCase):
def testAddFunction(self):
def fn(x):
v = variables.Variable(3, name='v')
v2 = variable_scope.get_variable(
'v', initializer=init_ops.Constant(4), shape=[], dtype=dtypes.int32)
return v + v2 + x
with self.cached_session() as sess:
result = fn(constant_op.constant(5))
sess.run(variables.global_variables_initializer())
expected = sess.run(result)
g = wrap_function.WrappedGraph()
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
wrapped_fn = g.wrap_function(fn, signature)
self.assertEqual(expected, wrapped_fn(constant_op.constant(5)).numpy())
def testCollections(self):
def fn(x):
v = variable_v1.VariableV1(
3, name='v', trainable=False, collections=['a'])
v2 = variable_scope.get_variable(
'v', initializer=init_ops.Constant(4), shape=[], dtype=dtypes.int32,
collections=['a', 'b'])
return v + v2 + x
def assert_collections(graph):
self.assertLen(graph.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES), 1)
self.assertLen(graph.get_collection('a'), 2)
self.assertLen(graph.get_collection('b'), 1)
g = wrap_function.WrappedGraph()
g.wrap_function(fn, [tensor_spec.TensorSpec([], dtypes.int32)])
assert_collections(g.graph)
def assert_fn():
assert_collections(ops.get_default_graph())
return 1 # Return is required
# Assert that collections are accessible within a wrapped function.
g.wrap_function(assert_fn, [])
def testShareVariablesSameGraph(self):
def add_v1(x):
with variable_scope.variable_scope(
'reuse', reuse=variable_scope.AUTO_REUSE):
v = variable_scope.get_variable(
'v', initializer=init_ops.Constant(3), shape=[], dtype=dtypes.int32)
return v + x
def subtract_v1(x):
with variable_scope.variable_scope(
'reuse', reuse=variable_scope.AUTO_REUSE):
v = variable_scope.get_variable(
'v', initializer=init_ops.Constant(4), shape=[], dtype=dtypes.int32)
return v - x
def different_variable_fn_v1(x):
with variable_scope.variable_scope(
'no_reuse', reuse=variable_scope.AUTO_REUSE):
v = variable_scope.get_variable(
'v', initializer=init_ops.Constant(5), shape=[], dtype=dtypes.int32)
return v * x
def increment_variable_v1(x):
with variable_scope.variable_scope(
'reuse', reuse=variable_scope.AUTO_REUSE):
v = variable_scope.get_variable(
'v', initializer=init_ops.Constant(6), shape=[], dtype=dtypes.int32)
return v.assign_add(x)
g = wrap_function.WrappedGraph()
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
add = g.wrap_function(add_v1, signature)
subtract = g.wrap_function(subtract_v1, signature)
different_variable_fn = g.wrap_function(different_variable_fn_v1, signature)
increment_variable = g.wrap_function(increment_variable_v1, signature)
self.assertEqual(10, add(constant_op.constant(7)).numpy())
self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy())
# The shared variable has a starting value of 3 because add_v1 was wrapped
# first.
self.assertEqual(-4, subtract(constant_op.constant(7)).numpy())
self.assertEqual(10, increment_variable(constant_op.constant(7)).numpy())
# Check that variable updates
self.assertEqual(17, add(constant_op.constant(7)).numpy())
self.assertEqual(3, subtract(constant_op.constant(7)).numpy())
# Sanity check - result from this function shouldn't change.
self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy())
self.assertAllEqual({'reuse/v', 'no_reuse/v'}, set(g.variables.keys()))
def testShareVariablesDifferentGraphs(self):
def add_v1(x):
v = variables.Variable(3, name='v')
return v + x
def subtract_v1(x):
v = variables.Variable(4, name='v')
return v - x
def different_variable_fn_v1(x):
with ops.name_scope('different_scope'):
v = variables.Variable(5, name='v')
return v * x
def increment_variable_v1(x):
v = variables.Variable(6, name='v')
return v.assign_add(x)
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
vh = wrap_function.VariableHolder(share_variables=True)
new_graph = lambda: wrap_function.WrappedGraph(variable_holder=vh)
add = new_graph().wrap_function(add_v1, signature)
subtract = new_graph().wrap_function(subtract_v1, signature)
different_variable_fn = new_graph().wrap_function(
different_variable_fn_v1, signature)
increment_variable = new_graph().wrap_function(
increment_variable_v1, signature)
self.assertEqual(10, add(constant_op.constant(7)).numpy())
self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy())
# Because the variable in add_v1 was created first, its starting value is 3
# instead of the values defined in subtract_v1 or increment_variable_v1.
self.assertEqual(-4, subtract(constant_op.constant(7)).numpy())
self.assertEqual(10, increment_variable(constant_op.constant(7)).numpy())
# Check that variable updates
self.assertEqual(17, add(constant_op.constant(7)).numpy())
self.assertEqual(3, subtract(constant_op.constant(7)).numpy())
# Sanity check - result from this function shouldn't change.
self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy())
self.assertAllEqual({'v', 'different_scope/v'}, set(vh.variables.keys()))
@test_util.run_in_graph_and_eager_modes
def testImportedFunctionsRegistered(self):
if test_util.is_gpu_available():
self.skipTest('not a GPU test')
with ops.Graph().as_default() as graph:
x = array_ops.placeholder(dtypes.variant, shape=[], name='foo')
ds = dataset_ops.from_variant(x, structure=(
tensor_spec.TensorSpec([], dtypes.int32)))
y = ds.reduce(array_ops.zeros([], dtype=dtypes.int32), lambda p, q: p + q)
graph_def = graph.as_graph_def()
def fn_to_wrap(a):
returned_elements = graph_def_importer.import_graph_def(
graph_def, input_map={x.name: a}, return_elements=[y.name])
return returned_elements[0]
wrapped_fn = wrap_function.wrap_function(
fn_to_wrap, [tensor_spec.TensorSpec((), dtypes.variant)])
ds = dataset_ops.Dataset.from_tensor_slices([10, 20])
v = dataset_ops.to_variant(ds)
self.evaluate(wrapped_fn(v))
def testReturnOp(self):
def update_var_v1(x):
v = variables.Variable(3, name='v')
update_op = state_ops.assign(v, x).op
return update_op
g = wrap_function.WrappedGraph()
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
update_var = g.wrap_function(update_var_v1, signature)
self.assertEqual(g.variables['v'].numpy(), 3)
update_var(constant_op.constant(12))
self.assertEqual(g.variables['v'].numpy(), 12)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| WrappedGraphTest |
python | wandb__wandb | tests/system_tests/test_core/test_offline_sync_beta.py | {
"start": 583,
"end": 11600
} | class ____:
"""A fake ServiceConnection for async testing."""
def __init__(self, mailbox: Mailbox) -> None:
self._mailbox = mailbox
self._cond = asyncio.Condition()
self._init_sync_addrs: list[str] = []
self._sync_addrs: list[str] = []
self._sync_status_addrs: list[str] = []
async def receive_init_sync(self) -> None:
"""Wait until an init_sync request."""
await self._wait_for(self._init_sync_addrs)
async def receive_sync(self) -> None:
"""Wait until a sync request."""
await self._wait_for(self._sync_addrs)
async def receive_sync_status(self) -> None:
"""Wait until a sync_status request."""
await self._wait_for(self._sync_status_addrs)
async def _wait_for(self, addrs: list[str]) -> None:
async with self._cond:
await asyncio.wait_for(
self._cond.wait_for(lambda: bool(addrs)),
timeout=5,
)
async def respond_init_sync(self, id: str) -> None:
"""Respond to an init_sync request."""
resp = wandb_sync_pb2.ServerInitSyncResponse(id=id)
await self._respond(self._init_sync_addrs, "init_sync_response", resp)
async def respond_sync(
self,
infos: list[str],
errors: list[str],
) -> None:
"""Respond to a sync request."""
resp = wandb_sync_pb2.ServerSyncResponse(
messages=self._to_messages(infos=infos, errors=errors),
)
await self._respond(self._sync_addrs, "sync_response", resp)
async def respond_sync_status(
self,
new_infos: list[str],
new_errors: list[str],
) -> None:
"""Respond to a sync_status request."""
resp = wandb_sync_pb2.ServerSyncStatusResponse(
new_messages=self._to_messages(infos=new_infos, errors=new_errors),
)
await self._respond(self._sync_status_addrs, "sync_status_response", resp)
def _to_messages(
self,
infos: list[str],
errors: list[str],
) -> list[wandb_sync_pb2.ServerSyncMessage]:
messages: list[wandb_sync_pb2.ServerSyncMessage] = []
for info in infos:
messages.append(
wandb_sync_pb2.ServerSyncMessage(
severity=wandb_sync_pb2.ServerSyncMessage.SEVERITY_INFO,
content=info,
)
)
for error in errors:
messages.append(
wandb_sync_pb2.ServerSyncMessage(
severity=wandb_sync_pb2.ServerSyncMessage.SEVERITY_ERROR,
content=error,
)
)
return messages
async def _respond(self, addrs: list[str], field: str, resp: Any) -> None:
async with self._cond:
await asyncio.wait_for(
self._cond.wait_for(lambda: bool(addrs)),
timeout=5,
)
addr = addrs.pop(0)
server_response = spb.ServerResponse()
server_response.request_id = addr
getattr(server_response, field).CopyFrom(resp)
await self._mailbox.deliver(server_response)
async def init_sync(
self,
paths: set[pathlib.Path],
settings: wandb.Settings,
) -> MailboxHandle[wandb_sync_pb2.ServerInitSyncResponse]:
return await self._make_handle(
self._init_sync_addrs,
lambda r: r.init_sync_response,
)
async def sync(
self,
id: str,
parallelism: int,
) -> MailboxHandle[wandb_sync_pb2.ServerSyncResponse]:
return await self._make_handle(
self._sync_addrs,
lambda r: r.sync_response,
)
async def sync_status(
self,
id: str,
) -> MailboxHandle[wandb_sync_pb2.ServerSyncStatusResponse]:
return await self._make_handle(
self._sync_status_addrs,
lambda r: r.sync_status_response,
)
async def _make_handle(
self,
addrs: list[str],
to_response: Callable[[spb.ServerResponse], _T],
) -> MailboxHandle[_T]:
req = spb.ServerRequest()
handle = self._mailbox.require_response(req)
async with self._cond:
addrs.append(req.request_id)
self._cond.notify_all()
return handle.map(to_response)
@pytest.fixture
def skip_asyncio_sleep(monkeypatch):
async def do_nothing(duration):
pass
monkeypatch.setattr(beta_sync, "_SLEEP", do_nothing)
def test_syncs_run(tmp_path, wandb_backend_spy, runner: CliRunner):
test_file = tmp_path / "test_file.txt"
test_file.touch()
with wandb.init(mode="offline") as run:
run.log({"test_sync": 321})
run.save(test_file, base_path=test_file.parent)
run.summary["test_sync_summary"] = "test summary"
result = runner.invoke(cli.beta, f"sync {run.settings.sync_dir}")
lines = result.output.splitlines()
assert lines[0] == "Syncing 1 file(s):"
assert lines[1].endswith(f"run-{run.id}.wandb")
# More lines possible depending on status updates. Not deterministic.
assert lines[-1] == f"wandb: [{run.path}] Finished syncing {run.settings.sync_file}"
with wandb_backend_spy.freeze() as snapshot:
history = snapshot.history(run_id=run.id)
assert len(history) == 1
assert history[0]["test_sync"] == 321
summary = snapshot.summary(run_id=run.id)
assert summary["test_sync_summary"] == "test summary"
files = snapshot.uploaded_files(run_id=run.id)
assert "test_file.txt" in files
@pytest.mark.parametrize("skip_synced", (True, False))
def test_skip_synced(tmp_path, runner: CliRunner, skip_synced):
(tmp_path / "run-1.wandb").touch()
(tmp_path / "run-2.wandb").touch()
(tmp_path / "run-2.wandb.synced").touch()
(tmp_path / "run-3.wandb").touch()
skip = "--skip-synced" if skip_synced else "--no-skip-synced"
result = runner.invoke(cli.beta, f"sync --dry-run {skip} {tmp_path}")
assert "run-1.wandb" in result.output
assert "run-3.wandb" in result.output
if skip_synced:
assert "run-2.wandb" not in result.output
else:
assert "run-2.wandb" in result.output
def test_merges_symlinks(
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch,
runner: CliRunner,
):
(tmp_path / "actual-run").mkdir()
(tmp_path / "actual-run/run.wandb").touch()
(tmp_path / "latest-run").symlink_to(tmp_path / "actual-run")
monkeypatch.chdir(tmp_path)
result = runner.invoke(cli.beta, "sync --dry-run .")
assert result.output.splitlines() == [
"Would sync 1 file(s):",
" actual-run/run.wandb",
]
def test_sync_wandb_file(tmp_path, runner: CliRunner):
file = tmp_path / "run.wandb"
file.touch()
result = runner.invoke(cli.beta, f"sync --dry-run {file}")
lines = result.output.splitlines()
assert lines[0] == "Would sync 1 file(s):"
assert lines[1].endswith("run.wandb")
def test_sync_run_directory(tmp_path, runner: CliRunner):
run_dir = tmp_path / "some-run"
run_dir.mkdir()
(run_dir / "run.wandb").touch()
result = runner.invoke(cli.beta, f"sync --dry-run {run_dir}")
lines = result.output.splitlines()
assert lines[0] == "Would sync 1 file(s):"
assert lines[1].endswith("run.wandb")
def test_sync_wandb_directory(tmp_path, runner: CliRunner):
wandb_dir = tmp_path / "wandb-dir"
run1_dir = wandb_dir / "run-1"
run2_dir = wandb_dir / "run-2"
wandb_dir.mkdir()
run1_dir.mkdir()
run2_dir.mkdir()
(run1_dir / "run-1.wandb").touch()
(run2_dir / "run-2.wandb").touch()
result = runner.invoke(cli.beta, f"sync --dry-run {wandb_dir}")
lines = result.output.splitlines()
assert lines[0] == "Would sync 2 file(s):"
assert lines[1].endswith("run-1.wandb")
assert lines[2].endswith("run-2.wandb")
def test_truncates_printed_paths(
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch,
runner: CliRunner,
):
monkeypatch.setattr(beta_sync, "_MAX_LIST_LINES", 5)
files = list((tmp_path / f"run-{i}.wandb") for i in range(20))
for file in files:
file.touch()
result = runner.invoke(cli.beta, f"sync --dry-run {tmp_path}")
lines = result.output.splitlines()
assert lines[0] == "Would sync 20 file(s):"
for line in lines[1:6]:
assert re.fullmatch(r" .+/run-\d+\.wandb", line)
assert lines[6] == " +15 more (pass --verbose to see all)"
def test_prints_relative_paths(
tmp_path: pathlib.Path,
monkeypatch: pytest.MonkeyPatch,
runner: CliRunner,
):
dir1_cwd = tmp_path / "cwd"
dir2_not = tmp_path / "not"
dir1_cwd.mkdir()
dir2_not.mkdir()
monkeypatch.chdir(dir1_cwd)
(dir1_cwd / "run-relative.wandb").touch()
(dir2_not / "run-absolute.wandb").touch()
result = runner.invoke(cli.beta, f"sync --dry-run {tmp_path}")
assert result.output.splitlines() == [
"Would sync 2 file(s):",
*sorted(
[
" run-relative.wandb",
f" {dir2_not / 'run-absolute.wandb'}",
]
),
]
def test_prints_status_updates(skip_asyncio_sleep, tmp_path, emulated_terminal):
_ = skip_asyncio_sleep
wandb_file = tmp_path / "run-test-progress.wandb"
singleton = wandb_setup.singleton()
mailbox = Mailbox(singleton.asyncer)
async def simulate_service(tester: _Tester):
await tester.respond_init_sync(id="sync-test")
await tester.respond_sync_status(
new_infos=["Msg 1.", "Msg 2."],
new_errors=["Err 1.", "Err 2."],
)
await tester.receive_sync_status()
assert emulated_terminal.read_stderr() == [
"wandb: Msg 1.",
"wandb: Msg 2.",
"wandb: ERROR Err 1.",
"wandb: ERROR Err 2.",
"wandb: ⢿ Syncing...",
]
await tester.respond_sync(
infos=["Final message."],
errors=["Final error."],
)
await tester.respond_sync_status(new_infos=[], new_errors=[])
async def do_test():
tester = _Tester(mailbox=mailbox)
async with asyncio_compat.open_task_group(exit_timeout=5) as group:
group.start_soon(simulate_service(tester))
group.start_soon(
beta_sync._do_sync(
set([wandb_file]),
service=tester, # type: ignore (we only mock used methods)
settings=wandb.Settings(),
printer=new_printer(),
parallelism=1,
)
)
assert emulated_terminal.read_stderr() == [
"wandb: Msg 1.",
"wandb: Msg 2.",
"wandb: ERROR Err 1.",
"wandb: ERROR Err 2.",
"wandb: Final message.",
"wandb: ERROR Final error.",
]
singleton.asyncer.run(do_test)
| _Tester |
python | dagster-io__dagster | python_modules/dagster-test/dagster_test/toys/external_execution/__init__.py | {
"start": 754,
"end": 2291
} | class ____(Config):
multiplier: int = Field(default=1)
@asset
def number_x(
context: AssetExecutionContext,
pipes_subprocess_client: PipesSubprocessClient,
config: NumberConfig,
) -> None:
extras = {**get_common_extras(context), "multiplier": config.multiplier}
pipes_subprocess_client.run(
command=command_for_asset("number_x"), context=context, extras=extras
)
@asset
def number_y(
context: AssetExecutionContext,
pipes_subprocess_client: PipesSubprocessClient,
config: NumberConfig,
):
pipes_subprocess_client.run(
command=command_for_asset("number_y"),
context=context,
extras=get_common_extras(context),
env={"NUMBER_Y": "4"},
)
@asset(deps=[number_x, number_y])
def number_sum(
context: AssetExecutionContext, pipes_subprocess_client: PipesSubprocessClient
) -> None:
pipes_subprocess_client.run(
command=command_for_asset("number_sum"), context=context, extras=get_common_extras(context)
)
pipes_subprocess_client = PipesSubprocessClient(
env=get_env(),
)
defs = Definitions(
assets=[number_x, number_y, number_sum],
resources={"pipes_subprocess_client": pipes_subprocess_client},
)
if __name__ == "__main__":
from dagster import instance_for_test, materialize
with instance_for_test() as instance:
materialize(
[number_x, number_y, number_sum],
instance=instance,
resources={"pipes_subprocess_client": pipes_subprocess_client},
)
| NumberConfig |
python | pytorch__pytorch | test/distributed/test_inductor_collectives.py | {
"start": 2053,
"end": 33418
} | class ____(DynamoDistributedMultiProcTestCase):
"""
Run correctness checks in multi-proc runner, mark with minimum # GPUs to run under
"""
device = acc.type if (acc := torch.accelerator.current_accelerator()) else "cpu"
def get_world_trs(self):
return {
"tag": "",
"ranks": list(range(self.world_size)),
"group_size": self.world_size,
}
@property
def world_size(self) -> int:
# hack: no matter whether we have 2 or 3 or 4 gpus, just run on 2
# works around issue with skipif<2 and workers with unpredictable #s gpu
return 2
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
def test_broadcast_inductor(self):
"""
Testing if broadcast works correctly when using inductor
"""
def example(tensor, src, *, tag, ranks, group_size):
res = torch.ops.c10d_functional.broadcast(
tensor, src, tag, ranks, group_size
)
res = torch.ops.c10d_functional.wait_tensor(res)
return res
def compile(func, example_inputs):
graph = make_fx(func)(*example_inputs)
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
example = functools.partial(
example,
**self.get_world_trs(),
)
t = torch.randn(4, 4, device=self.device)
inputs = (
t if self.rank == 0 else torch.zeros(4, 4, device=self.device),
0,
)
eager_out = example(*inputs)
self.assertTrue(same(t, eager_out))
compiled_func = compile(example, inputs)
compiled_out = compiled_func(*inputs)
self.assertTrue(same(eager_out, compiled_out))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
def test_allreduce_inductor(self):
"""
This is matmul/cat/allreduce is a pattern we aim to optimize.
"""
def matmul_cat_col(a, b, c, d, e, f, *, tag, ranks, group_size):
x = torch.matmul(a, b)
y = torch.matmul(c, d)
z = torch.cat((x, y))
ar = torch.ops.c10d_functional.all_reduce(z, "sum", tag, ranks, group_size)
g = torch.matmul(e, f)
ar = torch.ops.c10d_functional.wait_tensor(ar)
out = torch.add(ar, g.repeat(2, 1))
return (out,)
def compile(func, example_inputs):
graph = make_fx(func)(*example_inputs)
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
matmul_cat_col = functools.partial(
matmul_cat_col,
**self.get_world_trs(),
)
inputs = (torch.ones(4, 4, device=self.device) + self.rank,) * 6
eager_out = matmul_cat_col(*inputs)
compiled_matmul_cat_col = compile(matmul_cat_col, inputs)
inductor_out = compiled_matmul_cat_col(*inputs)
self.assertTrue(same(eager_out, inductor_out, tol=0.001))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
def test_allreduce_inductor_cudagraph_trees(self):
"""
Tests whether cudagraph trees support all_reduce from nccl
"""
import torch.distributed as dist
# dist.all_reduce is an inplace op in eager mode but a functionanlized op in compiled mode.
# so we define eager_func and func separately for the same semantic.
def eager_func(x):
y = x * x
dist.all_reduce(y, op=dist.ReduceOp.SUM)
x = torch.nn.functional.silu(x)
return x * y
def func(x):
y = x * x
y = dist.all_reduce(y, op=dist.ReduceOp.SUM)
x = torch.nn.functional.silu(x)
return x * y
options = {
"triton.cudagraphs": True,
"triton.cudagraph_trees": True,
}
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
compiled_func = torch.compile(
func, backend="inductor", fullgraph=True, options=options, dynamic=None
)
for nelem in [1024, 2048, 4096]:
# CI (Tesla T4) does not support bfloat16 compilation natively,
# using float
x = torch.randn(nelem, device=self.device, dtype=torch.float)
golden_out = eager_func(x)
for _ in range(3):
compiled_out = compiled_func(x)
self.assertEqual(golden_out, compiled_out)
def test_c10d_functional_tagged_pt2_compliant(self):
op = torch.ops._c10d_functional.all_reduce.default
self.assertIn(torch.Tag.pt2_compliant_tag, op.tags)
op = torch.ops.c10d_functional.all_reduce.default
self.assertIn(torch.Tag.pt2_compliant_tag, op.tags)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
def test_eager_allreduce_inductor_wait(self):
def eager_func(a, b, c, d, *, tag, ranks, group_size):
x = torch.matmul(a, b)
y = torch.matmul(c, d)
z = torch.cat((x, y))
ar = torch.ops.c10d_functional.all_reduce(z, "sum", tag, ranks, group_size)
return ar
def inductor_func(ar, e, f):
g = torch.matmul(e, f)
ar = torch.ops.c10d_functional.wait_tensor(ar)
out = torch.add(ar, g.repeat(2, 1))
return (out,)
def compile(func, example_inputs):
graph = make_fx(func)(*example_inputs)
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
eager_func = functools.partial(
eager_func,
**self.get_world_trs(),
)
eager_inputs = (torch.ones(4, 4, device=self.device) + self.rank,) * 4
inductor_inputs = (torch.ones(4, 4, device=self.device) + self.rank,) * 2
eager_out = inductor_func(eager_func(*eager_inputs), *inductor_inputs)
compiled_inductor_func = compile(
inductor_func, [eager_func(*eager_inputs)] + list(inductor_inputs)
)
inductor_out = compiled_inductor_func(
eager_func(*eager_inputs), *inductor_inputs
)
print(f"eager_out, {eager_out}")
print(f"inductor_out, {inductor_out}")
self.assertTrue(same(eager_out, inductor_out, tol=0.001))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
def test_inductor_allreduce_eager_wait(self):
def inductor_func(a, b, c, d, *, tag, ranks, group_size):
x = torch.matmul(a, b)
y = torch.matmul(c, d)
z = torch.cat((x, y))
ar = torch.ops.c10d_functional.all_reduce(z, "sum", tag, ranks, group_size)
return ar
def eager_func(ar, e, f):
g = torch.matmul(e, f)
ar = torch.ops.c10d_functional.wait_tensor(ar)
out = torch.add(ar, g.repeat(2, 1))
return (out,)
def compile(func, example_inputs):
graph = make_fx(func)(*example_inputs)
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
inductor_func = functools.partial(
inductor_func,
**self.get_world_trs(),
)
inductor_inputs = (torch.ones(4, 4, device=self.device) + self.rank,) * 4
eager_inputs = (torch.ones(4, 4, device=self.device) + self.rank,) * 2
eager_out = eager_func(inductor_func(*inductor_inputs), *eager_inputs)
compiled_inductor_func = compile(inductor_func, inductor_inputs)
inductor_out = eager_func(
compiled_inductor_func(*inductor_inputs), *eager_inputs
)
self.assertTrue(same(eager_out, inductor_out, tol=0.001))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
@xfailIf(TEST_XPU) # https://github.com/intel/torch-xpu-ops/issues/1728
@skipIfRocm
@xfailIf(TEST_XPU) # https://github.com/intel/torch-xpu-ops/issues/1728
def test_eager_async_allreduce_inductor_wait(self):
import torch.distributed as dist
from torch._inductor.utils import run_and_get_code
def all_reduce_non_functional_eager(x):
y = x * x
work = dist.all_reduce(y, op=dist.ReduceOp.SUM, async_op=True)
assert isinstance(work, torch.distributed.Work)
return work, y
def all_reduce_wait(work, y): # potentially compiled
if torch.compiler.is_dynamo_compiling():
torch.ops.c10d_functional.wait_tensor(y)
else:
work.wait(datetime.timedelta(seconds=10))
# Under compile, if `wait_tensor(y)` above is correctly executed,
# `y`'s data is in its final form and the output of this function will match eager;
# otherwise, `y * y` will run in parallel with `all_reduce(y)` and the output of this function
# will not match eager.
return y * y
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
x = torch.ones(12800, 12800, device=self.device) + self.rank
self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 0)
# NOTE: We run for 10 iterations each, to ensure that the GPU execution is way behind CPU
# and that `y * y` on CPU side will be issued before `all_reduce(y)` on GPU side is done,
# thus guaranteeing that in the bad case `y * y` on GPU side will run in parallel with `all_reduce(y)`
# thus will produce the wrong result that fails the unit test.
def _run_loop_collective_wait(x, wait_fn, expected_registry_size):
for _ in range(10):
self.assertEqual(
torch._C._distributed_c10d._get_work_registry_size(), 0
)
work, y = all_reduce_non_functional_eager(x)
self.assertEqual(
torch._C._distributed_c10d._get_work_registry_size(),
expected_registry_size,
)
out = wait_fn(work, y)
self.assertEqual(
torch._C._distributed_c10d._get_work_registry_size(), 0
)
return work, y, out
# Test: Pure-eager
all_reduce_wait_eager = all_reduce_wait
work, y, out_ref = _run_loop_collective_wait(
x,
wait_fn=all_reduce_wait_eager,
expected_registry_size=0,
)
all_reduce_wait_compiled = torch.compile(
all_reduce_wait,
backend="inductor",
fullgraph=True,
)
# Test: Issue comm in eager -> wait for comm in compile. Use the context manager.
with _functional_collectives.allow_inflight_collective_as_graph_input_ctx():
work, y, out_compiled = _run_loop_collective_wait(
x, wait_fn=all_reduce_wait_compiled, expected_registry_size=1
)
self.assertEqual(out_ref, out_compiled)
# Check that `wait_tensor()` is in the Inductor generated code
_, triton_codes = run_and_get_code(all_reduce_wait_compiled, work, y)
FileCheck().check("torch.ops._c10d_functional.wait_tensor.default(").run(
triton_codes[0]
)
# Failure Case: Issue comm in eager -> wait for comm in compile. Doesn't use the context manager.
_, _, out_compiled = _run_loop_collective_wait(
x, wait_fn=all_reduce_wait_compiled, expected_registry_size=0
)
# In this case `.wait_tensor(y)` in compiled region will not be able to find the corresponding work object
# to invoke the wait, thus the result will not match eager.
self.assertNotEqual(out_ref, out_compiled)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
@patch.object(torch._inductor.config, "allow_buffer_reuse", True)
def test_allreduce_input_buffer_reuse(self):
def func(a, *, tag, ranks, group_size):
ar = _functional_collectives.all_reduce(a, "sum", ranks, tag)
c = torch.relu(a)
d = torch.matmul(c, c)
e = d + ar
return (e,)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
inputs = torch.ones(4, 4, device=self.device) + self.rank
compiled = torch.compile(func)
out = compiled(inputs, **self.get_world_trs())
correct = func(inputs, **self.get_world_trs())
self.assertTrue(same(out, correct))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
def test_permute_tensor(self):
def func(tensor, src_dst_pairs, *, tag, ranks, group_size):
return _functional_collectives.permute_tensor(
tensor, src_dst_pairs, ranks, tag
)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
inputs = (
# rank0: [0., 1.], rank1: [2., 3.]
torch.arange(2, dtype=torch.float32, device=self.device)
+ 2 * self.rank,
[1, 0],
)
compiled = torch.compile(func)
out = compiled(*inputs, **self.get_world_trs())
correct = func(*inputs, **self.get_world_trs())
self.assertTrue(same(out, correct))
# rank0: [2., 3.], rank1: [0., 1.]
expected = torch.arange(2, dtype=torch.float32, device=self.device) + 2 * (
(self.rank - 1 + self.world_size) % self.world_size
)
self.assertEqual(out, expected)
self.assertEqual(correct, expected)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
@patch.object(torch._inductor.config, "allow_buffer_reuse", True)
def test_allgather_output_buffer_reuse(self):
class Model(torch.nn.Module):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.emb = torch.nn.Embedding(4, 4)
def forward(self, x, world_size, tag, ranks, group_size):
y = self.emb(x)
last_dim = y.dim() - 1
res = _functional_collectives.all_gather_tensor(y, 0, ranks, tag)
out = torch.cat(torch.chunk(res, world_size, dim=0), dim=last_dim)
return out
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
model = Model().to(self.device)
model_compiled = torch.compile(model)
inp = torch.tensor([[2, 1, 3, 0]], dtype=torch.long, device=self.device)
out = model_compiled(inp, self.world_size, **self.get_world_trs())
correct = model(inp, self.world_size, **self.get_world_trs())
self.assertTrue(same(out, correct))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
def test_allgather_scalar_tensor_input(self):
def func(tensor, world_size):
tensor_list = [torch.empty_like(tensor) for _ in range(world_size)]
torch.distributed.all_gather(tensor_list, tensor)
return tensor_list
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
func_compiled = torch.compile(func)
inp = torch.tensor(self.rank, dtype=torch.long, device=self.device)
out = func_compiled(inp, self.world_size)
correct = func(inp, self.world_size)
self.assertTrue(same(out, correct))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
def test_allgather_contiguous_input(self):
class Model(torch.nn.Module):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.emb = torch.nn.Embedding(4, 4)
def forward(self, x, world_size, tag, ranks, group_size):
y = self.emb(x)
last_dim = y.dim() - 1
y = y.transpose_(0, last_dim).contiguous()
_functional_collectives.all_gather_tensor(y, 0, ranks, tag)
out = y.transpose_(0, last_dim).contiguous()
return out
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
model = Model().to(self.device)
model_compiled = torch.compile(model)
inp = torch.tensor([[2, 1, 3, 0]], dtype=torch.long, device=self.device)
out = model_compiled(inp, self.world_size, **self.get_world_trs())
correct = model(inp, self.world_size, **self.get_world_trs())
self.assertTrue(same(out, correct))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
def test_allgather_into_tensor_inductor(self):
"""
This is matmul/cat/allreduce is a pattern we aim to optimize.
"""
def example(a, b, *, tag, ranks, group_size):
c = torch.matmul(a, b)
ag = torch.ops.c10d_functional.all_gather_into_tensor(
c, tag, ranks, group_size
)
ag = torch.ops.c10d_functional.wait_tensor(ag)
return (ag,)
def compile(func, example_inputs):
graph = make_fx(func)(*example_inputs)
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
example = functools.partial(
example,
**self.get_world_trs(),
)
inputs = (torch.ones(4, 4, device=self.device) + self.rank,) * 2
eager_out = example(*inputs)
compiled_matmul_cat_col = compile(example, inputs)
inductor_out = compiled_matmul_cat_col(*inputs)
self.assertTrue(same(eager_out, inductor_out, tol=0.001))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
def test_reduce_scatter_tensor_inductor(self):
def example(a, b, *, tag, ranks, group_size):
c = torch.matmul(a, b)
ag = torch.ops.c10d_functional.reduce_scatter_tensor(
c, "sum", tag, ranks, group_size
)
ag = torch.ops.c10d_functional.wait_tensor(ag)
return (ag,)
def compile(func, example_inputs):
graph = make_fx(func)(*example_inputs)
return inductor_compile_fx(graph, example_inputs)
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
example = functools.partial(
example,
**self.get_world_trs(),
)
inputs = (torch.ones(4, 4, device=self.device) + self.rank,) * 2
eager_out = example(*inputs)
compiled_fn = compile(example, inputs)
inductor_out = compiled_fn(*inputs)
self.assertTrue(same(eager_out, inductor_out, tol=0.001))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
@patch.object(torch._dynamo.config, "capture_scalar_outputs", True)
def test_all_to_all_single_inductor(self):
def example(
inp,
input_split_sizes_tensor,
output_split_sizes_tensor,
*,
tag,
ranks,
group_size,
):
input_split_sizes = input_split_sizes_tensor.tolist()
output_split_sizes = output_split_sizes_tensor.tolist()
a2a = torch.ops.c10d_functional.all_to_all_single(
inp,
output_split_sizes,
input_split_sizes,
tag,
ranks,
group_size,
)
a2a = torch.ops.c10d_functional.wait_tensor(a2a)
out = a2a / a2a.sum(dim=0)
return out
with (
_dynamo_dist_per_rank_init(self.rank, self.world_size),
torch._dynamo.config.patch(
dynamic_shapes=True,
capture_dynamic_output_shape_ops=True,
capture_scalar_outputs=True,
),
):
row = self.world_size * (self.rank + 1) * (self.world_size + 1) / 2
input_split_sizes_tensor = torch.tensor(
[(i + 1) * (self.rank + 1) for i in range(self.world_size)],
dtype=torch.int64,
)
output_split_sizes_tensor = torch.tensor(
[(i + 1) * (self.rank + 1) for i in range(self.world_size)],
dtype=torch.int64,
)
inputs = (
torch.ones(int(row), 5, device=self.device) * (self.rank + 1),
input_split_sizes_tensor,
output_split_sizes_tensor,
)
trs = self.get_world_trs()
compiled_fn = torch.compile(example, fullgraph=True, dynamic=True)
code = run_and_get_triton_code(compiled_fn, *inputs, **trs)
(
FileCheck()
.check_regex(
"torch.ops._c10d_functional.all_to_all_single.default\\("
"arg\\d+_\\d+, "
"\\[u\\d+, u\\d+\\], "
"\\[u\\d+, u\\d+\\]"
)
.run(code)
)
eager_out = example(*inputs, **trs)
inductor_out = compiled_fn(*inputs, **trs)
self.assertTrue(same(eager_out, inductor_out, tol=0.001))
# The goal of this test is that when `unsafe_allow_recompute_of_collectives=False`,
# The partitioner will *never* recompute collectives in the backward, even
# if the activation_memory_budget partitioner is being used,
# unless there is a manual user checkpoint() region (which we know makes it safe
# to recompute the collective, since we assume that the user applied the AC
# region consistently across all ranks)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
@patch.object(torch._dynamo.config, "capture_scalar_outputs", True)
@patch.object(torch._functorch.config, "activation_memory_budget", 0.01)
@parametrize("override_with_ac", [False, True])
def test_all_to_all_recompute_is_always_banned(self, override_with_ac):
@torch.library.custom_op("custom_ns::foo", mutates_args=())
def foo(x: torch.Tensor) -> torch.Tensor:
return x + 1
@foo.register_fake
def _(x):
return torch.empty_like(x)
def setup_context(ctx, inputs, output):
ctx.save_for_backward(inputs[0])
return
def backward(ctx, grad):
(x,) = ctx.saved_tensors
return grad * x
foo.register_autograd(backward, setup_context=setup_context)
class AllToAllSingle(torch.autograd.Function):
@staticmethod
def forward(
ctx,
input: torch.Tensor,
output_split_sizes,
input_split_sizes,
tag,
ranks,
group_size: int,
) -> torch.Tensor:
ctx.output_split_sizes = input_split_sizes
ctx.input_split_sizes = output_split_sizes
ctx.group_size = group_size
a2a = torch.ops._c10d_functional.all_to_all_single.default(
input,
output_split_sizes,
input_split_sizes,
"0",
)
a2a = torch.ops.c10d_functional.wait_tensor(a2a)
return a2a
@staticmethod
def backward(ctx, grad):
grad = torch.ops._c10d_functional.all_to_all_single.default(
grad,
ctx.output_split_sizes,
ctx.input_split_sizes,
"0",
)
return (
torch.ops.c10d_functional.wait_tensor(grad),
None,
None,
None,
None,
None,
)
def alltoall_autograd(
inp,
output_split_sizes,
input_split_sizes,
tag,
ranks,
group_size,
):
out = AllToAllSingle.apply(
inp, output_split_sizes, input_split_sizes, tag, ranks, group_size
)
return out
# simple mode to track how many collective ops we saw in the backward
class TrackingMode(TorchDispatchMode):
def __init__(self):
super().__init__()
self.ops_counter = Counter()
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
rs = func(*args, **kwargs)
self.ops_counter[func] += 1
return rs
def example(
inp,
input_split_sizes_tensor,
output_split_sizes_tensor,
*,
tag,
ranks,
group_size,
):
input_split_sizes = input_split_sizes_tensor.tolist()
output_split_sizes = output_split_sizes_tensor.tolist()
a2a = torch.ops.custom_ns.alltoall_autograd.default(
inp,
output_split_sizes,
input_split_sizes,
tag,
ranks,
group_size,
)
return torch.ops.custom_ns.foo(a2a)
with (
_dynamo_dist_per_rank_init(self.rank, self.world_size),
torch._dynamo.config.patch(
dynamic_shapes=True,
capture_dynamic_output_shape_ops=True,
capture_scalar_outputs=True,
),
torch.library._scoped_library("custom_ns", "FRAGMENT") as lib,
):
lib.define(
"alltoall_autograd(Tensor input, SymInt[]? output_split_sizes, SymInt[]? input_split_sizes, str tag, int[] ranks, int group_size) -> Tensor" # noqa: B950
)
lib.impl("alltoall_autograd", alltoall_autograd, "Autograd")
lib.impl("alltoall_autograd", alltoall_autograd, "Meta")
row = self.world_size * (self.rank + 1) * (self.world_size + 1) / 2
input_split_sizes_tensor = torch.tensor(
[(i + 1) * (self.rank + 1) for i in range(self.world_size)],
dtype=torch.int64,
)
output_split_sizes_tensor = torch.tensor(
[(i + 1) * (self.rank + 1) for i in range(self.world_size)],
dtype=torch.int64,
)
inputs = (
torch.ones(int(row), 5, device=self.device, requires_grad=True)
* (self.rank + 1),
input_split_sizes_tensor,
output_split_sizes_tensor,
)
trs = self.get_world_trs()
compiled_fn = torch.compile(
example,
fullgraph=True,
dynamic=True,
backend="aot_eager_decomp_partition",
)
if override_with_ac:
def compiled_fn_wrapper(*args):
return example(*inputs, **trs)
out = torch.utils.checkpoint.checkpoint(
compiled_fn_wrapper, *inputs, use_reentrant=False
)
else:
out = compiled_fn(*inputs, **trs)
# track how many all_to_alls we saw in the backward
with TrackingMode() as m:
out.sum().backward()
if override_with_ac:
# We wrapped our test in AC, which overrides the partitioner decision
# of never recomputing collectives.
# So we should properly see the all2all be recomputed in the backward
self.assertEqual(
m.ops_counter[torch.ops._c10d_functional.all_to_all_single.default],
2,
)
else:
# there is 1 all2all in the fw, and 1 all2all in the backward.
# notably: even though activation_memory_budget == 0 ("recompute_everything"),
# we are still choosing *not* to recompute the all2all from the fw
self.assertEqual(
m.ops_counter[torch.ops._c10d_functional.all_to_all_single.default],
1,
)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
def test_all_to_all_single_inductor_split_sizes_none(self):
def example(inp, *, tag, ranks, group_size):
a2a = torch.ops.c10d_functional.all_to_all_single(
inp,
None,
None,
tag,
ranks,
group_size,
)
a2a = torch.ops.c10d_functional.wait_tensor(a2a)
out = a2a / a2a.sum(dim=0)
return out
with _dynamo_dist_per_rank_init(self.rank, self.world_size):
inputs = (
torch.ones(self.world_size, self.world_size, device=self.device)
* (self.rank + 1),
)
trs = self.get_world_trs()
compiled_fn = torch.compile(example, fullgraph=True, dynamic=True)
code = run_and_get_triton_code(compiled_fn, *inputs, **trs)
(
FileCheck()
.check_regex(
"torch.ops._c10d_functional.all_to_all_single.default\\("
"arg\\d+_\\d+, "
"\\[s\\d+ // \\d, s\\d+ // \\d\\], "
"\\[s\\d+ // \\d, s\\d+ // \\d\\]"
)
.run(code)
)
eager_out = example(*inputs, **trs)
inductor_out = compiled_fn(*inputs, **trs)
self.assertTrue(same(eager_out, inductor_out, tol=0.001))
@instantiate_parametrized_tests
@requires_accelerator_dist_backend(["nccl", "xccl"])
@unittest.skipIf(
not torch.accelerator.is_available(),
"No accelerator is available",
)
| TestCollectivesMultiProc |
python | instagram__MonkeyType | monkeytype/db/base.py | {
"start": 576,
"end": 1926
} | class ____(metaclass=ABCMeta):
"""An interface that all concrete calltrace storage backends must implement."""
@abstractmethod
def add(self, traces: Iterable[CallTrace]) -> None:
"""Store the supplied call traces in the backing store"""
pass
@abstractmethod
def filter(
self, module: str, qualname_prefix: Optional[str] = None, limit: int = 2000
) -> List[CallTraceThunk]:
"""Query the backing store for any traces that match the supplied query.
By returning a list of thunks we let the caller get a partial result in the
event that decoding one or more call traces fails.
"""
pass
@classmethod
def make_store(cls, connection_string: str) -> "CallTraceStore":
"""Create a new store instance.
This is a factory function that is intended to be used by the CLI.
"""
raise NotImplementedError(
f"Your CallTraceStore ({cls.__module__}.{cls.__name__}) "
f"does not implement make_store()"
)
def list_modules(self) -> List[str]:
"""List of traced modules from the backing store"""
raise NotImplementedError(
f"Your CallTraceStore ({self.__class__.__module__}.{self.__class__.__name__}) "
f"does not implement list_modules()"
)
| CallTraceStore |
python | huggingface__transformers | src/transformers/models/florence2/modular_florence2.py | {
"start": 64839,
"end": 65129
} | class ____(LlavaPreTrainedModel):
config_class = Florence2Config
base_model_prefix = "model"
_supports_attention_backend = False
@auto_docstring(
custom_intro="""
Florence-2 is a vision model for captioning, detection, and segmentation.
"""
)
| Florence2PreTrainedModel |
python | pyinstaller__pyinstaller | PyInstaller/utils/win32/icon.py | {
"start": 2534,
"end": 2714
} | class ____(Structure):
_names_ = ("bWidth", "bHeight", "bColorCount", "bReserved", "wPlanes", "wBitCount", "dwBytesInRes", "dwImageOffset")
_format_ = "bbbbhhii"
| ICONDIRENTRY |
python | ray-project__ray | python/ray/tune/tests/test_integration_pytorch_lightning.py | {
"start": 331,
"end": 548
} | class ____(Dataset):
def __init__(self, values):
self.values = values
def __getitem__(self, index):
return self.values[index]
def __len__(self):
return len(self.values)
| _MockDataset |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-sec-filings/llama_index/readers/sec_filings/prepline_sec_filings/api/app.py | {
"start": 924,
"end": 1337
} | class ____(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
return record.getMessage().find("/healthcheck") == -1
logging.getLogger("uvicorn.access").addFilter(HealthCheckFilter())
@app.get("/healthcheck", status_code=status.HTTP_200_OK, include_in_schema=False)
def healthcheck(request: Request):
return {"healthcheck": "HEALTHCHECK STATUS: EVERYTHING OK!"}
| HealthCheckFilter |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py | {
"start": 17953,
"end": 155054
} | class ____(quantize_model_test_base.QuantizedModelTest):
@parameterized.parameters(
testing.parameter_combinations([{
'shapes': [
([3, 3], [3, 3]),
([3, None], [None, 3]),
([None, None], [None, None]),
([4, 3, 3], [4, 3, 3]),
([4, 3, None], [4, None, 3]),
([None, None, None], [None, None, None]),
],
'activation_fn': [None, nn_ops.relu, nn_ops.relu6],
'has_bias': [True, False],
'use_kernel': [True, False],
}])
)
@test_util.run_in_graph_and_eager_modes
def test_qat_matmul_model(
self,
shapes: Sequence[Tuple[_TensorShape, _TensorShape]],
activation_fn: Optional[ops.Operation],
has_bias: bool,
use_kernel: bool,
):
n = 5
x_shape = [v if v is not None else n for v in shapes[0]]
y_shape = [v if v is not None else n for v in shapes[1]]
class MatmulModel(module.Module):
def __init__(self, bias: Optional[core.Tensor]):
self._bias = bias
self._kernel = np.random.uniform(size=y_shape).astype('f4')
self._min = (-0.8, -0.8, -0.9)
self._max = (0.9, 0.9, 1.0)
@def_function.function(
input_signature=[
tensor_spec.TensorSpec(
name='x', shape=shapes[0], dtype=dtypes.float32
)
]
)
def matmul_with_kernel(self, x: core.Tensor) -> Mapping[str, core.Tensor]:
return self._matmul(x, self._kernel)
@def_function.function(
input_signature=[
tensor_spec.TensorSpec(
name='x', shape=shapes[0], dtype=dtypes.float32
),
tensor_spec.TensorSpec(
name='y', shape=shapes[1], dtype=dtypes.float32
),
]
)
def matmul_without_kernel(
self, x: core.Tensor, y: core.Tensor
) -> Mapping[str, core.Tensor]:
return self._matmul(x, y)
def _matmul(self, x, y):
x = array_ops.fake_quant_with_min_max_vars(
x,
min=ops.convert_to_tensor(self._min[0]),
max=ops.convert_to_tensor(self._max[0]),
num_bits=8,
narrow_range=False,
)
y = array_ops.fake_quant_with_min_max_vars(
y,
min=ops.convert_to_tensor(self._min[1]),
max=ops.convert_to_tensor(self._max[1]),
num_bits=8,
narrow_range=False,
)
out = math_ops.matmul(x, y)
if self._bias is not None:
out = nn_ops.bias_add(out, self._bias)
if activation_fn is not None:
out = activation_fn(out)
out = array_ops.fake_quant_with_min_max_vars(
out,
min=ops.convert_to_tensor(self._min[2]),
max=ops.convert_to_tensor(self._max[2]),
num_bits=8,
narrow_range=False,
)
return {'output': out}
bias = None
if has_bias:
bias_shape = shapes[1][-1]
if bias_shape is not None:
bias = array_ops.constant(
np.random.uniform(size=[shapes[1][-1]]), dtype=dtypes.float32
)
model = MatmulModel(bias)
x = array_ops.constant(
np.random.uniform(size=x_shape), dtype=dtypes.float32
)
y = array_ops.constant(
np.random.uniform(size=y_shape), dtype=dtypes.float32
)
if use_kernel:
model.matmul = model.matmul_with_kernel
model_inputs = {'x': x}
else:
model.matmul = model.matmul_without_kernel
model_inputs = {'x': x, 'y': y}
saved_model_save.save(
model, self._input_saved_model_path, signatures=model.matmul
)
signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
tags = {tag_constants.SERVING}
# Check the converted model with TF opset as the baseline.
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=[signature_key],
op_set=quant_opts_pb2.TF,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {signature_key}
)
expected_outputs = model.matmul(**model_inputs)
got_outputs = converted_model.signatures[signature_key](**model_inputs)
self.assertAllClose(expected_outputs, got_outputs, atol=1e-1)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
# Check the converted model in the XLA opset.
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=[signature_key],
op_set=quant_opts_pb2.XLA,
enable_two_input_tensors=not use_kernel,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path_2,
quantization_options,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {signature_key}
)
loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path_2
)
graphdef = loader.get_meta_graph_def_from_tags(tags).graph_def
self.assertTrue(self._contains_op(graphdef, 'XlaDotV2'))
new_outputs = converted_model.signatures[signature_key](**model_inputs)
# The difference between TF and XLA path is expected to be small (smaller
# or equal to 1 in the quantized domain).
self.assertAllClose(new_outputs, expected_outputs, atol=1e-1)
@parameterized.parameters(
testing.parameter_combinations([{
'activation_fn': [None, nn_ops.relu, nn_ops.relu6],
'has_bias': [True, False],
'has_batch_norm': [True, False],
'target_opset': [quant_opts_pb2.XLA],
}])
)
@test_util.run_in_graph_and_eager_modes
def test_qat_conv_model(
self,
activation_fn: Optional[ops.Operation],
has_bias: bool,
has_batch_norm: bool,
target_opset: quant_opts_pb2.OpSet,
):
class ConvModel(module.Module):
def __init__(self):
self.filter_value = np.random.uniform(
low=-0.5, high=0.5, size=(2, 3, 3, 2)
).astype('f4')
@def_function.function(
input_signature=[
tensor_spec.TensorSpec(
name='input', shape=[1, 3, 4, 3], dtype=dtypes.float32
),
]
)
def conv(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
"""Performs a 2D convolution operation.
Args:
input_tensor: Input tensor to perform convolution on.
Returns:
A map of: output key -> output result.
"""
q_input = array_ops.fake_quant_with_min_max_args(
input_tensor, min=-0.1, max=0.2, num_bits=8, narrow_range=False
)
filter_tensor = ops.convert_to_tensor(self.filter_value)
filter_min = array_ops.identity(
array_ops.constant([-0.5, -0.5], dtype=dtypes.float32)
)
filter_max = array_ops.identity(
array_ops.constant([0.5, 0.5], dtype=dtypes.float32)
)
q_filter = array_ops.fake_quant_with_min_max_vars_per_channel(
filter_tensor, filter_min, filter_max, num_bits=8, narrow_range=True
)
bias = array_ops.constant([0.1, 0.2], dtype=dtypes.float32)
scale, offset = [1.0] * 2, [0.5] * 2
mean, variance = scale, offset
out = nn_ops.conv2d(
q_input,
q_filter,
strides=[1, 1, 2, 1],
dilations=[1, 1, 1, 1],
padding='SAME',
data_format='NHWC',
name='sample/conv2d',
)
if has_bias:
out = nn_ops.bias_add(out, bias, data_format='NHWC')
if activation_fn is not None:
# The accuracy is not good when having FusedBatchNorm without
# activation in this test.
if has_batch_norm:
# Fusing is supported for non-training case.
out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(
out, scale, offset, mean, variance, is_training=False
)
out = activation_fn(out)
out_min = array_ops.constant([-0.18, -0.32], dtype=dtypes.float32)
out_max = array_ops.constant([0.5, 0.5], dtype=dtypes.float32)
q_out = array_ops.fake_quant_with_min_max_vars_per_channel(
out, min=out_min, max=out_max, num_bits=8, narrow_range=True
)
return {'output': q_out}
model = ConvModel()
saved_model_save.save(model, self._input_saved_model_path)
signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
tags = {tag_constants.SERVING}
# Check the converted model with TF opset as the baseline.
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=[signature_key],
op_set=quant_opts_pb2.TF,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {signature_key}
)
input_data = np.random.uniform(
low=-0.1, high=0.2, size=(1, 3, 4, 3)
).astype('f4')
expected_outputs = model.conv(input_data)
got_outputs = converted_model.signatures[signature_key](
input=ops.convert_to_tensor(input_data)
)
self.assertAllClose(expected_outputs, got_outputs, atol=0.00323)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
# Check the converted model in the target opset.
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=[signature_key],
op_set=target_opset,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path_2,
quantization_options,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {signature_key}
)
loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path_2
)
graphdef = loader.get_meta_graph_def_from_tags(tags).graph_def
if target_opset == quant_opts_pb2.XLA:
self.assertTrue(
self._contains_op(graphdef, 'XlaConvV2', node_name='sample/conv2d.*')
)
new_outputs = converted_model.signatures[signature_key](
input=ops.convert_to_tensor(input_data)
)
# The difference between TF and XLA path is expected to be small (smaller
# or equal to 1 in the quantized domain).
self.assertAllClose(new_outputs, got_outputs, atol=0.00154)
  # Currently, only some specific forms of equations are supported for
  # batchmatmul conversion.
  @parameterized.parameters(
      testing.parameter_combinations([{
          'equation': ('abc,cd->abd', 'abcd,cde->abe'),
          'shape_unknown': (True, False),
          'activation_fn': (None, nn_ops.relu, nn_ops.relu6),
          'has_bias': (True, False),
          'use_kernel': (True, False),
      }])
  )
  @test_util.run_in_graph_and_eager_modes
  def test_qat_einsum_model_with_batchmatmul_conversion(
      self,
      equation: str,
      shape_unknown: bool,
      activation_fn: Optional[ops.Operation],
      has_bias: bool,
      use_kernel: bool,
  ):
    """Tests static-range quantization of a QAT einsum model via batchmatmul.

    The model is quantized twice: first with the TF opset (baseline), then
    with the XLA opset. Both quantized models must stay close to the float
    model's output, and the two quantized paths must agree with each other.

    Args:
      equation: Einsum equation under test.
      shape_unknown: Whether the input signatures use unknown (None) dims.
      activation_fn: Optional activation fused after the einsum.
      has_bias: Whether a bias add follows the einsum.
      use_kernel: If True, the weight is a captured kernel (one input tensor);
        otherwise both operands are fed as inputs.
    """
    x_shape, y_shape, bias_shape, x_signature, y_signature = (
        self._prepare_sample_einsum_datashapes(
            equation, shape_unknown, has_bias and not shape_unknown
        )
    )
    model = self._create_einsum_model(
        equation,
        y_shape,
        x_signature,
        y_signature,
        bias_shape,
        activation_fn,
        is_qat_model=True,
    )
    x = array_ops.constant(
        np.random.uniform(size=x_shape), dtype=dtypes.float32
    )
    y = array_ops.constant(
        np.random.uniform(size=y_shape), dtype=dtypes.float32
    )
    # Select which einsum variant is exported and the matching feed dict.
    if use_kernel:
      model.einsum = model.einsum_with_kernel
      model_inputs = {'x': x}
    else:
      model.einsum = model.einsum_without_kernel
      model_inputs = {'x': x, 'y': y}
    saved_model_save.save(
        model, self._input_saved_model_path, signatures=model.einsum
    )
    signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    tags = {tag_constants.SERVING}
    # Check the converted model with TF opset as the baseline.
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=[signature_key],
        op_set=quant_opts_pb2.TF,
        enable_two_input_tensors=not use_kernel,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {signature_key}
    )
    expected_outputs = model.einsum(**model_inputs)
    got_outputs = converted_model.signatures[signature_key](**model_inputs)
    # atol here is an empirically-chosen quantization-error budget.
    self.assertAllClose(expected_outputs, got_outputs, atol=1e-1)
    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
    self.assertTrue(self._contains_quantized_function_call(output_graphdef))
    # Check the converted model in the XLA opset.
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=[signature_key],
        op_set=quant_opts_pb2.XLA,
        enable_two_input_tensors=not use_kernel,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path_2,
        quantization_options,
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {signature_key}
    )
    loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path_2
    )
    graphdef = loader.get_meta_graph_def_from_tags(tags).graph_def
    # The XLA path must lower the quantized einsum to XlaDotV2.
    self.assertTrue(self._contains_op(graphdef, 'XlaDotV2'))
    new_outputs = converted_model.signatures[signature_key](**model_inputs)
    # The difference between TF and XLA path is expected to be small (smaller
    # or equal to 1 in the quantized domain).
    self.assertAllClose(new_outputs, expected_outputs, atol=1e-1)
  # Equations only supported for XLA operations.
  @parameterized.parameters(
      testing.parameter_combinations([{
          'equation': ('abc,acd->abd', 'abcd,aecd->acbe'),
          'shape_unknown': (True, False),
          'activation_fn': (None, nn_ops.relu, nn_ops.relu6),
          'has_bias': (True, False),
          'use_kernel': (True, False),
      }])
  )
  @test_util.run_in_graph_and_eager_modes
  def test_qat_einsum_model_with_xla(
      self,
      equation: str,
      shape_unknown: bool,
      activation_fn: Optional[ops.Operation],
      has_bias: bool,
      use_kernel: bool,
  ):
    """Tests static-range quantization of QAT einsum equations via XLA only.

    These equations are not convertible to batchmatmul, so only the XLA opset
    is exercised: the quantized graph must contain XlaDotV2 and the output
    must stay close to the float model.

    Args:
      equation: Einsum equation under test.
      shape_unknown: Whether the input signatures use unknown (None) dims.
      activation_fn: Optional activation fused after the einsum.
      has_bias: Whether a bias add follows the einsum.
      use_kernel: If True, the weight is a captured kernel (one input tensor);
        otherwise both operands are fed as inputs.
    """
    x_shape, y_shape, bias_shape, x_signature, y_signature = (
        self._prepare_sample_einsum_datashapes(
            equation, shape_unknown, has_bias and not shape_unknown
        )
    )
    model = self._create_einsum_model(
        equation,
        y_shape,
        x_signature,
        y_signature,
        bias_shape,
        activation_fn,
        is_qat_model=True,
    )
    x = array_ops.constant(
        np.random.uniform(size=x_shape), dtype=dtypes.float32
    )
    y = array_ops.constant(
        np.random.uniform(size=y_shape), dtype=dtypes.float32
    )
    # Select which einsum variant is exported and the matching feed dict.
    if use_kernel:
      model.einsum = model.einsum_with_kernel
      model_inputs = {'x': x}
    else:
      model.einsum = model.einsum_without_kernel
      model_inputs = {'x': x, 'y': y}
    saved_model_save.save(
        model, self._input_saved_model_path, signatures=model.einsum
    )
    signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    tags = {tag_constants.SERVING}
    # Check the converted model in the XLA opset.
    expected_outputs = model.einsum(**model_inputs)
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=[signature_key],
        op_set=quant_opts_pb2.XLA,
        enable_two_input_tensors=not use_kernel,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {signature_key}
    )
    loader = saved_model_loader.SavedModelLoader(self._output_saved_model_path)
    graphdef = loader.get_meta_graph_def_from_tags(tags).graph_def
    # The quantized einsum must be lowered to XlaDotV2.
    self.assertTrue(self._contains_op(graphdef, 'XlaDotV2'))
    outputs = converted_model.signatures[signature_key](**model_inputs)
    # atol is an empirically-chosen quantization-error budget.
    self.assertAllClose(outputs, expected_outputs, atol=1e-1)
  # Equations NOT supported for XLA operations.
  @parameterized.parameters(
      testing.parameter_combinations([{
          'equation': ('aecd,abcd->acbe', 'abc,acd->adb'),
          'use_kernel': (True, False),
      }])
  )
  @test_util.run_in_graph_and_eager_modes
  def test_qat_einsum_model_not_supported_with_xla(
      self,
      equation: str,
      use_kernel: bool,
  ):
    """Tests that unsupported einsum equations are NOT lowered to XLA.

    Quantization with the XLA opset must still succeed, but the resulting
    graph must not contain XlaDotV2 for these equations.

    Args:
      equation: Einsum equation that XLA lowering does not support.
      use_kernel: If True, the weight is a captured kernel (one input tensor);
        otherwise both operands are fed as inputs.
    """
    _, y_shape, _, x_signature, y_signature = (
        self._prepare_sample_einsum_datashapes(equation)
    )
    model = self._create_einsum_model(
        equation,
        y_shape,
        x_signature,
        y_signature,
        bias_shape=None,
        activation_fn=None,
        is_qat_model=True,
    )
    # Select which einsum variant is exported.
    if use_kernel:
      model.einsum = model.einsum_with_kernel
    else:
      model.einsum = model.einsum_without_kernel
    saved_model_save.save(
        model, self._input_saved_model_path, signatures=model.einsum
    )
    signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    tags = {tag_constants.SERVING}
    # Check the converted model does NOT have XLA opset.
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=[signature_key],
        op_set=quant_opts_pb2.XLA,
        enable_two_input_tensors=not use_kernel,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path_2,
        quantization_options,
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {signature_key}
    )
    loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path_2
    )
    graphdef = loader.get_meta_graph_def_from_tags(tags).graph_def
    # Unsupported equations must fall back: no XlaDotV2 in the output graph.
    self.assertFalse(self._contains_op(graphdef, 'XlaDotV2'))
@test_util.run_in_graph_and_eager_modes
def test_qat_gather_and_conv_model(
self,
):
input_type = dtypes.int32
model = self._create_simple_gather_and_conv_model(
input_type,
filter_shape=(2, 3, 3, 1024),
is_qat_model=True,
)
saved_model_save.save(model, self._input_saved_model_path)
tags = {tag_constants.SERVING}
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
op_set=quant_opts_pb2.XLA,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
)
self.assertIsNotNone(converted_model)
# Due to other meta data, the compression is not exactly 1/4.
self.assertLess(
testing.get_size_ratio(
self._output_saved_model_path, self._input_saved_model_path
),
0.5,
)
  def test_qat_vocab_table_lookup_model(self):
    """Tests quantization of a QAT model containing a vocab hash table.

    Verifies that the table initializer survives quantization: after
    conversion, loading the model and running a lookup returns the expected
    vocabulary indices.
    """
    tags = {tag_constants.SERVING}
    signature_def_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    # Create and save a simple model that involves a hash table.
    inputs, outputs = self._create_and_save_vocab_table_lookup_qat_model_tf1(
        self._input_saved_model_path, tags, signature_def_key
    )
    # Make sure that the desired input key and output key is present.
    self.assertIn('input_vocabs', inputs.keys())
    self.assertIn('lookup', outputs.keys())
    # Representative dataset is composed of a set of vocabs for table lookup.
    repr_ds = [
        {'input_vocabs': np.array([b'hello', b'model', b'quantization'])}
        for _ in range(4)
    ]
    signature_def_keys = [signature_def_key]
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=signature_def_keys,
        op_set=quant_opts_pb2.TF,
    )
    quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=repr_ds,
    )
    # Tests table lookup to make sure the table has been initialized
    # successfully.
    with session.Session(graph=ops.Graph()) as sess:
      output_meta_graph_def = saved_model_loader.load(
          sess, tags=tags, export_dir=self._output_saved_model_path
      )
      # The graph should contain a quantized function call (it contains a
      # single f32 matmul node).
      self.assertTrue(
          self._contains_quantized_function_call(
              output_meta_graph_def.graph_def
          )
      )
      self.assertCountEqual(
          output_meta_graph_def.signature_def.keys(), signature_def_keys
      )
      signature_def = output_meta_graph_def.signature_def[signature_def_key]
      # Resolve the input/output tensors from the signature and run a lookup.
      input_tensor_name = signature_def.inputs['input_vocabs'].name
      input_tensor = sess.graph.get_tensor_by_name(input_tensor_name)
      lookup_tensor_name = signature_def.outputs['lookup'].name
      lookup_tensor = sess.graph.get_tensor_by_name(lookup_tensor_name)
      lookup_val = sess.run(
          lookup_tensor,
          feed_dict={
              input_tensor: np.array([b'model', b'quantization', b'hello'])
          },
      )
      # Expected indices follow the vocab order used to build the table.
      self.assertAllClose(lookup_val, [1.0, 2.0, 0.0])
  def test_qat_file_init_hash_table_lookup_model_tf1(self):
    """Tests quantization of a QAT model with a file-initialized hash table.

    Same shape as the vocab-table test but the table is initialized from an
    asset file; out-of-vocabulary keys must map to the default value (-1).
    """
    tags = {tag_constants.SERVING}
    signature_def_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    # Create and save a simple model that involves a hash table.
    inputs, outputs = self._create_and_save_file_init_hash_table_qat_model_tf1(
        self._input_saved_model_path, tags, signature_def_key
    )
    # Make sure that the desired input key and output key is present.
    self.assertIn('input_vocabs', inputs.keys())
    self.assertIn('lookup', outputs.keys())
    # Representative dataset is composed of a set of vocabs for table lookup.
    repr_ds = [
        {'input_vocabs': np.array([b'static', b'range', b'quantization'])}
        for _ in range(4)
    ]
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=[signature_def_key],
        op_set=quant_opts_pb2.TF,
    )
    signature_def_keys = [signature_def_key]
    quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=repr_ds,
    )
    # Tests table lookup to make sure the table has been initialized
    # successfully.
    with session.Session(graph=ops.Graph()) as sess:
      output_meta_graph_def = saved_model_loader.load(
          sess, tags=tags, export_dir=self._output_saved_model_path
      )
      # The graph should contain a quantized function call (it contains a
      # single f32 matmul node).
      self.assertTrue(
          self._contains_quantized_function_call(
              output_meta_graph_def.graph_def
          )
      )
      self.assertCountEqual(
          output_meta_graph_def.signature_def.keys(), signature_def_keys
      )
      signature_def = output_meta_graph_def.signature_def[signature_def_key]
      # Resolve the input/output tensors from the signature and run a lookup.
      input_tensor_name = signature_def.inputs['input_vocabs'].name
      input_tensor = sess.graph.get_tensor_by_name(input_tensor_name)
      lookup_tensor_name = signature_def.outputs['lookup'].name
      lookup_tensor = sess.graph.get_tensor_by_name(lookup_tensor_name)
      lookup_val = sess.run(
          lookup_tensor,
          feed_dict={
              input_tensor: np.array([b'dynamic', b'quantization', b'range'])
          },
      )
      # "dynamic" is not in the table: -1 (default value)
      self.assertAllClose(lookup_val, [-1.0, 2.0, 1.0])
  # Run this test only with the eager mode.
  @test_util.run_v2_only
  def test_ptq_model_with_variable(self):
    """Tests post-training static-range quantization of a model with a variable.

    The conv filter is held in a tf.Variable (not a constant), which
    exercises variable freezing during quantization.
    """

    class ConvModelWithVariable(module.Module):
      """A simple model that performs a single convolution to the input tensor.

      It keeps the filter as a tf.Variable.
      """

      def __init__(self) -> None:
        """Initializes the filter variable."""
        self.filters = variables.Variable(
            random_ops.random_uniform(
                shape=(2, 3, 3, 2), minval=-1.0, maxval=1.0
            )
        )

      @def_function.function(
          input_signature=[
              tensor_spec.TensorSpec(
                  name='input', shape=(1, 3, 4, 3), dtype=dtypes.float32
              ),
          ]
      )
      def __call__(self, x: core.Tensor) -> Mapping[str, core.Tensor]:
        """Performs a 2D convolution operation.

        Args:
          x: Input tensor to perform convolution on.

        Returns:
          A map of: output key -> output result.
        """
        out = nn_ops.conv2d(
            x,
            self.filters,
            strides=[1, 1, 2, 1],
            dilations=[1, 1, 1, 1],
            padding='SAME',
            data_format='NHWC',
        )
        return {'output': out}

    def gen_data() -> repr_dataset.RepresentativeDataset:
      """Creates an interable of representative samples.

      Yields:
        Representative samples, which is basically a mapping of: input key ->
        input value.
      """
      for _ in range(8):
        yield {
            'input': random_ops.random_uniform(
                shape=(1, 3, 4, 3), minval=0, maxval=150
            )
        }

    model = ConvModelWithVariable()
    saved_model_save.save(model, self._input_saved_model_path)
    signature_keys = [signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    tags = {tag_constants.SERVING}
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=signature_keys,
        op_set=quant_opts_pb2.TF,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=gen_data(),
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), signature_keys
    )
    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
    # The variable-backed conv must have been replaced by a quantized call.
    self.assertTrue(self._contains_quantized_function_call(output_graphdef))
  # Check only the most simple case and the most complicated cases.
  @parameterized.named_parameters(
      {
          'testcase_name': 'none',
          'activation_fn': None,
          'has_bias': False,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'dilation',
          'activation_fn': None,
          'has_bias': False,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'relu',
          'activation_fn': nn_ops.relu,
          'has_bias': False,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'dilation_relu',
          'activation_fn': nn_ops.relu,
          'has_bias': False,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'relu6',
          'activation_fn': nn_ops.relu6,
          'has_bias': False,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'dilation_relu6',
          'activation_fn': nn_ops.relu6,
          'has_bias': False,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'bn',
          'activation_fn': None,
          'has_bias': False,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'dilation_bn',
          'activation_fn': None,
          'has_bias': False,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'with_bias',
          'activation_fn': None,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'dilation_with_bias',
          'activation_fn': None,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'with_bias_and_relu6',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'dilation_with_bias_and_relu6',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'with_bias_and_bn_and_relu6',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'dilation_with_bias_and_bn_and_relu6',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'with_bias_and_relu6_to_xla_per_tensor',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'with_bias_and_relu6_to_xla_per_channel',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': True,
      },
      {
          'testcase_name': 'dilation_with_bias_and_relu6_to_xla',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'dilation_with_bias_and_relu6_to_xla_per_channel',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': True,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'with_bias_and_bn_and_relu6_to_xla',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'with_bias_and_bn_and_relu6_to_xla_per_channel',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': True,
      },
      {
          'testcase_name': 'dilation_with_bias_and_bn_and_relu6_to_xla',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': (
              'dilation_with_bias_and_bn_and_relu6_to_xla_per_channel'
          ),
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': True,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'with_bias_and_relu6_to_xla_dynamic',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': True,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'with_bias_and_relu6_to_xla_dynamic_per_channel',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': True,
          'enable_per_channel_quantization': True,
      },
      {
          'testcase_name': 'dilation_with_bias_and_relu6_to_xla_dynamic',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': True,
          'enable_per_channel_quantization': False,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': (
              'dilation_with_bias_and_relu6_to_xla_dynamic_per_channel'
          ),
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': True,
          'enable_per_channel_quantization': True,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'with_bias_and_bn_and_relu6_to_xla_dynamic',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': True,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': (
              'with_bias_and_bn_and_relu6_to_xla_dynamic_per_channel'
          ),
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': True,
          'enable_per_channel_quantization': True,
      },
      {
          'testcase_name': 'dilation_with_bias_and_bn_and_relu6_to_xla_dynamic',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': True,
          'enable_per_channel_quantization': False,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': (
              'dilation_with_bias_and_bn_and_relu6_to_xla_dynamic_per_channel'
          ),
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': True,
          'enable_per_channel_quantization': True,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'with_bias_and_relu6_to_uq',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.UNIFORM_QUANTIZED,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'dilation_with_bias_and_relu6_to_uq',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.UNIFORM_QUANTIZED,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'with_bias_and_bn_and_relu6_to_uq',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.UNIFORM_QUANTIZED,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'dilation_with_bias_and_bn_and_relu6_to_uq',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.UNIFORM_QUANTIZED,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'with_bias_and_relu6_to_uq_per_channel',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.UNIFORM_QUANTIZED,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': True,
      },
      {
          'testcase_name': 'dilation_with_bias_and_relu6_to_uq_per_channel',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.UNIFORM_QUANTIZED,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': True,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'with_bias_and_bn_and_relu6_to_uq_per_channel',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.UNIFORM_QUANTIZED,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': True,
      },
      {
          'testcase_name': (
              'dilation_with_bias_and_bn_and_relu6_to_uq_per_channel'
          ),
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.UNIFORM_QUANTIZED,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': True,
          'dilations': [1, 2, 2, 1],
      },
      {
          'testcase_name': 'with_bias_and_relu6_to_stablehlo_per_channel',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.STABLEHLO,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': True,
      },
  )
  @test_util.run_in_graph_and_eager_modes
  def test_conv_ptq_model(
      self,
      activation_fn: Optional[ops.Operation],
      has_bias: bool,
      has_batch_norm: bool,
      target_opset: quant_opts_pb2.OpSet,
      input_shape_dynamic: bool,
      enable_per_channel_quantization: bool,
      dilations: Sequence[int] = None,
  ):
    """Tests post-training static-range quantization of a conv2d model.

    Covers combinations of activation, bias, batch norm, dynamic input
    shapes, per-channel quantization, and target opsets (TF, XLA,
    UNIFORM_QUANTIZED, STABLEHLO). Asserts the quantized output stays close
    to the float model and that the output graph contains the ops expected
    for the chosen opset.

    Args:
      activation_fn: Optional activation fused after the conv.
      has_bias: Whether a bias add follows the conv.
      has_batch_norm: Whether a FusedBatchNorm follows the conv (must be
        folded away by quantization).
      target_opset: Target opset of the quantized model.
      input_shape_dynamic: If True, all spatial/batch dims are unknown.
      enable_per_channel_quantization: Whether weights are quantized
        per output channel.
      dilations: Optional conv dilation rates.
    """
    input_shape = [None, None, None, 3] if input_shape_dynamic else [1, 3, 4, 3]
    filter_shape = [2, 3, 3, 2]
    strides = [1, 1, 1, 1]
    model = self._create_conv2d_model(
        input_shape,
        filter_shape,
        has_bias,
        has_batch_norm,
        activation_fn,
        strides,
        dilations,
    )
    saved_model_save.save(model, self._input_saved_model_path)
    # Generate model input data.
    rng = np.random.default_rng(seed=1234)
    # Unknown dims are materialized as 2 for the concrete sample input.
    static_input_shape = [dim if dim is not None else 2 for dim in input_shape]
    input_data = ops.convert_to_tensor(
        rng.uniform(low=0.0, high=1.0, size=static_input_shape).astype(
            np.float32
        )
    )

    def data_gen() -> repr_dataset.RepresentativeDataset:
      # Calibration dataset for static-range quantization.
      for _ in range(500):
        yield {
            'input_tensor': rng.uniform(
                low=0.0, high=1.0, size=static_input_shape
            ).astype(np.float32)
        }

    tags = {tag_constants.SERVING}
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=target_opset,
        enable_per_channel_quantization=enable_per_channel_quantization,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=data_gen(),
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )
    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
    # The difference between float model and target path quantized model is
    # expected to be small.
    # The atol value is arbitrary.
    if not enable_per_channel_quantization:
      expected_outputs = model.conv(input_data)
      target_outputs = converted_model.signatures['serving_default'](
          input_tensor=ops.convert_to_tensor(input_data)
      )
      self.assertAllClose(target_outputs, expected_outputs, atol=0.06)
    # Opset-specific graph checks.
    if target_opset == quant_opts_pb2.XLA:
      self.assertTrue(self._contains_op(output_graphdef, 'XlaConvV2'))
      if enable_per_channel_quantization:
        # Per-channel quantization: expect a const whose output shape equals
        # the filter's output-channel count.
        per_channel_size_attr = attr_value_pb2.AttrValue(
            list=attr_value_pb2.AttrValue.ListValue(
                shape=[
                    tensor_shape_pb2.TensorShapeProto(
                        dim=[
                            tensor_shape_pb2.TensorShapeProto.Dim(
                                size=filter_shape[-1]
                            )
                        ]
                    )
                ]
            )
        )
        self.assertTrue(
            self._contains_op(
                output_graphdef,
                'Const',
                '_output_shapes',
                per_channel_size_attr,
            )
        )
    elif target_opset == quant_opts_pb2.UNIFORM_QUANTIZED:
      self.assertTrue(
          self._contains_op(output_graphdef, 'UniformQuantizedConvolution')
      )
      if enable_per_channel_quantization:
        # NHWC filter layout: axis 3 is the output-channel axis.
        quantized_axis = 3
        quantized_dim_size_attr = attr_value_pb2.AttrValue(
            list=attr_value_pb2.AttrValue.ListValue(
                shape=[
                    tensor_shape_pb2.TensorShapeProto(
                        dim=[
                            tensor_shape_pb2.TensorShapeProto.Dim(
                                size=filter_shape[quantized_axis]
                            )
                        ]
                    )
                ]
            )
        )
      else:
        quantized_axis = -1
        # Empty dimension. Per-tensor quantization has singular channel.
        quantized_dim_size_attr = attr_value_pb2.AttrValue(
            list=attr_value_pb2.AttrValue.ListValue(
                shape=[tensor_shape_pb2.TensorShapeProto()]
            )
        )
      quantized_axis_attr = attr_value_pb2.AttrValue(i=quantized_axis)
      # Every per-channel-capable quantized op must carry the expected
      # quantization axis and channel dimension.
      self.assertEqual(
          self._count_ops(
              output_graphdef,
              _PER_CHANNEL_QUANTIZED_OPS,
              'rhs_quantization_axis',
              quantized_axis_attr,
          ),
          self._count_ops(output_graphdef, _PER_CHANNEL_QUANTIZED_OPS),
      )
      self.assertEqual(
          self._count_ops(
              output_graphdef,
              _PER_CHANNEL_OP_NAMES,
              '_output_shapes',
              quantized_dim_size_attr,
              get_op_name=True,
          ),
          self._count_ops(
              output_graphdef,
              _PER_CHANNEL_OP_NAMES,
              get_op_name=True,
          ),
      )
      self.assertFalse(self._contains_op(output_graphdef, 'Conv2D'))
    elif target_opset == quant_opts_pb2.STABLEHLO:
      # This is to verify the invocation of StableHLO quantizer works. More
      # thorough functional tests are in StableHLO quantizer directory.
      self.assertTrue(self._contains_op(output_graphdef, 'XlaCallModule'))
    else:
      self.assertTrue(self._contains_quantized_function_call(output_graphdef))
    # Batch norm must have been folded into the conv.
    self.assertFalse(self._contains_op(output_graphdef, 'FusedBatchNormV3'))
  @parameterized.named_parameters(
      ('to_tf_with_int32_input_type', dtypes.int32, quant_opts_pb2.TF),
      ('to_xla_with_int32_input_type', dtypes.int32, quant_opts_pb2.XLA),
      ('to_xla_with_int64_input_type', dtypes.int64, quant_opts_pb2.XLA),
      (
          'to_uq_with_int32_input_type',
          dtypes.int32,
          quant_opts_pb2.UNIFORM_QUANTIZED,
      ),
  )
  @test_util.run_v2_only
  def test_gather_and_conv_model(
      self, input_type: dtypes, target_opset: quant_opts_pb2.OpSet
  ):
    """Tests PTQ of a gather + conv model across target opsets.

    Checks opset-dependent size expectations (UQ keeps the gather weights
    in f32, so compresses less), the presence of the expected quantized op,
    and output closeness to the float model.

    Args:
      input_type: Integer dtype of the gather indices input.
      target_opset: Target opset of the quantized model.
    """
    model = self._create_simple_gather_and_conv_model(
        input_type, filter_shape=(2, 3, 3, 1024)
    )
    saved_model_save.save(model, self._input_saved_model_path)
    # Calibration data: 500 random indices in the gather's valid range.
    data_gen = self._create_data_generator(
        input_key='input_tensor',
        shape=[500],
        minval=0,
        maxval=64,
        dtype=input_type,
    )
    tags = {tag_constants.SERVING}
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=target_opset,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=data_gen,
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )
    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
    if target_opset == quant_opts_pb2.UNIFORM_QUANTIZED:
      # UQ path keeps more data in float, so the size shrinks less.
      self.assertGreater(
          testing.get_size_ratio(
              self._output_saved_model_path, self._input_saved_model_path
          ),
          0.68,
      )
      self.assertTrue(
          self._contains_op(output_graphdef, 'UniformQuantizedConvolution')
      )
    else:
      # Due to other meta data, the compression is not exactly 1/4.
      self.assertLess(
          testing.get_size_ratio(
              self._output_saved_model_path, self._input_saved_model_path
          ),
          1 / 3,
      )
      if target_opset == quant_opts_pb2.XLA:
        self.assertTrue(self._contains_op(output_graphdef, 'XlaConvV2'))
      else:
        self.assertTrue(self._contains_quantized_function_call(output_graphdef))
    test_data = np.random.uniform(low=0, high=64, size=(32)).astype(
        input_type.as_numpy_dtype
    )
    original_outputs = model.model(test_data)['output']
    quantized_output = converted_model.signatures['serving_default'](
        input_tensor=ops.convert_to_tensor(test_data)
    )['output']
    # atol is an empirically-chosen quantization-error budget for this model.
    self.assertAllClose(original_outputs, quantized_output, atol=442.7)
  @test_util.run_v2_only
  def test_while_op_model(
      self,
  ):
    """Tests PTQ of a model containing a tf.while_loop.

    Only the convolution outside the while op is expected to be quantized;
    ops inside the while body are currently left in float (see TODO below).
    """
    input_shape = (1, 5, 5, 32)
    model = self._create_while_model(input_shape)
    saved_model_save.save(model, self._input_saved_model_path)
    tags = {tag_constants.SERVING}
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=quant_opts_pb2.XLA,
    )

    def data_gen() -> repr_dataset.RepresentativeDataset:
      # Small calibration dataset; two samples suffice for this check.
      for _ in range(2):
        yield {
            'input_tensor': ops.convert_to_tensor(
                np.random.uniform(low=0, high=150, size=input_shape).astype(
                    'f4'
                )
            ),
        }

    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=data_gen(),
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )
    loader = saved_model_loader.SavedModelLoader(self._output_saved_model_path)
    output_graphdef = loader.get_meta_graph_def_from_tags(tags).graph_def
    # Convolution outside the while op is quantized.
    self.assertTrue(
        self._contains_op(
            output_graphdef,
            op_name='XlaConvV2',
            attr_name='RhsT',
            attr_val=attr_value_pb2.AttrValue(type=types_pb2.DT_INT8),
        )
    )
    # TODO: b/294783597 - [Converter][TF-Quantizer] Support quantization for the
    # ops in the while op body for both SRQ and WO
    # Convolution inside the while op is not quantized.
    self.assertTrue(
        self._contains_op(
            output_graphdef,
            op_name='Conv2D',
            attr_name='T',
            attr_val=attr_value_pb2.AttrValue(type=types_pb2.DT_FLOAT),
        )
    )
  # Check only the most simple case and the most complicated cases.
  @parameterized.named_parameters(
      {
          'testcase_name': 'none',
          'activation_fn': None,
          'has_bias': False,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'relu',
          'activation_fn': nn_ops.relu,
          'has_bias': False,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'relu6',
          'activation_fn': nn_ops.relu6,
          'has_bias': False,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'bn',
          'activation_fn': None,
          'has_bias': False,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'with_bias',
          'activation_fn': None,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'with_bias_and_relu6',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'with_bias_and_bn_and_relu6',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.TF,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'with_bias_and_relu6_to_xla',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'with_bias_and_relu6_to_xla_per_channel',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': True,
      },
      {
          'testcase_name': 'with_bias_and_bn_and_relu6_to_xla',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'with_bias_and_bn_and_relu6_to_xla_per_channel',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': True,
      },
      {
          'testcase_name': 'with_bias_and_relu6_to_xla_dynamic',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': True,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'with_bias_and_relu6_to_xla_dynamic_per_channel',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': True,
          'enable_per_channel_quantization': True,
      },
      {
          'testcase_name': 'with_bias_and_bn_and_relu6_to_xla_dynamic',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': True,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': (
              'with_bias_and_bn_and_relu6_to_xla_dynamic_per_channel'
          ),
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.XLA,
          'input_shape_dynamic': True,
          'enable_per_channel_quantization': True,
      },
      {
          'testcase_name': 'with_bias_and_relu6_to_uq',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.UNIFORM_QUANTIZED,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'with_bias_and_bn_and_relu6_to_uq',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.UNIFORM_QUANTIZED,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': False,
      },
      {
          'testcase_name': 'with_bias_and_relu6_to_uq_per_channel',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': False,
          'target_opset': quant_opts_pb2.UNIFORM_QUANTIZED,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': True,
      },
      {
          'testcase_name': 'with_bias_and_bn_and_relu6_to_uq_per_channel',
          'activation_fn': nn_ops.relu6,
          'has_bias': True,
          'has_batch_norm': True,
          'target_opset': quant_opts_pb2.UNIFORM_QUANTIZED,
          'input_shape_dynamic': False,
          'enable_per_channel_quantization': True,
      },
  )
  @test_util.run_in_graph_and_eager_modes
  def test_depthwise_conv_ptq_model(
      self,
      activation_fn: Optional[ops.Operation],
      has_bias: bool,
      has_batch_norm: bool,
      target_opset: quant_opts_pb2.OpSet,
      input_shape_dynamic: bool,
      enable_per_channel_quantization: bool,
  ):
    """Runs static-range PTQ on a depthwise-conv model and inspects the graph.

    Asserts the quantized output GraphDef contains the ops expected for
    `target_opset` (XLA keeps `DepthwiseConv2dNative`, the uniform-quantized
    opset emits `UniformQuantizedConvolution`, TF emits quantized function
    calls), checks per-channel shape/axis attributes when per-channel
    quantization is enabled, and verifies no `FusedBatchNormV3` remains.
    """
    input_shape = [None, None, None, 3] if input_shape_dynamic else [1, 3, 4, 3]
    filter_shape = [2, 3, 3, 1]
    model = self._create_depthwise_conv2d_model(
        input_shape, filter_shape, has_bias, has_batch_norm, activation_fn
    )
    saved_model_save.save(model, self._input_saved_model_path)

    # Calibration data uses the static shape even when the model's input
    # signature is dynamic.
    def data_gen() -> repr_dataset.RepresentativeDataset:
      for _ in range(8):
        yield {
            'input_tensor': ops.convert_to_tensor(
                np.random.uniform(low=0, high=150, size=(1, 3, 4, 3)).astype(
                    'f4'
                )
            ),
        }

    tags = {tag_constants.SERVING}
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=target_opset,
        enable_per_channel_quantization=enable_per_channel_quantization,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=data_gen(),
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )
    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
    if target_opset == quant_opts_pb2.XLA:
      self.assertTrue(
          self._contains_op(output_graphdef, 'DepthwiseConv2dNative')
      )
      if enable_per_channel_quantization:
        # Expects a constant whose shape equals channel_multiplier * channels
        # (filter_shape[-1] * filter_shape[2]).
        per_channel_size_attr = attr_value_pb2.AttrValue(
            list=attr_value_pb2.AttrValue.ListValue(
                shape=[
                    tensor_shape_pb2.TensorShapeProto(
                        dim=[
                            tensor_shape_pb2.TensorShapeProto.Dim(
                                size=filter_shape[-1] * filter_shape[2]
                            )
                        ]
                    )
                ]
            )
        )
        self.assertTrue(
            self._contains_op(
                output_graphdef,
                'Const',
                '_output_shapes',
                per_channel_size_attr,
            )
        )
    elif target_opset == quant_opts_pb2.UNIFORM_QUANTIZED:
      self.assertTrue(
          self._contains_op(output_graphdef, 'UniformQuantizedConvolution')
      )
      if enable_per_channel_quantization:
        quantized_axis = 3
        quantized_dim_size_attr = attr_value_pb2.AttrValue(
            list=attr_value_pb2.AttrValue.ListValue(
                shape=[
                    tensor_shape_pb2.TensorShapeProto(
                        dim=[
                            tensor_shape_pb2.TensorShapeProto.Dim(
                                # Depthwise conv is reshaped to [H,W,1,CxM].
                                size=filter_shape[quantized_axis]
                                * filter_shape[2]
                            )
                        ]
                    )
                ]
            )
        )
      else:
        quantized_axis = -1
        # Empty dimension. Per-tensor quantization has singular channel.
        quantized_dim_size_attr = attr_value_pb2.AttrValue(
            list=attr_value_pb2.AttrValue.ListValue(
                shape=[tensor_shape_pb2.TensorShapeProto()]
            )
        )
      quantized_axis_attr = attr_value_pb2.AttrValue(i=quantized_axis)
      # All per-channel-quantizable ops must carry the expected axis attr.
      self.assertEqual(
          self._count_ops(
              output_graphdef,
              _PER_CHANNEL_QUANTIZED_OPS,
              'rhs_quantization_axis',
              quantized_axis_attr,
          ),
          self._count_ops(output_graphdef, _PER_CHANNEL_QUANTIZED_OPS),
      )
      self.assertEqual(
          self._count_ops(
              output_graphdef,
              _PER_CHANNEL_OP_NAMES,
              '_output_shapes',
              quantized_dim_size_attr,
              get_op_name=True,
          ),
          self._count_ops(
              output_graphdef,
              _PER_CHANNEL_OP_NAMES,
              get_op_name=True,
          ),
      )
      self.assertFalse(self._contains_op(output_graphdef, 'Conv2D'))
    else:
      self.assertTrue(self._contains_quantized_function_call(output_graphdef))
    self.assertFalse(self._contains_op(output_graphdef, 'FusedBatchNormV3'))
  @parameterized.parameters(
      *testing.parameter_combinations([
          {
              'activation_fn': [None, nn_ops.relu, nn_ops.relu6],
              'has_bias': [True, False],
              'batch_sizes': [([], []), ([10], [10]), ([2, 3], [2, 3])],
              'target_opset': [quant_opts_pb2.XLA],
          },
          # Test broadcastable batch sizes.
          {
              'activation_fn': [None],
              'has_bias': [True],
              'batch_sizes': [
                  ([2], []),
                  ([], [2]),
                  ([1], [2]),
                  ([None], []),
              ],
              'target_opset': [quant_opts_pb2.XLA],
          },
          {
              'activation_fn': [None, nn_ops.relu, nn_ops.relu6],
              'has_bias': [True, False],
              'batch_sizes': [([], []), ([10], [10]), ([2, 3], [2, 3])],
              'target_opset': [quant_opts_pb2.UNIFORM_QUANTIZED],
          },
      ])
  )
  @test_util.run_in_graph_and_eager_modes
  def test_matmul_ptq_model(
      self,
      activation_fn: Optional[ops.Operation],
      has_bias: bool,
      batch_sizes: Sequence[int],
      target_opset: quant_opts_pb2.OpSet,
  ):
    """Runs static-range PTQ on a matmul model and compares with float output.

    Covers (possibly broadcastable) lhs/rhs batch dimensions, optional bias
    and activation, and both the XLA and uniform-quantized target opsets.
    Asserts the expected quantized dot op appears under the matmul node name
    and the quantized outputs stay within an opset-dependent tolerance.
    """
    lhs_batch_size, rhs_batch_size = batch_sizes
    input_shape = (*lhs_batch_size, 1, 1024)
    filter_shape = (*rhs_batch_size, 1024, 3)
    # `None` (dynamic) dimensions are materialized as 2 for concrete data.
    static_input_shape = [dim if dim is not None else 2 for dim in input_shape]
    model = self._create_matmul_model(
        input_shape,
        filter_shape,
        self._input_saved_model_path,
        has_bias,
        activation_fn,
    )
    rng = np.random.default_rng(seed=1234)

    def data_gen() -> repr_dataset.RepresentativeDataset:
      for _ in range(500):
        yield {
            'input_tensor': rng.uniform(
                low=0.0, high=1.0, size=static_input_shape
            ).astype(np.float32)
        }

    tags = {tag_constants.SERVING}
    input_data = ops.convert_to_tensor(
        rng.uniform(low=0.0, high=1.0, size=static_input_shape).astype(
            np.float32
        )
    )
    # Float reference computed before quantization.
    expected_outputs = model.matmul(input_data)
    # Check the converted model in the target opset.
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=target_opset,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=data_gen(),
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )
    loader = saved_model_loader.SavedModelLoader(self._output_saved_model_path)
    output_graphdef = loader.get_meta_graph_def_from_tags(tags).graph_def
    if target_opset == quant_opts_pb2.XLA:
      self.assertTrue(
          self._contains_op(
              output_graphdef, 'XlaDotV2', node_name='sample/matmul.*'
          )
      )
    elif target_opset == quant_opts_pb2.UNIFORM_QUANTIZED:
      self.assertTrue(
          self._contains_op(
              output_graphdef,
              'UniformQuantizedDot',
              node_name='sample/matmul.*',
          )
      )
    new_outputs = converted_model.signatures['serving_default'](
        input_tensor=ops.convert_to_tensor(input_data)
    )
    # The difference between TF and target path is expected to be small.
    # The atol value is arbitrary.
    # Currently, Uniform Quantized Opset are producing non-optimal graphs:
    # unnecessary requantization followed by dequantization, so the error will
    # be higher.
    if target_opset == quant_opts_pb2.UNIFORM_QUANTIZED:
      self.assertAllClose(new_outputs, expected_outputs, atol=0.25)
    else:
      self.assertAllClose(new_outputs, expected_outputs, atol=0.13)
@parameterized.named_parameters(
{
'testcase_name': 'with_biasadd',
'input_shape': (32, 16),
'filter_shape': (16, 8),
'bias_size': 4,
'use_biasadd': True,
'activation_fn': nn_ops.relu,
},
{
'testcase_name': 'with_addv2',
'input_shape': (32, 16),
'filter_shape': (16, 8),
'bias_size': 4,
'use_biasadd': False,
'activation_fn': nn_ops.relu,
},
)
def test_matmul_with_reshape_and_bias_ptq_model(
self, input_shape, filter_shape, bias_size, activation_fn, use_biasadd
):
model = self._create_matmul_model(
input_shape,
filter_shape,
self._input_saved_model_path,
True,
activation_fn,
bias_size,
use_biasadd,
)
rng = np.random.default_rng(seed=1234)
def data_gen() -> repr_dataset.RepresentativeDataset:
for _ in range(5):
yield {
'input_tensor': rng.uniform(
low=0.0, high=1.0, size=input_shape
).astype(np.float32)
}
tags = {tag_constants.SERVING}
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
op_set=quant_opts_pb2.OpSet.XLA,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
representative_dataset=data_gen(),
)
input_data = ops.convert_to_tensor(
rng.uniform(low=0.0, high=1.0, size=input_shape).astype(np.float32)
)
expected_outputs = model.matmul(input_data)
got_outputs = converted_model.signatures['serving_default'](
input_tensor=ops.convert_to_tensor(input_data)
)
self.assertAllClose(expected_outputs, got_outputs, atol=0.05)
  @parameterized.parameters(
      ('abc,cde->abde', quant_opts_pb2.XLA),
      ('abc,dce->abde', quant_opts_pb2.XLA),
  )
  def test_einsum_ptq_model(
      self,
      equation: str,
      target_opset: quant_opts_pb2.OpSet,
  ):
    """Quantizes an einsum model twice (TF opset, then `target_opset`).

    Verifies both converted models stay numerically close to the float model
    and to each other, and that the XLA path lowers the einsum to `XlaDotV2`.
    """
    _, y_shape, bias_shape, x_signature, y_signature = (
        self._prepare_sample_einsum_datashapes(equation, use_bias=True)
    )
    model = self._create_einsum_model(
        equation,
        y_shape,
        x_signature,
        y_signature,
        bias_shape,
        activation_fn=nn_ops.relu,
    )
    signatures = {
        'serving_default': model.einsum_with_kernel.get_concrete_function(),
    }
    saved_model_save.save(model, self._input_saved_model_path, signatures)

    def data_gen() -> repr_dataset.RepresentativeDataset:
      for _ in range(4):
        yield {
            'x': ops.convert_to_tensor(
                np.random.uniform(low=0.0, high=1.0, size=x_signature).astype(
                    'f4'
                )
            ),
        }

    tags = {tag_constants.SERVING}
    # First pass: quantize with the TF opset as the baseline.
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=quant_opts_pb2.TF,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=data_gen(),
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )
    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
    self.assertTrue(self._contains_quantized_function_call(output_graphdef))
    input_data = ops.convert_to_tensor(
        np.random.uniform(low=0.0, high=1.0, size=x_signature).astype('f4')
    )
    expected_outputs = model.einsum_with_kernel(input_data)
    got_outputs = converted_model.signatures['serving_default'](
        x=ops.convert_to_tensor(input_data)
    )
    self.assertAllClose(expected_outputs, got_outputs, atol=0.097)
    # Check the converted model in the target opset.
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=target_opset,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path_2,
        quantization_options,
        representative_dataset=data_gen(),
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )
    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path_2
    )
    output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
    if target_opset == quant_opts_pb2.XLA:
      self.assertTrue(self._contains_op(output_graphdef, 'XlaDotV2'))
    new_outputs = converted_model.signatures['serving_default'](
        x=ops.convert_to_tensor(input_data)
    )
    # The difference between TF and target path is expected to be small.
    self.assertAllClose(new_outputs, got_outputs, atol=0.097)
    self.assertAllClose(new_outputs, expected_outputs, atol=0.057)
  def test_reuse_calibration_data(self):
    """Checks that calibration statistics cached on disk are reused.

    The first quantize() call runs graph-mode calibration and writes data to
    `calibration_data_dir`; the second call must skip calibration (no
    calibration log) yet produce a model with identical outputs.
    """
    model = self._create_simple_gather_and_conv_model(
        dtypes.int32, filter_shape=(2, 3, 3, 1024)
    )
    saved_model_save.save(model, self._input_saved_model_path)
    data_gen = self._create_data_generator(
        input_key='input_tensor',
        shape=[50],
        minval=0,
        maxval=64,
        dtype=dtypes.int32,
    )
    tags = {tag_constants.SERVING}
    calibration_data_dir = self.create_tempdir('calibration_data').full_path
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=quant_opts_pb2.XLA,
        force_graph_mode_calibration=True,
        calibration_options=stablehlo_quant_config_pb2.CalibrationOptions(
            calibration_method=_CalibrationMethod.CALIBRATION_METHOD_MIN_MAX,
            calibration_data_dir=calibration_data_dir,
        ),
    )
    # Run quantization the first time, calibration is expected to be run.
    with self.assertLogs(level='INFO') as info_logs:
      # Save the logger verbosity.
      prev_log_level = logging.get_verbosity()
      logging.set_verbosity(logging.INFO)
      try:
        converted_model1 = quantize_model.quantize(
            self._input_saved_model_path,
            self._output_saved_model_path,
            quantization_options,
            representative_dataset=data_gen,
        )
      finally:
        # Restore the logger verbosity.
        logging.set_verbosity(prev_log_level)
      self.assertNotEmpty(info_logs.records)
      self.assertTrue(
          self._any_log_contains(
              'Calibration step is executed in graph mode.',
              info_logs.records,
          )
      )
      self.assertIsNotNone(converted_model1)
      self.assertCountEqual(
          converted_model1.signatures._signatures.keys(), {'serving_default'}
      )
      output_loader = saved_model_loader.SavedModelLoader(
          self._output_saved_model_path
      )
      output_graphdef = output_loader.get_meta_graph_def_from_tags(
          tags
      ).graph_def
      self.assertTrue(self._contains_op(output_graphdef, 'XlaConvV2'))
    # Run quantization the second time, calibration is expected to be skipped.
    with self.assertLogs(level='INFO') as info_logs:
      # Save the logger verbosity.
      prev_log_level = logging.get_verbosity()
      logging.set_verbosity(logging.INFO)
      try:
        converted_model2 = quantize_model.quantize(
            self._input_saved_model_path,
            self._output_saved_model_path,
            quantization_options,
            representative_dataset=data_gen,
            overwrite_output_directory=True,
        )
      finally:
        # Restore the logger verbosity.
        logging.set_verbosity(prev_log_level)
      self.assertNotEmpty(info_logs.records)
      self.assertFalse(
          self._any_log_contains(
              'Calibration step is executed in graph mode.',
              info_logs.records,
          )
      )
      self.assertIsNotNone(converted_model2)
      self.assertCountEqual(
          converted_model2.signatures._signatures.keys(), {'serving_default'}
      )
    # Expect two models to produce the same results.
    test_data = ops.convert_to_tensor(
        np.random.uniform(low=0, high=64, size=(32)).astype(
            dtypes.int32.as_numpy_dtype
        )
    )
    new_outputs_1 = converted_model1.signatures['serving_default'](
        input_tensor=test_data
    )['output']
    new_outputs_2 = converted_model2.signatures['serving_default'](
        input_tensor=test_data
    )['output']
    self.assertAllClose(new_outputs_1, new_outputs_2)
  @test_util.run_in_graph_and_eager_modes
  def test_function_alias_preserved(self):
    """Checks that `function_aliases` survive quantization.

    Saves a conv model with a `conv_func` alias, quantizes it (XLA opset),
    then asserts the alias is still present in the output meta graph and the
    aliased function contains the quantized `XlaConvV2` op.
    """
    model = self._create_conv2d_model(
        input_shape=(1, 3, 4, 3), filter_shape=(2, 3, 3, 2)
    )
    signatures = {
        'serving_default': model.conv.get_concrete_function(),
    }
    save_opts = save_options.SaveOptions(
        function_aliases={'conv_func': model.conv}
    )
    saved_model_save.save(
        model, self._input_saved_model_path, signatures, save_opts
    )

    def data_gen() -> repr_dataset.RepresentativeDataset:
      rng = np.random.default_rng(seed=123)
      for _ in range(2):
        yield {
            'input_tensor': rng.uniform(
                low=0, high=150, size=(1, 3, 4, 3)
            ).astype(np.float32),
        }

    tags = {tag_constants.SERVING}
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=quant_opts_pb2.OpSet.XLA,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=data_gen(),
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )
    # Test whether the aliased function exists.
    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    # Confirm that the function alias is preserved.
    meta_graph_def = output_loader.get_meta_graph_def_from_tags(tags)
    function_aliases = meta_graph_def.meta_info_def.function_aliases
    self.assertNotEmpty(function_aliases)
    self.assertCountEqual(function_aliases.values(), {'conv_func'})
    # Test that the aliased function contains a quantized op.
    for func_name, alias in function_aliases.items():
      if alias == 'conv_func':
        for func in meta_graph_def.graph_def.library.function:
          if func.signature.name == func_name:
            self.assertTrue(
                self._contains_op_with_name_and_attribute(
                    func.node_def,
                    op_name='XlaConvV2',
                    attr_name='',
                    attr_val=None,
                )
            )
  @test_util.run_in_graph_and_eager_modes
  def test_function_alias_preserved_in_qat(self):
    """Checks that `function_aliases` survive quantization of a QAT model.

    Saves a QAT einsum model with an `einsum_with_kernel` alias, quantizes it
    (XLA opset, no representative dataset needed for QAT), then asserts the
    alias is preserved and the aliased function contains `XlaDotV2`.
    """
    _, y_shape, _, x_signature, y_signature = (
        self._prepare_sample_einsum_datashapes('ab,bc->ac')
    )
    model = self._create_einsum_model(
        'ab,bc->ac', y_shape, x_signature, y_signature, is_qat_model=True
    )
    signatures = {
        'serving_default': model.einsum_with_kernel.get_concrete_function(),
    }
    save_opts = save_options.SaveOptions(
        function_aliases={'einsum_with_kernel': model.einsum_with_kernel}
    )
    saved_model_save.save(
        model, self._input_saved_model_path, signatures, save_opts
    )
    tags = {tag_constants.SERVING}
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=quant_opts_pb2.OpSet.XLA,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )
    # Test whether the aliased function exists.
    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    # Confirm that the function alias is preserved.
    meta_graph_def = output_loader.get_meta_graph_def_from_tags(tags)
    function_aliases = meta_graph_def.meta_info_def.function_aliases
    self.assertNotEmpty(function_aliases)
    self.assertCountEqual(function_aliases.values(), {'einsum_with_kernel'})
    # Test that the aliased function contains a quantized op.
    for func_name, alias in function_aliases.items():
      if alias == 'einsum_with_kernel':
        for func in meta_graph_def.graph_def.library.function:
          if func.signature.name == func_name:
            self.assertTrue(
                self._contains_op_with_name_and_attribute(
                    func.node_def,
                    op_name='XlaDotV2',
                    attr_name='',
                    attr_val=None,
                )
            )
  def test_matmul_ptq_model_with_unfreeze_constants(self):
    """Quantizes with `freeze_all_variables=False` and checks variables remain.

    A weight constant larger than 64KiB must be kept unfrozen: the output
    graph contains exactly one variable, and the checkpoint's variable names
    and shapes match those found in the graph.
    """
    # Uses large weight to exceed the constant size threshold of 64KiB
    # (specified by `kDefaultConstantSizeThresholdInBytes`) for unfreezing.
    self._create_matmul_model(
        input_shape=(1, 20),
        weight_shape=(20, 4096),
        saved_model_path=self._input_saved_model_path,
    )
    repr_ds = self._create_data_generator(
        input_key='input_tensor', shape=(1, 20), num_examples=2
    )
    tags = {tag_constants.SERVING}
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=quant_opts_pb2.TF,
        freeze_all_variables=False,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=repr_ds,
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )
    # Test that the quantized model successfully loads without error.
    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    with session.Session(graph=ops.Graph()) as sess:
      output_meta_graph_def = output_loader.load(sess, tags)
      # Confirms that quantization is applied to the model.
      output_graphdef = output_meta_graph_def.graph_def
      self.assertTrue(self._contains_quantized_function_call(output_graphdef))
      # Tests that there are variables in the model.
      variable_node_defs = _find_variables(output_graphdef)
      self.assertLen(variable_node_defs, 1)
      # Reads the variables from the checkpoint file and matches with the
      # variables found in the graph.
      checkpoint_path = os.path.join(
          self._output_saved_model_path, 'variables', 'variables'
      )
      var_name_and_shapes = checkpoint_utils.list_variables(checkpoint_path)
      # Checks that each variable's name and shape match.
      self.assertEqual(len(variable_node_defs), len(var_name_and_shapes))
      for var_name, shape in var_name_and_shapes:
        self.assertIn(var_name, variable_node_defs)
        self.assertEqual(
            shape,
            tensor_shape.TensorShape(
                variable_node_defs[var_name].attr['shape'].shape
            ),
        )
  @parameterized.named_parameters(
      ('use_constant_with_int32_input', dtypes.int32, False, True),
      ('use_variable_with_int32_input', dtypes.int32, True, True),
      ('use_constant_with_int64_input', dtypes.int64, False, True),
      ('use_variable_with_int64_input', dtypes.int64, True, True),
      ('small_gather_use_constant', dtypes.int32, False, False),
      ('small_gather_use_variable', dtypes.int32, True, False),
  )
  @test_util.run_v2_only
  def test_gather_model(
      self, input_type, use_variable, expect_quantized_gather
  ):
    """Quantizes a gather model; weight quantization is size-gated.

    When the gather's weights exceed `min_num_elements_for_weights` the
    quantized model must shrink below 1/3 of the original size; otherwise
    quantization is opted out and the size stays above 2/3.
    """
    model = self._create_gather_model(input_type, use_variable)
    saved_model_save.save(model, self._input_saved_model_path)
    tags = {tag_constants.SERVING}
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=quant_opts_pb2.XLA,
        # Gather op is opt-outed if the size is smaller than the threshold.
        min_num_elements_for_weights=1024 if expect_quantized_gather else 8192,
    )
    data_gen = self._create_data_generator(
        input_key='input_tensor',
        shape=[6],
        minval=0,
        maxval=10,
        dtype=input_type,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=data_gen,
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )
    if expect_quantized_gather:
      # Due to other meta data, the compression is not exactly 1/4.
      self.assertLess(
          testing.get_size_ratio(
              self._output_saved_model_path, self._input_saved_model_path
          ),
          1 / 3,
      )
    else:
      self.assertGreater(
          testing.get_size_ratio(
              self._output_saved_model_path, self._input_saved_model_path
          ),
          2 / 3,
      )
@test_util.run_in_graph_and_eager_modes
def test_model_ptq_use_representative_samples_list(self):
self._create_matmul_model(
input_shape=(1, 1024),
weight_shape=(1024, 3),
saved_model_path=self._input_saved_model_path,
)
tags = {tag_constants.SERVING}
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
op_set=quant_opts_pb2.TF,
)
representative_dataset: repr_dataset.RepresentativeDataset = [
{
'input_tensor': random_ops.random_uniform(shape=(1, 1024)),
}
for _ in range(8)
]
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options=quantization_options,
representative_dataset=representative_dataset,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {'serving_default'}
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
@test_util.run_in_graph_and_eager_modes
def test_model_ptq_use_ndarray_representative_dataset(self):
self._create_matmul_model(
input_shape=(1, 1024),
weight_shape=(1024, 3),
saved_model_path=self._input_saved_model_path,
)
tags = {tag_constants.SERVING}
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
op_set=quant_opts_pb2.TF,
)
# Use np.ndarrays instead of tf.Tensors for the representative dataset.
rng = np.random.default_rng(seed=1234)
representative_dataset = [
{'input_tensor': rng.uniform(size=(1, 1024)).astype(np.float32)}
for _ in range(4)
]
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options=quantization_options,
representative_dataset=representative_dataset,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {'serving_default'}
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
@test_util.run_in_graph_and_eager_modes
def test_model_ptq_use_python_list_representative_dataset(self):
self._create_matmul_model(
input_shape=(1, 1024),
weight_shape=(1024, 3),
saved_model_path=self._input_saved_model_path,
)
tags = {tag_constants.SERVING}
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
op_set=quant_opts_pb2.TF,
)
# Use plain python lists as representative samples.
representative_dataset = [
{
'input_tensor': [[i * 0.1 for i in range(1024)]],
}
for _ in range(4)
]
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options=quantization_options,
representative_dataset=representative_dataset,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {'serving_default'}
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
  @test_util.run_in_graph_and_eager_modes
  def test_model_ptq_use_representative_samples_file(self):
    """Uses a TFRecord representative-dataset file via QuantizationOptions.

    Passing both the `representative_dataset` argument and
    `QuantizationOptions.representative_datasets` must raise a ValueError;
    using only the file-based dataset must succeed and quantize the model.
    """
    self._create_matmul_model(
        input_shape=(1, 1024),
        weight_shape=(1024, 3),
        saved_model_path=self._input_saved_model_path,
    )
    rng = np.random.default_rng(seed=1234)
    representative_dataset: repr_dataset.RepresentativeDataset = [
        {'input_tensor': rng.uniform(size=(1, 1024)).astype(np.float32)}
        for _ in range(4)
    ]
    # Persist the samples to a TFRecord file keyed by signature.
    dataset_file_map = repr_dataset.TfRecordRepresentativeDatasetSaver(
        {'serving_default': os.path.join(self._input_saved_model_path, 'repr')}
    ).save({'serving_default': representative_dataset})
    tags = {tag_constants.SERVING}
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=quant_opts_pb2.TF,
        representative_datasets=dataset_file_map,
    )
    with self.assertRaisesRegex(
        ValueError,
        'Do not specify both the `representative_dataset` argument and the'
        ' `representative_datasets` field in `QuantizationOptions`',
    ):
      quantize_model.quantize(
          self._input_saved_model_path,
          self._output_saved_model_path,
          quantization_options=quantization_options,
          representative_dataset=representative_dataset,
      )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options=quantization_options,
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )
    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
    self.assertTrue(self._contains_quantized_function_call(output_graphdef))
@test_util.run_in_graph_and_eager_modes
def test_model_ptq_call_twice(self):
self._create_matmul_model(
input_shape=(1, 1024),
weight_shape=(1024, 3),
saved_model_path=self._input_saved_model_path,
)
tags = {tag_constants.SERVING}
signature_def_keys = [signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=signature_def_keys,
op_set=quant_opts_pb2.TF,
)
representative_dataset: repr_dataset.RepresentativeDataset = [
{
'input_tensor': random_ops.random_uniform(shape=(1, 1024)),
}
for _ in range(8)
]
# Test the first run.
converted_model_1 = quantize_model.quantize(
self._input_saved_model_path,
output_directory=self._output_saved_model_path,
quantization_options=quantization_options,
representative_dataset=representative_dataset,
)
self.assertIsNotNone(converted_model_1)
self.assertCountEqual(
converted_model_1.signatures._signatures.keys(), signature_def_keys
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
# Test the second run on the same model.
converted_model_2 = quantize_model.quantize(
self._input_saved_model_path,
output_directory=self._output_saved_model_path_2,
quantization_options=quantization_options,
representative_dataset=representative_dataset,
)
self.assertIsNotNone(converted_model_2)
self.assertCountEqual(
converted_model_2.signatures._signatures.keys(), signature_def_keys
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path_2
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
  def test_model_ptq_preserving_assets_extra(self):
    """Checks that the `assets.extra` directory survives quantization.

    `assets.extra` files (e.g. TF Serving warmup requests) are not part of
    the SavedModel graph, so the quantizer must copy them to the output model
    verbatim.
    """
    self._create_matmul_model(
        input_shape=(1, 1024),
        weight_shape=(1024, 3),
        saved_model_path=self._input_saved_model_path,
    )
    # Plant an extra asset file next to the input model.
    asset_filename = 'assets.extra/tf_serving_warmup_requests'
    file_io.create_dir_v2(
        os.path.join(self._input_saved_model_path, 'assets.extra')
    )
    file_io.write_string_to_file(
        filename=os.path.join(self._input_saved_model_path, asset_filename),
        file_content='Test content',
    )

    tags = {tag_constants.SERVING}
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=quant_opts_pb2.TF,
    )

    # Use plain python lists as representative samples.
    representative_dataset = [
        {
            'input_tensor': [[i * 0.1 for i in range(1024)]],
        }
        for _ in range(4)
    ]

    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options=quantization_options,
        representative_dataset=representative_dataset,
    )
    self.assertIsNotNone(converted_model)

    # Check if the assets.extra file exists in the output model.
    self.assertTrue(
        file_io.file_exists_v2(
            os.path.join(self._output_saved_model_path, asset_filename)
        )
    )
  # tf.data.Dataset can be used as an Iterable (and thus as a representative
  # dataset) only in TF2 (eager mode).
  @test_util.run_v2_only
  def test_model_ptq_use_tf_dataset_for_representative_dataset(self):
    """Tests PTQ with a tf.data.Dataset as the representative dataset."""
    self._create_matmul_model(
        input_shape=(1, 1024),
        weight_shape=(1024, 3),
        saved_model_path=self._input_saved_model_path,
    )

    tags = {tag_constants.SERVING}
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=quant_opts_pb2.TF,
    )

    representative_samples = [
        {
            'input_tensor': random_ops.random_uniform(shape=(1, 1024)),
        }
        for _ in range(8)
    ]

    # Construct a tf.data.Dataset from the representative samples.
    representative_dataset = dataset_ops.DatasetV2.from_generator(
        lambda: representative_samples,
        output_signature={
            'input_tensor': tensor_spec.TensorSpec(
                shape=(1, 1024), dtype=dtypes.float32
            ),
        },
    )

    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options=quantization_options,
        representative_dataset=representative_dataset,
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )

    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
    self.assertTrue(self._contains_quantized_function_call(output_graphdef))
@test_util.run_in_graph_and_eager_modes
def test_model_ptq_no_representative_sample_not_quantized(self):
self._create_matmul_model(
input_shape=(1, 1024),
weight_shape=(1024, 3),
saved_model_path=self._input_saved_model_path,
)
tags = {tag_constants.SERVING}
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=['serving_default'],
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
# Put no sample into the representative dataset to make calibration
# impossible.
representative_dataset=[],
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {'serving_default'}
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
# Model is not quantized because there was no sample data for calibration.
self.assertFalse(self._contains_quantized_function_call(output_graphdef))
  @test_util.run_in_graph_and_eager_modes
  def test_model_ptq_with_uncalibrated_subgraph(self):
    """Tests PTQ on a model with a data-dependent branch.

    Only the branch actually taken during calibration gathers statistics;
    the branch that is never executed must stay unquantized.
    """

    class IfModel(module.Module):
      """A model that contains a branching op."""

      def __init__(self):
        # Two independent (filters, bias) pairs — one per branch.
        self.filters_0 = np.random.uniform(
            low=-1.0, high=1.0, size=(4, 3)
        ).astype('f4')
        self.bias_0 = np.random.uniform(low=-1.0, high=1.0, size=(3,)).astype(
            'f4'
        )

        self.filters_1 = np.random.uniform(
            low=-1.0, high=1.0, size=(4, 3)
        ).astype('f4')
        self.bias_1 = np.random.uniform(low=-1.0, high=1.0, size=(3,)).astype(
            'f4'
        )

      @def_function.function(
          input_signature=[
              tensor_spec.TensorSpec(shape=[1, 4], dtype=dtypes.float32)
          ]
      )
      def model_fn(self, x: core.Tensor) -> Mapping[str, core.Tensor]:
        """Runs the input tensor to a branched operations.

        The graph is branched by a condition whether the sum of elements of
        `x` is greater than 10.

        Args:
          x: Input tensor.

        Returns:
          A map of: output key -> output result.
        """
        if math_ops.reduce_sum(x) > 10.0:
          out = math_ops.matmul(x, self.filters_0)
          out = nn_ops.bias_add(out, self.bias_0)
          return {'output': out}

        out = math_ops.matmul(x, self.filters_1)
        out = nn_ops.bias_add(out, self.bias_1)
        return {'output': out}

    model = IfModel()
    saved_model_save.save(model, self._input_saved_model_path)

    def data_gen() -> repr_dataset.RepresentativeDataset:
      # Samples are drawn from [0, 1), so reduce_sum(x) < 4 and the condition
      # is always False during calibration — only the else-path runs.
      for _ in range(8):
        yield {
            'x': ops.convert_to_tensor(
                np.random.uniform(low=0.0, high=1.0, size=(1, 4)).astype('f4')
            ),
        }

    tags = {tag_constants.SERVING}
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['serving_default'],
        op_set=quant_opts_pb2.TF,
    )

    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=data_gen(),
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )
    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
    self.assertTrue(self._contains_quantized_function_call(output_graphdef))

    # Tests that the false branch contains a quantized function call whereas
    # the true branch doesn't.
    def _is_quantized_function_call_node(
        node_def: node_def_pb2.NodeDef,
    ) -> bool:
      # A quantized call is a PartitionedCall whose target function name
      # carries the 'quantized_' prefix.
      return node_def.op == 'PartitionedCall' and node_def.attr[
          'f'
      ].func.name.startswith('quantized_')

    for func in output_graphdef.library.function:
      if func.signature.name.startswith('cond_false'):
        self.assertTrue(
            any(map(_is_quantized_function_call_node, func.node_def))
        )
      elif func.signature.name.startswith('cond_true'):
        self.assertFalse(
            any(map(_is_quantized_function_call_node, func.node_def))
        )
  # Run this test only with the eager mode.
  @test_util.run_v2_only
  def test_ptq_model_with_multiple_signatures(self):
    """PTQ on a two-signature model calibrated from TFRecord dataset files."""
    # Create and save a model having 2 signatures.
    model = MultipleSignatureModel()

    signatures = {
        'sig1': model.matmul.get_concrete_function(
            tensor_spec.TensorSpec(shape=(1, 4), dtype=dtypes.float32)
        ),
        'sig2': model.conv.get_concrete_function(
            tensor_spec.TensorSpec(shape=(1, 3, 4, 3), dtype=dtypes.float32)
        ),
    }
    saved_model_save.save(
        model, self._input_saved_model_path, signatures=signatures
    )

    def data_gen_sig1() -> repr_dataset.RepresentativeDataset:
      """Generates representative samples for signature 'sig1'.

      Yields:
        A feed dict mapping input key 'matmul_input' to a sample tensor.
      """
      for _ in range(4):
        yield {'matmul_input': random_ops.random_uniform(shape=(1, 4))}

    def data_gen_sig2() -> repr_dataset.RepresentativeDataset:
      """Generates representative samples for signature 'sig2'.

      Yields:
        A feed dict mapping input key 'conv_input' to a sample tensor.
      """
      for _ in range(4):
        yield {'conv_input': random_ops.random_uniform(shape=(1, 3, 4, 3))}

    # Save one TFRecord-backed dataset per signature; the resulting file map
    # is referenced from the quantization options.
    dataset_file_map = repr_dataset.TfRecordRepresentativeDatasetSaver({
        'sig1': os.path.join(self._input_saved_model_path, 'sig1_repr'),
        'sig2': os.path.join(self._input_saved_model_path, 'sig2_repr'),
    }).save({
        'sig1': data_gen_sig1(),
        'sig2': data_gen_sig2(),
    })

    tags = {tag_constants.SERVING}
    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['sig1', 'sig2'],
        op_set=quant_opts_pb2.TF,
        representative_datasets=dataset_file_map,
    )
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        output_directory=self._output_saved_model_path,
        quantization_options=quantization_options,
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'sig1', 'sig2'}
    )

    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
    self.assertTrue(self._contains_quantized_function_call(output_graphdef))
  # Run this test only with the eager mode.
  @test_util.run_v2_only
  def test_ptq_multiple_signatures_invalid_dataset_raises_value_error(self):
    """A single flat dataset for a multi-signature model must be rejected."""
    # Create and save a model having 2 signatures.
    model = MultipleSignatureModel()

    signatures = {
        'sig1': model.matmul.get_concrete_function(
            tensor_spec.TensorSpec(shape=(1, 4), dtype=dtypes.float32)
        ),
        'sig2': model.conv.get_concrete_function(
            tensor_spec.TensorSpec(shape=(1, 3, 4, 3), dtype=dtypes.float32)
        ),
    }
    saved_model_save.save(
        model, self._input_saved_model_path, signatures=signatures
    )

    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags={tag_constants.SERVING},
        signature_keys=['sig1', 'sig2'],
    )

    # Pass a plain dataset instead of a mapping of signature key -> dataset.
    # A model with multiple signatures requires the mapping form, so this is
    # invalid (see the expected error message below).
    invalid_dataset: repr_dataset.RepresentativeDataset = [
        {'matmul_input': random_ops.random_uniform(shape=(1, 4))}
        for _ in range(8)
    ]

    with self.assertRaisesRegex(
        Exception, 'Representative dataset is not a mapping'
    ):
      quantize_model.quantize(
          self._input_saved_model_path,
          output_directory=self._output_saved_model_path,
          quantization_options=quantization_options,
          representative_dataset=invalid_dataset,
      )
@test_util.run_in_graph_and_eager_modes
def test_ptq_model_with_tf1_saved_model_with_variable_for_conv2d(self):
signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
tags = {tag_constants.SERVING}
input_placeholder = self._create_and_save_tf1_conv_model(
self._input_saved_model_path,
signature_key,
tags,
input_key='x',
output_key='output',
use_variable=True,
)
signature_keys = [signature_key]
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=signature_keys,
op_set=quant_opts_pb2.TF,
)
data_gen = self._create_data_generator(
input_key='x', shape=input_placeholder.shape
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
representative_dataset=data_gen,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), signature_keys
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
  @parameterized.named_parameters(
      ('use_constant_with_int32_input', dtypes.int32, False),
      ('use_variable_with_int32_input', dtypes.int32, True),
      ('use_constant_with_int64_input', dtypes.int64, False),
      ('use_variable_with_int64_input', dtypes.int64, True),
  )
  @test_util.run_in_graph_and_eager_modes
  def test_ptq_model_with_tf1_saved_model_with_variable_for_gather(
      self, input_type, use_variable
  ):
    """PTQ on a TF1 gather model with integer indices.

    Args:
      input_type: dtype of the gather indices (int32 or int64).
      use_variable: Whether the gather parameters are stored in a variable
        rather than a constant.
    """
    signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    tags = {tag_constants.SERVING}

    input_placeholder = self._create_and_save_tf1_gather_model(
        self._input_saved_model_path,
        signature_key,
        tags,
        input_key='x',
        output_key='output',
        input_type=input_type,
        use_variable=use_variable,
    )

    signature_keys = [signature_key]

    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=signature_keys,
        op_set=quant_opts_pb2.TF,
    )

    # Integer samples in [0, 10) — presumably bounded to stay within the
    # gather table's valid index range (TODO: confirm against the model
    # builder helper).
    data_gen = self._create_data_generator(
        input_key='x',
        shape=input_placeholder.shape,
        minval=0,
        maxval=10,
        dtype=input_type,
    )

    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=data_gen,
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), signature_keys
    )

    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
    self.assertTrue(self._contains_quantized_function_call(output_graphdef))
  def test_ptq_model_with_variable_tf1_saved_model_unfreeze_constants(self):
    """PTQ with `freeze_all_variables=False` keeps large weights as variables.

    Verifies that the quantized model still contains a variable node and that
    the variable's name and shape match the entries stored in the output
    model's checkpoint.
    """
    signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    tags = {tag_constants.SERVING}

    input_placeholder = self._create_and_save_tf1_conv_model(
        self._input_saved_model_path,
        signature_key,
        tags,
        input_key='x',
        output_key='output',
        input_shape=(1, 16, 16, 8),
        # Uses large filter to exceed the constant size threshold of 64KiB
        # (specified by `kDefaultConstantSizeThresholdInBytes`) for unfreezing.
        filter_shape=(256, 8, 8, 16),
        use_variable=True,
    )

    signature_keys = [signature_key]

    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=signature_keys,
        op_set=quant_opts_pb2.TF,
        freeze_all_variables=False,
    )

    repr_ds = self._create_data_generator(
        input_key='x', shape=input_placeholder.shape, num_examples=2
    )

    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=repr_ds,
    )

    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'serving_default'}
    )

    # Confirm that the quantized model loads successfully.
    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )

    with session.Session(graph=ops.Graph()) as sess:
      output_meta_graph_def = output_loader.load(sess, tags)

      # Checks that quantization is applied.
      output_graphdef = output_meta_graph_def.graph_def
      self.assertTrue(self._contains_quantized_function_call(output_graphdef))

      # Tests that there are variables in the model.
      variable_node_defs = _find_variables(output_graphdef)
      self.assertLen(variable_node_defs, 1)

      # Reads the variables from the checkpoint file and matches with the
      # variables found in the graph.
      checkpoint_path = os.path.join(
          self._output_saved_model_path, 'variables', 'variables'
      )
      var_name_and_shapes = checkpoint_utils.list_variables(checkpoint_path)

      # Checks that each variable's name and shape match.
      self.assertEqual(len(variable_node_defs), len(var_name_and_shapes))
      for var_name, shape in var_name_and_shapes:
        self.assertIn(var_name, variable_node_defs)
        self.assertEqual(
            shape,
            tensor_shape.TensorShape(
                variable_node_defs[var_name].attr['shape'].shape
            ),
        )
@test_util.run_in_graph_and_eager_modes
def test_ptq_model_with_tf1_saved_model(self):
tags = {tag_constants.SERVING}
signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
input_placeholder = self._create_and_save_tf1_conv_model(
self._input_saved_model_path,
signature_key,
tags,
input_key='p',
output_key='output',
use_variable=False,
)
signature_keys = [signature_key]
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=signature_keys,
op_set=quant_opts_pb2.TF,
)
data_gen = self._create_data_generator(
input_key='p', shape=input_placeholder.shape
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
representative_dataset=data_gen,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), signature_keys
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
  @test_util.run_in_graph_and_eager_modes
  def test_ptq_model_with_tf1_saved_model_multiple_signatures(self):
    """PTQ on a TF1 SavedModel that contains two signatures."""
    tags = {tag_constants.SERVING}

    # Create two models and add them to a same SavedModel under different
    # signature keys.
    with ops.Graph().as_default(), session.Session() as sess:
      in_placeholder_1, output_tensor_1 = self._create_simple_tf1_conv_model()
      sig_def_1 = signature_def_utils_impl.predict_signature_def(
          inputs={'x1': in_placeholder_1}, outputs={'output1': output_tensor_1}
      )

      in_placeholder_2, output_tensor_2 = self._create_simple_tf1_conv_model()
      sig_def_2 = signature_def_utils_impl.predict_signature_def(
          inputs={'x2': in_placeholder_2}, outputs={'output2': output_tensor_2}
      )

      v1_builder = builder.SavedModelBuilder(self._input_saved_model_path)
      v1_builder.add_meta_graph_and_variables(
          sess,
          tags,
          signature_def_map={
              'sig1': sig_def_1,
              'sig2': sig_def_2,
          },
      )

      v1_builder.save()

    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=['sig1', 'sig2'],
        op_set=quant_opts_pb2.TF,
    )

    def data_gen_sig1() -> repr_dataset.RepresentativeDataset:
      """Generates representative samples for signature 'sig1'.

      Yields:
        A feed dict mapping input key 'x1' to a sample tensor.
      """
      for _ in range(4):
        yield {'x1': random_ops.random_uniform(shape=in_placeholder_1.shape)}

    def data_gen_sig2() -> repr_dataset.RepresentativeDataset:
      """Generates representative samples for signature 'sig2'.

      Yields:
        A feed dict mapping input key 'x2' to a sample tensor.
      """
      for _ in range(4):
        yield {'x2': random_ops.random_uniform(shape=in_placeholder_2.shape)}

    # A mapping keyed by signature name routes each dataset to the matching
    # signature during calibration.
    converted_model = quantize_model.quantize(
        self._input_saved_model_path,
        output_directory=self._output_saved_model_path,
        quantization_options=quantization_options,
        representative_dataset={
            'sig1': data_gen_sig1(),
            'sig2': data_gen_sig2(),
        },
    )
    self.assertIsNotNone(converted_model)
    self.assertCountEqual(
        converted_model.signatures._signatures.keys(), {'sig1', 'sig2'}
    )

    output_loader = saved_model_loader.SavedModelLoader(
        self._output_saved_model_path
    )
    output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
    self.assertTrue(self._contains_quantized_function_call(output_graphdef))
@test_util.run_in_graph_and_eager_modes
def test_ptq_model_with_tf1_saved_model_invalid_input_key_raises_value_error(
self,
):
signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
tags = {tag_constants.SERVING}
input_placeholder = self._create_and_save_tf1_conv_model(
self._input_saved_model_path,
signature_key,
tags,
input_key='x',
output_key='output',
use_variable=False,
)
signature_keys = [signature_key]
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=signature_keys,
)
# Representative generator function that yields with an invalid input key.
invalid_data_gen = self._create_data_generator(
input_key='invalid_input_key', shape=input_placeholder.shape
)
with self.assertRaisesRegex(
Exception,
'Invalid input keys for representative sample.',
):
quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
representative_dataset=invalid_data_gen,
)
@test_util.run_in_graph_and_eager_modes
def test_ptq_model_with_non_default_tags(self):
signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
# Use a different set of tags other than {"serve"}.
tags = {tag_constants.TRAINING, tag_constants.GPU}
# Non-default tags are usually used when saving multiple metagraphs in TF1.
input_placeholder = self._create_and_save_tf1_conv_model(
self._input_saved_model_path,
signature_key,
tags,
input_key='input',
output_key='output',
use_variable=True,
)
signature_keys = [signature_key]
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=signature_keys,
op_set=quant_opts_pb2.TF,
)
data_gen = self._create_data_generator(
input_key='input', shape=input_placeholder.shape
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
representative_dataset=data_gen,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), signature_keys
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
@test_util.run_in_graph_and_eager_modes
def test_ptq_model_with_wrong_tags_raises_error(self):
signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
save_tags = {tag_constants.TRAINING, tag_constants.GPU}
input_placeholder = self._create_and_save_tf1_conv_model(
self._input_saved_model_path,
signature_key,
save_tags,
input_key='input',
output_key='output',
use_variable=True,
)
# Try to use a different set of tags to quantize.
tags = {tag_constants.SERVING}
signature_keys = [signature_key]
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=signature_keys,
)
data_gen = self._create_data_generator(
input_key='input', shape=input_placeholder.shape
)
with self.assertRaisesRegex(
RuntimeError,
"MetaGraphDef associated with tags {'serve'} could not be found",
):
quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
representative_dataset=data_gen,
)
  def test_ptq_vocab_table_lookup_model(self):
    """PTQ on a model containing a vocab lookup table.

    Checks both that the model is quantized and that the lookup table is
    initialized correctly in the quantized output model.
    """
    tags = {tag_constants.SERVING}
    signature_def_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY

    # Create and save a simple model that involves a hash table.
    inputs, outputs = self._create_and_save_vocab_table_lookup_model_tf1(
        self._input_saved_model_path, tags, signature_def_key
    )

    # Make sure that the desired input key and output key is present.
    self.assertIn('input_vocabs', inputs.keys())
    self.assertIn('lookup', outputs.keys())

    # Representative dataset is composed of a set of vocabs for table lookup.
    repr_ds = [
        {'input_vocabs': np.array([b'hello', b'model', b'quantization'])}
        for _ in range(4)
    ]

    signature_def_keys = [signature_def_key]

    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=signature_def_keys,
        op_set=quant_opts_pb2.TF,
    )

    quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=repr_ds,
    )

    # Tests table lookup to make sure the table has been initialized
    # successfully.
    with session.Session(graph=ops.Graph()) as sess:
      output_meta_graph_def = saved_model_loader.load(
          sess, tags=tags, export_dir=self._output_saved_model_path
      )

      # The graph should contain a quantized function call (it contains a
      # single f32 matmul node).
      self.assertTrue(
          self._contains_quantized_function_call(
              output_meta_graph_def.graph_def
          )
      )
      self.assertCountEqual(
          output_meta_graph_def.signature_def.keys(), signature_def_keys
      )

      signature_def = output_meta_graph_def.signature_def[signature_def_key]

      # Resolve the input/output tensors declared by the signature in the
      # loaded graph.
      input_tensor_name = signature_def.inputs['input_vocabs'].name
      input_tensor = sess.graph.get_tensor_by_name(input_tensor_name)

      lookup_tensor_name = signature_def.outputs['lookup'].name
      lookup_tensor = sess.graph.get_tensor_by_name(lookup_tensor_name)

      lookup_val = sess.run(
          lookup_tensor,
          feed_dict={
              input_tensor: np.array([b'model', b'quantization', b'hello'])
          },
      )

      # 'model', 'quantization', 'hello' resolve to ids 1, 2, 0 respectively.
      self.assertAllClose(lookup_val, [1.0, 2.0, 0.0])
  def test_ptq_file_init_hash_table_lookup_model(self):
    """PTQ on a model whose hash table is initialized from a file.

    Checks both that the model is quantized and that the file-initialized
    table still performs lookups correctly in the quantized output model.
    """
    tags = {tag_constants.SERVING}
    signature_def_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY

    # Create and save a simple model that involves a hash table.
    inputs, outputs = self._create_and_save_file_init_hash_table_model_tf1(
        self._input_saved_model_path, tags, signature_def_key
    )

    # Make sure that the desired input key and output key is present.
    self.assertIn('input_vocabs', inputs.keys())
    self.assertIn('lookup', outputs.keys())

    # Representative dataset is composed of a set of vocabs for table lookup.
    repr_ds = [
        {'input_vocabs': np.array([b'static', b'range', b'quantization'])}
        for _ in range(4)
    ]

    signature_def_keys = [signature_def_key]

    quantization_options = quant_opts_pb2.QuantizationOptions(
        quantization_method=quant_opts_pb2.QuantizationMethod(
            preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
        ),
        tags=tags,
        signature_keys=signature_def_keys,
        op_set=quant_opts_pb2.TF,
    )

    quantize_model.quantize(
        self._input_saved_model_path,
        self._output_saved_model_path,
        quantization_options,
        representative_dataset=repr_ds,
    )

    # Tests table lookup to make sure the table has been initialized
    # successfully.
    with session.Session(graph=ops.Graph()) as sess:
      output_meta_graph_def = saved_model_loader.load(
          sess, tags=tags, export_dir=self._output_saved_model_path
      )

      # The graph should contain a quantized function call (it contains a
      # single f32 matmul node).
      self.assertTrue(
          self._contains_quantized_function_call(
              output_meta_graph_def.graph_def
          )
      )
      self.assertCountEqual(
          output_meta_graph_def.signature_def.keys(), signature_def_keys
      )

      signature_def = output_meta_graph_def.signature_def[signature_def_key]

      # Resolve the input/output tensors declared by the signature in the
      # loaded graph.
      input_tensor_name = signature_def.inputs['input_vocabs'].name
      input_tensor = sess.graph.get_tensor_by_name(input_tensor_name)

      lookup_tensor_name = signature_def.outputs['lookup'].name
      lookup_tensor = sess.graph.get_tensor_by_name(lookup_tensor_name)

      lookup_val = sess.run(
          lookup_tensor,
          feed_dict={
              input_tensor: np.array([b'dynamic', b'quantization', b'range'])
          },
      )

      # "dynamic" is not in the table: -1 (default value)
      self.assertAllClose(lookup_val, [-1.0, 2.0, 1.0])
@parameterized.named_parameters(
('none', None, False, False, quant_opts_pb2.TF, False, 'SAME'),
('relu', nn_ops.relu, False, False, quant_opts_pb2.TF, False, 'SAME'),
('relu6', nn_ops.relu6, False, False, quant_opts_pb2.TF, False, 'SAME'),
('with_bias', None, True, False, quant_opts_pb2.TF, False, 'SAME'),
(
'with_bias_and_relu',
nn_ops.relu,
True,
False,
quant_opts_pb2.TF,
False,
'SAME',
),
(
'with_bias_and_relu6',
nn_ops.relu6,
True,
False,
quant_opts_pb2.TF,
False,
'SAME',
),
('none_to_xla', None, False, False, quant_opts_pb2.XLA, False, 'SAME'),
(
'with_bias_and_relu6_to_xla',
nn_ops.relu6,
True,
False,
quant_opts_pb2.XLA,
False,
'SAME',
),
(
'with_bias_to_xla_dynamic',
None,
True,
False,
quant_opts_pb2.XLA,
True,
'SAME',
),
(
'none_to_xla_padding_valid',
None,
False,
False,
quant_opts_pb2.XLA,
False,
'VALID',
),
(
'with_bias_and_relu6_to_xla_padding_valid',
nn_ops.relu6,
True,
False,
quant_opts_pb2.XLA,
False,
'VALID',
),
(
'with_bias_to_xla_dynamic_padding_valid',
None,
True,
False,
quant_opts_pb2.XLA,
True,
'VALID',
),
)
def test_conv3d_ptq_model(
self,
activation_fn: Optional[ops.Operation],
has_bias: bool,
has_batch_norm: bool,
target_opset: quant_opts_pb2.OpSet,
input_shape_dynamic: bool,
padding: str,
):
input_shape = [1, 3, 4, 3, 3]
if input_shape_dynamic:
input_shape = [None, None, None, None, 3]
class ConvModel(module.Module):
def __init__(self):
self.filters = np.random.uniform(
low=-0.5, high=0.5, size=(2, 3, 3, 3, 2)
).astype('f4')
self.bias = np.random.uniform(low=0.0, high=0.2, size=(2)).astype('f4')
@def_function.function(
input_signature=[
tensor_spec.TensorSpec(shape=input_shape, dtype=dtypes.float32)
]
)
def conv3d(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
"""Performs a 3D convolution operation.
Args:
input_tensor: Input tensor to perform convolution on.
Returns:
A map of: output key -> output result.
"""
out = nn_ops.conv3d(
input_tensor,
self.filters,
strides=[1, 1, 2, 1, 1],
dilations=[1, 1, 1, 1, 1],
padding=padding,
data_format='NDHWC',
)
if has_bias:
out = nn_ops.bias_add(out, self.bias)
if activation_fn is not None:
out = activation_fn(out)
return {'output': out}
model = ConvModel()
saved_model_save.save(model, self._input_saved_model_path)
repr_ds = []
for _ in range(500):
repr_ds.append({
'input_tensor': ops.convert_to_tensor(
np.random.uniform(
low=-0.1, high=0.2, size=(1, 3, 4, 3, 3)
).astype('f4')
),
})
signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
tags = {tag_constants.SERVING}
# Check the converted model with TF opset as the baseline.
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=[signature_key],
op_set=quant_opts_pb2.TF,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
representative_dataset=repr_ds,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {signature_key}
)
input_data = np.random.uniform(
low=-0.1, high=0.2, size=(1, 3, 4, 3, 3)
).astype('f4')
expected_outputs = model.conv3d(input_data)
got_outputs = converted_model.signatures[signature_key](
input_tensor=ops.convert_to_tensor(input_data)
)
self.assertAllClose(expected_outputs, got_outputs, atol=0.00494)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
# Check the converted model in the target opset.
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=[signature_key],
op_set=target_opset,
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path_2,
quantization_options,
representative_dataset=repr_ds,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {signature_key}
)
loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path_2
)
graphdef = loader.get_meta_graph_def_from_tags(tags).graph_def
if target_opset == quant_opts_pb2.XLA:
self.assertTrue(self._contains_op(graphdef, 'XlaConvV2'))
new_outputs = converted_model.signatures[signature_key](
input_tensor=ops.convert_to_tensor(input_data)
)
# The quantized model in XLA opset is expected to have similar fidelity
# compared to the quantized model in TF opset.
self.assertAllClose(new_outputs, got_outputs, atol=0.00306)
self.assertAllClose(new_outputs, expected_outputs, atol=0.00494)
# Tests the case of having a signature key of `main` because it is a
# special name in the TF quantizer's MLIR pipeline that should be treated
# with care.
@test_util.run_in_graph_and_eager_modes
def test_ptq_model_with_signature_key_main(self):
signature_key = 'main'
tags = {tag_constants.SERVING}
input_placeholder = self._create_and_save_tf1_conv_model(
self._input_saved_model_path,
signature_key,
tags,
input_key='x',
output_key='output',
use_variable=True,
)
signature_keys = [signature_key]
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
tags=tags,
signature_keys=signature_keys,
op_set=quant_opts_pb2.TF,
)
data_gen = self._create_data_generator(
input_key='x', shape=input_placeholder.shape
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
representative_dataset=data_gen,
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), signature_keys
)
output_loader = saved_model_loader.SavedModelLoader(
self._output_saved_model_path
)
output_graphdef = output_loader.get_meta_graph_def_from_tags(tags).graph_def
self.assertTrue(self._contains_quantized_function_call(output_graphdef))
# Makes sure that the original function identified by the signature key
# `main` is renamed to `main_0` (see `InsertMainFunctionPass` for details).
self.assertTrue(
any(
map(
lambda func: func.signature.name == 'main_0',
output_graphdef.library.function,
)
)
)
| StaticRangeQuantizationTest |
python | PrefectHQ__prefect | src/prefect/server/utilities/database.py | {
"start": 25039,
"end": 26090
} | class ____(functions.ReturnTypeFromArgs[T]):
inherit_cache: bool = True
@compiles(greatest, "sqlite")
def sqlite_greatest_as_max(
element: greatest[Any], compiler: SQLCompiler, **kwargs: Any
) -> str:
# TODO: SQLite MAX() is very close to PostgreSQL GREATEST(), *except* when
# it comes to nulls: SQLite MAX() returns NULL if _any_ clause is NULL,
# whereas PostgreSQL GREATEST() only returns NULL if _all_ clauses are NULL.
#
# A work-around is to use MAX() as an aggregate function instead, in a
# subquery. This, however, would probably require a VALUES-like construct
# that SQLA doesn't currently support for SQLite. You can [provide
# compilation hooks for
# this](https://github.com/sqlalchemy/sqlalchemy/issues/7228#issuecomment-1746837960)
# but this would only be worth it if sa.func.greatest() starts being used on
# values that include NULLs. Up until the time of this comment this hasn't
# been an issue.
return compiler.process(sa.func.max(*element.clauses), **kwargs)
| greatest |
python | sympy__sympy | sympy/polys/series/ringpython.py | {
"start": 29804,
"end": 39380
} | class ____:
"""
Python implementation of power series ring over integers :ref:`ZZ`.
This class provides comprehensive power series operations over the integer ring,
supporting both series manipulations with precision handling and truncation.
Parameters
==========
prec : int, optional
The default precision for power series operations. Default is 6.
Examples
========
>>> from sympy.polys.series.ringpython import PythonPowerSeriesRingZZ
>>> R = PythonPowerSeriesRingZZ()
>>> s = R([1, 2, 3]) # 1 + 2*x + 3*x^2
>>> R.print(s)
1 + 2*x + 3*x**2
>>> s_pow = R.pow_int(s, 2) # Square the series
>>> R.print(s_pow)
1 + 4*x + 10*x**2 + 12*x**3 + 9*x**4
>>> s_inv = R.inverse(R([1, 1])) # Inverse of 1 + x
>>> R.print(s_inv)
1 - x + x**2 - x**3 + x**4 - x**5 + O(x**6)
Note
====
The recommended way to create a power series ring is using the factory function
which returns a new instance of the higher level PowerSeriesRing class with
the ring generator:
>>> from sympy.polys.series import power_series_ring
>>> from sympy import ZZ
>>> R, x = power_series_ring("x", ZZ, 6)
>>> R
Power Series Ring in x over ZZ of size 6
>>> type(x)
<class 'sympy.polys.series.ring.PowerSeriesElement'>
This function automatically uses the Flint implementation if available for better
performance, falling back to the Python implementation otherwise.
See Also
========
sympy.polys.series.ringpython.PythonPowerSeriesRingQQ
sympy.polys.series.ringflint.FlintPowerSeriesRingZZ
sympy.polys.series.ring.power_series_ring
sympy.polys.series.ring.PowerSeriesRingRing
sympy.polys.series.ring.PowerSeriesRingField
sympy.polys.series.ring.PowerSeriesElement
"""
_domain = ZZ
def __init__(self, prec: int = 6) -> None:
if prec < 0:
raise ValueError("Power series precision must be non-negative")
self._prec = prec
def __repr__(self) -> str:
return (
f"Python Power Series Ring over {self._domain} with precision {self._prec}"
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, PythonPowerSeriesRingZZ):
return NotImplemented
return self._prec == other.prec
def __hash__(self) -> int:
return hash((self._domain, self._prec))
def __call__(
self, coeffs: Sequence[MPZ | int], prec: int | None = None
) -> USeries[MPZ]:
"""
Create a power series from a list of coefficients.
If `prec` is not specified, it defaults to the ring's precision.
"""
s: list[MPZ] = []
for c in coeffs:
if isinstance(c, MPZ):
s.append(c)
elif isinstance(c, int):
s.append(self._domain(c))
else:
raise TypeError(f"Unsupported coefficient type: {type(c)}")
return self.from_list(s, prec)
@property
def domain(self) -> Domain[MPZ]:
"""Return the ground domain of the power series ring."""
return self._domain
@property
def prec(self) -> int:
"""Return the ring's precision."""
return self._prec
@property
def one(self) -> USeries[MPZ]:
if self._prec == 0:
return ([], 0)
return ([self._domain.one], None)
@property
def zero(self) -> USeries[MPZ]:
if self._prec == 0:
return ([], 0)
return ([], None)
@property
def gen(self) -> USeries[MPZ]:
if self._prec < 2:
return ([], self._prec)
return ([self._domain.one, self._domain.zero], None)
def pretty(
self, s: USeries[MPZ], *, symbol: str = "x", ascending: bool = True
) -> str:
"""Return a pretty-printed string representation of a power series."""
coeffs, prec = s
return series_pprint(coeffs, prec, sym=symbol, ascending=ascending)
def print(
self, s: USeries[MPZ], *, symbol: str = "x", ascending: bool = True
) -> None:
"""Print a pretty-printed representation of a power series."""
print(self.pretty(s, symbol=symbol, ascending=ascending))
def from_list(self, coeffs: list[MPZ], prec: int | None = None) -> USeries[MPZ]:
"""
Create a power series from a list of ground coefficients.
If `prec` is not specified, it defaults to the ring's precision.
"""
coeffs = dup_reverse(coeffs, ZZ)
if prec is None:
if len(coeffs) <= self._prec:
return coeffs, None
else:
prec = self._prec
if len(coeffs) > prec:
coeffs = dup_truncate(coeffs, prec, self._domain)
return coeffs, prec
def from_element(self, s: USeries[MPZ]) -> USeries[MPZ]:
"""Convert a power series element into the corresponding element of this ring."""
coeffs, prec = s
return _useries(coeffs, prec, self._domain, self._prec)
def to_list(self, s: USeries[MPZ]) -> list[MPZ]:
"""Returns the list of series coefficients."""
coeffs, _ = s
return coeffs[::-1]
def to_dense(self, s: USeries[MPZ]) -> dup[MPZ]:
"""Return the coefficients of a power series as a dense list."""
return list(s[0])
def series_prec(self, s: USeries[MPZ]) -> int | None:
"""Return the precision of a power series."""
_, prec = s
return prec
def equal(self, s1: USeries[MPZ], s2: USeries[MPZ]) -> bool | None:
"""Check if two power series are equal up to their minimum precision."""
return _useries_equality(s1, s2, self._domain, self._prec)
def equal_repr(self, s1: USeries[MPZ], s2: USeries[MPZ]) -> bool:
"""Check if two power series have the same representation."""
return _useries_equal_repr(s1, s2)
def is_ground(self, arg: USeries[MPZ]) -> bool | None:
"""Check if a arg is a ground element of the power series ring."""
if self.prec == 0:
return None
return len(self.to_list(arg)) <= 1
def constant_coefficient(self, s: USeries[MPZ]) -> MPZ:
"""Return the constant coefficient of a power series."""
coeffs, _ = s
if len(coeffs) > 0:
return coeffs[-1]
return self._domain.zero
def positive(self, s: USeries[MPZ]) -> USeries[MPZ]:
"""Return the unary positive of a power series, adjusted to the ring's precision."""
return _useries_pos(s, self._domain, self._prec)
def negative(self, s: USeries[MPZ]) -> USeries[MPZ]:
"""Return the unary negative of a power series."""
return _useries_neg(s, self._domain, self._prec)
def add(self, s1: USeries[MPZ], s2: USeries[MPZ]) -> USeries[MPZ]:
"""Add two power series."""
return _useries_add(s1, s2, self._domain, self._prec)
def add_ground(self, s: USeries[MPZ], n: MPZ) -> USeries[MPZ]:
"""Add a ground element to a power series."""
return _useries_add_ground(s, n, self._domain, self._prec)
def subtract(self, s1: USeries[MPZ], s2: USeries[MPZ]) -> USeries[MPZ]:
"""Subtract two power series."""
return _useries_sub(s1, s2, self._domain, self._prec)
def subtract_ground(self, s: USeries[MPZ], n: MPZ) -> USeries[MPZ]:
"""Subtract a ground element from a power series."""
return _useries_sub_ground(s, n, self._domain, self._prec)
def rsubtract_ground(self, s: USeries[MPZ], n: MPZ) -> USeries[MPZ]:
"""Subtract a power series from a ground element."""
return _useries_rsub_ground(s, n, self._domain, self._prec)
def multiply(self, s1: USeries[MPZ], s2: USeries[MPZ]) -> USeries[MPZ]:
"""Multiply two power series."""
return _useries_mul(s1, s2, self._domain, self._prec)
def multiply_ground(self, s: USeries[MPZ], n: MPZ) -> USeries[MPZ]:
"""Multiply a power series by a ground element."""
return _useries_mul_ground(s, n, self._domain, self._prec)
def divide(self, s1: USeries[MPZ], s2: USeries[MPZ]) -> USeries[MPZ]:
"""Divide two power series."""
return _useries_div(s1, s2, self._domain, self._prec)
def pow_int(self, s: USeries[MPZ], n: int) -> USeries[MPZ]:
"""Raise a power series to a integer power."""
return _useries_pow_int(s, n, self._domain, self._prec)
def square(self, s: USeries[MPZ]) -> USeries[MPZ]:
"""Compute the square of a power series."""
return _useries_mul(s, s, self._domain, self._prec)
def compose(self, s1: USeries[MPZ], s2: USeries[MPZ]) -> USeries[MPZ]:
"""Compose two power series, `s1(s2)`."""
return _useries_compose(s1, s2, self._domain, self._prec)
def inverse(self, s: USeries[MPZ]) -> USeries[MPZ]:
"""Compute the multiplicative inverse of a power series."""
return _useries_inverse(s, self._domain, self._prec)
def reversion(self, s: USeries[MPZ]) -> USeries[MPZ]:
"""Compute the compositional inverse of a power series."""
return _useries_reversion(s, self._domain, self._prec)
def truncate(self, s: USeries[MPZ], n: int) -> USeries[MPZ]:
"""Truncate a power series to `n` terms."""
return _useries_truncate(s, n, self._domain)
def differentiate(self, s: USeries[MPZ]) -> USeries[MPZ]:
"""Compute the derivative of a power series."""
return _useries_derivative(s, self._domain, self._prec)
| PythonPowerSeriesRingZZ |
python | donnemartin__system-design-primer | solutions/system_design/social_graph/social_graph_snippets.py | {
"start": 133,
"end": 729
} | class ____(object):
def bfs(self, source, dest):
if source is None:
return False
queue = deque()
queue.append(source)
source.visit_state = State.visited
while queue:
node = queue.popleft()
print(node)
if dest is node:
return True
for adjacent_node in node.adj_nodes.values():
if adjacent_node.visit_state == State.unvisited:
queue.append(adjacent_node)
adjacent_node.visit_state = State.visited
return False
| Graph |
python | openai__openai-python | src/openai/types/audio/transcription_text_done_event.py | {
"start": 834,
"end": 1323
} | class ____(BaseModel):
input_tokens: int
"""Number of input tokens billed for this request."""
output_tokens: int
"""Number of output tokens generated."""
total_tokens: int
"""Total number of tokens used (input + output)."""
type: Literal["tokens"]
"""The type of the usage object. Always `tokens` for this variant."""
input_token_details: Optional[UsageInputTokenDetails] = None
"""Details about the input tokens billed for this request."""
| Usage |
python | getsentry__sentry | tests/sentry/integrations/cursor/test_client.py | {
"start": 276,
"end": 5385
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.api_key = "test_api_key"
self.webhook_secret = "test_webhook_secret"
self.cursor_client = CursorAgentClient(
api_key=self.api_key, webhook_secret=self.webhook_secret
)
self.webhook_url = "https://example.com/webhook"
self.repo_definition = SeerRepoDefinition(
integration_id="111",
provider="github",
owner="getsentry",
name="sentry",
external_id="123456",
branch_name="main",
)
@patch.object(CursorAgentClient, "post")
def test_launch_with_auto_create_pr_true(self, mock_post: Mock) -> None:
"""Test that launch() correctly passes auto_create_pr=True to the API"""
# Setup mock response
mock_response = Mock()
mock_response.json = {
"id": "agent_123",
"status": "running",
"name": "Test Agent",
"createdAt": "2023-01-01T00:00:00Z",
"source": {
"repository": "https://github.com/getsentry/sentry",
"ref": "main",
},
"target": {
"url": "https://cursor.com/agent/123",
"autoCreatePr": True,
"branchName": "fix-bug-123",
},
}
mock_post.return_value = mock_response
# Create launch request with auto_create_pr=True
request = CodingAgentLaunchRequest(
prompt="Fix this bug",
repository=self.repo_definition,
branch_name="fix-bug-123",
auto_create_pr=True,
)
# Launch the agent
self.cursor_client.launch(webhook_url=self.webhook_url, request=request)
# Assert that post was called with correct parameters
mock_post.assert_called_once()
call_kwargs = mock_post.call_args[1]
# Verify the payload contains autoCreatePr=True
payload = call_kwargs["data"]
assert payload["target"]["autoCreatePr"] is True
@patch.object(CursorAgentClient, "post")
def test_launch_with_auto_create_pr_false(self, mock_post: Mock) -> None:
"""Test that launch() correctly passes auto_create_pr=False to the API"""
# Setup mock response
mock_response = Mock()
mock_response.json = {
"id": "agent_123",
"status": "running",
"name": "Test Agent",
"createdAt": "2023-01-01T00:00:00Z",
"source": {
"repository": "https://github.com/getsentry/sentry",
"ref": "main",
},
"target": {
"url": "https://cursor.com/agent/123",
"autoCreatePr": False,
"branchName": "fix-bug-123",
},
}
mock_post.return_value = mock_response
# Create launch request with auto_create_pr=False
request = CodingAgentLaunchRequest(
prompt="Fix this bug",
repository=self.repo_definition,
branch_name="fix-bug-123",
auto_create_pr=False,
)
# Launch the agent
self.cursor_client.launch(webhook_url=self.webhook_url, request=request)
# Assert that post was called with correct parameters
mock_post.assert_called_once()
call_kwargs = mock_post.call_args[1]
# Verify the payload contains autoCreatePr=False
payload = call_kwargs["data"]
assert payload["target"]["autoCreatePr"] is False
@patch.object(CursorAgentClient, "post")
def test_launch_default_auto_create_pr(self, mock_post: Mock) -> None:
"""Test that launch() defaults auto_create_pr to False when not specified"""
# Setup mock response
mock_response = Mock()
mock_response.json = {
"id": "agent_123",
"status": "running",
"name": "Test Agent",
"createdAt": "2023-01-01T00:00:00Z",
"source": {
"repository": "https://github.com/getsentry/sentry",
"ref": "main",
},
"target": {
"url": "https://cursor.com/agent/123",
"autoCreatePr": False,
"branchName": "fix-bug-123",
},
}
mock_post.return_value = mock_response
# Create launch request without specifying auto_create_pr (should default to False)
request = CodingAgentLaunchRequest(
prompt="Fix this bug",
repository=self.repo_definition,
branch_name="fix-bug-123",
)
# Launch the agent
self.cursor_client.launch(webhook_url=self.webhook_url, request=request)
# Assert that post was called with correct parameters
mock_post.assert_called_once()
call_kwargs = mock_post.call_args[1]
# Verify the payload contains autoCreatePr=False (the default)
payload = call_kwargs["data"]
assert payload["target"]["autoCreatePr"] is False
| CursorAgentClientTest |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 23969,
"end": 24325
} | class ____(Dataset):
def __init__(self, size, sleep_sec):
self.size = size
self.sleep_sec = sleep_sec
self.slept = False
def __getitem__(self, idx):
if not self.slept:
time.sleep(self.sleep_sec)
self.slept = True
return idx
def __len__(self):
return self.size
| SleepDataset |
python | getsentry__sentry | src/sentry/preprod/pull_request/types.py | {
"start": 2218,
"end": 2389
} | class ____(BaseModel):
"""
Error response for pull request operations.
"""
error: str
message: str
details: str | None = None
| PullRequestErrorResponse |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 1014488,
"end": 1015115
} | class ____(
sgqlc.types.Type,
Node,
Comment,
Deletable,
Reactable,
UniformResourceLocatable,
Updatable,
UpdatableComment,
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("body_version", "discussion", "number")
body_version = sgqlc.types.Field(
sgqlc.types.non_null(String), graphql_name="bodyVersion"
)
discussion = sgqlc.types.Field(
sgqlc.types.non_null(TeamDiscussion), graphql_name="discussion"
)
number = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="number")
| TeamDiscussionComment |
python | ray-project__ray | python/ray/tests/test_batch_node_provider_integration.py | {
"start": 742,
"end": 3738
} | class ____(BatchingNodeProvider):
"""Class for e2e local testing of BatchingNodeProvider.
Uses FakeMultiNodeProvider as a proxy for managing the nodes.
This node provider requires the "available_node_types" section of the
autoscaling config to be copied into the "provider" section.
That's needed so that node resources can be accessed as
provider_config["available_node_types"][node_type]["resources"].
See the create_node_with_resources call in submit_scale_request.
See class BatchingAutoscaler below.
"""
def __init__(self, provider_config, cluster_name):
BatchingNodeProvider.__init__(self, provider_config, cluster_name)
self.fake_multi_node_provider = FakeMultiNodeProvider(
provider_config, cluster_name
)
# Manually "inherit" internal utility functions.
# I prefer this over attempting multiple inheritance.
def _next_hex_node_id(self):
return self.fake_multi_node_provider._next_hex_node_id()
def _terminate_node(self, node):
return self.fake_multi_node_provider._terminate_node(node)
def get_node_data(self):
node_data_dict = {}
for node_id in self.fake_multi_node_provider._nodes:
tags = self.fake_multi_node_provider._nodes[node_id]["tags"]
node_data_dict[node_id] = NodeData(
kind=tags[TAG_RAY_NODE_KIND],
type=tags[TAG_RAY_USER_NODE_TYPE],
status=tags[TAG_RAY_NODE_STATUS],
ip=node_id,
)
return node_data_dict
def submit_scale_request(self, scale_request: ScaleRequest):
worker_counts = self.cur_num_workers()
for worker_to_delete in scale_request.workers_to_delete:
node_type = self.node_tags(worker_to_delete)[TAG_RAY_USER_NODE_TYPE]
FakeMultiNodeProvider.terminate_node(self, worker_to_delete)
worker_counts[node_type] -= 1
for node_type in scale_request.desired_num_workers:
diff = (
scale_request.desired_num_workers[node_type] - worker_counts[node_type]
)
# It is non-standard for "available_node_types" to be included in the
# provider config, but it is necessary for this node provider.
resources = self.provider_config["available_node_types"][node_type][
"resources"
]
labels = self.provider_config["available_node_types"][node_type].get(
"labels", {}
)
tags = {
TAG_RAY_NODE_KIND: NODE_KIND_WORKER,
TAG_RAY_USER_NODE_TYPE: node_type,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
}
self.fake_multi_node_provider.create_node_with_resources_and_labels(
node_config={},
tags=tags,
count=diff,
resources=resources,
labels=labels,
)
| FakeBatchingNodeProvider |
python | facebook__pyre-check | client/language_server/daemon_connection.py | {
"start": 1053,
"end": 1665
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
error_message: str
error_source: Optional[Exception] = None
def send_raw_request(socket_path: Path, raw_request: str) -> str:
with connections.connect(socket_path) as (
input_channel,
output_channel,
):
LOG.debug(f"Sending `{log.truncate(raw_request, 400)}`")
output_channel.write(f"{raw_request}\n")
raw_response = input_channel.readline().strip()
LOG.debug(f"Received `{log.truncate(raw_response, 400)}`")
return raw_response
@dataclasses.dataclass(frozen=True)
| DaemonConnectionFailure |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/app_identity/incoming/main.py | {
"start": 819,
"end": 1270
} | class ____(webapp2.RequestHandler):
allowed_app_ids = ["other-app-id", "other-app-id-2"]
def get(self):
incoming_app_id = self.request.headers.get("X-Appengine-Inbound-Appid", None)
if incoming_app_id not in self.allowed_app_ids:
self.abort(403)
self.response.write("This is a protected page.")
app = webapp2.WSGIApplication([("/", MainPage)], debug=True)
# [END gae_python_app_identity_incoming]
| MainPage |
python | vyperlang__vyper | vyper/ast/nodes.py | {
"start": 36220,
"end": 36319
} | class ____(Operator):
__slots__ = ()
_description = "less-or-equal"
_op = operator.le
| LtE |
python | google__python-fire | fire/fire_import_test.py | {
"start": 695,
"end": 1107
} | class ____(testutils.BaseTestCase):
"""Tests importing Fire."""
def testFire(self):
with mock.patch.object(sys, 'argv', ['commandname']):
fire.Fire()
def testFireMethods(self):
self.assertIsNotNone(fire.Fire)
def testNoPrivateMethods(self):
self.assertTrue(hasattr(fire, 'Fire'))
self.assertFalse(hasattr(fire, '_Fire'))
if __name__ == '__main__':
testutils.main()
| FireImportTest |
python | django__django | tests/model_fields/models.py | {
"start": 3330,
"end": 3416
} | class ____(models.Model):
value = models.BigAutoField(primary_key=True)
| BigAutoModel |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/if_stmt_min_max.py | {
"start": 1561,
"end": 1706
} | class ____:
def __init__(self):
self.value = 13
A1 = A()
if A1.value < 10:
A1.value = 10
if A1.value > 10:
A1.value = 10
| A |
python | viewflow__viewflow | viewflow/forms/renderers.py | {
"start": 14873,
"end": 15418
} | class ____(InputRenderer):
tag = "vf-field-autocomplete-multi"
def create_root(self, context):
root = super().create_root(context)
field = self.bound_field.field
initial_values = [
{
'value': field.label_from_instance(item),
'data': {'id': field.prepare_value(item)},
}
for item in field.to_python(self.bound_field.value())
]
root.attrib["initial"] = json.dumps(initial_values)
return root
| AjaxMultipleModelSelectRenderer |
python | pypa__warehouse | tests/unit/admin/views/test_users.py | {
"start": 14298,
"end": 17420
} | class ____:
def test_deletes_user(self, db_request, monkeypatch):
user = UserFactory.create()
project = ProjectFactory.create()
another_project = ProjectFactory.create()
RoleFactory(project=project, user=user, role_name="Owner")
deleted_user = UserFactory.create(username="deleted-user")
# Create an extra JournalEntry by this user which should be
# updated with the deleted-user user.
JournalEntryFactory.create(submitted_by=user, action="some old journal")
db_request.matchdict["username"] = str(user.username)
db_request.params = {"username": user.username}
db_request.route_path = pretend.call_recorder(lambda a: "/foobar")
db_request.user = UserFactory.create()
result = views.user_delete(user, db_request)
db_request.db.flush()
assert not db_request.db.get(User, user.id)
assert db_request.db.query(Project).all() == [another_project]
assert db_request.route_path.calls == [pretend.call("admin.user.list")]
assert result.status_code == 303
assert result.location == "/foobar"
# Check that the correct journals were written/modified
old_journal = (
db_request.db.query(JournalEntry)
.options(joinedload(JournalEntry.submitted_by))
.filter(JournalEntry.action == "some old journal")
.one()
)
assert old_journal.submitted_by == deleted_user
remove_journal = (
db_request.db.query(JournalEntry)
.filter(JournalEntry.action == "remove project")
.one()
)
assert remove_journal.name == project.name
def test_deletes_user_bad_confirm(self, db_request, monkeypatch):
user = UserFactory.create()
project = ProjectFactory.create()
RoleFactory(project=project, user=user, role_name="Owner")
db_request.matchdict["username"] = str(user.username)
db_request.params = {"username": "wrong"}
db_request.route_path = pretend.call_recorder(lambda a, **k: "/foobar")
result = views.user_delete(user, db_request)
db_request.db.flush()
assert db_request.db.get(User, user.id)
assert db_request.db.query(Project).all() == [project]
assert db_request.route_path.calls == [
pretend.call("admin.user.detail", username=user.username)
]
assert result.status_code == 303
assert result.location == "/foobar"
def test_user_delete_redirects_actual_name(self, db_request):
user = UserFactory.create(username="wu-tang")
db_request.matchdict["username"] = "Wu-Tang"
db_request.current_route_path = pretend.call_recorder(
lambda username: "/user/the-redirect/"
)
result = views.user_delete(user, db_request)
assert isinstance(result, HTTPMovedPermanently)
assert result.headers["Location"] == "/user/the-redirect/"
assert db_request.current_route_path.calls == [
pretend.call(username=user.username)
]
| TestUserDelete |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/schedules/ticks.py | {
"start": 1334,
"end": 1568
} | class ____(graphene.Union):
class Meta:
types = (
GrapheneScheduleTickSuccessData,
GrapheneScheduleTickFailureData,
)
name = "ScheduleTickSpecificData"
| GrapheneScheduleTickSpecificData |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_resolver.py | {
"start": 25207,
"end": 26357
} | class ____:
def setUp(self):
self.owner = create_user(username="owner", password="test")
self.tester = create_user(username="tester", password="test")
self.pip = fixture.get(
Project,
slug="pip",
users=[self.owner],
main_language_project=None,
)
self.version = self.pip.versions.first()
self.seed = fixture.get(
Project,
slug="sub",
users=[self.owner],
main_language_project=None,
)
self.subproject = fixture.get(
Project,
slug="subproject",
language="ja",
users=[self.owner],
main_language_project=None,
)
self.translation = fixture.get(
Project,
slug="trans",
language="ja",
users=[self.owner],
main_language_project=None,
)
self.pip.add_subproject(self.subproject, alias="sub")
self.pip.translations.add(self.translation)
self.resolver = Resolver()
@override_settings(PUBLIC_DOMAIN="readthedocs.org")
| ResolverAltSetUp |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_repr_returned.py | {
"start": 1126,
"end": 1252
} | class ____:
"""Potential uninferable return value"""
def __repr__(self):
return str(Missing)
| AnotherAmbiguousRepr |
python | pypa__pip | tests/lib/wheel.py | {
"start": 6027,
"end": 11825
} | class ____:
"""A wheel that can be saved or converted to several formats."""
def __init__(self, name: str, files: Iterable[File]) -> None:
self._name = name
self._files = files
def save_to_dir(self, path: Path | str) -> str:
"""Generate wheel file with correct name and save into the provided
directory.
:returns the wheel file path
"""
p = Path(path) / self._name
p.write_bytes(self.as_bytes())
return str(p)
def save_to(self, path: Path | str) -> str:
"""Generate wheel file, saving to the provided path. Any parent
directories must already exist.
:returns the wheel file path
"""
path = Path(path)
path.write_bytes(self.as_bytes())
return str(path)
def as_bytes(self) -> bytes:
with BytesIO() as buf:
with ZipFile(buf, "w") as z:
for file in self._files:
z.writestr(file.name, file.contents)
return buf.getvalue()
def as_zipfile(self) -> ZipFile:
return ZipFile(BytesIO(self.as_bytes()))
def as_distribution(self, name: str) -> BaseDistribution:
stream = BytesIO(self.as_bytes())
return get_wheel_distribution(
MemoryWheel(self._name, stream), canonicalize_name(name)
)
def make_wheel(
name: str,
version: str,
wheel_metadata: Defaulted[AnyStr | None] = _default,
wheel_metadata_updates: Defaulted[dict[str, HeaderValue]] = _default,
metadata: Defaulted[AnyStr | None] = _default,
metadata_body: Defaulted[AnyStr] = _default,
metadata_updates: Defaulted[dict[str, HeaderValue]] = _default,
extra_files: Defaulted[dict[str, bytes | str]] = _default,
extra_metadata_files: Defaulted[dict[str, AnyStr]] = _default,
extra_data_files: Defaulted[dict[str, AnyStr]] = _default,
console_scripts: Defaulted[list[str]] = _default,
entry_points: Defaulted[dict[str, list[str]]] = _default,
record: Defaulted[AnyStr | None] = _default,
) -> WheelBuilder:
"""
Helper function for generating test wheels which are compliant by default.
Examples:
```
# Basic wheel, which will have valid metadata, RECORD, etc
make_wheel(name="foo", version="0.1.0")
# Wheel with custom metadata
make_wheel(
name="foo",
version="0.1.0",
metadata_updates={
# Overrides default
"Name": "hello",
# Expands into separate Requires-Dist entries
"Requires-Dist": ["a == 1.0", "b == 2.0; sys_platform == 'win32'"],
},
)
```
After specifying the wheel, it can be consumed in several ways:
```
# Normal case, valid wheel we want pip to pick up.
make_wheel(...).save_to_dir(tmpdir)
# For a test case, to check that pip validates contents against wheel name.
make_wheel(name="simple", ...).save_to(tmpdir / "notsimple-...")
# In-memory, for unit tests.
z = make_wheel(...).as_zipfile()
```
Below, any unicode value provided for AnyStr will be encoded as utf-8.
:param name: name of the distribution, propagated to the .dist-info
directory, METADATA, and wheel file name
:param version: version of the distribution, propagated to the .dist-info
directory, METADATA, and wheel file name
:param wheel_metadata: if provided and None, then no WHEEL metadata file
is generated; else if a string then sets the content of the WHEEL file
:param wheel_metadata_updates: override the default WHEEL metadata fields,
ignored if wheel_metadata is provided
:param metadata: if provided and None, then no METADATA file is generated;
else if a string then sets the content of the METADATA file
:param metadata_body: sets the value of the body text in METADATA, ignored
if metadata is provided
:param metadata_updates: override the default METADATA fields,
ignored if metadata is provided
:param extra_files: map from path to file contents for additional files to
be put in the wheel
:param extra_metadata_files: map from path (relative to .dist-info) to file
contents for additional files to be put in the wheel
:param extra_data_files: map from path (relative to .data) to file contents
for additional files to be put in the wheel
:param console_scripts: list of console scripts text to be put into
entry_points.txt - overrides any value set in entry_points
:param entry_points:
:param record: if provided and None, then no RECORD file is generated;
else if a string then sets the content of the RECORD file
"""
pythons = ["py2", "py3"]
abis = ["none"]
platforms = ["any"]
tags = list(itertools.product(pythons, abis, platforms))
possible_files = [
make_metadata_file(name, version, metadata, metadata_updates, metadata_body),
make_wheel_metadata_file(
name, version, wheel_metadata, tags, wheel_metadata_updates
),
make_entry_points_file(name, version, entry_points, console_scripts),
]
if extra_files is not _default:
possible_files.extend(make_files(extra_files))
if extra_metadata_files is not _default:
possible_files.extend(make_metadata_files(name, version, extra_metadata_files))
if extra_data_files is not _default:
possible_files.extend(make_data_files(name, version, extra_data_files))
actual_files = filter(None, possible_files)
files_and_record_file = record_file_maker_wrapper(
name, version, actual_files, record
)
wheel_file_name = wheel_name(name, version, pythons, abis, platforms)
return WheelBuilder(wheel_file_name, files_and_record_file)
| WheelBuilder |
python | doocs__leetcode | solution/0500-0599/0536.Construct Binary Tree from String/Solution.py | {
"start": 192,
"end": 951
} | class ____:
def str2tree(self, s: str) -> TreeNode:
def dfs(s):
if not s:
return None
p = s.find('(')
if p == -1:
return TreeNode(int(s))
root = TreeNode(int(s[:p]))
start = p
cnt = 0
for i in range(p, len(s)):
if s[i] == '(':
cnt += 1
elif s[i] == ')':
cnt -= 1
if cnt == 0:
if start == p:
root.left = dfs(s[start + 1 : i])
start = i + 1
else:
root.right = dfs(s[start + 1 : i])
return root
return dfs(s)
| Solution |
python | huggingface__transformers | src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py | {
"start": 2555,
"end": 3897
} | class ____(DepthAnythingFeatureFusionLayer):
def __init__(self, config: PromptDepthAnythingConfig):
super().__init__(config)
self.prompt_depth_layer = PromptDepthAnythingLayer(config)
def forward(self, hidden_state, residual=None, size=None, prompt_depth=None):
if residual is not None:
if hidden_state.shape != residual.shape:
residual = nn.functional.interpolate(
residual, size=hidden_state.shape[2:], mode="bilinear", align_corners=False
)
hidden_state = hidden_state + self.residual_layer1(residual)
hidden_state = self.residual_layer2(hidden_state)
if prompt_depth is not None:
prompt_depth = nn.functional.interpolate(
prompt_depth, size=hidden_state.shape[2:], mode="bilinear", align_corners=False
)
res = self.prompt_depth_layer(prompt_depth)
hidden_state = hidden_state + res
modifier = {"scale_factor": 2} if size is None else {"size": size}
hidden_state = nn.functional.interpolate(
hidden_state,
**modifier,
mode="bilinear",
align_corners=True,
)
hidden_state = self.projection(hidden_state)
return hidden_state
| PromptDepthAnythingFeatureFusionLayer |
python | fluentpython__example-code | attic/concurrency/wikipedia/daypicts.py | {
"start": 1260,
"end": 1350
} | class ____(Exception):
'''No Picture of the Day found for {iso_date}'''
| NoPictureForDate |
python | getsentry__sentry-python | sentry_sdk/integrations/cohere.py | {
"start": 1976,
"end": 9401
} | class ____(Integration):
identifier = "cohere"
origin = f"auto.ai.{identifier}"
def __init__(self, include_prompts=True):
# type: (CohereIntegration, bool) -> None
self.include_prompts = include_prompts
@staticmethod
def setup_once():
# type: () -> None
BaseCohere.chat = _wrap_chat(BaseCohere.chat, streaming=False)
Client.embed = _wrap_embed(Client.embed)
BaseCohere.chat_stream = _wrap_chat(BaseCohere.chat_stream, streaming=True)
def _capture_exception(exc):
# type: (Any) -> None
set_span_errored()
event, hint = event_from_exception(
exc,
client_options=sentry_sdk.get_client().options,
mechanism={"type": "cohere", "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
def _wrap_chat(f, streaming):
# type: (Callable[..., Any], bool) -> Callable[..., Any]
def collect_chat_response_fields(span, res, include_pii):
# type: (Span, NonStreamedChatResponse, bool) -> None
if include_pii:
if hasattr(res, "text"):
set_data_normalized(
span,
SPANDATA.AI_RESPONSES,
[res.text],
)
for pii_attr in COLLECTED_PII_CHAT_RESP_ATTRS:
if hasattr(res, pii_attr):
set_data_normalized(span, "ai." + pii_attr, getattr(res, pii_attr))
for attr in COLLECTED_CHAT_RESP_ATTRS:
if hasattr(res, attr):
set_data_normalized(span, "ai." + attr, getattr(res, attr))
if hasattr(res, "meta"):
if hasattr(res.meta, "billed_units"):
record_token_usage(
span,
input_tokens=res.meta.billed_units.input_tokens,
output_tokens=res.meta.billed_units.output_tokens,
)
elif hasattr(res.meta, "tokens"):
record_token_usage(
span,
input_tokens=res.meta.tokens.input_tokens,
output_tokens=res.meta.tokens.output_tokens,
)
if hasattr(res.meta, "warnings"):
set_data_normalized(span, SPANDATA.AI_WARNINGS, res.meta.warnings)
@wraps(f)
def new_chat(*args, **kwargs):
# type: (*Any, **Any) -> Any
integration = sentry_sdk.get_client().get_integration(CohereIntegration)
if (
integration is None
or "message" not in kwargs
or not isinstance(kwargs.get("message"), str)
):
return f(*args, **kwargs)
message = kwargs.get("message")
span = sentry_sdk.start_span(
op=consts.OP.COHERE_CHAT_COMPLETIONS_CREATE,
name="cohere.client.Chat",
origin=CohereIntegration.origin,
)
span.__enter__()
try:
res = f(*args, **kwargs)
except Exception as e:
_capture_exception(e)
span.__exit__(None, None, None)
raise e from None
with capture_internal_exceptions():
if should_send_default_pii() and integration.include_prompts:
set_data_normalized(
span,
SPANDATA.AI_INPUT_MESSAGES,
list(
map(
lambda x: {
"role": getattr(x, "role", "").lower(),
"content": getattr(x, "message", ""),
},
kwargs.get("chat_history", []),
)
)
+ [{"role": "user", "content": message}],
)
for k, v in COLLECTED_PII_CHAT_PARAMS.items():
if k in kwargs:
set_data_normalized(span, v, kwargs[k])
for k, v in COLLECTED_CHAT_PARAMS.items():
if k in kwargs:
set_data_normalized(span, v, kwargs[k])
set_data_normalized(span, SPANDATA.AI_STREAMING, False)
if streaming:
old_iterator = res
def new_iterator():
# type: () -> Iterator[StreamedChatResponse]
with capture_internal_exceptions():
for x in old_iterator:
if isinstance(x, ChatStreamEndEvent) or isinstance(
x, StreamEndStreamedChatResponse
):
collect_chat_response_fields(
span,
x.response,
include_pii=should_send_default_pii()
and integration.include_prompts,
)
yield x
span.__exit__(None, None, None)
return new_iterator()
elif isinstance(res, NonStreamedChatResponse):
collect_chat_response_fields(
span,
res,
include_pii=should_send_default_pii()
and integration.include_prompts,
)
span.__exit__(None, None, None)
else:
set_data_normalized(span, "unknown_response", True)
span.__exit__(None, None, None)
return res
return new_chat
def _wrap_embed(f):
# type: (Callable[..., Any]) -> Callable[..., Any]
@wraps(f)
def new_embed(*args, **kwargs):
# type: (*Any, **Any) -> Any
integration = sentry_sdk.get_client().get_integration(CohereIntegration)
if integration is None:
return f(*args, **kwargs)
with sentry_sdk.start_span(
op=consts.OP.COHERE_EMBEDDINGS_CREATE,
name="Cohere Embedding Creation",
origin=CohereIntegration.origin,
) as span:
if "texts" in kwargs and (
should_send_default_pii() and integration.include_prompts
):
if isinstance(kwargs["texts"], str):
set_data_normalized(span, SPANDATA.AI_TEXTS, [kwargs["texts"]])
elif (
isinstance(kwargs["texts"], list)
and len(kwargs["texts"]) > 0
and isinstance(kwargs["texts"][0], str)
):
set_data_normalized(
span, SPANDATA.AI_INPUT_MESSAGES, kwargs["texts"]
)
if "model" in kwargs:
set_data_normalized(span, SPANDATA.AI_MODEL_ID, kwargs["model"])
try:
res = f(*args, **kwargs)
except Exception as e:
_capture_exception(e)
raise e from None
if (
hasattr(res, "meta")
and hasattr(res.meta, "billed_units")
and hasattr(res.meta.billed_units, "input_tokens")
):
record_token_usage(
span,
input_tokens=res.meta.billed_units.input_tokens,
total_tokens=res.meta.billed_units.input_tokens,
)
return res
return new_embed
| CohereIntegration |
python | getsentry__sentry | src/sentry/api/endpoints/catchall.py | {
"start": 245,
"end": 1308
} | class ____(Endpoint):
permission_classes = ()
@csrf_exempt
@allow_cors_options
def dispatch(self, request: Request, *args, **kwargs) -> HttpResponse:
"""
This endpoint handles routes that did not match
"""
# Let the user know they may have forgotten a trailing slash
if not request.path.endswith("/"):
help = "Route not found, did you forget a trailing slash?"
suggestion = f"try: {request.path}/"
# Don't break JSON parsers
if request.META.get("CONTENT_TYPE", "").startswith("application/json"):
return JsonResponse(data={"info": f"{help} {suggestion}"}, status=404)
# Produce error message with a pointer to the trailing slash in plain text
arrow_offset = len(suggestion) - 1
arrow = f"{' ' * arrow_offset}^"
message = f"{help}\n\n{suggestion}\n{arrow}\n"
return HttpResponse(message, status=404, content_type="text/plain")
return HttpResponse(status=404)
| CatchallEndpoint |
python | networkx__networkx | networkx/drawing/nx_pylab.py | {
"start": 54621,
"end": 103405
} | class ____:
"""Draw arrows with `matplotlib.patches.FancyarrowPatch`"""
class ConnectionStyleFactory:
def __init__(self, connectionstyles, selfloop_height, ax=None):
import matplotlib as mpl
import matplotlib.path # call as mpl.path
import numpy as np
self.ax = ax
self.mpl = mpl
self.np = np
self.base_connection_styles = [
mpl.patches.ConnectionStyle(cs) for cs in connectionstyles
]
self.n = len(self.base_connection_styles)
self.selfloop_height = selfloop_height
def curved(self, edge_index):
return self.base_connection_styles[edge_index % self.n]
def self_loop(self, edge_index):
def self_loop_connection(posA, posB, *args, **kwargs):
if not self.np.all(posA == posB):
raise nx.NetworkXError(
"`self_loop` connection style method"
"is only to be used for self-loops"
)
# this is called with _screen space_ values
# so convert back to data space
data_loc = self.ax.transData.inverted().transform(posA)
v_shift = 0.1 * self.selfloop_height
h_shift = v_shift * 0.5
# put the top of the loop first so arrow is not hidden by node
path = self.np.asarray(
[
# 1
[0, v_shift],
# 4 4 4
[h_shift, v_shift],
[h_shift, 0],
[0, 0],
# 4 4 4
[-h_shift, 0],
[-h_shift, v_shift],
[0, v_shift],
]
)
# Rotate self loop 90 deg. if more than 1
# This will allow for maximum of 4 visible self loops
if edge_index % 4:
x, y = path.T
for _ in range(edge_index % 4):
x, y = y, -x
path = self.np.array([x, y]).T
return self.mpl.path.Path(
self.ax.transData.transform(data_loc + path), [1, 4, 4, 4, 4, 4, 4]
)
return self_loop_connection
def __init__(
self,
edge_pos,
edgelist,
nodelist,
edge_indices,
node_size,
selfloop_height,
connectionstyle="arc3",
node_shape="o",
arrowstyle="-",
arrowsize=10,
edge_color="k",
alpha=None,
linewidth=1.0,
style="solid",
min_source_margin=0,
min_target_margin=0,
ax=None,
):
import matplotlib as mpl
import matplotlib.patches # call as mpl.patches
import matplotlib.pyplot as plt
import numpy as np
if isinstance(connectionstyle, str):
connectionstyle = [connectionstyle]
elif np.iterable(connectionstyle):
connectionstyle = list(connectionstyle)
else:
msg = "ConnectionStyleFactory arg `connectionstyle` must be str or iterable"
raise nx.NetworkXError(msg)
self.ax = ax
self.mpl = mpl
self.np = np
self.edge_pos = edge_pos
self.edgelist = edgelist
self.nodelist = nodelist
self.node_shape = node_shape
self.min_source_margin = min_source_margin
self.min_target_margin = min_target_margin
self.edge_indices = edge_indices
self.node_size = node_size
self.connectionstyle_factory = self.ConnectionStyleFactory(
connectionstyle, selfloop_height, ax
)
self.arrowstyle = arrowstyle
self.arrowsize = arrowsize
self.arrow_colors = mpl.colors.colorConverter.to_rgba_array(edge_color, alpha)
self.linewidth = linewidth
self.style = style
if isinstance(arrowsize, list) and len(arrowsize) != len(edge_pos):
raise ValueError("arrowsize should have the same length as edgelist")
def __call__(self, i):
(x1, y1), (x2, y2) = self.edge_pos[i]
shrink_source = 0 # space from source to tail
shrink_target = 0 # space from head to target
if (
self.np.iterable(self.min_source_margin)
and not isinstance(self.min_source_margin, str)
and not isinstance(self.min_source_margin, tuple)
):
min_source_margin = self.min_source_margin[i]
else:
min_source_margin = self.min_source_margin
if (
self.np.iterable(self.min_target_margin)
and not isinstance(self.min_target_margin, str)
and not isinstance(self.min_target_margin, tuple)
):
min_target_margin = self.min_target_margin[i]
else:
min_target_margin = self.min_target_margin
if self.np.iterable(self.node_size): # many node sizes
source, target = self.edgelist[i][:2]
source_node_size = self.node_size[self.nodelist.index(source)]
target_node_size = self.node_size[self.nodelist.index(target)]
shrink_source = self.to_marker_edge(source_node_size, self.node_shape)
shrink_target = self.to_marker_edge(target_node_size, self.node_shape)
else:
shrink_source = self.to_marker_edge(self.node_size, self.node_shape)
shrink_target = shrink_source
shrink_source = max(shrink_source, min_source_margin)
shrink_target = max(shrink_target, min_target_margin)
# scale factor of arrow head
if isinstance(self.arrowsize, list):
mutation_scale = self.arrowsize[i]
else:
mutation_scale = self.arrowsize
if len(self.arrow_colors) > i:
arrow_color = self.arrow_colors[i]
elif len(self.arrow_colors) == 1:
arrow_color = self.arrow_colors[0]
else: # Cycle through colors
arrow_color = self.arrow_colors[i % len(self.arrow_colors)]
if self.np.iterable(self.linewidth):
if len(self.linewidth) > i:
linewidth = self.linewidth[i]
else:
linewidth = self.linewidth[i % len(self.linewidth)]
else:
linewidth = self.linewidth
if (
self.np.iterable(self.style)
and not isinstance(self.style, str)
and not isinstance(self.style, tuple)
):
if len(self.style) > i:
linestyle = self.style[i]
else: # Cycle through styles
linestyle = self.style[i % len(self.style)]
else:
linestyle = self.style
if x1 == x2 and y1 == y2:
connectionstyle = self.connectionstyle_factory.self_loop(
self.edge_indices[i]
)
else:
connectionstyle = self.connectionstyle_factory.curved(self.edge_indices[i])
if (
self.np.iterable(self.arrowstyle)
and not isinstance(self.arrowstyle, str)
and not isinstance(self.arrowstyle, tuple)
):
arrowstyle = self.arrowstyle[i]
else:
arrowstyle = self.arrowstyle
return self.mpl.patches.FancyArrowPatch(
(x1, y1),
(x2, y2),
arrowstyle=arrowstyle,
shrinkA=shrink_source,
shrinkB=shrink_target,
mutation_scale=mutation_scale,
color=arrow_color,
linewidth=linewidth,
connectionstyle=connectionstyle,
linestyle=linestyle,
zorder=1, # arrows go behind nodes
)
def to_marker_edge(self, marker_size, marker):
if marker in "s^>v<d": # `large` markers need extra space
return self.np.sqrt(2 * marker_size) / 2
else:
return self.np.sqrt(marker_size) / 2
def draw_networkx_edges(
G,
pos,
edgelist=None,
width=1.0,
edge_color="k",
style="solid",
alpha=None,
arrowstyle=None,
arrowsize=10,
edge_cmap=None,
edge_vmin=None,
edge_vmax=None,
ax=None,
arrows=None,
label=None,
node_size=300,
nodelist=None,
node_shape="o",
connectionstyle="arc3",
min_source_margin=0,
min_target_margin=0,
hide_ticks=True,
):
r"""Draw the edges of the graph G.
This draws only the edges of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
edgelist : collection of edge tuples (default=G.edges())
Draw only specified edges
width : float or array of floats (default=1.0)
Line width of edges
edge_color : color or array of colors (default='k')
Edge color. Can be a single color or a sequence of colors with the same
length as edgelist. Color can be string or rgb (or rgba) tuple of
floats from 0-1. If numeric values are specified they will be
mapped to colors using the edge_cmap and edge_vmin,edge_vmax parameters.
style : string or array of strings (default='solid')
Edge line style e.g.: '-', '--', '-.', ':'
or words like 'solid' or 'dashed'.
Can be a single style or a sequence of styles with the same
length as the edge list.
If less styles than edges are given the styles will cycle.
If more styles than edges are given the styles will be used sequentially
and not be exhausted.
Also, `(offset, onoffseq)` tuples can be used as style instead of a strings.
(See `matplotlib.patches.FancyArrowPatch`: `linestyle`)
alpha : float or array of floats (default=None)
The edge transparency. This can be a single alpha value,
in which case it will be applied to all specified edges. Otherwise,
if it is an array, the elements of alpha will be applied to the colors
in order (cycling through alpha multiple times if necessary).
edge_cmap : Matplotlib colormap, optional
Colormap for mapping intensities of edges
edge_vmin,edge_vmax : floats, optional
Minimum and maximum for edge colormap scaling
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
arrows : bool or None, optional (default=None)
If `None`, directed graphs draw arrowheads with
`~matplotlib.patches.FancyArrowPatch`, while undirected graphs draw edges
via `~matplotlib.collections.LineCollection` for speed.
If `True`, draw arrowheads with FancyArrowPatches (bendable and stylish).
If `False`, draw edges using LineCollection (linear and fast).
Note: Arrowheads will be the same color as edges.
arrowstyle : str or list of strs (default='-\|>' for directed graphs)
For directed graphs and `arrows==True` defaults to '-\|>',
For undirected graphs default to '-'.
See `matplotlib.patches.ArrowStyle` for more options.
arrowsize : int or list of ints(default=10)
For directed graphs, choose the size of the arrow head's length and
width. See `matplotlib.patches.FancyArrowPatch` for attribute
`mutation_scale` for more info.
connectionstyle : string or iterable of strings (default="arc3")
Pass the connectionstyle parameter to create curved arc of rounding
radius rad. For example, connectionstyle='arc3,rad=0.2'.
See `matplotlib.patches.ConnectionStyle` and
`matplotlib.patches.FancyArrowPatch` for more info.
If Iterable, index indicates i'th edge key of MultiGraph
node_size : scalar or array (default=300)
Size of nodes. Though the nodes are not drawn with this function, the
node size is used in determining edge positioning.
nodelist : list, optional (default=G.nodes())
This provides the node order for the `node_size` array (if it is an array).
node_shape : string (default='o')
The marker used for nodes, used in determining edge positioning.
Specification is as a `matplotlib.markers` marker, e.g. one of 'so^>v<dph8'.
label : None or string
Label for legend
min_source_margin : int or list of ints (default=0)
The minimum margin (gap) at the beginning of the edge at the source.
min_target_margin : int or list of ints (default=0)
The minimum margin (gap) at the end of the edge at the target.
hide_ticks : bool, optional
Hide ticks of axes. When `True` (the default), ticks and ticklabels
are removed from the axes. To set ticks and tick labels to the pyplot default,
use ``hide_ticks=False``.
Returns
-------
matplotlib.collections.LineCollection or a list of matplotlib.patches.FancyArrowPatch
If ``arrows=True``, a list of FancyArrowPatches is returned.
If ``arrows=False``, a LineCollection is returned.
If ``arrows=None`` (the default), then a LineCollection is returned if
`G` is undirected, otherwise returns a list of FancyArrowPatches.
Notes
-----
For directed graphs, arrows are drawn at the head end. Arrows can be
turned off with keyword arrows=False or by passing an arrowstyle without
an arrow on the end.
Be sure to include `node_size` as a keyword argument; arrows are
drawn considering the size of nodes.
Self-loops are always drawn with `~matplotlib.patches.FancyArrowPatch`
regardless of the value of `arrows` or whether `G` is directed.
When ``arrows=False`` or ``arrows=None`` and `G` is undirected, the
FancyArrowPatches corresponding to the self-loops are not explicitly
returned. They should instead be accessed via the ``Axes.patches``
attribute (see examples).
Examples
--------
>>> G = nx.dodecahedral_graph()
>>> edges = nx.draw_networkx_edges(G, pos=nx.spring_layout(G))
>>> G = nx.DiGraph()
>>> G.add_edges_from([(1, 2), (1, 3), (2, 3)])
>>> arcs = nx.draw_networkx_edges(G, pos=nx.spring_layout(G))
>>> alphas = [0.3, 0.4, 0.5]
>>> for i, arc in enumerate(arcs): # change alpha values of arcs
... arc.set_alpha(alphas[i])
The FancyArrowPatches corresponding to self-loops are not always
returned, but can always be accessed via the ``patches`` attribute of the
`matplotlib.Axes` object.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> G = nx.Graph([(0, 1), (0, 0)]) # Self-loop at node 0
>>> edge_collection = nx.draw_networkx_edges(G, pos=nx.circular_layout(G), ax=ax)
>>> self_loop_fap = ax.patches[0]
Also see the NetworkX drawing examples at
https://networkx.org/documentation/latest/auto_examples/index.html
See Also
--------
draw
draw_networkx
draw_networkx_nodes
draw_networkx_labels
draw_networkx_edge_labels
"""
import warnings
import matplotlib as mpl
import matplotlib.collections # call as mpl.collections
import matplotlib.colors # call as mpl.colors
import matplotlib.pyplot as plt
import numpy as np
# The default behavior is to use LineCollection to draw edges for
# undirected graphs (for performance reasons) and use FancyArrowPatches
# for directed graphs.
# The `arrows` keyword can be used to override the default behavior
if arrows is None:
use_linecollection = not (G.is_directed() or G.is_multigraph())
else:
if not isinstance(arrows, bool):
raise TypeError("Argument `arrows` must be of type bool or None")
use_linecollection = not arrows
if isinstance(connectionstyle, str):
connectionstyle = [connectionstyle]
elif np.iterable(connectionstyle):
connectionstyle = list(connectionstyle)
else:
msg = "draw_networkx_edges arg `connectionstyle` must be str or iterable"
raise nx.NetworkXError(msg)
# Some kwargs only apply to FancyArrowPatches. Warn users when they use
# non-default values for these kwargs when LineCollection is being used
# instead of silently ignoring the specified option
if use_linecollection:
msg = (
"\n\nThe {0} keyword argument is not applicable when drawing edges\n"
"with LineCollection.\n\n"
"To make this warning go away, either specify `arrows=True` to\n"
"force FancyArrowPatches or use the default values.\n"
"Note that using FancyArrowPatches may be slow for large graphs.\n"
)
if arrowstyle is not None:
warnings.warn(msg.format("arrowstyle"), category=UserWarning, stacklevel=2)
if arrowsize != 10:
warnings.warn(msg.format("arrowsize"), category=UserWarning, stacklevel=2)
if min_source_margin != 0:
warnings.warn(
msg.format("min_source_margin"), category=UserWarning, stacklevel=2
)
if min_target_margin != 0:
warnings.warn(
msg.format("min_target_margin"), category=UserWarning, stacklevel=2
)
if any(cs != "arc3" for cs in connectionstyle):
warnings.warn(
msg.format("connectionstyle"), category=UserWarning, stacklevel=2
)
# NOTE: Arrowstyle modification must occur after the warnings section
if arrowstyle is None:
arrowstyle = "-|>" if G.is_directed() else "-"
if ax is None:
ax = plt.gca()
if edgelist is None:
edgelist = list(G.edges) # (u, v, k) for multigraph (u, v) otherwise
if len(edgelist):
if G.is_multigraph():
key_count = collections.defaultdict(lambda: itertools.count(0))
edge_indices = [next(key_count[tuple(e[:2])]) for e in edgelist]
else:
edge_indices = [0] * len(edgelist)
else: # no edges!
return []
if nodelist is None:
nodelist = list(G.nodes())
# FancyArrowPatch handles color=None different from LineCollection
if edge_color is None:
edge_color = "k"
# set edge positions
edge_pos = np.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
# Check if edge_color is an array of floats and map to edge_cmap.
# This is the only case handled differently from matplotlib
if (
np.iterable(edge_color)
and (len(edge_color) == len(edge_pos))
and np.all([isinstance(c, Number) for c in edge_color])
):
if edge_cmap is not None:
assert isinstance(edge_cmap, mpl.colors.Colormap)
else:
edge_cmap = plt.get_cmap()
if edge_vmin is None:
edge_vmin = min(edge_color)
if edge_vmax is None:
edge_vmax = max(edge_color)
color_normal = mpl.colors.Normalize(vmin=edge_vmin, vmax=edge_vmax)
edge_color = [edge_cmap(color_normal(e)) for e in edge_color]
# compute initial view
minx = np.amin(np.ravel(edge_pos[:, :, 0]))
maxx = np.amax(np.ravel(edge_pos[:, :, 0]))
miny = np.amin(np.ravel(edge_pos[:, :, 1]))
maxy = np.amax(np.ravel(edge_pos[:, :, 1]))
w = maxx - minx
h = maxy - miny
# Self-loops are scaled by view extent, except in cases the extent
# is 0, e.g. for a single node. In this case, fall back to scaling
# by the maximum node size
selfloop_height = h if h != 0 else 0.005 * np.array(node_size).max()
fancy_arrow_factory = FancyArrowFactory(
edge_pos,
edgelist,
nodelist,
edge_indices,
node_size,
selfloop_height,
connectionstyle,
node_shape,
arrowstyle,
arrowsize,
edge_color,
alpha,
width,
style,
min_source_margin,
min_target_margin,
ax=ax,
)
# Draw the edges
if use_linecollection:
edge_collection = mpl.collections.LineCollection(
edge_pos,
colors=edge_color,
linewidths=width,
antialiaseds=(1,),
linestyle=style,
alpha=alpha,
)
edge_collection.set_cmap(edge_cmap)
edge_collection.set_clim(edge_vmin, edge_vmax)
edge_collection.set_zorder(1) # edges go behind nodes
edge_collection.set_label(label)
ax.add_collection(edge_collection)
edge_viz_obj = edge_collection
# Make sure selfloop edges are also drawn
# ---------------------------------------
selfloops_to_draw = [loop for loop in nx.selfloop_edges(G) if loop in edgelist]
if selfloops_to_draw:
edgelist_tuple = list(map(tuple, edgelist))
arrow_collection = []
for loop in selfloops_to_draw:
i = edgelist_tuple.index(loop)
arrow = fancy_arrow_factory(i)
arrow_collection.append(arrow)
ax.add_patch(arrow)
else:
edge_viz_obj = []
for i in range(len(edgelist)):
arrow = fancy_arrow_factory(i)
ax.add_patch(arrow)
edge_viz_obj.append(arrow)
# update view after drawing
padx, pady = 0.05 * w, 0.05 * h
corners = (minx - padx, miny - pady), (maxx + padx, maxy + pady)
ax.update_datalim(corners)
ax.autoscale_view()
if hide_ticks:
ax.tick_params(
axis="both",
which="both",
bottom=False,
left=False,
labelbottom=False,
labelleft=False,
)
return edge_viz_obj
def draw_networkx_labels(
G,
pos,
labels=None,
font_size=12,
font_color="k",
font_family="sans-serif",
font_weight="normal",
alpha=None,
bbox=None,
horizontalalignment="center",
verticalalignment="center",
ax=None,
clip_on=True,
hide_ticks=True,
):
"""Draw node labels on the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
labels : dictionary (default={n: n for n in G})
Node labels in a dictionary of text labels keyed by node.
Node-keys in labels should appear as keys in `pos`.
If needed use: `{n:lab for n,lab in labels.items() if n in pos}`
font_size : int or dictionary of nodes to ints (default=12)
Font size for text labels.
font_color : color or dictionary of nodes to colors (default='k' black)
Font color string. Color can be string or rgb (or rgba) tuple of
floats from 0-1.
font_weight : string or dictionary of nodes to strings (default='normal')
Font weight.
font_family : string or dictionary of nodes to strings (default='sans-serif')
Font family.
alpha : float or None or dictionary of nodes to floats (default=None)
The text transparency.
bbox : Matplotlib bbox, (default is Matplotlib's ax.text default)
Specify text box properties (e.g. shape, color etc.) for node labels.
horizontalalignment : string or array of strings (default='center')
Horizontal alignment {'center', 'right', 'left'}. If an array is
specified it must be the same length as `nodelist`.
verticalalignment : string (default='center')
Vertical alignment {'center', 'top', 'bottom', 'baseline', 'center_baseline'}.
If an array is specified it must be the same length as `nodelist`.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
clip_on : bool (default=True)
Turn on clipping of node labels at axis boundaries
hide_ticks : bool, optional
Hide ticks of axes. When `True` (the default), ticks and ticklabels
are removed from the axes. To set ticks and tick labels to the pyplot default,
use ``hide_ticks=False``.
Returns
-------
dict
`dict` of labels keyed on the nodes
Examples
--------
>>> G = nx.dodecahedral_graph()
>>> labels = nx.draw_networkx_labels(G, pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
https://networkx.org/documentation/latest/auto_examples/index.html
See Also
--------
draw
draw_networkx
draw_networkx_nodes
draw_networkx_edges
draw_networkx_edge_labels
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
if labels is None:
labels = {n: n for n in G.nodes()}
individual_params = set()
def check_individual_params(p_value, p_name):
if isinstance(p_value, dict):
if len(p_value) != len(labels):
raise ValueError(f"{p_name} must have the same length as labels.")
individual_params.add(p_name)
def get_param_value(node, p_value, p_name):
if p_name in individual_params:
return p_value[node]
return p_value
check_individual_params(font_size, "font_size")
check_individual_params(font_color, "font_color")
check_individual_params(font_weight, "font_weight")
check_individual_params(font_family, "font_family")
check_individual_params(alpha, "alpha")
text_items = {} # there is no text collection so we'll fake one
for n, label in labels.items():
(x, y) = pos[n]
if not isinstance(label, str):
label = str(label) # this makes "1" and 1 labeled the same
t = ax.text(
x,
y,
label,
size=get_param_value(n, font_size, "font_size"),
color=get_param_value(n, font_color, "font_color"),
family=get_param_value(n, font_family, "font_family"),
weight=get_param_value(n, font_weight, "font_weight"),
alpha=get_param_value(n, alpha, "alpha"),
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform=ax.transData,
bbox=bbox,
clip_on=clip_on,
)
text_items[n] = t
if hide_ticks:
ax.tick_params(
axis="both",
which="both",
bottom=False,
left=False,
labelbottom=False,
labelleft=False,
)
return text_items
def draw_networkx_edge_labels(
    G,
    pos,
    edge_labels=None,
    label_pos=0.5,
    font_size=10,
    font_color="k",
    font_family="sans-serif",
    font_weight="normal",
    alpha=None,
    bbox=None,
    horizontalalignment="center",
    verticalalignment="center",
    ax=None,
    rotate=True,
    clip_on=True,
    node_size=300,
    nodelist=None,
    connectionstyle="arc3",
    hide_ticks=True,
):
    """Draw edge labels.

    Parameters
    ----------
    G : graph
        A networkx graph

    pos : dictionary
        A dictionary with nodes as keys and positions as values.
        Positions should be sequences of length 2.

    edge_labels : dictionary (default=None)
        Edge labels in a dictionary of labels keyed by edge two-tuple.
        Only labels for the keys in the dictionary are drawn.

    label_pos : float (default=0.5)
        Position of edge label along edge (0=head, 0.5=center, 1=tail)

    font_size : int (default=10)
        Font size for text labels

    font_color : color (default='k' black)
        Font color string. Color can be string or rgb (or rgba) tuple of
        floats from 0-1.

    font_weight : string (default='normal')
        Font weight

    font_family : string (default='sans-serif')
        Font family

    alpha : float or None (default=None)
        The text transparency

    bbox : Matplotlib bbox, optional
        Specify text box properties (e.g. shape, color etc.) for edge labels.
        Default is {boxstyle='round', ec=(1.0, 1.0, 1.0), fc=(1.0, 1.0, 1.0)}.

    horizontalalignment : string (default='center')
        Horizontal alignment {'center', 'right', 'left'}

    verticalalignment : string (default='center')
        Vertical alignment {'center', 'top', 'bottom', 'baseline', 'center_baseline'}

    ax : Matplotlib Axes object, optional
        Draw the graph in the specified Matplotlib axes.

    rotate : bool (default=True)
        Rotate edge labels to lie parallel to edges

    clip_on : bool (default=True)
        Turn on clipping of edge labels at axis boundaries

    node_size : scalar or array (default=300)
        Size of nodes. If an array it must be the same length as nodelist.

    nodelist : list, optional (default=G.nodes())
        This provides the node order for the `node_size` array (if it is an array).

    connectionstyle : string or iterable of strings (default="arc3")
        Pass the connectionstyle parameter to create curved arc of rounding
        radius rad. For example, connectionstyle='arc3,rad=0.2'.
        See `matplotlib.patches.ConnectionStyle` and
        `matplotlib.patches.FancyArrowPatch` for more info.
        If Iterable, index indicates i'th edge key of MultiGraph

    hide_ticks : bool, optional
        Hide ticks of axes. When `True` (the default), ticks and ticklabels
        are removed from the axes. To set ticks and tick labels to the pyplot default,
        use ``hide_ticks=False``.

    Returns
    -------
    dict
        `dict` of labels keyed by edge

    Examples
    --------
    >>> G = nx.dodecahedral_graph()
    >>> edge_labels = nx.draw_networkx_edge_labels(G, pos=nx.spring_layout(G))

    Also see the NetworkX drawing examples at
    https://networkx.org/documentation/latest/auto_examples/index.html

    See Also
    --------
    draw
    draw_networkx
    draw_networkx_nodes
    draw_networkx_edges
    draw_networkx_labels
    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    import numpy as np

    # Mix the module-level curved-text behavior into a concrete mpl Text class.
    class CurvedArrowText(CurvedArrowTextBase, mpl.text.Text):
        pass

    # use default box of white with white border
    if bbox is None:
        bbox = {"boxstyle": "round", "ec": (1.0, 1.0, 1.0), "fc": (1.0, 1.0, 1.0)}

    if isinstance(connectionstyle, str):
        connectionstyle = [connectionstyle]
    elif np.iterable(connectionstyle):
        connectionstyle = list(connectionstyle)
    else:
        raise nx.NetworkXError(
            "draw_networkx_edges arg `connectionstyle` must be"
            "string or iterable of strings"
        )

    if ax is None:
        ax = plt.gca()

    if edge_labels is None:
        kwds = {"keys": True} if G.is_multigraph() else {}
        edge_labels = {tuple(edge): d for *edge, d in G.edges(data=True, **kwds)}
    # NOTHING TO PLOT
    if not edge_labels:
        return {}
    edgelist, labels = zip(*edge_labels.items())

    if nodelist is None:
        nodelist = list(G.nodes())

    # set edge positions
    edge_pos = np.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])

    if G.is_multigraph():
        key_count = collections.defaultdict(lambda: itertools.count(0))
        edge_indices = [next(key_count[tuple(e[:2])]) for e in edgelist]
    else:
        edge_indices = [0] * len(edgelist)

    # Used to determine self loop mid-point
    # Note, that this will not be accurate,
    # if not drawing edge_labels for all edges drawn
    h = 0
    if edge_labels:
        miny = np.amin(np.ravel(edge_pos[:, :, 1]))
        maxy = np.amax(np.ravel(edge_pos[:, :, 1]))
        h = maxy - miny
    selfloop_height = h if h != 0 else 0.005 * np.array(node_size).max()
    fancy_arrow_factory = FancyArrowFactory(
        edge_pos,
        edgelist,
        nodelist,
        edge_indices,
        node_size,
        selfloop_height,
        connectionstyle,
        ax=ax,
    )

    # Maps parameter name -> iterator over its per-edge values (for list params).
    individual_params = {}

    def check_individual_params(p_value, p_name):
        # TODO should this be list or array (as in a numpy array)?
        if isinstance(p_value, list):
            if len(p_value) != len(edge_labels):
                raise ValueError(f"{p_name} must have the same length as edge_labels.")
            # BUGFIX: use the builtin ``iter(...)`` -- lists have no ``.iter()``
            # method, so the previous ``p_value.iter()`` raised AttributeError
            # whenever a per-edge list was supplied.
            individual_params[p_name] = iter(p_value)

    # Don't need to pass in an edge because these are lists, not dicts
    def get_param_value(p_value, p_name):
        if p_name in individual_params:
            return next(individual_params[p_name])
        return p_value

    check_individual_params(font_size, "font_size")
    check_individual_params(font_color, "font_color")
    check_individual_params(font_weight, "font_weight")
    check_individual_params(alpha, "alpha")
    check_individual_params(horizontalalignment, "horizontalalignment")
    check_individual_params(verticalalignment, "verticalalignment")
    check_individual_params(rotate, "rotate")
    check_individual_params(label_pos, "label_pos")

    text_items = {}
    for i, (edge, label) in enumerate(zip(edgelist, labels)):
        if not isinstance(label, str):
            label = str(label)  # this makes "1" and 1 labeled the same
        n1, n2 = edge[:2]
        arrow = fancy_arrow_factory(i)
        if n1 == n2:
            # Self-loop: place a plain (non-rotated) label at the start of the
            # loop's arc, computed in data coordinates.
            connectionstyle_obj = arrow.get_connectionstyle()
            posA = ax.transData.transform(pos[n1])
            path_disp = connectionstyle_obj(posA, posA)
            path_data = ax.transData.inverted().transform_path(path_disp)
            x, y = path_data.vertices[0]
            text_items[edge] = ax.text(
                x,
                y,
                label,
                size=get_param_value(font_size, "font_size"),
                color=get_param_value(font_color, "font_color"),
                family=get_param_value(font_family, "font_family"),
                weight=get_param_value(font_weight, "font_weight"),
                alpha=get_param_value(alpha, "alpha"),
                horizontalalignment=get_param_value(
                    horizontalalignment, "horizontalalignment"
                ),
                verticalalignment=get_param_value(
                    verticalalignment, "verticalalignment"
                ),
                rotation=0,
                transform=ax.transData,
                bbox=bbox,
                zorder=1,
                clip_on=clip_on,
            )
        else:
            # Regular edge: a CurvedArrowText tracks the (possibly curved)
            # arrow path and can rotate the label parallel to it.
            text_items[edge] = CurvedArrowText(
                arrow,
                label,
                size=get_param_value(font_size, "font_size"),
                color=get_param_value(font_color, "font_color"),
                family=get_param_value(font_family, "font_family"),
                weight=get_param_value(font_weight, "font_weight"),
                alpha=get_param_value(alpha, "alpha"),
                horizontalalignment=get_param_value(
                    horizontalalignment, "horizontalalignment"
                ),
                verticalalignment=get_param_value(
                    verticalalignment, "verticalalignment"
                ),
                transform=ax.transData,
                bbox=bbox,
                zorder=1,
                clip_on=clip_on,
                label_pos=get_param_value(label_pos, "label_pos"),
                labels_horizontal=not get_param_value(rotate, "rotate"),
                ax=ax,
            )

    if hide_ticks:
        ax.tick_params(
            axis="both",
            which="both",
            bottom=False,
            left=False,
            labelbottom=False,
            labelleft=False,
        )

    return text_items
def draw_bipartite(G, **kwargs):
    """Draw the graph `G` arranged in two parallel columns (bipartite layout).

    This convenience function is equivalent to::

        nx.draw(G, pos=nx.bipartite_layout(G), **kwargs)

    Parameters
    ----------
    G : graph
        A networkx graph

    kwargs : optional keywords
        See `draw_networkx` for a description of optional keywords.

    Raises
    ------
    NetworkXError :
        If `G` is not bipartite.

    Notes
    -----
    A fresh layout is computed on every call.  When drawing repeatedly it
    is much more efficient to call
    `~networkx.drawing.layout.bipartite_layout` directly and reuse the
    result::

        >>> G = nx.complete_bipartite_graph(3, 3)
        >>> pos = nx.bipartite_layout(G)
        >>> nx.draw(G, pos=pos)  # Draw the original graph
        >>> # Draw a subgraph, reusing the same node positions
        >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red")

    Examples
    --------
    >>> G = nx.complete_bipartite_graph(2, 5)
    >>> nx.draw_bipartite(G)

    See Also
    --------
    :func:`~networkx.drawing.layout.bipartite_layout`
    """
    # Compute the layout once, then hand all rendering off to ``draw``.
    node_positions = nx.bipartite_layout(G)
    draw(G, pos=node_positions, **kwargs)
def draw_circular(G, **kwargs):
    """Draw the graph `G` with its nodes arranged on a circle.

    This convenience function is equivalent to::

        nx.draw(G, pos=nx.circular_layout(G), **kwargs)

    Parameters
    ----------
    G : graph
        A networkx graph

    kwargs : optional keywords
        See `draw_networkx` for a description of optional keywords.

    Notes
    -----
    A fresh layout is computed on every call.  When drawing repeatedly it
    is much more efficient to call
    `~networkx.drawing.layout.circular_layout` directly and reuse the
    result::

        >>> G = nx.complete_graph(5)
        >>> pos = nx.circular_layout(G)
        >>> nx.draw(G, pos=pos)  # Draw the original graph
        >>> # Draw a subgraph, reusing the same node positions
        >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red")

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> nx.draw_circular(G)

    See Also
    --------
    :func:`~networkx.drawing.layout.circular_layout`
    """
    # Compute the layout once, then hand all rendering off to ``draw``.
    node_positions = nx.circular_layout(G)
    draw(G, pos=node_positions, **kwargs)
def draw_kamada_kawai(G, **kwargs):
    """Draw the graph `G` using the Kamada-Kawai force-directed layout.

    This convenience function is equivalent to::

        nx.draw(G, pos=nx.kamada_kawai_layout(G), **kwargs)

    Parameters
    ----------
    G : graph
        A networkx graph

    kwargs : optional keywords
        See `draw_networkx` for a description of optional keywords.

    Notes
    -----
    A fresh layout is computed on every call.  When drawing repeatedly it
    is much more efficient to call
    `~networkx.drawing.layout.kamada_kawai_layout` directly and reuse the
    result::

        >>> G = nx.complete_graph(5)
        >>> pos = nx.kamada_kawai_layout(G)
        >>> nx.draw(G, pos=pos)  # Draw the original graph
        >>> # Draw a subgraph, reusing the same node positions
        >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red")

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> nx.draw_kamada_kawai(G)

    See Also
    --------
    :func:`~networkx.drawing.layout.kamada_kawai_layout`
    """
    # Compute the layout once, then hand all rendering off to ``draw``.
    node_positions = nx.kamada_kawai_layout(G)
    draw(G, pos=node_positions, **kwargs)
def draw_random(G, **kwargs):
    """Draw the graph `G` with nodes placed uniformly at random.

    This convenience function is equivalent to::

        nx.draw(G, pos=nx.random_layout(G), **kwargs)

    Parameters
    ----------
    G : graph
        A networkx graph

    kwargs : optional keywords
        See `draw_networkx` for a description of optional keywords.

    Notes
    -----
    A fresh layout is computed on every call.  When drawing repeatedly it
    is much more efficient to call
    `~networkx.drawing.layout.random_layout` directly and reuse the
    result::

        >>> G = nx.complete_graph(5)
        >>> pos = nx.random_layout(G)
        >>> nx.draw(G, pos=pos)  # Draw the original graph
        >>> # Draw a subgraph, reusing the same node positions
        >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red")

    Examples
    --------
    >>> G = nx.lollipop_graph(4, 3)
    >>> nx.draw_random(G)

    See Also
    --------
    :func:`~networkx.drawing.layout.random_layout`
    """
    # Compute the layout once, then hand all rendering off to ``draw``.
    node_positions = nx.random_layout(G)
    draw(G, pos=node_positions, **kwargs)
def draw_spectral(G, **kwargs):
    """Draw the graph `G` using a 2D spectral layout.

    This convenience function is equivalent to::

        nx.draw(G, pos=nx.spectral_layout(G), **kwargs)

    For more information about how node positions are determined, see
    `~networkx.drawing.layout.spectral_layout`.

    Parameters
    ----------
    G : graph
        A networkx graph

    kwargs : optional keywords
        See `draw_networkx` for a description of optional keywords.

    Notes
    -----
    A fresh layout is computed on every call.  When drawing repeatedly it
    is much more efficient to call
    `~networkx.drawing.layout.spectral_layout` directly and reuse the
    result::

        >>> G = nx.complete_graph(5)
        >>> pos = nx.spectral_layout(G)
        >>> nx.draw(G, pos=pos)  # Draw the original graph
        >>> # Draw a subgraph, reusing the same node positions
        >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red")

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> nx.draw_spectral(G)

    See Also
    --------
    :func:`~networkx.drawing.layout.spectral_layout`
    """
    # Compute the layout once, then hand all rendering off to ``draw``.
    node_positions = nx.spectral_layout(G)
    draw(G, pos=node_positions, **kwargs)
def draw_spring(G, **kwargs):
    """Draw the graph `G` using a spring (force-directed) layout.

    This convenience function is equivalent to::

        nx.draw(G, pos=nx.spring_layout(G), **kwargs)

    Parameters
    ----------
    G : graph
        A networkx graph

    kwargs : optional keywords
        See `draw_networkx` for a description of optional keywords.

    Notes
    -----
    `~networkx.drawing.layout.spring_layout` is also the default layout for
    `draw`, so this function is equivalent to `draw`.

    A fresh layout is computed on every call.  When drawing repeatedly it
    is much more efficient to call
    `~networkx.drawing.layout.spring_layout` directly and reuse the
    result::

        >>> G = nx.complete_graph(5)
        >>> pos = nx.spring_layout(G)
        >>> nx.draw(G, pos=pos)  # Draw the original graph
        >>> # Draw a subgraph, reusing the same node positions
        >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red")

    Examples
    --------
    >>> G = nx.path_graph(20)
    >>> nx.draw_spring(G)

    See Also
    --------
    draw
    :func:`~networkx.drawing.layout.spring_layout`
    """
    # Compute the layout once, then hand all rendering off to ``draw``.
    node_positions = nx.spring_layout(G)
    draw(G, pos=node_positions, **kwargs)
def draw_shell(G, nlist=None, **kwargs):
    """Draw networkx graph `G` with nodes arranged in concentric shells.

    This convenience function is equivalent to::

        nx.draw(G, pos=nx.shell_layout(G, nlist=nlist), **kwargs)

    Parameters
    ----------
    G : graph
        A networkx graph

    nlist : list of list of nodes, optional
        A list containing lists of nodes representing the shells.
        Default is `None`, meaning all nodes are in a single shell.
        See `~networkx.drawing.layout.shell_layout` for details.

    kwargs : optional keywords
        See `draw_networkx` for a description of optional keywords.

    Notes
    -----
    A fresh layout is computed on every call.  When drawing repeatedly it
    is much more efficient to call
    `~networkx.drawing.layout.shell_layout` directly and reuse the
    result::

        >>> G = nx.complete_graph(5)
        >>> pos = nx.shell_layout(G)
        >>> nx.draw(G, pos=pos)  # Draw the original graph
        >>> # Draw a subgraph, reusing the same node positions
        >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red")

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> shells = [[0], [1, 2, 3]]
    >>> nx.draw_shell(G, nlist=shells)

    See Also
    --------
    :func:`~networkx.drawing.layout.shell_layout`
    """
    # Compute the layout once (forwarding the shell spec), then delegate.
    node_positions = nx.shell_layout(G, nlist=nlist)
    draw(G, pos=node_positions, **kwargs)
def draw_planar(G, **kwargs):
    """Draw a planar networkx graph `G` without edge crossings.

    This convenience function is equivalent to::

        nx.draw(G, pos=nx.planar_layout(G), **kwargs)

    Parameters
    ----------
    G : graph
        A planar networkx graph

    kwargs : optional keywords
        See `draw_networkx` for a description of optional keywords.

    Raises
    ------
    NetworkXException
        When `G` is not planar

    Notes
    -----
    A fresh layout is computed on every call.  When drawing repeatedly it
    is much more efficient to call
    `~networkx.drawing.layout.planar_layout` directly and reuse the
    result::

        >>> G = nx.path_graph(5)
        >>> pos = nx.planar_layout(G)
        >>> nx.draw(G, pos=pos)  # Draw the original graph
        >>> # Draw a subgraph, reusing the same node positions
        >>> nx.draw(G.subgraph([0, 1, 2]), pos=pos, node_color="red")

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> nx.draw_planar(G)

    See Also
    --------
    :func:`~networkx.drawing.layout.planar_layout`
    """
    # Compute the layout once, then hand all rendering off to ``draw``.
    node_positions = nx.planar_layout(G)
    draw(G, pos=node_positions, **kwargs)
def draw_forceatlas2(G, **kwargs):
    """Draw a networkx graph `G` using the ForceAtlas2 layout.

    This convenience function is equivalent to::

        nx.draw(G, pos=nx.forceatlas2_layout(G), **kwargs)

    Parameters
    ----------
    G : graph
        A networkx graph

    kwargs : optional keywords
        See networkx.draw_networkx() for a description of optional keywords,
        with the exception of the pos parameter which is not used by this
        function.
    """
    # Compute the layout once, then hand all rendering off to ``draw``.
    node_positions = nx.forceatlas2_layout(G)
    draw(G, pos=node_positions, **kwargs)
def apply_alpha(colors, alpha, elem_list, cmap=None, vmin=None, vmax=None):
    """Apply an alpha (or list of alphas) to the colors provided.

    Parameters
    ----------

    colors : color string or array of floats (default='r')
        Color of element. Can be a single color format string,
        or a  sequence of colors with the same length as nodelist.
        If numeric values are specified they will be mapped to
        colors using the cmap and vmin,vmax parameters.  See
        matplotlib.scatter for more details.

    alpha : float or array of floats
        Alpha values for elements. This can be a single alpha value, in
        which case it will be applied to all the elements of color. Otherwise,
        if it is an array, the elements of alpha will be applied to the colors
        in order (cycling through alpha multiple times if necessary).

    elem_list : array of networkx objects
        The list of elements which are being colored. These could be nodes,
        edges or labels.

    cmap : matplotlib colormap
        Color map for use if colors is a list of floats corresponding to points
        on a color mapping.

    vmin, vmax : float
        Minimum and maximum values for normalizing colors if a colormap is used

    Returns
    -------

    rgba_colors : numpy ndarray
        Array containing RGBA format values for each of the node colours.

    """
    from itertools import cycle, islice

    import matplotlib as mpl
    import matplotlib.cm  # call as mpl.cm
    import matplotlib.colors  # call as mpl.colors
    import numpy as np

    # If we have been provided with a list of numbers as long as elem_list,
    # apply the color mapping.
    # NOTE(review): ``colors[0]`` assumes a non-empty ``colors`` sequence when
    # ``elem_list`` is non-empty -- presumably guaranteed by callers; verify.
    if len(colors) == len(elem_list) and isinstance(colors[0], Number):
        mapper = mpl.cm.ScalarMappable(cmap=cmap)
        mapper.set_clim(vmin, vmax)
        rgba_colors = mapper.to_rgba(colors)
    # Otherwise, convert colors to matplotlib's RGB using the colorConverter
    # object.  These are converted to numpy ndarrays to be consistent with the
    # to_rgba method of ScalarMappable.
    else:
        try:
            # Single color spec (e.g. 'r' or an (r, g, b) tuple): wrap in a
            # one-row array.
            rgba_colors = np.array([mpl.colors.colorConverter.to_rgba(colors)])
        except ValueError:
            # A sequence of color specs: convert each one individually.
            rgba_colors = np.array(
                [mpl.colors.colorConverter.to_rgba(color) for color in colors]
            )
    # Set the final column of the rgba_colors to have the relevant alpha values
    try:
        # If alpha is longer than the number of colors, resize to the number of
        # elements.  Also, if rgba_colors.size (the number of elements of
        # rgba_colors) is the same as the number of elements, resize the array,
        # to avoid it being interpreted as a colormap by scatter()
        if len(alpha) > len(rgba_colors) or rgba_colors.size == len(elem_list):
            rgba_colors = np.resize(rgba_colors, (len(elem_list), 4))
            # ``np.resize`` repeats the flat data, so rebroadcast the RGB
            # channels of the first color across all rows; only alpha varies.
            rgba_colors[1:, 0] = rgba_colors[0, 0]
            rgba_colors[1:, 1] = rgba_colors[0, 1]
            rgba_colors[1:, 2] = rgba_colors[0, 2]
        # Cycle the alpha sequence so every row receives a value.
        rgba_colors[:, 3] = list(islice(cycle(alpha), len(rgba_colors)))
    except TypeError:
        # ``alpha`` is a scalar (no ``len``): broadcast it to every row.
        rgba_colors[:, -1] = alpha
    return rgba_colors
| FancyArrowFactory |
python | huggingface__transformers | tests/models/paligemma/test_modeling_paligemma.py | {
"start": 13136,
"end": 25656
} | class ____(unittest.TestCase):
def setUp(self):
self.processor = PaliGemmaProcessor.from_pretrained("google/paligemma-3b-pt-224")
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_small_model_integration_test(self):
# Let' s make sure we test the preprocessing to replace what is used
model_id = "google/paligemma-3b-pt-224"
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
prompt = ""
image_file = (
"https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
)
raw_image = Image.open(requests.get(image_file, stream=True).raw)
inputs = self.processor(images=raw_image, text=prompt, return_tensors="pt")
EXPECTED_INPUT_IDS = torch.tensor([[257152] * 256 + [2, 108]])
self.assertTrue(torch.equal(inputs["input_ids"], EXPECTED_INPUT_IDS))
output = model.generate(**inputs, max_new_tokens=20)
EXPECTED_DECODED_TEXT = "\ncow on the beach" # fmt: skip
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
def test_small_model_integration_test_multiimage(self):
model_id = "google/paligemma-3b-ft-nlvr2-448" # checkpoint tuned for multiple images
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
processor = PaliGemmaProcessor.from_pretrained(model_id)
prompt = "answer en There is no snowman in any of the images. Is this true or false?"
stop_sign_image = Image.open(
requests.get(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
stream=True,
).raw
)
snow_image = Image.open(
requests.get(
"https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg", stream=True
).raw
)
inputs = processor(text=prompt, images=[[snow_image, snow_image]], return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=20)
EXPECTED_DECODED_TEXT = "answer en There is no snowman in any of the images. Is this true or false?\nFalse"
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
# try another prompt with two different image this time
prompt = "answer en There is exactly one snowman. Is this true or false?"
inputs = processor(text=prompt, images=[[snow_image, stop_sign_image]], return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=20)
EXPECTED_DECODED_TEXT = "answer en There is exactly one snowman. Is this true or false?\nTrue"
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
def test_small_model_integration_test_paligemma_VQA(self):
# Let' s make sure we test the preprocessing to replace what is used
model_id = "google/paligemma-3b-pt-224"
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
prompt = "answer en Where is the cow standing?"
image_file = (
"https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
)
raw_image = Image.open(requests.get(image_file, stream=True).raw)
inputs = self.processor(images=raw_image, text=prompt, return_tensors="pt").to(torch.float16)
output = model.generate(**inputs, max_new_tokens=900, do_sample=False)
EXPECTED_DECODED_TEXT = "answer en Where is the cow standing?\nbeach" # fmt: skip
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
def test_small_model_integration_test_paligemma_empty_prompt(self):
# Let' s make sure we test the preprocessing to replace what is used
model_id = "google/paligemma-3b-pt-224"
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
prompt = ""
image_file = (
"https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
)
raw_image = Image.open(requests.get(image_file, stream=True).raw)
inputs = self.processor(images=raw_image, text=prompt, return_tensors="pt").to(torch.float16)
output = model.generate(**inputs, max_new_tokens=900, do_sample=False)
EXPECTED_DECODED_TEXT = "\ncow on the beach" # fmt: skip
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
def test_small_model_integration_test_paligemma_batched(self):
# Let' s make sure we test the preprocessing to replace what is used
model_id = "google/paligemma-3b-pt-224"
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
prompts = [
"answer en Where is the cow standing?",
"",
]
image1 = Image.open(
requests.get(
"https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
stream=True,
).raw
)
image2 = image1
inputs = self.processor(images=[image1, image2], text=prompts, return_tensors="pt", padding=True)
output = model.generate(**inputs, max_new_tokens=20)
EXPECTED_DECODED_TEXT = ["answer en Where is the cow standing?\nbeach", "\ncow on the beach"] # fmt: skip
self.assertEqual(self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT)
def test_small_model_integration_test_paligemma_batched_bf16(self):
# Let' s make sure we test the preprocessing to replace what is used
model_id = "google/paligemma-3b-pt-224"
model = PaliGemmaForConditionalGeneration.from_pretrained(
model_id, revision="bfloat16", dtype=torch.bfloat16
).to(torch_device)
# The first batch is longer in terms of text, the second will be padded.
prompts = [
"answer en Where is the cow standing?",
"",
]
image1 = Image.open(
requests.get(
"https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
stream=True,
).raw
)
image2 = image1
inputs = (
self.processor(images=[image1, image2], text=prompts, return_tensors="pt", padding=True)
.to(torch.bfloat16)
.to(torch_device)
)
output = model.generate(**inputs, max_new_tokens=20)
EXPECTED_DECODED_TEXT = ["answer en Where is the cow standing?\nbeach", "\ncow on the beach"] # fmt: skip
self.assertEqual(self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT)
def test_small_model_integration_test_paligemma_batched_f16(self):
# Let' s make sure we test the preprocessing to replace what is used
model_id = "google/paligemma-3b-pt-224"
model = PaliGemmaForConditionalGeneration.from_pretrained(
model_id, revision="float16", dtype=torch.float16
).to(torch_device)
# The first batch is longer in terms of text, the second will be padded.
prompts = [
"answer en Where is the cow standing?",
"",
]
image1 = Image.open(
requests.get(
"https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
stream=True,
).raw
)
image2 = image1
inputs = (
self.processor(images=[image1, image2], text=prompts, return_tensors="pt", padding=True)
.to(torch.float16)
.to(torch_device)
)
output = model.generate(**inputs, max_new_tokens=20)
EXPECTED_DECODED_TEXT = ["answer en Where is the cow standing?\nbeach", "\ncow on the beach"] # fmt: skip
self.assertEqual(self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT)
def test_integration_detection_bug(self):
# this is a reproducer of https://github.com/huggingface/transformers/issues/31425 where not enough context
# impacted negatively segmentation generations.
model_id = "google/paligemma-3b-pt-224"
model = PaliGemmaForConditionalGeneration.from_pretrained(
model_id, revision="bfloat16", dtype=torch.bfloat16
).to(torch_device)
prompt = ("detect shoe",)
image = Image.open(
requests.get(
"https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/shoe.png",
stream=True,
).raw
)
inputs = self.processor(images=image, text=prompt, return_tensors="pt").to(torch.bfloat16).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20)
expected_decoded_texts = Expectations(
{
("rocm", (9, 5)): "detect shoe\n<loc0051><loc0309><loc0708><loc0644> shoe",
(None, None): "detect shoe\n<loc0051><loc0309><loc0708><loc0646> shoe",
("cuda", 8): "detect shoe\n<loc0051><loc0309><loc0708><loc0646> shoe",
}
) # fmt: skip
EXPECTED_DECODED_TEXT = expected_decoded_texts.get_expectation()
self.assertEqual(self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT)
def test_paligemma_index_error_bug(self):
# This is a reproducer of https://github.com/huggingface/transformers/pull/28032 and makes sure it does not happen anymore
# Please refer to that PR, or specifically https://github.com/huggingface/transformers/pull/28032#issuecomment-1860650043 for
# more details
model_id = "google/paligemma-3b-pt-224"
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
# Simulate a super long prompt
prompt = "\n" * 200
image_file = (
"https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
)
raw_image = Image.open(requests.get(image_file, stream=True).raw)
inputs = self.processor(
images=raw_image,
text=prompt,
return_tensors="pt",
).to(torch.float16)
# Make sure that `generate` works
_ = model.generate(**inputs, max_new_tokens=20)
def test_paligemma_finetuning_with_suffixes_bf16(self):
# this is a supplementary test to ensure paligemma fine-tuning that relies on token_type_ids is robust to future changes
model_id = "google/paligemma-3b-pt-224"
model = PaliGemmaForConditionalGeneration.from_pretrained(
model_id, revision="bfloat16", dtype=torch.bfloat16
).to(torch_device)
# The first batch is longer in terms of text, the second will be padded.
prompts = [
"answer en Where is the cow standing?",
"",
]
suffixes = ["beach", "cow standing on the beach"]
image1 = Image.open(
requests.get(
"https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
stream=True,
).raw
)
image2 = image1
inputs = (
self.processor(images=[image1, image2], text=prompts, suffix=suffixes, return_tensors="pt", padding=True)
.to(torch.bfloat16)
.to(torch_device)
)
expected_labels = torch.tensor(
[266 * [-100] + [54901, 1], 262 * [-100] + [14706, 9980, 611, 573, 8318, 1]]
).to(torch_device)
assert torch.equal(inputs["labels"], expected_labels)
expected_token_type_ids = torch.tensor([266 * [0] + 2 * [1], 262 * [0] + 6 * [1]]).to(torch_device)
assert torch.equal(inputs["token_type_ids"], expected_token_type_ids)
output = model(**inputs)
# check that loss does not error out
_ = output.loss
| PaliGemmaForConditionalGenerationIntegrationTest |
python | vyperlang__vyper | vyper/venom/context.py | {
"start": 569,
"end": 849
} | class ____:
label: IRLabel
data_items: list[DataItem] = field(default_factory=list)
def __str__(self):
ret = [f"dbsection {self.label.value}:"]
for item in self.data_items:
ret.append(f" db {item}")
return "\n".join(ret)
| DataSection |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py | {
"start": 11773,
"end": 13040
} | class ____(Benchmark):
r"""
Mishra 6 objective function.
This class defines the Mishra 6 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Mishra06}}(x) = -\log{\left [ \sin^2 ((\cos(x_1)
+ \cos(x_2))^2) - \cos^2 ((\sin(x_1) + \sin(x_2))^2) + x_1 \right ]^2}
+ 0.01 \left[(x_1 -1)^2 + (x_2 - 1)^2 \right]
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x_i) = -2.28395` for :math:`x = [2.88631, 1.82326]`
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
TODO line 397
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[2.88631, 1.82326]]
self.fglob = -2.28395
def fun(self, x, *args):
self.nfev += 1
a = 0.1 * ((x[0] - 1) ** 2 + (x[1] - 1) ** 2)
u = (cos(x[0]) + cos(x[1])) ** 2
v = (sin(x[0]) + sin(x[1])) ** 2
return a - log((sin(u) ** 2 - cos(v) ** 2 + x[0]) ** 2)
| Mishra06 |
python | gevent__gevent | src/gevent/tests/lock_tests.py | {
"start": 1507,
"end": 1743
} | class ____(TimeAssertMixin, unittest.TestCase):
def setUp(self):
self._threads = support.threading_setup()
def tearDown(self):
support.threading_cleanup(*self._threads)
support.reap_children()
| BaseTestCase |
python | kamyu104__LeetCode-Solutions | Python/maximum-calories-burnt-from-jumps.py | {
"start": 48,
"end": 601
} | class ____(object):
def maxCaloriesBurnt(self, heights):
"""
:type heights: List[int]
:rtype: int
"""
heights.sort()
left, right = 0, len(heights)-1
result = (0-heights[right])**2
while left != right:
result += (heights[right]-heights[left])**2
right -= 1
if left == right:
break
result += (heights[left]-heights[right])**2
left += 1
return result
# Time: O(nlogn)
# Space: O(1)
# sort, greedy
| Solution |
python | ray-project__ray | python/ray/data/tests/test_map.py | {
"start": 28356,
"end": 33840
} | class ____:
def __init__(self):
self.data = large_object
def __call__(self, batch):
return batch
ds = ray.data.range(1)
ds = ds.map_batches(LargeUDF, concurrency=1)
assert ds.take_all() == [{"id": 0}]
"""
output = run_string_as_driver(driver)
assert "The UDF of operator MapBatches(LargeUDF) is too large" in output
# NOTE: All tests above share a Ray cluster, while the tests below do not. These
# tests should only be carefully reordered to retain this invariant!
def test_actor_udf_cleanup(
shutdown_only,
tmp_path,
restore_data_context,
target_max_block_size_infinite_or_default,
):
"""Test that for the actor map operator, the UDF object is deleted properly."""
ray.shutdown()
ray.init(num_cpus=2)
ctx = DataContext.get_current()
ctx._enable_actor_pool_on_exit_hook = True
test_file = tmp_path / "test.txt"
# Simulate the case that the UDF depends on some external resources that
# need to be cleaned up.
class StatefulUDF:
def __init__(self):
with open(test_file, "w") as f:
f.write("test")
def __call__(self, row):
return row
def __del__(self):
# Delete the file when the UDF is deleted.
os.remove(test_file)
ds = ray.data.range(10)
ds = ds.map(StatefulUDF, concurrency=1)
assert sorted(extract_values("id", ds.take_all())) == list(range(10))
wait_for_condition(lambda: not os.path.exists(test_file))
def test_actor_pool_strategy_default_num_actors(
shutdown_only, target_max_block_size_infinite_or_default
):
import time
class UDFClass:
def __call__(self, x):
time.sleep(1)
return x
num_cpus = 5
ray.shutdown()
ray.init(num_cpus=num_cpus)
compute_strategy = ray.data.ActorPoolStrategy()
ray.data.range(10, override_num_blocks=10).map_batches(
UDFClass, compute=compute_strategy, batch_size=1
).materialize()
def test_actor_pool_strategy_bundles_to_max_actors(
shutdown_only, target_max_block_size_infinite_or_default
):
"""Tests that blocks are bundled up to the specified max number of actors."""
class UDFClass:
def __call__(self, x):
return x
max_size = 2
ds = (
ray.data.range(10, override_num_blocks=10)
.map_batches(UDFClass, batch_size=None, concurrency=max_size)
.materialize()
)
# Check batch size is still respected.
ds = (
ray.data.range(10, override_num_blocks=10)
.map_batches(UDFClass, batch_size=10, concurrency=max_size)
.materialize()
)
assert "1 blocks" in ds.stats()
def test_nonserializable_map_batches(
shutdown_only, target_max_block_size_infinite_or_default
):
import threading
lock = threading.Lock()
x = ray.data.range(10)
# Check that the `inspect_serializability` trace was printed
with pytest.raises(TypeError, match=r".*was found to be non-serializable.*"):
x.map_batches(lambda _: lock).take(1)
@pytest.mark.parametrize("udf_kind", ["coroutine", "async_gen"])
def test_async_map_batches(
shutdown_only, udf_kind, target_max_block_size_infinite_or_default
):
ray.shutdown()
ray.init(num_cpus=10)
class AsyncActor:
def __init__(self):
pass
if udf_kind == "async_gen":
async def __call__(self, batch):
for i in batch["id"]:
await asyncio.sleep((i % 5) / 100)
yield {"input": [i], "output": [2**i]}
elif udf_kind == "coroutine":
async def __call__(self, batch):
await asyncio.sleep(random.randint(0, 5) / 100)
return {
"input": list(batch["id"]),
"output": [2**i for i in batch["id"]],
}
else:
pytest.fail(f"Unknown udf_kind: {udf_kind}")
n = 10
ds = ray.data.range(n, override_num_blocks=2)
ds = ds.map(lambda x: x)
ds = ds.map_batches(AsyncActor, batch_size=1, concurrency=1, max_concurrency=2)
start_t = time.time()
output = ds.take_all()
runtime = time.time() - start_t
assert runtime < sum(range(n)), runtime
expected_output = [{"input": i, "output": 2**i} for i in range(n)]
assert sorted(output, key=lambda row: row["input"]) == expected_output, (
output,
expected_output,
)
@pytest.mark.parametrize("udf_kind", ["coroutine", "async_gen"])
def test_async_flat_map(
shutdown_only, udf_kind, target_max_block_size_infinite_or_default
):
class AsyncActor:
def __init__(self):
pass
if udf_kind == "async_gen":
async def __call__(self, row):
id = row["id"]
yield {"id": id}
await asyncio.sleep(random.randint(0, 5) / 100)
yield {"id": id + 1}
elif udf_kind == "coroutine":
async def __call__(self, row):
id = row["id"]
await asyncio.sleep(random.randint(0, 5) / 100)
return [{"id": id}, {"id": id + 1}]
else:
pytest.fail(f"Unknown udf_kind: {udf_kind}")
n = 10
ds = ray.data.from_items([{"id": i} for i in range(0, n, 2)])
ds = ds.flat_map(AsyncActor, concurrency=1, max_concurrency=2)
output = ds.take_all()
assert sorted(extract_values("id", output)) == list(range(n))
| LargeUDF |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_validators.py | {
"start": 6166,
"end": 10947
} | class ____(BaseValidatorTest):
def setUp(self) -> None:
super().setUp()
self.project = self.create_project()
self.context = {
"organization": self.project.organization,
"project": self.project,
"request": self.make_request(user=self.user),
}
self.valid_data = {
"name": "Test Detector",
"type": MetricIssue.slug,
"dataSources": [
{
"field1": "test",
"field2": 123,
}
],
"conditionGroup": {
"id": self.data_condition_group.id,
"organizationId": self.organization.id,
"logicType": self.data_condition_group.logic_type,
"conditions": [
{
"type": Condition.GREATER_OR_EQUAL,
"comparison": 100,
"condition_result": DetectorPriorityLevel.HIGH,
"conditionGroupId": self.data_condition_group.id,
}
],
},
"config": {
"threshold_period": 1,
"detection_type": AlertRuleDetectionType.STATIC.value,
},
}
# TODO - Refactor into multiple tests - basically where there are comment blocks
@mock.patch("sentry.workflow_engine.endpoints.validators.base.detector.create_audit_entry")
def test_create_with_mock_validator(self, mock_audit: mock.MagicMock) -> None:
validator = MockDetectorValidator(data=self.valid_data, context=self.context)
assert validator.is_valid(), validator.errors
detector = validator.save()
# Verify detector in DB
detector = Detector.objects.get(id=detector.id)
assert detector.name == "Test Detector"
assert detector.type == MetricIssue.slug
assert detector.project_id == self.project.id
assert detector.created_by_id == self.user.id
# Verify data source in DB
data_source = DataSource.objects.get(detector=detector)
assert data_source.type == data_source_type_registry.get_key(
QuerySubscriptionDataSourceHandler
)
assert data_source.organization_id == self.project.organization_id
# Verify condition group in DB
condition_group = DataConditionGroup.objects.get(id=detector.workflow_condition_group_id)
assert condition_group.logic_type == DataConditionGroup.Type.ANY
assert condition_group.organization_id == self.project.organization_id
# Verify conditions in DB
conditions = list(DataCondition.objects.filter(condition_group=condition_group))
assert len(conditions) == 1
condition = conditions[0]
assert condition.type == "gte"
assert condition.comparison == 100
assert condition.condition_result == DetectorPriorityLevel.HIGH
mock_audit.assert_called_once_with(
request=self.context["request"],
organization=self.project.organization,
target_object=detector.id,
event=audit_log.get_event_id("DETECTOR_ADD"),
data=detector.get_audit_log_data(),
)
def test_validate_type_unknown(self) -> None:
validator = MockDetectorValidator(data={**self.valid_data, "type": "unknown_type"})
assert not validator.is_valid()
assert validator.errors.get("type") == [
ErrorDetail(string="Unknown detector type 'unknown_type'", code="invalid")
], validator.errors
def test_validate_type_incompatible(self) -> None:
with mock.patch("sentry.issues.grouptype.registry.get_by_slug") as mock_get:
mock_get.return_value = mock.Mock(detector_settings=None)
validator = MockDetectorValidator(data={**self.valid_data, "type": "incompatible_type"})
assert not validator.is_valid()
assert validator.errors.get("type") == [
ErrorDetail(string="Detector type not compatible with detectors", code="invalid")
]
def test_delete(self) -> None:
"""Test that delete() schedules the detector for deletion"""
validator = MockDetectorValidator(data=self.valid_data, context=self.context)
assert validator.is_valid()
detector = validator.save()
delete_validator = MockDetectorValidator(instance=detector, data={}, context=self.context)
delete_validator.delete()
assert RegionScheduledDeletion.objects.filter(
model_name="Detector", object_id=detector.id
).exists()
detector.refresh_from_db()
assert detector.status == ObjectStatus.PENDING_DELETION
| DetectorValidatorTest |
python | getsentry__sentry | tests/sentry/incidents/test_logic.py | {
"start": 90099,
"end": 91421
} | class ____(TestCase, BaseIncidentsTest):
def setUp(self) -> None:
self.alert_rule = self.create_alert_rule()
def test_enable(self) -> None:
with self.tasks():
disable_alert_rule(self.alert_rule)
alert_rule = AlertRule.objects.get(id=self.alert_rule.id)
assert alert_rule.status == AlertRuleStatus.DISABLED.value
for subscription in alert_rule.snuba_query.subscriptions.all():
assert subscription.status == QuerySubscription.Status.DISABLED.value
enable_alert_rule(self.alert_rule)
alert_rule = AlertRule.objects.get(id=self.alert_rule.id)
assert alert_rule.status == AlertRuleStatus.PENDING.value
for subscription in alert_rule.snuba_query.subscriptions.all():
assert subscription.status == QuerySubscription.Status.ACTIVE.value
def test_disable(self) -> None:
with self.tasks():
disable_alert_rule(self.alert_rule)
alert_rule = AlertRule.objects.get(id=self.alert_rule.id)
assert alert_rule.status == AlertRuleStatus.DISABLED.value
for subscription in alert_rule.snuba_query.subscriptions.all():
assert subscription.status == QuerySubscription.Status.DISABLED.value
| EnableDisableAlertRuleTest |
python | etianen__django-reversion | reversion/management/commands/__init__.py | {
"start": 209,
"end": 2046
} | class ____(BaseCommand):
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"app_label",
metavar="app_label",
nargs="*",
help="Optional app_label or app_label.model_name list.",
)
parser.add_argument(
"--using",
default=None,
help="The database to query for revision data.",
)
parser.add_argument(
"--model-db",
default=None,
help="The database to query for model data.",
)
def get_models(self, options):
# Load admin classes.
if "django.contrib.admin" in settings.INSTALLED_APPS:
admin.autodiscover()
# Get options.
app_labels = options["app_label"]
# Parse model classes.
if len(app_labels) == 0:
selected_models = apps.get_models()
else:
selected_models = set()
for label in app_labels:
if "." in label:
# This is an app.Model specifier.
try:
model = apps.get_model(label)
except LookupError:
raise CommandError(f"Unknown model: {label}")
selected_models.add(model)
else:
# This is just an app - no model qualifier.
app_label = label
try:
app = apps.get_app_config(app_label)
except LookupError:
raise CommandError(f"Unknown app: {app_label}")
selected_models.update(app.get_models())
for model in selected_models:
if is_registered(model):
yield model
| BaseRevisionCommand |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/data.py | {
"start": 13567,
"end": 14377
} | class ____(SpanProperty):
def __init__(self, spans: "Spans") -> None:
super().__init__(spans)
self.groups: dict[int, set[tuple[int, int]]] = defaultdict(set)
def start_span(self, i: int, label_index: int) -> None:
# TODO should we discard start == end cases? occurs for eg st.data()
# which is conditionally or never drawn from. arguably swapping
# nodes with the empty list is a useful mutation enabled by start == end?
key = (self.spans[i].start, self.spans[i].end)
self.groups[label_index].add(key)
def finish(self) -> Iterable[set[tuple[int, int]]]:
# Discard groups with only one span, since the mutator can't
# do anything useful with them.
return [g for g in self.groups.values() if len(g) >= 2]
| _mutator_groups |
python | huggingface__transformers | src/transformers/models/cohere/modeling_cohere.py | {
"start": 2347,
"end": 3155
} | class ____(nn.Module):
def __init__(self, hidden_size=None, eps=1e-5, bias=False):
"""The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
mean = hidden_states.mean(-1, keepdim=True)
variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
hidden_states = self.weight.to(torch.float32) * hidden_states
return hidden_states.to(input_dtype)
| CohereLayerNorm |
python | pandas-dev__pandas | pandas/tests/indexes/test_setops.py | {
"start": 6154,
"end": 23057
} | class ____:
# Set operation tests shared by all indexes in the `index` fixture
@pytest.mark.parametrize("case", [0.5, "xxx"])
@pytest.mark.parametrize(
"method", ["intersection", "union", "difference", "symmetric_difference"]
)
def test_set_ops_error_cases(self, case, method, index):
# non-iterable input
msg = "Input must be Index or array-like"
with pytest.raises(TypeError, match=msg):
getattr(index, method)(case)
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_intersection_base(self, index):
if isinstance(index, CategoricalIndex):
pytest.skip(f"Not relevant for {type(index).__name__}")
first = index[:5].unique()
second = index[:3].unique()
intersect = first.intersection(second)
tm.assert_index_equal(intersect, second)
if isinstance(index.dtype, DatetimeTZDtype):
# The second.values below will drop tz, so the rest of this test
# is not applicable.
return
# GH#10149
cases = [second.to_numpy(), second.to_series(), second.to_list()]
for case in cases:
result = first.intersection(case)
assert equal_contents(result, second)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.intersection([1, 2, 3])
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_union_base(self, index):
index = index.unique()
first = index[3:]
second = index[:5]
everything = index
union = first.union(second)
tm.assert_index_equal(union.sort_values(), everything.sort_values())
if isinstance(index.dtype, DatetimeTZDtype):
# The second.values below will drop tz, so the rest of this test
# is not applicable.
return
# GH#10149
cases = [second.to_numpy(), second.to_series(), second.to_list()]
for case in cases:
result = first.union(case)
assert equal_contents(result, everything)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.union([1, 2, 3])
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_difference_base(self, sort, index):
first = index[2:]
second = index[:4]
if index.inferred_type == "boolean":
# i think (TODO: be sure) there assumptions baked in about
# the index fixture that don't hold here?
answer = set(first).difference(set(second))
elif isinstance(index, CategoricalIndex):
answer = []
else:
answer = index[4:]
result = first.difference(second, sort)
assert equal_contents(result, answer)
# GH#10149
cases = [second.to_numpy(), second.to_series(), second.to_list()]
for case in cases:
result = first.difference(case, sort)
assert equal_contents(result, answer)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.difference([1, 2, 3], sort)
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_symmetric_difference(self, index, using_infer_string, request):
if (
using_infer_string
and index.dtype == "object"
and index.inferred_type == "string"
):
request.applymarker(pytest.mark.xfail(reason="TODO: infer_string"))
if isinstance(index, CategoricalIndex):
pytest.skip(f"Not relevant for {type(index).__name__}")
if len(index) < 2:
pytest.skip("Too few values for test")
if index[0] in index[1:] or index[-1] in index[:-1]:
# index fixture has e.g. an index of bools that does not satisfy this,
# another with [0, 0, 1, 1, 2, 2]
pytest.skip("Index values no not satisfy test condition.")
first = index[1:]
second = index[:-1]
answer = index[[0, -1]]
result = first.symmetric_difference(second)
tm.assert_index_equal(result.sort_values(), answer.sort_values())
# GH#10149
cases = [second.to_numpy(), second.to_series(), second.to_list()]
for case in cases:
result = first.symmetric_difference(case)
assert equal_contents(result, answer)
if isinstance(index, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.symmetric_difference([1, 2, 3])
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_union(self, index_flat, fname, sname, expected_name):
# GH#9943, GH#9862
# Test unions with various name combinations
# Do not test MultiIndex or repeats
if not index_flat.is_unique:
index = index_flat.unique()
else:
index = index_flat
# Test copy.union(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test copy.union(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_union_unequal(self, index_flat, fname, sname, expected_name):
if not index_flat.is_unique:
index = index_flat.unique()
else:
index = index_flat
# test copy.union(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
union = first.union(second).sort_values()
expected = index.set_names(expected_name).sort_values()
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_intersect(self, index_flat, fname, sname, expected_name):
# GH#35847
# Test intersections with various name combinations
if not index_flat.is_unique:
index = index_flat.unique()
else:
index = index_flat
# Test copy.intersection(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test copy.intersection(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_intersect_unequal(self, index_flat, fname, sname, expected_name):
if not index_flat.is_unique:
index = index_flat.unique()
else:
index = index_flat
# test copy.intersection(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
intersect = first.intersection(second).sort_values()
expected = index[1:].set_names(expected_name).sort_values()
tm.assert_index_equal(intersect, expected)
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_intersection_name_retention_with_nameless(self, index):
if isinstance(index, MultiIndex):
index = index.rename(list(range(index.nlevels)))
else:
index = index.rename("foo")
other = np.asarray(index)
result = index.intersection(other)
assert result.name == index.name
# empty other, same dtype
result = index.intersection(other[:0])
assert result.name == index.name
# empty `self`
result = index[:0].intersection(other)
assert result.name == index.name
def test_difference_preserves_type_empty(self, index, sort):
# GH#20040
# If taking difference of a set and itself, it
# needs to preserve the type of the index
if not index.is_unique:
pytest.skip("Not relevant since index is not unique")
result = index.difference(index, sort=sort)
expected = index[:0]
tm.assert_index_equal(result, expected, exact=True)
def test_difference_name_retention_equals(self, index, names):
if isinstance(index, MultiIndex):
names = [[x] * index.nlevels for x in names]
index = index.rename(names[0])
other = index.rename(names[1])
assert index.equals(other)
result = index.difference(other)
expected = index[:0].rename(names[2])
tm.assert_index_equal(result, expected)
def test_intersection_difference_match_empty(self, index, sort):
# GH#20040
# Test that the intersection of an index with an
# empty index produces the same index as the difference
# of an index with itself. Test for all types
if not index.is_unique:
pytest.skip("Not relevant because index is not unique")
inter = index.intersection(index[:0])
diff = index.difference(index, sort=sort)
tm.assert_index_equal(inter, diff, exact=True)
@pytest.mark.filterwarnings("ignore:invalid value encountered in cast:RuntimeWarning")
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
@pytest.mark.parametrize(
"method", ["intersection", "union", "difference", "symmetric_difference"]
)
def test_setop_with_categorical(index_flat, sort, method, using_infer_string):
# MultiIndex tested separately in tests.indexes.multi.test_setops
index = index_flat
other = index.astype("category")
exact = "equiv" if isinstance(index, RangeIndex) else True
result = getattr(index, method)(other, sort=sort)
expected = getattr(index, method)(index, sort=sort)
if (
using_infer_string
and index.empty
and method in ("union", "symmetric_difference")
):
expected = expected.astype("category")
tm.assert_index_equal(result, expected, exact=exact)
result = getattr(index, method)(other[:5], sort=sort)
expected = getattr(index, method)(index[:5], sort=sort)
if (
using_infer_string
and index.empty
and method in ("union", "symmetric_difference")
):
expected = expected.astype("category")
tm.assert_index_equal(result, expected, exact=exact)
def test_intersection_duplicates_all_indexes(index):
# GH#38743
if index.empty:
# No duplicates in empty indexes
pytest.skip("Not relevant for empty Index")
idx = index
idx_non_unique = idx[[0, 0, 1, 2]]
assert idx.intersection(idx_non_unique).equals(idx_non_unique.intersection(idx))
assert idx.intersection(idx_non_unique).is_unique
def test_union_duplicate_index_subsets_of_each_other(
any_dtype_for_small_pos_integer_indexes,
):
# GH#31326
dtype = any_dtype_for_small_pos_integer_indexes
a = Index([1, 2, 2, 3], dtype=dtype)
b = Index([3, 3, 4], dtype=dtype)
expected = Index([1, 2, 2, 3, 3, 4], dtype=dtype)
if isinstance(a, CategoricalIndex):
expected = Index([1, 2, 2, 3, 3, 4])
result = a.union(b)
tm.assert_index_equal(result, expected)
result = a.union(b, sort=False)
tm.assert_index_equal(result, expected)
def test_union_with_duplicate_index_and_non_monotonic(
any_dtype_for_small_pos_integer_indexes,
):
# GH#36289
dtype = any_dtype_for_small_pos_integer_indexes
a = Index([1, 0, 0], dtype=dtype)
b = Index([0, 1], dtype=dtype)
expected = Index([0, 0, 1], dtype=dtype)
result = a.union(b)
tm.assert_index_equal(result, expected)
result = b.union(a)
tm.assert_index_equal(result, expected)
def test_union_duplicate_index_different_dtypes():
# GH#36289
a = Index([1, 2, 2, 3])
b = Index(["1", "0", "0"])
expected = Index([1, 2, 2, 3, "1", "0", "0"])
result = a.union(b, sort=False)
tm.assert_index_equal(result, expected)
def test_union_same_value_duplicated_in_both():
# GH#36289
a = Index([0, 0, 1])
b = Index([0, 0, 1, 2])
result = a.union(b)
expected = Index([0, 0, 1, 2])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dup", [1, np.nan])
def test_union_nan_in_both(dup):
# GH#36289
a = Index([np.nan, 1, 2, 2])
b = Index([np.nan, dup, 1, 2])
result = a.union(b, sort=False)
expected = Index([np.nan, dup, 1.0, 2.0, 2.0])
tm.assert_index_equal(result, expected)
def test_union_rangeindex_sort_true():
# GH 53490
idx1 = RangeIndex(1, 100, 6)
idx2 = RangeIndex(1, 50, 3)
result = idx1.union(idx2, sort=True)
expected = Index(
[
1,
4,
7,
10,
13,
16,
19,
22,
25,
28,
31,
34,
37,
40,
43,
46,
49,
55,
61,
67,
73,
79,
85,
91,
97,
]
)
tm.assert_index_equal(result, expected)
def test_union_with_duplicate_index_not_subset_and_non_monotonic(
any_dtype_for_small_pos_integer_indexes,
):
# GH#36289
dtype = any_dtype_for_small_pos_integer_indexes
a = Index([1, 0, 2], dtype=dtype)
b = Index([0, 0, 1], dtype=dtype)
expected = Index([0, 0, 1, 2], dtype=dtype)
if isinstance(a, CategoricalIndex):
expected = Index([0, 0, 1, 2])
result = a.union(b)
tm.assert_index_equal(result, expected)
result = b.union(a)
tm.assert_index_equal(result, expected)
def test_union_int_categorical_with_nan():
ci = CategoricalIndex([1, 2, np.nan])
assert ci.categories.dtype.kind == "i"
idx = Index([1, 2])
result = idx.union(ci)
expected = Index([1, 2, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = ci.union(idx)
tm.assert_index_equal(result, expected)
| TestSetOps |
python | huggingface__transformers | tests/quantization/bnb/test_4bit.py | {
"start": 25525,
"end": 25766
} | class ____(Bnb4BitTest):
model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
EXPECTED_RELATIVE_DIFFERENCE = 2.9461410686392764
@require_bitsandbytes
@require_accelerate
@require_torch
@slow
@apply_skip_if_not_implemented
| Bnb4BitLlamaTest |
python | fastai__fastai | fastai/torch_core.py | {
"start": 4954,
"end": 5221
} | class ____(ArrayBase):
"Base class for arrays representing images"
_show_args = {'cmap':'viridis'}
def show(self, ctx=None, **kwargs):
return show_image(self, ctx=ctx, **{**self._show_args, **kwargs})
# %% ../nbs/00_torch_core.ipynb 29
| ArrayImageBase |
python | cython__cython | Cython/Tests/xmlrunner.py | {
"start": 1677,
"end": 2937
} | class ____:
"""This class is used to keep useful information about the execution of a
test method.
"""
# Possible test outcomes
(SUCCESS, FAILURE, ERROR) = range(3)
def __init__(self, test_result, test_method, outcome=SUCCESS, err=None):
"Create a new instance of _TestInfo."
self.test_result = test_result
self.test_method = test_method
self.outcome = outcome
self.err = err
self.stdout = test_result.stdout and test_result.stdout.getvalue().strip() or ''
self.stderr = test_result.stdout and test_result.stderr.getvalue().strip() or ''
def get_elapsed_time(self):
"""Return the time that shows how long the test method took to
execute.
"""
return self.test_result.stop_time - self.test_result.start_time
def get_description(self):
"Return a text representation of the test method."
return self.test_result.getDescription(self.test_method)
def get_error_info(self):
"""Return a text representation of an exception thrown by a test
method.
"""
if not self.err:
return ''
return self.test_result._exc_info_to_string(
self.err, self.test_method)
| _TestInfo |
python | django-import-export__django-import-export | tests/core/tests/test_resources/test_modelresource/test_fields.py | {
"start": 205,
"end": 1879
} | class ____(TestCase):
def setUp(self):
self.resource = BookResource()
self.book = Book.objects.create(name="Some book")
self.dataset = tablib.Dataset(headers=["id", "name", "author_email", "price"])
row = [self.book.pk, "Some book", "test@example.com", "10.25"]
self.dataset.append(row)
def test_get_instance(self):
instance_loader = self.resource._meta.instance_loader_class(self.resource)
self.resource._meta.import_id_fields = ["id"]
instance = self.resource.get_instance(instance_loader, self.dataset.dict[0])
self.assertEqual(instance, self.book)
def test_get_instance_import_id_fields(self):
class BookResource(resources.ModelResource):
name = fields.Field(attribute="name", widget=widgets.CharWidget())
class Meta:
model = Book
import_id_fields = ["name"]
resource = BookResource()
instance_loader = resource._meta.instance_loader_class(resource)
instance = resource.get_instance(instance_loader, self.dataset.dict[0])
self.assertEqual(instance, self.book)
def test_get_instance_usually_defers_to_instance_loader(self):
self.resource._meta.import_id_fields = ["id"]
instance_loader = self.resource._meta.instance_loader_class(self.resource)
with mock.patch.object(instance_loader, "get_instance") as mocked_method:
row = self.dataset.dict[0]
self.resource.get_instance(instance_loader, row)
# instance_loader.get_instance() should have been called
mocked_method.assert_called_once_with(row)
| FieldHandlingTest |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 19207,
"end": 19469
} | class ____(StateMachineEvent):
""":class:`GatherDep` instruction terminated (abstract base class)"""
__slots__ = ("worker", "total_nbytes")
worker: str
total_nbytes: int # Must be the same as in GatherDep instruction
@dataclass
| GatherDepDoneEvent |
python | automl__auto-sklearn | autosklearn/pipeline/components/regression/random_forest.py | {
"start": 582,
"end": 6151
} | class ____(
IterativeComponent,
AutoSklearnRegressionAlgorithm,
):
def __init__(
self,
criterion,
max_features,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
bootstrap,
max_leaf_nodes,
min_impurity_decrease,
random_state=None,
n_jobs=1,
):
self.n_estimators = self.get_max_iter()
self.criterion = criterion
self.max_features = max_features
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.bootstrap = bootstrap
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.random_state = random_state
self.n_jobs = n_jobs
self.estimator = None
@staticmethod
def get_max_iter():
return 512
def get_current_iter(self):
return self.estimator.n_estimators
def iterative_fit(self, X, y, n_iter=1, refit=False):
from sklearn.ensemble import RandomForestRegressor
if refit:
self.estimator = None
if self.estimator is None:
self.n_estimators = int(self.n_estimators)
if check_none(self.max_depth):
self.max_depth = None
else:
self.max_depth = int(self.max_depth)
self.min_samples_split = int(self.min_samples_split)
self.min_samples_leaf = int(self.min_samples_leaf)
self.max_features = float(self.max_features)
self.bootstrap = check_for_bool(self.bootstrap)
if check_none(self.max_leaf_nodes):
self.max_leaf_nodes = None
else:
self.max_leaf_nodes = int(self.max_leaf_nodes)
self.min_impurity_decrease = float(self.min_impurity_decrease)
self.estimator = RandomForestRegressor(
n_estimators=n_iter,
criterion=self.criterion,
max_features=self.max_features,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
bootstrap=self.bootstrap,
max_leaf_nodes=self.max_leaf_nodes,
min_impurity_decrease=self.min_impurity_decrease,
random_state=self.random_state,
n_jobs=self.n_jobs,
warm_start=True,
)
else:
self.estimator.n_estimators += n_iter
self.estimator.n_estimators = min(
self.estimator.n_estimators, self.n_estimators
)
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
self.estimator.fit(X, y)
return self
def configuration_fully_fitted(self):
if self.estimator is None:
return False
return not len(self.estimator.estimators_) < self.n_estimators
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "RF",
"name": "Random Forest Regressor",
"handles_regression": True,
"handles_classification": False,
"handles_multiclass": False,
"handles_multilabel": False,
"handles_multioutput": True,
"prefers_data_normalized": False,
"is_deterministic": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
criterion = CategoricalHyperparameter(
"criterion", ["mse", "friedman_mse", "mae"]
)
# In contrast to the random forest classifier we want to use more max_features
# and therefore have this not on a sqrt scale
max_features = UniformFloatHyperparameter(
"max_features", 0.1, 1.0, default_value=1.0
)
max_depth = UnParametrizedHyperparameter("max_depth", "None")
min_samples_split = UniformIntegerHyperparameter(
"min_samples_split", 2, 20, default_value=2
)
min_samples_leaf = UniformIntegerHyperparameter(
"min_samples_leaf", 1, 20, default_value=1
)
min_weight_fraction_leaf = UnParametrizedHyperparameter(
"min_weight_fraction_leaf", 0.0
)
max_leaf_nodes = UnParametrizedHyperparameter("max_leaf_nodes", "None")
min_impurity_decrease = UnParametrizedHyperparameter(
"min_impurity_decrease", 0.0
)
bootstrap = CategoricalHyperparameter(
"bootstrap", ["True", "False"], default_value="True"
)
cs.add_hyperparameters(
[
criterion,
max_features,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_leaf_nodes,
min_impurity_decrease,
bootstrap,
]
)
return cs
| RandomForest |
python | huggingface__transformers | src/transformers/trainer_callback.py | {
"start": 26124,
"end": 29250
} | class ____(TrainerCallback):
"""
A [`TrainerCallback`] that displays the progress of training or evaluation.
You can modify `max_str_len` to control how long strings are truncated when logging.
"""
def __init__(self, max_str_len: int = 100):
"""
Initialize the callback with optional max_str_len parameter to control string truncation length.
Args:
max_str_len (`int`):
Maximum length of strings to display in logs.
Longer strings will be truncated with a message.
"""
self.training_bar = None
self.prediction_bar = None
self.max_str_len = max_str_len
def on_train_begin(self, args, state, control, **kwargs):
if state.is_world_process_zero:
self.training_bar = tqdm(total=state.max_steps, dynamic_ncols=True)
self.current_step = 0
def on_step_end(self, args, state, control, **kwargs):
if state.is_world_process_zero:
self.training_bar.update(state.global_step - self.current_step)
self.current_step = state.global_step
def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
if state.is_world_process_zero and has_length(eval_dataloader):
if self.prediction_bar is None:
self.prediction_bar = tqdm(
total=len(eval_dataloader), leave=self.training_bar is None, dynamic_ncols=True
)
self.prediction_bar.update(1)
def on_evaluate(self, args, state, control, **kwargs):
if state.is_world_process_zero:
if self.prediction_bar is not None:
self.prediction_bar.close()
self.prediction_bar = None
def on_predict(self, args, state, control, **kwargs):
if state.is_world_process_zero:
if self.prediction_bar is not None:
self.prediction_bar.close()
self.prediction_bar = None
def on_log(self, args, state, control, logs=None, **kwargs):
if state.is_world_process_zero and self.training_bar is not None:
# make a shallow copy of logs so we can mutate the fields copied
# but avoid doing any value pickling.
shallow_logs = {}
for k, v in logs.items():
if isinstance(v, str) and len(v) > self.max_str_len:
shallow_logs[k] = (
f"[String too long to display, length: {len(v)} > {self.max_str_len}. "
"Consider increasing `max_str_len` if needed.]"
)
if isinstance(v, float):
# Format floats for better readability
shallow_logs[k] = f"{v:.4g}"
else:
shallow_logs[k] = v
_ = shallow_logs.pop("total_flos", None)
self.training_bar.write(str(shallow_logs))
def on_train_end(self, args, state, control, **kwargs):
if state.is_world_process_zero:
self.training_bar.close()
self.training_bar = None
| ProgressCallback |
python | numba__numba | numba/cuda/tests/cudadrv/test_cuda_ndarray.py | {
"start": 205,
"end": 17652
} | class ____(CUDATestCase):
def test_device_array_interface(self):
dary = cuda.device_array(shape=100)
devicearray.verify_cuda_ndarray_interface(dary)
ary = np.empty(100)
dary = cuda.to_device(ary)
devicearray.verify_cuda_ndarray_interface(dary)
ary = np.asarray(1.234)
dary = cuda.to_device(ary)
self.assertEqual(dary.ndim, 0)
devicearray.verify_cuda_ndarray_interface(dary)
def test_device_array_from_readonly(self):
ary = np.arange(100, dtype=np.float32)
# Make the array readonly
ary.flags.writeable = False
self.assertFalse(ary.flags.writeable)
# Ensure that we can copy the readonly array
dary = cuda.to_device(ary)
retr = dary.copy_to_host()
np.testing.assert_array_equal(retr, ary)
def test_devicearray_dtype(self):
dary = cuda.device_array(shape=(100,), dtype="f4")
self.assertEqual(dary.dtype, np.dtype("f4"))
def test_devicearray_no_copy(self):
array = np.arange(100, dtype=np.float32)
cuda.to_device(array, copy=False)
def test_devicearray_shape(self):
ary = np.arange(2 * 3 * 4).reshape(2, 3, 4)
dary = cuda.to_device(ary)
self.assertEqual(ary.shape, dary.shape)
self.assertEqual(ary.shape[1:], dary.shape[1:])
def test_devicearray(self):
array = np.arange(100, dtype=np.int32)
original = array.copy()
gpumem = cuda.to_device(array)
array[:] = 0
gpumem.copy_to_host(array)
np.testing.assert_array_equal(array, original)
def test_stream_bind(self):
stream = cuda.stream()
with stream.auto_synchronize():
arr = cuda.device_array(
(3, 3),
dtype=np.float64,
stream=stream)
self.assertEqual(arr.bind(stream).stream, stream)
self.assertEqual(arr.stream, stream)
def test_len_1d(self):
ary = np.empty((3,))
dary = cuda.device_array(3)
self.assertEqual(len(ary), len(dary))
def test_len_2d(self):
ary = np.empty((3, 5))
dary = cuda.device_array((3, 5))
self.assertEqual(len(ary), len(dary))
def test_len_3d(self):
ary = np.empty((3, 5, 7))
dary = cuda.device_array((3, 5, 7))
self.assertEqual(len(ary), len(dary))
def test_devicearray_partition(self):
N = 100
array = np.arange(N, dtype=np.int32)
original = array.copy()
gpumem = cuda.to_device(array)
left, right = gpumem.split(N // 2)
array[:] = 0
self.assertTrue(np.all(array == 0))
right.copy_to_host(array[N // 2:])
left.copy_to_host(array[:N // 2])
self.assertTrue(np.all(array == original))
def test_devicearray_replace(self):
N = 100
array = np.arange(N, dtype=np.int32)
original = array.copy()
gpumem = cuda.to_device(array)
cuda.to_device(array * 2, to=gpumem)
gpumem.copy_to_host(array)
np.testing.assert_array_equal(array, original * 2)
@skip_on_cudasim('This works in the simulator')
def test_devicearray_transpose_wrongdim(self):
gpumem = cuda.to_device(np.array(np.arange(12)).reshape(3, 4, 1))
with self.assertRaises(NotImplementedError) as e:
np.transpose(gpumem)
self.assertEqual(
"transposing a non-2D DeviceNDArray isn't supported",
str(e.exception))
def test_devicearray_transpose_identity(self):
# any-shape identities should work
original = np.array(np.arange(24)).reshape(3, 4, 2)
array = np.transpose(cuda.to_device(original),
axes=(0, 1, 2)).copy_to_host()
self.assertTrue(np.all(array == original))
def test_devicearray_transpose_duplicatedaxis(self):
gpumem = cuda.to_device(np.array(np.arange(12)).reshape(3, 4))
with self.assertRaises(ValueError) as e:
np.transpose(gpumem, axes=(0, 0))
self.assertIn(
str(e.exception),
container=[
'invalid axes list (0, 0)', # GPU
'repeated axis in transpose', # sim
])
def test_devicearray_transpose_wrongaxis(self):
gpumem = cuda.to_device(np.array(np.arange(12)).reshape(3, 4))
with self.assertRaises(ValueError) as e:
np.transpose(gpumem, axes=(0, 2))
self.assertIn(
str(e.exception),
container=[
'invalid axes list (0, 2)', # GPU
'invalid axis for this array',
'axis 2 is out of bounds for array of dimension 2', # sim
])
def test_devicearray_view_ok(self):
original = np.array(np.arange(12), dtype="i2").reshape(3, 4)
array = cuda.to_device(original)
for dtype in ("i4", "u4", "i8", "f8"):
with self.subTest(dtype=dtype):
np.testing.assert_array_equal(
array.view(dtype).copy_to_host(),
original.view(dtype)
)
def test_devicearray_view_ok_not_c_contig(self):
original = np.array(np.arange(32), dtype="i2").reshape(4, 8)
array = cuda.to_device(original)[:, ::2]
original = original[:, ::2]
np.testing.assert_array_equal(
array.view("u2").copy_to_host(),
original.view("u2")
)
def test_devicearray_view_bad_not_c_contig(self):
original = np.array(np.arange(32), dtype="i2").reshape(4, 8)
array = cuda.to_device(original)[:, ::2]
with self.assertRaises(ValueError) as e:
array.view("i4")
msg = str(e.exception)
self.assertIn('To change to a dtype of a different size,', msg)
contiguous_pre_np123 = 'the array must be C-contiguous' in msg
contiguous_post_np123 = 'the last axis must be contiguous' in msg
self.assertTrue(contiguous_pre_np123 or contiguous_post_np123,
'Expected message to mention contiguity')
def test_devicearray_view_bad_itemsize(self):
original = np.array(np.arange(12), dtype="i2").reshape(4, 3)
array = cuda.to_device(original)
with self.assertRaises(ValueError) as e:
array.view("i4")
self.assertEqual(
"When changing to a larger dtype,"
" its size must be a divisor of the total size in bytes"
" of the last axis of the array.",
str(e.exception))
def test_devicearray_transpose_ok(self):
original = np.array(np.arange(12)).reshape(3, 4)
array = np.transpose(cuda.to_device(original)).copy_to_host()
self.assertTrue(np.all(array == original.T))
def test_devicearray_transpose_T(self):
original = np.array(np.arange(12)).reshape(3, 4)
array = cuda.to_device(original).T.copy_to_host()
self.assertTrue(np.all(array == original.T))
def test_devicearray_contiguous_slice(self):
# memcpys are dumb ranges of bytes, so trying to
# copy to a non-contiguous range shouldn't work!
a = np.arange(25).reshape(5, 5, order='F')
s = np.full(fill_value=5, shape=(5,))
d = cuda.to_device(a)
a[2] = s
# d is in F-order (not C-order), so d[2] is not contiguous
# (40-byte strides). This means we can't memcpy to it!
with self.assertRaises(ValueError) as e:
d[2].copy_to_device(s)
self.assertEqual(
devicearray.errmsg_contiguous_buffer,
str(e.exception))
# if d[2].copy_to_device(s), then this would pass:
# self.assertTrue((a == d.copy_to_host()).all())
def _test_devicearray_contiguous_host_copy(self, a_c, a_f):
"""
Checks host->device memcpys
"""
self.assertTrue(a_c.flags.c_contiguous)
self.assertTrue(a_f.flags.f_contiguous)
for original, copy in [
(a_f, a_f),
(a_f, a_c),
(a_c, a_f),
(a_c, a_c),
]:
msg = '%s => %s' % (
'C' if original.flags.c_contiguous else 'F',
'C' if copy.flags.c_contiguous else 'F',
)
d = cuda.to_device(original)
d.copy_to_device(copy)
self.assertTrue(np.all(d.copy_to_host() == a_c), msg=msg)
self.assertTrue(np.all(d.copy_to_host() == a_f), msg=msg)
def test_devicearray_contiguous_copy_host_3d(self):
a_c = np.arange(5 * 5 * 5).reshape(5, 5, 5)
a_f = np.array(a_c, order='F')
self._test_devicearray_contiguous_host_copy(a_c, a_f)
def test_devicearray_contiguous_copy_host_1d(self):
a_c = np.arange(5)
a_f = np.array(a_c, order='F')
self._test_devicearray_contiguous_host_copy(a_c, a_f)
def test_devicearray_contiguous_copy_device(self):
a_c = np.arange(5 * 5 * 5).reshape(5, 5, 5)
a_f = np.array(a_c, order='F')
self.assertTrue(a_c.flags.c_contiguous)
self.assertTrue(a_f.flags.f_contiguous)
d = cuda.to_device(a_c)
with self.assertRaises(ValueError) as e:
d.copy_to_device(cuda.to_device(a_f))
self.assertEqual(
"incompatible strides: {} vs. {}".format(a_c.strides, a_f.strides),
str(e.exception))
d.copy_to_device(cuda.to_device(a_c))
self.assertTrue(np.all(d.copy_to_host() == a_c))
d = cuda.to_device(a_f)
with self.assertRaises(ValueError) as e:
d.copy_to_device(cuda.to_device(a_c))
self.assertEqual(
"incompatible strides: {} vs. {}".format(a_f.strides, a_c.strides),
str(e.exception))
d.copy_to_device(cuda.to_device(a_f))
self.assertTrue(np.all(d.copy_to_host() == a_f))
def test_devicearray_broadcast_host_copy(self):
broadsize = 4
coreshape = (2, 3)
coresize = np.prod(coreshape)
core_c = np.arange(coresize).reshape(coreshape, order='C')
core_f = np.arange(coresize).reshape(coreshape, order='F')
for dim in range(len(coreshape)):
newindex = (slice(None),) * dim + (np.newaxis,)
broadshape = coreshape[:dim] + (broadsize,) + coreshape[dim:]
broad_c = np.broadcast_to(core_c[newindex], broadshape)
broad_f = np.broadcast_to(core_f[newindex], broadshape)
dbroad_c = cuda.to_device(broad_c)
dbroad_f = cuda.to_device(broad_f)
np.testing.assert_array_equal(dbroad_c.copy_to_host(), broad_c)
np.testing.assert_array_equal(dbroad_f.copy_to_host(), broad_f)
# Also test copying across different core orderings
dbroad_c.copy_to_device(broad_f)
dbroad_f.copy_to_device(broad_c)
np.testing.assert_array_equal(dbroad_c.copy_to_host(), broad_f)
np.testing.assert_array_equal(dbroad_f.copy_to_host(), broad_c)
def test_devicearray_contiguous_host_strided(self):
a_c = np.arange(10)
d = cuda.to_device(a_c)
arr = np.arange(20)[::2]
d.copy_to_device(arr)
np.testing.assert_array_equal(d.copy_to_host(), arr)
def test_devicearray_contiguous_device_strided(self):
d = cuda.to_device(np.arange(20))
arr = np.arange(20)
with self.assertRaises(ValueError) as e:
d.copy_to_device(cuda.to_device(arr)[::2])
self.assertEqual(
devicearray.errmsg_contiguous_buffer,
str(e.exception))
@skip_on_cudasim('DeviceNDArray class not present in simulator')
def test_devicearray_relaxed_strides(self):
# From the reproducer in Issue #6824.
# Construct a device array that is contiguous even though
# the strides for the first axis (800) are not equal to
# the strides * size (10 * 8 = 80) for the previous axis,
# because the first axis size is 1.
arr = devicearray.DeviceNDArray((1, 10), (800, 8), np.float64)
# Ensure we still believe the array to be contiguous because
# strides checking is relaxed.
self.assertTrue(arr.flags['C_CONTIGUOUS'])
self.assertTrue(arr.flags['F_CONTIGUOUS'])
def test_c_f_contiguity_matches_numpy(self):
# From the reproducer in Issue #4943.
shapes = ((1, 4), (4, 1))
orders = ('C', 'F')
for shape, order in itertools.product(shapes, orders):
arr = np.ndarray(shape, order=order)
d_arr = cuda.to_device(arr)
self.assertEqual(arr.flags['C_CONTIGUOUS'],
d_arr.flags['C_CONTIGUOUS'])
self.assertEqual(arr.flags['F_CONTIGUOUS'],
d_arr.flags['F_CONTIGUOUS'])
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_simple_c(self):
# C-order 1D array
a = np.zeros(10, order='C')
d = cuda.to_device(a)
self.assertEqual(d._numba_type_.layout, 'C')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_simple_f(self):
# F-order array that is also C layout.
a = np.zeros(10, order='F')
d = cuda.to_device(a)
self.assertEqual(d._numba_type_.layout, 'C')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_2d_c(self):
# C-order 2D array
a = np.zeros((2, 10), order='C')
d = cuda.to_device(a)
self.assertEqual(d._numba_type_.layout, 'C')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_2d_f(self):
# F-order array that can only be F layout
a = np.zeros((2, 10), order='F')
d = cuda.to_device(a)
self.assertEqual(d._numba_type_.layout, 'F')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_noncontig_slice_c(self):
# Non-contiguous slice of C-order array
a = np.zeros((5, 5), order='C')
d = cuda.to_device(a)[:,2]
self.assertEqual(d._numba_type_.layout, 'A')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_noncontig_slice_f(self):
# Non-contiguous slice of F-order array
a = np.zeros((5, 5), order='F')
d = cuda.to_device(a)[2,:]
self.assertEqual(d._numba_type_.layout, 'A')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_contig_slice_c(self):
# Contiguous slice of C-order array
a = np.zeros((5, 5), order='C')
d = cuda.to_device(a)[2,:]
self.assertEqual(d._numba_type_.layout, 'C')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_contig_slice_f(self):
# Contiguous slice of F-order array - is both C- and F-contiguous, so
# types as 'C' layout
a = np.zeros((5, 5), order='F')
d = cuda.to_device(a)[:,2]
self.assertEqual(d._numba_type_.layout, 'C')
@skip_on_cudasim('Typing not done in the simulator')
def test_devicearray_typing_order_broadcasted(self):
# Broadcasted array, similar to that used for passing scalars to ufuncs
a = np.broadcast_to(np.array([1]), (10,))
d = cuda.to_device(a)
self.assertEqual(d._numba_type_.layout, 'A')
def test_bug6697(self):
ary = np.arange(10, dtype=np.int16)
dary = cuda.to_device(ary)
got = np.asarray(dary)
self.assertEqual(got.dtype, dary.dtype)
@skip_on_cudasim('DeviceNDArray class not present in simulator')
def test_issue_8477(self):
# Ensure that we can copy a zero-length device array to a zero-length
# host array when the strides of the device and host arrays differ -
# this should be possible because the strides are irrelevant when the
# length is zero. For more info see
# https://github.com/numba/numba/issues/8477.
# Create a device array with shape (0,) and strides (8,)
dev_array = devicearray.DeviceNDArray(shape=(0,), strides=(8,),
dtype=np.int8)
# Create a host array with shape (0,) and strides (0,)
host_array = np.ndarray(shape=(0,), strides=(0,), dtype=np.int8)
# Sanity check for this test - ensure our destination has the strides
# we expect, because strides can be ignored in some cases by the
# ndarray constructor - checking here ensures that we haven't failed to
# account for unexpected behaviour across different versions of NumPy
self.assertEqual(host_array.strides, (0,))
# Ensure that the copy succeeds in both directions
dev_array.copy_to_host(host_array)
dev_array.copy_to_device(host_array)
# Ensure that a device-to-device copy also succeeds when the strides
# differ - one way of doing this is to copy the host array across and
# use that for copies in both directions.
dev_array_from_host = cuda.to_device(host_array)
self.assertEqual(dev_array_from_host.shape, (0,))
self.assertEqual(dev_array_from_host.strides, (0,))
dev_array.copy_to_device(dev_array_from_host)
dev_array_from_host.copy_to_device(dev_array)
| TestCudaNDArray |
python | walkccc__LeetCode | solutions/1910. Remove All Occurrences of a Substring/1910.py | {
"start": 0,
"end": 298
} | class ____:
def removeOccurrences(self, s: str, part: str) -> str:
n = len(s)
k = len(part)
t = [' '] * n
j = 0 # t's index
for i, c in enumerate(s):
t[j] = c
j += 1
if j >= k and ''.join(t[j - k:j]) == part:
j -= k
return ''.join(t[:j])
| Solution |
python | scipy__scipy | scipy/integrate/_ivp/rk.py | {
"start": 10386,
"end": 15417
} | class ____(RungeKutta):
"""Explicit Runge-Kutta method of order 5(4).
This uses the Dormand-Prince pair of formulas [1]_. The error is controlled
assuming accuracy of the fourth-order method accuracy, but steps are taken
using the fifth-order accurate formula (local extrapolation is done).
A quartic interpolation polynomial is used for the dense output [2]_.
Can be applied in the complex domain.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
It can either have shape (n,); then ``fun`` must return array_like with
shape (n,). Alternatively it can have shape (n, k); then ``fun``
must return an array_like with shape (n, k), i.e., each column
corresponds to a single column in ``y``. The choice between the two
options is determined by `vectorized` argument (see below).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time - the integration won't continue beyond it. It also
determines the direction of the integration.
first_step : float or None, optional
Initial step size. Default is ``None`` which means that the algorithm
should choose.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e., the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits), while `atol` controls
absolute accuracy (number of correct decimal places). To achieve the
desired `rtol`, set `atol` to be smaller than the smallest value that
can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
allowable error. If `atol` is larger than ``rtol * abs(y)`` the
number of correct digits is not guaranteed. Conversely, to achieve the
desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
than `atol`. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
step_size : float
Size of the last successful step. None if no steps were made yet.
nfev : int
Number evaluations of the system's right-hand side.
njev : int
Number of evaluations of the Jacobian.
Is always 0 for this solver as it does not use the Jacobian.
nlu : int
Number of LU decompositions. Is always 0 for this solver.
References
----------
.. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
formulae", Journal of Computational and Applied Mathematics, Vol. 6,
No. 1, pp. 19-26, 1980.
.. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
of Computation,, Vol. 46, No. 173, pp. 135-150, 1986.
"""
order = 5
error_estimator_order = 4
n_stages = 6
C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1])
A = np.array([
[0, 0, 0, 0, 0],
[1/5, 0, 0, 0, 0],
[3/40, 9/40, 0, 0, 0],
[44/45, -56/15, 32/9, 0, 0],
[19372/6561, -25360/2187, 64448/6561, -212/729, 0],
[9017/3168, -355/33, 46732/5247, 49/176, -5103/18656]
])
B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])
E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525,
1/40])
# Corresponds to the optimum value of c_6 from [2]_.
P = np.array([
[1, -8048581381/2820520608, 8663915743/2820520608,
-12715105075/11282082432],
[0, 0, 0, 0],
[0, 131558114200/32700410799, -68118460800/10900136933,
87487479700/32700410799],
[0, -1754552775/470086768, 14199869525/1410260304,
-10690763975/1880347072],
[0, 127303824393/49829197408, -318862633887/49829197408,
701980252875 / 199316789632],
[0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844],
[0, 40617522/29380423, -110615467/29380423, 69997945/29380423]])
| RK45 |
python | realpython__materials | python-sqlite-sqlalchemy/project/examples/example_3/app/albums/routes.py | {
"start": 707,
"end": 2050
} | class ____(FlaskForm):
artist = HiddenField("artist")
title = StringField(
label="Albums's Name", validators=[InputRequired(), does_album_exist]
)
@albums_bp.route("/albums", methods=["GET", "POST"])
@albums_bp.route("/albums/<int:artist_id>", methods=["GET", "POST"])
def albums(artist_id=None):
form = CreateAlbumForm()
# did we get an artist id?
if artist_id is not None:
# Get the artist
artist = (
db.session.query(Artist)
.filter(Artist.artist_id == artist_id)
.one_or_none()
)
form.artist.data = artist.name
# otherwise, no artist
else:
artist = None
# Is the form valid?
if form.validate_on_submit():
# Create new Album
album = Album(title=form.title.data)
artist.albums.append(album)
db.session.add(artist)
db.session.commit()
return redirect(url_for("albums_bp.albums", artist_id=artist_id))
# Start the query for albums
query = db.session.query(Album)
# Display the albums for the artist passed?
if artist_id is not None:
query = query.filter(Album.artist_id == artist_id)
albums = query.order_by(Album.title).all()
return render_template(
"albums.html", artist=artist, albums=albums, form=form
)
| CreateAlbumForm |
python | yaml__pyyaml | tests/legacy_tests/test_recursive.py | {
"start": 14,
"end": 357
} | class ____:
def __init__(self, foo, bar):
self.foo = foo
self.bar = bar
def __repr__(self):
try:
return "%s(foo=%r, bar=%r)" % (self.__class__.__name__,
self.foo, self.bar)
except RuntimeError:
return "%s(foo=..., bar=...)" % self.__class__.__name__
| AnInstance |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_ddl.py | {
"start": 6998,
"end": 13010
} | class ____(fixtures.TestBase):
"""test the creation of a variety of DDL structures and ensure
label length limits pass on backends
"""
__sparse_driver_backend__ = True
def fk(self, metadata, connection):
convention = {
"fk": "foreign_key_%(table_name)s_"
"%(column_0_N_name)s_"
"%(referred_table_name)s_"
+ (
"_".join(
"".join(random.choice("abcdef") for j in range(20))
for i in range(10)
)
),
}
metadata.naming_convention = convention
Table(
"a_things_with_stuff",
metadata,
Column("id_long_column_name", Integer, primary_key=True),
test_needs_fk=True,
)
cons = ForeignKeyConstraint(
["aid"], ["a_things_with_stuff.id_long_column_name"]
)
Table(
"b_related_things_of_value",
metadata,
Column(
"aid",
),
cons,
test_needs_fk=True,
)
actual_name = cons.name
metadata.create_all(connection)
if testing.requires.foreign_key_constraint_name_reflection.enabled:
insp = inspect(connection)
fks = insp.get_foreign_keys("b_related_things_of_value")
reflected_name = fks[0]["name"]
return actual_name, reflected_name
else:
return actual_name, None
def pk(self, metadata, connection):
convention = {
"pk": "primary_key_%(table_name)s_"
"%(column_0_N_name)s"
+ (
"_".join(
"".join(random.choice("abcdef") for j in range(30))
for i in range(10)
)
),
}
metadata.naming_convention = convention
a = Table(
"a_things_with_stuff",
metadata,
Column("id_long_column_name", Integer, primary_key=True),
Column("id_another_long_name", Integer, primary_key=True),
)
cons = a.primary_key
actual_name = cons.name
metadata.create_all(connection)
insp = inspect(connection)
pk = insp.get_pk_constraint("a_things_with_stuff")
reflected_name = pk["name"]
return actual_name, reflected_name
def ix(self, metadata, connection):
convention = {
"ix": "index_%(table_name)s_"
"%(column_0_N_name)s"
+ (
"_".join(
"".join(random.choice("abcdef") for j in range(30))
for i in range(10)
)
),
}
metadata.naming_convention = convention
a = Table(
"a_things_with_stuff",
metadata,
Column("id_long_column_name", Integer, primary_key=True),
Column("id_another_long_name", Integer),
)
cons = Index(None, a.c.id_long_column_name, a.c.id_another_long_name)
actual_name = cons.name
metadata.create_all(connection)
insp = inspect(connection)
ix = insp.get_indexes("a_things_with_stuff")
reflected_name = ix[0]["name"]
return actual_name, reflected_name
def uq(self, metadata, connection):
convention = {
"uq": "unique_constraint_%(table_name)s_"
"%(column_0_N_name)s"
+ (
"_".join(
"".join(random.choice("abcdef") for j in range(30))
for i in range(10)
)
),
}
metadata.naming_convention = convention
cons = UniqueConstraint("id_long_column_name", "id_another_long_name")
Table(
"a_things_with_stuff",
metadata,
Column("id_long_column_name", Integer, primary_key=True),
Column("id_another_long_name", Integer),
cons,
)
actual_name = cons.name
metadata.create_all(connection)
insp = inspect(connection)
uq = insp.get_unique_constraints("a_things_with_stuff")
reflected_name = uq[0]["name"]
return actual_name, reflected_name
def ck(self, metadata, connection):
convention = {
"ck": "check_constraint_%(table_name)s"
+ (
"_".join(
"".join(random.choice("abcdef") for j in range(30))
for i in range(10)
)
),
}
metadata.naming_convention = convention
cons = CheckConstraint("some_long_column_name > 5")
Table(
"a_things_with_stuff",
metadata,
Column("id_long_column_name", Integer, primary_key=True),
Column("some_long_column_name", Integer),
cons,
)
actual_name = cons.name
metadata.create_all(connection)
insp = inspect(connection)
ck = insp.get_check_constraints("a_things_with_stuff")
reflected_name = ck[0]["name"]
return actual_name, reflected_name
@testing.combinations(
("fk",),
("pk",),
("ix",),
("ck", testing.requires.check_constraint_reflection.as_skips()),
("uq", testing.requires.unique_constraint_reflection.as_skips()),
argnames="type_",
)
def test_long_convention_name(self, type_, metadata, connection):
actual_name, reflected_name = getattr(self, type_)(
metadata, connection
)
assert len(actual_name) > 255
if reflected_name is not None:
overlap = actual_name[0 : len(reflected_name)]
if len(overlap) < len(actual_name):
eq_(overlap[0:-5], reflected_name[0 : len(overlap) - 5])
else:
eq_(overlap, reflected_name)
__all__ = ("TableDDLTest", "FutureTableDDLTest", "LongNameBlowoutTest")
| LongNameBlowoutTest |
python | pallets__click | src/click/_winconsole.py | {
"start": 3455,
"end": 3671
} | class ____(io.RawIOBase):
def __init__(self, handle: int | None) -> None:
self.handle = handle
def isatty(self) -> t.Literal[True]:
super().isatty()
return True
| _WindowsConsoleRawIOBase |
python | dask__dask | dask/array/_array_expr/_io.py | {
"start": 434,
"end": 1373
} | class ____(IO):
_parameters = ["layer", "_meta", "chunks", "keys", "name_prefix"]
@functools.cached_property
def _meta(self):
return self.operand("_meta")
@functools.cached_property
def chunks(self):
return self.operand("chunks")
@functools.cached_property
def _name(self):
return self.operand("name_prefix") + "-" + self.deterministic_token
def _layer(self):
dsk = dict(self.operand("layer"))
# The name may not actually match the layers name therefore rewrite this
# using an alias
for k in self.operand("keys"):
if not isinstance(k, tuple):
raise TypeError(f"Expected tuple, got {type(k)}")
orig = dsk[k]
if not istask(orig):
del dsk[k]
dsk[(self._name, *k[1:])] = orig
else:
dsk[(self._name, *k[1:])] = k
return dsk
| FromGraph |
python | zarr-developers__zarr-python | tests/test_codecs/test_codecs.py | {
"start": 889,
"end": 1112
} | class ____:
array: AnyAsyncArray
def __getitem__(self, selection: BasicSelection) -> _AsyncArraySelectionProxy:
return _AsyncArraySelectionProxy(self.array, selection)
@dataclass(frozen=True)
| _AsyncArrayProxy |
python | doocs__leetcode | solution/0100-0199/0122.Best Time to Buy and Sell Stock II/Solution.py | {
"start": 0,
"end": 130
} | class ____:
def maxProfit(self, prices: List[int]) -> int:
return sum(max(0, b - a) for a, b in pairwise(prices))
| Solution |
python | pypa__warehouse | tests/unit/captcha/test_recaptcha.py | {
"start": 7746,
"end": 8833
} | class ____:
def test_csp_policy(self):
scheme = "https"
request = pretend.stub(
scheme=scheme,
registry=pretend.stub(
settings={
"recaptcha.site_key": "foo",
"recaptcha.secret_key": "bar",
}
),
)
serv = recaptcha.Service.create_service(context=None, request=request)
assert serv.csp_policy == {
"script-src": [
"{request.scheme}://www.recaptcha.net/recaptcha/",
"{request.scheme}://www.gstatic.com/recaptcha/",
"{request.scheme}://www.gstatic.cn/recaptcha/",
],
"frame-src": ["{request.scheme}://www.recaptcha.net/recaptcha/"],
"style-src": ["'unsafe-inline'"],
}
def test_create_service(session_resetting_request):
svc = recaptcha.Service.create_service(
context=None, request=session_resetting_request
)
assert isinstance(svc, recaptcha.Service)
assert svc.request is session_resetting_request
| TestCSPPolicy |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess3.py | {
"start": 192,
"end": 263
} | class ____:
pi = 3.1415
def __init__(self):
self.x = 1
| A |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/metrics_test.py | {
"start": 175587,
"end": 177604
} | class ____(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.true_negatives_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0.15, 0.5, 0.85])
_assert_metric_variables(self, ('true_negatives/true_negatives:0',))
@test_util.run_deprecated_v1
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.true_negatives_at_thresholds(
predictions=predictions, labels=labels, thresholds=[0.15, 0.5, 0.85])
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tn)
self.assertAllEqual((2, 5, 7), tn_update_op)
self.assertAllEqual((2, 5, 7), tn)
@test_util.run_deprecated_v1
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.true_negatives_at_thresholds(
predictions=predictions,
labels=labels,
weights=((0.0, 2.0, 3.0, 5.0),),
thresholds=[0.15, 0.5, 0.85])
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tn)
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op)
self.assertAllEqual((5.0, 15.0, 23.0), tn)
| TrueNegativesAtThresholdsTest |
python | lazyprogrammer__machine_learning_examples | cnn_class2/tf_resnet_convblock.py | {
"start": 2310,
"end": 6240
} | class ____:
def __init__(self, mi, fm_sizes, stride=2, activation=tf.nn.relu):
# conv1, conv2, conv3
# note: # feature maps shortcut = # feauture maps conv 3
assert(len(fm_sizes) == 3)
# note: kernel size in 2nd conv is always 3
# so we won't bother including it as an arg
# note: stride only applies to conv 1 in main branch
# and conv in shortcut, otherwise stride is 1
self.session = None
self.f = tf.nn.relu
# init main branch
# Conv -> BN -> F() ---> Conv -> BN -> F() ---> Conv -> BN
self.conv1 = ConvLayer(1, mi, fm_sizes[0], stride)
self.bn1 = BatchNormLayer(fm_sizes[0])
self.conv2 = ConvLayer(3, fm_sizes[0], fm_sizes[1], 1, 'SAME')
self.bn2 = BatchNormLayer(fm_sizes[1])
self.conv3 = ConvLayer(1, fm_sizes[1], fm_sizes[2], 1)
self.bn3 = BatchNormLayer(fm_sizes[2])
# init shortcut branch
# Conv -> BN
self.convs = ConvLayer(1, mi, fm_sizes[2], stride)
self.bns = BatchNormLayer(fm_sizes[2])
# in case needed later
self.layers = [
self.conv1, self.bn1,
self.conv2, self.bn2,
self.conv3, self.bn3,
self.convs, self.bns
]
# this will not be used when input passed in from
# a previous layer
self.input_ = tf.placeholder(tf.float32, shape=(1, 224, 224, mi))
self.output = self.forward(self.input_)
def forward(self, X):
# main branch
FX = self.conv1.forward(X)
FX = self.bn1.forward(FX)
FX = self.f(FX)
FX = self.conv2.forward(FX)
FX = self.bn2.forward(FX)
FX = self.f(FX)
FX = self.conv3.forward(FX)
FX = self.bn3.forward(FX)
# shortcut branch
SX = self.convs.forward(X)
SX = self.bns.forward(SX)
return self.f(FX + SX)
def predict(self, X):
assert(self.session is not None)
return self.session.run(
self.output,
feed_dict={self.input_: X}
)
def set_session(self, session):
# need to make this a session
# so assignment happens on sublayers too
self.session = session
self.conv1.session = session
self.bn1.session = session
self.conv2.session = session
self.bn2.session = session
self.conv3.session = session
self.bn3.session = session
self.convs.session = session
self.bns.session = session
def copyFromKerasLayers(self, layers):
# [<keras.layers.convolutional.Conv2D at 0x117bd1978>,
# <keras.layers.normalization.BatchNormalization at 0x117bf84a8>,
# <keras.layers.core.Activation at 0x117c15fd0>,
# <keras.layers.convolutional.Conv2D at 0x117c23be0>,
# <keras.layers.normalization.BatchNormalization at 0x117c51978>,
# <keras.layers.core.Activation at 0x117c93518>,
# <keras.layers.convolutional.Conv2D at 0x117cc1518>,
# <keras.layers.convolutional.Conv2D at 0x117d21630>,
# <keras.layers.normalization.BatchNormalization at 0x117cd2a58>,
# <keras.layers.normalization.BatchNormalization at 0x117d44b00>,
# <keras.layers.merge.Add at 0x117dae748>,
# <keras.layers.core.Activation at 0x117da2eb8>]
self.conv1.copyFromKerasLayers(layers[0])
self.bn1.copyFromKerasLayers(layers[1])
self.conv2.copyFromKerasLayers(layers[3])
self.bn2.copyFromKerasLayers(layers[4])
self.conv3.copyFromKerasLayers(layers[6])
self.bn3.copyFromKerasLayers(layers[8])
self.convs.copyFromKerasLayers(layers[7])
self.bns.copyFromKerasLayers(layers[9])
def get_params(self):
params = []
for layer in self.layers:
params += layer.get_params()
return params
if __name__ == '__main__':
conv_block = ConvBlock(mi=3, fm_sizes=[64, 64, 256], stride=1)
# make a fake image
X = np.random.random((1, 224, 224, 3))
init = tf.global_variables_initializer()
with tf.Session() as session:
conv_block.set_session(session)
session.run(init)
output = conv_block.predict(X)
print("output.shape:", output.shape)
| ConvBlock |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F821_0.py | {
"start": 1463,
"end": 2320
} | class ____:
field: Annotated[
int,
"base64",
arbitrary_callable(),
123,
(1, 2, 3),
]
field_with_stringified_type: Annotated[
"PEP593Test",
123,
]
field_with_undefined_stringified_type: Annotated[
"PEP593Test123",
123,
]
field_with_nested_subscript: Annotated[
dict[Literal["foo"], str],
123,
]
field_with_undefined_nested_subscript: Annotated[
dict["foo", "bar"], # Expected to fail as undefined.
123,
]
def in_ipython_notebook() -> bool:
try:
# autoimported by notebooks
get_ipython() # type: ignore[name-defined]
except NameError:
return False # not in notebook
return True
def named_expr():
if any((key := (value := x)) for x in ["ok"]):
print(key)
| PEP593Test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.