language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | rllib/examples/envs/classes/correlated_actions_env.py | {
"start": 85,
"end": 3236
} | class ____(gym.Env):
"""Environment that can only be solved through an autoregressive action model.
In each step, the agent observes a random number (between -1 and 1) and has
to choose two actions, a1 (discrete, 0, 1, or 2) and a2 (cont. between -1 and 1).
The reward is constructed such that actions need to be correlated to succeed. It's
impossible for the network to learn each action head separately.
There are two reward components:
The first is the negative absolute value of the delta between 1.0 and the sum of
obs + a1. For example, if obs is -0.3 and a1 was sampled to be 1, then the value of
the first reward component is:
r1 = -abs(1.0 - [obs+a1]) = -abs(1.0 - (-0.3 + 1)) = -abs(0.3) = -0.3
The second reward component is computed as the negative absolute value
of `obs + a1 + a2`. For example, if obs is 0.5, a1 was sampled to be 0,
and a2 was sampled to be -0.7, then the value of the second reward component is:
r2 = -abs(obs + a1 + a2) = -abs(0.5 + 0 - 0.7)) = -abs(-0.2) = -0.2
Because of this specific reward function, the agent must learn to optimally sample
a1 based on the observation and to optimally sample a2, based on the observation
AND the sampled value of a1.
One way to effectively learn this is through correlated action
distributions, e.g., in examples/actions/auto_regressive_actions.py
The game ends after the first step.
"""
def __init__(self, config=None):
super().__init__()
# Observation space (single continuous value between -1. and 1.).
self.observation_space = gym.spaces.Box(-1.0, 1.0, shape=(1,), dtype=np.float32)
# Action space (discrete action a1 and continuous action a2).
self.action_space = gym.spaces.Tuple(
[gym.spaces.Discrete(3), gym.spaces.Box(-2.0, 2.0, (1,), np.float32)]
)
# Internal state for the environment (e.g., could represent a factor
# influencing the relationship)
self.obs = None
def reset(
self, seed: Optional[int] = None, options: Optional[Dict[str, Any]] = None
):
"""Reset the environment to an initial state."""
super().reset(seed=seed, options=options)
# Randomly initialize the observation between -1 and 1.
self.obs = np.random.uniform(-1, 1, size=(1,))
return self.obs, {}
def step(self, action):
"""Apply the autoregressive action and return step information."""
# Extract individual action components, a1 and a2.
a1, a2 = action
a2 = a2[0] # dissolve shape=(1,)
# r1 depends on how well a1 is aligned to obs:
r1 = -abs(1.0 - (self.obs[0] + a1))
# r2 depends on how well a2 is aligned to both, obs and a1.
r2 = -abs(self.obs[0] + a1 + a2)
reward = r1 + r2
# Optionally: add some noise or complexity to the reward function
# reward += np.random.normal(0, 0.01) # Small noise can be added
# Terminate after each step (no episode length in this simple example)
return self.obs, reward, True, False, {}
| CorrelatedActionsEnv |
python | xlwings__xlwings | tests/test_conversion.py | {
"start": 4764,
"end": 7648
} | class ____(TestBase):
def test_array(self):
# 1d array
array_1d = np.array([1.1, 2.2, np.nan, -4.4])
self.wb1.sheets[0].range("A1").value = array_1d
cells = self.wb1.sheets[0].range("A1:D1").options(np.array).value
assert_array_equal(cells, array_1d)
# 2d array
array_2d = np.array([[1.1, 2.2, 3.3], [-4.4, 5.5, np.nan]])
self.wb1.sheets[0].range("A4").value = array_2d
cells = self.wb1.sheets[0].range("A4").options(np.array, expand="table").value
assert_array_equal(cells, array_2d)
# 1d array (ndim=2)
self.wb1.sheets[0].range("A10").value = array_1d
cells = self.wb1.sheets[0].range("A10:D10").options(np.array, ndim=2).value
assert_array_equal(cells, np.atleast_2d(array_1d))
# 2d array (ndim=2)
self.wb1.sheets[0].range("A12").value = array_2d
cells = (
self.wb1.sheets[0]
.range("A12")
.options(np.array, ndim=2, expand="table")
.value
)
assert_array_equal(cells, array_2d)
def test_numpy_datetime(self):
self.wb1.sheets[0].range("A55").value = np.datetime64("2005-02-25T03:30Z")
self.assertEqual(
self.wb1.sheets[0].range("A55").value, dt.datetime(2005, 2, 25, 3, 30)
)
def test_scalar_nan(self):
"""test_scalar_nan: Covers GH Issue #15"""
self.wb1.sheets[0].range("A20").value = np.nan
self.assertEqual(None, self.wb1.sheets[0].range("A20").value)
def test_scalar_nan_float16(self):
"""test_scalar_nan: Covers GH Issue #1116"""
self.wb1.sheets[0].range("A20").value = np.float16(np.nan)
self.assertEqual(None, self.wb1.sheets[0].range("A20").value)
def test_scalar_nan_float32(self):
"""test_scalar_nan: Covers GH Issue #1116"""
self.wb1.sheets[0].range("A20").value = np.float32(np.nan)
self.assertEqual(None, self.wb1.sheets[0].range("A20").value)
def test_scalar_nan_float64(self):
"""test_scalar_nan: Covers GH Issue #1116"""
self.wb1.sheets[0].range("A20").value = np.float64(np.nan)
self.assertEqual(None, self.wb1.sheets[0].range("A20").value)
def test_ndim2_scalar_as_array(self):
"""test_atleast_2d_scalar_as_array: Covers GH Issue #53b"""
self.wb1.sheets[0].range("A50").value = 23
result = self.wb1.sheets[0].range("A50").options(np.array, ndim=2).value
self.assertEqual(np.array([[23]]), result)
def test_float64(self):
self.wb1.sheets[0].range("A1").value = np.float64(2)
self.assertEqual(self.wb1.sheets[0].range("A1").value, 2.0)
def test_int64(self):
self.wb1.sheets[0].range("A1").value = np.int64(2)
self.assertEqual(self.wb1.sheets[0].range("A1").value, 2.0)
@unittest.skipIf(pd is None, "pandas missing")
| TestNumpy |
python | sqlalchemy__sqlalchemy | test/orm/test_composites.py | {
"start": 32401,
"end": 33050
} | class ____(PrimaryKeyTest):
@classmethod
def setup_mappers(cls):
graphs = cls.tables.graphs
@dataclasses.dataclass
class Version:
id: int
version: int
cls.classes.Version = Version
class Graph(cls.Comparable):
def __init__(self, version):
self.version = version
cls.mapper_registry.map_imperatively(
Graph,
graphs,
properties={
"version": sa.orm.composite(
Version, graphs.c.id, graphs.c.version_id
)
},
)
| PrimaryKeyTestDataclasses |
python | pyqtgraph__pyqtgraph | pyqtgraph/console/Console.py | {
"start": 211,
"end": 7178
} | class ____(QtWidgets.QWidget):
"""
Widget displaying console output and accepting command input.
Implements:
- eval python expressions / exec python statements
- storable history of commands
- exception handling allowing commands to be interpreted in the context of any level in the exception stack frame
Why not just use python in an interactive shell (or ipython) ? There are a few reasons:
- pyside does not yet allow Qt event processing and interactive shell at the same time
- on some systems, typing in the console _blocks_ the qt event loop until the user presses enter. This can
be baffling and frustrating to users since it would appear the program has frozen.
- some terminals (eg windows cmd.exe) have notoriously unfriendly interfaces
- ability to add extra features like exception stack introspection
- ability to have multiple interactive prompts, including for spawned sub-processes
- ability to execute in either the GUI thread or a separate thread
"""
def __init__(self, parent=None, namespace=None, historyFile=None, text=None, editor=None, allowNonGuiExecution=False):
"""
============== =============================================================================
**Arguments:**
namespace dictionary containing the initial variables present in the default namespace
historyFile optional file for storing command history
text initial text to display in the console window
editor optional string for invoking code editor (called when stack trace entries are
double-clicked). May contain {fileName} and {lineNum} format keys. Example::
editorCommand --loadfile {fileName} --gotoline {lineNum}
============== =============================================================================
"""
QtWidgets.QWidget.__init__(self, parent)
self._allowNonGuiExecution = allowNonGuiExecution
self._setupUi()
if namespace is None:
namespace = {}
namespace['__console__'] = self
self.localNamespace = namespace
self.editor = editor
self.output = self.repl.output
self.input = self.repl.input
self.input.setFocus()
if text is not None:
self.output.insertPlainText(text)
self.historyFile = historyFile
try:
history = self.loadHistory()
except Exception as exc:
sys.excepthook(*sys.exc_info())
history = None
if history is not None:
self.input.history = [""] + history
self.historyList.addItems(history[::-1])
self.currentTraceback = None
def _setupUi(self):
self.layout = QtWidgets.QGridLayout(self)
self.setLayout(self.layout)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
self.splitter = QtWidgets.QSplitter(QtCore.Qt.Orientation.Vertical, self)
self.layout.addWidget(self.splitter, 0, 0)
self.repl = ReplWidget(self.globals, self.locals, self, allowNonGuiExecution=self._allowNonGuiExecution)
self.splitter.addWidget(self.repl)
self.historyList = QtWidgets.QListWidget(self)
self.historyList.hide()
self.splitter.addWidget(self.historyList)
self.historyBtn = QtWidgets.QPushButton('History', self)
self.historyBtn.setCheckable(True)
self.repl.inputLayout.addWidget(self.historyBtn)
self.repl.sigCommandEntered.connect(self._commandEntered)
self.repl.sigCommandRaisedException.connect(self._commandRaisedException)
self.excHandler = ExceptionHandlerWidget(self)
self.excHandler.hide()
self.splitter.addWidget(self.excHandler)
self.exceptionBtn = QtWidgets.QPushButton("Exceptions..", self)
self.exceptionBtn.setCheckable(True)
self.repl.inputLayout.addWidget(self.exceptionBtn)
self.excHandler.sigStackItemDblClicked.connect(self._stackItemDblClicked)
self.exceptionBtn.toggled.connect(self.excHandler.setVisible)
self.historyBtn.toggled.connect(self.historyList.setVisible)
self.historyList.itemClicked.connect(self.cmdSelected)
self.historyList.itemDoubleClicked.connect(self.cmdDblClicked)
def catchAllExceptions(self, catch=True):
if catch:
self.exceptionBtn.setChecked(True)
self.excHandler.catchAllExceptions(catch)
def catchNextException(self, catch=True):
if catch:
self.exceptionBtn.setChecked(True)
self.excHandler.catchNextException(catch)
def setStack(self, frame=None):
self.excHandler.setStack(frame)
def loadHistory(self):
"""Return the list of previously-invoked command strings (or None)."""
if self.historyFile is not None and os.path.exists(self.historyFile):
with open(self.historyFile, 'rb') as pf:
return pickle.load(pf)
def saveHistory(self, history):
"""Store the list of previously-invoked command strings."""
if self.historyFile is not None:
with open(self.historyFile, 'wb') as pf:
pickle.dump(history, pf)
def _commandEntered(self, repl, cmd):
self.historyList.addItem(cmd)
self.saveHistory(self.input.history[1:100])
sb = self.historyList.verticalScrollBar()
sb.setValue(sb.maximum())
def _commandRaisedException(self, repl, exc):
self.excHandler.exceptionHandler(exc)
def globals(self):
frame = self.excHandler.selectedFrame()
if frame is not None and self.excHandler.runSelectedFrameCheck.isChecked():
return frame.f_globals
else:
return self.localNamespace
def locals(self):
frame = self.excHandler.selectedFrame()
if frame is not None and self.excHandler.runSelectedFrameCheck.isChecked():
return frame.f_locals
else:
return self.localNamespace
def cmdSelected(self, item):
index = -(self.historyList.row(item)+1)
self.input.setHistory(index)
self.input.setFocus()
def cmdDblClicked(self, item):
index = -(self.historyList.row(item)+1)
self.input.setHistory(index)
self.input.execCmd()
def _stackItemDblClicked(self, handler, item):
editor = self.editor
if editor is None:
editor = getConfigOption('editorCommand')
if editor is None:
return
tb = self.excHandler.selectedFrame()
lineNum = tb.f_lineno
fileName = tb.f_code.co_filename
subprocess.Popen(self.editor.format(fileName=fileName, lineNum=lineNum), shell=True)
| ConsoleWidget |
python | spyder-ide__spyder | spyder/plugins/remoteclient/widgets/connectionpages.py | {
"start": 2066,
"end": 2312
} | class ____:
NewEnv = 1
ImportEnv = 2
NoEnv = 4
# =============================================================================
# ---- Pages
# =============================================================================
| CreateEnvMethods |
python | huggingface__transformers | src/transformers/models/tapas/modeling_tapas.py | {
"start": 57788,
"end": 57985
} | class ____(str, enum.Enum):
RATIO = "ratio"
FIRST_ORDER = "first_order"
SECOND_ORDER = "second_order"
# Beginning of everything related to segmented tensors
| AverageApproximationFunction |
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 95335,
"end": 96610
} | class ____(TypedDict, total=False):
"""
:class:`altair.FeatureGeometryGeoJsonProperties` ``TypedDict`` wrapper.
Parameters
----------
geometry
The feature's geometry
properties
Properties associated with this feature.
type
Specifies the type of GeoJSON object.
bbox
Bounding box of the coordinate range of the object's Geometries, Features, or
Feature Collections. The value of the bbox member is an array of length 2*n where n
is the number of dimensions represented in the contained geometries, with all axes
of the most southwesterly point followed by all axes of the more northeasterly
point. The axes order of a bbox follows the axes order of geometries.
https://tools.ietf.org/html/rfc7946#section-5
id
A value that uniquely identifies this feature in a
https://tools.ietf.org/html/rfc7946#section-3.2.
"""
geometry: (
PointKwds
| PolygonKwds
| LineStringKwds
| MultiPointKwds
| MultiPolygonKwds
| MultiLineStringKwds
| GeometryCollectionKwds
)
properties: None
type: Literal["Feature"]
bbox: Sequence[float]
id: str | float
| FeatureGeometryGeoJsonPropertiesKwds |
python | python__mypy | mypy/config_parser.py | {
"start": 662,
"end": 25918
} | class ____(argparse.ArgumentTypeError):
"""Provide a fallback value if the Python version is unsupported."""
def __init__(self, *args: Any, fallback: tuple[int, int]) -> None:
self.fallback = fallback
super().__init__(*args)
def parse_version(v: str | float) -> tuple[int, int]:
m = re.match(r"\A(\d)\.(\d+)\Z", str(v))
if not m:
raise argparse.ArgumentTypeError(f"Invalid python version '{v}' (expected format: 'x.y')")
major, minor = int(m.group(1)), int(m.group(2))
if major == 2 and minor == 7:
pass # Error raised elsewhere
elif major == 3:
if minor < defaults.PYTHON3_VERSION_MIN[1]:
msg = "Python 3.{} is not supported (must be {}.{} or higher)".format(
minor, *defaults.PYTHON3_VERSION_MIN
)
if isinstance(v, float):
msg += ". You may need to put quotes around your Python version"
raise VersionTypeError(msg, fallback=defaults.PYTHON3_VERSION_MIN)
else:
raise argparse.ArgumentTypeError(
f"Python major version '{major}' out of range (must be 3)"
)
return major, minor
def try_split(v: str | Sequence[str] | object, split_regex: str = ",") -> list[str]:
"""Split and trim a str or sequence (eg: list) of str into a list of str.
If an element of the input is not str, a type error will be raised."""
def complain(x: object, additional_info: str = "") -> Never:
raise argparse.ArgumentTypeError(
f"Expected a list or a stringified version thereof, but got: '{x}', of type {type(x).__name__}.{additional_info}"
)
if isinstance(v, str):
items = [p.strip() for p in re.split(split_regex, v)]
if items and items[-1] == "":
items.pop(-1)
return items
elif isinstance(v, Sequence):
return [
(
p.strip()
if isinstance(p, str)
else complain(p, additional_info=" (As an element of the list.)")
)
for p in v
]
else:
complain(v)
def validate_package_allow_list(allow_list: list[str]) -> list[str]:
for p in allow_list:
msg = f"Invalid allow list entry: {p}"
if "*" in p:
raise argparse.ArgumentTypeError(
f"{msg} (entries are already prefixes so must not contain *)"
)
if "\\" in p or "/" in p:
raise argparse.ArgumentTypeError(
f"{msg} (entries must be packages like foo.bar not directories or files)"
)
return allow_list
def expand_path(path: str) -> str:
"""Expand the user home directory and any environment variables contained within
the provided path.
"""
return os.path.expandvars(os.path.expanduser(path))
def str_or_array_as_list(v: str | Sequence[str]) -> list[str]:
if isinstance(v, str):
return [v.strip()] if v.strip() else []
return [p.strip() for p in v if p.strip()]
def split_and_match_files_list(paths: Sequence[str]) -> list[str]:
"""Take a list of files/directories (with support for globbing through the glob library).
Where a path/glob matches no file, we still include the raw path in the resulting list.
Returns a list of file paths
"""
expanded_paths = []
for path in paths:
path = expand_path(path.strip())
globbed_files = fileglob.glob(path, recursive=True)
if globbed_files:
expanded_paths.extend(globbed_files)
else:
expanded_paths.append(path)
return expanded_paths
def split_and_match_files(paths: str) -> list[str]:
"""Take a string representing a list of files/directories (with support for globbing
through the glob library).
Where a path/glob matches no file, we still include the raw path in the resulting list.
Returns a list of file paths
"""
return split_and_match_files_list(split_commas(paths))
def check_follow_imports(choice: str) -> str:
choices = ["normal", "silent", "skip", "error"]
if choice not in choices:
raise argparse.ArgumentTypeError(
"invalid choice '{}' (choose from {})".format(
choice, ", ".join(f"'{x}'" for x in choices)
)
)
return choice
def check_junit_format(choice: str) -> str:
choices = ["global", "per_file"]
if choice not in choices:
raise argparse.ArgumentTypeError(
"invalid choice '{}' (choose from {})".format(
choice, ", ".join(f"'{x}'" for x in choices)
)
)
return choice
def split_commas(value: str) -> list[str]:
# Uses a bit smarter technique to allow last trailing comma
# and to remove last `""` item from the split.
items = value.split(",")
if items and items[-1] == "":
items.pop(-1)
return items
# For most options, the type of the default value set in options.py is
# sufficient, and we don't have to do anything here. This table
# exists to specify types for values initialized to None or container
# types.
ini_config_types: Final[dict[str, _INI_PARSER_CALLABLE]] = {
"python_version": parse_version,
"custom_typing_module": str,
"custom_typeshed_dir": expand_path,
"mypy_path": lambda s: [expand_path(p.strip()) for p in re.split("[,:]", s)],
"files": split_and_match_files,
"quickstart_file": expand_path,
"junit_xml": expand_path,
"junit_format": check_junit_format,
"follow_imports": check_follow_imports,
"no_site_packages": bool,
"plugins": lambda s: [p.strip() for p in split_commas(s)],
"always_true": lambda s: [p.strip() for p in split_commas(s)],
"always_false": lambda s: [p.strip() for p in split_commas(s)],
"untyped_calls_exclude": lambda s: validate_package_allow_list(
[p.strip() for p in split_commas(s)]
),
"enable_incomplete_feature": lambda s: [p.strip() for p in split_commas(s)],
"disable_error_code": lambda s: [p.strip() for p in split_commas(s)],
"enable_error_code": lambda s: [p.strip() for p in split_commas(s)],
"package_root": lambda s: [p.strip() for p in split_commas(s)],
"cache_dir": expand_path,
"python_executable": expand_path,
"strict": bool,
"exclude": lambda s: [s.strip()],
"packages": try_split,
"modules": try_split,
}
# Reuse the ini_config_types and overwrite the diff
toml_config_types: Final[dict[str, _INI_PARSER_CALLABLE]] = ini_config_types.copy()
toml_config_types.update(
{
"python_version": parse_version,
"mypy_path": lambda s: [expand_path(p) for p in try_split(s, "[,:]")],
"files": lambda s: split_and_match_files_list(try_split(s)),
"junit_format": lambda s: check_junit_format(str(s)),
"follow_imports": lambda s: check_follow_imports(str(s)),
"plugins": try_split,
"always_true": try_split,
"always_false": try_split,
"untyped_calls_exclude": lambda s: validate_package_allow_list(try_split(s)),
"enable_incomplete_feature": try_split,
"disable_error_code": lambda s: try_split(s),
"enable_error_code": lambda s: try_split(s),
"package_root": try_split,
"exclude": str_or_array_as_list,
"packages": try_split,
"modules": try_split,
}
)
def _parse_individual_file(
config_file: str, stderr: TextIO | None = None
) -> tuple[MutableMapping[str, Any], dict[str, _INI_PARSER_CALLABLE], str] | None:
if not os.path.exists(config_file):
return None
parser: MutableMapping[str, Any]
try:
if is_toml(config_file):
with open(config_file, "rb") as f:
toml_data = tomllib.load(f)
# Filter down to just mypy relevant toml keys
toml_data = toml_data.get("tool", {})
if "mypy" not in toml_data:
return None
toml_data = {"mypy": toml_data["mypy"]}
parser = destructure_overrides(toml_data)
config_types = toml_config_types
else:
parser = configparser.RawConfigParser()
parser.read(config_file)
config_types = ini_config_types
except (tomllib.TOMLDecodeError, configparser.Error, ConfigTOMLValueError) as err:
print(f"{config_file}: {err}", file=stderr)
return None
if os.path.basename(config_file) in defaults.SHARED_CONFIG_NAMES and "mypy" not in parser:
return None
return parser, config_types, config_file
def _find_config_file(
stderr: TextIO | None = None,
) -> tuple[MutableMapping[str, Any], dict[str, _INI_PARSER_CALLABLE], str] | None:
current_dir = os.path.abspath(os.getcwd())
while True:
for name in defaults.CONFIG_NAMES + defaults.SHARED_CONFIG_NAMES:
config_file = os.path.relpath(os.path.join(current_dir, name))
ret = _parse_individual_file(config_file, stderr)
if ret is None:
continue
return ret
if any(
os.path.exists(os.path.join(current_dir, cvs_root)) for cvs_root in (".git", ".hg")
):
break
parent_dir = os.path.dirname(current_dir)
if parent_dir == current_dir:
break
current_dir = parent_dir
for config_file in defaults.USER_CONFIG_FILES:
ret = _parse_individual_file(config_file, stderr)
if ret is None:
continue
return ret
return None
def parse_config_file(
options: Options,
set_strict_flags: Callable[[], None],
filename: str | None,
stdout: TextIO | None = None,
stderr: TextIO | None = None,
) -> None:
"""Parse a config file into an Options object.
Errors are written to stderr but are not fatal.
If filename is None, fall back to default config files.
"""
stdout = stdout or sys.stdout
stderr = stderr or sys.stderr
ret = (
_parse_individual_file(filename, stderr)
if filename is not None
else _find_config_file(stderr)
)
if ret is None:
return
parser, config_types, file_read = ret
options.config_file = file_read
os.environ["MYPY_CONFIG_FILE_DIR"] = os.path.dirname(os.path.abspath(file_read))
if "mypy" not in parser:
if filename or os.path.basename(file_read) not in defaults.SHARED_CONFIG_NAMES:
print(f"{file_read}: No [mypy] section in config file", file=stderr)
else:
section = parser["mypy"]
prefix = f"{file_read}: [mypy]: "
updates, report_dirs = parse_section(
prefix, options, set_strict_flags, section, config_types, stderr
)
for k, v in updates.items():
setattr(options, k, v)
options.report_dirs.update(report_dirs)
for name, section in parser.items():
if name.startswith("mypy-"):
prefix = get_prefix(file_read, name)
updates, report_dirs = parse_section(
prefix, options, set_strict_flags, section, config_types, stderr
)
if report_dirs:
print(
prefix,
"Per-module sections should not specify reports ({})".format(
", ".join(s + "_report" for s in sorted(report_dirs))
),
file=stderr,
)
if set(updates) - PER_MODULE_OPTIONS:
print(
prefix,
"Per-module sections should only specify per-module flags ({})".format(
", ".join(sorted(set(updates) - PER_MODULE_OPTIONS))
),
file=stderr,
)
updates = {k: v for k, v in updates.items() if k in PER_MODULE_OPTIONS}
globs = name[5:]
for glob in globs.split(","):
# For backwards compatibility, replace (back)slashes with dots.
glob = glob.replace(os.sep, ".")
if os.altsep:
glob = glob.replace(os.altsep, ".")
if any(c in glob for c in "?[]!") or any(
"*" in x and x != "*" for x in glob.split(".")
):
print(
prefix,
"Patterns must be fully-qualified module names, optionally "
"with '*' in some components (e.g spam.*.eggs.*)",
file=stderr,
)
else:
options.per_module_options[glob] = updates
def get_prefix(file_read: str, name: str) -> str:
if is_toml(file_read):
module_name_str = 'module = "%s"' % "-".join(name.split("-")[1:])
else:
module_name_str = name
return f"{file_read}: [{module_name_str}]:"
def is_toml(filename: str) -> bool:
return filename.lower().endswith(".toml")
def destructure_overrides(toml_data: dict[str, Any]) -> dict[str, Any]:
"""Take the new [[tool.mypy.overrides]] section array in the pyproject.toml file,
and convert it back to a flatter structure that the existing config_parser can handle.
E.g. the following pyproject.toml file:
[[tool.mypy.overrides]]
module = [
"a.b",
"b.*"
]
disallow_untyped_defs = true
[[tool.mypy.overrides]]
module = 'c'
disallow_untyped_defs = false
Would map to the following config dict that it would have gotten from parsing an equivalent
ini file:
{
"mypy-a.b": {
disallow_untyped_defs = true,
},
"mypy-b.*": {
disallow_untyped_defs = true,
},
"mypy-c": {
disallow_untyped_defs: false,
},
}
"""
if "overrides" not in toml_data["mypy"]:
return toml_data
if not isinstance(toml_data["mypy"]["overrides"], list):
raise ConfigTOMLValueError(
"tool.mypy.overrides sections must be an array. Please make "
"sure you are using double brackets like so: [[tool.mypy.overrides]]"
)
result = toml_data.copy()
for override in result["mypy"]["overrides"]:
if "module" not in override:
raise ConfigTOMLValueError(
"toml config file contains a [[tool.mypy.overrides]] "
"section, but no module to override was specified."
)
if isinstance(override["module"], str):
modules = [override["module"]]
elif isinstance(override["module"], list):
modules = override["module"]
else:
raise ConfigTOMLValueError(
"toml config file contains a [[tool.mypy.overrides]] "
"section with a module value that is not a string or a list of "
"strings"
)
for module in modules:
module_overrides = override.copy()
del module_overrides["module"]
old_config_name = f"mypy-{module}"
if old_config_name not in result:
result[old_config_name] = module_overrides
else:
for new_key, new_value in module_overrides.items():
if (
new_key in result[old_config_name]
and result[old_config_name][new_key] != new_value
):
raise ConfigTOMLValueError(
"toml config file contains "
"[[tool.mypy.overrides]] sections with conflicting "
f"values. Module '{module}' has two different values for '{new_key}'"
)
result[old_config_name][new_key] = new_value
del result["mypy"]["overrides"]
return result
def parse_section(
prefix: str,
template: Options,
set_strict_flags: Callable[[], None],
section: Mapping[str, Any],
config_types: dict[str, Any],
stderr: TextIO = sys.stderr,
) -> tuple[dict[str, object], dict[str, str]]:
"""Parse one section of a config file.
Returns a dict of option values encountered, and a dict of report directories.
"""
results: dict[str, object] = {}
report_dirs: dict[str, str] = {}
# Because these fields exist on Options, without proactive checking, we would accept them
# and crash later
invalid_options = {
"enabled_error_codes": "enable_error_code",
"disabled_error_codes": "disable_error_code",
}
for key in section:
invert = False
options_key = key
if key in config_types:
ct = config_types[key]
elif key in invalid_options:
print(
f"{prefix}Unrecognized option: {key} = {section[key]}"
f" (did you mean {invalid_options[key]}?)",
file=stderr,
)
continue
else:
dv = getattr(template, key, None)
if dv is None:
if key.endswith("_report"):
report_type = key[:-7].replace("_", "-")
if report_type in defaults.REPORTER_NAMES:
report_dirs[report_type] = str(section[key])
else:
print(f"{prefix}Unrecognized report type: {key}", file=stderr)
continue
if key.startswith("x_"):
pass # Don't complain about `x_blah` flags
elif key.startswith("no_") and hasattr(template, key[3:]):
options_key = key[3:]
invert = True
elif key.startswith("allow") and hasattr(template, "dis" + key):
options_key = "dis" + key
invert = True
elif key.startswith("disallow") and hasattr(template, key[3:]):
options_key = key[3:]
invert = True
elif key.startswith("show_") and hasattr(template, "hide_" + key[5:]):
options_key = "hide_" + key[5:]
invert = True
elif key == "strict":
pass # Special handling below
else:
print(f"{prefix}Unrecognized option: {key} = {section[key]}", file=stderr)
if invert:
dv = getattr(template, options_key, None)
else:
continue
ct = type(dv)
v: Any = None
try:
if ct is bool:
if isinstance(section, dict):
v = convert_to_boolean(section.get(key))
else:
v = section.getboolean(key) # type: ignore[attr-defined] # Until better stub
if invert:
v = not v
elif callable(ct):
if invert:
print(f"{prefix}Can not invert non-boolean key {options_key}", file=stderr)
continue
try:
v = ct(section.get(key))
except VersionTypeError as err_version:
print(f"{prefix}{key}: {err_version}", file=stderr)
v = err_version.fallback
except argparse.ArgumentTypeError as err:
print(f"{prefix}{key}: {err}", file=stderr)
continue
else:
print(f"{prefix}Don't know what type {key} should have", file=stderr)
continue
except ValueError as err:
print(f"{prefix}{key}: {err}", file=stderr)
continue
if key == "strict":
if v:
set_strict_flags()
continue
results[options_key] = v
# These two flags act as per-module overrides, so store the empty defaults.
if "disable_error_code" not in results:
results["disable_error_code"] = []
if "enable_error_code" not in results:
results["enable_error_code"] = []
return results, report_dirs
def convert_to_boolean(value: Any | None) -> bool:
"""Return a boolean value translating from other types if necessary."""
if isinstance(value, bool):
return value
if not isinstance(value, str):
value = str(value)
if value.lower() not in configparser.RawConfigParser.BOOLEAN_STATES:
raise ValueError(f"Not a boolean: {value}")
return configparser.RawConfigParser.BOOLEAN_STATES[value.lower()]
def split_directive(s: str) -> tuple[list[str], list[str]]:
"""Split s on commas, except during quoted sections.
Returns the parts and a list of error messages."""
parts = []
cur: list[str] = []
errors = []
i = 0
while i < len(s):
if s[i] == ",":
parts.append("".join(cur).strip())
cur = []
elif s[i] == '"':
i += 1
while i < len(s) and s[i] != '"':
cur.append(s[i])
i += 1
if i == len(s):
errors.append("Unterminated quote in configuration comment")
cur.clear()
else:
cur.append(s[i])
i += 1
if cur:
parts.append("".join(cur).strip())
return parts, errors
def mypy_comments_to_config_map(line: str, template: Options) -> tuple[dict[str, str], list[str]]:
"""Rewrite the mypy comment syntax into ini file syntax."""
options = {}
entries, errors = split_directive(line)
for entry in entries:
if "=" not in entry:
name = entry
value = None
else:
name, value = (x.strip() for x in entry.split("=", 1))
name = name.replace("-", "_")
if value is None:
value = "True"
options[name] = value
return options, errors
def parse_mypy_comments(
    args: list[tuple[int, str]], template: Options
) -> tuple[dict[str, object], list[tuple[int, str]]]:
    """Parse a collection of inline mypy: configuration comments.

    Args:
        args: (line number, comment text) pairs, one per inline comment.
        template: Options object passed through to ``parse_section`` —
            presumably the base options the inline flags refine; confirm
            against the caller.

    Returns a dictionary of options to be applied and a list of error messages
    generated, each tagged with the line number of the comment it came from.
    """
    errors: list[tuple[int, str]] = []
    # Pre-seed the error-code lists so the merge logic at the bottom of the
    # loop can always extend them rather than overwrite.
    sections: dict[str, object] = {"enable_error_code": [], "disable_error_code": []}
    for lineno, line in args:
        # In order to easily match the behavior for bools, we abuse configparser.
        # Oddly, the only way to get the SectionProxy object with the getboolean
        # method is to create a config parser.
        parser = configparser.RawConfigParser()
        options, parse_errors = mypy_comments_to_config_map(line, template)
        # python_version is rejected before handing options to configparser.
        if "python_version" in options:
            errors.append((lineno, "python_version not supported in inline configuration"))
            del options["python_version"]
        parser["dummy"] = options
        errors.extend((lineno, x) for x in parse_errors)
        # parse_section reports problems on this stream; strict is detected
        # via the callback because it is not a plain per-option flag.
        stderr = StringIO()
        strict_found = False
        def set_strict_flags() -> None:
            nonlocal strict_found
            strict_found = True
        new_sections, reports = parse_section(
            "", template, set_strict_flags, parser["dummy"], ini_config_types, stderr=stderr
        )
        # Each non-empty stderr line becomes one error for this comment.
        errors.extend((lineno, x) for x in stderr.getvalue().strip().split("\n") if x)
        if reports:
            errors.append((lineno, "Reports not supported in inline configuration"))
        if strict_found:
            errors.append(
                (
                    lineno,
                    'Setting "strict" not supported in inline configuration: specify it in '
                    "a configuration file instead, or set individual inline flags "
                    '(see "mypy -h" for the list of flags enabled in strict mode)',
                )
            )
        # Because this is currently special-cased
        # (the new_sections for an inline config *always* includes 'disable_error_code' and
        # 'enable_error_code' fields, usually empty, which overwrite the old ones),
        # we have to manipulate them specially.
        # This could use a refactor, but so could the whole subsystem.
        if (
            "enable_error_code" in new_sections
            and isinstance(neec := new_sections["enable_error_code"], list)
            and isinstance(eec := sections.get("enable_error_code", []), list)
        ):
            new_sections["enable_error_code"] = sorted(set(neec + eec))
        if (
            "disable_error_code" in new_sections
            and isinstance(ndec := new_sections["disable_error_code"], list)
            and isinstance(dec := sections.get("disable_error_code", []), list)
        ):
            new_sections["disable_error_code"] = sorted(set(ndec + dec))
        # Later comments win for every other key (plain dict overwrite).
        sections.update(new_sections)
    return sections, errors
def get_config_module_names(filename: str | None, modules: list[str]) -> str:
if not filename or not modules:
return ""
if not is_toml(filename):
return ", ".join(f"[mypy-{module}]" for module in modules)
return "module = ['%s']" % ("', '".join(sorted(modules)))
| VersionTypeError |
python | pytorch__pytorch | torch/__init__.py | {
"start": 72775,
"end": 73007
} | class ____(_LegacyStorage):
@classproperty
def dtype(self):
_warn_typed_storage_removal(stacklevel=3)
return self._dtype
@classproperty
def _dtype(self):
return torch.cfloat
| ComplexFloatStorage |
python | numba__llvmlite | llvmlite/binding/ffi.py | {
"start": 11001,
"end": 13163
} | class ____(object):
"""
A wrapper around a ctypes pointer to a LLVM object ("resource").
"""
_closed = False
_as_parameter_ = _DeadPointer()
# Whether this object pointer is owned by another one.
_owned = False
def __init__(self, ptr):
if ptr is None:
raise ValueError("NULL pointer")
self._ptr = ptr
self._as_parameter_ = ptr
self._capi = lib
def close(self):
"""
Close this object and do any required clean-up actions.
"""
try:
if not self._closed and not self._owned:
self._dispose()
finally:
self.detach()
def detach(self):
"""
Detach the underlying LLVM resource without disposing of it.
"""
if not self._closed:
del self._as_parameter_
self._closed = True
self._ptr = None
def _dispose(self):
"""
Dispose of the underlying LLVM resource. Should be overriden
by subclasses. Automatically called by close(), __del__() and
__exit__() (unless the resource has been detached).
"""
@property
def closed(self):
"""
Whether this object has been closed. A closed object can't
be used anymore.
"""
return self._closed
def __enter__(self):
assert hasattr(self, "close")
if self._closed:
raise RuntimeError("%s instance already closed" % (self.__class__,))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __del__(self, _is_shutting_down=_is_shutting_down):
if not _is_shutting_down():
if self.close is not None:
self.close()
def __bool__(self):
return bool(self._ptr)
def __eq__(self, other):
if not hasattr(other, "_ptr"):
return False
return ctypes.addressof(self._ptr[0]) == \
ctypes.addressof(other._ptr[0])
__nonzero__ = __bool__
# XXX useful?
def __hash__(self):
return hash(ctypes.cast(self._ptr, ctypes.c_void_p).value)
| ObjectRef |
python | kamyu104__LeetCode-Solutions | Python/regions-cut-by-slashes.py | {
"start": 498,
"end": 1832
} | class ____(object):
def regionsBySlashes(self, grid):
"""
:type grid: List[str]
:rtype: int
"""
def index(n, i, j, k):
return (i*n + j)*4 + k
union_find = UnionFind(len(grid)**2 * 4)
N, E, S, W = range(4)
for i in xrange(len(grid)):
for j in xrange(len(grid)):
if i:
union_find.union_set(index(len(grid), i-1, j, S),
index(len(grid),i, j, N))
if j:
union_find.union_set(index(len(grid), i, j-1, E),
index(len(grid), i, j, W))
if grid[i][j] != "/":
union_find.union_set(index(len(grid), i, j, N),
index(len(grid), i, j, E))
union_find.union_set(index(len(grid), i, j, S),
index(len(grid), i, j, W))
if grid[i][j] != "\\":
union_find.union_set(index(len(grid), i, j, W),
index(len(grid), i, j, N))
union_find.union_set(index(len(grid), i, j, E),
index(len(grid), i, j, S))
return union_find.count
| Solution |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_framework.py | {
"start": 25843,
"end": 30164
} | class ____:
"""Test before_agent and after_agent middleware hooks."""
@pytest.mark.parametrize("is_async", [False, True])
@pytest.mark.parametrize("hook_type", ["before", "after"])
async def test_hook_execution(self, is_async: bool, hook_type: str) -> None:
"""Test that agent hooks are called in both sync and async modes."""
from langchain.agents.middleware import after_agent, before_agent
execution_log: list[str] = []
if is_async:
if hook_type == "before":
@before_agent
async def log_hook(state: AgentState, runtime) -> dict[str, Any] | None:
execution_log.append(f"{hook_type}_agent_called")
execution_log.append(f"message_count: {len(state['messages'])}")
return None
else:
@after_agent
async def log_hook(state: AgentState, runtime) -> dict[str, Any] | None:
execution_log.append(f"{hook_type}_agent_called")
execution_log.append(f"message_count: {len(state['messages'])}")
return None
else:
if hook_type == "before":
@before_agent
def log_hook(state: AgentState, runtime) -> dict[str, Any] | None:
execution_log.append(f"{hook_type}_agent_called")
execution_log.append(f"message_count: {len(state['messages'])}")
return None
else:
@after_agent
def log_hook(state: AgentState, runtime) -> dict[str, Any] | None:
execution_log.append(f"{hook_type}_agent_called")
execution_log.append(f"message_count: {len(state['messages'])}")
return None
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
model = GenericFakeChatModel(messages=iter([AIMessage(content="Response")]))
agent = create_agent(model=model, tools=[], middleware=[log_hook])
if is_async:
await agent.ainvoke({"messages": [HumanMessage("Hi")]})
else:
agent.invoke({"messages": [HumanMessage("Hi")]})
assert f"{hook_type}_agent_called" in execution_log
assert any("message_count:" in log for log in execution_log)
@pytest.mark.parametrize("is_async", [False, True])
@pytest.mark.parametrize("hook_type", ["before", "after"])
async def test_hook_with_class_inheritance(self, is_async: bool, hook_type: str) -> None:
"""Test agent hooks using class inheritance in both sync and async modes."""
execution_log: list[str] = []
if is_async:
class CustomMiddleware(AgentMiddleware):
async def abefore_agent(self, state: AgentState, runtime) -> dict[str, Any] | None:
if hook_type == "before":
execution_log.append("hook_called")
return None
async def aafter_agent(self, state: AgentState, runtime) -> dict[str, Any] | None:
if hook_type == "after":
execution_log.append("hook_called")
return None
else:
class CustomMiddleware(AgentMiddleware):
def before_agent(self, state: AgentState, runtime) -> dict[str, Any] | None:
if hook_type == "before":
execution_log.append("hook_called")
return None
def after_agent(self, state: AgentState, runtime) -> dict[str, Any] | None:
if hook_type == "after":
execution_log.append("hook_called")
return None
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
middleware = CustomMiddleware()
model = GenericFakeChatModel(messages=iter([AIMessage(content="Response")]))
agent = create_agent(model=model, tools=[], middleware=[middleware])
if is_async:
await agent.ainvoke({"messages": [HumanMessage("Test")]})
else:
agent.invoke({"messages": [HumanMessage("Test")]})
assert "hook_called" in execution_log
| TestAgentMiddlewareHooks |
python | ray-project__ray | python/ray/serve/tests/test_config_files/test_dag/hello_serve.py | {
"start": 154,
"end": 289
} | class ____:
async def __call__(self, starlette_request: Request) -> None:
return hello()
model = HelloModel.bind()
| HelloModel |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/container_volume.py | {
"start": 1193,
"end": 5794
} | class ____(BaseHook):
"""
A hook which wraps an Azure Volume.
:param azure_container_volume_conn_id: Reference to the
:ref:`Azure Container Volume connection id <howto/connection:azure_container_volume>`
of an Azure account of which container volumes should be used.
"""
conn_name_attr = "azure_container_volume_conn_id"
default_conn_name = "azure_container_volume_default"
conn_type = "azure_container_volume"
hook_name = "Azure Container Volume"
@classmethod
@add_managed_identity_connection_widgets
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3PasswordFieldWidget, BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import PasswordField, StringField
return {
"connection_string": PasswordField(
lazy_gettext("Blob Storage Connection String (optional)"), widget=BS3PasswordFieldWidget()
),
"subscription_id": StringField(
lazy_gettext("Subscription ID (optional)"),
widget=BS3TextFieldWidget(),
),
"resource_group": StringField(
lazy_gettext("Resource group name (optional)"),
widget=BS3TextFieldWidget(),
),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom field behaviour."""
return {
"hidden_fields": ["schema", "port", "host", "extra"],
"relabeling": {
"login": "Azure Client ID",
"password": "Azure Secret",
},
"placeholders": {
"login": "client_id (token credentials auth)",
"password": "secret (token credentials auth)",
"connection_string": "connection string auth",
"subscription_id": "Subscription id (required for Azure AD authentication)",
"resource_group": "Resource group name (required for Azure AD authentication)",
},
}
def __init__(self, azure_container_volume_conn_id: str = "azure_container_volume_default") -> None:
super().__init__()
self.conn_id = azure_container_volume_conn_id
def _get_field(self, extras, name):
return get_field(
conn_id=self.conn_id,
conn_type=self.conn_type,
extras=extras,
field_name=name,
)
def get_storagekey(self, *, storage_account_name: str | None = None) -> str:
"""Get Azure File Volume storage key."""
conn = self.get_connection(self.conn_id)
extras = conn.extra_dejson
connection_string = self._get_field(extras, "connection_string")
if connection_string:
for keyvalue in connection_string.split(";"):
key, value = keyvalue.split("=", 1)
if key == "AccountKey":
return value
subscription_id = self._get_field(extras, "subscription_id")
resource_group = self._get_field(extras, "resource_group")
if subscription_id and storage_account_name and resource_group:
managed_identity_client_id = self._get_field(extras, "managed_identity_client_id")
workload_identity_tenant_id = self._get_field(extras, "workload_identity_tenant_id")
credential = get_sync_default_azure_credential(
managed_identity_client_id=managed_identity_client_id,
workload_identity_tenant_id=workload_identity_tenant_id,
)
storage_client = StorageManagementClient(credential, subscription_id)
storage_account_list_keys_result = storage_client.storage_accounts.list_keys(
resource_group, storage_account_name
)
return storage_account_list_keys_result.as_dict()["keys"][0]["value"]
return cast("str", conn.password)
def get_file_volume(
self, mount_name: str, share_name: str, storage_account_name: str, read_only: bool = False
) -> Volume:
"""Get Azure File Volume."""
return Volume(
name=mount_name,
azure_file=AzureFileVolume(
share_name=share_name,
storage_account_name=storage_account_name,
read_only=read_only,
storage_account_key=self.get_storagekey(storage_account_name=storage_account_name),
),
)
| AzureContainerVolumeHook |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-instagram/integration_tests/test_streams.py | {
"start": 1233,
"end": 3326
} | class ____:
"""Custom integration tests should test incremental with nested state"""
def test_incremental_streams(self, config, state):
records, states = self._read_records(config, "user_insights")
assert len(records) == 30, "UserInsights for two accounts over last 30 day should return 30 records when empty STATE provided"
records, states = self._read_records(config, "user_insights", state)
assert len(records) <= 60 - 10 - 5, "UserInsights should have less records returned when non empty STATE provided"
assert states, "insights should produce states"
for state_msg in states:
stream_name, stream_state, state_keys_count = (
state_msg.state.stream.stream_descriptor.name,
state_msg.state.stream.stream_state,
len(state_msg.state.stream.stream_state.__dict__.get("states", {})),
)
assert stream_name == "user_insights", f"each state message should reference 'user_insights' stream, got {stream_name} instead"
assert isinstance(
stream_state, AirbyteStateBlob
), f"Stream state should be type AirbyteStateBlob, got {type(stream_state)} instead"
assert state_keys_count == 2, f"Stream state should contain 2 partition keys, got {state_keys_count} instead"
@staticmethod
def _read_records(conf, stream_name, state=None) -> Tuple[List[AirbyteMessage], List[AirbyteMessage]]:
records = []
states = []
output = read(
get_source(config=conf, state=state),
conf,
CatalogBuilder().with_stream(ConfiguredAirbyteStreamBuilder().with_name(stream_name)).build(),
state=state,
)
for message in output.records_and_state_messages:
if message.type == Type.RECORD:
records.append(message)
elif message.type == Type.STATE:
print(message.state.stream.stream_state.__dict__)
states.append(message)
return records, states
| TestInstagramSource |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/where_test.py | {
"start": 706,
"end": 1403
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, cond_shape, input_shape, other_shape, dtype, device):
def _create_tensor(shape):
return torch.randn(*shape, dtype=dtype, device=device)
self.inputs = {
"condition": _create_tensor(cond_shape) > 0,
"input": _create_tensor(input_shape),
"other": _create_tensor(other_shape),
}
self.set_module_name("where")
def forward(self, condition, input, other):
return torch.where(condition, input, other)
op_bench.generate_pt_test(configs_short + configs_long, WhereBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| WhereBenchmark |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/confirm.py | {
"start": 956,
"end": 4077
} | class ____(Enum):
YES = "y"
NO = "n"
QUIT = "q"
def user_confirm(
message: str,
timeout: float | None = None,
default_answer: Answer | None = Answer.NO,
quit_allowed: bool = True,
) -> Answer:
"""Ask the user for confirmation.
:param message: message to display to the user (should end with the question mark)
:param timeout: time given user to answer
:param default_answer: default value returned on timeout. If no default - is set, the timeout is ignored.
:param quit_allowed: whether quit answer is allowed
"""
from inputimeout import TimeoutOccurred, inputimeout
allowed_answers = "y/n/q" if quit_allowed else "y/n"
while True:
try:
force = get_forced_answer() or os.environ.get("ANSWER")
if force:
user_status = force
print(f"Forced answer for '{message}': {force}")
else:
if default_answer:
# Capitalise default answer
allowed_answers = allowed_answers.replace(
default_answer.value, default_answer.value.upper()
)
timeout_answer = default_answer.value
else:
timeout = None
timeout_answer = ""
message_prompt = f"\n{message} \nPress {allowed_answers}"
if default_answer and timeout:
message_prompt += (
f". Auto-select {timeout_answer} in {timeout} seconds "
f"(add `--answer {default_answer.value}` to avoid delay next time)"
)
message_prompt += ": "
user_status = inputimeout(
prompt=message_prompt,
timeout=timeout,
)
if user_status == "":
if default_answer:
return default_answer
continue
if user_status.upper() in ["Y", "YES"]:
return Answer.YES
if user_status.upper() in ["N", "NO"]:
return Answer.NO
if user_status.upper() in ["Q", "QUIT"] and quit_allowed:
return Answer.QUIT
print(f"Wrong answer given {user_status}. Should be one of {allowed_answers}. Try again.")
except TimeoutOccurred:
if default_answer:
return default_answer
# timeout should only occur when default_answer is set so this should never happened
except KeyboardInterrupt:
if quit_allowed:
return Answer.QUIT
sys.exit(1)
def confirm_action(
message: str,
timeout: float | None = None,
default_answer: Answer | None = Answer.NO,
quit_allowed: bool = True,
abort: bool = False,
) -> bool:
answer = user_confirm(message, timeout, default_answer, quit_allowed)
if answer == Answer.YES:
return True
if abort:
sys.exit(1)
elif answer == Answer.QUIT:
sys.exit(1)
return False
| Answer |
python | weaviate__weaviate-python-client | weaviate/backup/sync.py | {
"start": 163,
"end": 220
} | class ____(_BackupExecutor[ConnectionSync]):
pass
| _Backup |
python | django__django | django/template/response.py | {
"start": 5098,
"end": 5584
} | class ____(SimpleTemplateResponse):
rendering_attrs = [*SimpleTemplateResponse.rendering_attrs, "_request"]
def __init__(
self,
request,
template,
context=None,
content_type=None,
status=None,
charset=None,
using=None,
headers=None,
):
super().__init__(
template, context, content_type, status, charset, using, headers=headers
)
self._request = request
| TemplateResponse |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/super1.py | {
"start": 651,
"end": 1282
} | class ____(ClassB, ClassC):
def __init__(self):
super().method2()
super().method3()
# This should generate an error
super().non_method1()
def method(self):
def inner():
super().method1()
super(ClassD)
# This should generate an error
super(ClassD).non_method2()
super(ClassB, ClassD).method1()
# This should generate an error because Foo2
# is not a subclass of Foo1.
super(ClassB, ClassC).method1()
v1 = ClassD()
super(ClassB, v1).method1()
v2 = ClassC()
# This should generate an error because Foo2
# is not a subclass of Foo1.
super(ClassB, v2).method1()
| ClassD |
python | tensorflow__tensorflow | tensorflow/python/framework/ops.py | {
"start": 214627,
"end": 217206
} | class ____(contextlib.AbstractContextManager[Optional[str]]): # pylint: disable=invalid-name
"""A context manager for use when defining a Python op.
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
`tf.Graph.name_scope`
for more details on that).
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
"""
__slots__ = ["_name", "_name_scope"]
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None) -> None:
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Raises:
TypeError: if `default_name` is passed in but not a string.
"""
self._name_scope = name_scope(
name, default_name, values, skip_on_eager=False)
self._name = default_name if name is None else name
def __enter__(self) -> Optional[str]:
return self._name_scope.__enter__()
def __exit__(self, *exc_info) -> Optional[bool]:
return self._name_scope.__exit__(*exc_info)
@tf_export("get_current_name_scope", v1=[])
def get_current_name_scope() -> str:
"""Returns current full name scope specified by `tf.name_scope(...)`s.
For example,
```python
with tf.name_scope("outer"):
tf.get_current_name_scope() # "outer"
with tf.name_scope("inner"):
tf.get_current_name_scope() # "outer/inner"
```
In other words, `tf.get_current_name_scope()` returns the op name prefix that
will be prepended to, if an op is created at that place.
Note that `@tf.function` resets the name scope stack as shown below.
```
with tf.name_scope("outer"):
@tf.function
def foo(x):
with tf.name_scope("inner"):
return tf.add(x * x) # Op name is "inner/Add", not "outer/inner/Add"
```
"""
ctx = context.context()
if ctx.executing_eagerly():
return ctx.scope_name.rstrip("/")
else:
return get_default_graph().get_name_scope()
@tf_export("name_scope", v1=[])
| name_scope_v1 |
python | numpy__numpy | numpy/lib/tests/test__datasource.py | {
"start": 8530,
"end": 9675
} | class ____:
def test_ValidFile(self, tmp_path):
# Create local temp file
repos = datasource.Repository(valid_baseurl(), tmp_path)
tmpfile = valid_textfile(tmp_path)
assert_(repos.exists(tmpfile))
def test_InvalidFile(self, tmp_path):
repos = datasource.Repository(valid_baseurl(), tmp_path)
tmpfile = invalid_textfile(tmp_path)
assert_equal(repos.exists(tmpfile), False)
def test_RemoveHTTPFile(self, tmp_path):
repos = datasource.Repository(valid_baseurl(), tmp_path)
assert_(repos.exists(valid_httpurl()))
def test_CachedHTTPFile(self, tmp_path):
localfile = valid_httpurl()
# Create a locally cached temp file with an URL based
# directory structure. This is similar to what Repository.open
# would do.
repos = datasource.Repository(valid_baseurl(), tmp_path)
_, netloc, _, _, _, _ = urlparse(localfile)
local_path = os.path.join(repos._destpath, netloc)
os.mkdir(local_path, 0o0700)
tmpfile = valid_textfile(local_path)
assert_(repos.exists(tmpfile))
| TestRepositoryExists |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 16888,
"end": 17225
} | class ____(CharStreamException):
def __init__(self, *args):
if args and isinstance(args[0], Exception):
io = args[0]
CharStreamException.__init__(self, str(io))
self.io = io
else:
CharStreamException.__init__(self, *args)
self.io = self
| CharStreamIOException |
python | huggingface__transformers | src/transformers/models/sam2/modeling_sam2.py | {
"start": 23826,
"end": 26633
} | class ____(Sam2PreTrainedModel):
config_class = Sam2HieraDetConfig
main_input_name = "pixel_values"
_can_record_outputs = {
"hidden_states": Sam2MultiScaleBlock,
"attentions": Sam2MultiScaleAttention,
}
def __init__(self, config: Sam2HieraDetConfig):
super().__init__(config)
self.patch_embed = Sam2PatchEmbeddings(config)
# Windowed positional embedding (https://huggingface.co/papers/2311.05613)
self.pos_embed = nn.Parameter(
torch.zeros(1, config.hidden_size, *config.window_positional_embedding_background_size)
)
self.pos_embed_window = nn.Parameter(
torch.zeros(1, config.hidden_size, config.window_size_per_stage[0], config.window_size_per_stage[0])
)
self.stage_ends = (np.cumsum(config.blocks_per_stage) - 1).tolist()
self.blocks = nn.ModuleList()
total_block_idx = 0
for stage_idx, blocks_per_stage in enumerate(config.blocks_per_stage):
for block_idx in range(blocks_per_stage):
block = Sam2MultiScaleBlock(
config=config, stage_idx=stage_idx, block_idx=block_idx, total_block_idx=total_block_idx
)
self.blocks.append(block)
total_block_idx += 1
def get_input_embeddings(self):
return self.patch_embed
def _get_pos_embed(self, hw: tuple[int, int]) -> torch.Tensor:
h, w = hw
window_embed = self.pos_embed_window
pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic")
pos_embed = pos_embed + window_embed.tile([x // y for x, y in zip(pos_embed.shape, window_embed.shape)])
pos_embed = pos_embed.permute(0, 2, 3, 1)
return pos_embed
@check_model_inputs()
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Sam2HieraDetModelOutput]:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.patch_embed(pixel_values)
hidden_states = hidden_states + self._get_pos_embed(hidden_states.shape[1:3])
intermediate_hidden_states = ()
for i, block_module in enumerate(self.blocks):
hidden_states = block_module(hidden_states, **kwargs)
if i in self.stage_ends:
intermediate_hidden_states = intermediate_hidden_states + (hidden_states,)
return Sam2HieraDetModelOutput(
last_hidden_state=hidden_states,
intermediate_hidden_states=intermediate_hidden_states,
)
@auto_docstring(
custom_intro="""
The vision model from Sam without any head or projection on top.
"""
)
| Sam2HieraDetModel |
python | tensorflow__tensorflow | tensorflow/python/autograph/impl/conversion_test.py | {
"start": 1238,
"end": 4119
} | class ____(test.TestCase):
def _simple_program_ctx(self):
return converter.ProgramContext(
options=converter.ConversionOptions(recursive=True),
autograph_module=api)
def test_is_allowlisted(self):
def test_fn():
return constant_op.constant(1)
self.assertFalse(conversion.is_allowlisted(test_fn))
self.assertTrue(conversion.is_allowlisted(utils))
self.assertTrue(conversion.is_allowlisted(constant_op.constant))
def test_is_allowlisted_tensorflow_like(self):
tf_like = types.ModuleType('tensorflow_foo')
def test_fn():
pass
tf_like.test_fn = test_fn
test_fn.__module__ = tf_like
self.assertFalse(conversion.is_allowlisted(tf_like.test_fn))
def test_is_allowlisted_callable_allowlisted_call(self):
allowlisted_mod = types.ModuleType('test_allowlisted_call')
sys.modules['test_allowlisted_call'] = allowlisted_mod
config.CONVERSION_RULES = ((config.DoNotConvert('test_allowlisted_call'),) +
config.CONVERSION_RULES)
class TestClass:
def __call__(self):
pass
def allowlisted_method(self):
pass
TestClass.__module__ = 'test_allowlisted_call'
TestClass.__call__.__module__ = 'test_allowlisted_call'
class Subclass(TestClass):
def converted_method(self):
pass
tc = Subclass()
self.assertTrue(conversion.is_allowlisted(TestClass.__call__))
self.assertTrue(conversion.is_allowlisted(tc))
self.assertTrue(conversion.is_allowlisted(tc.__call__))
self.assertTrue(conversion.is_allowlisted(tc.allowlisted_method))
self.assertFalse(conversion.is_allowlisted(Subclass))
self.assertFalse(conversion.is_allowlisted(tc.converted_method))
def test_is_allowlisted_tfmethodwrapper(self):
allowlisted_mod = types.ModuleType('test_allowlisted_call')
sys.modules['test_allowlisted_call'] = allowlisted_mod
config.CONVERSION_RULES = ((config.DoNotConvert('test_allowlisted_call'),) +
config.CONVERSION_RULES)
class TestClass:
def member_function(self):
pass
TestClass.__module__ = 'test_allowlisted_call'
test_obj = TestClass()
def test_fn(self):
del self
bound_method = types.MethodType(
test_fn,
function.TfMethodTarget(
weakref.ref(test_obj), test_obj.member_function))
self.assertTrue(conversion.is_allowlisted(bound_method))
def test_is_allowlisted_pybind(self):
test_object = pybind_for_testing.TestClassDef()
with test.mock.patch.object(config, 'CONVERSION_RULES', ()):
# TODO(mdan): This should return True for functions and methods.
# Note: currently, native bindings are allowlisted by a separate check.
self.assertFalse(conversion.is_allowlisted(test_object.method))
if __name__ == '__main__':
test.main()
| ConversionTest |
python | pypa__pip | src/pip/_vendor/idna/core.py | {
"start": 480,
"end": 598
} | class ____(IDNAError):
"""Exception when a disallowed or unallocated codepoint is used"""
pass
| InvalidCodepoint |
python | py-pdf__pypdf | pypdf/_encryption.py | {
"start": 32821,
"end": 48697
} | class ____:
"""
Collects and manages parameters for PDF document encryption and decryption.
Args:
V: A code specifying the algorithm to be used in encrypting and
decrypting the document.
R: The revision of the standard security handler.
Length: The length of the encryption key in bits.
P: A set of flags specifying which operations shall be permitted
when the document is opened with user access
entry: The encryption dictionary object.
EncryptMetadata: Whether to encrypt metadata in the document.
first_id_entry: The first 16 bytes of the file's original ID.
StmF: The name of the crypt filter that shall be used by default
when decrypting streams.
StrF: The name of the crypt filter that shall be used when decrypting
all strings in the document.
EFF: The name of the crypt filter that shall be used when
encrypting embedded file streams that do not have their own
crypt filter specifier.
values: Additional encryption parameters.
"""
def __init__(
self,
V: int,
R: int,
Length: int,
P: int,
entry: DictionaryObject,
EncryptMetadata: bool,
first_id_entry: bytes,
StmF: str,
StrF: str,
EFF: str,
values: Optional[EncryptionValues],
) -> None:
# §7.6.2, entries common to all encryption dictionaries
# use same name as keys of encryption dictionaries entries
self.V = V
self.R = R
self.Length = Length # key_size
self.P = (P + 0x100000000) % 0x100000000 # maybe P < 0
self.EncryptMetadata = EncryptMetadata
self.id1_entry = first_id_entry
self.StmF = StmF
self.StrF = StrF
self.EFF = EFF
self.values: EncryptionValues = values if values else EncryptionValues()
self._password_type = PasswordType.NOT_DECRYPTED
self._key: Optional[bytes] = None
def is_decrypted(self) -> bool:
return self._password_type != PasswordType.NOT_DECRYPTED
def encrypt_object(self, obj: PdfObject, idnum: int, generation: int) -> PdfObject:
# skip calculate key
if not self._is_encryption_object(obj):
return obj
cf = self._make_crypt_filter(idnum, generation)
return cf.encrypt_object(obj)
def decrypt_object(self, obj: PdfObject, idnum: int, generation: int) -> PdfObject:
# skip calculate key
if not self._is_encryption_object(obj):
return obj
cf = self._make_crypt_filter(idnum, generation)
return cf.decrypt_object(obj)
@staticmethod
def _is_encryption_object(obj: PdfObject) -> bool:
return isinstance(
obj,
(
ByteStringObject,
TextStringObject,
StreamObject,
ArrayObject,
DictionaryObject,
),
)
def _make_crypt_filter(self, idnum: int, generation: int) -> CryptFilter:
"""
Algorithm 1: Encryption of data using the RC4 or AES algorithms.
a) Obtain the object number and generation number from the object
identifier of the string or stream to be encrypted
(see 7.3.10, "Indirect Objects"). If the string is a direct object,
use the identifier of the indirect object containing it.
b) For all strings and streams without crypt filter specifier; treating
the object number and generation number as binary integers, extend
the original n-byte encryption key to n + 5 bytes by appending the
low-order 3 bytes of the object number and the low-order 2 bytes of
the generation number in that order, low-order byte first.
(n is 5 unless the value of V in the encryption dictionary is greater
than 1, in which case n is the value of Length divided by 8.)
If using the AES algorithm, extend the encryption key an additional
4 bytes by adding the value “sAlT”, which corresponds to the
hexadecimal values 0x73, 0x41, 0x6C, 0x54. (This addition is done for
backward compatibility and is not intended to provide additional
security.)
c) Initialize the MD5 hash function and pass the result of step (b) as
input to this function.
d) Use the first (n + 5) bytes, up to a maximum of 16, of the output
from the MD5 hash as the key for the RC4 or AES symmetric key
algorithms, along with the string or stream data to be encrypted.
If using the AES algorithm, the Cipher Block Chaining (CBC) mode,
which requires an initialization vector, is used. The block size
parameter is set to 16 bytes, and the initialization vector is a
16-byte random number that is stored as the first 16 bytes of the
encrypted stream or string.
Algorithm 3.1a Encryption of data using the AES algorithm
1. Use the 32-byte file encryption key for the AES-256 symmetric key
algorithm, along with the string or stream data to be encrypted.
Use the AES algorithm in Cipher Block Chaining (CBC) mode, which
requires an initialization vector. The block size parameter is set to
16 bytes, and the initialization vector is a 16-byte random number
that is stored as the first 16 bytes of the encrypted stream or string.
The output is the encrypted data to be stored in the PDF file.
"""
pack1 = struct.pack("<i", idnum)[:3]
pack2 = struct.pack("<i", generation)[:2]
assert self._key
key = self._key
n = 5 if self.V == 1 else self.Length // 8
key_data = key[:n] + pack1 + pack2
key_hash = hashlib.md5(key_data)
rc4_key = key_hash.digest()[: min(n + 5, 16)]
# for AES-128
key_hash.update(b"sAlT")
aes128_key = key_hash.digest()[: min(n + 5, 16)]
# for AES-256
aes256_key = key
stm_crypt = self._get_crypt(self.StmF, rc4_key, aes128_key, aes256_key)
str_crypt = self._get_crypt(self.StrF, rc4_key, aes128_key, aes256_key)
ef_crypt = self._get_crypt(self.EFF, rc4_key, aes128_key, aes256_key)
return CryptFilter(stm_crypt, str_crypt, ef_crypt)
@staticmethod
def _get_crypt(
method: str, rc4_key: bytes, aes128_key: bytes, aes256_key: bytes
) -> CryptBase:
if method == "/AESV2":
return CryptAES(aes128_key)
if method == "/AESV3":
return CryptAES(aes256_key)
if method == "/Identity":
return CryptIdentity()
return CryptRC4(rc4_key)
@staticmethod
def _encode_password(password: Union[bytes, str]) -> bytes:
if isinstance(password, str):
try:
pwd = password.encode("latin-1")
except Exception:
pwd = password.encode("utf-8")
else:
pwd = password
return pwd
def verify(self, password: Union[bytes, str]) -> PasswordType:
pwd = self._encode_password(password)
key, rc = self.verify_v4(pwd) if self.V <= 4 else self.verify_v5(pwd)
if rc != PasswordType.NOT_DECRYPTED:
self._password_type = rc
self._key = key
return rc
def verify_v4(self, password: bytes) -> tuple[bytes, PasswordType]:
# verify owner password first
key = AlgV4.verify_owner_password(
password,
self.R,
self.Length,
self.values.O,
self.values.U,
self.P,
self.id1_entry,
self.EncryptMetadata,
)
if key:
return key, PasswordType.OWNER_PASSWORD
key = AlgV4.verify_user_password(
password,
self.R,
self.Length,
self.values.O,
self.values.U,
self.P,
self.id1_entry,
self.EncryptMetadata,
)
if key:
return key, PasswordType.USER_PASSWORD
return b"", PasswordType.NOT_DECRYPTED
def verify_v5(self, password: bytes) -> tuple[bytes, PasswordType]:
# TODO: use SASLprep process
# verify owner password first
key = AlgV5.verify_owner_password(
self.R, password, self.values.O, self.values.OE, self.values.U
)
rc = PasswordType.OWNER_PASSWORD
if not key:
key = AlgV5.verify_user_password(
self.R, password, self.values.U, self.values.UE
)
rc = PasswordType.USER_PASSWORD
if not key:
return b"", PasswordType.NOT_DECRYPTED
# verify Perms
if not AlgV5.verify_perms(key, self.values.Perms, self.P, self.EncryptMetadata):
logger_warning("ignore '/Perms' verify failed", __name__)
return key, rc
def write_entry(
self, user_password: str, owner_password: Optional[str]
) -> DictionaryObject:
user_pwd = self._encode_password(user_password)
owner_pwd = self._encode_password(owner_password) if owner_password else None
if owner_pwd is None:
owner_pwd = user_pwd
if self.V <= 4:
self.compute_values_v4(user_pwd, owner_pwd)
else:
self._key = secrets.token_bytes(self.Length // 8)
values = AlgV5.generate_values(
self.R, user_pwd, owner_pwd, self._key, self.P, self.EncryptMetadata
)
self.values.O = values["/O"]
self.values.U = values["/U"]
self.values.OE = values["/OE"]
self.values.UE = values["/UE"]
self.values.Perms = values["/Perms"]
dict_obj = DictionaryObject()
dict_obj[NameObject("/V")] = NumberObject(self.V)
dict_obj[NameObject("/R")] = NumberObject(self.R)
dict_obj[NameObject("/Length")] = NumberObject(self.Length)
dict_obj[NameObject("/P")] = NumberObject(self.P)
dict_obj[NameObject("/Filter")] = NameObject("/Standard")
# ignore /EncryptMetadata
dict_obj[NameObject("/O")] = ByteStringObject(self.values.O)
dict_obj[NameObject("/U")] = ByteStringObject(self.values.U)
if self.V >= 4:
# TODO: allow different method
std_cf = DictionaryObject()
std_cf[NameObject("/AuthEvent")] = NameObject("/DocOpen")
std_cf[NameObject("/CFM")] = NameObject(self.StmF)
std_cf[NameObject("/Length")] = NumberObject(self.Length // 8)
cf = DictionaryObject()
cf[NameObject("/StdCF")] = std_cf
dict_obj[NameObject("/CF")] = cf
dict_obj[NameObject("/StmF")] = NameObject("/StdCF")
dict_obj[NameObject("/StrF")] = NameObject("/StdCF")
# ignore EFF
# dict_obj[NameObject("/EFF")] = NameObject("/StdCF")
if self.V >= 5:
dict_obj[NameObject("/OE")] = ByteStringObject(self.values.OE)
dict_obj[NameObject("/UE")] = ByteStringObject(self.values.UE)
dict_obj[NameObject("/Perms")] = ByteStringObject(self.values.Perms)
return dict_obj
def compute_values_v4(self, user_password: bytes, owner_password: bytes) -> None:
rc4_key = AlgV4.compute_O_value_key(owner_password, self.R, self.Length)
o_value = AlgV4.compute_O_value(rc4_key, user_password, self.R)
key = AlgV4.compute_key(
user_password,
self.R,
self.Length,
o_value,
self.P,
self.id1_entry,
self.EncryptMetadata,
)
u_value = AlgV4.compute_U_value(key, self.R, self.id1_entry)
self._key = key
self.values.O = o_value
self.values.U = u_value
@staticmethod
def read(encryption_entry: DictionaryObject, first_id_entry: bytes) -> "Encryption":
if encryption_entry.get("/Filter") != "/Standard":
raise NotImplementedError(
"only Standard PDF encryption handler is available"
)
if "/SubFilter" in encryption_entry:
raise NotImplementedError("/SubFilter NOT supported")
stm_filter = "/V2"
str_filter = "/V2"
ef_filter = "/V2"
alg_ver = encryption_entry.get("/V", 0)
if alg_ver not in (1, 2, 3, 4, 5):
raise NotImplementedError(f"Encryption V={alg_ver} NOT supported")
if alg_ver >= 4:
filters = encryption_entry["/CF"]
stm_filter = encryption_entry.get("/StmF", "/Identity")
str_filter = encryption_entry.get("/StrF", "/Identity")
ef_filter = encryption_entry.get("/EFF", stm_filter)
if stm_filter != "/Identity":
stm_filter = filters[stm_filter]["/CFM"] # type: ignore
if str_filter != "/Identity":
str_filter = filters[str_filter]["/CFM"] # type: ignore
if ef_filter != "/Identity":
ef_filter = filters[ef_filter]["/CFM"] # type: ignore
allowed_methods = ("/Identity", "/V2", "/AESV2", "/AESV3")
if stm_filter not in allowed_methods:
raise NotImplementedError(f"StmF Method {stm_filter} NOT supported!")
if str_filter not in allowed_methods:
raise NotImplementedError(f"StrF Method {str_filter} NOT supported!")
if ef_filter not in allowed_methods:
raise NotImplementedError(f"EFF Method {ef_filter} NOT supported!")
alg_rev = cast(int, encryption_entry["/R"])
perm_flags = cast(int, encryption_entry["/P"])
key_bits = encryption_entry.get("/Length", 40)
encrypt_metadata = encryption_entry.get("/EncryptMetadata")
encrypt_metadata = (
encrypt_metadata.value if encrypt_metadata is not None else True
)
values = EncryptionValues()
values.O = cast(ByteStringObject, encryption_entry["/O"]).original_bytes
values.U = cast(ByteStringObject, encryption_entry["/U"]).original_bytes
values.OE = encryption_entry.get("/OE", ByteStringObject()).original_bytes
values.UE = encryption_entry.get("/UE", ByteStringObject()).original_bytes
values.Perms = encryption_entry.get("/Perms", ByteStringObject()).original_bytes
return Encryption(
V=alg_ver,
R=alg_rev,
Length=key_bits,
P=perm_flags,
EncryptMetadata=encrypt_metadata,
first_id_entry=first_id_entry,
values=values,
StrF=str_filter,
StmF=stm_filter,
EFF=ef_filter,
entry=encryption_entry, # Dummy entry for the moment; will get removed
)
@staticmethod
def make(
alg: EncryptAlgorithm, permissions: int, first_id_entry: bytes
) -> "Encryption":
alg_ver, alg_rev, key_bits = alg
stm_filter, str_filter, ef_filter = "/V2", "/V2", "/V2"
if alg == EncryptAlgorithm.AES_128:
stm_filter, str_filter, ef_filter = "/AESV2", "/AESV2", "/AESV2"
elif alg in (EncryptAlgorithm.AES_256_R5, EncryptAlgorithm.AES_256):
stm_filter, str_filter, ef_filter = "/AESV3", "/AESV3", "/AESV3"
return Encryption(
V=alg_ver,
R=alg_rev,
Length=key_bits,
P=permissions,
EncryptMetadata=True,
first_id_entry=first_id_entry,
values=None,
StrF=str_filter,
StmF=stm_filter,
EFF=ef_filter,
entry=DictionaryObject(), # Dummy entry for the moment; will get removed
)
| Encryption |
python | astropy__astropy | astropy/samp/utils.py | {
"start": 1677,
"end": 2518
} | class ____:
"""
A thread-safe pool of `xmlrpc.ServerProxy` objects.
"""
def __init__(self, size, proxy_class, *args, **keywords):
self._proxies = queue.Queue(size)
for _ in range(size):
self._proxies.put(proxy_class(*args, **keywords))
def __getattr__(self, name):
# magic method dispatcher
return _ServerProxyPoolMethod(self._proxies, name)
def shutdown(self):
"""Shut down the proxy pool by closing all active connections."""
while True:
try:
proxy = self._proxies.get_nowait()
except queue.Empty:
break
# An undocumented but apparently supported way to call methods on
# an ServerProxy that are not dispatched to the remote server
proxy("close")
| ServerProxyPool |
python | streamlit__streamlit | lib/tests/streamlit/streamlit_test.py | {
"start": 2236,
"end": 7675
} | class ____(unittest.TestCase):
"""Test Streamlit.__init__.py."""
def test_streamlit_version(self):
"""Test streamlit.__version__."""
assert __version__ == get_version()
def test_get_option(self):
"""Test streamlit.get_option."""
# This is set in lib/tests/conftest.py to False
assert not st.get_option("browser.gatherUsageStats")
def test_matplotlib_uses_agg(self):
"""Test that Streamlit uses the 'Agg' backend for matplotlib."""
ORIG_PLATFORM = sys.platform
for platform in ["darwin", "linux2"]:
sys.platform = platform
assert mpl.get_backend().lower() == "agg"
assert os.environ.get("MPLBACKEND").lower() == "agg"
# Force matplotlib to use a different backend
mpl.use("pdf", force=True)
assert mpl.get_backend().lower() == "pdf"
# Reset the backend to 'Agg'
mpl.use("agg", force=True)
assert mpl.get_backend().lower() == "agg"
sys.platform = ORIG_PLATFORM
def test_ensure_completeness_element_mocks(self):
"""Test that we have mocked all elements in the public API.
The full public API should be covered by:
- element_mocks.WIDGET_ELEMENTS
- element_mocks.NON_WIDGET_ELEMENTS
- element_mocks.CONTAINER_ELEMENTS
- NON_ELEMENT_COMMANDS
"""
api = {
k
for k, v in st.__dict__.items()
if not k.startswith("_") and not isinstance(v, type(st))
}
mocked_elements = {
element
for element, _ in WIDGET_ELEMENTS + NON_WIDGET_ELEMENTS + CONTAINER_ELEMENTS
}
mocked_elements.update(NON_ELEMENT_COMMANDS)
assert api == mocked_elements, (
"There are new public commands that might be needed to be added to element "
"mocks or NON_ELEMENT_COMMANDS. Please add it to the correct list of "
"mocked elements or NON_ELEMENT_COMMANDS."
)
def test_public_api(self):
"""Test that we don't accidentally remove (or add) symbols
to the public `st` API.
"""
api = {
k
for k, v in st.__dict__.items()
if not k.startswith("_") and not isinstance(v, type(st))
}
assert api == ELEMENT_COMMANDS.union(NON_ELEMENT_COMMANDS)
def test_pydoc(self):
"""Test that we can run pydoc on the streamlit package"""
cwd = os.getcwd()
try:
os.chdir(tempfile.mkdtemp())
# Run the script as a separate process to make sure that
# the currently loaded modules do not affect the test result.
output = subprocess.check_output(
[sys.executable, "-m", "pydoc", "streamlit"]
).decode()
assert "Help on package streamlit:" in output
finally:
os.chdir(cwd)
@pytest.mark.usefixtures("benchmark")
def test_cold_import_time(benchmark):
"""
Measure the import time of `streamlit` by spawning a new Python subprocess.
This simulates a “cold” import because each run starts a fresh
interpreter session. It includes Python startup overhead, so it
approximates how a user experiences an import in a newly launched
Python process.
"""
def do_cold_import():
# We invoke a separate Python process that just imports the package.
subprocess.check_call([sys.executable, "-c", "import streamlit"])
benchmark(do_cold_import)
def test_importtime_median_under_threshold():
"""
Measure the import time of Streamlit via the built-in `importtime`
in a fresh interpreter, compute the median import time,
and check if it's under a static threshold.
"""
# Define an acceptable threshold for import time (in microseconds).
# This value is also dependent a bit on the machine it's run on,
# so needs to be mainly adjusted to our CI runners.
# While its important to keep the import time low, you can
# modify this threshold if it's really needed to add some new features.
# But make sure that its justified and intended.
max_allowed_import_time_us = 700_000
import_times = []
for _ in range(25):
# Spawn a subprocess that imports `streamlit` with Python's importtime
# instrumentation
cmd = [sys.executable, "-X", "importtime", "-c", "import streamlit"]
p = subprocess.run(cmd, stderr=subprocess.PIPE, check=True)
# The last line of stderr has the total import time:
# import time: self [us] | cumulative [us] | streamlit
line = p.stderr.splitlines()[-1]
field = line.split(b"|")[-2].strip() # e.g. b"123456"
total_us = int(field) # convert to integer microseconds
import_times.append(total_us)
# Calculate the median import time across all runs
median_time_us = statistics.median(import_times)
# Check if the median is within the desired threshold
assert median_time_us <= max_allowed_import_time_us, (
f"Median import time {round(median_time_us)}us of streamlit exceeded the max "
f"allowed threshold {max_allowed_import_time_us}us (percentage: "
f"{round(median_time_us / max_allowed_import_time_us * 100)}%)."
"In case this is expected and justified, you can change the "
"threshold in the test."
)
| StreamlitTest |
python | zarr-developers__zarr-python | src/zarr/abc/codec.py | {
"start": 8766,
"end": 15687
} | class ____:
"""Base class for implementing CodecPipeline.
A CodecPipeline implements the read and write paths for chunk data.
On the read path, it is responsible for fetching chunks from a store (via ByteGetter),
decoding them and assembling an output array. On the write path, it encodes the chunks
and writes them to a store (via ByteSetter)."""
@abstractmethod
def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self:
"""Fills in codec configuration parameters that can be automatically
inferred from the array metadata.
Parameters
----------
array_spec : ArraySpec
Returns
-------
Self
"""
...
@classmethod
@abstractmethod
def from_codecs(cls, codecs: Iterable[Codec]) -> Self:
"""Creates a codec pipeline from an iterable of codecs.
Parameters
----------
codecs : Iterable[Codec]
Returns
-------
Self
"""
...
@classmethod
def from_array_metadata_and_store(cls, array_metadata: ArrayMetadata, store: Store) -> Self:
"""Creates a codec pipeline from array metadata and a store path.
Raises NotImplementedError by default, indicating the CodecPipeline must be created with from_codecs instead.
Parameters
----------
array_metadata : ArrayMetadata
store : Store
Returns
-------
Self
"""
raise NotImplementedError(
f"'{type(cls).__name__}' does not implement CodecPipeline.from_array_metadata_and_store."
)
@property
@abstractmethod
def supports_partial_decode(self) -> bool: ...
@property
@abstractmethod
def supports_partial_encode(self) -> bool: ...
@abstractmethod
def validate(
self,
*,
shape: tuple[int, ...],
dtype: ZDType[TBaseDType, TBaseScalar],
chunk_grid: ChunkGrid,
) -> None:
"""Validates that all codec configurations are compatible with the array metadata.
Raises errors when a codec configuration is not compatible.
Parameters
----------
shape : tuple[int, ...]
The array shape
dtype : np.dtype[Any]
The array data type
chunk_grid : ChunkGrid
The array chunk grid
"""
...
@abstractmethod
def compute_encoded_size(self, byte_length: int, array_spec: ArraySpec) -> int:
"""Given an input byte length, this method returns the output byte length.
Raises a NotImplementedError for codecs with variable-sized outputs (e.g. compressors).
Parameters
----------
byte_length : int
array_spec : ArraySpec
Returns
-------
int
"""
...
@abstractmethod
async def decode(
self,
chunk_bytes_and_specs: Iterable[tuple[Buffer | None, ArraySpec]],
) -> Iterable[NDBuffer | None]:
"""Decodes a batch of chunks.
Chunks can be None in which case they are ignored by the codec.
Parameters
----------
chunk_bytes_and_specs : Iterable[tuple[Buffer | None, ArraySpec]]
Ordered set of encoded chunks with their accompanying chunk spec.
Returns
-------
Iterable[NDBuffer | None]
"""
...
@abstractmethod
async def encode(
self,
chunk_arrays_and_specs: Iterable[tuple[NDBuffer | None, ArraySpec]],
) -> Iterable[Buffer | None]:
"""Encodes a batch of chunks.
Chunks can be None in which case they are ignored by the codec.
Parameters
----------
chunk_arrays_and_specs : Iterable[tuple[NDBuffer | None, ArraySpec]]
Ordered set of to-be-encoded chunks with their accompanying chunk spec.
Returns
-------
Iterable[Buffer | None]
"""
...
@abstractmethod
async def read(
self,
batch_info: Iterable[tuple[ByteGetter, ArraySpec, SelectorTuple, SelectorTuple, bool]],
out: NDBuffer,
drop_axes: tuple[int, ...] = (),
) -> None:
"""Reads chunk data from the store, decodes it and writes it into an output array.
Partial decoding may be utilized if the codecs and stores support it.
Parameters
----------
batch_info : Iterable[tuple[ByteGetter, ArraySpec, SelectorTuple, SelectorTuple]]
Ordered set of information about the chunks.
The first slice selection determines which parts of the chunk will be fetched.
The second slice selection determines where in the output array the chunk data will be written.
The ByteGetter is used to fetch the necessary bytes.
The chunk spec contains information about the construction of an array from the bytes.
If the Store returns ``None`` for a chunk, then the chunk was not
written and the implementation must set the values of that chunk (or
``out``) to the fill value for the array.
out : NDBuffer
"""
...
@abstractmethod
async def write(
self,
batch_info: Iterable[tuple[ByteSetter, ArraySpec, SelectorTuple, SelectorTuple, bool]],
value: NDBuffer,
drop_axes: tuple[int, ...] = (),
) -> None:
"""Encodes chunk data and writes it to the store.
Merges with existing chunk data by reading first, if necessary.
Partial encoding may be utilized if the codecs and stores support it.
Parameters
----------
batch_info : Iterable[tuple[ByteSetter, ArraySpec, SelectorTuple, SelectorTuple]]
Ordered set of information about the chunks.
The first slice selection determines which parts of the chunk will be encoded.
The second slice selection determines where in the value array the chunk data is located.
The ByteSetter is used to fetch and write the necessary bytes.
The chunk spec contains information about the chunk.
value : NDBuffer
"""
...
async def _batching_helper(
func: Callable[[CodecInput, ArraySpec], Awaitable[CodecOutput | None]],
batch_info: Iterable[tuple[CodecInput | None, ArraySpec]],
) -> list[CodecOutput | None]:
return await concurrent_map(
list(batch_info),
_noop_for_none(func),
config.get("async.concurrency"),
)
def _noop_for_none(
func: Callable[[CodecInput, ArraySpec], Awaitable[CodecOutput | None]],
) -> Callable[[CodecInput | None, ArraySpec], Awaitable[CodecOutput | None]]:
async def wrap(chunk: CodecInput | None, chunk_spec: ArraySpec) -> CodecOutput | None:
if chunk is None:
return None
return await func(chunk, chunk_spec)
return wrap
| CodecPipeline |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 84221,
"end": 84478
} | class ____(CIntType):
is_enum = 1
def sign_and_name(self):
return 'int'
def specialization_name(self):
# ensure that the to/from Python functions don't conflict with
# "int"
return '__pyx_anon_enum'
| CAnonEnumType |
python | kubernetes-client__python | kubernetes/client/models/v1_container.py | {
"start": 383,
"end": 35110
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'args': 'list[str]',
'command': 'list[str]',
'env': 'list[V1EnvVar]',
'env_from': 'list[V1EnvFromSource]',
'image': 'str',
'image_pull_policy': 'str',
'lifecycle': 'V1Lifecycle',
'liveness_probe': 'V1Probe',
'name': 'str',
'ports': 'list[V1ContainerPort]',
'readiness_probe': 'V1Probe',
'resize_policy': 'list[V1ContainerResizePolicy]',
'resources': 'V1ResourceRequirements',
'restart_policy': 'str',
'restart_policy_rules': 'list[V1ContainerRestartRule]',
'security_context': 'V1SecurityContext',
'startup_probe': 'V1Probe',
'stdin': 'bool',
'stdin_once': 'bool',
'termination_message_path': 'str',
'termination_message_policy': 'str',
'tty': 'bool',
'volume_devices': 'list[V1VolumeDevice]',
'volume_mounts': 'list[V1VolumeMount]',
'working_dir': 'str'
}
attribute_map = {
'args': 'args',
'command': 'command',
'env': 'env',
'env_from': 'envFrom',
'image': 'image',
'image_pull_policy': 'imagePullPolicy',
'lifecycle': 'lifecycle',
'liveness_probe': 'livenessProbe',
'name': 'name',
'ports': 'ports',
'readiness_probe': 'readinessProbe',
'resize_policy': 'resizePolicy',
'resources': 'resources',
'restart_policy': 'restartPolicy',
'restart_policy_rules': 'restartPolicyRules',
'security_context': 'securityContext',
'startup_probe': 'startupProbe',
'stdin': 'stdin',
'stdin_once': 'stdinOnce',
'termination_message_path': 'terminationMessagePath',
'termination_message_policy': 'terminationMessagePolicy',
'tty': 'tty',
'volume_devices': 'volumeDevices',
'volume_mounts': 'volumeMounts',
'working_dir': 'workingDir'
}
def __init__(self, args=None, command=None, env=None, env_from=None, image=None, image_pull_policy=None, lifecycle=None, liveness_probe=None, name=None, ports=None, readiness_probe=None, resize_policy=None, resources=None, restart_policy=None, restart_policy_rules=None, security_context=None, startup_probe=None, stdin=None, stdin_once=None, termination_message_path=None, termination_message_policy=None, tty=None, volume_devices=None, volume_mounts=None, working_dir=None, local_vars_configuration=None): # noqa: E501
"""V1Container - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._args = None
self._command = None
self._env = None
self._env_from = None
self._image = None
self._image_pull_policy = None
self._lifecycle = None
self._liveness_probe = None
self._name = None
self._ports = None
self._readiness_probe = None
self._resize_policy = None
self._resources = None
self._restart_policy = None
self._restart_policy_rules = None
self._security_context = None
self._startup_probe = None
self._stdin = None
self._stdin_once = None
self._termination_message_path = None
self._termination_message_policy = None
self._tty = None
self._volume_devices = None
self._volume_mounts = None
self._working_dir = None
self.discriminator = None
if args is not None:
self.args = args
if command is not None:
self.command = command
if env is not None:
self.env = env
if env_from is not None:
self.env_from = env_from
if image is not None:
self.image = image
if image_pull_policy is not None:
self.image_pull_policy = image_pull_policy
if lifecycle is not None:
self.lifecycle = lifecycle
if liveness_probe is not None:
self.liveness_probe = liveness_probe
self.name = name
if ports is not None:
self.ports = ports
if readiness_probe is not None:
self.readiness_probe = readiness_probe
if resize_policy is not None:
self.resize_policy = resize_policy
if resources is not None:
self.resources = resources
if restart_policy is not None:
self.restart_policy = restart_policy
if restart_policy_rules is not None:
self.restart_policy_rules = restart_policy_rules
if security_context is not None:
self.security_context = security_context
if startup_probe is not None:
self.startup_probe = startup_probe
if stdin is not None:
self.stdin = stdin
if stdin_once is not None:
self.stdin_once = stdin_once
if termination_message_path is not None:
self.termination_message_path = termination_message_path
if termination_message_policy is not None:
self.termination_message_policy = termination_message_policy
if tty is not None:
self.tty = tty
if volume_devices is not None:
self.volume_devices = volume_devices
if volume_mounts is not None:
self.volume_mounts = volume_mounts
if working_dir is not None:
self.working_dir = working_dir
@property
def args(self):
"""Gets the args of this V1Container. # noqa: E501
Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:return: The args of this V1Container. # noqa: E501
:rtype: list[str]
"""
return self._args
@args.setter
def args(self, args):
"""Sets the args of this V1Container.
Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:param args: The args of this V1Container. # noqa: E501
:type: list[str]
"""
self._args = args
@property
def command(self):
"""Gets the command of this V1Container. # noqa: E501
Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:return: The command of this V1Container. # noqa: E501
:rtype: list[str]
"""
return self._command
@command.setter
def command(self, command):
"""Sets the command of this V1Container.
Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:param command: The command of this V1Container. # noqa: E501
:type: list[str]
"""
self._command = command
@property
def env(self):
"""Gets the env of this V1Container. # noqa: E501
List of environment variables to set in the container. Cannot be updated. # noqa: E501
:return: The env of this V1Container. # noqa: E501
:rtype: list[V1EnvVar]
"""
return self._env
@env.setter
def env(self, env):
"""Sets the env of this V1Container.
List of environment variables to set in the container. Cannot be updated. # noqa: E501
:param env: The env of this V1Container. # noqa: E501
:type: list[V1EnvVar]
"""
self._env = env
@property
def env_from(self):
"""Gets the env_from of this V1Container. # noqa: E501
List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
:return: The env_from of this V1Container. # noqa: E501
:rtype: list[V1EnvFromSource]
"""
return self._env_from
@env_from.setter
def env_from(self, env_from):
"""Sets the env_from of this V1Container.
List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
:param env_from: The env_from of this V1Container. # noqa: E501
:type: list[V1EnvFromSource]
"""
self._env_from = env_from
@property
def image(self):
"""Gets the image of this V1Container. # noqa: E501
Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. # noqa: E501
:return: The image of this V1Container. # noqa: E501
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""Sets the image of this V1Container.
Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. # noqa: E501
:param image: The image of this V1Container. # noqa: E501
:type: str
"""
self._image = image
@property
def image_pull_policy(self):
"""Gets the image_pull_policy of this V1Container. # noqa: E501
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501
:return: The image_pull_policy of this V1Container. # noqa: E501
:rtype: str
"""
return self._image_pull_policy
@image_pull_policy.setter
def image_pull_policy(self, image_pull_policy):
"""Sets the image_pull_policy of this V1Container.
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501
:param image_pull_policy: The image_pull_policy of this V1Container. # noqa: E501
:type: str
"""
self._image_pull_policy = image_pull_policy
@property
def lifecycle(self):
"""Gets the lifecycle of this V1Container. # noqa: E501
:return: The lifecycle of this V1Container. # noqa: E501
:rtype: V1Lifecycle
"""
return self._lifecycle
@lifecycle.setter
def lifecycle(self, lifecycle):
"""Sets the lifecycle of this V1Container.
:param lifecycle: The lifecycle of this V1Container. # noqa: E501
:type: V1Lifecycle
"""
self._lifecycle = lifecycle
@property
def liveness_probe(self):
"""Gets the liveness_probe of this V1Container. # noqa: E501
:return: The liveness_probe of this V1Container. # noqa: E501
:rtype: V1Probe
"""
return self._liveness_probe
@liveness_probe.setter
def liveness_probe(self, liveness_probe):
"""Sets the liveness_probe of this V1Container.
:param liveness_probe: The liveness_probe of this V1Container. # noqa: E501
:type: V1Probe
"""
self._liveness_probe = liveness_probe
# --- name (required field) ---
# Generated accessor pair. `name` is the only V1Container field in view with
# client-side validation: it must not be None when validation is enabled.
@property
def name(self):
"""Gets the name of this V1Container. # noqa: E501
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. # noqa: E501
:return: The name of this V1Container. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1Container.
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. # noqa: E501
:param name: The name of this V1Container. # noqa: E501
:type: str
"""
# Reject None only when the client was configured with
# client_side_validation enabled; otherwise accept the value as-is.
if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
self._name = name
@property
def ports(self):
"""Gets the ports of this V1Container. # noqa: E501
List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. # noqa: E501
:return: The ports of this V1Container. # noqa: E501
:rtype: list[V1ContainerPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""Sets the ports of this V1Container.
List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. # noqa: E501
:param ports: The ports of this V1Container. # noqa: E501
:type: list[V1ContainerPort]
"""
self._ports = ports
@property
def readiness_probe(self):
"""Gets the readiness_probe of this V1Container. # noqa: E501
:return: The readiness_probe of this V1Container. # noqa: E501
:rtype: V1Probe
"""
return self._readiness_probe
@readiness_probe.setter
def readiness_probe(self, readiness_probe):
"""Sets the readiness_probe of this V1Container.
:param readiness_probe: The readiness_probe of this V1Container. # noqa: E501
:type: V1Probe
"""
self._readiness_probe = readiness_probe
@property
def resize_policy(self):
"""Gets the resize_policy of this V1Container. # noqa: E501
Resources resize policy for the container. # noqa: E501
:return: The resize_policy of this V1Container. # noqa: E501
:rtype: list[V1ContainerResizePolicy]
"""
return self._resize_policy
@resize_policy.setter
def resize_policy(self, resize_policy):
"""Sets the resize_policy of this V1Container.
Resources resize policy for the container. # noqa: E501
:param resize_policy: The resize_policy of this V1Container. # noqa: E501
:type: list[V1ContainerResizePolicy]
"""
self._resize_policy = resize_policy
@property
def resources(self):
"""Gets the resources of this V1Container. # noqa: E501
:return: The resources of this V1Container. # noqa: E501
:rtype: V1ResourceRequirements
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1Container.
:param resources: The resources of this V1Container. # noqa: E501
:type: V1ResourceRequirements
"""
self._resources = resources
@property
def restart_policy(self):
"""Gets the restart_policy of this V1Container. # noqa: E501
RestartPolicy defines the restart behavior of individual containers in a pod. This overrides the pod-level restart policy. When this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Additionally, setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed. # noqa: E501
:return: The restart_policy of this V1Container. # noqa: E501
:rtype: str
"""
return self._restart_policy
@restart_policy.setter
def restart_policy(self, restart_policy):
"""Sets the restart_policy of this V1Container.
RestartPolicy defines the restart behavior of individual containers in a pod. This overrides the pod-level restart policy. When this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Additionally, setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed. # noqa: E501
:param restart_policy: The restart_policy of this V1Container. # noqa: E501
:type: str
"""
self._restart_policy = restart_policy
@property
def restart_policy_rules(self):
"""Gets the restart_policy_rules of this V1Container. # noqa: E501
Represents a list of rules to be checked to determine if the container should be restarted on exit. The rules are evaluated in order. Once a rule matches a container exit condition, the remaining rules are ignored. If no rule matches the container exit condition, the Container-level restart policy determines the whether the container is restarted or not. Constraints on the rules: - At most 20 rules are allowed. - Rules can have the same action. - Identical rules are not forbidden in validations. When rules are specified, container MUST set RestartPolicy explicitly even it if matches the Pod's RestartPolicy. # noqa: E501
:return: The restart_policy_rules of this V1Container. # noqa: E501
:rtype: list[V1ContainerRestartRule]
"""
return self._restart_policy_rules
@restart_policy_rules.setter
def restart_policy_rules(self, restart_policy_rules):
"""Sets the restart_policy_rules of this V1Container.
Represents a list of rules to be checked to determine if the container should be restarted on exit. The rules are evaluated in order. Once a rule matches a container exit condition, the remaining rules are ignored. If no rule matches the container exit condition, the Container-level restart policy determines the whether the container is restarted or not. Constraints on the rules: - At most 20 rules are allowed. - Rules can have the same action. - Identical rules are not forbidden in validations. When rules are specified, container MUST set RestartPolicy explicitly even it if matches the Pod's RestartPolicy. # noqa: E501
:param restart_policy_rules: The restart_policy_rules of this V1Container. # noqa: E501
:type: list[V1ContainerRestartRule]
"""
self._restart_policy_rules = restart_policy_rules
@property
def security_context(self):
"""Gets the security_context of this V1Container. # noqa: E501
:return: The security_context of this V1Container. # noqa: E501
:rtype: V1SecurityContext
"""
return self._security_context
@security_context.setter
def security_context(self, security_context):
"""Sets the security_context of this V1Container.
:param security_context: The security_context of this V1Container. # noqa: E501
:type: V1SecurityContext
"""
self._security_context = security_context
@property
def startup_probe(self):
"""Gets the startup_probe of this V1Container. # noqa: E501
:return: The startup_probe of this V1Container. # noqa: E501
:rtype: V1Probe
"""
return self._startup_probe
@startup_probe.setter
def startup_probe(self, startup_probe):
"""Sets the startup_probe of this V1Container.
:param startup_probe: The startup_probe of this V1Container. # noqa: E501
:type: V1Probe
"""
self._startup_probe = startup_probe
@property
def stdin(self):
"""Gets the stdin of this V1Container. # noqa: E501
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
:return: The stdin of this V1Container. # noqa: E501
:rtype: bool
"""
return self._stdin
@stdin.setter
def stdin(self, stdin):
"""Sets the stdin of this V1Container.
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
:param stdin: The stdin of this V1Container. # noqa: E501
:type: bool
"""
self._stdin = stdin
@property
def stdin_once(self):
"""Gets the stdin_once of this V1Container. # noqa: E501
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false # noqa: E501
:return: The stdin_once of this V1Container. # noqa: E501
:rtype: bool
"""
return self._stdin_once
@stdin_once.setter
def stdin_once(self, stdin_once):
"""Sets the stdin_once of this V1Container.
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false # noqa: E501
:param stdin_once: The stdin_once of this V1Container. # noqa: E501
:type: bool
"""
self._stdin_once = stdin_once
@property
def termination_message_path(self):
"""Gets the termination_message_path of this V1Container. # noqa: E501
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
:return: The termination_message_path of this V1Container. # noqa: E501
:rtype: str
"""
return self._termination_message_path
@termination_message_path.setter
def termination_message_path(self, termination_message_path):
"""Sets the termination_message_path of this V1Container.
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
:param termination_message_path: The termination_message_path of this V1Container. # noqa: E501
:type: str
"""
self._termination_message_path = termination_message_path
@property
def termination_message_policy(self):
"""Gets the termination_message_policy of this V1Container. # noqa: E501
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501
:return: The termination_message_policy of this V1Container. # noqa: E501
:rtype: str
"""
return self._termination_message_policy
@termination_message_policy.setter
def termination_message_policy(self, termination_message_policy):
"""Sets the termination_message_policy of this V1Container.
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501
:param termination_message_policy: The termination_message_policy of this V1Container. # noqa: E501
:type: str
"""
self._termination_message_policy = termination_message_policy
@property
def tty(self):
"""Gets the tty of this V1Container. # noqa: E501
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
:return: The tty of this V1Container. # noqa: E501
:rtype: bool
"""
return self._tty
@tty.setter
def tty(self, tty):
"""Sets the tty of this V1Container.
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
:param tty: The tty of this V1Container. # noqa: E501
:type: bool
"""
self._tty = tty
@property
def volume_devices(self):
"""Gets the volume_devices of this V1Container. # noqa: E501
volumeDevices is the list of block devices to be used by the container. # noqa: E501
:return: The volume_devices of this V1Container. # noqa: E501
:rtype: list[V1VolumeDevice]
"""
return self._volume_devices
@volume_devices.setter
def volume_devices(self, volume_devices):
"""Sets the volume_devices of this V1Container.
volumeDevices is the list of block devices to be used by the container. # noqa: E501
:param volume_devices: The volume_devices of this V1Container. # noqa: E501
:type: list[V1VolumeDevice]
"""
self._volume_devices = volume_devices
@property
def volume_mounts(self):
"""Gets the volume_mounts of this V1Container. # noqa: E501
Pod volumes to mount into the container's filesystem. Cannot be updated. # noqa: E501
:return: The volume_mounts of this V1Container. # noqa: E501
:rtype: list[V1VolumeMount]
"""
return self._volume_mounts
@volume_mounts.setter
def volume_mounts(self, volume_mounts):
"""Sets the volume_mounts of this V1Container.
Pod volumes to mount into the container's filesystem. Cannot be updated. # noqa: E501
:param volume_mounts: The volume_mounts of this V1Container. # noqa: E501
:type: list[V1VolumeMount]
"""
self._volume_mounts = volume_mounts
@property
def working_dir(self):
"""Gets the working_dir of this V1Container. # noqa: E501
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
:return: The working_dir of this V1Container. # noqa: E501
:rtype: str
"""
return self._working_dir
@working_dir.setter
def working_dir(self, working_dir):
"""Sets the working_dir of this V1Container.
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
:param working_dir: The working_dir of this V1Container. # noqa: E501
:type: str
"""
self._working_dir = working_dir
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
# Walk the generated attribute map (attr name -> OpenAPI type) and
# serialize each attribute, recursing into nested models via to_dict().
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
# Serialize list elements that are themselves models; pass
# plain values through unchanged.
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
# Serialize dict *values* that are models; keys are untouched.
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
# pprint renders the nested dict form in a stable, readable layout.
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
# Delegates to to_str() so repr() and str() show the same dict dump.
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
# Non-V1Container operands compare unequal (this generated code returns
# False rather than NotImplemented); equality is by serialized state.
if not isinstance(other, V1Container):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
# Mirror of __eq__ (kept explicit by the code generator).
if not isinstance(other, V1Container):
return True
return self.to_dict() != other.to_dict()
| V1Container |
python | huggingface__transformers | src/transformers/models/lfm2_vl/modeling_lfm2_vl.py | {
"start": 1680,
"end": 3273
class ____(nn.Module):
    """Projects pixel-unshuffled vision features into the text embedding space.

    A ``downsample_factor x downsample_factor`` spatial neighborhood is folded
    into the channel dimension (pixel unshuffle), then normalized and passed
    through a two-layer MLP that maps to the text model's hidden size.
    """

    def __init__(self, config: Lfm2VlConfig):
        super().__init__()
        self.factor = config.downsample_factor
        # After unshuffling, each output position carries factor**2 patches.
        unshuffled_dim = config.vision_config.hidden_size * self.factor**2
        self.layer_norm = nn.LayerNorm(unshuffled_dim)
        self.linear_1 = nn.Linear(
            unshuffled_dim,
            config.projector_hidden_size,
            bias=config.projector_bias,
        )
        self.act = ACT2FN[config.projector_hidden_act]
        self.linear_2 = nn.Linear(
            config.projector_hidden_size,
            config.text_config.hidden_size,
            bias=config.projector_bias,
        )

    def forward(self, image_features: torch.Tensor):
        projected = self.layer_norm(self.pixel_unshuffle(image_features))
        projected = self.linear_2(self.act(self.linear_1(projected)))
        return projected

    def pixel_unshuffle(self, hidden_states: torch.Tensor):
        # (B, W, H, C) -> (B, W/f, H/f, C*f*f): fold an f x f spatial block
        # into the channel axis via two reshape/permute passes, one per axis.
        batch_size, width, height, channels = hidden_states.size()
        folded = hidden_states.reshape(
            batch_size, width, height // self.factor, channels * self.factor
        )
        folded = folded.permute(0, 2, 1, 3)
        folded = folded.reshape(
            batch_size, height // self.factor, width // self.factor, channels * self.factor**2
        )
        return folded.permute(0, 2, 1, 3)
@auto_docstring
| Lfm2VlMultiModalProjector |
python | fluentpython__example-code | attic/concurrency/flags/count_colors.py | {
"start": 16,
"end": 506
} | class ____:
# Minimal manual Tk experiment: shows img/br.gif on a canvas, prints the
# PhotoImage/event internals, and exits on right-click (Button-2).
def __init__(self, master):
canvas = tkinter.Canvas(master)
# Attach the image to the canvas object so a strong reference survives;
# Tk itself does not keep the PhotoImage alive.
canvas.image = tkinter.PhotoImage(file = 'img/br.gif')
print(vars(canvas.image))
canvas.create_image(0,0, image=canvas.image, anchor=tkinter.NW)
canvas.bind('<Button-2>', self.right_click)
canvas.grid(row=0, column=0)
def right_click(self, event):
# Dump the raw event attributes, then terminate the script.
print(vars(event))
raise SystemExit()
# Script entry: build the root window, attach the test widget, and block in
# the Tk event loop until right_click raises SystemExit.
root = tkinter.Tk()
test = Test(root)
root.mainloop()
| Test |
python | Textualize__textual | src/textual/widgets/_selection_list.py | {
"start": 1209,
"end": 2501
class ____(Generic[SelectionType], Option):
    """A selection for a [`SelectionList`][textual.widgets.SelectionList]."""

    def __init__(
        self,
        prompt: ContentText,
        value: SelectionType,
        initial_state: bool = False,
        id: str | None = None,
        disabled: bool = False,
    ):
        """Initialise the selection.

        Args:
            prompt: The prompt for the selection.
            value: The value for the selection.
            initial_state: The initial selected state of the selection.
            id: The optional ID for the selection.
            disabled: The initial enabled/disabled state. Enabled by default.
        """
        # Only the first segment of the split prompt is used for the option
        # (Content.split() default — presumably the first line; confirm).
        first_segment = Content.from_text(prompt).split()[0]
        super().__init__(first_segment, id, disabled)
        # The value associated with the selection.
        self._value: SelectionType = value
        # The initial selected state for the selection.
        self._initial_state: bool = initial_state

    @property
    def value(self) -> SelectionType:
        """The value for this selection."""
        return self._value

    @property
    def initial_state(self) -> bool:
        """The initial selected state for the selection."""
        return self._initial_state
| Selection |
python | wandb__wandb | wandb/sdk/artifacts/_generated/registry_versions.py | {
"start": 961,
"end": 1373
} | class ____(GQLResult):
# Edge wrapper: each edge resolves to an optional artifact-membership node.
node: Optional[ArtifactMembershipFragment]
# Resolve pydantic forward references now that all generated models exist.
RegistryVersions.model_rebuild()
RegistryVersionsOrganization.model_rebuild()
RegistryVersionsOrganizationOrgEntity.model_rebuild()
RegistryVersionsOrganizationOrgEntityArtifactMemberships.model_rebuild()
RegistryVersionsOrganizationOrgEntityArtifactMembershipsEdges.model_rebuild()
python | doocs__leetcode | solution/0000-0099/0032.Longest Valid Parentheses/Solution2.py | {
"start": 0,
"end": 402
class ____:
    def longestValidParentheses(self, s: str) -> int:
        """Return the length of the longest well-formed parentheses substring.

        Keeps a stack of indices whose bottom entry is the index just before
        the current candidate substring. O(n) time, O(n) extra space.
        """
        best = 0
        # Sentinel: position "before" the start of the current valid run.
        boundaries = [-1]
        for index, ch in enumerate(s):
            if ch == '(':
                boundaries.append(index)
                continue
            # ch is ')': try to match the most recent unmatched '('.
            boundaries.pop()
            if boundaries:
                best = max(best, index - boundaries[-1])
            else:
                # Unmatched ')': it becomes the new left boundary.
                boundaries.append(index)
        return best
| Solution |
python | joblib__joblib | joblib/test/test_numpy_pickle.py | {
"start": 37666,
"end": 42130
} | class ____(CompressorWrapper):
# Test fixture: wraps stdlib gzip with a non-standard magic prefix
# (b"prefix") so registration/override behavior can be exercised.
def __init__(self):
CompressorWrapper.__init__(self, obj=gzip.GzipFile, prefix=b"prefix")
def test_register_compressor_already_registered():
# Test registration of existing compressor files.
compressor_name = "test-name"
# register a test compressor
register_compressor(compressor_name, AnotherZlibCompressorWrapper())
# Re-registering the same name without force must raise...
with raises(ValueError) as excinfo:
register_compressor(compressor_name, StandardLibGzipCompressorWrapper())
excinfo.match("Compressor '{}' already registered.".format(compressor_name))
# ...while force=True replaces the existing registration in place.
register_compressor(compressor_name, StandardLibGzipCompressorWrapper(), force=True)
assert compressor_name in _COMPRESSORS
assert _COMPRESSORS[compressor_name].fileobj_factory == gzip.GzipFile
# Remove this dummy compressor file from extra compressors because other
# tests might fail because of this
_COMPRESSORS.pop(compressor_name)
@with_lz4
def test_lz4_compression(tmpdir):
    """Check that the lz4 compressor is usable when the dependency is installed."""
    import lz4.frame

    compressor = "lz4"
    assert compressor in _COMPRESSORS
    assert _COMPRESSORS[compressor].fileobj_factory == lz4.frame.LZ4FrameFile

    fname = tmpdir.join("test.pkl").strpath
    data = "test data"

    # Explicit compress= argument selects LZ4 regardless of file name.
    numpy_pickle.dump(data, fname, compress=compressor)
    with open(fname, "rb") as f:
        assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX
    assert numpy_pickle.load(fname) == data

    # LZ4 must also be selected purely from the ".lz4" file extension.
    # Bug fix: the assertions previously re-opened `fname` (the file written
    # above) instead of the ".lz4" file, so the extension-based code path
    # was never actually verified.
    fname_ext = fname + ".lz4"
    numpy_pickle.dump(data, fname_ext)
    with open(fname_ext, "rb") as f:
        assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX
    assert numpy_pickle.load(fname_ext) == data
@without_lz4
def test_lz4_compression_without_lz4(tmpdir):
# Check that lz4 cannot be used when dependency is not available.
fname = tmpdir.join("test.nolz4").strpath
data = "test data"
msg = LZ4_NOT_INSTALLED_ERROR
# Both the explicit compress="lz4" argument and the ".lz4" extension must
# raise the "lz4 not installed" error when the dependency is missing.
with raises(ValueError) as excinfo:
numpy_pickle.dump(data, fname, compress="lz4")
excinfo.match(msg)
with raises(ValueError) as excinfo:
numpy_pickle.dump(data, fname + ".lz4")
excinfo.match(msg)
# Parametrize over the default pickle protocol, plus the highest protocol
# when it differs from the default on this interpreter.
protocols = [pickle.DEFAULT_PROTOCOL]
if pickle.HIGHEST_PROTOCOL != pickle.DEFAULT_PROTOCOL:
protocols.append(pickle.HIGHEST_PROTOCOL)
@with_numpy
@parametrize("protocol", protocols)
def test_memmap_alignment_padding(tmpdir, protocol):
# Test that memmaped arrays returned by numpy.load are correctly aligned
fname = tmpdir.join("test.mmap").strpath
a = np.random.randn(2)
numpy_pickle.dump(a, fname, protocol=protocol)
memmap = numpy_pickle.load(fname, mmap_mode="r")
assert isinstance(memmap, np.memmap)
np.testing.assert_array_equal(a, memmap)
# Alignment check: raw data pointer must fall on the required boundary.
assert memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0
assert memmap.flags.aligned
# Arrays of odd byte sizes force padding between entries; every reloaded
# memmap must still be aligned.
array_list = [
np.random.randn(2),
np.random.randn(2),
np.random.randn(2),
np.random.randn(2),
]
# On Windows OSError 22 if reusing the same path for memmap ...
fname = tmpdir.join("test1.mmap").strpath
numpy_pickle.dump(array_list, fname, protocol=protocol)
l_reloaded = numpy_pickle.load(fname, mmap_mode="r")
for idx, memmap in enumerate(l_reloaded):
assert isinstance(memmap, np.memmap)
np.testing.assert_array_equal(array_list[idx], memmap)
assert memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0
assert memmap.flags.aligned
# Prime-sized uint8 arrays maximize misalignment pressure between entries.
array_dict = {
"a0": np.arange(2, dtype=np.uint8),
"a1": np.arange(3, dtype=np.uint8),
"a2": np.arange(5, dtype=np.uint8),
"a3": np.arange(7, dtype=np.uint8),
"a4": np.arange(11, dtype=np.uint8),
"a5": np.arange(13, dtype=np.uint8),
"a6": np.arange(17, dtype=np.uint8),
"a7": np.arange(19, dtype=np.uint8),
"a8": np.arange(23, dtype=np.uint8),
}
# On Windows OSError 22 if reusing the same path for memmap ...
fname = tmpdir.join("test2.mmap").strpath
numpy_pickle.dump(array_dict, fname, protocol=protocol)
d_reloaded = numpy_pickle.load(fname, mmap_mode="r")
for key, memmap in d_reloaded.items():
assert isinstance(memmap, np.memmap)
np.testing.assert_array_equal(array_dict[key], memmap)
assert memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0
assert memmap.flags.aligned
| StandardLibGzipCompressorWrapper |
python | urllib3__urllib3 | test/with_dummyserver/test_socketlevel.py | {
"start": 69292,
"end": 76730
} | class ____(SocketDummyServerTestCase):
def test_httplib_headers_case_insensitive(self) -> None:
self.start_response_handler(
b"HTTP/1.1 200 OK\r\n"
b"Content-Length: 0\r\n"
b"Content-type: text/plain\r\n"
b"\r\n"
)
with HTTPConnectionPool(self.host, self.port, retries=False) as pool:
HEADERS = {"Content-Length": "0", "Content-type": "text/plain"}
r = pool.request("GET", "/")
assert HEADERS == dict(r.headers.items()) # to preserve case sensitivity
def start_parsing_handler(self) -> None:
self.parsed_headers: typing.OrderedDict[str, str] = OrderedDict()
self.received_headers: list[bytes] = []
def socket_handler(listener: socket.socket) -> None:
sock = listener.accept()[0]
buf = b""
while not buf.endswith(b"\r\n\r\n"):
buf += sock.recv(65536)
self.received_headers = [
header for header in buf.split(b"\r\n")[1:] if header
]
for header in self.received_headers:
(key, value) = header.split(b": ")
self.parsed_headers[key.decode("ascii")] = value.decode("ascii")
sock.send(b"HTTP/1.1 204 No Content\r\nContent-Length: 0\r\n\r\n")
sock.close()
self._start_server(socket_handler)
def test_headers_are_sent_with_the_original_case(self) -> None:
headers = {"foo": "bar", "bAz": "quux"}
self.start_parsing_handler()
expected_headers = {
"Accept-Encoding": "identity",
"Host": f"{self.host}:{self.port}",
"User-Agent": _get_default_user_agent(),
}
expected_headers.update(headers)
with HTTPConnectionPool(self.host, self.port, retries=False) as pool:
pool.request("GET", "/", headers=HTTPHeaderDict(headers))
assert expected_headers == self.parsed_headers
def test_ua_header_can_be_overridden(self) -> None:
headers = {"uSeR-AgENt": "Definitely not urllib3!"}
self.start_parsing_handler()
expected_headers = {
"Accept-Encoding": "identity",
"Host": f"{self.host}:{self.port}",
}
expected_headers.update(headers)
with HTTPConnectionPool(self.host, self.port, retries=False) as pool:
pool.request("GET", "/", headers=HTTPHeaderDict(headers))
assert expected_headers == self.parsed_headers
def test_request_headers_are_sent_in_the_original_order(self) -> None:
# NOTE: Probability this test gives a false negative is 1/(K!)
K = 16
# NOTE: Provide headers in non-sorted order (i.e. reversed)
# so that if the internal implementation tries to sort them,
# a change will be detected.
expected_request_headers = [
(f"X-Header-{int(i)}", str(i)) for i in reversed(range(K))
]
def filter_non_x_headers(
d: typing.OrderedDict[str, str],
) -> list[tuple[str, str]]:
return [(k, v) for (k, v) in d.items() if k.startswith("X-Header-")]
self.start_parsing_handler()
with HTTPConnectionPool(self.host, self.port, retries=False) as pool:
pool.request("GET", "/", headers=OrderedDict(expected_request_headers))
assert expected_request_headers == filter_non_x_headers(self.parsed_headers)
@resolvesLocalhostFQDN()
def test_request_host_header_ignores_fqdn_dot(self) -> None:
self.start_parsing_handler()
with HTTPConnectionPool(self.host + ".", self.port, retries=False) as pool:
pool.request("GET", "/")
self.assert_header_received(
self.received_headers, "Host", f"{self.host}:{self.port}"
)
def test_response_headers_are_returned_in_the_original_order(self) -> None:
# NOTE: Probability this test gives a false negative is 1/(K!)
K = 16
# NOTE: Provide headers in non-sorted order (i.e. reversed)
# so that if the internal implementation tries to sort them,
# a change will be detected.
expected_response_headers = [
(f"X-Header-{int(i)}", str(i)) for i in reversed(range(K))
]
def socket_handler(listener: socket.socket) -> None:
sock = listener.accept()[0]
buf = b""
while not buf.endswith(b"\r\n\r\n"):
buf += sock.recv(65536)
sock.send(
b"HTTP/1.1 200 OK\r\n"
+ b"\r\n".join(
[
(k.encode("utf8") + b": " + v.encode("utf8"))
for (k, v) in expected_response_headers
]
)
+ b"\r\n"
)
sock.close()
self._start_server(socket_handler)
with HTTPConnectionPool(self.host, self.port) as pool:
r = pool.request("GET", "/", retries=0)
actual_response_headers = [
(k, v) for (k, v) in r.headers.items() if k.startswith("X-Header-")
]
assert expected_response_headers == actual_response_headers
@pytest.mark.parametrize(
"method_type, body_type",
[
("GET", None),
("POST", None),
("POST", "bytes"),
("POST", "bytes-io"),
],
)
def test_headers_sent_with_add(
self, method_type: str, body_type: str | None
) -> None:
"""
Confirm that when adding headers with combine=True that we simply append to the
most recent value, rather than create a new header line.
"""
body: None | bytes | io.BytesIO
if body_type is None:
body = None
expected = b"\r\n\r\n"
elif body_type == "bytes":
body = b"my-body"
expected = b"\r\n\r\nmy-body"
elif body_type == "bytes-io":
body = io.BytesIO(b"bytes-io-body")
body.seek(0, 0)
expected = b"bytes-io-body\r\n0\r\n\r\n"
else:
raise ValueError("Unknown body type")
buffer: bytes = b""
def socket_handler(listener: socket.socket) -> None:
nonlocal buffer
sock = listener.accept()[0]
sock.settimeout(0)
while expected not in buffer:
with contextlib.suppress(BlockingIOError):
buffer += sock.recv(65536)
sock.sendall(
b"HTTP/1.1 200 OK\r\n"
b"Server: example.com\r\n"
b"Content-Length: 0\r\n\r\n"
)
sock.close()
self._start_server(socket_handler)
headers = HTTPHeaderDict()
headers.add("A", "1")
headers.add("C", "3")
headers.add("B", "2")
headers.add("B", "3")
headers.add("A", "4", combine=False)
headers.add("C", "5", combine=True)
headers.add("C", "6")
with HTTPConnectionPool(self.host, self.port, retries=False) as pool:
r = pool.request(
method_type,
"/",
body=body,
headers=headers,
)
assert r.status == 200
assert b"A: 1\r\nA: 4\r\nC: 3, 5\r\nC: 6\r\nB: 2\r\nB: 3" in buffer
| TestHeaders |
python | pytorch__pytorch | benchmarks/dynamo/microbenchmarks/microbench.py | {
"start": 1815,
"end": 1946
class ____(torch.nn.Module):
    """Trivial benchmark module: forward returns the elementwise sum as a 1-tuple."""

    def forward(self, x, y):
        # Alternative kept from the original benchmark:
        # return x / (torch.abs(x) + 1.0),
        summed = x + y
        return (summed,)
| MyModel2 |
python | django__django | tests/db_functions/migrations/0002_create_test_models.py | {
"start": 43,
"end": 3543
} | class ____(migrations.Migration):
dependencies = [
("db_functions", "0001_setup_extensions"),
]
operations = [
migrations.CreateModel(
name="Author",
fields=[
("name", models.CharField(max_length=50)),
("alias", models.CharField(max_length=50, null=True, blank=True)),
("goes_by", models.CharField(max_length=50, null=True, blank=True)),
("age", models.PositiveSmallIntegerField(default=30)),
],
),
migrations.CreateModel(
name="Article",
fields=[
(
"authors",
models.ManyToManyField(
"db_functions.Author", related_name="articles"
),
),
("title", models.CharField(max_length=50)),
("summary", models.CharField(max_length=200, null=True, blank=True)),
("text", models.TextField()),
("written", models.DateTimeField()),
("published", models.DateTimeField(null=True, blank=True)),
("updated", models.DateTimeField(null=True, blank=True)),
("views", models.PositiveIntegerField(default=0)),
],
),
migrations.CreateModel(
name="Fan",
fields=[
("name", models.CharField(max_length=50)),
("age", models.PositiveSmallIntegerField(default=30)),
(
"author",
models.ForeignKey(
"db_functions.Author", models.CASCADE, related_name="fans"
),
),
("fan_since", models.DateTimeField(null=True, blank=True)),
],
),
migrations.CreateModel(
name="DTModel",
fields=[
("name", models.CharField(max_length=32)),
("start_datetime", models.DateTimeField(null=True, blank=True)),
("end_datetime", models.DateTimeField(null=True, blank=True)),
("start_date", models.DateField(null=True, blank=True)),
("end_date", models.DateField(null=True, blank=True)),
("start_time", models.TimeField(null=True, blank=True)),
("end_time", models.TimeField(null=True, blank=True)),
("duration", models.DurationField(null=True, blank=True)),
],
),
migrations.CreateModel(
name="DecimalModel",
fields=[
("n1", models.DecimalField(decimal_places=2, max_digits=6)),
(
"n2",
models.DecimalField(
decimal_places=7, max_digits=9, null=True, blank=True
),
),
],
),
migrations.CreateModel(
name="IntegerModel",
fields=[
("big", models.BigIntegerField(null=True, blank=True)),
("normal", models.IntegerField(null=True, blank=True)),
("small", models.SmallIntegerField(null=True, blank=True)),
],
),
migrations.CreateModel(
name="FloatModel",
fields=[
("f1", models.FloatField(null=True, blank=True)),
("f2", models.FloatField(null=True, blank=True)),
],
),
]
| Migration |
python | openai__openai-python | src/openai/types/responses/response_file_search_tool_call.py | {
"start": 1062,
"end": 1664
} | class ____(BaseModel):
id: str
"""The unique ID of the file search tool call."""
queries: List[str]
"""The queries used to search for files."""
status: Literal["in_progress", "searching", "completed", "incomplete", "failed"]
"""The status of the file search tool call.
One of `in_progress`, `searching`, `incomplete` or `failed`,
"""
type: Literal["file_search_call"]
"""The type of the file search tool call. Always `file_search_call`."""
results: Optional[List[Result]] = None
"""The results of the file search tool call."""
| ResponseFileSearchToolCall |
python | FactoryBoy__factory_boy | tests/test_fuzzy.py | {
"start": 19076,
"end": 20215
} | class ____(unittest.TestCase):
def test_unbiased(self):
chars = ['a', 'b', 'c']
fuzz = fuzzy.FuzzyText(prefix='pre', suffix='post', chars=chars, length=12)
res = utils.evaluate_declaration(fuzz)
self.assertEqual('pre', res[:3])
self.assertEqual('post', res[-4:])
self.assertEqual(3 + 12 + 4, len(res))
for char in res[3:-4]:
self.assertIn(char, chars)
def test_mock(self):
fake_choice = lambda chars: chars[0]
chars = ['a', 'b', 'c']
fuzz = fuzzy.FuzzyText(prefix='pre', suffix='post', chars=chars, length=4)
with mock.patch('factory.random.randgen.choice', fake_choice):
res = utils.evaluate_declaration(fuzz)
self.assertEqual('preaaaapost', res)
def test_generator(self):
def options():
yield 'a'
yield 'b'
yield 'c'
fuzz = fuzzy.FuzzyText(chars=options(), length=12)
res = utils.evaluate_declaration(fuzz)
self.assertEqual(12, len(res))
for char in res:
self.assertIn(char, ['a', 'b', 'c'])
| FuzzyTextTestCase |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/obscure_tito.py | {
"start": 216,
"end": 335
} | class ____:
def update(self, parameter):
...
def taint_parameter(self, tainted_parameter):
...
| C |
python | ansible__ansible | lib/ansible/_internal/_templating/_jinja_bits.py | {
"start": 23455,
"end": 43835
} | class ____(SandboxedEnvironment):
"""
Our custom environment, which simply allows us to override the class-level
values for the Template and Context classes used by jinja2 internally.
"""
context_class = AnsibleContext
template_class = AnsibleTemplate
code_generator_class = AnsibleCodeGenerator
intercepted_binops = frozenset(('eq',))
_allowed_unsafe_attributes: dict[str, type | tuple[type, ...]] = dict(
# Allow bitwise operations on int until bitwise filters are available.
# see: https://github.com/ansible/ansible/issues/85204
__and__=int,
__lshift__=int,
__or__=int,
__rshift__=int,
__xor__=int,
)
"""
Attributes which are considered unsafe by `is_safe_attribute`, which should be allowed when used on specific types.
The attributes allowed here are intended only for backward compatibility with existing use cases.
They should be exposed as filters in a future release and eventually deprecated.
"""
_lexer_cache = LRUCache(50)
# DTFIX-FUTURE: bikeshed a name/mechanism to control template debugging
_debuggable_template_source = False
_debuggable_template_source_path: pathlib.Path = pathlib.Path(__file__).parent.parent.parent.parent / '.template_debug_source'
def __init__(self, *args, ansible_basedir: str | None = None, **kwargs) -> None:
if ansible_basedir:
kwargs.update(loader=FileSystemLoader(ansible_basedir))
super().__init__(*args, extensions=_TemplateConfig.jinja_extensions, **kwargs)
self.filters = JinjaPluginIntercept(_BUILTIN_FILTERS, filter_loader) # type: ignore[assignment]
self.tests = JinjaPluginIntercept(_BUILTIN_TESTS, test_loader) # type: ignore[assignment,arg-type]
# future Jinja releases may default-enable autoescape; force-disable to prevent the problems it could cause
# see https://github.com/pallets/jinja/blob/3.1.2/docs/api.rst?plain=1#L69
self.autoescape = False
self.trim_blocks = True
self.undefined = UndefinedMarker
self.finalize = _ansible_finalize
self.globals.update(
range=range, # the sandboxed environment limits range in ways that may cause us problems; use the real Python one
now=_now,
undef=_undef,
omit=Omit,
lookup=_lookup,
query=_query,
q=_query,
)
# Disabling the optimizer prevents compile-time constant expression folding, which prevents our
# visit_Const recursive inline template expansion tricks from working in many cases where Jinja's
# ignorance of our embedded templates are optimized away as fully-constant expressions,
# eg {{ "{{'hi'}}" == "hi" }}. As of Jinja ~3.1, this specifically avoids cases where the @optimizeconst
# visitor decorator performs constant folding, which bypasses our visit_Const impl and causes embedded
# templates to be lost.
# See also optimizeconst impl: https://github.com/pallets/jinja/blob/3.1.0/src/jinja2/compiler.py#L48-L49
self.optimized = False
def get_template(
self,
name: str | Template,
parent: str | None = None,
globals: c.MutableMapping[str, t.Any] | None = None,
) -> Template:
"""Ensures that templates built via `get_template` are also source debuggable."""
with _CompileStateSmugglingCtx.when(self._debuggable_template_source) as ctx:
template_obj = t.cast(AnsibleTemplate, super().get_template(name, parent, globals))
if isinstance(ctx, _CompileStateSmugglingCtx): # only present if debugging is enabled
template_obj._python_source_temp_path = ctx.python_source_temp_path # facilitate deletion of the temp file when template_obj is deleted
return template_obj
def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
# deprecated: description="remove relaxed template sandbox mode support" core_version="2.23"
if _TemplateConfig.sandbox_mode == _SandboxMode.ALLOW_UNSAFE_ATTRIBUTES:
return True
if (type_or_tuple := self._allowed_unsafe_attributes.get(attr)) and isinstance(obj, type_or_tuple):
return True
return super().is_safe_attribute(obj, attr, value)
@property
def lexer(self) -> AnsibleLexer:
"""Return/cache an AnsibleLexer with settings from the current AnsibleEnvironment"""
# DTFIX-FUTURE: optimization - we should pre-generate the default cached lexer before forking, not leave it to chance (e.g. simple playbooks)
key = tuple(getattr(self, name) for name in _TEMPLATE_OVERRIDE_FIELD_NAMES)
lex = self._lexer_cache.get(key)
if lex is None:
self._lexer_cache[key] = lex = AnsibleLexer(self)
return lex
def call_filter(
self,
name: str,
value: t.Any,
args: c.Sequence[t.Any] | None = None,
kwargs: c.Mapping[str, t.Any] | None = None,
context: Context | None = None,
eval_ctx: EvalContext | None = None,
) -> t.Any:
"""
Ensure that filters directly invoked by plugins will see non-templating lazy containers.
Without this, `_wrap_filter` will wrap `args` and `kwargs` in templating lazy containers.
This provides consistency with plugin output handling by preventing auto-templating of trusted templates passed in native containers.
"""
# DTFIX-FUTURE: need better logic to handle non-list/non-dict inputs for args/kwargs
args = _AnsibleLazyTemplateMixin._try_create(list(args or []), LazyOptions.SKIP_TEMPLATES)
kwargs = _AnsibleLazyTemplateMixin._try_create(kwargs, LazyOptions.SKIP_TEMPLATES)
return super().call_filter(name, value, args, kwargs, context, eval_ctx)
def call_test(
self,
name: str,
value: t.Any,
args: c.Sequence[t.Any] | None = None,
kwargs: c.Mapping[str, t.Any] | None = None,
context: Context | None = None,
eval_ctx: EvalContext | None = None,
) -> t.Any:
"""
Ensure that tests directly invoked by plugins will see non-templating lazy containers.
Without this, `_wrap_test` will wrap `args` and `kwargs` in templating lazy containers.
This provides consistency with plugin output handling by preventing auto-templating of trusted templates passed in native containers.
"""
# DTFIX-FUTURE: need better logic to handle non-list/non-dict inputs for args/kwargs
args = _AnsibleLazyTemplateMixin._try_create(list(args or []), LazyOptions.SKIP_TEMPLATES)
kwargs = _AnsibleLazyTemplateMixin._try_create(kwargs, LazyOptions.SKIP_TEMPLATES)
return super().call_test(name, value, args, kwargs, context, eval_ctx)
def compile_expression(self, source: str, *args, **kwargs) -> TemplateExpression:
# compile_expression parses and passes the tree to from_string; for debug support, activate the context here to capture the intermediate results
with _CompileStateSmugglingCtx.when(self._debuggable_template_source) as ctx:
if isinstance(ctx, _CompileStateSmugglingCtx): # only present if debugging is enabled
ctx.template_source = source
return super().compile_expression(source, *args, **kwargs)
def from_string(self, source: str | jinja2.nodes.Template, *args, **kwargs) -> AnsibleTemplate:
# if debugging is enabled, use existing context when present (e.g., from compile_expression)
current_ctx = _CompileStateSmugglingCtx.current(optional=True) if self._debuggable_template_source else None
with _CompileStateSmugglingCtx.when(self._debuggable_template_source and not current_ctx) as new_ctx:
template_obj = t.cast(AnsibleTemplate, super().from_string(source, *args, **kwargs))
if isinstance(ctx := current_ctx or new_ctx, _CompileStateSmugglingCtx): # only present if debugging is enabled
template_obj._python_source_temp_path = ctx.python_source_temp_path # facilitate deletion of the temp file when template_obj is deleted
return template_obj
def _parse(self, source: str, *args, **kwargs) -> jinja2.nodes.Template:
if csc := _CompileStateSmugglingCtx.current(optional=True):
csc.template_source = source
return super()._parse(source, *args, **kwargs)
def _compile(self, source: str, filename: str) -> types.CodeType:
if csc := _CompileStateSmugglingCtx.current(optional=True):
origin = Origin.get_tag(csc.template_source) or Origin.UNKNOWN
source = '\n'.join(
(
"import sys; breakpoint() if type(sys.breakpointhook) is not type(breakpoint) else None",
f"# original template source from {str(origin)!r}: ",
'\n'.join(f'# {line}' for line in (csc.template_source or '').splitlines()),
source,
)
)
source_temp_dir = self._debuggable_template_source_path
source_temp_dir.mkdir(parents=True, exist_ok=True)
with tempfile.NamedTemporaryFile(dir=source_temp_dir, mode='w', suffix='.py', prefix='j2_src_', delete=False) as source_file:
filename = source_file.name
source_file.write(source)
source_file.flush()
csc.python_source = source
csc.python_source_temp_path = pathlib.Path(filename)
res = super()._compile(source, filename)
return res
@staticmethod
def concat(nodes: t.Iterable[t.Any]) -> t.Any: # type: ignore[override]
node_list = list(_flatten_nodes(nodes))
if not node_list:
return None
# this code is complemented by our tweaked CodeGenerator _output_const_repr that ensures that literal constants
# in templates aren't double-repr'd in the generated code
if len(node_list) == 1:
return node_list[0]
# In order to ensure that all markers are tripped, do a recursive finalize before we repr (otherwise we can end up
# repr'ing a Marker). This requires two passes, but avoids the need for a parallel reimplementation of all repr methods.
try:
node_list = _finalize_template_result(node_list, FinalizeMode.CONCAT)
except MarkerError as ex:
return ex.source # return the first Marker encountered
return ''.join([to_text(v) for v in node_list])
@staticmethod
def _access_const(const_template: t.LiteralString) -> t.Any:
"""
Called during template rendering on template-looking string constants embedded in the template.
It provides the following functionality:
* Propagates origin from the containing template.
* For backward compatibility when embedded templates are enabled:
* Conditionals - Renders embedded template constants and accesses the result. Warns on each constant immediately.
* Non-conditionals - Tags constants for deferred rendering of templates in lookup terms. Warns on each constant during lookup invocation.
"""
ctx = TemplateContext.current()
if (tv := ctx.template_value) and (origin := Origin.get_tag(tv)):
const_template = origin.tag(const_template)
if ctx._render_jinja_const_template:
_jinja_const_template_warning(const_template, is_conditional=True)
result = ctx.templar.template(TrustedAsTemplate().tag(const_template))
AnsibleAccessContext.current().access(result)
else:
# warnings will be issued when lookup terms processing occurs, to avoid false positives
result = _JinjaConstTemplate().tag(const_template)
return result
def getitem(self, obj: t.Any, argument: t.Any) -> t.Any:
value = super().getitem(obj, argument)
AnsibleAccessContext.current().access(value)
return value
def getattr(self, obj: t.Any, attribute: str) -> t.Any:
"""
Get `attribute` from the attributes of `obj`, falling back to items in `obj`.
If no item was found, return a sandbox-specific `UndefinedMarker` if `attribute` is protected by the sandbox,
otherwise return a normal `UndefinedMarker` instance.
This differs from the built-in Jinja behavior which will not fall back to items if `attribute` is protected by the sandbox.
"""
# example template that uses this: "{{ some.thing }}" -- obj is the "some" dict, attribute is "thing"
is_safe = True
try:
value = getattr(obj, attribute)
except AttributeError:
value = _sentinel
else:
if not (is_safe := self.is_safe_attribute(obj, attribute, value)):
value = _sentinel
if value is _sentinel:
try:
value = obj[attribute]
except (TypeError, LookupError):
value = self.undefined(obj=obj, name=attribute) if is_safe else self.unsafe_undefined(obj, attribute)
AnsibleAccessContext.current().access(value)
return value
def call(
self,
__context: Context,
__obj: t.Any,
*args: t.Any,
**kwargs: t.Any,
) -> t.Any:
try:
if _DirectCall.is_marked(__obj):
# Both `_lookup` and `_query` handle arg proxying and `Marker` args internally.
# Performing either before calling them will interfere with that processing.
return super().call(__context, __obj, *args, **kwargs)
# Jinja's generated macro code handles Markers, so preemptive raise on Marker args and lazy retrieval should be disabled for the macro invocation.
is_macro = isinstance(__obj, Macro)
if not is_macro and (first_marker := get_first_marker_arg(args, kwargs)) is not None:
return first_marker
with JinjaCallContext(accept_lazy_markers=is_macro):
call_res = super().call(__context, __obj, *lazify_container_args(args), **lazify_container_kwargs(kwargs))
if __obj is range:
# Preserve the ability to do `range(1000000000) | random` by not converting range objects to lists.
# Historically, range objects were only converted on Jinja finalize and filter outputs, so they've always been floating around in templating
# code and visible to user plugins.
return call_res
return _wrap_plugin_output(call_res)
except MarkerError as ex:
return ex.source
except Exception as ex:
return CapturedExceptionMarker(ex)
AnsibleTemplate.environment_class = AnsibleEnvironment
_DEFAULT_UNDEF = UndefinedMarker("Mandatory variable has not been overridden", _no_template_source=True)
_sentinel: t.Final[object] = object()
@_DirectCall.mark
def _undef(hint: str | None = None) -> UndefinedMarker:
"""Jinja2 global function (undef) for creating getting a `UndefinedMarker` instance, optionally with a custom hint."""
validate_arg_type('hint', hint, (str, type(None)))
if not hint:
return _DEFAULT_UNDEF
return UndefinedMarker(hint)
def _flatten_nodes(nodes: t.Iterable[t.Any]) -> t.Iterable[t.Any]:
"""
Yield nodes from a potentially recursive iterable of nodes.
The recursion is required to expand template imports (TemplateModule).
Any exception raised while consuming a template node will be yielded as a Marker for that node.
"""
iterator = iter(nodes)
while True:
try:
node = next(iterator)
except StopIteration:
break
except Exception as ex:
yield defer_template_error(ex, TemplateContext.current().template_value, is_expression=False)
# DTFIX-FUTURE: We should be able to determine if truncation occurred by having the code generator smuggle out the number of expected nodes.
yield TruncationMarker()
else:
if type(node) is TemplateModule: # pylint: disable=unidiomatic-typecheck
yield from _flatten_nodes(node._body_stream)
elif node is None:
continue # avoid yielding `None`-valued nodes to avoid literal "None" in stringified template results
else:
yield node
def _flatten_and_lazify_vars(mapping: c.Mapping) -> t.Iterable[c.Mapping]:
"""Prevent deeply-nested Jinja vars ChainMaps from being created by nested contexts and ensure that all top-level containers support lazy templating."""
mapping_type = type(mapping)
if mapping_type is ChainMap:
# noinspection PyUnresolvedReferences
for m in mapping.maps:
yield from _flatten_and_lazify_vars(m)
elif mapping_type is _AnsibleLazyTemplateDict:
yield mapping
elif mapping_type in (dict, _AnsibleTaggedDict):
# don't propagate empty dictionary layers
if mapping:
yield _AnsibleLazyTemplateMixin._try_create(mapping)
else:
raise NotImplementedError(f"unsupported mapping type in Jinja vars: {mapping_type}")
def _new_context(
*,
environment: Environment,
template_name: str | None,
blocks: dict[str, t.Callable[[Context], c.Iterator[str]]],
shared: bool = False,
jinja_locals: c.Mapping[str, t.Any] | None = None,
jinja_vars: c.Mapping[str, t.Any] | None = None,
jinja_globals: c.MutableMapping[str, t.Any] | None = None,
) -> Context:
"""Override Jinja's context vars setup to use ChainMaps and containers that support lazy templating."""
layers = []
if jinja_locals:
# Omit values set to Jinja's internal `missing` sentinel; they are locals that have not yet been
# initialized in the current context, and should not be exposed to child contexts. e.g.: {% import 'a' as b with context %}.
# The `b` local will be `missing` in the `a` context and should not be propagated as a local to the child context we're creating.
layers.append(_AnsibleLazyTemplateMixin._try_create({k: v for k, v in jinja_locals.items() if v is not missing}))
if jinja_vars:
layers.extend(_flatten_and_lazify_vars(jinja_vars))
if jinja_globals and not shared:
# Even though we don't currently support templating globals, it's easier to ensure that everything is template-able rather than trying to
# pick apart the ChainMaps to enforce non-template-able globals, or to risk things that *should* be template-able not being lazified.
layers.extend(_flatten_and_lazify_vars(jinja_globals))
if not layers:
# ensure we have at least one layer (which should be lazy), since _flatten_and_lazify_vars eliminates most empty layers
layers.append(_AnsibleLazyTemplateMixin._try_create({}))
# only return a ChainMap if we're combining layers, or we have none
parent = layers[0] if len(layers) == 1 else ChainMap(*layers)
# the `parent` cast is only to satisfy Jinja's overly-strict type hint
return environment.context_class(environment, t.cast(dict, parent), template_name, blocks, globals=jinja_globals)
def is_possibly_template(value: str, overrides: TemplateOverrides = TemplateOverrides.DEFAULT):
"""
A lightweight check to determine if the given string looks like it contains a template, even if that template is invalid.
Returns `True` if the given string starts with a Jinja overrides header or if it contains template start strings.
"""
return value.startswith(JINJA2_OVERRIDE) or overrides._contains_start_string(value)
def is_possibly_all_template(value: str, overrides: TemplateOverrides = TemplateOverrides.DEFAULT):
"""
A lightweight check to determine if the given string looks like it contains *only* a template, even if that template is invalid.
Returns `True` if the given string starts with a Jinja overrides header or if it starts and ends with Jinja template delimiters.
"""
return value.startswith(JINJA2_OVERRIDE) or overrides._starts_and_ends_with_jinja_delimiters(value)
| AnsibleEnvironment |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/distributions/util_test.py | {
"start": 33252,
"end": 37562
} | class ____(test.TestCase):
def _npSoftplus(self, np_features):
np_features = np.asarray(np_features)
zero = np.asarray(0).astype(np_features.dtype)
return np.logaddexp(zero, np_features)
def _testSoftplus(self, np_features, use_gpu=False):
np_features = np.asarray(np_features)
np_softplus = self._npSoftplus(np_features)
with self.session(use_gpu=use_gpu) as sess:
softplus = nn_ops.softplus(np_features)
softplus_inverse = du.softplus_inverse(softplus)
[tf_softplus, tf_softplus_inverse] = sess.run([
softplus, softplus_inverse])
rtol = {"float16": 0.07, "float32": 0.003, "float64": 0.002}.get(
str(np_features.dtype), 1e-6)
self.assertAllCloseAccordingToType(np_softplus, tf_softplus, rtol=rtol)
# This will test that we correctly computed the inverse by verifying we
# recovered the original input.
self.assertAllCloseAccordingToType(
np_features, tf_softplus_inverse,
atol=0., rtol=rtol)
self.assertAllEqual(
np.ones_like(tf_softplus).astype(np.bool_), tf_softplus > 0)
self.assertShapeEqual(np_softplus, softplus)
self.assertShapeEqual(np_softplus, softplus_inverse)
self.assertAllEqual(
np.ones_like(tf_softplus).astype(np.bool_), np.isfinite(tf_softplus))
self.assertAllEqual(
np.ones_like(tf_softplus_inverse).astype(np.bool_),
np.isfinite(tf_softplus_inverse))
@test_util.run_deprecated_v1
def testNumbers(self):
for t in [np.float16, np.float32, np.float64]:
lower = {np.float16: -15, np.float32: -50, np.float64: -50}.get(t, -100)
upper = {np.float16: 50, np.float32: 50, np.float64: 50}.get(t, 100)
self._testSoftplus(
np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(
[2, -1]),
use_gpu=False)
self._testSoftplus(
np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(
[2, -1]),
use_gpu=True)
log_eps = np.log(np.finfo(t).eps)
one = t(1)
ten = t(10)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=False)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten - log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=True)
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
tf_logging.vlog(2, "softplus (float) gradient err = ", err)
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testInverseSoftplusGradientNeverNan(self):
with self.cached_session():
# Note that this range contains both zero and inf.
x = constant_op.constant(np.logspace(-8, 6).astype(np.float16))
y = du.softplus_inverse(x)
grads = self.evaluate(gradients_impl.gradients(y, x)[0])
# Equivalent to `assertAllFalse` (if it existed).
self.assertAllEqual(
np.zeros_like(grads).astype(np.bool_), np.isnan(grads))
@test_util.run_deprecated_v1
def testInverseSoftplusGradientFinite(self):
with self.cached_session():
# This range of x is all finite, and so is 1 / x. So the
# gradient and its approximations should be finite as well.
x = constant_op.constant(np.logspace(-4.8, 4.5).astype(np.float16))
y = du.softplus_inverse(x)
grads = self.evaluate(gradients_impl.gradients(y, x)[0])
# Equivalent to `assertAllTrue` (if it existed).
self.assertAllEqual(
np.ones_like(grads).astype(np.bool_), np.isfinite(grads))
@test_util.run_all_in_graph_and_eager_modes
| SoftplusTest |
python | sympy__sympy | sympy/solvers/polysys.py | {
"start": 978,
"end": 27168
} | class ____(Exception):
"""Raised when solver's conditions were not met. """
def solve_poly_system(seq, *gens, strict=False, **args):
"""
Return a list of solutions for the system of polynomial equations
or else None.
Parameters
==========
seq: a list/tuple/set
Listing all the equations that are needed to be solved
gens: generators
generators of the equations in seq for which we want the
solutions
strict: a boolean (default is False)
if strict is True, NotImplementedError will be raised if
the solution is known to be incomplete (which can occur if
not all solutions are expressible in radicals)
args: Keyword arguments
Special options for solving the equations.
Returns
=======
List[Tuple]
a list of tuples with elements being solutions for the
symbols in the order they were passed as gens
None
None is returned when the computed basis contains only the ground.
Examples
========
>>> from sympy import solve_poly_system
>>> from sympy.abc import x, y
>>> solve_poly_system([x*y - 2*y, 2*y**2 - x**2], x, y)
[(0, 0), (2, -sqrt(2)), (2, sqrt(2))]
>>> solve_poly_system([x**5 - x + y**3, y**2 - 1], x, y, strict=True)
Traceback (most recent call last):
...
UnsolvableFactorError
"""
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('solve_poly_system', len(seq), exc)
if len(polys) == len(opt.gens) == 2:
f, g = polys
if all(i <= 2 for i in f.degree_list() + g.degree_list()):
try:
return solve_biquadratic(f, g, opt)
except SolveFailed:
pass
return solve_generic(polys, opt, strict=strict)
def solve_biquadratic(f, g, opt):
"""Solve a system of two bivariate quadratic polynomial equations.
Parameters
==========
f: a single Expr or Poly
First equation
g: a single Expr or Poly
Second Equation
opt: an Options object
For specifying keyword arguments and generators
Returns
=======
List[Tuple]
a list of tuples with elements being solutions for the
symbols in the order they were passed as gens
None
None is returned when the computed basis contains only the ground.
Examples
========
>>> from sympy import Options, Poly
>>> from sympy.abc import x, y
>>> from sympy.solvers.polysys import solve_biquadratic
>>> NewOption = Options((x, y), {'domain': 'ZZ'})
>>> a = Poly(y**2 - 4 + x, y, x, domain='ZZ')
>>> b = Poly(y*2 + 3*x - 7, y, x, domain='ZZ')
>>> solve_biquadratic(a, b, NewOption)
[(1/3, 3), (41/27, 11/9)]
>>> a = Poly(y + x**2 - 3, y, x, domain='ZZ')
>>> b = Poly(-y + x - 4, y, x, domain='ZZ')
>>> solve_biquadratic(a, b, NewOption)
[(7/2 - sqrt(29)/2, -sqrt(29)/2 - 1/2), (sqrt(29)/2 + 7/2, -1/2 + \
sqrt(29)/2)]
"""
G = groebner([f, g])
if len(G) == 1 and G[0].is_ground:
return None
if len(G) != 2:
raise SolveFailed
x, y = opt.gens
p, q = G
if not p.gcd(q).is_ground:
# not 0-dimensional
raise SolveFailed
p = Poly(p, x, expand=False)
p_roots = [rcollect(expr, y) for expr in roots(p).keys()]
q = q.ltrim(-1)
q_roots = list(roots(q).keys())
solutions = [(p_root.subs(y, q_root), q_root) for q_root, p_root in
itertools.product(q_roots, p_roots)]
return sorted(solutions, key=default_sort_key)
def solve_generic(polys, opt, strict=False):
"""
Solve a generic system of polynomial equations.
Returns all possible solutions over C[x_1, x_2, ..., x_m] of a
set F = { f_1, f_2, ..., f_n } of polynomial equations, using
Groebner basis approach. For now only zero-dimensional systems
are supported, which means F can have at most a finite number
of solutions. If the basis contains only the ground, None is
returned.
The algorithm works by the fact that, supposing G is the basis
of F with respect to an elimination order (here lexicographic
order is used), G and F generate the same ideal, they have the
same set of solutions. By the elimination property, if G is a
reduced, zero-dimensional Groebner basis, then there exists an
univariate polynomial in G (in its last variable). This can be
solved by computing its roots. Substituting all computed roots
for the last (eliminated) variable in other elements of G, new
polynomial system is generated. Applying the above procedure
recursively, a finite number of solutions can be found.
The ability of finding all solutions by this procedure depends
on the root finding algorithms. If no solutions were found, it
means only that roots() failed, but the system is solvable. To
overcome this difficulty use numerical algorithms instead.
Parameters
==========
polys: a list/tuple/set
Listing all the polynomial equations that are needed to be solved
opt: an Options object
For specifying keyword arguments and generators
strict: a boolean
If strict is True, NotImplementedError will be raised if the solution
is known to be incomplete
Returns
=======
List[Tuple]
a list of tuples with elements being solutions for the
symbols in the order they were passed as gens
None
None is returned when the computed basis contains only the ground.
References
==========
.. [Buchberger01] B. Buchberger, Groebner Bases: A Short
Introduction for Systems Theorists, In: R. Moreno-Diaz,
B. Buchberger, J.L. Freire, Proceedings of EUROCAST'01,
February, 2001
.. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties
and Algorithms, Springer, Second Edition, 1997, pp. 112
Raises
========
NotImplementedError
If the system is not zero-dimensional (does not have a finite
number of solutions)
UnsolvableFactorError
If ``strict`` is True and not all solution components are
expressible in radicals
Examples
========
>>> from sympy import Poly, Options
>>> from sympy.solvers.polysys import solve_generic
>>> from sympy.abc import x, y
>>> NewOption = Options((x, y), {'domain': 'ZZ'})
>>> a = Poly(x - y + 5, x, y, domain='ZZ')
>>> b = Poly(x + y - 3, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(-1, 4)]
>>> a = Poly(x - 2*y + 5, x, y, domain='ZZ')
>>> b = Poly(2*x - y - 3, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(11/3, 13/3)]
>>> a = Poly(x**2 + y, x, y, domain='ZZ')
>>> b = Poly(x + y*4, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption)
[(0, 0), (1/4, -1/16)]
>>> a = Poly(x**5 - x + y**3, x, y, domain='ZZ')
>>> b = Poly(y**2 - 1, x, y, domain='ZZ')
>>> solve_generic([a, b], NewOption, strict=True)
Traceback (most recent call last):
...
UnsolvableFactorError
"""
def _is_univariate(f):
"""Returns True if 'f' is univariate in its last variable. """
for monom in f.monoms():
if any(monom[:-1]):
return False
return True
def _subs_root(f, gen, zero):
"""Replace generator with a root so that the result is nice. """
p = f.as_expr({gen: zero})
if f.degree(gen) >= 2:
p = p.expand(deep=False)
return p
def _solve_reduced_system(system, gens, entry=False):
"""Recursively solves reduced polynomial systems. """
if len(system) == len(gens) == 1:
# the below line will produce UnsolvableFactorError if
# strict=True and the solution from `roots` is incomplete
zeros = list(roots(system[0], gens[-1], strict=strict).keys())
return [(zero,) for zero in zeros]
basis = groebner(system, gens, polys=True)
if len(basis) == 1 and basis[0].is_ground:
if not entry:
return []
else:
return None
univariate = list(filter(_is_univariate, basis))
if len(basis) < len(gens):
raise NotImplementedError(filldedent('''
only zero-dimensional systems supported
(finite number of solutions)
'''))
if len(univariate) == 1:
f = univariate.pop()
else:
raise NotImplementedError(filldedent('''
only zero-dimensional systems supported
(finite number of solutions)
'''))
gens = f.gens
gen = gens[-1]
# the below line will produce UnsolvableFactorError if
# strict=True and the solution from `roots` is incomplete
zeros = list(roots(f.ltrim(gen), strict=strict).keys())
if not zeros:
return []
if len(basis) == 1:
return [(zero,) for zero in zeros]
solutions = []
for zero in zeros:
new_system = []
new_gens = gens[:-1]
for b in basis[:-1]:
eq = _subs_root(b, gen, zero)
if eq is not S.Zero:
new_system.append(eq)
for solution in _solve_reduced_system(new_system, new_gens):
solutions.append(solution + (zero,))
if solutions and len(solutions[0]) != len(gens):
raise NotImplementedError(filldedent('''
only zero-dimensional systems supported
(finite number of solutions)
'''))
return solutions
try:
result = _solve_reduced_system(polys, opt.gens, entry=True)
except CoercionFailed:
raise NotImplementedError
if result is not None:
return sorted(result, key=default_sort_key)
def solve_triangulated(polys, *gens, **args):
"""
Solve a polynomial system using Gianni-Kalkbrenner algorithm.
The algorithm proceeds by computing one Groebner basis in the ground
domain and then by iteratively computing polynomial factorizations in
appropriately constructed algebraic extensions of the ground domain.
Parameters
==========
polys: a list/tuple/set
Listing all the equations that are needed to be solved
gens: generators
generators of the equations in polys for which we want the
solutions
args: Keyword arguments
Special options for solving the equations
Returns
=======
List[Tuple]
A List of tuples. Solutions for symbols that satisfy the
equations listed in polys
Examples
========
>>> from sympy import solve_triangulated
>>> from sympy.abc import x, y, z
>>> F = [x**2 + y + z - 1, x + y**2 + z - 1, x + y + z**2 - 1]
>>> solve_triangulated(F, x, y, z)
[(0, 0, 1), (0, 1, 0), (1, 0, 0)]
Using extension for algebraic solutions.
>>> solve_triangulated(F, x, y, z, extension=True) #doctest: +NORMALIZE_WHITESPACE
[(0, 0, 1), (0, 1, 0), (1, 0, 0),
(CRootOf(x**2 + 2*x - 1, 0), CRootOf(x**2 + 2*x - 1, 0), CRootOf(x**2 + 2*x - 1, 0)),
(CRootOf(x**2 + 2*x - 1, 1), CRootOf(x**2 + 2*x - 1, 1), CRootOf(x**2 + 2*x - 1, 1))]
References
==========
1. Patrizia Gianni, Teo Mora, Algebraic Solution of System of
Polynomial Equations using Groebner Bases, AAECC-5 on Applied Algebra,
Algebraic Algorithms and Error-Correcting Codes, LNCS 356 247--257, 1989
"""
opt = build_options(gens, args)
G = groebner(polys, gens, polys=True)
G = list(reversed(G))
extension = opt.get('extension', False)
if extension:
def _solve_univariate(f):
return [r for r, _ in f.all_roots(multiple=False, radicals=False)]
else:
domain = opt.get('domain')
if domain is not None:
for i, g in enumerate(G):
G[i] = g.set_domain(domain)
def _solve_univariate(f):
return list(f.ground_roots().keys())
f, G = G[0].ltrim(-1), G[1:]
dom = f.get_domain()
zeros = _solve_univariate(f)
if extension:
solutions = {((zero,), dom.algebraic_field(zero)) for zero in zeros}
else:
solutions = {((zero,), dom) for zero in zeros}
var_seq = reversed(gens[:-1])
vars_seq = postfixes(gens[1:])
for var, vars in zip(var_seq, vars_seq):
_solutions = set()
for values, dom in solutions:
H, mapping = [], list(zip(vars, values))
for g in G:
_vars = (var,) + vars
if g.has_only_gens(*_vars) and g.degree(var) != 0:
if extension:
g = g.set_domain(g.domain.unify(dom))
h = g.ltrim(var).eval(dict(mapping))
if g.degree(var) == h.degree():
H.append(h)
p = min(H, key=lambda h: h.degree())
zeros = _solve_univariate(p)
for zero in zeros:
if not (zero in dom):
dom_zero = dom.algebraic_field(zero)
else:
dom_zero = dom
_solutions.add(((zero,) + values, dom_zero))
solutions = _solutions
return sorted((s for s, _ in solutions), key=default_sort_key)
def factor_system(eqs: Sequence[Expr | complex], gens: Sequence[Expr] = (), **kwargs: Any) -> list[list[Expr]]:
"""
Factorizes a system of polynomial equations into
irreducible subsystems.
Parameters
==========
eqs : list
List of expressions to be factored.
Each expression is assumed to be equal to zero.
gens : list, optional
Generator(s) of the polynomial ring.
If not provided, all free symbols will be used.
**kwargs : dict, optional
Same optional arguments taken by ``factor``
Returns
=======
list[list[Expr]]
A list of lists of expressions, where each sublist represents
an irreducible subsystem. When solved, each subsystem gives
one component of the solution. Only generic solutions are
returned (cases not requiring parameters to be zero).
Examples
========
>>> from sympy.solvers.polysys import factor_system, factor_system_cond
>>> from sympy.abc import x, y, a, b, c
A simple system with multiple solutions:
>>> factor_system([x**2 - 1, y - 1])
[[x + 1, y - 1], [x - 1, y - 1]]
A system with no solution:
>>> factor_system([x, 1])
[]
A system where any value of the symbol(s) is a solution:
>>> factor_system([x - x, (x + 1)**2 - (x**2 + 2*x + 1)])
[[]]
A system with no generic solution:
>>> factor_system([a*x*(x-1), b*y, c], [x, y])
[]
If c is added to the unknowns then the system has a generic solution:
>>> factor_system([a*x*(x-1), b*y, c], [x, y, c])
[[x - 1, y, c], [x, y, c]]
Alternatively :func:`factor_system_cond` can be used to get degenerate
cases as well:
>>> factor_system_cond([a*x*(x-1), b*y, c], [x, y])
[[x - 1, y, c], [x, y, c], [x - 1, b, c], [x, b, c], [y, a, c], [a, b, c]]
Each of the above cases is only satisfiable in the degenerate case `c = 0`.
The solution set of the original system represented
by eqs is the union of the solution sets of the
factorized systems.
An empty list [] means no generic solution exists.
A list containing an empty list [[]] means any value of
the symbol(s) is a solution.
See Also
========
factor_system_cond : Returns both generic and degenerate solutions
factor_system_bool : Returns a Boolean combination representing all solutions
sympy.polys.polytools.factor : Factors a polynomial into irreducible factors
over the rational numbers
"""
systems = _factor_system_poly_from_expr(eqs, gens, **kwargs)
systems_generic = [sys for sys in systems if not _is_degenerate(sys)]
systems_expr = [[p.as_expr() for p in system] for system in systems_generic]
return systems_expr
def _is_degenerate(system: list[Poly]) -> bool:
"""Helper function to check if a system is degenerate"""
return any(p.is_ground for p in system)
def factor_system_bool(eqs: Sequence[Expr | complex], gens: Sequence[Expr] = (), **kwargs: Any) -> Boolean:
"""
Factorizes a system of polynomial equations into irreducible DNF.
The system of expressions(eqs) is taken and a Boolean combination
of equations is returned that represents the same solution set.
The result is in disjunctive normal form (OR of ANDs).
Parameters
==========
eqs : list
List of expressions to be factored.
Each expression is assumed to be equal to zero.
gens : list, optional
Generator(s) of the polynomial ring.
If not provided, all free symbols will be used.
**kwargs : dict, optional
Optional keyword arguments
Returns
=======
Boolean:
A Boolean combination of equations. The result is typically in
the form of a conjunction (AND) of a disjunctive normal form
with additional conditions.
Examples
========
>>> from sympy.solvers.polysys import factor_system_bool
>>> from sympy.abc import x, y, a, b, c
>>> factor_system_bool([x**2 - 1])
Eq(x - 1, 0) | Eq(x + 1, 0)
>>> factor_system_bool([x**2 - 1, y - 1])
(Eq(x - 1, 0) & Eq(y - 1, 0)) | (Eq(x + 1, 0) & Eq(y - 1, 0))
>>> eqs = [a * (x - 1), b]
>>> factor_system_bool([a*(x - 1), b])
(Eq(a, 0) & Eq(b, 0)) | (Eq(b, 0) & Eq(x - 1, 0))
>>> factor_system_bool([a*x**2 - a, b*(x + 1), c], [x])
(Eq(c, 0) & Eq(x + 1, 0)) | (Eq(a, 0) & Eq(b, 0) & Eq(c, 0)) | (Eq(b, 0) & Eq(c, 0) & Eq(x - 1, 0))
>>> factor_system_bool([x**2 + 2*x + 1 - (x + 1)**2])
True
The result is logically equivalent to the system of equations
i.e. eqs. The function returns ``True`` when all values of
the symbol(s) is a solution and ``False`` when the system
cannot be solved.
See Also
========
factor_system : Returns factors and solvability condition separately
factor_system_cond : Returns both factors and conditions
"""
systems = factor_system_cond(eqs, gens, **kwargs)
return Or(*[And(*[Eq(eq, 0) for eq in sys]) for sys in systems])
def factor_system_cond(eqs: Sequence[Expr | complex], gens: Sequence[Expr] = (), **kwargs: Any) -> list[list[Expr]]:
"""
Factorizes a polynomial system into irreducible components and returns
both generic and degenerate solutions.
Parameters
==========
eqs : list
List of expressions to be factored.
Each expression is assumed to be equal to zero.
gens : list, optional
Generator(s) of the polynomial ring.
If not provided, all free symbols will be used.
**kwargs : dict, optional
Optional keyword arguments.
Returns
=======
list[list[Expr]]
A list of lists of expressions, where each sublist represents
an irreducible subsystem. Includes both generic solutions and
degenerate cases requiring equality conditions on parameters.
Examples
========
>>> from sympy.solvers.polysys import factor_system_cond
>>> from sympy.abc import x, y, a, b, c
>>> factor_system_cond([x**2 - 4, a*y, b], [x, y])
[[x + 2, y, b], [x - 2, y, b], [x + 2, a, b], [x - 2, a, b]]
>>> factor_system_cond([a*x*(x-1), b*y, c], [x, y])
[[x - 1, y, c], [x, y, c], [x - 1, b, c], [x, b, c], [y, a, c], [a, b, c]]
An empty list [] means no solution exists.
A list containing an empty list [[]] means any value of
the symbol(s) is a solution.
See Also
========
factor_system : Returns only generic solutions
factor_system_bool : Returns a Boolean combination representing all solutions
sympy.polys.polytools.factor : Factors a polynomial into irreducible factors
over the rational numbers
"""
systems_poly = _factor_system_poly_from_expr(eqs, gens, **kwargs)
systems = [[p.as_expr() for p in system] for system in systems_poly]
return systems
def _factor_system_poly_from_expr(
eqs: Sequence[Expr | complex], gens: Sequence[Expr], **kwargs: Any
) -> list[list[Poly]]:
"""
Convert expressions to polynomials and factor the system.
Takes a sequence of expressions, converts them to
polynomials, and factors the resulting system. Handles both regular
polynomial systems and purely numerical cases.
"""
try:
polys, opts = parallel_poly_from_expr(eqs, *gens, **kwargs)
only_numbers = False
except (GeneratorsNeeded, PolificationFailed):
_u = Dummy('u')
polys, opts = parallel_poly_from_expr(eqs, [_u], **kwargs)
assert opts['domain'].is_Numerical
only_numbers = True
if only_numbers:
return [[]] if all(p == 0 for p in polys) else []
return factor_system_poly(polys)
def factor_system_poly(polys: list[Poly]) -> list[list[Poly]]:
"""
Factors a system of polynomial equations into irreducible subsystems
Core implementation that works directly with Poly instances.
Parameters
==========
polys : list[Poly]
A list of Poly instances to be factored.
Returns
=======
list[list[Poly]]
A list of lists of polynomials, where each sublist represents
an irreducible component of the solution. Includes both
generic and degenerate cases.
Examples
========
>>> from sympy import symbols, Poly, ZZ
>>> from sympy.solvers.polysys import factor_system_poly
>>> a, b, c, x = symbols('a b c x')
>>> p1 = Poly((a - 1)*(x - 2), x, domain=ZZ[a,b,c])
>>> p2 = Poly((b - 3)*(x - 2), x, domain=ZZ[a,b,c])
>>> p3 = Poly(c, x, domain=ZZ[a,b,c])
The equation to be solved for x is ``x - 2 = 0`` provided either
of the two conditions on the parameters ``a`` and ``b`` is nonzero
and the constant parameter ``c`` should be zero.
>>> sys1, sys2 = factor_system_poly([p1, p2, p3])
>>> sys1
[Poly(x - 2, x, domain='ZZ[a,b,c]'),
Poly(c, x, domain='ZZ[a,b,c]')]
>>> sys2
[Poly(a - 1, x, domain='ZZ[a,b,c]'),
Poly(b - 3, x, domain='ZZ[a,b,c]'),
Poly(c, x, domain='ZZ[a,b,c]')]
An empty list [] when returned means no solution exists.
Whereas a list containing an empty list [[]] means any value is a solution.
See Also
========
factor_system : Returns only generic solutions
factor_system_bool : Returns a Boolean combination representing the solutions
factor_system_cond : Returns both generic and degenerate solutions
sympy.polys.polytools.factor : Factors a polynomial into irreducible factors
over the rational numbers
"""
if not all(isinstance(poly, Poly) for poly in polys):
raise TypeError("polys should be a list of Poly instances")
if not polys:
return [[]]
base_domain = polys[0].domain
base_gens = polys[0].gens
if not all(poly.domain == base_domain and poly.gens == base_gens for poly in polys[1:]):
raise DomainError("All polynomials must have the same domain and generators")
factor_sets = []
for poly in polys:
constant, factors_mult = poly.factor_list()
if constant.is_zero is True:
continue
elif constant.is_zero is False:
if not factors_mult:
return []
factor_sets.append([f for f, _ in factors_mult])
else:
constant = sqf_part(factor_terms(constant).as_coeff_Mul()[1])
constp = Poly(constant, base_gens, domain=base_domain)
factors = [f for f, _ in factors_mult]
factors.append(constp)
factor_sets.append(factors)
if not factor_sets:
return [[]]
result = _factor_sets(factor_sets)
return _sort_systems(result)
def _factor_sets_slow(eqs: list[list]) -> set[frozenset]:
"""
Helper to find the minimal set of factorised subsystems that is
equivalent to the original system.
The result is in DNF.
"""
if not eqs:
return {frozenset()}
systems_set = {frozenset(sys) for sys in cartes(*eqs)}
return {s1 for s1 in systems_set if not any(s1 > s2 for s2 in systems_set)}
def _factor_sets(eqs: list[list]) -> set[frozenset]:
"""
Helper that builds factor combinations.
"""
if not eqs:
return {frozenset()}
current_set = min(eqs, key=len)
other_sets = [s for s in eqs if s is not current_set]
stack = [(factor, [s for s in other_sets if factor not in s], {factor})
for factor in current_set]
result = set()
while stack:
factor, remaining_sets, current_solution = stack.pop()
if not remaining_sets:
result.add(frozenset(current_solution))
continue
next_set = min(remaining_sets, key=len)
next_remaining = [s for s in remaining_sets if s is not next_set]
for next_factor in next_set:
valid_remaining = [s for s in next_remaining if next_factor not in s]
new_solution = current_solution | {next_factor}
stack.append((next_factor, valid_remaining, new_solution))
return {s1 for s1 in result if not any(s1 > s2 for s2 in result)}
def _sort_systems(systems: Iterable[Iterable[Poly]]) -> list[list[Poly]]:
"""Sorts a list of lists of polynomials"""
systems_list = [sorted(s, key=_poly_sort_key, reverse=True) for s in systems]
return sorted(systems_list, key=_sys_sort_key, reverse=True)
def _poly_sort_key(poly):
"""Sort key for polynomials"""
if poly.domain.is_FF:
poly = poly.set_domain(ZZ)
return poly.degree_list(), poly.rep.to_list()
def _sys_sort_key(sys):
"""Sort key for lists of polynomials"""
return list(zip(*map(_poly_sort_key, sys)))
| SolveFailed |
python | huggingface__transformers | tests/models/udop/test_tokenization_udop.py | {
"start": 1278,
"end": 83357
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "microsoft/udop-large"
tokenizer_class = UdopTokenizer
rust_tokenizer_class = UdopTokenizer
from_pretrained_filter = filter_non_english
test_seq2seq = False
test_sentencepiece = True
def get_words_and_boxes(self):
words = ["a", "weirdly", "test", "hello"]
boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129], [961, 885, 992, 912]]
return words, boxes
def get_words_and_boxes_batch(self):
words = [["a", "weirdly", "test"], ["hello", "my", "name", "is", "bob"]]
boxes = [
[[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
[[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
]
return words, boxes
def get_question_words_and_boxes(self):
question = "what's his name?"
words = ["a", "weirdly", "test"]
boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
return question, words, boxes
def get_question_words_and_boxes_batch(self):
questions = ["what's his name?", "how is he called?"]
words = [["a", "weirdly", "test"], ["what", "a", "laif", "gastn"]]
boxes = [
[[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
[[256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
]
return questions, words, boxes
def get_empty_words_and_boxes(self):
words = ["test", "empty", ""]
boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
return words, boxes
def get_empty_words_and_boxes_batch(self):
words = [["test", "empty", ""], ["one", "more", "empty", ""]]
boxes = [
[[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
[[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57]],
]
return words, boxes
def get_empty_question_words_and_boxes(self):
question = ""
words = ["test", "empty", ""]
boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
return question, words, boxes
def get_empty_question_words_and_boxes_batch(self):
questions = ["what's his name?", ""]
words = [["test", "empty", ""], ["one", "more", "empty", ""]]
boxes = [
[[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
[[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57]],
]
return questions, words, boxes
# @classmethod
# def setUpClass(cls):
# super().setUpClass()
# # We have a SentencePiece fixture for testing
# # Create tokenizer from SentencePiece model using converter
# from transformers.convert_slow_tokenizer import UdopConverter
# from transformers.tokenization_utils import PreTrainedTokenizer
# # Create a minimal mock tokenizer for the converter
# class _MockTokenizer:
# def __init__(self, vocab_file_path, eos_token, sep_token):
# self.vocab_file = vocab_file_path
# self._eos_token = eos_token
# self._sep_token = sep_token
# import sentencepiece as spm
# self._sp_model = spm.SentencePieceProcessor()
# self._sp_model.Load(vocab_file_path)
# def convert_tokens_to_ids(self, token):
# return self._sp_model.piece_to_id(token)
# mock_tokenizer = _MockTokenizer(SAMPLE_VOCAB, "</s>", "</s>")
# converter = UdopConverter(mock_tokenizer)
# tokenizer_object = converter.converted()
# # Extract vocab from the tokenizer object's model (Unigram model)
# # The model has a vocab attribute that's a list of (token, score) tuples
# model = tokenizer_object.model
# if hasattr(model, "vocab"):
# vocab = model.vocab
# else:
# # Fallback: get vocab from tokenizer's get_vocab() method
# vocab_dict = tokenizer_object.get_vocab()
# vocab = [(token, 0.0) for token in vocab_dict.keys()]
# tokenizer = UdopTokenizer(vocab=vocab)
# tokenizer.save_pretrained(cls.tmpdirname)
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00e9d,running"
output_text = "unwanted, running"
return input_text, output_text
def convert_batch_encode_plus_format_to_encode_plus(self, batch_encode_plus_sequences):
"""Helper method to convert batch_encode_plus output to list of encode_plus outputs"""
# Get the batch size
first_key = list(batch_encode_plus_sequences.keys())[0]
batch_size = len(batch_encode_plus_sequences[first_key])
# Convert to list of dicts
encode_plus_sequences = []
for i in range(batch_size):
single_sequence = {}
for key, value in batch_encode_plus_sequences.items():
if key != "encodings": # Skip the encodings attribute
single_sequence[key] = value[i]
encode_plus_sequences.append(single_sequence)
return encode_plus_sequences
def _check_no_pad_token_padding(self, tokenizer, sequences):
"""Override to handle UdopTokenizer's requirement for boxes parameter"""
# if tokenizer does not have pad_token_id, an error should be thrown
if tokenizer.pad_token_id is None:
with self.assertRaises(ValueError):
# For UdopTokenizer, we need boxes, so create dummy boxes
if isinstance(sequences, list) and sequences and isinstance(sequences[0], list):
# Batch of sequences
boxes = [[[0, 0, 0, 0] for _ in seq] for seq in sequences]
tokenizer(sequences, boxes=boxes, padding="longest")
elif isinstance(sequences, list):
# Single sequence (list of words)
boxes = [[0, 0, 0, 0] for _ in sequences]
tokenizer(sequences, boxes=boxes, padding=True)
else:
# Single string (shouldn't happen for Udop, but handle it)
tokenizer(sequences, padding=True)
# add pad_token_id to pass subsequent tests
tokenizer.add_special_tokens({"pad_token": "<PAD>"})
# override test in `test_tokenization_common.py` because of the required input format of the `__call__`` method of
# this tokenizer
def test_save_sentencepiece_tokenizer(self) -> None:
if not self.test_sentencepiece:
self.skipTest(reason="test_sentencepiece is set to False")
# We want to verify that we will be able to save the tokenizer even if the original files that were used to
# build the tokenizer have been deleted in the meantime.
words, boxes = self.get_words_and_boxes()
tokenizer_slow_1 = self.get_tokenizer()
encoding_tokenizer_slow_1 = tokenizer_slow_1(
words,
boxes=boxes,
)
tmpdirname_1 = tempfile.mkdtemp()
tmpdirname_2 = tempfile.mkdtemp()
tokenizer_slow_1.save_pretrained(tmpdirname_1)
tokenizer_slow_2 = self.tokenizer_class.from_pretrained(tmpdirname_1)
encoding_tokenizer_slow_2 = tokenizer_slow_2(
words,
boxes=boxes,
)
shutil.rmtree(tmpdirname_1)
tokenizer_slow_2.save_pretrained(tmpdirname_2)
tokenizer_slow_3 = self.tokenizer_class.from_pretrained(tmpdirname_2)
encoding_tokenizer_slow_3 = tokenizer_slow_3(
words,
boxes=boxes,
)
shutil.rmtree(tmpdirname_2)
self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_2)
self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_3)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("microsoft/udop-large")
question, words, boxes = self.get_question_words_and_boxes()
text = tokenizer.encode_boxes(
question.split(),
boxes=[tokenizer.pad_token_box for _ in range(len(question.split()))],
add_special_tokens=False,
)
text_2 = tokenizer.encode_boxes(words, boxes=boxes, add_special_tokens=False)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_pair == text + [1] + text_2 + [1]
def test_pad_token_initialization(self):
"""Test that passing pad_token when creating a tokenizer works correctly."""
# UdopTokenizer requires pretokenized input with boxes
tokenizer = self.get_tokenizer(pad_token="[PAD]")
# Verify the pad_token was set correctly
self.assertEqual(tokenizer.pad_token, "[PAD]")
self.assertIsNotNone(tokenizer.pad_token_id)
# Test with two sequences of different lengths to trigger padding
seq_0 = ["Test", "this", "method"]
seq_1 = ["With", "these", "inputs", "and", "some", "extra"]
boxes_0 = [[1, 2, 3, 4] for _ in seq_0]
boxes_1 = [[1, 2, 3, 4] for _ in seq_1]
# Test padding works with the custom pad_token
output_with_padding = tokenizer(
[seq_0, seq_1],
boxes=[boxes_0, boxes_1],
padding=True,
)
# Check padding was applied correctly
self.assertEqual(len(output_with_padding["input_ids"][0]), len(output_with_padding["input_ids"][1]))
def test_add_special_tokens(self):
tokenizers: list[UdopTokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
special_token = "[SPECIAL_TOKEN]"
special_token_box = [1000, 1000, 1000, 1000]
tokenizer.add_special_tokens({"cls_token": special_token})
encoded_special_token = tokenizer.encode_boxes(
[special_token], boxes=[special_token_box], add_special_tokens=False
)
self.assertEqual(len(encoded_special_token), 1)
decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
self.assertTrue(special_token not in decoded)
@require_tokenizers
def test_encode_decode_with_spaces(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)]
tokenizer.add_tokens(new_toks)
input = "[ABC][DEF][ABC][DEF]"
if self.space_between_special_tokens:
output = "[ABC] [DEF] [ABC] [DEF]"
else:
output = input
encoded = tokenizer.encode_boxes(input.split(), boxes=boxes, add_special_tokens=False)
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
self.assertIn(decoded, [output, output.lower()])
@parameterized.expand([(True,), (False,)])
def test_encode_plus_with_padding(self, use_padding_as_call_kwarg: bool):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
# check correct behaviour if no pad_token_id exists and add it eventually
self._check_no_pad_token_padding(tokenizer, words)
padding_size = 10
padding_idx = tokenizer.pad_token_id
encoded_sequence = tokenizer.encode_plus_boxes(words, boxes=boxes, return_special_tokens_mask=True)
input_ids = encoded_sequence["input_ids"]
special_tokens_mask = encoded_sequence["special_tokens_mask"]
sequence_length = len(input_ids)
# Test 'longest' and 'no_padding' don't do anything
tokenizer.padding_side = "right"
not_padded_sequence = tokenizer.encode_plus_boxes(
words,
boxes=boxes,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertTrue(sequence_length == not_padded_sequence_length)
self.assertTrue(input_ids == not_padded_input_ids)
self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)
not_padded_sequence = tokenizer.encode_plus_boxes(
words,
boxes=boxes,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertTrue(sequence_length == not_padded_sequence_length)
self.assertTrue(input_ids == not_padded_input_ids)
self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)
# Test right padding
tokenizer_kwargs_right = {
"max_length": sequence_length + padding_size,
"padding": "max_length",
"return_special_tokens_mask": True,
}
if not use_padding_as_call_kwarg:
tokenizer.padding_side = "right"
else:
tokenizer_kwargs_right["padding_side"] = "right"
right_padded_sequence = tokenizer.encode_plus_boxes(words, boxes=boxes, **tokenizer_kwargs_right)
right_padded_input_ids = right_padded_sequence["input_ids"]
right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
right_padded_sequence_length = len(right_padded_input_ids)
self.assertTrue(sequence_length + padding_size == right_padded_sequence_length)
self.assertTrue(input_ids + [padding_idx] * padding_size == right_padded_input_ids)
self.assertTrue(special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask)
# Test left padding
tokenizer_kwargs_left = {
"max_length": sequence_length + padding_size,
"padding": "max_length",
"return_special_tokens_mask": True,
}
if not use_padding_as_call_kwarg:
tokenizer.padding_side = "left"
else:
tokenizer_kwargs_left["padding_side"] = "left"
left_padded_sequence = tokenizer.encode_plus_boxes(words, boxes=boxes, **tokenizer_kwargs_left)
left_padded_input_ids = left_padded_sequence["input_ids"]
left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
left_padded_sequence_length = len(left_padded_input_ids)
self.assertTrue(sequence_length + padding_size == left_padded_sequence_length)
self.assertTrue([padding_idx] * padding_size + input_ids == left_padded_input_ids)
self.assertTrue([1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask)
if "token_type_ids" in tokenizer.model_input_names:
token_type_ids = encoded_sequence["token_type_ids"]
left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
right_padded_token_type_ids = right_padded_sequence["token_type_ids"]
assert token_type_ids + [0] * padding_size == right_padded_token_type_ids
assert [0] * padding_size + token_type_ids == left_padded_token_type_ids
if "attention_mask" in tokenizer.model_input_names:
attention_mask = encoded_sequence["attention_mask"]
right_padded_attention_mask = right_padded_sequence["attention_mask"]
left_padded_attention_mask = left_padded_sequence["attention_mask"]
self.assertTrue(attention_mask + [0] * padding_size == right_padded_attention_mask)
self.assertTrue([0] * padding_size + attention_mask == left_padded_attention_mask)
def test_internal_consistency(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
tokens = []
for word in words:
tokens.extend(tokenizer.tokenize(word))
ids = tokenizer.convert_tokens_to_ids(tokens)
ids_2 = tokenizer.encode_boxes(words, boxes=boxes, add_special_tokens=False)
self.assertListEqual(ids, ids_2)
tokens_2 = tokenizer.convert_ids_to_tokens(ids)
self.assertNotEqual(len(tokens_2), 0)
text_2 = tokenizer.decode(ids)
self.assertIsInstance(text_2, str)
output_text = "a weirdly test hello"
self.assertEqual(text_2, output_text)
def test_mask_output(self):
tokenizers = self.get_tokenizers(fast=False, do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
if (
tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer"
and "token_type_ids" in tokenizer.model_input_names
):
information = tokenizer.encode_plus_boxes(words, boxes=boxes, add_special_tokens=True)
sequences, mask = information["input_ids"], information["token_type_ids"]
self.assertEqual(len(sequences), len(mask))
def test_number_of_added_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# test 1: single sequence
words, boxes = self.get_words_and_boxes()
sequences = tokenizer.encode_boxes(words, boxes=boxes, add_special_tokens=False)
attached_sequences = tokenizer.encode_boxes(words, boxes=boxes, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences)
)
# test 2: two sequences
question, words, boxes = self.get_question_words_and_boxes()
sequences = tokenizer.encode_boxes(question, words, boxes=boxes, add_special_tokens=False)
attached_sequences = tokenizer.encode_boxes(question, words, boxes=boxes, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)
)
    def test_padding(self, max_length=50):
        """Exhaustively check padding equivalences across the encoding entry points.

        For every tokenizer in ``self.tokenizers_list`` this verifies that:

        - ``padding="max_length"`` pads to exactly ``max_length``;
        - ``padding=True`` and ``padding="longest"`` yield identical results;
        - the same holds for ``encode_boxes``, ``encode_plus_boxes`` and
          ``batch_encode_plus_boxes``, for both single and pair (question + words) inputs;
        - calling ``tokenizer.pad`` *after* tokenization matches padding requested at
          encode time.

        Note: ``input_r`` and ``input_p`` are produced by the *same* tokenizer here;
        the naming presumably stems from the Rust-vs-Python comparison tests this was
        derived from — each pair of calls checks two equivalent padding spellings.
        """
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer = self.get_tokenizer(pretrained_name, **kwargs)
                pad_token_id = tokenizer.pad_token_id
                # Encode - Simple input
                words, boxes = self.get_words_and_boxes()
                input_r = tokenizer.encode_boxes(words, boxes=boxes, max_length=max_length, padding="max_length")
                input_p = tokenizer.encode_boxes(words, boxes=boxes, max_length=max_length, padding="max_length")
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
                # padding="longest" and padding=True must be synonyms.
                input_r = tokenizer.encode_boxes(words, boxes=boxes, padding="longest")
                input_p = tokenizer.encode_boxes(words, boxes=boxes, padding=True)
                self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
                # Encode - Pair input
                question, words, boxes = self.get_question_words_and_boxes()
                input_r = tokenizer.encode_boxes(
                    question, words, boxes=boxes, max_length=max_length, padding="max_length"
                )
                input_p = tokenizer.encode_boxes(
                    question, words, boxes=boxes, max_length=max_length, padding="max_length"
                )
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer.encode_boxes(question, words, boxes=boxes, padding=True)
                input_p = tokenizer.encode_boxes(question, words, boxes=boxes, padding="longest")
                self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
                # Encode_plus - Simple input
                words, boxes = self.get_words_and_boxes()
                input_r = tokenizer.encode_plus_boxes(words, boxes=boxes, max_length=max_length, padding="max_length")
                input_p = tokenizer.encode_plus_boxes(words, boxes=boxes, max_length=max_length, padding="max_length")
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
                input_r = tokenizer.encode_plus_boxes(words, boxes=boxes, padding="longest")
                input_p = tokenizer.encode_plus_boxes(words, boxes=boxes, padding=True)
                self.assert_padded_input_match(
                    input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
                )
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
                # Encode_plus - Pair input
                question, words, boxes = self.get_question_words_and_boxes()
                input_r = tokenizer.encode_plus_boxes(
                    question, words, boxes=boxes, max_length=max_length, padding="max_length"
                )
                input_p = tokenizer.encode_plus_boxes(
                    question, words, boxes=boxes, max_length=max_length, padding="max_length"
                )
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
                input_r = tokenizer.encode_plus_boxes(question, words, boxes=boxes, padding="longest")
                input_p = tokenizer.encode_plus_boxes(question, words, boxes=boxes, padding=True)
                self.assert_padded_input_match(
                    input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
                )
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
                # Batch_encode_plus - Simple input
                words, boxes = self.get_words_and_boxes_batch()
                input_r = tokenizer.batch_encode_plus_boxes(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    padding="max_length",
                )
                input_p = tokenizer.batch_encode_plus_boxes(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    padding="max_length",
                )
                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer.batch_encode_plus_boxes(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    padding="longest",
                )
                input_p = tokenizer.batch_encode_plus_boxes(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    padding=True,
                )
                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
                input_r = tokenizer.batch_encode_plus_boxes(words, boxes=boxes, padding="longest")
                input_p = tokenizer.batch_encode_plus_boxes(words, boxes=boxes, padding=True)
                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
                # Batch_encode_plus - Pair input
                questions, words, boxes = self.get_question_words_and_boxes_batch()
                input_r = tokenizer.batch_encode_plus_boxes(
                    list(zip(questions, words)),
                    is_pair=True,
                    boxes=boxes,
                    max_length=max_length,
                    truncation=True,
                    padding="max_length",
                )
                input_p = tokenizer.batch_encode_plus_boxes(
                    list(zip(questions, words)),
                    is_pair=True,
                    boxes=boxes,
                    max_length=max_length,
                    truncation=True,
                    padding="max_length",
                )
                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer.batch_encode_plus_boxes(
                    list(zip(questions, words)),
                    is_pair=True,
                    boxes=boxes,
                    padding=True,
                )
                input_p = tokenizer.batch_encode_plus_boxes(
                    list(zip(questions, words)),
                    is_pair=True,
                    boxes=boxes,
                    padding="longest",
                )
                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
                # Using pad on single examples after tokenization
                words, boxes = self.get_words_and_boxes()
                input_r = tokenizer.encode_plus_boxes(words, boxes=boxes)
                input_r = tokenizer.pad(input_r)
                input_p = tokenizer.encode_plus_boxes(words, boxes=boxes)
                input_p = tokenizer.pad(input_p)
                self.assert_padded_input_match(
                    input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
                )
                # Using pad on single examples after tokenization, this time to a fixed max_length
                input_r = tokenizer.encode_plus_boxes(words, boxes=boxes)
                input_r = tokenizer.pad(input_r, max_length=max_length, padding="max_length")
                input_p = tokenizer.encode_plus_boxes(words, boxes=boxes)
                input_p = tokenizer.pad(input_p, max_length=max_length, padding="max_length")
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                # Using pad after tokenization
                words, boxes = self.get_words_and_boxes_batch()
                input_r = tokenizer.batch_encode_plus_boxes(
                    words,
                    boxes=boxes,
                )
                input_r = tokenizer.pad(input_r)
                input_p = tokenizer.batch_encode_plus_boxes(
                    words,
                    boxes=boxes,
                )
                input_p = tokenizer.pad(input_p)
                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
                # Using pad after tokenization, this time to a fixed max_length
                words, boxes = self.get_words_and_boxes_batch()
                input_r = tokenizer.batch_encode_plus_boxes(
                    words,
                    boxes=boxes,
                )
                input_r = tokenizer.pad(input_r, max_length=max_length, padding="max_length")
                input_p = tokenizer.batch_encode_plus_boxes(
                    words,
                    boxes=boxes,
                )
                input_p = tokenizer.pad(input_p, max_length=max_length, padding="max_length")
                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
def test_call(self):
# Tests that all call wrap to encode_plus and batch_encode_plus
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Test not batched
words, boxes = self.get_words_and_boxes()
encoded_sequences_1 = tokenizer.encode_plus_boxes(words, boxes=boxes)
encoded_sequences_2 = tokenizer(words, boxes=boxes)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test not batched pairs
question, words, boxes = self.get_question_words_and_boxes()
encoded_sequences_1 = tokenizer.encode_plus_boxes(words, boxes=boxes)
encoded_sequences_2 = tokenizer(words, boxes=boxes)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test batched
words, boxes = self.get_words_and_boxes_batch()
encoded_sequences_1 = tokenizer.batch_encode_plus_boxes(words, is_pair=False, boxes=boxes)
encoded_sequences_2 = tokenizer(words, boxes=boxes)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
    def test_batch_encode_plus_batch_sequence_length(self):
        """Batch encoding must equal per-example encoding, both padded and unpadded.

        Also checks that ``padding="longest"`` and ``padding=False`` are insensitive
        to a ``max_length`` larger than the longest sequence.
        """
        # Tests that all encoded values have the correct size
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes_batch()
                # Reference: encode every example individually, no padding.
                encoded_sequences = [
                    tokenizer.encode_plus_boxes(words_example, boxes=boxes_example)
                    for words_example, boxes_example in zip(words, boxes)
                ]
                encoded_sequences_batch = tokenizer.batch_encode_plus_boxes(
                    words, is_pair=False, boxes=boxes, padding=False
                )
                self.assertListEqual(
                    encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
                )
                # Length of the longest unpadded sequence; used as the padding target below.
                maximum_length = len(
                    max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len)
                )
                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, words)
                encoded_sequences_padded = [
                    tokenizer.encode_plus_boxes(
                        words_example, boxes=boxes_example, max_length=maximum_length, padding="max_length"
                    )
                    for words_example, boxes_example in zip(words, boxes)
                ]
                encoded_sequences_batch_padded = tokenizer.batch_encode_plus_boxes(
                    words, is_pair=False, boxes=boxes, padding=True
                )
                self.assertListEqual(
                    encoded_sequences_padded,
                    self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded),
                )
                # check 'longest' is unsensitive to a max length
                encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus_boxes(
                    words, is_pair=False, boxes=boxes, padding=True
                )
                encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus_boxes(
                    words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding="longest"
                )
                for key in encoded_sequences_batch_padded_1:
                    self.assertListEqual(
                        encoded_sequences_batch_padded_1[key],
                        encoded_sequences_batch_padded_2[key],
                    )
                # check 'no_padding' is unsensitive to a max length
                encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus_boxes(
                    words, is_pair=False, boxes=boxes, padding=False
                )
                encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus_boxes(
                    words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding=False
                )
                for key in encoded_sequences_batch_padded_1:
                    self.assertListEqual(
                        encoded_sequences_batch_padded_1[key],
                        encoded_sequences_batch_padded_2[key],
                    )
    @unittest.skip(reason="batch_encode_plus does not handle overflowing tokens.")
    def test_batch_encode_plus_overflowing_tokens(self):
        """Intentionally skipped; see the skip reason above."""
        pass
def test_batch_encode_plus_padding(self):
# Test that padded sequences are equivalent between batch_encode_plus and encode_plus
# Right padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes_batch()
max_length = 100
# check correct behaviour if no pad_token_id exists and add it eventually
self._check_no_pad_token_padding(tokenizer, words)
encoded_sequences = [
tokenizer.encode_plus_boxes(
words_example, boxes=boxes_example, max_length=max_length, padding="max_length"
)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch = tokenizer.batch_encode_plus_boxes(
words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
# Left padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokenizer.padding_side = "left"
words, boxes = self.get_words_and_boxes_batch()
max_length = 100
# check correct behaviour if no pad_token_id exists and add it eventually
self._check_no_pad_token_padding(tokenizer, words)
encoded_sequences = [
tokenizer.encode_plus_boxes(
words_example, boxes=boxes_example, max_length=max_length, padding="max_length"
)
for words_example, boxes_example in zip(words, boxes)
]
encoded_sequences_batch = tokenizer.batch_encode_plus_boxes(
words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
def test_padding_to_multiple_of(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.pad_token is None:
self.skipTest(reason="No padding token.")
else:
words, boxes = self.get_words_and_boxes()
normal_tokens = tokenizer(words, boxes=boxes, padding=True, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
normal_tokens = tokenizer(words, boxes=boxes, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
# Should also work with truncation
normal_tokens = tokenizer(words, boxes=boxes, padding=True, truncation=True, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
# truncation to something which is not a multiple of pad_to_multiple_of raises an error
self.assertRaises(
ValueError,
tokenizer.__call__,
words,
boxes=boxes,
padding=True,
truncation=True,
max_length=12,
pad_to_multiple_of=8,
)
def test_tokenizer_slow_store_full_signature(self):
signature = inspect.signature(self.tokenizer_class.__init__)
tokenizer = self.get_tokenizer()
for parameter_name, parameter in signature.parameters.items():
if parameter.default != inspect.Parameter.empty:
self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_special_tokens_mask_input_pairs(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
encoded_sequence = tokenizer.encode_boxes(words, boxes=boxes, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus_boxes(
words,
boxes=boxes,
add_special_tokens=True,
return_special_tokens_mask=True,
# add_prefix_space=False,
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
]
filtered_sequence = [x for x in filtered_sequence if x is not None]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_special_tokens_mask(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
# Testing single inputs
encoded_sequence = tokenizer.encode_boxes(words, boxes=boxes, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus_boxes(
words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_save_and_load_tokenizer(self):
# safety check on max_len default value so we are sure the test works
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length, 42)
# Now let's start the test
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
words, boxes = self.get_words_and_boxes()
tmpdirname = tempfile.mkdtemp()
before_tokens = tokenizer.encode_boxes(words, boxes=boxes, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode_boxes(words, boxes=boxes, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
shutil.rmtree(tmpdirname)
    @unittest.skip(reason="Not implemented")
    def test_right_and_left_truncation(self):
        """Intentionally skipped; see the skip reason above."""
        pass
def test_right_and_left_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
sequence = "Sequence"
padding_size = 10
# check correct behaviour if no pad_token_id exists and add it eventually
self._check_no_pad_token_padding(tokenizer, sequence)
padding_idx = tokenizer.pad_token_id
# RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode_boxes(words, boxes=boxes)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode_boxes(
words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "left"
encoded_sequence = tokenizer.encode_boxes(words, boxes=boxes)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode_boxes(
words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert [padding_idx] * padding_size + encoded_sequence == padded_sequence
# RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding'
encoded_sequence = tokenizer.encode_boxes(words, boxes=boxes)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode_boxes(words, boxes=boxes, padding=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode_boxes(words, boxes=boxes, padding="longest")
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode_boxes(words, boxes=boxes)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode_boxes(words, boxes=boxes, padding=False)
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
def test_token_type_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# test 1: single sequence
words, boxes = self.get_words_and_boxes()
output = tokenizer(words, boxes=boxes, return_token_type_ids=True)
# Assert that the token type IDs have the same length as the input IDs
self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))
# Assert that the token type IDs have the same length as the attention mask
self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"]))
self.assertIn(0, output["token_type_ids"])
self.assertNotIn(1, output["token_type_ids"])
# test 2: two sequences (question + words)
question, words, boxes = self.get_question_words_and_boxes()
output = tokenizer(question, words, boxes, return_token_type_ids=True)
# Assert that the token type IDs have the same length as the input IDs
self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))
# Assert that the token type IDs have the same length as the attention mask
self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"]))
self.assertIn(0, output["token_type_ids"])
self.assertNotIn(1, output["token_type_ids"])
    def test_offsets_mapping(self):
        """offset_mapping must have one entry per token, and the special-tokens mask
        must sum to exactly num_special_tokens_to_add, for single and pair inputs."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.get_tokenizer(pretrained_name, **kwargs)
                text = ["a", "wonderful", "test"]
                boxes = [[1, 8, 12, 20] for _ in range(len(text))]
                # No pair
                tokens_with_offsets = tokenizer_r.encode_plus_boxes(
                    text,
                    boxes=boxes,
                    return_special_tokens_mask=True,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                added_tokens = tokenizer_r.num_special_tokens_to_add(False)
                offsets = tokens_with_offsets["offset_mapping"]
                # Assert there is the same number of tokens and offsets
                self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
                # Assert there are only added_tokens special tokens
                self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
                # Pairs
                text = "what's his name"
                pair = ["a", "wonderful", "test"]
                boxes = [[1, 8, 12, 20] for _ in range(len(pair))]
                tokens_with_offsets = tokenizer_r.encode_plus_boxes(
                    text,
                    pair,
                    boxes=boxes,
                    return_special_tokens_mask=True,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                added_tokens = tokenizer_r.num_special_tokens_to_add(True)
                offsets = tokens_with_offsets["offset_mapping"]
                # Assert there is the same number of tokens and offsets
                self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
                # Assert there are only added_tokens special tokens
                self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
    @unittest.skip(reason="Chat template tests don't play well with table/layout models.")
    def test_chat_template(self):
        """Intentionally skipped; see the skip reason above."""
        pass
    @unittest.skip(reason="Chat template tests don't play well with table/layout models.")
    def test_chat_template_return_assistant_tokens_mask(self):
        """Intentionally skipped; see the skip reason above."""
        pass
@unittest.skip("Chat is not supported")
def test_chat_template_return_assistant_tokens_mask_truncated(self):
pass
    @unittest.skip(reason="Chat template tests don't play well with table/layout models.")
    def test_chat_template_batched(self):
        """Intentionally skipped; see the skip reason above."""
        pass
    def test_compare_add_special_tokens(self):
        """With vs. without special tokens must differ by exactly num_special_tokens_to_add.

        Checked across tokenize(), encode_boxes(), encode_plus_boxes() and
        batch_encode_plus_boxes() for single (non-pair) inputs.
        """
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.get_tokenizer(pretrained_name, **kwargs)
                simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)
                words, boxes = self.get_words_and_boxes()
                # tokenize()
                no_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=False)
                with_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=True)
                self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
                # encode()
                no_special_tokens = tokenizer_r.encode_boxes(words, boxes=boxes, add_special_tokens=False)
                with_special_tokens = tokenizer_r.encode_boxes(words, boxes=boxes, add_special_tokens=True)
                self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
                # encode_plus()
                no_special_tokens = tokenizer_r.encode_plus_boxes(words, boxes=boxes, add_special_tokens=False)
                with_special_tokens = tokenizer_r.encode_plus_boxes(words, boxes=boxes, add_special_tokens=True)
                # Every returned field (ids, bboxes, masks, ...) must grow by the same amount.
                for key in no_special_tokens:
                    self.assertEqual(
                        len(no_special_tokens[key]),
                        len(with_special_tokens[key]) - simple_num_special_tokens_to_add,
                    )
                # # batch_encode_plus
                words, boxes = self.get_words_and_boxes_batch()
                no_special_tokens = tokenizer_r.batch_encode_plus_boxes(words, boxes=boxes, add_special_tokens=False)
                with_special_tokens = tokenizer_r.batch_encode_plus_boxes(words, boxes=boxes, add_special_tokens=True)
                for key in no_special_tokens:
                    for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]):
                        self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add)
    @slow
    def test_udop_truncation_integration_test(self):
        """Integration test: truncation honours both an explicit max_length and model_max_length.

        Downloads ``microsoft/udop-large``; only runs under the ``@slow`` gate.
        """
        words, boxes = self.get_words_and_boxes()
        tokenizer = UdopTokenizer.from_pretrained("microsoft/udop-large", model_max_length=512)
        for i in range(12, 512):
            new_encoded_inputs = tokenizer.encode_boxes(words, boxes=boxes, max_length=i, truncation=True)
            # Ensure that the input IDs are less than the max length defined.
            self.assertLessEqual(len(new_encoded_inputs), i)
        # Without an explicit max_length, truncation falls back to model_max_length.
        tokenizer.model_max_length = 20
        new_encoded_inputs = tokenizer.encode_boxes(words, boxes=boxes, truncation=True)
        dropped_encoded_inputs = tokenizer.encode_boxes(words, boxes=boxes, truncation=True)
        # Ensure that the input IDs are still truncated when no max_length is specified
        self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
        self.assertLessEqual(len(new_encoded_inputs), 20)
def test_sequence_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
if not tokenizer.is_fast:
continue
with self.subTest(f"{tokenizer.__class__.__name__}"):
seq_0 = "Test this method."
seq_1 = ["With", "these", "inputs."]
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(seq_1))]
# We want to have sequence 0 and sequence 1 are tagged
# respectively with 0 and 1 token_ids
# (regardless of whether the model use token type ids)
# We use this assumption in the QA pipeline among other place
output = tokenizer(seq_0.split(), boxes=boxes)
self.assertIn(0, output.sequence_ids())
output = tokenizer(seq_0, seq_1, boxes=boxes)
self.assertIn(0, output.sequence_ids())
self.assertIn(1, output.sequence_ids())
if tokenizer.num_special_tokens_to_add(pair=True):
self.assertIn(None, output.sequence_ids())
    def test_special_tokens_initialization(self):
        """A tokenizer built with an extra AddedToken must emit that token's id when encoding it."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]
                tokenizer_r = self.get_tokenizer(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                words = "Hey this is a <special> token".split()
                boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
                r_output = tokenizer_r.encode_boxes(words, boxes=boxes)
                # NOTE(review): `boxes` below is a flat [1000, 1000, 1000, 1000], while
                # every other call site passes one box per word (a nested list) —
                # presumably this should be [[1000, 1000, 1000, 1000]]; confirm against
                # encode_boxes' expected input shape.
                special_token_id = tokenizer_r.encode_boxes(
                    ["<special>"], boxes=[1000, 1000, 1000, 1000], add_special_tokens=False
                )[0]
                self.assertTrue(special_token_id in r_output)
    def test_training_new_tokenizer(self):
        """A tokenizer retrained via train_new_from_iterator must keep the old one's configuration.

        Trains on SMALL_TRAINING_CORPUS, then checks the new tokenizer encodes unseen
        input, and that special-token counts, max lengths and the special-token map
        all carry over unchanged.
        """
        tokenizer = self.get_tokenizer()
        new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
        # Test we can use the new tokenizer with something not seen during training
        text = [["this", "is", "the"], ["how", "are", "you"]]
        boxes = [[[1, 2, 3, 4], [5, 6, 7, 8], [1, 3, 4, 8]], [[5, 6, 7, 8], [4, 5, 6, 7], [3, 9, 2, 7]]]
        inputs = new_tokenizer(text, boxes=boxes)
        self.assertEqual(len(inputs["input_ids"]), 2)
        decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
        expected_result = "this is the"
        # The backend normalizer (if any) may alter the expected surface form.
        if tokenizer.backend_tokenizer.normalizer is not None:
            expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
        self.assertEqual(expected_result, decoded_input)
        # We check that the parameters of the tokenizer remained the same
        # Check we have the same number of added_tokens for both pair and non-pair inputs.
        self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False))
        self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True))
        # Check we have the correct max_length for both pair and non-pair inputs.
        # max_len_single_sentence = model_max_length - num_special_tokens_to_add(pair=False)
        self.assertEqual(
            tokenizer.model_max_length - tokenizer.num_special_tokens_to_add(pair=False),
            new_tokenizer.model_max_length - new_tokenizer.num_special_tokens_to_add(pair=False),
        )
        # max_len_sentences_pair = model_max_length - num_special_tokens_to_add(pair=True)
        self.assertEqual(
            tokenizer.model_max_length - tokenizer.num_special_tokens_to_add(pair=True),
            new_tokenizer.model_max_length - new_tokenizer.num_special_tokens_to_add(pair=True),
        )
        # Assert the set of special tokens match as we didn't ask to change them
        self.assertSequenceEqual(
            tokenizer.all_special_tokens,
            new_tokenizer.all_special_tokens,
        )
        self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map)
    def test_training_new_tokenizer_with_special_tokens_change(self):
        """Retraining with a special_tokens_map must rename special tokens consistently.

        Renames every special token ``X`` to ``Xa`` (plus a ``cls_token`` → ``<cls>``
        sanity check when the class accepts one), then verifies the new tokenizer
        carries the renamed tokens, preserves each AddedToken's flags (lstrip/rstrip/
        normalized/single_word), and still encodes unseen input.
        """
        tokenizer = self.get_tokenizer()
        # Test with a special tokens map
        class_signature = inspect.signature(tokenizer.__class__)
        if "cls_token" in class_signature.parameters:
            new_tokenizer = tokenizer.train_new_from_iterator(
                SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"}
            )
            cls_id = new_tokenizer.get_vocab()["<cls>"]
            self.assertEqual(new_tokenizer.cls_token, "<cls>")
            self.assertEqual(new_tokenizer.cls_token_id, cls_id)
        # Create a new mapping from the special tokens defined in the original tokenizer
        special_tokens_list = PreTrainedTokenizerBase.SPECIAL_TOKENS_ATTRIBUTES.copy()
        special_tokens_map = {}
        for token in special_tokens_list:
            # Get the private one to avoid unnecessary warnings.
            if getattr(tokenizer, token) is not None:
                special_token = getattr(tokenizer, token)
                # Map each special token string to a renamed variant, e.g. "<pad>" -> "<pad>a".
                special_tokens_map[special_token] = f"{special_token}a"
        # Train new tokenizer
        new_tokenizer = tokenizer.train_new_from_iterator(
            SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map
        )
        # Check the changes
        for token in special_tokens_list:
            # Get the private one to avoid unnecessary warnings.
            if getattr(tokenizer, token) is None:
                continue
            special_token = getattr(tokenizer, token)
            if special_token in special_tokens_map:
                new_special_token = getattr(new_tokenizer, token)
                self.assertEqual(special_tokens_map[special_token], new_special_token)
                new_id = new_tokenizer.get_vocab()[new_special_token]
                self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id)
        # Check if the AddedToken / string format has been kept
        for special_token in tokenizer.all_special_tokens:
            if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map:
                # The special token must appear identically in the list of the new tokenizer.
                self.assertTrue(
                    special_token in new_tokenizer.all_special_tokens,
                    f"'{special_token}' should be in {new_tokenizer.all_special_tokens}",
                )
            elif isinstance(special_token, AddedToken):
                # The special token must appear in the list of the new tokenizer as an object of type AddedToken with
                # the same parameters as the old AddedToken except the content that the user has requested to change.
                special_token_str = special_token.content
                new_special_token_str = special_tokens_map[special_token_str]
                find = False
                for candidate in new_tokenizer.all_special_tokens:
                    if (
                        isinstance(candidate, AddedToken)
                        and candidate.content == new_special_token_str
                        and candidate.lstrip == special_token.lstrip
                        and candidate.rstrip == special_token.rstrip
                        and candidate.normalized == special_token.normalized
                        and candidate.single_word == special_token.single_word
                    ):
                        find = True
                        break
                self.assertTrue(
                    find,
                    f"'{new_special_token_str}' doesn't appear in the list "
                    f"'{new_tokenizer.all_special_tokens}' as an AddedToken with the same parameters as "
                    f"'{special_token}' in the list {tokenizer.all_special_tokens}",
                )
            elif special_token not in special_tokens_map:
                # The special token must appear identically in the list of the new tokenizer.
                self.assertTrue(
                    special_token in new_tokenizer.all_special_tokens,
                    f"'{special_token}' should be in {new_tokenizer.all_special_tokens}",
                )
            else:
                # The special token must appear in the list of the new tokenizer as an object of type string.
                self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens)
        # Test we can use the new tokenizer with something not seen during training
        words = [["this", "is"], ["hello", "🤗"]]
        boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[1, 2, 3, 4], [5, 6, 7, 8]]]
        inputs = new_tokenizer(words, boxes=boxes)
        self.assertEqual(len(inputs["input_ids"]), 2)
        decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
        expected_result = "this is"
        # The backend normalizer (if any) may alter the expected surface form.
        if tokenizer.backend_tokenizer.normalizer is not None:
            expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
        self.assertEqual(expected_result, decoded_input)
def test_prepare_for_model(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
# only test prepare_for_model for the slow tokenizer
if tokenizer.__class__.__name__ == "UdopTokenizer":
continue
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_words_and_boxes()
prepared_input_dict = tokenizer.prepare_for_model_boxes(words, boxes=boxes, add_special_tokens=True)
input_dict = tokenizer.encode_plus_boxes(words, boxes=boxes, add_special_tokens=True)
self.assertEqual(input_dict, prepared_input_dict)
def test_batch_encode_dynamic_overflowing(self):
"""
When calling batch_encode with multiple sequences, it can return different number of
overflowing encoding for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can represented as a tensor
"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
returned_tensor = "pt"
# Single example
words, boxes = self.get_words_and_boxes()
tokens = tokenizer.encode_plus_boxes(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
else:
self.assertEqual(len(tokens[key].shape), 3)
# Batch of examples
# For these 2 examples, 3 training examples will be created
words, boxes = self.get_words_and_boxes_batch()
tokens = tokenizer.batch_encode_plus_boxes(
words,
boxes=boxes,
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
if key != "bbox":
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
else:
self.assertEqual(len(tokens[key].shape), 3)
self.assertEqual(tokens[key].shape[-1], 4)
@unittest.skip(reason="TO DO: overwrite this very extensive test.")
def test_alignment_methods(self):
pass
@unittest.skip(reason="UDOP tokenizer requires boxes besides sequences.")
def test_maximum_encoding_length_pair_input(self):
pass
@unittest.skip(reason="UDOP tokenizer requires boxes besides sequences.")
def test_maximum_encoding_length_single_input(self):
pass
@unittest.skip(reason="UDOP tokenizer requires boxes besides sequences.")
def test_pretokenized_inputs(self):
pass
@unittest.skip(reason="UDOP tokenizer always expects pretokenized inputs.")
def test_compare_pretokenized_inputs(self):
pass
@unittest.skip(reason="UDOP fast tokenizer does not support prepare_for_model")
def test_compare_prepare_for_model(self):
pass
@unittest.skip(reason="UDOP tokenizer requires pre-tokenized words, not strings.")
def test_bos_token_with_add_bos_token_false(self):
pass
@unittest.skip(reason="UDOP tokenizer requires pre-tokenized words, not strings.")
def test_bos_token_with_add_bos_token_true(self):
pass
@slow
def test_only_label_first_subword(self):
words = ["hello", "niels"]
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
word_labels = [0, 1]
tokenizer = UdopTokenizer.from_pretrained("microsoft/udop-large")
encoding = tokenizer(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [0, 1, -100, -100, -100])
tokenizer = UdopTokenizer.from_pretrained("microsoft/udop-large", only_label_first_subword=False)
encoding = tokenizer(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [0, 1, 1, 1, -100])
@slow
def test_udop_integration_test(self):
tokenizer = UdopTokenizer.from_pretrained("microsoft/udop-large")
# There are 3 cases:
# CASE 1: document image classification (training + inference), document image token classification (inference),
# in which case only words and normalized bounding boxes are provided to the tokenizer
# CASE 2: document image token classification (training),
# in which case one also provides word labels to the tokenizer
# CASE 3: document image visual question answering (inference),
# in which case one also provides a question to the tokenizer
# We need to test all 3 cases both on batched and non-batched inputs.
# CASE 1: not batched
words, boxes = self.get_words_and_boxes()
# fmt: off
expected_results = {'input_ids': [3, 9, 10088, 120, 794, 21820, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[423, 237, 440, 251], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [961, 885, 992, 912], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231
# fmt: on
encoding = tokenizer(words, boxes=boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding), expected_results)
# CASE 1: batched
words, boxes = self.get_words_and_boxes_batch()
# fmt: off
expected_results = {'input_ids': [[3, 9, 10088, 120, 794, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [21820, 82, 564, 19, 3, 17396, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[423, 237, 440, 251], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231
# fmt: on
encoding = tokenizer(words, boxes=boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding), expected_results)
# CASE 2: not batched
words, boxes = self.get_words_and_boxes()
word_labels = [1, 2, 3, 4]
# fmt: off
expected_results = {'input_ids': [3, 9, 10088, 120, 794, 21820, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[423, 237, 440, 251], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [961, 885, 992, 912], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'labels': [1, -100, 2, -100, 3, 4, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231
# fmt: on
encoding = tokenizer(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding), expected_results)
# CASE 2: batched
words, boxes = self.get_words_and_boxes_batch()
word_labels = [[1, 2, 3], [2, 46, 17, 22, 3]]
# fmt: off
expected_results = {'input_ids': [[3, 9, 10088, 120, 794, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [21820, 82, 564, 19, 3, 17396, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'bbox': [[[423, 237, 440, 251], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'labels': [[1, -100, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [2, 46, 17, 22, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E231
# fmt: on
encoding_p = tokenizer(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
# CASE 3: not batched
question, words, boxes = self.get_question_words_and_boxes()
# fmt: off
expected_results = {'input_ids': [125, 31, 7, 112, 564, 58, 1, 3, 9, 10088, 120, 794, 1, 0, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]} # noqa: E231
# fmt: on
encoding = tokenizer(question, words, boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding), expected_results)
# CASE 3: batched
questions, words, boxes = self.get_question_words_and_boxes_batch()
# fmt: off
expected_results = {'input_ids': [[125, 31, 7, 112, 564, 58, 1, 3, 9, 10088, 120, 794, 1, 0, 0, 0, 0, 0, 0, 0], [149, 19, 3, 88, 718, 58, 1, 125, 3, 9, 50, 99, 1807, 17, 29, 1, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [256, 38, 330, 58], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]} # noqa: E231
# fmt: on
encoding = tokenizer(questions, words, boxes, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding), expected_results)
@unittest.skip(reason="Doesn't support returning Numpy arrays")
def test_np_encode_plus_sent_to_model(self):
pass
@unittest.skip(reason="Doesn't use SentencePiece")
def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
pass
@unittest.skip(reason="Doesn't use SentencePiece")
def test_sentencepiece_tokenize_and_decode(self):
pass
def test_text_target(self):
tokenizer = UdopTokenizer.from_pretrained("microsoft/udop-large")
text = "hello world"
expected_decoding = "hello world</s>"
# should raise an error if we don't provide it using the `text_target` argument
with self.assertRaises(ValueError):
tokenizer(text)
encoding = tokenizer(text_target=text)
self.assertListEqual(encoding["input_ids"], [21820, 296, 1])
self.assertListEqual(encoding["attention_mask"], [1, 1, 1])
self.assertEqual(tokenizer.decode(encoding["input_ids"]), expected_decoding)
def test_special_tokens(self):
tokenizer = UdopTokenizer.from_pretrained("microsoft/udop-large")
# encode
text = "paragraph<loc_58>. Hey"
encoding = tokenizer.encode(text)
assert encoding == [8986, 32942, 3, 5, 9459, 1]
# decode
ids = [0, 8986, 32942, 32966, 32554, 32551, 1]
decoding = tokenizer.decode(ids)
excepted_decoding = "<pad> paragraph<loc_58><loc_34><loc_446><loc_449></s>"
assert decoding == excepted_decoding
def test_split_special_tokens(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
special_token = "<my_new_token>"
special_sentence = f"Hey this is a {special_token} token"
_, _, boxes = self.get_question_words_and_boxes()
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_rust = self.tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=[special_token], split_special_tokens=True, **kwargs
)
tokenizer_py = self.tokenizer_class.from_pretrained(
pretrained_name, additional_special_tokens=[special_token], split_special_tokens=True, **kwargs
)
special_token_id = tokenizer_py.convert_tokens_to_ids(special_token)
encoded_special_token_unsplit = tokenizer_py.encode(
special_token, add_special_tokens=False, split_special_tokens=False
)
self.assertTrue(special_token_id in encoded_special_token_unsplit)
encoded_special_token_split = tokenizer_py.encode(special_token, add_special_tokens=False)
self.assertTrue(special_token_id not in encoded_special_token_split)
py_tokens_output = tokenizer_py.tokenize(special_sentence)
rust_tokens_output = tokenizer_rust.tokenize(special_sentence)
self.assertTrue(special_token not in py_tokens_output)
self.assertTrue(special_token not in rust_tokens_output)
py_tokens_output_unsplit = tokenizer_py.tokenize(special_sentence, split_special_tokens=False)
rust_tokens_output_unsplit = tokenizer_rust.tokenize(special_sentence, split_special_tokens=False)
self.assertTrue(special_token in py_tokens_output_unsplit)
self.assertTrue(special_token in rust_tokens_output_unsplit)
tmpdirname = tempfile.mkdtemp()
tokenizer_py.save_pretrained(tmpdirname)
fast_from_saved = self.tokenizer_class.from_pretrained(tmpdirname)
output_tokens_reloaded_split = fast_from_saved.tokenize(special_sentence)
self.assertTrue(special_token not in output_tokens_reloaded_split)
output_tokens_reloaded_unsplit = fast_from_saved.tokenize(special_sentence, split_special_tokens=False)
self.assertTrue(special_token in output_tokens_reloaded_unsplit)
def test_empty_input_string(self):
tokenizer_return_type = []
output_tensor_type = []
if is_torch_available():
import numpy as np
import torch
tokenizer_return_type.append("pt")
output_tensor_type.append(torch.int64)
tokenizer_return_type.append("np")
output_tensor_type.append(np.int64)
if is_mlx_available():
import mlx.core as mx
tokenizer_return_type.append("mlx")
output_tensor_type.append(mx.int32)
if len(tokenizer_return_type) == 0:
self.skipTest(reason="No expected framework from PT, or MLX found")
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
words, boxes = self.get_empty_words_and_boxes()
for return_type, target_type in zip(tokenizer_return_type, output_tensor_type):
output = tokenizer(words, boxes=boxes, return_tensors=return_type)
self.assertEqual(output.input_ids.dtype, target_type)
question, words, boxes = self.get_empty_question_words_and_boxes()
for return_type, target_type in zip(tokenizer_return_type, output_tensor_type):
output = tokenizer(words, boxes=boxes, return_tensors=return_type)
self.assertEqual(output.input_ids.dtype, target_type)
words, boxes = self.get_empty_words_and_boxes_batch()
for return_type, target_type in zip(tokenizer_return_type, output_tensor_type):
output = tokenizer(words, boxes=boxes, padding=True, return_tensors=return_type)
self.assertEqual(output.input_ids.dtype, target_type)
question, words, boxes = self.get_empty_question_words_and_boxes_batch()
for return_type, target_type in zip(tokenizer_return_type, output_tensor_type):
output = tokenizer(words, boxes=boxes, padding=True, return_tensors=return_type)
self.assertEqual(output.input_ids.dtype, target_type)
| UdopTokenizationTest |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_table_row_count_to_be_between.py | {
"start": 2728,
"end": 15973
} | class ____(BatchExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
ExpectTableRowCountToBeBetween is a \
Batch Expectation.
BatchExpectations are one of the most common types of Expectation.
They are evaluated for an entire Batch, and answer a semantic question about the Batch itself.
Args:
min_value (int or None): \
{MIN_VALUE_DESCRIPTION}
max_value (int or None): \
{MAX_VALUE_DESCRIPTION}
strict_min (boolean): \
{STRICT_MIN_DESCRIPTION}
strict_max (boolean): \
{STRICT_MAX_DESCRIPTION}
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
Notes:
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound, and the number of acceptable rows has \
no minimum.
* If max_value is None, then min_value is treated as a lower bound, and the number of acceptable rows has \
no maximum.
See Also:
[ExpectTableRowCountToEqual](https://greatexpectations.io/expectations/expect_table_row_count_to_equal)
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 1.00 2
1 2.30 5
2 4.33 0
Code Examples:
Passing Case:
Input:
ExpectTableRowCountToBeBetween(
min_value=1,
max_value=4
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": 3
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectTableRowCountToBeBetween(
max_value=2
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": 3
}},
"meta": {{}},
"success": false
}}
""" # noqa: E501 # FIXME CoP
min_value: Union[int, SuiteParameterDict, datetime, None] = pydantic.Field(
default=None, description=MIN_VALUE_DESCRIPTION
)
max_value: Union[int, SuiteParameterDict, datetime, None] = pydantic.Field(
default=None, description=MAX_VALUE_DESCRIPTION
)
strict_min: Union[bool, SuiteParameterDict] = pydantic.Field(
default=False, description=STRICT_MAX_DESCRIPTION
)
strict_max: Union[bool, SuiteParameterDict] = pydantic.Field(
default=False, description=STRICT_MIN_DESCRIPTION
)
row_condition: RowConditionType = None
condition_parser: Union[ConditionParser, None] = None
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "table expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
metric_dependencies = ("table.row_count",)
domain_keys: ClassVar[Tuple[str, ...]] = ("row_condition", "condition_parser")
success_keys = (
"min_value",
"max_value",
"strict_min",
"strict_max",
)
args_keys = (
"min_value",
"max_value",
"strict_min",
"strict_max",
)
class Config:
title = "Expect table row count to be between"
@staticmethod
def schema_extra(
schema: Dict[str, Any], model: Type[ExpectTableRowCountToBeBetween]
) -> None:
BatchExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@pydantic.root_validator
def _root_validate(cls, values: dict) -> dict:
min_value = values.get("min_value")
max_value = values.get("max_value")
if (
min_value is not None
and max_value is not None
and not isinstance(min_value, dict)
and not isinstance(max_value, dict)
and min_value > max_value
):
raise ValueError( # noqa: TRY003 # Error message gets swallowed by Pydantic
f"min_value ({min_value}) must be less than or equal to max_value ({max_value})"
)
if isinstance(min_value, dict) and "$PARAMETER" not in min_value:
raise ValueError( # noqa: TRY003 # Error message gets swallowed by Pydantic
"min_value dict must contain key $PARAMETER"
)
if isinstance(max_value, dict) and "$PARAMETER" not in max_value:
raise ValueError( # noqa: TRY003 # Error message gets swallowed by Pydantic
"max_value dict must contain key $PARAMETER"
)
return values
@classmethod
@override
def _prescriptive_template(
cls, renderer_configuration: RendererConfiguration
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("min_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]),
("max_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]),
("strict_min", RendererValueType.BOOLEAN),
("strict_max", RendererValueType.BOOLEAN),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if not params.min_value and not params.max_value:
template_str = "May have any number of rows."
else:
at_least_str = "greater than or equal to"
if params.strict_min:
at_least_str = cls._get_strict_min_string(
renderer_configuration=renderer_configuration
)
at_most_str = "less than or equal to"
if params.strict_max:
at_most_str = cls._get_strict_max_string(
renderer_configuration=renderer_configuration
)
if params.min_value and params.max_value:
template_str = (
f"Must have {at_least_str} $min_value and {at_most_str} $max_value rows."
)
elif not params.min_value:
template_str = f"Must have {at_most_str} $max_value rows."
else:
template_str = f"Must have {at_least_str} $min_value rows."
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
@override
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
) -> list[RenderedStringTemplateContent]:
runtime_configuration = runtime_configuration or {}
_ = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs, # type: ignore[union-attr] # FIXME CoP
[
"min_value",
"max_value",
"strict_min",
"strict_max",
],
)
if params["min_value"] is None and params["max_value"] is None:
template_str = "May have any number of rows."
else:
at_least_str, at_most_str = handle_strict_min_max(params)
if params["min_value"] is not None and params["max_value"] is not None:
template_str = (
f"Must have {at_least_str} $min_value and {at_most_str} $max_value rows."
)
elif params["min_value"] is None:
template_str = f"Must have {at_most_str} $max_value rows."
elif params["max_value"] is None:
template_str = f"Must have {at_least_str} $min_value rows."
else:
raise ValueError("unresolvable template_str") # noqa: TRY003 # FIXME CoP
return [
RenderedStringTemplateContent(
content_block_type="string_template",
string_template={
"template": template_str,
"params": params,
"styling": styling,
},
)
]
@override
def _validate(
self,
metrics: Dict,
runtime_configuration: Optional[dict] = None,
execution_engine: Optional[ExecutionEngine] = None,
):
return self._validate_metric_value_between(
metric_name="table.row_count",
metrics=metrics,
runtime_configuration=runtime_configuration,
execution_engine=execution_engine,
)
| ExpectTableRowCountToBeBetween |
python | jupyterlab__jupyterlab | jupyterlab/semver.py | {
"start": 1677,
"end": 9495
} | class ____(list):
def __setitem__(self, i, v):
try:
list.__setitem__(self, i, v)
except IndexError:
if len(self) == i:
self.append(v)
else:
raise
def list_get(xs, i):
try:
return xs[i]
except IndexError:
return None
R = _R(0)
src = Extendlist()
regexp = {}
# The following Regular Expressions can be used for tokenizing,
# validating, and parsing SemVer version strings.
# ## Numeric Identifier
# A single `0`, or a non-zero digit followed by zero or more digits.
NUMERICIDENTIFIER = R()
src[NUMERICIDENTIFIER] = "0|[1-9]\\d*"
NUMERICIDENTIFIERLOOSE = R()
src[NUMERICIDENTIFIERLOOSE] = "[0-9]+"
# ## Non-numeric Identifier
# Zero or more digits, followed by a letter or hyphen, and then zero or
# more letters, digits, or hyphens.
NONNUMERICIDENTIFIER = R()
src[NONNUMERICIDENTIFIER] = "\\d*[a-zA-Z-][a-zA-Z0-9-]*"
# ## Main Version
# Three dot-separated numeric identifiers.
MAINVERSION = R()
src[MAINVERSION] = (
"("
+ src[NUMERICIDENTIFIER]
+ ")\\."
+ "("
+ src[NUMERICIDENTIFIER]
+ ")\\."
+ "("
+ src[NUMERICIDENTIFIER]
+ ")"
)
MAINVERSIONLOOSE = R()
src[MAINVERSIONLOOSE] = (
"("
+ src[NUMERICIDENTIFIERLOOSE]
+ ")\\."
+ "("
+ src[NUMERICIDENTIFIERLOOSE]
+ ")\\."
+ "("
+ src[NUMERICIDENTIFIERLOOSE]
+ ")"
)
# ## Pre-release Version Identifier
# A numeric identifier, or a non-numeric identifier.
PRERELEASEIDENTIFIER = R()
src[PRERELEASEIDENTIFIER] = "(?:" + src[NUMERICIDENTIFIER] + "|" + src[NONNUMERICIDENTIFIER] + ")"
PRERELEASEIDENTIFIERLOOSE = R()
src[PRERELEASEIDENTIFIERLOOSE] = (
"(?:" + src[NUMERICIDENTIFIERLOOSE] + "|" + src[NONNUMERICIDENTIFIER] + ")"
)
# ## Pre-release Version
# Hyphen, followed by one or more dot-separated pre-release version
# identifiers.
PRERELEASE = R()
src[PRERELEASE] = (
"(?:-(" + src[PRERELEASEIDENTIFIER] + "(?:\\." + src[PRERELEASEIDENTIFIER] + ")*))"
)
PRERELEASELOOSE = R()
src[PRERELEASELOOSE] = (
"(?:-?(" + src[PRERELEASEIDENTIFIERLOOSE] + "(?:\\." + src[PRERELEASEIDENTIFIERLOOSE] + ")*))"
)
# ## Build Metadata Identifier
# Any combination of digits, letters, or hyphens.
BUILDIDENTIFIER = R()
src[BUILDIDENTIFIER] = "[0-9A-Za-z-]+"
# ## Build Metadata
# Plus sign, followed by one or more period-separated build metadata
# identifiers.
BUILD = R()
src[BUILD] = "(?:\\+(" + src[BUILDIDENTIFIER] + "(?:\\." + src[BUILDIDENTIFIER] + ")*))"
# ## Full Version String
# A main version, followed optionally by a pre-release version and
# build metadata.
# Note that the only major, minor, patch, and pre-release sections of
# the version string are capturing groups. The build metadata is not a
# capturing group, because it should not ever be used in version
# comparison.
FULL = R()
FULLPLAIN = "v?" + src[MAINVERSION] + src[PRERELEASE] + "?" + src[BUILD] + "?"
src[FULL] = "^" + FULLPLAIN + "$"
# like full, but allows v1.2.3 and =1.2.3, which people do sometimes.
# also, 1.0.0alpha1 (prerelease without the hyphen) which is pretty
# common in the npm registry.
LOOSEPLAIN = "[v=\\s]*" + src[MAINVERSIONLOOSE] + src[PRERELEASELOOSE] + "?" + src[BUILD] + "?"
LOOSE = R()
src[LOOSE] = "^" + LOOSEPLAIN + "$"
GTLT = R()
src[GTLT] = "((?:<|>)?=?)"
# Something like "2.*" or "1.2.x".
# Note that "x.x" is a valid xRange identifier, meaning "any version"
# Only the first item is strictly required.
XRANGEIDENTIFIERLOOSE = R()
src[XRANGEIDENTIFIERLOOSE] = src[NUMERICIDENTIFIERLOOSE] + "|x|X|\\*"
XRANGEIDENTIFIER = R()
src[XRANGEIDENTIFIER] = src[NUMERICIDENTIFIER] + "|x|X|\\*"
XRANGEPLAIN = R()
src[XRANGEPLAIN] = (
"[v=\\s]*("
+ src[XRANGEIDENTIFIER]
+ ")"
+ "(?:\\.("
+ src[XRANGEIDENTIFIER]
+ ")"
+ "(?:\\.("
+ src[XRANGEIDENTIFIER]
+ ")"
+ "(?:"
+ src[PRERELEASE]
+ ")?"
+ src[BUILD]
+ "?"
+ ")?)?"
)
XRANGEPLAINLOOSE = R()
src[XRANGEPLAINLOOSE] = (
"[v=\\s]*("
+ src[XRANGEIDENTIFIERLOOSE]
+ ")"
+ "(?:\\.("
+ src[XRANGEIDENTIFIERLOOSE]
+ ")"
+ "(?:\\.("
+ src[XRANGEIDENTIFIERLOOSE]
+ ")"
+ "(?:"
+ src[PRERELEASELOOSE]
+ ")?"
+ src[BUILD]
+ "?"
+ ")?)?"
)
XRANGE = R()
src[XRANGE] = "^" + src[GTLT] + "\\s*" + src[XRANGEPLAIN] + "$"
XRANGELOOSE = R()
src[XRANGELOOSE] = "^" + src[GTLT] + "\\s*" + src[XRANGEPLAINLOOSE] + "$"
# Tilde ranges.
# Meaning is "reasonably at or greater than"
LONETILDE = R()
src[LONETILDE] = "(?:~>?)"
TILDETRIM = R()
src[TILDETRIM] = "(\\s*)" + src[LONETILDE] + "\\s+"
regexp[TILDETRIM] = re.compile(src[TILDETRIM], re.M)
tildeTrimReplace = r"\1~"
TILDE = R()
src[TILDE] = "^" + src[LONETILDE] + src[XRANGEPLAIN] + "$"
TILDELOOSE = R()
src[TILDELOOSE] = "^" + src[LONETILDE] + src[XRANGEPLAINLOOSE] + "$"
# Caret ranges.
# Meaning is "at least and backwards compatible with"
LONECARET = R()
src[LONECARET] = "(?:\\^)"
CARETTRIM = R()
src[CARETTRIM] = "(\\s*)" + src[LONECARET] + "\\s+"
regexp[CARETTRIM] = re.compile(src[CARETTRIM], re.M)
caretTrimReplace = r"\1^"
CARET = R()
src[CARET] = "^" + src[LONECARET] + src[XRANGEPLAIN] + "$"
CARETLOOSE = R()
src[CARETLOOSE] = "^" + src[LONECARET] + src[XRANGEPLAINLOOSE] + "$"
# A simple gt/lt/eq thing, or just "" to indicate "any version"
COMPARATORLOOSE = R()
src[COMPARATORLOOSE] = "^" + src[GTLT] + "\\s*(" + LOOSEPLAIN + ")$|^$"
COMPARATOR = R()
src[COMPARATOR] = "^" + src[GTLT] + "\\s*(" + FULLPLAIN + ")$|^$"
# An expression to strip any whitespace between the gtlt and the thing
# it modifies, so that `> 1.2.3` ==> `>1.2.3`
COMPARATORTRIM = R()
src[COMPARATORTRIM] = "(\\s*)" + src[GTLT] + "\\s*(" + LOOSEPLAIN + "|" + src[XRANGEPLAIN] + ")"
# this one has to use the /g flag
regexp[COMPARATORTRIM] = re.compile(src[COMPARATORTRIM], re.M)
comparatorTrimReplace = r"\1\2\3"
# Something like `1.2.3 - 1.2.4`
# Note that these all use the loose form, because they'll be
# checked against either the strict or loose comparator form
# later.
HYPHENRANGE = R()
src[HYPHENRANGE] = (
"^\\s*(" + src[XRANGEPLAIN] + ")" + "\\s+-\\s+" + "(" + src[XRANGEPLAIN] + ")" + "\\s*$"
)
HYPHENRANGELOOSE = R()
src[HYPHENRANGELOOSE] = (
"^\\s*("
+ src[XRANGEPLAINLOOSE]
+ ")"
+ "\\s+-\\s+"
+ "("
+ src[XRANGEPLAINLOOSE]
+ ")"
+ "\\s*$"
)
# Star ranges basically just allow anything at all.
STAR = R()
src[STAR] = "(<|>)?=?\\s*\\*"
# version name recovery for convenient
RECOVERYVERSIONNAME = R()
_n = src[NUMERICIDENTIFIER]
_pre = src[PRERELEASELOOSE]
src[RECOVERYVERSIONNAME] = f"v?({_n})(?:\\.({_n}))?{_pre}?"
# Compile to actual regexp objects.
# All are flag-free, unless they were created above with a flag.
for i in range(R.value()):
logger.debug("genregxp %s %s", i, src[i])
if i not in regexp:
regexp[i] = re.compile(src[i])
def parse(version, loose):
r = regexp[LOOSE] if loose else regexp[FULL]
m = r.search(version)
if m:
return semver(version, loose)
else:
return None
def valid(version, loose):
v = parse(version, loose)
if v.version:
return v
else:
return None
def clean(version, loose):
s = parse(version, loose)
if s:
return s.version
else:
return None
NUMERIC = re.compile(r"^\d+$")
def semver(version, loose):
if isinstance(version, SemVer):
if version.loose == loose:
return version
else:
version = version.version
elif not isinstance(version, string_type): # xxx:
raise ValueError(f"Invalid Version: {version}")
"""
if (!(this instanceof SemVer))
return new SemVer(version, loose);
"""
return SemVer(version, loose)
make_semver = semver
| Extendlist |
python | huggingface__transformers | examples/modular-transformers/modeling_roberta.py | {
"start": 1587,
"end": 5723
} | class ____(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, config.pad_token_id
)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
self.register_buffer(
"token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
)
self.pad_token_id = config.pad_token_id
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
past_key_values_length: int = 0,
) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
# when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
# issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
# NOTE: We assume either pos ids to have bsz == 1 (broadcastable) or bsz == effective bsz (input_shape[0])
buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None and attention_mask.ndim == 4:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| RobertaEmbeddings |
python | realpython__materials | python-getter-setter/employee4.py | {
"start": 28,
"end": 302
} | class ____:
def __set_name__(self, owner, name):
self._name = name
def __get__(self, instance, owner):
return instance.__dict__[self._name]
def __set__(self, instance, value):
instance.__dict__[self._name] = date.fromisoformat(value)
| Date |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/knowledge_graph/retrievers.py | {
"start": 1315,
"end": 2075
} | class ____(str, Enum):
"""
Query mode enum for Knowledge Graphs.
Can be passed as the enum struct, or as the underlying string.
Attributes:
KEYWORD ("keyword"): Default query mode, using keywords to find triplets.
EMBEDDING ("embedding"): Embedding mode, using embeddings to find
similar triplets.
HYBRID ("hybrid"): Hybrid mode, combining both keywords and embeddings
to find relevant triplets.
"""
KEYWORD = "keyword"
EMBEDDING = "embedding"
HYBRID = "hybrid"
@deprecated.deprecated(
version="0.10.53",
reason=(
"KGTableRetriever is deprecated, it is recommended to use "
"PropertyGraphIndex and associated retrievers instead."
),
)
| KGRetrieverMode |
python | kamyu104__LeetCode-Solutions | Python/maximum-xor-score-subarray-queries.py | {
"start": 42,
"end": 723
} | class ____(object):
def maximumSubarrayXor(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
dp = [[nums[i] if j == 0 else 0 for j in xrange(len(nums)-i)] for i in xrange(len(nums))]
for i in reversed(xrange(len(nums))):
for l in xrange(1, len(nums)-i):
dp[i][l] = dp[i][l-1]^dp[i+1][l-1]
for i in reversed(xrange(len(nums))):
for l in xrange(1, len(nums)-i):
dp[i][l] = max(dp[i][l], dp[i][l-1], dp[i+1][l-1])
return [dp[i][j-i] for i, j in queries]
# Time: O(n^2 + q)
# Space: O(n^2)
# dp
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/distributions/util.py | {
"start": 53782,
"end": 55725
} | class ____:
"""Helper class to promote private subclass docstring to public counterpart.
Example:
```python
class TransformedDistribution(Distribution):
@distribution_util.AppendDocstring(
additional_note="A special note!",
kwargs_dict={"foo": "An extra arg."})
def _prob(self, y, foo=None):
pass
```
In this case, the `AppendDocstring` decorator appends the `additional_note` to
the docstring of `prob` (not `_prob`) and adds a new `kwargs`
section with each dictionary item as a bullet-point.
For a more detailed example, see `TransformedDistribution`.
"""
def __init__(self, additional_note="", kwargs_dict=None):
"""Initializes the AppendDocstring object.
Args:
additional_note: Python string added as additional docstring to public
version of function.
kwargs_dict: Python string/string dictionary representing specific kwargs
expanded from the **kwargs input.
Raises:
ValueError: if kwargs_dict.key contains whitespace.
ValueError: if kwargs_dict.value contains newlines.
"""
self._additional_note = additional_note
if kwargs_dict:
bullets = []
for key in sorted(kwargs_dict.keys()):
value = kwargs_dict[key]
if any(x.isspace() for x in key):
raise ValueError("Parameter name \"%s\" contains whitespace." % key)
value = value.lstrip()
if "\n" in value:
raise ValueError(
"Parameter description for \"%s\" contains newlines." % key)
bullets.append("* `%s`: %s" % (key, value))
self._additional_note += ("\n\n##### `kwargs`:\n\n" + "\n".join(bullets))
def __call__(self, fn):
@functools.wraps(fn)
def _fn(*args, **kwargs):
return fn(*args, **kwargs)
if _fn.__doc__ is None:
_fn.__doc__ = self._additional_note
else:
_fn.__doc__ += "\n%s" % self._additional_note
return _fn
| AppendDocstring |
python | tensorflow__tensorflow | tensorflow/python/types/distribute.py | {
"start": 12650,
"end": 21596
} | class ____(Iterable):
# pylint: disable=line-too-long
"""Represents a dataset distributed among devices and machines.
A `tf.distribute.DistributedDataset` could be thought of as a "distributed"
dataset. When you use `tf.distribute` API to scale training to multiple
devices or machines, you also need to distribute the input data, which leads
to a `tf.distribute.DistributedDataset` instance, instead of a
`tf.data.Dataset` instance in the non-distributed case. In TF 2.x,
`tf.distribute.DistributedDataset` objects are Python iterables.
Note: `tf.distribute.DistributedDataset` instances are *not* of type
`tf.data.Dataset`. It only supports two usages we will mention below:
iteration and `element_spec`. We don't support any other APIs to transform or
inspect the dataset.
There are two APIs to create a `tf.distribute.DistributedDataset` object:
`tf.distribute.Strategy.experimental_distribute_dataset(dataset)`and
`tf.distribute.Strategy.distribute_datasets_from_function(dataset_fn)`.
*When to use which?* When you have a `tf.data.Dataset` instance, and the
regular batch splitting (i.e. re-batch the input `tf.data.Dataset` instance
with a new batch size that is equal to the global batch size divided by the
number of replicas in sync) and autosharding (i.e. the
`tf.data.experimental.AutoShardPolicy` options) work for you, use the former
API. Otherwise, if you are *not* using a canonical `tf.data.Dataset` instance,
or you would like to customize the batch splitting or sharding, you can wrap
these logic in a `dataset_fn` and use the latter API. Both API handles
prefetch to device for the user. For more details and examples, follow the
links to the APIs.
There are two main usages of a `DistributedDataset` object:
1. Iterate over it to generate the input for a single device or multiple
devices, which is a `tf.distribute.DistributedValues` instance. To do this,
you can:
* use a pythonic for-loop construct:
>>> global_batch_size = 4
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(4).batch(global_batch_size)
>>> dist_dataset = strategy.experimental_distribute_dataset(dataset)
>>> @tf.function
... def train_step(input):
... features, labels = input
... return labels - 0.3 * features
>>> for x in dist_dataset:
... # train_step trains the model using the dataset elements
... loss = strategy.run(train_step, args=(x,))
... print("Loss is", loss)
Loss is PerReplica:{
0: tf.Tensor(
[[0.7]
[0.7]], shape=(2, 1), dtype=float32),
1: tf.Tensor(
[[0.7]
[0.7]], shape=(2, 1), dtype=float32)
}
Placing the loop inside a `tf.function` will give a performance boost.
However `break` and `return` are currently not supported if the loop is
placed inside a `tf.function`. We also don't support placing the loop
inside a `tf.function` when using
`tf.distribute.experimental.MultiWorkerMirroredStrategy` or
`tf.distribute.experimental.TPUStrategy` with multiple workers.
* use `__iter__` to create an explicit iterator, which is of type
`tf.distribute.DistributedIterator`
>>> global_batch_size = 4
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> train_dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(50).batch(global_batch_size)
>>> train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
>>> @tf.function
... def distributed_train_step(dataset_inputs):
... def train_step(input):
... loss = tf.constant(0.1)
... return loss
... per_replica_losses = strategy.run(train_step, args=(dataset_inputs,))
... return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses,axis=None)
>>> EPOCHS = 2
>>> STEPS = 3
>>> for epoch in range(EPOCHS):
... total_loss = 0.0
... num_batches = 0
... dist_dataset_iterator = iter(train_dist_dataset)
... for _ in range(STEPS):
... total_loss += distributed_train_step(next(dist_dataset_iterator))
... num_batches += 1
... average_train_loss = total_loss / num_batches
... template = ("Epoch {}, Loss: {:.4f}")
... print (template.format(epoch+1, average_train_loss))
Epoch 1, Loss: 0.2000
Epoch 2, Loss: 0.2000
To achieve a performance improvement, you can also wrap the `strategy.run`
call with a `tf.range` inside a `tf.function`. This runs multiple steps in a
`tf.function`. Autograph will convert it to a `tf.while_loop` on the worker.
However, it is less flexible comparing with running a single step inside
`tf.function`. For example, you cannot run things eagerly or arbitrary
python code within the steps.
2. Inspect the `tf.TypeSpec` of the data generated by `DistributedDataset`.
`tf.distribute.DistributedDataset` generates
`tf.distribute.DistributedValues` as input to the devices. If you pass the
input to a `tf.function` and would like to specify the shape and type of
each Tensor argument to the function, you can pass a `tf.TypeSpec` object to
the `input_signature` argument of the `tf.function`. To get the
`tf.TypeSpec` of the input, you can use the `element_spec` property of the
`tf.distribute.DistributedDataset` or `tf.distribute.DistributedIterator`
object.
For example:
>>> global_batch_size = 4
>>> epochs = 1
>>> steps_per_epoch = 1
>>> mirrored_strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([2.])).repeat(100).batch(global_batch_size)
>>> dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
>>> @tf.function(input_signature=[dist_dataset.element_spec])
... def train_step(per_replica_inputs):
... def step_fn(inputs):
... return tf.square(inputs)
... return mirrored_strategy.run(step_fn, args=(per_replica_inputs,))
>>> for _ in range(epochs):
... iterator = iter(dist_dataset)
... for _ in range(steps_per_epoch):
... output = train_step(next(iterator))
... print(output)
PerReplica:{
0: tf.Tensor(
[[4.]
[4.]], shape=(2, 1), dtype=float32),
1: tf.Tensor(
[[4.]
[4.]], shape=(2, 1), dtype=float32)
}
Visit the [tutorial](https://www.tensorflow.org/tutorials/distribute/input)
on distributed input for more examples and caveats.
"""
def __iter__(self):
"""Creates an iterator for the `tf.distribute.DistributedDataset`.
The returned iterator implements the Python Iterator protocol.
Example usage:
>>> global_batch_size = 4
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]).repeat().batch(global_batch_size)
>>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> print(next(distributed_iterator))
PerReplica:{
0: tf.Tensor([1 2], shape=(2,), dtype=int32),
1: tf.Tensor([3 4], shape=(2,), dtype=int32)
}
Returns:
An `tf.distribute.DistributedIterator` instance for the given
`tf.distribute.DistributedDataset` object to enumerate over the
distributed data.
"""
raise NotImplementedError("Must be implemented in descendants")
@property
def element_spec(self):
"""The type specification of an element of this `tf.distribute.DistributedDataset`.
Example usage:
>>> global_batch_size = 16
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensors(([1.],[2])).repeat(100).batch(global_batch_size)
>>> dist_dataset = strategy.experimental_distribute_dataset(dataset)
>>> dist_dataset.element_spec
(PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.float32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.float32, name=None)),
PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.int32, name=None),
TensorSpec(shape=(None, 1), dtype=tf.int32, name=None)))
Returns:
A nested structure of `tf.TypeSpec` objects matching the structure of an
element of this `tf.distribute.DistributedDataset`. This returned value is
typically a `tf.distribute.DistributedValues` object and specifies the
`tf.TensorSpec` of individual components.
"""
raise NotImplementedError(
"DistributedDataset.element_spec must be implemented in descendants.")
@doc_controls.do_not_generate_docs
def reduce(self, initial_state, reduce_func):
raise NotImplementedError(
"DistributedDataset.reduce must be implemented in descendants.")
| DistributedDatasetInterface |
python | Textualize__textual | docs/examples/widgets/vertical_rules.py | {
"start": 127,
"end": 917
} | class ____(App):
CSS_PATH = "vertical_rules.tcss"
def compose(self) -> ComposeResult:
with Horizontal():
yield Label("solid")
yield Rule(orientation="vertical")
yield Label("heavy")
yield Rule(orientation="vertical", line_style="heavy")
yield Label("thick")
yield Rule(orientation="vertical", line_style="thick")
yield Label("dashed")
yield Rule(orientation="vertical", line_style="dashed")
yield Label("double")
yield Rule(orientation="vertical", line_style="double")
yield Label("ascii")
yield Rule(orientation="vertical", line_style="ascii")
if __name__ == "__main__":
app = VerticalRulesApp()
app.run()
| VerticalRulesApp |
python | arrow-py__arrow | tests/test_arrow.py | {
"start": 40600,
"end": 53173
} | class ____:
def test_year(self):
result = list(
arrow.Arrow.span_range("year", datetime(2013, 2, 1), datetime(2016, 3, 31))
)
assert result == [
(
arrow.Arrow(2013, 1, 1),
arrow.Arrow(2013, 12, 31, 23, 59, 59, 999999),
),
(
arrow.Arrow(2014, 1, 1),
arrow.Arrow(2014, 12, 31, 23, 59, 59, 999999),
),
(
arrow.Arrow(2015, 1, 1),
arrow.Arrow(2015, 12, 31, 23, 59, 59, 999999),
),
(
arrow.Arrow(2016, 1, 1),
arrow.Arrow(2016, 12, 31, 23, 59, 59, 999999),
),
]
def test_quarter(self):
result = list(
arrow.Arrow.span_range(
"quarter", datetime(2013, 2, 2), datetime(2013, 5, 15)
)
)
assert result == [
(arrow.Arrow(2013, 1, 1), arrow.Arrow(2013, 3, 31, 23, 59, 59, 999999)),
(arrow.Arrow(2013, 4, 1), arrow.Arrow(2013, 6, 30, 23, 59, 59, 999999)),
]
def test_month(self):
result = list(
arrow.Arrow.span_range("month", datetime(2013, 1, 2), datetime(2013, 4, 15))
)
assert result == [
(arrow.Arrow(2013, 1, 1), arrow.Arrow(2013, 1, 31, 23, 59, 59, 999999)),
(arrow.Arrow(2013, 2, 1), arrow.Arrow(2013, 2, 28, 23, 59, 59, 999999)),
(arrow.Arrow(2013, 3, 1), arrow.Arrow(2013, 3, 31, 23, 59, 59, 999999)),
(arrow.Arrow(2013, 4, 1), arrow.Arrow(2013, 4, 30, 23, 59, 59, 999999)),
]
def test_week(self):
result = list(
arrow.Arrow.span_range("week", datetime(2013, 2, 2), datetime(2013, 2, 28))
)
assert result == [
(arrow.Arrow(2013, 1, 28), arrow.Arrow(2013, 2, 3, 23, 59, 59, 999999)),
(arrow.Arrow(2013, 2, 4), arrow.Arrow(2013, 2, 10, 23, 59, 59, 999999)),
(
arrow.Arrow(2013, 2, 11),
arrow.Arrow(2013, 2, 17, 23, 59, 59, 999999),
),
(
arrow.Arrow(2013, 2, 18),
arrow.Arrow(2013, 2, 24, 23, 59, 59, 999999),
),
(arrow.Arrow(2013, 2, 25), arrow.Arrow(2013, 3, 3, 23, 59, 59, 999999)),
]
def test_day(self):
result = list(
arrow.Arrow.span_range(
"day", datetime(2013, 1, 1, 12), datetime(2013, 1, 4, 12)
)
)
assert result == [
(
arrow.Arrow(2013, 1, 1, 0),
arrow.Arrow(2013, 1, 1, 23, 59, 59, 999999),
),
(
arrow.Arrow(2013, 1, 2, 0),
arrow.Arrow(2013, 1, 2, 23, 59, 59, 999999),
),
(
arrow.Arrow(2013, 1, 3, 0),
arrow.Arrow(2013, 1, 3, 23, 59, 59, 999999),
),
(
arrow.Arrow(2013, 1, 4, 0),
arrow.Arrow(2013, 1, 4, 23, 59, 59, 999999),
),
]
def test_days(self):
result = list(
arrow.Arrow.span_range(
"days", datetime(2013, 1, 1, 12), datetime(2013, 1, 4, 12)
)
)
assert result == [
(
arrow.Arrow(2013, 1, 1, 0),
arrow.Arrow(2013, 1, 1, 23, 59, 59, 999999),
),
(
arrow.Arrow(2013, 1, 2, 0),
arrow.Arrow(2013, 1, 2, 23, 59, 59, 999999),
),
(
arrow.Arrow(2013, 1, 3, 0),
arrow.Arrow(2013, 1, 3, 23, 59, 59, 999999),
),
(
arrow.Arrow(2013, 1, 4, 0),
arrow.Arrow(2013, 1, 4, 23, 59, 59, 999999),
),
]
def test_hour(self):
result = list(
arrow.Arrow.span_range(
"hour", datetime(2013, 1, 1, 0, 30), datetime(2013, 1, 1, 3, 30)
)
)
assert result == [
(
arrow.Arrow(2013, 1, 1, 0),
arrow.Arrow(2013, 1, 1, 0, 59, 59, 999999),
),
(
arrow.Arrow(2013, 1, 1, 1),
arrow.Arrow(2013, 1, 1, 1, 59, 59, 999999),
),
(
arrow.Arrow(2013, 1, 1, 2),
arrow.Arrow(2013, 1, 1, 2, 59, 59, 999999),
),
(
arrow.Arrow(2013, 1, 1, 3),
arrow.Arrow(2013, 1, 1, 3, 59, 59, 999999),
),
]
result = list(
arrow.Arrow.span_range(
"hour", datetime(2013, 1, 1, 3, 30), datetime(2013, 1, 1, 3, 30)
)
)
assert result == [
(arrow.Arrow(2013, 1, 1, 3), arrow.Arrow(2013, 1, 1, 3, 59, 59, 999999))
]
def test_minute(self):
result = list(
arrow.Arrow.span_range(
"minute", datetime(2013, 1, 1, 0, 0, 30), datetime(2013, 1, 1, 0, 3, 30)
)
)
assert result == [
(
arrow.Arrow(2013, 1, 1, 0, 0),
arrow.Arrow(2013, 1, 1, 0, 0, 59, 999999),
),
(
arrow.Arrow(2013, 1, 1, 0, 1),
arrow.Arrow(2013, 1, 1, 0, 1, 59, 999999),
),
(
arrow.Arrow(2013, 1, 1, 0, 2),
arrow.Arrow(2013, 1, 1, 0, 2, 59, 999999),
),
(
arrow.Arrow(2013, 1, 1, 0, 3),
arrow.Arrow(2013, 1, 1, 0, 3, 59, 999999),
),
]
def test_second(self):
result = list(
arrow.Arrow.span_range(
"second", datetime(2013, 1, 1), datetime(2013, 1, 1, 0, 0, 3)
)
)
assert result == [
(
arrow.Arrow(2013, 1, 1, 0, 0, 0),
arrow.Arrow(2013, 1, 1, 0, 0, 0, 999999),
),
(
arrow.Arrow(2013, 1, 1, 0, 0, 1),
arrow.Arrow(2013, 1, 1, 0, 0, 1, 999999),
),
(
arrow.Arrow(2013, 1, 1, 0, 0, 2),
arrow.Arrow(2013, 1, 1, 0, 0, 2, 999999),
),
(
arrow.Arrow(2013, 1, 1, 0, 0, 3),
arrow.Arrow(2013, 1, 1, 0, 0, 3, 999999),
),
]
def test_naive_tz(self):
tzinfo = ZoneInfo("US/Pacific")
result = arrow.Arrow.span_range(
"hour", datetime(2013, 1, 1, 0), datetime(2013, 1, 1, 3, 59), "US/Pacific"
)
for f, c in result:
assert f.tzinfo == tzinfo
assert c.tzinfo == tzinfo
def test_aware_same_tz(self):
tzinfo = ZoneInfo("US/Pacific")
result = arrow.Arrow.span_range(
"hour",
datetime(2013, 1, 1, 0, tzinfo=tzinfo),
datetime(2013, 1, 1, 2, 59, tzinfo=tzinfo),
)
for f, c in result:
assert f.tzinfo == tzinfo
assert c.tzinfo == tzinfo
def test_aware_different_tz(self):
tzinfo1 = ZoneInfo("US/Pacific")
tzinfo2 = ZoneInfo("US/Eastern")
result = arrow.Arrow.span_range(
"hour",
datetime(2013, 1, 1, 0, tzinfo=tzinfo1),
datetime(2013, 1, 1, 2, 59, tzinfo=tzinfo2),
)
for f, c in result:
assert f.tzinfo == tzinfo1
assert c.tzinfo == tzinfo1
def test_aware_tz(self):
result = arrow.Arrow.span_range(
"hour",
datetime(2013, 1, 1, 0, tzinfo=ZoneInfo("US/Eastern")),
datetime(2013, 1, 1, 2, 59, tzinfo=ZoneInfo("US/Eastern")),
tz="US/Central",
)
for f, c in result:
assert f.tzinfo == ZoneInfo("US/Central")
assert c.tzinfo == ZoneInfo("US/Central")
def test_bounds_param_is_passed(self):
result = list(
arrow.Arrow.span_range(
"quarter", datetime(2013, 2, 2), datetime(2013, 5, 15), bounds="[]"
)
)
assert result == [
(arrow.Arrow(2013, 1, 1), arrow.Arrow(2013, 4, 1)),
(arrow.Arrow(2013, 4, 1), arrow.Arrow(2013, 7, 1)),
]
def test_exact_bound_exclude(self):
result = list(
arrow.Arrow.span_range(
"hour",
datetime(2013, 5, 5, 12, 30),
datetime(2013, 5, 5, 17, 15),
bounds="[)",
exact=True,
)
)
expected = [
(
arrow.Arrow(2013, 5, 5, 12, 30),
arrow.Arrow(2013, 5, 5, 13, 29, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 13, 30),
arrow.Arrow(2013, 5, 5, 14, 29, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 14, 30),
arrow.Arrow(2013, 5, 5, 15, 29, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 15, 30),
arrow.Arrow(2013, 5, 5, 16, 29, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 16, 30),
arrow.Arrow(2013, 5, 5, 17, 14, 59, 999999),
),
]
assert result == expected
def test_exact_floor_equals_end(self):
result = list(
arrow.Arrow.span_range(
"minute",
datetime(2013, 5, 5, 12, 30),
datetime(2013, 5, 5, 12, 40),
exact=True,
)
)
expected = [
(
arrow.Arrow(2013, 5, 5, 12, 30),
arrow.Arrow(2013, 5, 5, 12, 30, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 12, 31),
arrow.Arrow(2013, 5, 5, 12, 31, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 12, 32),
arrow.Arrow(2013, 5, 5, 12, 32, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 12, 33),
arrow.Arrow(2013, 5, 5, 12, 33, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 12, 34),
arrow.Arrow(2013, 5, 5, 12, 34, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 12, 35),
arrow.Arrow(2013, 5, 5, 12, 35, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 12, 36),
arrow.Arrow(2013, 5, 5, 12, 36, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 12, 37),
arrow.Arrow(2013, 5, 5, 12, 37, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 12, 38),
arrow.Arrow(2013, 5, 5, 12, 38, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 12, 39),
arrow.Arrow(2013, 5, 5, 12, 39, 59, 999999),
),
]
assert result == expected
def test_exact_bound_include(self):
result = list(
arrow.Arrow.span_range(
"hour",
datetime(2013, 5, 5, 2, 30),
datetime(2013, 5, 5, 6, 00),
bounds="(]",
exact=True,
)
)
expected = [
(
arrow.Arrow(2013, 5, 5, 2, 30, 00, 1),
arrow.Arrow(2013, 5, 5, 3, 30, 00, 0),
),
(
arrow.Arrow(2013, 5, 5, 3, 30, 00, 1),
arrow.Arrow(2013, 5, 5, 4, 30, 00, 0),
),
(
arrow.Arrow(2013, 5, 5, 4, 30, 00, 1),
arrow.Arrow(2013, 5, 5, 5, 30, 00, 0),
),
(
arrow.Arrow(2013, 5, 5, 5, 30, 00, 1),
arrow.Arrow(2013, 5, 5, 6, 00),
),
]
assert result == expected
def test_small_interval_exact_open_bounds(self):
result = list(
arrow.Arrow.span_range(
"minute",
datetime(2013, 5, 5, 2, 30),
datetime(2013, 5, 5, 2, 31),
bounds="()",
exact=True,
)
)
expected = [
(
arrow.Arrow(2013, 5, 5, 2, 30, 00, 1),
arrow.Arrow(2013, 5, 5, 2, 30, 59, 999999),
),
]
assert result == expected
| TestArrowSpanRange |
python | scikit-learn__scikit-learn | sklearn/gaussian_process/kernels.py | {
"start": 79325,
"end": 85129
} | class ____(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
.. versionadded:: 0.18
Parameters
----------
gamma : float, default=1.0
Parameter gamma of the pairwise kernel specified by metric. It should
be positive.
gamma_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'gamma'.
If set to "fixed", 'gamma' cannot be changed during
hyperparameter tuning.
metric : {"linear", "additive_chi2", "chi2", "poly", "polynomial", \
"rbf", "laplacian", "sigmoid", "cosine"} or callable, \
default="linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default=None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import PairwiseKernel
>>> X, y = load_iris(return_X_y=True)
>>> kernel = PairwiseKernel(metric='rbf')
>>> gpc = GaussianProcessClassifier(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9733
>>> gpc.predict_proba(X[:2,:])
array([[0.8880, 0.05663, 0.05532],
[0.8676, 0.07073, 0.06165]])
"""
def __init__(
self,
gamma=1.0,
gamma_bounds=(1e-5, 1e5),
metric="linear",
pairwise_kernels_kwargs=None,
):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.metric = metric
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
@property
def hyperparameter_gamma(self):
return Hyperparameter("gamma", "numeric", self.gamma_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
if evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
pairwise_kernels_kwargs = self.pairwise_kernels_kwargs
if self.pairwise_kernels_kwargs is None:
pairwise_kernels_kwargs = {}
X = np.atleast_2d(X)
K = pairwise_kernels(
X,
Y,
metric=self.metric,
gamma=self.gamma,
filter_params=True,
**pairwise_kernels_kwargs,
)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X,
Y,
metric=self.metric,
gamma=np.exp(gamma),
filter_params=True,
**pairwise_kernels_kwargs,
)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X).ravel()
def is_stationary(self):
"""Returns whether the kernel is stationary."""
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric
)
| PairwiseKernel |
python | spyder-ide__spyder | spyder/plugins/completion/providers/languageserver/providers/workspace.py | {
"start": 663,
"end": 7734
} | class ____:
@send_notification(method=CompletionRequestTypes.WORKSPACE_CONFIGURATION_CHANGE)
def send_configurations(self, configurations, *args):
self.configurations = configurations
params = {
'settings': configurations
}
return params
@send_response
@handles(CompletionRequestTypes.WORKSPACE_FOLDERS)
def send_workspace_folders(self, response):
workspace_folders = []
for folder_name in self.watched_folders:
folder_uri = self.watched_folders[folder_name]
workspace_folders.append({
'uri': folder_uri,
'name': folder_name
})
return workspace_folders
@send_notification(method=CompletionRequestTypes.WORKSPACE_FOLDERS_CHANGE)
def send_workspace_folders_change(self, params):
folder = params['folder']
workspace_watcher = params['instance']
folder_uri = path_as_uri(folder)
added_folders = []
removed_folders = []
if params['kind'] == WorkspaceUpdateKind.ADDITION:
if folder not in self.watched_folders:
self.watched_folders[folder] = {
'uri': folder_uri,
'instance': workspace_watcher
}
added_folders.append({
'uri': folder_uri,
'name': folder
})
elif params['kind'] == WorkspaceUpdateKind.DELETION:
if folder in self.watched_folders:
self.watched_folders.pop(folder)
removed_folders.append({
'uri': folder_uri,
'name': folder
})
workspace_settings = self.server_capabilites['workspace']
request_params = {
'event': {
'added': added_folders,
'removed': removed_folders
}
}
if workspace_settings['workspaceFolders']['supported']:
logger.debug(
u'Workspace folders change: {0} -> {1}'.format(
folder, params['kind'])
)
else:
request_params[ClientConstants.CANCEL] = True
return request_params
@send_response
@handles(CompletionRequestTypes.WORKSPACE_CONFIGURATION)
def send_workspace_configuration(self, params):
logger.debug(params)
return self.configurations
@send_notification(method=CompletionRequestTypes.WORKSPACE_WATCHED_FILES_UPDATE)
def send_watched_files_change(self, params):
changes = []
entries = params.get('params', [])
for entry in entries:
changes.append({
'uri': path_as_uri(entry['file']),
'type': entry['kind']
})
params = {
'changes': changes
}
return params
@send_request(method=CompletionRequestTypes.WORKSPACE_SYMBOL)
def send_symbol_request(self, params):
params = {
'query': params['query']
}
return params
@handles(CompletionRequestTypes.WORKSPACE_SYMBOL)
def handle_symbol_response(self, response):
folders = list(self.watched_folders.keys())
assigned_symbols = {folder: [] for folder in self.watched_folders}
for symbol_info in response:
location = symbol_info['location']
path = process_uri(location['uri'])
location['file'] = path
workspace = match_path_to_folder(folders, path)
assigned_symbols[workspace].append(symbol_info)
for workspace in assigned_symbols:
workspace_edits = assigned_symbols[workspace]
workspace_instance = self.watched_folders[workspace]['instance']
workspace_instance.handle_response(
CompletionRequestTypes.WORKSPACE_SYMBOL,
{'params': workspace_edits})
@send_request(method=CompletionRequestTypes.WORKSPACE_EXECUTE_COMMAND)
def send_execute_command(self, params):
# It is not clear how this call is invoked
params = {
'command': params['command'],
'arguments': params['args']
}
return params
@handles(CompletionRequestTypes.WORKSPACE_EXECUTE_COMMAND)
def handle_execute_command_response(self, response, req_id):
if req_id in self.req_reply:
self.req_reply[req_id](
CompletionRequestTypes.WORKSPACE_EXECUTE_COMMAND,
{'params': response}
)
return response
@send_response(method=CompletionRequestTypes.WORKSPACE_APPLY_EDIT)
def send_edit_response(self, edits):
params = {
'applied': edits['applied']
}
if 'error' in edits:
params['failureReason'] = edits['error']
return params
@handles(CompletionRequestTypes.WORKSPACE_APPLY_EDIT)
def apply_edit(self, response):
logger.debug("Editing: {0}".format(response['label']))
response = response['edit']
folders = list(self.watched_folders.keys())
assigned_files = {folder: [] for folder in self.watched_folders}
if 'documentChanges' in response:
for change in response['documentChanges']:
if 'textDocument' in change:
uri = change['textDocument']['uri']
path = process_uri(uri)
change['textDocument']['path'] = path
workspace = match_path_to_folder(folders, path)
assigned_files[workspace].append({path: change})
elif 'uri' in change:
path = process_uri(change['uri'])
change['path'] = path
workspace = match_path_to_folder(folders, path)
assigned_files[workspace].append({path: change})
elif 'oldUri' in change:
old_path = process_uri(change['oldUri'])
change['old_path'] = old_path
new_path = process_uri(change['newUri'])
change['new_path'] = new_path
workspace = match_path_to_folder(folders, new_path)
assigned_files[workspace].append({old_path: change})
elif 'changes' in response:
changes = response['changes']
uris = list(changes.keys())
for uri in uris:
path = process_uri(uri)
change = changes.pop(uri)
workspace = match_path_to_folder(folders, path)
assigned_files[workspace].append({path: change})
for workspace in assigned_files:
workspace_edits = assigned_files[workspace]
workspace_instance = self.watched_folders[workspace]['instance']
workspace_instance.handle_response(
CompletionRequestTypes.WORKSPACE_APPLY_EDIT,
{'params': {'edits': workspace_edits,
'language': self.language}})
| WorkspaceProvider |
python | pytorch__pytorch | torch/export/_trace.py | {
"start": 4987,
"end": 27750
} | class ____:
aten: ATenExportArtifact
in_spec: TreeSpec
out_spec: TreeSpec
fake_mode: FakeTensorMode
module_call_specs: dict[str, dict[str, pytree.TreeSpec]]
DEFAULT_EXPORT_DYNAMO_CONFIG = ExportDynamoConfig()
DEFAULT_EXPORT_DYNAMO_CONFIG.reorderable_logging_functions = {
logging.critical,
logging.debug,
logging.error,
logging.exception,
logging.info,
logging.log,
logging.warning,
print,
warnings.warn,
}
@contextmanager
def _ignore_backend_decomps():
orig_mkldnn_flag = torch.backends.mkldnn.set_flags(False)
orig_nnpack_flag = torch.backends.nnpack.set_flags(False)
try:
yield
finally:
torch.backends.mkldnn.set_flags(*orig_mkldnn_flag)
torch.backends.nnpack.set_flags(*orig_nnpack_flag)
@contextmanager
def _disable_custom_triton_op_functional_decomposition():
old = torch._functorch.config.decompose_custom_triton_ops
try:
# pyrefly: ignore [bad-assignment]
torch._functorch.config.decompose_custom_triton_ops = False
yield torch._functorch.config.decompose_custom_triton_ops
finally:
torch._functorch.config.decompose_custom_triton_ops = old
def custom_triton_ops_decomposition_disabled():
return not torch._functorch.config.decompose_custom_triton_ops
def _fixup_key(x):
return "L__self__" + _strip_root(x)
def _strip_root(x):
if isinstance(x, str) and x.startswith("_export_root"):
stripped = x[len("_export_root") :]
return stripped.removeprefix(".")
return x
def _is_bogus_const_name(name: str):
splitted_names = name.split(".")
if len(splitted_names) < 1:
return True
return splitted_names[-1].startswith("lifted_tensor")
def _rewrite_tracepoint_node(gm: torch.fx.GraphModule):
"""
In-place modify input graph module by replacing the export tracepoint with a new node
that has the same target and args, but with the _export_root stripped from path.
"""
for node in gm.graph.nodes:
if node.target is torch.ops.higher_order._export_tracepoint:
if "path" in node.kwargs:
path = _strip_root(node.kwargs["path"])
with gm.graph.inserting_before(node):
new_node = gm.graph.create_node(
"call_function",
torch.ops.higher_order._export_tracepoint,
args=node.args,
kwargs={
"path": path,
"kind": node.kwargs["kind"],
},
)
new_node.meta = node.meta
node.replace_all_uses_with(new_node)
gm.graph.erase_node(node)
def detect_shape_env(inputs: Any = None):
shape_envs = []
for i, flat_input in enumerate(inputs):
if isinstance(flat_input, torch.SymInt):
shape_envs.append((flat_input.node.shape_env, "symint input", i))
if shape_envs:
shape_env, desc1, i1 = shape_envs[0]
for m, desc2, i2 in shape_envs[1:]:
assert shape_env is m, (
f"shape env ({shape_env}) from {desc1} {i1} doesn't match mode ({m}) from {desc2} {i2}\n\n"
f"shape env from {desc1} {i1} allocated at:\n{shape_env.stack}\n"
f"shape env from {desc2} {i2} allocated at:\n{m.stack}"
)
return shape_env
else:
return None
def _extract_fake_inputs(gm, args, kwargs):
"""
Given a graph module, extract fakified input tensors from the metadata of
its placeholders, and map them to the structure of given args and kwargs.
Also return the fake mode used to fakify those inputs.
"""
fake_inps: list[Any] = []
fake_vals: list[Any] = []
for node in gm.graph.nodes:
if node.op == "placeholder":
fake_inps.append(node.meta.get("val"))
else:
fake_vals.append(node.meta.get("example_value"))
if in_shuffle_graph := getattr(gm, "_in_shuffle_graph", None):
flat_args = pytree.tree_leaves((args, kwargs))
node_map = {
node: i
for i, node in enumerate(
next(iter(reversed(in_shuffle_graph.graph.nodes))).args[0]
)
if node.op == "placeholder"
}
new_fake_inps: list[Any] = []
for i, node in enumerate(
in_shuffle_graph.graph.find_nodes(op="placeholder")[1:]
):
if node in node_map:
new_fake_inps.append(fake_inps[node_map[node]])
else:
new_fake_inps.append(flat_args[i])
fake_inps = new_fake_inps
# We get both because now we might have a combination of symint and tensor
# inputs, and we want to check that the shape env is consistent between
# both. Unfortunately we can't see what fake mode is attached to the shape
# env, then we can just compare fake modes.
detected_fake_mode = detect_fake_mode(fake_inps + fake_vals)
detected_shape_env = detect_shape_env(fake_inps + fake_vals)
if detected_fake_mode:
if detected_shape_env:
assert detected_shape_env is detected_fake_mode.shape_env, (
"Detected shape env does not match fake mode's shape env"
)
fake_mode = detected_fake_mode
elif detected_shape_env:
fake_mode = FakeTensorMode(shape_env=detected_shape_env, export=True)
else:
fake_mode = FakeTensorMode(shape_env=ShapeEnv(), export=True)
count = 0
def lookup_fake(x):
nonlocal count
val = fake_inps[count] if isinstance(x, (int, torch.Tensor)) else x
count += 1
return val
fake_args = pytree.tree_map(lookup_fake, args)
fake_kwargs = pytree.tree_map(lookup_fake, kwargs)
return fake_args, fake_kwargs, fake_mode
def _replace_param_buffer_names(param_buffer_table, sig):
for spec in sig.input_specs:
if spec.kind in (
InputKind.PARAMETER,
InputKind.BUFFER,
):
spec.target = param_buffer_table[spec.target]
for spec in sig.output_specs:
if spec.kind in (
OutputKind.BUFFER_MUTATION,
OutputKind.GRADIENT_TO_PARAMETER,
):
spec.target = param_buffer_table[spec.target]
def _convert_to_positional_args(orig_arg_names, args, kwargs):
assert len(orig_arg_names) == len(args) + len(kwargs), (
f"Total number of arg names is expected to be {len(orig_arg_names)} "
f"but got {len(args)} positional args, {len(kwargs)} kwargs."
)
reordered_kwargs = [kwargs[kw_name] for kw_name in orig_arg_names[len(args) :]]
return (
*args,
*reordered_kwargs,
)
def _normalize_nn_module_stack(gm_torch_level, root_cls):
# Append a root module to every nn_module_stack.
root = "L['self']"
root_key = re.sub(r"[^a-zA-Z0-9]", "_", root)
for gm in gm_torch_level.modules():
if not isinstance(gm, torch.fx.GraphModule):
continue
for node in gm.graph.nodes:
if node.op in ["placeholder", "output"]:
continue
add_root = True
if nn_module_stack := node.meta.get("nn_module_stack", {}):
path, ty = next(iter(nn_module_stack.values()))
# After deserializing the class `ty` might not exist anymore so
# it could be a string
if inspect.isclass(ty) and issubclass(ty, torch.nn.Module):
# TODO Figure out why sometimes we have root sometimes we don't.
if path == root and ty is root_cls:
add_root = False
else:
assert isinstance(ty, str)
if add_root:
def normalize_path(path):
if path == "L['self']":
return ""
if path.startswith("L['self']."):
return path[len("L['self'].") :]
return path
nn_module_stack = {
root_key: (root, root_cls.__module__ + "." + root_cls.__qualname__),
# pyrefly: ignore [unbound-name]
**nn_module_stack,
}
node.meta["nn_module_stack"] = {
key: (normalize_path(path), ty)
for key, (path, ty) in nn_module_stack.items()
}
def _get_param_buffer_mapping(
original_module: torch.nn.Module,
traced_module: torch.nn.Module,
) -> dict[str, str]:
"""
Returns a mapping of parameter/buffer names from the new module to the
original model. This is to help with restoring the FQN for parameter/buffers
of a traced module to what the original module contains.
"""
param_lookup: dict[int, str] = {}
buffer_lookup: dict[int, str] = {}
for name, param in original_module.named_parameters(remove_duplicate=False):
if param_lookup.get(id(param)) is None:
# we only want to keep the first occurrence of a parameter to guarantee parity of original and traced module.
param_lookup[id(param)] = name
for name, buffer in original_module.named_buffers(remove_duplicate=False):
buffer_lookup[id(buffer)] = name
param_buffer_table: dict[str, str] = {}
for dynamo_name, dynamo_param in traced_module.named_parameters(
remove_duplicate=False
):
assert dynamo_name not in param_buffer_table
if id(dynamo_param) in param_lookup:
param_buffer_table[dynamo_name] = param_lookup[id(dynamo_param)]
for dynamo_name, dynamo_buffer in traced_module.named_buffers(
remove_duplicate=False
):
assert dynamo_name not in param_buffer_table
if id(dynamo_buffer) in buffer_lookup:
param_buffer_table[dynamo_name] = buffer_lookup[id(dynamo_buffer)]
return param_buffer_table
def _preserve_requires_grad_pass(
gm: torch.fx.GraphModule,
sig: ExportGraphSignature,
fake_params_buffers: dict[str, torch.Tensor],
constants: dict[str, _ConstantAttributeType],
flat_fake_args: list[Any],
):
placeholders = [node for node in gm.graph.nodes if node.op == "placeholder"]
assert len(sig.input_specs) == len(placeholders)
i = 0
for node, spec in zip(placeholders, sig.input_specs):
if spec.kind in (
InputKind.PARAMETER,
InputKind.BUFFER,
):
assert spec.target is not None
node.meta["val"].requires_grad = fake_params_buffers[
spec.target
].requires_grad
elif spec.kind == InputKind.USER_INPUT:
fake_arg = flat_fake_args[i]
if isinstance(fake_arg, torch.Tensor):
node.meta["val"].requires_grad = fake_arg.requires_grad
i += 1
elif spec.kind == InputKind.CONSTANT_TENSOR:
assert spec.target is not None
constant = constants[spec.target]
if isinstance(constant, torch.Tensor):
# If the tensor is not leaf, it should already have a correct requires grad field
if node.meta["val"].is_leaf:
node.meta["val"].requires_grad = constant.requires_grad
else:
assert node.meta["val"].requires_grad == constant.requires_grad
elif spec.kind in (InputKind.CUSTOM_OBJ, InputKind.TOKEN):
continue
else:
raise AssertionError(spec.kind)
def _remap_constants(
orig_constant_attrs: ConstantAttrMap,
graph_signature: ExportGraphSignature,
constants: dict[str, _ConstantAttributeType],
) -> None:
"""Rewrite the graph signature and constants table to use the FQN from the original module."""
remap_table: dict[str, list[str]] = {}
for name, value in constants.items():
if value in orig_constant_attrs:
remap_table[name] = orig_constant_attrs[value]
for spec in graph_signature.input_specs:
if spec.kind in (
InputKind.CONSTANT_TENSOR,
InputKind.CUSTOM_OBJ,
):
orig_target = spec.target
assert orig_target is not None
targets = remap_table.get(orig_target, [orig_target])
spec.target = targets[0]
constant = constants[orig_target]
del constants[orig_target]
for target in targets:
constants[target] = constant
def _replace_unbacked_bindings(gm: torch.fx.GraphModule) -> None:
"""
When we run an interpreter-based pass over a GraphModule, execution of data-dependent operators
will produce example values with new unbacked symbols. To track that the new/old symbols are equivalent,
we used to rely on the unbacked_renamings mapping. This led to problematic metadata where the unbacked_bindings
keys mapped new symbols (u2) to paths containing old symbols (u0) in the example values, or worse, backed symbols
or constants (e.g. if the original unbacked was replaced/specialized). Additionally this created problems with
de/serialized programs, since we didn't comprehensively serialize ShapeEnv/unbacked renamings/node bindings.
This pass attempts a simpler way of handling these for export, by throwing away the previously computed bindings, and re-running
the pattern match used in compute_unbacked_bindings. This ensures we keep the original symbols contained in the example values,
or delete bindings if they've been replaced/specialized.
"""
from torch._export.utils import _get_shape_env_from_gm
from torch.fx.experimental.symbolic_shapes import _free_unbacked_symbols_with_path
from torch.utils._sympy.symbol import symbol_is_type, SymT
if (shape_env := _get_shape_env_from_gm(gm)) is None:
return
base_unbacked_symbols = {
symbol
for symbol in shape_env.var_to_range
if symbol_is_type(symbol, (SymT.UNBACKED_INT, SymT.UNBACKED_FLOAT))
and symbol not in shape_env.unbacked_renamings
}
for node in gm.graph.nodes:
node.meta.pop("unbacked_bindings", None)
if (val := node.meta.get("val")) is not None and (
unbacked_bindings := _free_unbacked_symbols_with_path(
val,
(),
shape_env=shape_env,
pending=base_unbacked_symbols,
simplify=True,
)
):
node.meta["unbacked_bindings"] = unbacked_bindings
def _produce_aten_artifact(
*,
gm: torch.fx.GraphModule,
mod,
constant_attrs,
graph_signature,
pre_dispatch,
fake_args,
fake_kwargs,
fake_params_buffers,
_prettify_placeholder_names=True,
) -> ATenExportArtifact:
"""
This is a helper function that is shared between export_to_aten_ir and export_to_aten_ir_make_fx
to produce the aten artifact. (export compatible graph module + signature)
It does:
1. Applies runtime assertion pass
2. Recompute unbacked_bindings pass
3. Populate meta val when missing
4. Lift constants as placeholders
5. Replace raw autograd and autocast ops with HOPs
6. Prettify names for placeholders
7. Preserve requires_grad value on node meta val
"""
# Run runtime asserts pass before creating input/output specs, since size-related CSE/DCE might affect output signature.
# Overwrite output specs afterwards.
flat_fake_args = pytree.tree_leaves((fake_args, fake_kwargs))
gm, graph_signature = apply_runtime_assertion_pass(gm, graph_signature)
# Simplify unbacked_bindings by recomputing them.
# Useful for any pass that's interpreter-based and might call rebind_unbacked(),
# e.g. AOTAutograd in this case.
_replace_unbacked_bindings(gm)
total_non_user_inputs = (
len(graph_signature.parameters)
+ len(graph_signature.buffers)
+ len(graph_signature.input_tokens)
)
set_missing_meta_vals(gm, flat_fake_args, total_non_user_inputs)
export_graph_signature: Optional[ExportGraphSignature]
export_graph_signature = _convert_to_export_graph_signature(
graph_signature, gm, _get_non_persistent_buffers(mod)
)
# script objects are always stored in constants no matter whether they're initial inputs or
# they're lifted in aot" before rewrite_script_object_meta
constants = _materialize_and_lift_constants(
gm, export_graph_signature, constant_attrs
)
if pre_dispatch:
from torch._export.passes.replace_autocast_with_hop_pass import (
replace_autocast_with_hop_pass,
)
from torch._export.passes.replace_set_grad_with_hop_pass import (
replace_set_grad_with_hop_pass,
)
# Note: replace_set_grad_with_hop_pass need to be after lift_constant_pass because
# a getattr of a constant tensor doesn't have meta["val"] until after lift_constant_pass.
# If replace_set_grad_with_hop_pass is before lift_constant_pass,
# and the constant_tensor is passed as input of the set grad hop, the placeholder's
# meta["val"] will be None and fails our verifier for placeholder.
gm, export_graph_signature = replace_set_grad_with_hop_pass(
gm, export_graph_signature
)
gm, export_graph_signature = replace_autocast_with_hop_pass(
gm, export_graph_signature
)
# Remove nn_module_stack, stack_trace metadata from all placeholders/inputs nodes.
for _mod in gm.modules():
if not isinstance(_mod, torch.fx.GraphModule):
continue
for node in _mod.graph.nodes:
if node.op in ["placeholder", "output"]:
node.meta.pop("nn_module_stack", None)
node.meta.pop("stack_trace", None)
# Prettify names for placeholder nodes.
assert export_graph_signature is not None
if _prettify_placeholder_names:
placeholder_naming_pass(
gm,
export_graph_signature,
mod,
fake_args,
fake_kwargs,
fake_params_buffers,
constants,
)
_preserve_requires_grad_pass(
gm, export_graph_signature, fake_params_buffers, constants, flat_fake_args
)
return ATenExportArtifact(
gm,
export_graph_signature,
constants,
)
def _rename_constants_nodes(
gm: torch.fx.GraphModule,
graph_signature: ExportGraphSignature,
) -> None:
"""
For strict mode, rename constants nodes that were previously annotated as buffers.
"""
# handle name collisions with existing constants
node_names = {node.name for node in gm.graph.nodes}
def rename_constant(name):
if name in node_names:
n = 1
while (dup_name := f"{name}_{n}") in node_names:
n += 1
name = dup_name
node_names.add(name)
return name
# use input specs to map names from buffers to constants
buffer_prefix = placeholder_prefixes[InputKind.BUFFER]
const_prefix = placeholder_prefixes[InputKind.CONSTANT_TENSOR]
buffer_to_constant = {}
for spec in graph_signature.input_specs:
if spec.kind == InputKind.CONSTANT_TENSOR and not spec.arg.name.startswith(
const_prefix
):
if spec.arg.name.startswith(buffer_prefix): # map from buffer to constants
c_name = rename_constant(
const_prefix + spec.arg.name[len(buffer_prefix) :]
)
else: # lifted constant
c_name = rename_constant(const_prefix + spec.arg.name)
buffer_to_constant[spec.arg.name] = c_name
spec.arg.name = c_name
for spec in graph_signature.output_specs:
if spec.arg.name in buffer_to_constant:
spec.arg.name = buffer_to_constant[spec.arg.name]
# Rename constants nodes for all modules
for mod in gm.modules():
if not isinstance(mod, torch.fx.GraphModule):
continue
for node in mod.graph.nodes:
if node.name in buffer_to_constant:
node.name = node.target = buffer_to_constant[node.name]
mod.recompile()
def _restore_state_dict(
original_module: torch.nn.Module, traced_module: torch.fx.GraphModule
) -> None:
"""
Restores the state dict of the traced module to that of the original module.
"""
param_buffer_table = _get_param_buffer_mapping(original_module, traced_module)
# Don't want to change the convention of previous call.
param_buffer_table_reverse = {v: k for k, v in param_buffer_table.items()}
# Replace state dict attr names with the fqn
for name, _ in list(
chain(
original_module.named_parameters(remove_duplicate=False),
# pyrefly: ignore [bad-argument-type]
original_module.named_buffers(remove_duplicate=False),
)
):
if name in param_buffer_table_reverse:
dynamo_name = param_buffer_table_reverse[name]
param = torch.fx.graph_module._get_attr(traced_module, dynamo_name)
torch.fx.graph_module._assign_attr(param, traced_module, name)
torch.fx.graph_module._del_attr(traced_module, dynamo_name)
# Replace graph getattr nodes with the correct name
for node in traced_module.graph.nodes:
if node.op == "get_attr":
attr_name = node.target
if attr_name in param_buffer_table:
node.target = param_buffer_table[attr_name]
traced_module.recompile()
def _get_module_hierarchy(mod: torch.nn.Module) -> dict[str, str]:
return {
name: type(m).__name__ for name, m in mod.named_modules(remove_duplicate=False)
}
def _make_module_call_graph(
in_spec: TreeSpec,
out_spec: TreeSpec,
module_call_signatures: dict[str, ModuleCallSignature],
forward_arg_names: Optional[list[str]] = None,
) -> list[ModuleCallEntry]:
original = [
ModuleCallEntry(fqn=fqn, signature=module_call_signatures.get(fqn))
for fqn in _EXPORT_MODULE_HIERARCHY # type: ignore[union-attr]
]
assert original[0].fqn == ""
original[0].signature = ModuleCallSignature(
inputs=[],
outputs=[],
in_spec=in_spec,
out_spec=out_spec,
forward_arg_names=forward_arg_names,
)
additional = [
ModuleCallEntry(fqn=fqn, signature=signature)
for fqn, signature in module_call_signatures.items()
if fqn not in _EXPORT_MODULE_HIERARCHY # type: ignore[operator]
]
return [*original, *additional]
| ExportArtifact |
python | getsentry__sentry | src/sentry/replays/lib/new_query/conditions.py | {
"start": 2375,
"end": 2731
} | class ____(GenericBase):
"""Boolean scalar condition class."""
@staticmethod
def visit_eq(expression: Expression, value: bool) -> Condition:
return Condition(expression, Op.EQ, value)
@staticmethod
def visit_neq(expression: Expression, value: bool) -> Condition:
return Condition(expression, Op.NEQ, value)
| BooleanScalar |
python | realpython__materials | django-vue-graphql/source_code_final/back_end/blog/schema.py | {
"start": 304,
"end": 384
} | class ____(DjangoObjectType):
class Meta:
model = models.Post
| PostType |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 22486,
"end": 22679
} | class ____(NotFoundError):
"""
64-bit Lapack libraries not found.
Known libraries in numpy/distutils/site.cfg file are:
openblas64_, openblas_ilp64
"""
| LapackILP64NotFoundError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 574273,
"end": 575031
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for DeploymentStatus."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("DeploymentStatusEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("DeploymentStatus"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| DeploymentStatusConnection |
python | zarr-developers__zarr-python | src/zarr/core/indexing.py | {
"start": 34863,
"end": 36014
} | class ____:
array: AnyArray
# TODO: develop Array generic and move zarr.Array[np.intp] | zarr.Array[np.bool_] to ArrayOfIntOrBool
def __getitem__(self, selection: OrthogonalSelection | AnyArray) -> NDArrayLikeOrScalar:
from zarr.core.array import Array
# if input is a Zarr array, we materialize it now.
if isinstance(selection, Array):
selection = _zarr_array_to_int_or_bool_array(selection)
fields, new_selection = pop_fields(selection)
new_selection = ensure_tuple(new_selection)
new_selection = replace_lists(new_selection)
return self.array.get_orthogonal_selection(
cast("OrthogonalSelection", new_selection), fields=fields
)
def __setitem__(self, selection: OrthogonalSelection, value: npt.ArrayLike) -> None:
fields, new_selection = pop_fields(selection)
new_selection = ensure_tuple(new_selection)
new_selection = replace_lists(new_selection)
return self.array.set_orthogonal_selection(
cast("OrthogonalSelection", new_selection), value, fields=fields
)
@dataclass(frozen=True)
| OIndex |
python | PyCQA__isort | isort/exceptions.py | {
"start": 3632,
"end": 4241
} | class ____(ISortError):
"""Raised when one of isorts literal sorting comments is used but isort can't parse the
the given data structure.
"""
def __init__(self, code: str, original_error: Exception | type[Exception]):
super().__init__(
f"isort failed to parse the given literal {code}. It's important to note "
"that isort literal sorting only supports simple literals parsable by "
f"ast.literal_eval which gave the exception of {original_error}."
)
self.code = code
self.original_error = original_error
| LiteralParsingFailure |
python | getsentry__sentry | tests/sentry/tsdb/test_snuba.py | {
"start": 600,
"end": 19074
} | class ____(OutcomesSnubaTest):
def setUp(self) -> None:
super().setUp()
self.db = SnubaTSDB()
# Set up the times
self.now = datetime.now(timezone.utc)
self.start_time = self.now - timedelta(days=7)
self.one_day_later = self.start_time + timedelta(days=1)
self.day_before_start_time = self.start_time - timedelta(days=1)
def test_organization_outcomes(self) -> None:
other_organization = self.create_organization()
for outcome in [Outcome.ACCEPTED, Outcome.RATE_LIMITED, Outcome.FILTERED]:
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ERROR,
"timestamp": self.start_time,
"quantity": 1,
},
3,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ERROR,
"timestamp": self.one_day_later,
"quantity": 1,
},
4,
)
# security and default should be included in these queries
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.SECURITY,
"timestamp": self.start_time,
"quantity": 1,
},
1,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.DEFAULT,
"timestamp": self.one_day_later,
"quantity": 1,
},
1,
)
# Also create some outcomes we shouldn't be querying
self.store_outcomes(
{
"org_id": other_organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ERROR,
"timestamp": self.one_day_later,
"quantity": 1,
},
5,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ERROR,
"timestamp": self.day_before_start_time,
"quantity": 1,
},
6,
)
# we also shouldn't see any other datacategories in these queries
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.TRANSACTION,
"timestamp": self.one_day_later,
"quantity": 1,
},
1,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ATTACHMENT,
"timestamp": self.one_day_later,
"quantity": 1,
},
1,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.SESSION,
"timestamp": self.one_day_later,
"quantity": 1,
},
1,
)
# Add client-discards (which we shouldn't show in total queries)
self.store_outcomes(
{
"org_id": other_organization.id,
"project_id": self.project.id,
"outcome": Outcome.CLIENT_DISCARD.value,
"category": DataCategory.ERROR,
"timestamp": self.start_time,
"quantity": 1,
},
5,
)
for tsdb_model, granularity, floor_func, start_time_count, day_later_count in [
(TSDBModel.organization_total_received, 3600, floor_to_hour_epoch, 4 * 3, 5 * 3),
(TSDBModel.organization_total_rejected, 3600, floor_to_hour_epoch, 4, 5),
(TSDBModel.organization_total_blacklisted, 3600, floor_to_hour_epoch, 4, 5),
(TSDBModel.organization_total_received, 10, floor_to_10s_epoch, 4 * 3, 5 * 3),
(TSDBModel.organization_total_rejected, 10, floor_to_10s_epoch, 4, 5),
(TSDBModel.organization_total_blacklisted, 10, floor_to_10s_epoch, 4, 5),
]:
# Query SnubaTSDB
response = self.db.get_range(
tsdb_model,
[self.organization.id],
self.start_time,
self.now,
granularity,
None,
tenant_ids={"referrer": "tests", "organization_id": 1},
)
# Assert that the response has values set for the times we expect, and nothing more
assert self.organization.id in response
response_dict = {k: v for (k, v) in response[self.organization.id]}
assert response_dict[floor_func(self.start_time)] == start_time_count
assert response_dict[floor_func(self.one_day_later)] == day_later_count
for time, count in response[self.organization.id]:
if time not in [floor_func(self.start_time), floor_func(self.one_day_later)]:
assert count == 0
def test_project_outcomes(self) -> None:
other_project = self.create_project(organization=self.organization)
for outcome in [Outcome.ACCEPTED, Outcome.RATE_LIMITED, Outcome.FILTERED]:
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ERROR,
"timestamp": self.start_time,
"key_id": 1,
},
1,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ERROR,
"timestamp": self.start_time,
"key_id": 1,
"quantity": 2,
},
1,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ERROR,
"timestamp": self.one_day_later,
"key_id": 1,
},
4,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.SECURITY,
"timestamp": self.start_time,
"key_id": 1,
},
1,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.DEFAULT,
"timestamp": self.one_day_later,
"key_id": 1,
},
1,
)
# Also create some outcomes we shouldn't be querying
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": other_project.id,
"outcome": outcome.value,
"category": DataCategory.ERROR,
"timestamp": self.one_day_later,
"key_id": 1,
},
5,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ERROR,
"timestamp": self.day_before_start_time,
"key_id": 1,
},
6,
)
# we also shouldn't see any other datacategories in these queries
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.TRANSACTION,
"timestamp": self.one_day_later,
"key_id": 1,
},
1,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ATTACHMENT,
"timestamp": self.one_day_later,
"key_id": 1,
},
1,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.SESSION,
"timestamp": self.one_day_later,
"key_id": 1,
},
1,
)
for tsdb_model, granularity, floor_func, start_time_count, day_later_count in [
(TSDBModel.project_total_received, 3600, floor_to_hour_epoch, 4 * 3, 5 * 3),
(TSDBModel.project_total_rejected, 3600, floor_to_hour_epoch, 4, 5),
(TSDBModel.project_total_blacklisted, 3600, floor_to_hour_epoch, 4, 5),
(TSDBModel.project_total_received, 10, floor_to_10s_epoch, 4 * 3, 5 * 3),
(TSDBModel.project_total_rejected, 10, floor_to_10s_epoch, 4, 5),
(TSDBModel.project_total_blacklisted, 10, floor_to_10s_epoch, 4, 5),
]:
response = self.db.get_range(
tsdb_model,
[self.project.id],
self.start_time,
self.now,
granularity,
None,
tenant_ids={"referrer": "tests", "organization_id": 1},
)
# Assert that the response has values set for the times we expect, and nothing more
assert self.project.id in response
response_dict = {k: v for (k, v) in response[self.project.id]}
assert response_dict[floor_func(self.start_time)] == start_time_count
assert response_dict[floor_func(self.one_day_later)] == day_later_count
for time, count in response[self.project.id]:
if time not in [floor_func(self.start_time), floor_func(self.one_day_later)]:
assert count == 0
def test_key_outcomes(self) -> None:
project_key = self.create_project_key(project=self.project)
other_project = self.create_project(organization=self.organization)
other_project_key = self.create_project_key(project=other_project)
for outcome in [Outcome.ACCEPTED, Outcome.RATE_LIMITED, Outcome.FILTERED]:
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ERROR,
"timestamp": self.start_time,
"key_id": project_key.id,
},
3,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ERROR,
"timestamp": self.one_day_later,
"key_id": project_key.id,
},
4,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.SECURITY,
"timestamp": self.start_time,
"key_id": project_key.id,
},
1,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.DEFAULT,
"timestamp": self.one_day_later,
"key_id": project_key.id,
},
1,
)
# Also create some outcomes we shouldn't be querying
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ERROR,
"timestamp": self.one_day_later,
"key_id": other_project_key.id,
},
5,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ERROR,
"timestamp": self.day_before_start_time,
"key_id": project_key.id,
},
6,
)
# we also shouldn't see any other datacategories in these queries
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.TRANSACTION,
"timestamp": self.one_day_later,
"key_id": project_key.id,
},
1,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.ATTACHMENT,
"timestamp": self.one_day_later,
"key_id": project_key.id,
},
1,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"project_id": self.project.id,
"outcome": outcome.value,
"category": DataCategory.SESSION,
"timestamp": self.one_day_later,
"key_id": project_key.id,
},
1,
)
for tsdb_model, granularity, floor_func, start_time_count, day_later_count in [
(TSDBModel.key_total_received, 3600, floor_to_hour_epoch, 4 * 3, 5 * 3),
(TSDBModel.key_total_rejected, 3600, floor_to_hour_epoch, 4, 5),
(TSDBModel.key_total_blacklisted, 3600, floor_to_hour_epoch, 4, 5),
(TSDBModel.key_total_received, 10, floor_to_10s_epoch, 4 * 3, 5 * 3),
(TSDBModel.key_total_rejected, 10, floor_to_10s_epoch, 4, 5),
(TSDBModel.key_total_blacklisted, 10, floor_to_10s_epoch, 4, 5),
]:
response = self.db.get_range(
# with [project_key.id, str(project_key.id), we are imitating the hack in
# project_key_stats.py cause that is what `get_range` will be called with.
tsdb_model,
[project_key.id, str(project_key.id)],
self.start_time,
self.now,
granularity,
None,
tenant_ids={"referrer": "tests", "organization_id": 123},
)
# Assert that the response has values set for the times we expect, and nothing more
assert project_key.id in response
response_dict = {k: v for (k, v) in response[project_key.id]}
assert response_dict[floor_func(self.start_time)] == start_time_count
assert response_dict[floor_func(self.one_day_later)] == day_later_count
for time, count in response[project_key.id]:
if time not in [floor_func(self.start_time), floor_func(self.one_day_later)]:
assert count == 0
def test_all_tsdb_models_have_an_entry_in_model_query_settings(self) -> None:
# Ensure that the models we expect to be using Snuba are using Snuba
exceptions = [
TSDBModel.project_total_forwarded # this is not outcomes and will be moved separately
]
# does not include the internal TSDB model
models = [
model for model in list(TSDBModel) if 0 < model.value < 700 and model not in exceptions
]
for model in models:
assert model in SnubaTSDB.model_query_settings
| SnubaTSDBTest |
python | geekcomputers__Python | insta_monitering/insta_api.py | {
"start": 4280,
"end": 5529
} | class ____(tornado.web.RequestHandler):
def get(self):
try:
q = self.get_argument("q")
user = self.get_argument("userId")
type = self.get_argument("type")
productId = self.get_argument("productId")
date = self.get_argument("date")
limit = self.get_argument("limit")
except:
self.send_error(400)
recordsobj = DBDataFetcher(user=user, tags=q, type=type, productId=productId)
data = recordsobj.DBFetcherGreater(limit=limit, date=date)
# print("{0}, {1}, {2}, {3}".format(temp["userId"], temp["productId"], temp["query"], temp["status"]))
self.write(data)
if __name__ == "__main__":
application = tornado.web.Application(
[
(r"/instagram/monitoring/start", StartHandlerinsta),
(r"/instagram/monitoring/stop", StopHandlerinsta),
(r"/instagram/monitoring/status", StatusHandlerinsta),
(r"/instagram/monitoring/less", SenderHandlerinstaLess),
(r"/instagram/monitoring/greater", SenderHandlerinstaGreater),
]
)
application.listen(7074)
print("server running")
tornado.ioloop.IOLoop.instance().start()
| SenderHandlerinstaGreater |
python | huggingface__transformers | tests/models/beit/test_modeling_beit.py | {
"start": 1740,
"end": 8798
} | class ____:
def __init__(
self,
parent,
vocab_size=100,
batch_size=13,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=4,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
type_sequence_label_size=10,
initializer_range=0.02,
num_labels=3,
scope=None,
out_indices=[1, 2, 3, 4],
out_features=["stage1", "stage2", "stage3", "stage4"],
attn_implementation="eager",
mask_ratio=0.5,
):
self.parent = parent
self.vocab_size = vocab_size
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
self.out_indices = out_indices
self.out_features = out_features
self.num_labels = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
self.mask_length = self.seq_length - 1
self.num_masks = int(mask_ratio * self.seq_length)
self.attn_implementation = attn_implementation
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
pixel_labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels, pixel_labels
def get_config(self):
return BeitConfig(
vocab_size=self.vocab_size,
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
is_decoder=False,
initializer_range=self.initializer_range,
out_indices=self.out_indices,
out_features=self.out_features,
attn_implementation=self.attn_implementation,
)
def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
model = BeitModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_backbone(self, config, pixel_values, labels, pixel_labels):
model = BeitBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
expected_height = expected_width = self.image_size // config.patch_size
self.parent.assertListEqual(
list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, expected_height, expected_width]
)
# verify channels
self.parent.assertEqual(len(model.channels), len(config.out_features))
# verify backbone works with out_features=None
config.out_features = None
model = BeitBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), 1)
self.parent.assertListEqual(
list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, expected_height, expected_width]
)
# verify channels
self.parent.assertEqual(len(model.channels), 1)
def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
model = BeitForMaskedImageModeling(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
config.num_labels = self.type_sequence_label_size
model = BeitForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
# test greyscale images
config.num_channels = 1
model = BeitForImageClassification(config)
model.to(torch_device)
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
config.num_labels = self.num_labels
model = BeitForSemanticSegmentation(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
)
result = model(pixel_values, labels=pixel_labels)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels, pixel_labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| BeitModelTester |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/ext.py | {
"start": 15280,
"end": 17759
} | class ____(_regconfig_fn):
"""The PostgreSQL ``ts_headline`` SQL function.
This function applies automatic casting of the REGCONFIG argument
to use the :class:`_postgresql.REGCONFIG` datatype automatically,
and applies a return type of :class:`_types.TEXT`.
Assuming the PostgreSQL dialect has been imported, either by invoking
``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
engine using ``create_engine("postgresql...")``,
:class:`_postgresql.ts_headline` will be used automatically when invoking
``sqlalchemy.func.ts_headline()``, ensuring the correct argument and return
type handlers are used at compile and execution time.
.. versionadded:: 2.0.0rc1
"""
inherit_cache = True
type = TEXT
def __init__(self, *args, **kwargs):
args = list(args)
# parse types according to
# https://www.postgresql.org/docs/current/textsearch-controls.html#TEXTSEARCH-HEADLINE
if len(args) < 2:
# invalid args; don't do anything
has_regconfig = False
elif (
isinstance(args[1], elements.ColumnElement)
and args[1].type._type_affinity is types.TSQUERY
):
# tsquery is second argument, no regconfig argument
has_regconfig = False
else:
has_regconfig = True
if has_regconfig:
initial_arg = coercions.expect(
roles.ExpressionElementRole,
args.pop(0),
apply_propagate_attrs=self,
name=getattr(self, "name", None),
type_=types.REGCONFIG,
)
initial_arg = [initial_arg]
else:
initial_arg = []
addtl_args = [
coercions.expect(
roles.ExpressionElementRole,
c,
name=getattr(self, "name", None),
apply_propagate_attrs=self,
)
for c in args
]
super().__init__(*(initial_arg + addtl_args), **kwargs)
def distinct_on(*expr: _ColumnExpressionArgument[Any]) -> DistinctOnClause:
"""apply a DISTINCT_ON to a SELECT statement
e.g.::
stmt = select(tbl).ext(distinct_on(t.c.some_col))
this supersedes the previous approach of using
``select(tbl).distinct(t.c.some_col))`` to apply a similar construct.
.. versionadded:: 2.1
"""
return DistinctOnClause(expr)
| ts_headline |
python | matplotlib__matplotlib | galleries/examples/widgets/polygon_selector_demo.py | {
"start": 341,
"end": 3267
} | class ____:
"""
Select indices from a matplotlib collection using `PolygonSelector`.
Selected indices are saved in the `ind` attribute. This tool fades out the
points that are not part of the selection (i.e., reduces their alpha
values). If your collection has alpha < 1, this tool will permanently
alter the alpha values.
Note that this tool selects collection objects based on their *origins*
(i.e., `offsets`).
Parameters
----------
ax : `~matplotlib.axes.Axes`
Axes to interact with.
collection : `matplotlib.collections.Collection` subclass
Collection you want to select from.
alpha_other : 0 <= float <= 1
To highlight a selection, this tool sets all selected points to an
alpha value of 1 and non-selected points to *alpha_other*.
"""
def __init__(self, ax, collection, alpha_other=0.3):
self.canvas = ax.figure.canvas
self.collection = collection
self.alpha_other = alpha_other
self.xys = collection.get_offsets()
self.Npts = len(self.xys)
# Ensure that we have separate colors for each object
self.fc = collection.get_facecolors()
if len(self.fc) == 0:
raise ValueError('Collection must have a facecolor')
elif len(self.fc) == 1:
self.fc = np.tile(self.fc, (self.Npts, 1))
self.poly = PolygonSelector(ax, self.onselect, draw_bounding_box=True)
self.ind = []
def onselect(self, verts):
path = Path(verts)
self.ind = np.nonzero(path.contains_points(self.xys))[0]
self.fc[:, -1] = self.alpha_other
self.fc[self.ind, -1] = 1
self.collection.set_facecolors(self.fc)
self.canvas.draw_idle()
def disconnect(self):
self.poly.disconnect_events()
self.fc[:, -1] = 1
self.collection.set_facecolors(self.fc)
self.canvas.draw_idle()
if __name__ == '__main__':
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
grid_size = 5
grid_x = np.tile(np.arange(grid_size), grid_size)
grid_y = np.repeat(np.arange(grid_size), grid_size)
pts = ax.scatter(grid_x, grid_y)
selector = SelectFromCollection(ax, pts)
print("Select points in the figure by enclosing them within a polygon.")
print("Press the 'esc' key to start a new polygon.")
print("Try holding the 'shift' key to move all of the vertices.")
print("Try holding the 'ctrl' key to move a single vertex.")
plt.show()
selector.disconnect()
# After figure is closed print the coordinates of the selected points
print('\nSelected points:')
print(selector.xys[selector.ind])
# %%
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.widgets.PolygonSelector`
# - `matplotlib.path.Path`
| SelectFromCollection |
python | hyperopt__hyperopt | hyperopt/pyll/base.py | {
"start": 24965,
"end": 35318
} | class ____:
"""Placeholder representing a garbage-collected value"""
def rec_eval(
expr,
deepcopy_inputs=False,
memo=None,
max_program_len=None,
memo_gc=True,
print_trace=False,
print_node_on_error=True,
):
"""
expr - pyll Apply instance to be evaluated
memo - optional dictionary of values to use for particular nodes
deepcopy_inputs - deepcopy inputs to every node prior to calling that
node's function on those inputs. If this leads to a different return
value, then some function (XXX add more complete DebugMode
functionality) in your graph is modifying its inputs and causing
mis-calculation. XXX: This is not a fully-functional DebugMode because
if the offender happens on account of the toposort order to be the last
user of said input, then it will not be detected as a potential
problem.
"""
if max_program_len == None:
max_program_len = DEFAULT_MAX_PROGRAM_LEN
if deepcopy_inputs not in (0, 1, False, True):
# -- I've been calling rec_eval(expr, memo) by accident a few times
# this error would have been appreciated.
raise ValueError("deepcopy_inputs should be bool", deepcopy_inputs)
node = as_apply(expr)
topnode = node
if memo is None:
memo = {}
else:
memo = dict(memo)
# -- hack for speed
# since the inputs are constant during rec_eval
# but not constant in general
node_inputs = {}
node_list = []
dfs(node, node_list, seqset=node_inputs)
# TODO: optimize dfs to not recurse past the items in memo
# this is especially important for evaluating Lambdas
# which cause rec_eval to recurse
#
# N.B. that Lambdas may expand the graph during the evaluation
# so that this iteration may be an incomplete
if memo_gc:
clients = {}
for aa in node_list:
clients.setdefault(aa, set())
for ii in node_inputs[aa]:
clients.setdefault(ii, set()).add(aa)
def set_memo(k, v):
assert v is not GarbageCollected
memo[k] = v
for ii in node_inputs[k]:
# -- if all clients of ii are already in the memo
# then we can free memo[ii] by replacing it
# with a dummy symbol
if all(iic in memo for iic in clients[ii]):
memo[ii] = GarbageCollected
else:
def set_memo(k, v):
memo[k] = v
todo = deque([topnode])
while todo:
if len(todo) > max_program_len:
raise RuntimeError("Probably infinite loop in document")
node = todo.pop()
if print_trace:
print("rec_eval:print_trace", len(todo), node.name)
if node in memo:
# -- we've already computed this, move on.
continue
# -- different kinds of nodes are treated differently:
if node.name == "switch":
# -- switch is the conditional evaluation node
switch_i_var = node.pos_args[0]
if switch_i_var in memo:
switch_i = memo[switch_i_var]
if isinstance(switch_i, np.ndarray):
switch_i = switch_i.item()
if isinstance(switch_i, int):
if switch_i < 0:
raise ValueError("switch pos must be positive int", switch_i)
else:
raise TypeError("switch argument was", switch_i)
rval_var = node.pos_args[switch_i + 1]
if rval_var in memo:
set_memo(node, memo[rval_var])
continue
else:
waiting_on = [rval_var]
else:
waiting_on = [switch_i_var]
elif isinstance(node, Literal):
# -- constants go straight into the memo
set_memo(node, node.obj)
continue
else:
# -- normal instruction-type nodes have inputs
waiting_on = [v for v in node_inputs[node] if v not in memo]
if waiting_on:
# -- Necessary inputs have yet to be evaluated.
# push the node back in the queue, along with the
# inputs it still needs
todo.append(node)
todo.extend(waiting_on)
else:
# -- not waiting on anything;
# this instruction can be evaluated.
args = _args = [memo[v] for v in node.pos_args]
kwargs = _kwargs = {k: memo[v] for (k, v) in node.named_args}
if memo_gc:
for aa in args + list(kwargs.values()):
assert aa is not GarbageCollected
if deepcopy_inputs:
args = copy.deepcopy(_args)
kwargs = copy.deepcopy(_kwargs)
try:
rval = scope._impls[node.name](*args, **kwargs)
except Exception as e:
if print_node_on_error:
print("=" * 80)
print("ERROR in rec_eval")
print("EXCEPTION", type(e), str(e))
print("NODE")
print(node) # -- typically a multi-line string
print("=" * 80)
raise
if isinstance(rval, Apply):
# -- if an instruction returns a Pyll apply node
# it means evaluate that too. Lambdas do this.
#
# XXX: consider if it is desirable, efficient, buggy
# etc. to keep using the same memo dictionary
foo = rec_eval(rval, deepcopy_inputs, memo, memo_gc=memo_gc)
set_memo(node, foo)
else:
set_memo(node, rval)
return memo[topnode]
############################################################################
############################################################################
@scope.define_pure
def pos_args(*args):
return args
@scope.define_pure
def identity(obj):
return obj
# -- We used to define these as Python functions in this file, but the operator
# module already provides them, is slightly more efficient about it. Since
# searchspaces uses the same convention, we can more easily map graphs back
# and forth and reduce the amount of code in both codebases.
scope.define_pure(operator.getitem)
scope.define_pure(operator.add)
scope.define_pure(operator.sub)
scope.define_pure(operator.mul)
try:
scope.define_pure(operator.div)
except AttributeError:
pass # No more operator.div in Python3, but truediv also exists since Python2.2
scope.define_pure(operator.truediv)
scope.define_pure(operator.floordiv)
scope.define_pure(operator.neg)
scope.define_pure(operator.eq)
scope.define_pure(operator.lt)
scope.define_pure(operator.le)
scope.define_pure(operator.gt)
scope.define_pure(operator.ge)
@scope.define_pure
def exp(a):
return np.exp(a)
@scope.define_pure
def log(a):
return np.log(a)
@scope.define_pure
def pow(a, b):
return a**b
@scope.define_pure
def sin(a):
return np.sin(a)
@scope.define_pure
def cos(a):
return np.cos(a)
@scope.define_pure
def tan(a):
return np.tan(a)
@scope.define_pure
def sum(x, axis=None):
if axis is None:
return np.sum(x)
else:
return np.sum(x, axis=axis)
@scope.define_pure
def sqrt(x):
return np.sqrt(x)
@scope.define_pure
def minimum(x, y):
return np.minimum(x, y)
@scope.define_pure
def maximum(x, y):
return np.maximum(x, y)
@scope.define_pure
def array_union1(args):
s = set()
for a in args:
s.update(a)
return np.asarray(sorted(s))
@scope.define_pure
def array_union(*args):
return array_union1(args)
@scope.define_pure
def asarray(a, dtype=None):
if dtype is None:
return np.asarray(a)
else:
return np.asarray(a, dtype=dtype)
@scope.define_pure
def str_join(s, seq):
return s.join(seq)
@scope.define_pure
def bincount(x, offset=0, weights=None, minlength: int = 0, p=None):
y = np.asarray(x, dtype="int")
# hack for pchoice, p is passed as [ np.repeat(p, obs.size) ],
# so scope.len(p) gives incorrect #dimensions, need to get just the first one
if p is not None and p.ndim == 2:
assert np.all(p == p[0])
minlength = len(p[0])
return np.bincount(y - offset, weights, minlength)
@scope.define_pure
def repeat(n_times, obj):
return [obj] * n_times
@scope.define
def call_method(obj, methodname, *args, **kwargs):
method = getattr(obj, methodname)
return method(*args, **kwargs)
@scope.define_pure
def call_method_pure(obj, methodname, *args, **kwargs):
method = getattr(obj, methodname)
return method(*args, **kwargs)
@scope.define_pure
def copy_call_method_pure(obj, methodname, *args, **kwargs):
# -- this method copies object before calling the method
# so that in the case where args and kwargs are not modified
# the call_method can be done in a no-side-effect way.
#
# It is a mistake to use this method when args or kwargs are modified
# by the call to method.
method = getattr(copy.copy(obj), methodname)
return method(*args, **kwargs)
@scope.define_pure
def switch(pos, *args):
# switch is an unusual expression, in that it affects control flow
# when executed with rec_eval. args are not all evaluated, only
# args[pos] is evaluated.
# raise RuntimeError('switch is not meant to be evaluated')
#
# .. However, in quick-evaluation schemes it is handy that this be defined
# as follows:
return args[pos]
def _kwswitch(kw, **kwargs):
"""conditional evaluation according to string value"""
# Get the index of the string in kwargs to use switch
keys, values = list(zip(*sorted(kwargs.items())))
match_idx = scope.call_method_pure(keys, "index", kw)
return scope.switch(match_idx, *values)
scope.kwswitch = _kwswitch
@scope.define_pure
def Raise(etype, *args, **kwargs):
raise etype(*args, **kwargs)
@scope.define_info(o_len=2)
def curtime(obj):
return time.time(), obj
@scope.define
def pdb_settrace(obj):
import pdb
pdb.set_trace()
return obj
| GarbageCollected |
python | html5lib__html5lib-python | html5lib/tests/tree_construction.py | {
"start": 813,
"end": 2876
} | class ____(pytest.Collector):
def __init__(self, name, parent=None, config=None, session=None, testdata=None):
super(TreeConstructionTest, self).__init__(name, parent, config, session)
self.testdata = testdata
def collect(self):
for treeName, treeAPIs in sorted(treeTypes.items()):
for x in itertools.chain(self._getParserTests(treeName, treeAPIs),
self._getTreeWalkerTests(treeName, treeAPIs)):
yield x
def _getParserTests(self, treeName, treeAPIs):
if treeAPIs is not None and "adapter" in treeAPIs:
return
for namespaceHTMLElements in (True, False):
if namespaceHTMLElements:
nodeid = "%s::parser::namespaced" % treeName
else:
nodeid = "%s::parser::void-namespace" % treeName
item = ParserTest.from_parent(self,
name=nodeid,
test=self.testdata,
treeClass=treeAPIs["builder"] if treeAPIs is not None else None,
namespaceHTMLElements=namespaceHTMLElements)
item.add_marker(getattr(pytest.mark, treeName))
item.add_marker(pytest.mark.parser)
if namespaceHTMLElements:
item.add_marker(pytest.mark.namespaced)
yield item
def _getTreeWalkerTests(self, treeName, treeAPIs):
nodeid = "%s::treewalker" % treeName
item = TreeWalkerTest.from_parent(self,
name=nodeid,
test=self.testdata,
treeAPIs=treeAPIs)
item.add_marker(getattr(pytest.mark, treeName))
item.add_marker(pytest.mark.treewalker)
yield item
def convertTreeDump(data):
return "\n".join(convert(3)(data).split("\n")[1:])
namespaceExpected = re.compile(r"^(\s*)<(\S+)>", re.M).sub
| TreeConstructionTest |
python | ray-project__ray | rllib/algorithms/tests/test_placement_groups.py | {
"start": 321,
"end": 743
} | class ____(Callback):
def on_step_end(self, iteration, trials, **info):
num_running = len([t for t in trials if t.status == Trial.RUNNING])
# All 3 trials (3 different learning rates) should be scheduled.
assert 3 == min(3, len(trials))
# Cannot run more than 2 at a time
# (due to different resource restrictions in the test cases).
assert num_running <= 2
| _TestCallback |
python | spack__spack | lib/spack/spack/vendor/jinja2/exceptions.py | {
"start": 2424,
"end": 4171
} | class ____(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(
self,
message: str,
lineno: int,
name: t.Optional[str] = None,
filename: t.Optional[str] = None,
) -> None:
super().__init__(message)
self.lineno = lineno
self.name = name
self.filename = filename
self.source: t.Optional[str] = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __str__(self) -> str:
# for translated errors we only return the message
if self.translated:
return t.cast(str, self.message)
# otherwise attach some stuff
location = f"line {self.lineno}"
name = self.filename or self.name
if name:
location = f'File "{name}", {location}'
lines = [t.cast(str, self.message), " " + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
pass
else:
lines.append(" " + line.strip())
return "\n".join(lines)
def __reduce__(self): # type: ignore
# https://bugs.python.org/issue1692335 Exceptions that take
# multiple required arguments have problems with pickling.
# Without this, raises TypeError: __init__() missing 1 required
# positional argument: 'lineno'
return self.__class__, (self.message, self.lineno, self.name, self.filename)
| TemplateSyntaxError |
python | getsentry__sentry | tests/sentry/incidents/endpoints/test_organization_alert_rule_index.py | {
"start": 3293,
"end": 5903
} | class ____(APITestCase):
__test__ = Abstract(__module__, __qualname__)
@cached_property
def organization(self):
return self.create_organization()
@cached_property
def project(self):
return self.create_project(organization=self.organization)
@cached_property
def user(self):
return self.create_user()
@cached_property
def alert_rule_dict(self):
return {
"aggregate": "count()",
"query": "",
"timeWindow": "300",
"resolveThreshold": 100,
"thresholdType": 0,
"triggers": [
{
"label": "critical",
"alertThreshold": 200,
"actions": [
{"type": "email", "targetType": "team", "targetIdentifier": self.team.id}
],
},
{
"label": "warning",
"alertThreshold": 150,
"actions": [
{"type": "email", "targetType": "team", "targetIdentifier": self.team.id},
{"type": "email", "targetType": "user", "targetIdentifier": self.user.id},
],
},
],
"projects": [self.project.slug],
"owner": self.user.id,
"name": "JustAValidTestRule",
}
@cached_property
def dynamic_alert_rule_dict(self):
return {
"aggregate": "count()",
"query": "",
"time_window": 30,
"detection_type": AlertRuleDetectionType.DYNAMIC,
"sensitivity": AlertRuleSensitivity.LOW,
"seasonality": AlertRuleSeasonality.AUTO,
"thresholdType": 0,
"triggers": [
{
"label": "critical",
"alertThreshold": 0,
"actions": [
{"type": "email", "targetType": "team", "targetIdentifier": self.team.id}
],
},
{
"label": "warning",
"alertThreshold": 0,
"actions": [
{"type": "email", "targetType": "team", "targetIdentifier": self.team.id},
{"type": "email", "targetType": "user", "targetIdentifier": self.user.id},
],
},
],
"projects": [self.project.slug],
"owner": self.user.id,
"name": "JustAValidTestRule",
}
| AlertRuleBase |
python | docker__docker-py | docker/types/services.py | {
"start": 23000,
"end": 25250
} | class ____(dict):
"""
Indicate whether a service or a job should be deployed as a replicated
or global service, and associated parameters
Args:
mode (string): Can be either ``replicated``, ``global``,
``replicated-job`` or ``global-job``
replicas (int): Number of replicas. For replicated services only.
concurrency (int): Number of concurrent jobs. For replicated job
services only.
"""
def __init__(self, mode, replicas=None, concurrency=None):
replicated_modes = ('replicated', 'replicated-job')
supported_modes = replicated_modes + ('global', 'global-job')
if mode not in supported_modes:
raise errors.InvalidArgument(
'mode must be either "replicated", "global", "replicated-job"'
' or "global-job"'
)
if mode not in replicated_modes:
if replicas is not None:
raise errors.InvalidArgument(
'replicas can only be used for "replicated" or'
' "replicated-job" mode'
)
if concurrency is not None:
raise errors.InvalidArgument(
'concurrency can only be used for "replicated-job" mode'
)
service_mode = self._convert_mode(mode)
self.mode = service_mode
self[service_mode] = {}
if replicas is not None:
if mode == 'replicated':
self[service_mode]['Replicas'] = replicas
if mode == 'replicated-job':
self[service_mode]['MaxConcurrent'] = concurrency or 1
self[service_mode]['TotalCompletions'] = replicas
@staticmethod
def _convert_mode(original_mode):
if original_mode == 'global-job':
return 'GlobalJob'
if original_mode == 'replicated-job':
return 'ReplicatedJob'
return original_mode
@property
def replicas(self):
if 'replicated' in self:
return self['replicated'].get('Replicas')
if 'ReplicatedJob' in self:
return self['ReplicatedJob'].get('TotalCompletions')
return None
| ServiceMode |
python | getsentry__sentry | tests/sentry/tasks/test_base.py | {
"start": 4850,
"end": 10018
} | class ____(Exception):
pass
@patch("sentry_sdk.capture_exception")
def test_retry_timeout_enabled_taskbroker(capture_exception) -> None:
@retry(timeouts=True)
def timeout_retry_task():
raise ProcessingDeadlineExceeded()
with pytest.raises(RetryTaskError):
timeout_retry_task()
assert capture_exception.call_count == 1
@patch("sentry.taskworker.retry.current_task")
@patch("sentry_sdk.capture_exception")
def test_retry_timeout_disabled_taskbroker(capture_exception, current_task) -> None:
@retry(timeouts=False)
def timeout_no_retry_task():
raise ProcessingDeadlineExceeded()
with pytest.raises(ProcessingDeadlineExceeded):
timeout_no_retry_task()
assert capture_exception.call_count == 0
assert current_task.retry.call_count == 0
@patch("sentry_sdk.capture_exception")
def test_retry_timeout_enabled(capture_exception) -> None:
@retry(timeouts=True)
def soft_timeout_retry_task():
raise ProcessingDeadlineExceeded()
with pytest.raises(RetryTaskError):
soft_timeout_retry_task()
assert capture_exception.call_count == 1
@patch("sentry.taskworker.retry.current_task")
@patch("sentry_sdk.capture_exception")
def test_retry_timeout_disabled(capture_exception, current_task) -> None:
current_task.retry.side_effect = ExpectedException("retry called")
@retry(on=(ValueError,), timeouts=False)
def soft_timeout_retry_task():
raise ProcessingDeadlineExceeded()
with pytest.raises(ProcessingDeadlineExceeded):
soft_timeout_retry_task()
assert capture_exception.call_count == 0
assert current_task.retry.call_count == 0
def test_instrumented_task_parameters() -> None:
registry = TaskRegistry()
namespace = registry.create_namespace("registertest")
@instrumented_task(
name="hello_task",
namespace=namespace,
retry=Retry(times=3, on=(RuntimeError,)),
processing_deadline_duration=60,
compression_type=CompressionType.ZSTD,
)
def hello_task():
pass
decorated = namespace.get("hello_task")
assert decorated
assert decorated.compression_type == CompressionType.ZSTD
assert decorated.retry
assert decorated.retry._times == 3
assert decorated.retry._allowed_exception_types == (RuntimeError,)
@patch("sentry.tasks.base.current_task")
def test_retry_raise_if_no_retries_false(mock_current_task):
mock_task_state = MagicMock(spec=CurrentTaskState)
mock_task_state.retries_remaining = False
mock_current_task.return_value = mock_task_state
@retry(on=(Exception,), raise_on_no_retries=False)
def task_that_raises_retry_error():
raise RetryTaskError("try again")
# No exception.
task_that_raises_retry_error()
mock_task_state.retries_remaining = True
with pytest.raises(RetryTaskError):
task_that_raises_retry_error()
def test_instrumented_task_with_alias_same_namespace() -> None:
assert test_tasks.contains("tests.tasks.test_base.primary_task")
assert task_with_alias("test") == "Task with alias test"
assert test_tasks.contains("tests.tasks.test_base.alias_task")
assert test_tasks.get("tests.tasks.test_base.alias_task")("test") == "Task with alias test"
def test_instrumented_task_with_alias_different_namespaces() -> None:
assert test_tasks.contains("tests.tasks.test_base.primary_task_primary_namespace")
task_result = task_with_alias_and_alias_namespace("test")
assert task_result == "Task with alias and alias namespace test"
assert exampletasks.contains("tests.tasks.test_base.alias_task_alias_namespace")
assert (
exampletasks.get("tests.tasks.test_base.alias_task_alias_namespace")("test")
== "Task with alias and alias namespace test"
)
@override_settings(SILO_MODE=SiloMode.REGION)
def test_instrumented_task_with_alias_silo_limit_call_region() -> None:
assert test_tasks.contains("tests.tasks.test_base.region_primary_task")
assert region_task_with_alias("test") == "Region task with alias test"
assert test_tasks.contains("tests.tasks.test_base.region_alias_task")
assert (
test_tasks.get("tests.tasks.test_base.region_alias_task")("test")
== "Region task with alias test"
)
assert test_tasks.contains("tests.tasks.test_base.control_primary_task")
with pytest.raises(SiloLimit.AvailabilityError):
control_task_with_alias("test")
assert test_tasks.contains("tests.tasks.test_base.control_alias_task")
with pytest.raises(SiloLimit.AvailabilityError):
test_tasks.get("tests.tasks.test_base.control_alias_task")("test")
@override_settings(SILO_MODE=SiloMode.CONTROL)
def test_instrumented_task_with_alias_silo_limit_call_control() -> None:
assert test_tasks.contains("tests.tasks.test_base.region_primary_task")
with pytest.raises(SiloLimit.AvailabilityError):
region_task_with_alias("test")
assert test_tasks.contains("tests.tasks.test_base.region_alias_task")
with pytest.raises(SiloLimit.AvailabilityError):
test_tasks.get("tests.tasks.test_base.region_alias_task")("test")
| ExpectedException |
python | getsentry__sentry | src/sentry/migrations/0984_authprovider_json_field.py | {
"start": 244,
"end": 2075
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("sentry", "0983_create_groupopenperiodactivity_table"),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[
mod.to_jsonb("sentry_authprovider", "config"),
mod.to_jsonb("sentry_authproviderreplica", "config"),
],
state_operations=[
migrations.AlterField(
model_name="authprovider",
name="config",
field=models.JSONField(default=dict),
),
migrations.AlterField(
model_name="authproviderreplica",
name="config",
field=models.JSONField(default=dict),
),
],
)
]
| Migration |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/dataprep.py | {
"start": 1969,
"end": 12236
} | class ____(BaseHook):
"""
Hook for connection with Dataprep API.
To get connection Dataprep with Airflow you need Dataprep token.
https://clouddataprep.com/documentation/api#section/Authentication
It should be added to the Connection in Airflow in JSON format.
"""
conn_name_attr = "dataprep_conn_id"
default_conn_name = "google_cloud_dataprep_default"
conn_type = "dataprep"
hook_name = "Google Dataprep"
def __init__(self, dataprep_conn_id: str = default_conn_name, api_version: str = "v4", **kwargs) -> None:
super().__init__(**kwargs)
self.dataprep_conn_id = dataprep_conn_id
self.api_version = api_version
conn = self.get_connection(self.dataprep_conn_id)
extras = conn.extra_dejson
self._token = _get_field(extras, "token")
self._base_url = _get_field(extras, "base_url") or "https://api.clouddataprep.com"
@property
def _headers(self) -> dict[str, str]:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self._token}",
}
return headers
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def get_jobs_for_job_group(self, job_id: int) -> dict[str, Any]:
"""
Get information about the batch jobs within a Cloud Dataprep job.
:param job_id: The ID of the job that will be fetched
"""
endpoint_path = f"{self.api_version}/jobGroups/{job_id}/jobs"
url: str = urljoin(self._base_url, endpoint_path)
response = requests.get(url, headers=self._headers)
self._raise_for_status(response)
return response.json()
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def get_job_group(self, job_group_id: int, embed: str, include_deleted: bool) -> dict[str, Any]:
"""
Get the specified job group.
A job group is a job that is executed from a specific node in a flow.
:param job_group_id: The ID of the job that will be fetched
:param embed: Comma-separated list of objects to pull in as part of the response
:param include_deleted: if set to "true", will include deleted objects
"""
params: dict[str, Any] = {"embed": embed, "includeDeleted": include_deleted}
endpoint_path = f"{self.api_version}/jobGroups/{job_group_id}"
url: str = urljoin(self._base_url, endpoint_path)
response = requests.get(url, headers=self._headers, params=params)
self._raise_for_status(response)
return response.json()
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def run_job_group(self, body_request: dict) -> dict[str, Any]:
"""
Create a ``jobGroup``, which launches the specified job as the authenticated user.
This performs the same action as clicking on the Run Job button in the application.
To get recipe_id please follow the Dataprep API documentation
https://clouddataprep.com/documentation/api#operation/runJobGroup.
:param body_request: The identifier for the recipe you would like to run.
"""
endpoint_path = f"{self.api_version}/jobGroups"
url: str = urljoin(self._base_url, endpoint_path)
response = requests.post(url, headers=self._headers, data=json.dumps(body_request))
self._raise_for_status(response)
return response.json()
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def create_flow(self, *, body_request: dict) -> dict:
"""
Create flow.
:param body_request: Body of the POST request to be sent.
For more details check https://clouddataprep.com/documentation/api#operation/createFlow
"""
endpoint = f"/{self.api_version}/flows"
url: str = urljoin(self._base_url, endpoint)
response = requests.post(url, headers=self._headers, data=json.dumps(body_request))
self._raise_for_status(response)
return response.json()
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def copy_flow(
self, *, flow_id: int, name: str = "", description: str = "", copy_datasources: bool = False
) -> dict:
"""
Create a copy of the provided flow id, as well as all contained recipes.
:param flow_id: ID of the flow to be copied
:param name: Name for the copy of the flow
:param description: Description of the copy of the flow
:param copy_datasources: Bool value to define should copies of data inputs be made or not.
"""
endpoint_path = f"{self.api_version}/flows/{flow_id}/copy"
url: str = urljoin(self._base_url, endpoint_path)
body_request = {
"name": name,
"description": description,
"copyDatasources": copy_datasources,
}
response = requests.post(url, headers=self._headers, data=json.dumps(body_request))
self._raise_for_status(response)
return response.json()
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def delete_flow(self, *, flow_id: int) -> None:
"""
Delete the flow with the provided id.
:param flow_id: ID of the flow to be copied
"""
endpoint_path = f"{self.api_version}/flows/{flow_id}"
url: str = urljoin(self._base_url, endpoint_path)
response = requests.delete(url, headers=self._headers)
self._raise_for_status(response)
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def run_flow(self, *, flow_id: int, body_request: dict) -> dict:
"""
Run the flow with the provided id copy of the provided flow id.
:param flow_id: ID of the flow to be copied
:param body_request: Body of the POST request to be sent.
"""
endpoint = f"{self.api_version}/flows/{flow_id}/run"
url: str = urljoin(self._base_url, endpoint)
response = requests.post(url, headers=self._headers, data=json.dumps(body_request))
self._raise_for_status(response)
return response.json()
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def get_job_group_status(self, *, job_group_id: int) -> JobGroupStatuses:
"""
Check the status of the Dataprep task to be finished.
:param job_group_id: ID of the job group to check
"""
endpoint = f"/{self.api_version}/jobGroups/{job_group_id}/status"
url: str = urljoin(self._base_url, endpoint)
response = requests.get(url, headers=self._headers)
self._raise_for_status(response)
return response.json()
def _raise_for_status(self, response: requests.models.Response) -> None:
try:
response.raise_for_status()
except HTTPError:
self.log.error(response.json().get("exception"))
raise
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def create_imported_dataset(self, *, body_request: dict) -> dict:
"""
Create imported dataset.
:param body_request: Body of the POST request to be sent.
For more details check https://clouddataprep.com/documentation/api#operation/createImportedDataset
"""
endpoint = f"/{self.api_version}/importedDatasets"
url: str = urljoin(self._base_url, endpoint)
response = requests.post(url, headers=self._headers, data=json.dumps(body_request))
self._raise_for_status(response)
return response.json()
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def create_wrangled_dataset(self, *, body_request: dict) -> dict:
"""
Create wrangled dataset.
:param body_request: Body of the POST request to be sent.
For more details check
https://clouddataprep.com/documentation/api#operation/createWrangledDataset
"""
endpoint = f"/{self.api_version}/wrangledDatasets"
url: str = urljoin(self._base_url, endpoint)
response = requests.post(url, headers=self._headers, data=json.dumps(body_request))
self._raise_for_status(response)
return response.json()
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def create_output_object(self, *, body_request: dict) -> dict:
"""
Create output.
:param body_request: Body of the POST request to be sent.
For more details check
https://clouddataprep.com/documentation/api#operation/createOutputObject
"""
endpoint = f"/{self.api_version}/outputObjects"
url: str = urljoin(self._base_url, endpoint)
response = requests.post(url, headers=self._headers, data=json.dumps(body_request))
self._raise_for_status(response)
return response.json()
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def create_write_settings(self, *, body_request: dict) -> dict:
"""
Create write settings.
:param body_request: Body of the POST request to be sent.
For more details check
https://clouddataprep.com/documentation/api#tag/createWriteSetting
"""
endpoint = f"/{self.api_version}/writeSettings"
url: str = urljoin(self._base_url, endpoint)
response = requests.post(url, headers=self._headers, data=json.dumps(body_request))
self._raise_for_status(response)
return response.json()
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def delete_imported_dataset(self, *, dataset_id: int) -> None:
"""
Delete imported dataset.
:param dataset_id: ID of the imported dataset for removal.
"""
endpoint = f"/{self.api_version}/importedDatasets/{dataset_id}"
url: str = urljoin(self._base_url, endpoint)
response = requests.delete(url, headers=self._headers)
self._raise_for_status(response)
| GoogleDataprepHook |
python | kamyu104__LeetCode-Solutions | Python/symmetric-tree.py | {
"start": 942,
"end": 1478
} | class ____(object):
# @param root, a tree node
# @return a boolean
def isSymmetric(self, root):
if root is None:
return True
return self.isSymmetricRecu(root.left, root.right)
def isSymmetricRecu(self, left, right):
if left is None and right is None:
return True
if left is None or right is None or left.val != right.val:
return False
return self.isSymmetricRecu(left.left, right.right) and self.isSymmetricRecu(left.right, right.left)
| Solution2 |
python | python-pillow__Pillow | src/PIL/BlpImagePlugin.py | {
"start": 1454,
"end": 1544
} | class ____(IntEnum):
UNCOMPRESSED = 1
DXT = 2
UNCOMPRESSED_RAW_BGRA = 3
| Encoding |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1573816,
"end": 1574019
} | class ____(VegaLiteSchema):
"""Vector10string schema wrapper."""
_schema = {"$ref": "#/definitions/Vector10<string>"}
def __init__(self, *args):
super().__init__(*args)
| Vector10string |
python | streamlit__streamlit | lib/tests/streamlit/data_mocks/ray_mocks.py | {
"start": 731,
"end": 1490
} | class ____:
"""This is dummy Dataset class, which imitates ray.data.dataset.Dataset class
for testing purposes. We use this to make sure that our code does a special handling
if it detects a Ray Datasets.
This allows testing of the functionality without having the library installed,
but it won't capture changes in the API of the library. This requires
integration tests.
"""
__module__ = "ray.data.dataset"
def __init__(self, data: pd.DataFrame):
self._data: pd.DataFrame = data
def to_pandas(self) -> pd.DataFrame:
return self._data
def limit(self, n: int) -> Dataset:
"""Returns the top n element of a mock version of Ray Dataset."""
return Dataset(self._data.head(n))
| Dataset |
python | django__django | django/db/models/fields/json.py | {
"start": 701,
"end": 5177
} | class ____(CheckFieldDefaultMixin, Field):
empty_strings_allowed = False
description = _("A JSON object")
default_error_messages = {
"invalid": _("Value must be valid JSON."),
}
_default_hint = ("dict", "{}")
def __init__(
self,
verbose_name=None,
name=None,
encoder=None,
decoder=None,
**kwargs,
):
if encoder and not callable(encoder):
raise ValueError("The encoder parameter must be a callable object.")
if decoder and not callable(decoder):
raise ValueError("The decoder parameter must be a callable object.")
self.encoder = encoder
self.decoder = decoder
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
databases = kwargs.get("databases") or []
errors.extend(self._check_supported(databases))
return errors
def _check_supported(self, databases):
errors = []
for db in databases:
if not router.allow_migrate_model(db, self.model):
continue
connection = connections[db]
if (
self.model._meta.required_db_vendor
and self.model._meta.required_db_vendor != connection.vendor
):
continue
if not (
"supports_json_field" in self.model._meta.required_db_features
or connection.features.supports_json_field
):
errors.append(
checks.Error(
"%s does not support JSONFields." % connection.display_name,
obj=self.model,
id="fields.E180",
)
)
return errors
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.encoder is not None:
kwargs["encoder"] = self.encoder
if self.decoder is not None:
kwargs["decoder"] = self.decoder
return name, path, args, kwargs
def from_db_value(self, value, expression, connection):
if value is None:
return value
# Some backends (SQLite at least) extract non-string values in their
# SQL datatypes.
if isinstance(expression, KeyTransform) and not isinstance(value, str):
return value
try:
return json.loads(value, cls=self.decoder)
except json.JSONDecodeError:
return value
def get_internal_type(self):
return "JSONField"
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return connection.ops.adapt_json_value(value, self.encoder)
def get_db_prep_save(self, value, connection):
# This slightly involved logic is to allow for `None` to be used to
# store SQL `NULL` while `Value(None, JSONField())` can be used to
# store JSON `null` while preventing compilable `as_sql` values from
# making their way to `get_db_prep_value`, which is what the `super()`
# implementation does.
if value is None:
return value
if (
isinstance(value, expressions.Value)
and value.value is None
and isinstance(value.output_field, JSONField)
):
value = None
return super().get_db_prep_save(value, connection)
def get_transform(self, name):
transform = super().get_transform(name)
if transform:
return transform
return KeyTransformFactory(name)
def validate(self, value, model_instance):
super().validate(value, model_instance)
try:
json.dumps(value, cls=self.encoder)
except TypeError:
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
def value_to_string(self, obj):
return self.value_from_object(obj)
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.JSONField,
"encoder": self.encoder,
"decoder": self.decoder,
**kwargs,
}
)
@deconstructible(path="django.db.models.JSONNull")
| JSONField |
python | huggingface__transformers | src/transformers/models/bartpho/tokenization_bartpho.py | {
"start": 1120,
"end": 14295
} | class ____(SentencePieceBackend):
"""
Adapted from [`XLMRobertaTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file. This vocabulary is the pre-trained SentencePiece model available from the
multilingual XLM-RoBERTa, also used in mBART, consisting of 250K types.
monolingual_vocab_file (`str`):
Path to the monolingual vocabulary file. This monolingual vocabulary consists of Vietnamese-specialized
types extracted from the multilingual vocabulary vocab_file of 250K types.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
is_fast = False
def __init__(
self,
vocab_file,
monolingual_vocab_file,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
sp_model_kwargs: Optional[dict[str, Any]] = None,
**kwargs,
) -> None:
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
self.monolingual_vocab_file = monolingual_vocab_file
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
self.fairseq_tokens_to_ids = {}
cnt = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(token) not in self.fairseq_tokens_to_ids:
self.fairseq_tokens_to_ids[str(token)] = cnt
cnt += 1
with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
for line in f:
token = line.strip().split()[0]
self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
if str(mask_token) not in self.fairseq_tokens_to_ids:
self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
# Prepare sp_model_kwargs for parent class
if sp_model_kwargs is not None:
kwargs["sp_model_kwargs"] = sp_model_kwargs
# Call parent init (which will load sp_model)
super().__init__(
vocab_file=vocab_file,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
**kwargs,
)
self._align_added_tokens_with_fairseq_vocab()
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An BARTPho sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. BARTPho does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def vocab_size(self):
"""Override to return fairseq vocab size instead of sp_model vocab size"""
return len(self.fairseq_ids_to_tokens)
def get_vocab(self):
"""Override to use fairseq vocabulary"""
vocab = dict(self.fairseq_tokens_to_ids)
if hasattr(self, "_added_tokens_encoder"):
for token, idx in self._added_tokens_encoder.items():
if token not in vocab:
vocab[token] = idx
return vocab
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the fairseq vocab."""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _convert_token_to_id_with_added_voc(self, token):
"""Override to use fairseq vocab instead of sp_model vocab."""
if token is None:
return None
if token in self._added_tokens_encoder:
return self._added_tokens_encoder[token]
return self._convert_token_to_id(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the fairseq vocab."""
return self.fairseq_ids_to_tokens[index]
def _align_added_tokens_with_fairseq_vocab(self):
"""
The slow tokenizer base class populates `_added_tokens_*` using SentencePiece ids. Remap those entries so that
every token present in the reduced fairseq dictionary uses the same ids everywhere, otherwise conversions and
special-token setters observe two different vocabularies.
"""
if not hasattr(self, "_added_tokens_decoder") or not hasattr(self, "_added_tokens_encoder"):
return
remapped_decoder: dict[int, AddedToken] = {}
for original_id, token_obj in self._added_tokens_decoder.items():
token = token_obj.content
new_id = self.fairseq_tokens_to_ids.get(token, original_id)
remapped_decoder[new_id] = token_obj
self._added_tokens_decoder = remapped_decoder
self._added_tokens_encoder = {token.content: idx for idx, token in remapped_decoder.items()}
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
out_monolingual_vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
out_monolingual_vocab_file
) and os.path.isfile(self.monolingual_vocab_file):
copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
elif not os.path.isfile(self.monolingual_vocab_file):
with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(token)} \n")
return out_vocab_file, out_monolingual_vocab_file
__all__ = ["BartphoTokenizer"]
| BartphoTokenizer |
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/dynamic_rendezvous.py | {
"start": 17095,
"end": 18074
} | class ____(ABC):
"""Execute rendezvous operations."""
@abstractmethod
def run(
self,
state_handler: Callable[[_RendezvousContext, float], _Action],
deadline: float,
update_deadline: Callable[[timedelta], float] | None = None,
) -> None:
"""Execute a rendezvous operation.
An operation is run inside a state machine and is expected to transition
the rendezvous from one state to another.
Args:
state_handler:
A callable that is expected to return the next state transition
action based on the current state of the rendezvous.
deadline:
The time, in seconds, at which the operation will be considered
timed-out.
update_deadline:
Function to generate a new operation deadline if the current
node may participate in the next rendezvous.
"""
| _RendezvousOpExecutor |
python | coleifer__peewee | tests/sqliteq.py | {
"start": 7081,
"end": 7373
} | class ____(BaseTestQueueDatabase, BaseTestCase):
database_config = {'use_gevent': True}
n_rows = 10
n_threads = 40
def create_thread(self, fn, *args):
return gevent.Greenlet(fn, *args)
def create_event(self):
return GreenEvent()
| TestThreadedDatabaseGreenlets |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-potholes-that-can-be-fixed.py | {
"start": 1450,
"end": 2062
} | class ____(object):
def maxPotholes(self, road, budget):
"""
:type road: str
:type budget: int
:rtype: int
"""
ls = []
l = 0
for i in xrange(len(road)):
l += 1
if i+1 == len(road) or road[i+1] != road[i]:
if road[i] == 'x':
ls.append(l)
l = 0
ls.sort()
result = 0
for l in reversed(ls):
c = min(l+1, budget)
if c-1 <= 0:
break
result += c-1
budget -= c
return result
| Solution2 |
python | huggingface__transformers | tests/models/falcon/test_modeling_falcon.py | {
"start": 1993,
"end": 7902
} | class ____(unittest.TestCase):
@slow
def test_lm_generate_falcon(self):
tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
model.eval()
model.to(torch_device)
inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
EXPECTED_OUTPUT = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
output_str = tokenizer.batch_decode(output_ids)[0]
self.assertEqual(output_str, EXPECTED_OUTPUT)
@slow
@require_bitsandbytes
def test_lm_generate_falcon_11b(self):
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-11B", padding_side="left")
model = FalconForCausalLM.from_pretrained(
"tiiuae/falcon-11B",
device_map={"": torch_device},
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
model.eval()
inputs = tokenizer(
"Two roads diverged in a yellow wood,", return_tensors="pt", return_token_type_ids=False
).to(torch_device)
EXPECTED_OUTPUT = "Two roads diverged in a yellow wood,\nAnd sorry I could not travel both\n"
output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=9)
output_str = tokenizer.batch_decode(output_ids)[0]
self.assertEqual(output_str, EXPECTED_OUTPUT)
@slow
def test_lm_generation_big_models(self):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
tokenizer = AutoTokenizer.from_pretrained(repo)
model = FalconForCausalLM.from_pretrained(repo)
model.eval()
model.to(torch_device)
inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**inputs, do_sample=False, max_new_tokens=4)
model.generate(**inputs, do_sample=True, max_new_tokens=4)
model.generate(**inputs, num_beams=2, max_new_tokens=4)
@slow
def test_lm_generation_use_cache(self):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
tokenizer = AutoTokenizer.from_pretrained(repo)
model = FalconForCausalLM.from_pretrained(repo)
model.eval()
model.to(device=torch_device)
inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
# Test results are the same with and without cache
outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
@require_bitsandbytes
@slow
def test_batched_generation(self):
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b", padding_side="left")
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(
"tiiuae/falcon-7b",
device_map={"": torch_device},
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)
test_text = "A sequence: 1, 2" # should generate the rest of the sequence
unpadded_inputs = tokenizer([test_text], return_tensors="pt").to(f"{torch_device}:0")
unpadded_gen_out = model.generate(**unpadded_inputs, max_new_tokens=20)
unpadded_gen_text = tokenizer.batch_decode(unpadded_gen_out, skip_special_tokens=True)
dummy_text = "This is a longer text " * 2 # forces left-padding on `test_text`
padded_inputs = tokenizer([test_text, dummy_text], return_tensors="pt", padding=True).to(f"{torch_device}:0")
padded_gen_out = model.generate(**padded_inputs, max_new_tokens=20)
padded_gen_text = tokenizer.batch_decode(padded_gen_out, skip_special_tokens=True)
expected_output = "A sequence: 1, 2, 3, 4, 5, 6, 7, 8, "
self.assertLess(unpadded_inputs.input_ids.shape[-1], padded_inputs.input_ids.shape[-1]) # left-padding exists
self.assertEqual(unpadded_gen_text[0], expected_output)
self.assertEqual(padded_gen_text[0], expected_output)
@slow
def test_falcon_alibi_sdpa_matches_eager(self):
input_ids = torch.randint(0, 1000, (5, 20))
config = FalconConfig(
vocab_size=1000,
hidden_size=64,
num_hidden_layers=2,
num_attention_heads=4,
new_decoder_architecture=True,
alibi=True,
)
falcon = FalconForCausalLM(config)
falcon = falcon.eval()
with torch.no_grad():
# output_attentions=True dispatches to eager path
falcon_output_eager = falcon(input_ids, output_attentions=True)[0]
falcon_output_sdpa = falcon(input_ids)[0]
torch.testing.assert_close(falcon_output_eager, falcon_output_sdpa, rtol=1e-3, atol=1e-3)
| FalconLanguageGenerationTest |
python | Lightning-AI__lightning | src/lightning/fabric/strategies/model_parallel.py | {
"start": 14351,
"end": 26647
} | class ____(AbstractContextManager):
def __init__(self, module: Module, enabled: bool) -> None:
self._module = module
self._enabled = enabled
def _set_requires_grad_sync(self, requires_grad_sync: bool) -> None:
from torch.distributed._composable.fsdp import FSDPModule
for mod in self._module.modules():
if isinstance(mod, FSDPModule):
mod.set_requires_gradient_sync(requires_grad_sync, recurse=False)
def __enter__(self) -> None:
self._set_requires_grad_sync(not self._enabled)
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self._set_requires_grad_sync(self._enabled)
def _save_checkpoint(
path: Path,
state: dict[str, Union[Module, Optimizer, Any]],
full_state_dict: bool,
rank: int,
filter: Optional[dict[str, Callable[[str, Any], bool]]] = None,
) -> None:
if path.is_dir() and full_state_dict and not _is_sharded_checkpoint(path):
raise IsADirectoryError(f"The checkpoint path exists and is a directory: {path}")
modules = [module for module in state.values() if _has_dtensor_modules(module)]
if len(modules) == 0:
raise ValueError(
"Could not find a distributed model in the provided checkpoint state. Please provide the model as"
" part of the state like so: `save_checkpoint(..., state={'model': model, ...})`. Make sure"
" you set up the model (and optimizers if any) through the strategy before saving the checkpoint."
)
if len(modules) > 1:
raise ValueError(
"Found multiple distributed models in the given state. Saving distributed checkpoints is"
" currently limited to a single model per checkpoint. To save multiple models, call the"
" save method for each model separately with a different path."
)
module = modules[0]
from torch.distributed.checkpoint.state_dict import StateDictOptions, get_model_state_dict, get_optimizer_state_dict
state_dict_options = StateDictOptions(full_state_dict=full_state_dict, cpu_offload=True)
# replace the modules and optimizer objects in the state with their local state dict
# and separate the user's metadata
converted_state: dict[str, Any] = {}
metadata: dict[str, Any] = {}
for key, obj in state.items():
converted: Any
if isinstance(obj, Module):
converted = get_model_state_dict(obj, options=state_dict_options)
target_dict = converted_state
elif isinstance(obj, Optimizer):
converted = get_optimizer_state_dict(module, obj, options=state_dict_options)
target_dict = converted_state
else: # everything not a module or optimizer is considered metadata
converted = obj.state_dict() if isinstance(obj, _Stateful) else obj
target_dict = metadata
_apply_filter(key, filter or {}, converted, target_dict)
if full_state_dict:
if _is_sharded_checkpoint(path):
shutil.rmtree(path)
converted_state.update(metadata)
if rank == 0:
torch.save(converted_state, path)
else:
if path.is_file():
path.unlink()
path.mkdir(parents=True, exist_ok=True)
_distributed_checkpoint_save(converted_state, path)
if rank == 0:
torch.save(metadata, path / _METADATA_FILENAME)
def _load_checkpoint(
path: Path,
state: dict[str, Union[Module, Optimizer, Any]],
strict: bool = True,
optimizer_states_from_list: bool = False,
weights_only: Optional[bool] = None,
) -> dict[str, Any]:
from torch.distributed.checkpoint.state_dict import (
StateDictOptions,
get_model_state_dict,
get_optimizer_state_dict,
set_optimizer_state_dict,
)
modules = {key: module for key, module in state.items() if _has_dtensor_modules(module)}
if len(modules) == 0:
raise ValueError(
"Could not find a distributed model in the provided checkpoint state. Please provide the model as"
" part of the state like so: `load_checkpoint(..., state={'model': model, ...})`. Make sure"
" you set up the model (and optimizers if any) through the strategy before loading the checkpoint."
)
optimizers = {key: optim for key, optim in state.items() if isinstance(optim, Optimizer)}
if len(modules) > 1:
raise ValueError(
"Found multiple distributed models in the given state. Loading distributed checkpoints is"
" currently limited to a single model per checkpoint. To load multiple models, call the"
" load method for each model separately with a different path."
)
module_key, module = list(modules.items())[0]
if _is_sharded_checkpoint(path):
state_dict_options = StateDictOptions(cpu_offload=True)
module_state = {module_key: get_model_state_dict(module)}
_distributed_checkpoint_load(module_state, path)
module.load_state_dict(module_state[module_key], strict=strict)
# the optimizer states must be loaded separately
for optim_key, optim in optimizers.items():
optim_state = {optim_key: get_optimizer_state_dict(module, optim)}
_distributed_checkpoint_load(optim_state, path)
set_optimizer_state_dict(module, optim, optim_state_dict=optim_state[optim_key], options=state_dict_options)
# Load metadata (anything not a module or optimizer)
metadata = torch.load(path / _METADATA_FILENAME, weights_only=weights_only)
requested_metadata_keys = state.keys() - modules.keys() - optimizers.keys()
_validate_keys_for_strict_loading(requested_metadata_keys, metadata.keys(), strict=strict)
for key in requested_metadata_keys:
if key not in metadata:
continue
state[key] = metadata.pop(key)
# return the remaining metadata that wasn't requested as part of `state`
return metadata
if _is_full_checkpoint(path):
checkpoint = torch.load(path, mmap=True, map_location="cpu", weights_only=weights_only)
_load_raw_module_state(checkpoint.pop(module_key), module, strict=strict)
state_dict_options = StateDictOptions(
broadcast_from_rank0=True,
full_state_dict=True,
strict=strict,
)
for optimizer_idx, (optimizer_name, optimizer) in enumerate(optimizers.items()):
if optimizer_states_from_list:
# This code path is only used by `lightning.pytorch`, which saves optimizer states as a list
# rather than individual states at the top level.
optimizer_state = checkpoint["optimizer_states"][optimizer_idx]
else:
optimizer_state = checkpoint.pop(optimizer_name)
optimizer_state = _rekey_optimizer_state_if_needed(optimizer_state, module)
set_optimizer_state_dict(
module,
optimizer,
optim_state_dict=optimizer_state,
options=state_dict_options,
)
requested_metadata_keys = state.keys() - modules.keys() - optimizers.keys()
_validate_keys_for_strict_loading(requested_metadata_keys, checkpoint.keys(), strict=strict)
# Load metadata (anything not a module or optimizer)
_move_state_into(source=checkpoint, destination=state, keys=requested_metadata_keys)
# return the remaining metadata that wasn't requested as part of `state`
return checkpoint
raise ValueError(
f"The path {str(path)!r} does not point to a valid checkpoint. Make sure the path points to either a"
" directory with distributed checkpoint shards, or a single file with a full checkpoint."
)
def _setup_device_mesh(
data_parallel_size: int,
tensor_parallel_size: int,
world_size: int,
device: torch.device,
) -> "DeviceMesh":
from torch.distributed.device_mesh import init_device_mesh
if data_parallel_size * tensor_parallel_size != world_size:
raise RuntimeError(
f"The sizes `data_parallel_size={data_parallel_size}` and"
f" `tensor_parallel_size={tensor_parallel_size}` multiplied should equal the world size"
f" ({world_size})."
)
return init_device_mesh(
device_type=device.type,
mesh_shape=(data_parallel_size, tensor_parallel_size),
mesh_dim_names=("data_parallel", "tensor_parallel"),
)
def _has_dtensor_modules(module: object) -> TypeGuard[Module]:
from torch.distributed._tensor import DTensor
return isinstance(module, Module) and any(isinstance(t, DTensor) for t in module.parameters())
def _load_raw_module_state_from_path(path: Path, module: Module, world_size: int, strict: bool = True) -> None:
"""Loads the state dict from a file path into the FSDP module."""
if not _is_full_checkpoint(path):
raise ValueError(
"Failed to load checkpoint directly into the model. The given path must be a single file containing the"
f" full state dict: {path}"
)
# Use `lazy_load`/`mmap` instead to avoid storing a copy of the full checkpoint per rank
state_dict = torch.load(path, mmap=True, map_location="cpu") if _TORCH_GREATER_EQUAL_2_3 else _lazy_load(path)
_load_raw_module_state(state_dict=state_dict, module=module, world_size=world_size, strict=strict)
def _load_raw_module_state(
state_dict: dict[str, Any], module: Module, world_size: int = 1, strict: bool = True
) -> None:
"""Loads the state dict into the module by gathering all weights first and then and writing back to each shard."""
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
if _has_dtensor_modules(module):
from torch.distributed.checkpoint.state_dict import StateDictOptions, set_model_state_dict
state_dict_options = StateDictOptions(
broadcast_from_rank0=True,
full_state_dict=True,
# must be set False to allow loading each param separately below
strict=False,
)
for submodule_name, submodule in module.named_modules():
for param_name, _ in _named_parameters_and_buffers_to_load(submodule):
full_param_name = f"{submodule_name}{'.' if submodule_name else ''}{param_name}"
if full_param_name not in state_dict:
if not strict:
continue
raise KeyError(
f"The model contains a key '{full_param_name}' that does not exist in the loaded checkpoint."
" To disable strict loading, set `strict=False`."
)
local_state_dict = {param_name: state_dict[full_param_name]}
set_model_state_dict(submodule, local_state_dict, options=state_dict_options)
elif isinstance(module, FSDP):
with _get_full_state_dict_context(module, world_size=world_size, rank0_only=False):
module.load_state_dict(state_dict, strict=strict)
else:
module.load_state_dict(state_dict, strict=strict)
def _named_parameters_and_buffers_to_load(module: Module) -> Generator:
"""Returns parameters and buffers, with non-persistent buffers excluded."""
for param_name, param in itertools.chain(
module.named_buffers(recurse=False),
module.named_parameters(recurse=False),
):
if param_name in module._non_persistent_buffers_set:
continue
yield param_name, param
def _rekey_optimizer_state_if_needed(optimizer_state_dict: dict[str, Any], module: Module) -> dict[str, Any]:
"""Handles the case where the optimizer state is saved from a normal optimizer and converts the keys to parameter
names."""
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import OptimStateKeyType
if isinstance(list(optimizer_state_dict["state"].keys())[0], int):
optimizer_state_dict = FSDP.rekey_optim_state_dict(optimizer_state_dict, OptimStateKeyType.PARAM_NAME, module)
return optimizer_state_dict
| _FSDPNoSync |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_blank03.py | {
"start": 315,
"end": 1382
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_blank03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [44253568, 44269952]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.show_blanks_as("span")
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | hyperopt__hyperopt | hyperopt/rdists.py | {
"start": 5538,
"end": 7005
} | class ____:
"""Stats for Y = q * round(X / q) where X ~ N(mu, sigma)"""
def __init__(self, mu, sigma, q):
self.mu, self.sigma = list(map(float, (mu, sigma)))
self.q = q
# -- distfn for using the CDF
self._norm_logcdf = scipy.stats.norm(loc=mu, scale=sigma).logcdf
def in_domain(self, x):
return np.isclose(x, safe_int_cast(np.round(x / self.q)) * self.q)
def pmf(self, x):
return np.exp(self.logpmf(x))
def logpmf(self, x):
x1 = np.atleast_1d(x)
in_domain = self.in_domain(x1)
rval = np.zeros_like(x1, dtype=float) - np.inf
x_in_domain = x1[in_domain]
ubound = x_in_domain + self.q * 0.5
lbound = x_in_domain - self.q * 0.5
# -- reflect intervals right of mu to other side
# for more accurate calculation
flip = lbound > self.mu
tmp = lbound[flip].copy()
lbound[flip] = self.mu - (ubound[flip] - self.mu)
ubound[flip] = self.mu - (tmp - self.mu)
assert np.all(ubound > lbound)
a = self._norm_logcdf(ubound)
b = self._norm_logcdf(lbound)
rval[in_domain] = a + np.log1p(-np.exp(b - a))
if isinstance(x, np.ndarray):
return rval
return float(rval[0])
def rvs(self, size=()):
x = mtrand.normal(loc=self.mu, scale=self.sigma, size=size)
rval = safe_int_cast(np.round(x / self.q)) * self.q
return rval
| qnormal_gen |
python | sympy__sympy | sympy/functions/elementary/trigonometric.py | {
"start": 60913,
"end": 64414
} | class ____(ReciprocalTrigonometricFunction):
"""
The cosecant function.
Returns the cosecant of x (measured in radians).
Explanation
===========
See :func:`sin` for notes about automatic evaluation.
Examples
========
>>> from sympy import csc
>>> from sympy.abc import x
>>> csc(x**2).diff(x)
-2*x*cot(x**2)*csc(x**2)
>>> csc(1).diff(x)
0
See Also
========
sympy.functions.elementary.trigonometric.sin
sympy.functions.elementary.trigonometric.cos
sympy.functions.elementary.trigonometric.sec
sympy.functions.elementary.trigonometric.tan
sympy.functions.elementary.trigonometric.cot
sympy.functions.elementary.trigonometric.asin
sympy.functions.elementary.trigonometric.acsc
sympy.functions.elementary.trigonometric.acos
sympy.functions.elementary.trigonometric.asec
sympy.functions.elementary.trigonometric.atan
sympy.functions.elementary.trigonometric.acot
sympy.functions.elementary.trigonometric.atan2
References
==========
.. [1] https://en.wikipedia.org/wiki/Trigonometric_functions
.. [2] https://dlmf.nist.gov/4.14
.. [3] https://functions.wolfram.com/ElementaryFunctions/Csc
"""
_reciprocal_of = sin
_is_odd = True
def period(self, symbol=None):
return self._period(symbol)
def _eval_rewrite_as_sin(self, arg, **kwargs):
return (1/sin(arg))
def _eval_rewrite_as_sincos(self, arg, **kwargs):
return cos(arg)/(sin(arg)*cos(arg))
def _eval_rewrite_as_cot(self, arg, **kwargs):
cot_half = cot(arg/2)
return (1 + cot_half**2)/(2*cot_half)
def _eval_rewrite_as_cos(self, arg, **kwargs):
return 1/sin(arg).rewrite(cos, **kwargs)
def _eval_rewrite_as_sec(self, arg, **kwargs):
return sec(pi/2 - arg, evaluate=False)
def _eval_rewrite_as_tan(self, arg, **kwargs):
return (1/sin(arg).rewrite(tan, **kwargs))
def _eval_rewrite_as_besselj(self, arg, **kwargs):
from sympy.functions.special.bessel import besselj
return sqrt(2/pi)*(1/(sqrt(arg)*besselj(S.Half, arg)))
def fdiff(self, argindex=1):
if argindex == 1:
return -cot(self.args[0])*csc(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def _eval_is_complex(self):
arg = self.args[0]
if arg.is_real and (arg/pi).is_integer is False:
return True
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return 1/sympify(x)
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
k = n//2 + 1
return (S.NegativeOne**(k - 1)*2*(2**(2*k - 1) - 1)*
bernoulli(2*k)*x**(2*k - 1)/factorial(2*k))
def _eval_as_leading_term(self, x, logx, cdir):
from sympy.calculus.accumulationbounds import AccumBounds
from sympy.functions.elementary.complexes import re
arg = self.args[0]
x0 = arg.subs(x, 0).cancel()
n = x0/pi
if n.is_integer:
lt = (arg - n*pi).as_leading_term(x)
return (S.NegativeOne**n)/lt
if x0 is S.ComplexInfinity:
x0 = arg.limit(x, 0, dir='-' if re(cdir).is_negative else '+')
if x0 in (S.Infinity, S.NegativeInfinity):
return AccumBounds(S.NegativeInfinity, S.Infinity)
return self.func(x0) if x0.is_finite else self
| csc |
python | doocs__leetcode | solution/0700-0799/0783.Minimum Distance Between BST Nodes/Solution.py | {
"start": 192,
"end": 594
} | class ____:
def minDiffInBST(self, root: Optional[TreeNode]) -> int:
def dfs(root: Optional[TreeNode]):
if root is None:
return
dfs(root.left)
nonlocal pre, ans
ans = min(ans, root.val - pre)
pre = root.val
dfs(root.right)
pre = -inf
ans = inf
dfs(root)
return ans
| Solution |
python | graphql-python__graphene | graphene/tests/issues/test_1293.py | {
"start": 701,
"end": 769
} | class ____(graphene.ObjectType):
goodbye = graphene.String()
| Query |
python | pezy__LeetCode | 001. Add Two Numbers/solution.py | {
"start": 140,
"end": 1199
} | class ____:
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
head = ListNode(0)
p = head
quot = 0
while l1 or l2 or quot != 0:
if l1:
quot += l1.val
l1 = l1.next
if l2:
quot += l2.val
l2 = l2.next
quot, rem = divmod(quot, 10)
p.next = ListNode(rem)
p = p.next
return head.next
def compareLinkedList(l1, l2):
while l1 or l2:
if not (l1 and l2) or l1.val != l2.val:
return False
l1 = l1.next
l2 = l2.next
return True
if __name__ == "__main__":
l1 = ListNode(2)
l1.next = ListNode(4)
l1.next.next = ListNode(3)
l2 = ListNode(5)
l2.next = ListNode(6)
l2.next.next = ListNode(4)
lsum = ListNode(7)
lsum.next = ListNode(0)
lsum.next.next = ListNode(8)
print(compareLinkedList(Solution().addTwoNumbers(l1, l2), lsum))
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 27777,
"end": 28371
} | class ____(sgqlc.types.Enum):
"""The possible states in which authentication can be configured with
an identity provider.
Enumeration Choices:
* `CONFIGURED`: Authentication with an identity provider is
configured but not enforced.
* `ENFORCED`: Authentication with an identity provider is
configured and enforced.
* `UNCONFIGURED`: Authentication with an identity provider is not
configured.
"""
__schema__ = github_schema
__choices__ = ("CONFIGURED", "ENFORCED", "UNCONFIGURED")
Int = sgqlc.types.Int
| IdentityProviderConfigurationState |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_getlimits.py | {
"start": 1140,
"end": 1368
} | class ____(TestCase):
def test_singleton(self):
ftype = finfo(half)
ftype2 = finfo(half)
assert_equal(id(ftype), id(ftype2))
@skip(reason="torch.finfo is not a singleton. Why demanding it is?")
| TestHalf |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.