language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
Textualize__textual
tests/snapshot_tests/snapshot_apps/command_palette.py
{ "start": 78, "end": 520 }
class ____(Provider): def goes_nowhere_does_nothing(self) -> None: pass async def search(self, query: str) -> Hits: matcher = self.matcher(query) for n in range(10): command = f"This is a test of this code {n}" yield Hit( n / 10, matcher.highlight(command), self.goes_nowhere_does_nothing, command, )
TestSource
python
spyder-ide__spyder
spyder/plugins/ipythonconsole/api.py
{ "start": 376, "end": 1303 }
class ____(TypedDict): """IPythonConsole python execution parameters.""" # True if the execution is using the current console. False otherwise current: bool # If True, then the console will start a debugging session if an error # occurs. False otherwise. post_mortem: bool # True if the console is using custom Python arguments. False otherwise. python_args_enabled: bool # Custom arguments to pass to the console. python_args: str # If True, then the console will clear all variables before execution. # False otherwise. clear_namespace: bool # If True, then the console will reuse the current namespace. If False, # then it will use an empty one. console_namespace: bool # If not None, then the console will use an alternative run method # (e.g. `runfile`, `debugfile` or `debugcell`). run_method: NotRequired[str]
IPythonConsolePyConfiguration
python
django__django
django/views/generic/edit.py
{ "start": 7959, "end": 8798 }
class ____(DeletionMixin, FormMixin, BaseDetailView): """ Base view for deleting an object. This requires subclassing to provide a response mixin. """ form_class = Form def post(self, request, *args, **kwargs): # Set self.object before the usual form processing flow. # Inlined because having DeletionMixin as the first base, for # get_success_url(), makes leveraging super() with ProcessFormView # overly complex. self.object = self.get_object() form = self.get_form() if form.is_valid(): return self.form_valid(form) else: return self.form_invalid(form) def form_valid(self, form): success_url = self.get_success_url() self.object.delete() return HttpResponseRedirect(success_url)
BaseDeleteView
python
dagster-io__dagster
python_modules/libraries/dagster-pandas/dagster_pandas/constraints.py
{ "start": 3096, "end": 3389 }
class ____(ConstraintWithMetadataException): def __init__(self, constraint_name, constraint_description, expectation, actual): super().__init__( constraint_name, constraint_description, expectation, "a malformed dataframe", actual )
DataFrameWithMetadataException
python
PyCQA__pylint
tests/pyreverse/functional/class_diagrams/relationships/comprehensions.py
{ "start": 584, "end": 1190 }
class ____: """Comprehensions using existing objects - aggregation.""" def __init__(self, existing_components: list[Component]): # Aggregation: comprehensions using existing objects (not creating) self.components: list[Component] = [comp for comp in existing_components] self.component_dict: dict[str, Component] = {f"key_{i}": comp for i, comp in enumerate(existing_components)} self.components_set: set[Component] = {comp for comp in existing_components} self.lazy_components: Generator[Component] = (comp for comp in existing_components)
AggregationContainer
python
django__django
tests/template_tests/templatetags/custom.py
{ "start": 10697, "end": 11034 }
class ____(template.Node): def __init__(self): self.count = 0 def render(self, context): count = self.count self.count = count + 1 return str(count) @register.tag("extra_data") def do_extra_data(parser, token): parser.extra_data["extra_data"] = "CUSTOM_DATA" return TextNode("")
CounterNode
python
tensorflow__tensorflow
tensorflow/python/training/input_test.py
{ "start": 10800, "end": 13528 }
class ____(test_lib.TestCase): def testNoShuffle(self): with ops.Graph().as_default(), self.cached_session(): num_epochs = 3 range_size = 5 queue = inp.range_input_producer( range_size, num_epochs=num_epochs, shuffle=False) dequeue_many = queue.dequeue_many(range_size * num_epochs) dequeue = queue.dequeue() self.evaluate(variables.global_variables_initializer()) variables.local_variables_initializer().run() threads = queue_runner_impl.start_queue_runners() # No randomness, so just see repeated copies of the input. output = self.evaluate(dequeue_many) self.assertAllEqual(list(range(range_size)) * num_epochs, output) # Reached the limit. with self.assertRaises(errors_impl.OutOfRangeError): self.evaluate(dequeue) for thread in threads: thread.join() def testShuffle(self): with ops.Graph().as_default(), self.cached_session(): num_epochs = 200 range_size = 2 queue = inp.range_input_producer( range_size, num_epochs=num_epochs, shuffle=True, seed=314159) dequeue_many = queue.dequeue_many(range_size) dequeue = queue.dequeue() self.evaluate(variables.global_variables_initializer()) variables.local_variables_initializer().run() threads = queue_runner_impl.start_queue_runners() # Validate that we only shuffle the integers within an epoch and # count how often each possible order appears. expected = [12, 21] frequency = {} for e in expected: frequency[e] = 0 for _ in range(num_epochs): output = self.evaluate(dequeue_many) key = 10 * (output[0] + 1) + (output[1] + 1) self.assertIn(key, expected) frequency[key] += 1 # Expect an approximately even distribution over all possible orders. expected_frequency = num_epochs / len(expected) margin = expected_frequency * 0.4 tf_logging.info("Observed counts: %s", frequency) for key in expected: value = frequency[key] self.assertGreater(value, expected_frequency - margin) self.assertLess(value, expected_frequency + margin) # Reached the limit. 
with self.assertRaises(errors_impl.OutOfRangeError): self.evaluate(dequeue) for thread in threads: thread.join() def testSharedName(self): with ops.Graph().as_default(), self.cached_session(): range_size = 5 queue = inp.range_input_producer( range_size, shared_name="SHARED_NAME_XYZ", name="Q") self.assertProtoEquals("s: 'SHARED_NAME_XYZ'", queue.queue_ref.op.node_def.attr["shared_name"])
RangeInputProducerTest
python
dagster-io__dagster
python_modules/dagster/dagster_tests/components_tests/unit_tests/test_component_with_optional_scaffolder.py
{ "start": 399, "end": 636 }
class ____(dg.Component): ... def test_component_with_optional_scaffolder() -> None: assert isinstance( get_scaffolder(ComponentWithOptionalScaffolder), ScaffolderUnavailableReason, )
ComponentWithOptionalScaffolder
python
facebook__pyre-check
client/commands/infer.py
{ "start": 15090, "end": 15188 }
class ____(FunctionAnnotation): parent: str @dataclasses.dataclass(frozen=True)
MethodAnnotation
python
google__flatbuffers
python/flatbuffers/flexbuffers.py
{ "start": 11417, "end": 11867 }
class ____(Object): """Data accessor for the encoded key bytes.""" __slots__ = () def __init__(self, buf, byte_width): assert byte_width == 1 super().__init__(buf, byte_width) @property def Bytes(self): return self._buf[0 : len(self)] def __len__(self): return self._buf.Find(0) def __str__(self): return self.Bytes.decode('ascii') def __repr__(self): return 'Key(%s, size=%d)' % (self._buf, len(self))
Key
python
doocs__leetcode
lcof/面试题60. n个骰子的点数/Solution.py
{ "start": 0, "end": 459 }
class ____: def dicesProbability(self, n: int) -> List[float]: f = [[0] * (6 * n + 1) for _ in range(n + 1)] for j in range(1, 7): f[1][j] = 1 for i in range(2, n + 1): for j in range(i, 6 * i + 1): for k in range(1, 7): if j - k >= 0: f[i][j] += f[i - 1][j - k] m = pow(6, n) return [f[n][j] / m for j in range(n, 6 * n + 1)]
Solution
python
pyinstaller__pyinstaller
PyInstaller/utils/hooks/qt/__init__.py
{ "start": 6128, "end": 75633 }
class ____: def __init__(self, namespace): if namespace not in ['PyQt5', 'PyQt6', 'PySide2', 'PySide6']: raise Exception('Invalid namespace: {0}'.format(namespace)) self.namespace = namespace # Distinction between PyQt5/6 and PySide2/6 self.is_pyqt = namespace in {'PyQt5', 'PyQt6'} # Distinction between Qt5 and Qt6 self.qt_major = 6 if namespace in {'PyQt6', 'PySide6'} else 5 # Determine relative path where Qt libraries and data need to be collected in the frozen application. This # varies between PyQt5/PyQt6/PySide2/PySide6, their versions, and platforms. NOTE: it is tempting to consider # deriving this path as simply the value of QLibraryInfo.PrefixPath, taken relative to the package's root # directory. However, we also need to support non-wheel deployments (e.g., with Qt installed in custom path on # Windows, or with Qt and PyQt5 installed on linux using native package manager), and in those, the Qt # PrefixPath does not reflect the required relative target path for the frozen application. if namespace == 'PyQt5': if self._use_new_layout("PyQt5", "5.15.4", False): self.qt_rel_dir = os.path.join('PyQt5', 'Qt5') else: self.qt_rel_dir = os.path.join('PyQt5', 'Qt') elif namespace == 'PyQt6': if self._use_new_layout("PyQt6", "6.0.3", True): self.qt_rel_dir = os.path.join('PyQt6', 'Qt6') else: self.qt_rel_dir = os.path.join('PyQt6', 'Qt') elif namespace == 'PySide2': # PySide2 uses PySide2/Qt on linux and macOS, and PySide2 on Windows if compat.is_win: self.qt_rel_dir = 'PySide2' else: self.qt_rel_dir = os.path.join('PySide2', 'Qt') else: # PySide6 follows the same logic as PySide2 if compat.is_win: self.qt_rel_dir = 'PySide6' else: self.qt_rel_dir = os.path.join('PySide6', 'Qt') # Process module information list to construct python-module-name -> info and shared-lib-name -> info mappings. 
self._load_module_info() def __repr__(self): return f"QtLibraryInfo({self.namespace})" # Delay initialization of the Qt library information until the corresponding attributes are first requested. def __getattr__(self, name): if 'version' in self.__dict__: # Initialization was already done, but requested attribute is not available. raise AttributeError(name) # Load Qt library info... self._load_qt_info() # ... and return the requested attribute return getattr(self, name) # Check whether we must use the new layout (e.g. PyQt5/Qt5, PyQt6/Qt6) instead of the old layout (PyQt5/Qt, # PyQt6/Qt). @staticmethod def _use_new_layout(package_basename: str, version: str, fallback_value: bool) -> bool: # The PyQt wheels come in both non-commercial and commercial variants. So we need to check if a particular # variant is installed before testing its version. if hooks.check_requirement(package_basename): return hooks.check_requirement(f"{package_basename} >= {version}") if hooks.check_requirement(f"{package_basename}_commercial"): return hooks.check_requirement(f"{package_basename}_commercial >= {version}") return fallback_value # Load Qt information (called on first access to related fields) def _load_qt_info(self): """ Load and process Qt library information. Called on the first access to the related attributes of the class (e.g., `version` or `location`). """ # Ensure self.version exists, even if PyQt{5,6}/PySide{2,6} cannot be imported. Hooks and util functions use # `if .version` to check whether package was imported and other attributes are expected to be available. # This also serves as a marker that initialization was already done. self.version = None # Get library path information from Qt. See QLibraryInfo_. 
@isolated.decorate def _read_qt_library_info(package): import os import sys import importlib # Import the Qt-based package # equivalent to: from package.QtCore import QLibraryInfo, QCoreApplication try: QtCore = importlib.import_module('.QtCore', package) except ModuleNotFoundError: return None # Signal that package is unavailable QLibraryInfo = QtCore.QLibraryInfo QCoreApplication = QtCore.QCoreApplication # QLibraryInfo is not always valid until a QCoreApplication is instantiated. app = QCoreApplication(sys.argv) # noqa: F841 # Qt6 deprecated QLibraryInfo.location() in favor of QLibraryInfo.path(), and # QLibraryInfo.LibraryLocation enum was replaced by QLibraryInfo.LibraryPath. if hasattr(QLibraryInfo, 'path'): # Qt6; enumerate path enum values directly from the QLibraryInfo.LibraryPath enum. path_names = [x for x in dir(QLibraryInfo.LibraryPath) if x.endswith('Path')] location = {x: QLibraryInfo.path(getattr(QLibraryInfo.LibraryPath, x)) for x in path_names} else: # Qt5; in recent versions, location enum values can be enumeratd from QLibraryInfo.LibraryLocation. # However, in older versions of Qt5 and its python bindings, that is unavailable. Hence the # enumeration of "*Path"-named members of QLibraryInfo. path_names = [x for x in dir(QLibraryInfo) if x.endswith('Path')] location = {x: QLibraryInfo.location(getattr(QLibraryInfo, x)) for x in path_names} # Determine the python-based package location, by looking where the QtCore module is located. package_location = os.path.dirname(QtCore.__file__) # Determine Qt version. Works for Qt 5.8 and later, where QLibraryInfo.version() was introduced. 
try: version = QLibraryInfo.version().segments() except AttributeError: version = [] return { 'is_debug_build': QLibraryInfo.isDebugBuild(), 'version': version, 'location': location, 'package_location': package_location, } try: qt_info = _read_qt_library_info(self.namespace) except Exception as e: logger.warning("%s: failed to obtain Qt library info: %s", self, e) return # If package could not be imported, `_read_qt_library_info` returns None. In such cases, emit a debug message # instead of a warning, because this initialization might be triggered by a helper function that is trying to # determine availability of bindings by inspecting the `version` attribute of `QtLibraryInfo` for all bindings. if qt_info is None: logger.debug("%s: failed to obtain Qt library info: %s.QtCore could not be imported.", self, self.namespace) return for k, v in qt_info.items(): setattr(self, k, v) # Turn package_location into pathlib.Path(), and fully resolve it. self.package_location = pathlib.Path(self.package_location).resolve() # Determine if the Qt is bundled with python package itself; this usually means we are dealing with with PyPI # wheels. resolved_qt_prefix_path = pathlib.Path(self.location['PrefixPath']).resolve() self.qt_inside_package = ( self.package_location == resolved_qt_prefix_path or # PySide2 and PySide6 Windows PyPI wheels self.package_location in resolved_qt_prefix_path.parents ) # Determine directory that contains Qt shared libraries. On non-Windows, this is typically location given by # `LibrariesPath`. On Windows, it is usually `BinariesPath`, except for PySide PyPI wheels, where DLLs are # placed in top-level `PrefixPath`. 
if compat.is_win: if self.qt_inside_package and not self.is_pyqt: # Windows PyPI wheel qt_lib_dir = self.location['PrefixPath'] else: qt_lib_dir = self.location['BinariesPath'] else: qt_lib_dir = self.location['LibrariesPath'] self.qt_lib_dir = pathlib.Path(qt_lib_dir).resolve() # Module information list loading/processing def _load_module_info(self): """ Process the Qt modules info definition list and construct two dictionaries: - dictionary that maps Qt python module names to Qt module info entries - dictionary that maps shared library names to Qt module info entries """ self.python_modules = dict() self.shared_libraries = dict() for entry in _modules_info.QT_MODULES_INFO: # If entry specifies applicable bindings, check them if entry.bindings: applicable_bindings = _modules_info.process_namespace_strings(entry.bindings) if self.namespace not in applicable_bindings: continue # Create a QtModuleInfo entry info_entry = QtModuleInfo( module=entry.module, shared_lib=f"Qt{self.qt_major}{entry.shared_lib}" if entry.shared_lib else None, translations=entry.translations, plugins=entry.plugins ) # If we have python module (extension) name, create python-module-name -> info mapping. if info_entry.module is not None: self.python_modules[info_entry.module] = info_entry # If we have Qt module (shared library) name, create shared-lib-name -> info mapping. if info_entry.shared_lib is not None: self.shared_libraries[info_entry.shared_lib.lower()] = info_entry def _normalize_shared_library_name(self, filename): """ Normalize a shared library name into common form that we can use for look-ups and comparisons. Primarily intended for Qt shared library names. """ # Take base name, remove suffix, and lower case it. lib_name = os.path.splitext(os.path.basename(filename))[0].lower() # Linux libraries sometimes have a dotted version number -- ``libfoo.so.3``. It is now ''libfoo.so``, # but the ``.so`` must also be removed. 
if compat.is_linux and os.path.splitext(lib_name)[1] == '.so': lib_name = os.path.splitext(lib_name)[0] # Remove the "lib" prefix (Linux, macOS). if lib_name.startswith('lib'): lib_name = lib_name[3:] # macOS: handle different naming schemes. PyPI wheels ship framework-enabled Qt builds, where shared # libraries are part of .framework bundles (e.g., ``PyQt5/Qt5/lib/QtCore.framework/Versions/5/QtCore``). # In Anaconda (Py)Qt installations, the shared libraries are installed in environment's library directory, # and contain versioned extensions, e.g., ``libQt5Core.5.dylib``. if compat.is_darwin: if lib_name.startswith('qt') and not lib_name.startswith('qt' + str(self.qt_major)): # Qt library from a framework bundle (e.g., ``QtCore``); change prefix from ``qt`` to ``qt5`` or # ``qt6`` to match names in Windows/Linux. lib_name = 'qt' + str(self.qt_major) + lib_name[2:] if lib_name.endswith('.' + str(self.qt_major)): # Qt library from Anaconda, which originally had versioned extension, e.g., ``libfoo.5.dynlib``. # The above processing turned it into ``foo.5``, so we need to remove the last two characters. lib_name = lib_name[:-2] # Handle cases with QT_LIBINFIX set to '_conda', i.e. conda-forge builds. if lib_name.endswith('_conda'): lib_name = lib_name[:-6] return lib_name # Collection def collect_module(self, module_name): """ Collect all dependencies (hiddenimports, binaries, datas) for the given Qt python module. This function performs recursive analysis of extension module's link-time dependencies, and uses dictionaries built by `_load_module_info` to discover associated plugin types, translation file base names, and hidden imports that need to be collected. """ # Accumulate all dependencies in a set to avoid duplicates. hiddenimports = set() translation_base_names = set() plugin_types = set() # Exit if the requested library cannot be imported. # NOTE: self..version can be empty list on older Qt5 versions (#5381). 
if self.version is None: return [], [], [] logger.debug('%s: processing module %s...', self, module_name) # Look up the associated Qt module information by python module name. # This allows us to collect associated module data directly, even if there is no associated shared library # (e.g., header-only Qt module, or statically-built one). short_module_name = module_name.split('.', 1)[-1] # PySide2.QtModule -> QtModule if short_module_name in self.python_modules: qt_module_info = self.python_modules[short_module_name] # NOTE: no need to add a hiddenimport here, because this is the module under consideration. # Add plugins plugin_types.update(qt_module_info.plugins) # Add translation base name(s) translation_base_names.update(qt_module_info.translations) # Find the actual module extension file. module_file = hooks.get_module_file_attribute(module_name) # Additional search path for shared library resolution. This is mostly required for library resolution on # Windows (Linux and macOS binaries use run paths to find Qt libs). qtlib_search_paths = [ # For PyQt5 and PyQt6 wheels, shared libraries should be in BinariesPath, while for PySide2 and PySide6, # they should be in PrefixPath. self.location['BinariesPath' if self.is_pyqt else 'PrefixPath'], ] # Walk through all the link-time dependencies of a dynamically-linked library (``.so``/``.dll``/``.dylib``). 
imported_libraries = bindepend.get_imports(module_file, qtlib_search_paths) while imported_libraries: imported_lib_name, imported_lib_path = imported_libraries.pop() # (name, fullpath) tuple # Skip unresolved libraries if imported_lib_path is None: logger.debug("%s: ignoring unresolved library import %r", self, imported_lib_name) continue # Normalize the shared library name lib_name = self._normalize_shared_library_name(imported_lib_path) logger.debug( '%s: imported library %r, full path %r -> parsed name %r.', self, imported_lib_name, imported_lib_path, lib_name ) # PySide2 and PySide6 on linux seem to link all extension modules against libQt5Core, libQt5Network, and # libQt5Qml (or their libQt6* equivalents). While the first two are reasonable, the libQt5Qml dependency # pulls in whole QtQml module, along with its data and plugins, which in turn pull in several other Qt # libraries, greatly inflating the bundle size (see #6447). # # Similarly, some extension modules (QtWebChannel, QtWebEngine*) seem to be also linked against libQt5Qml, # even when the module can be used without having the whole QtQml module collected. # # Therefore, we explicitly prevent inclusion of QtQml based on the dynamic library dependency, except for # QtQml* and QtQuick* modules, whose use directly implies the use of QtQml. if lib_name in ("qt5qml", "qt6qml"): if not short_module_name.startswith(('QtQml', 'QtQuick')): logger.debug('%s: ignoring imported library %r.', self, lib_name) continue # Use the parsed library name to look up associated Qt module information. if lib_name in self.shared_libraries: logger.debug('%s: collecting Qt module associated with %r.', self, lib_name) # Look up associated module info qt_module_info = self.shared_libraries[lib_name] # If there is a python extension module associated with Qt module, add it to hiddenimports. 
Since this # means that we (most likely) have a hook available for that module, we can avoid analyzing the shared # library itself (i.e., stop the recursive analysis), because this will be done by the corresponding # hook. if qt_module_info.module: if qt_module_info.module == short_module_name: # The one exception is if we are analyzing shared library associated with the input module; in # that case, avoid adding a hidden import and analyze the library's link-time dependencies. We # do not need to worry about plugins and translations for this particular module, because those # have been handled at the beginning of this function. imported_libraries.update(bindepend.get_imports(imported_lib_path, qtlib_search_paths)) else: hiddenimports.add(self.namespace + "." + qt_module_info.module) continue # Add plugins plugin_types.update(qt_module_info.plugins) # Add translation base name(s) translation_base_names.update(qt_module_info.translations) # Analyze the linked shared libraries for its dependencies (recursive analysis). imported_libraries.update(bindepend.get_imports(imported_lib_path, qtlib_search_paths)) # Collect plugin files. binaries = [] for plugin_type in plugin_types: binaries += self.collect_plugins(plugin_type) # Collect translation files. datas = [] translation_src = self.location['TranslationsPath'] translation_dst = os.path.join(self.qt_rel_dir, 'translations') for translation_base_name in translation_base_names: # Not all PyQt5 installations include translations. See # https://github.com/pyinstaller/pyinstaller/pull/3229#issuecomment-359479893 # and # https://github.com/pyinstaller/pyinstaller/issues/2857#issuecomment-368744341. translation_pattern = os.path.join(translation_src, translation_base_name + '_*.qm') translation_files = glob.glob(translation_pattern) if translation_files: datas += [(translation_file, translation_dst) for translation_file in translation_files] else: logger.warning( '%s: could not find translations with base name %r! 
These translations will not be collected.', self, translation_base_name ) # Convert hiddenimports to a list. hiddenimports = list(hiddenimports) logger.debug( '%s: dependencies for %s:\n' ' hiddenimports = %r\n' ' binaries = %r\n' ' datas = %r', self, module_name, hiddenimports, binaries, datas ) return hiddenimports, binaries, datas @staticmethod def _filter_release_plugins(plugin_files): """ Filter the provided list of Qt plugin files and remove the debug variants, under the assumption that both the release version of a plugin (qtplugin.dll) and its debug variant (qtplugind.dll) appear in the list. """ # All basenames for lookup plugin_basenames = {os.path.normcase(os.path.basename(f)) for f in plugin_files} # Process all given filenames release_plugin_files = [] for plugin_filename in plugin_files: plugin_basename = os.path.normcase(os.path.basename(plugin_filename)) if plugin_basename.endswith('d.dll'): # If we can find a variant without trailing 'd' in the plugin list, then the DLL we are dealing with is # a debug variant and needs to be excluded. release_name = os.path.splitext(plugin_basename)[0][:-1] + '.dll' if release_name in plugin_basenames: continue release_plugin_files.append(plugin_filename) return release_plugin_files def collect_plugins(self, plugin_type): """ Collect all plugins of the specified type from the Qt plugin directory. Returns list of (src, dst) tuples. """ # Ensure plugin directory exists plugin_src_dir = self.location['PluginsPath'] if not os.path.isdir(plugin_src_dir): raise Exception(f"Qt plugin directory '{plugin_src_dir}' does not exist!") # Collect all shared lib files in plugin type (sub)directory plugin_files = misc.dlls_in_dir(os.path.join(plugin_src_dir, plugin_type)) # Windows: # # dlls_in_dir() grabs all files ending with ``*.dll``, ``*.so`` and ``*.dylib`` in a certain directory. 
On # Windows this would grab debug copies of Qt plugins, which then causes PyInstaller to add a dependency on the # Debug CRT *in addition* to the release CRT. if compat.is_win: plugin_files = self._filter_release_plugins(plugin_files) logger.debug("%s: found plugin files for plugin type %r: %r", self, plugin_type, plugin_files) plugin_dst_dir = os.path.join(self.qt_rel_dir, 'plugins', plugin_type) # Exclude plugins with invalid Qt dependencies. binaries = [] for plugin_file in plugin_files: valid, reason = self._validate_plugin_dependencies(plugin_file) if valid: binaries.append((plugin_file, plugin_dst_dir)) else: logger.debug("%s: excluding plugin %r (%r)! Reason: %s", self, plugin_file, plugin_type, reason) return binaries def _validate_plugin_dependencies(self, plugin_file): """ Validate Qt dependencies of the given Qt plugin file. For the plugin to pass validation, all its Qt dependencies must be available (resolvable), and must be resolvable from the default Qt shared library directory (to avoid pulling in libraries from unrelated Qt installations that happen to be in search path). """ imported_libraries = bindepend.get_imports(plugin_file, search_paths=[self.qt_lib_dir]) for imported_lib_name, imported_lib_path in imported_libraries: # Parse/normalize the (unresolved) library name, to determine if dependency is a Qt shared library. If not, # skip the validation. lib_name = self._normalize_shared_library_name(imported_lib_name) if not lib_name.startswith(f"qt{self.qt_major}"): continue if imported_lib_path is None: return False, f"Missing Qt dependency {imported_lib_name!r}." imported_lib_path = pathlib.Path(imported_lib_path).resolve() if self.qt_lib_dir not in imported_lib_path.parents: return ( False, f"Qt dependency {imported_lib_name!r} ({str(imported_lib_path)!r}) has been resolved outside of " f"the Qt shared library directory ({str(self.qt_lib_dir)!r})." 
) return True, None def _collect_all_or_none(self, mandatory_dll_patterns, optional_dll_patterns=None): """ Try to find Qt DLLs from the specified mandatory pattern list. If all mandatory patterns resolve to DLLs, collect them all, as well as any DLLs from the optional pattern list. If a mandatory pattern fails to resolve to a DLL, return an empty list. This allows all-or-none collection of particular groups of Qt DLLs that may or may not be available. """ optional_dll_patterns = optional_dll_patterns or [] # Package parent path; used to preserve the directory structure when DLLs are collected from the python # package (e.g., PyPI wheels). package_parent_path = self.package_location.parent # On Windows, DLLs are typically placed in `location['BinariesPath']`, except for PySide PyPI wheels, where # `location['PrefixPath']` is used. This difference is already handled by `qt_lib_dir`, which is also fully # resolved. dll_path = self.qt_lib_dir # Helper for processing single DLL pattern def _process_dll_pattern(dll_pattern): discovered_dlls = [] dll_files = dll_path.glob(dll_pattern) for dll_file in dll_files: if package_parent_path in dll_file.parents: # The DLL is located within python package; preserve the layout dst_dll_dir = dll_file.parent.relative_to(package_parent_path) else: # The DLL is not located within python package; collect into top-level directory dst_dll_dir = '.' discovered_dlls.append((str(dll_file), str(dst_dll_dir))) return discovered_dlls # Process mandatory patterns collected_dlls = [] for pattern in mandatory_dll_patterns: discovered_dlls = _process_dll_pattern(pattern) if not discovered_dlls: return [] # Mandatory pattern resulted in no DLLs; abort collected_dlls += discovered_dlls # Process optional patterns for pattern in optional_dll_patterns: collected_dlls += _process_dll_pattern(pattern) return collected_dlls # Collect required Qt binaries, but only if all binaries in a group exist. 
def collect_extra_binaries(self): """ Collect extra binaries/DLLs required by Qt. These include ANGLE DLLs, OpenGL software renderer DLL, and ICU DLLs. Applicable only on Windows (on other OSes, empty list is returned). """ binaries = [] # Applicable only to Windows. if not compat.is_win: return [] # OpenGL: EGL/GLES via ANGLE, software OpenGL renderer. binaries += self._collect_all_or_none(['libEGL.dll', 'libGLESv2.dll'], ['d3dcompiler_??.dll']) binaries += self._collect_all_or_none(['opengl32sw.dll']) # Include ICU files, if they exist. # See the "Deployment approach" section at the top of this file. binaries += self._collect_all_or_none(['icudt??.dll', 'icuin??.dll', 'icuuc??.dll']) return binaries # Collect additional shared libraries required for SSL support in QtNetwork, if they are available. # Primarily applicable to Windows (see issue #3520, #4048). def collect_qtnetwork_files(self): """ Collect extra binaries/shared libraries required by the QtNetwork module, such as OpenSSL shared libraries. """ # No-op if requested Qt-based package is not available. if self.version is None: return [] # Check if QtNetwork supports SSL and has OpenSSL backend available (Qt >= 6.1). # Also query the run-time OpenSSL version, so we know what dynamic libraries we need to search for. 
@isolated.decorate def _check_if_openssl_enabled(package): import sys import importlib # Import the Qt-based package # equivalent to: from package.QtCore import QCoreApplication QtCore = importlib.import_module('.QtCore', package) QCoreApplication = QtCore.QCoreApplication QLibraryInfo = QtCore.QLibraryInfo # equivalent to: from package.QtNetwork import QSslSocket QtNetwork = importlib.import_module('.QtNetwork', package) QSslSocket = QtNetwork.QSslSocket # Instantiate QCoreApplication to suppress warnings app = QCoreApplication(sys.argv) # noqa: F841 if not QSslSocket.supportsSsl(): return False, None # Query the run-time OpenSSL version openssl_version = QSslSocket.sslLibraryVersionNumber() # For Qt >= 6.1, check if `openssl` TLS backend is available try: qt_version = QLibraryInfo.version().segments() except AttributeError: qt_version = [] # Qt <= 5.8 if qt_version < [6, 1]: return True, openssl_version # TLS backends not implemented yet return ('openssl' in QSslSocket.availableBackends(), openssl_version) openssl_enabled, openssl_version = _check_if_openssl_enabled(self.namespace) if not openssl_enabled or openssl_version == 0: logger.debug("%s: QtNetwork: does not support SSL or does not use OpenSSL.", self) return [] # The actual search is handled in OS-specific ways. if compat.is_win: return self._collect_qtnetwork_openssl_windows(openssl_version) elif compat.is_darwin: return self._collect_qtnetwork_openssl_macos(openssl_version) elif compat.is_linux: return self._collect_qtnetwork_openssl_linux(openssl_version) else: logger.warning("%s: QtNetwork: collection of OpenSSL not implemented for this platform!") return [] def _collect_qtnetwork_openssl_windows(self, openssl_version): """ Windows-specific collection of OpenSSL DLLs required by QtNetwork module. """ # Package parent path; used to preserve the directory structure when DLLs are collected from the python # package (e.g., PyPI wheels). 
        package_parent_path = self.package_location.parent

        # The OpenSSL DLLs might be shipped with PyPI wheel (PyQt5), might be available in the environment (msys2,
        # anaconda), or might be expected to be available in the environment (PySide2, PySide6, PyQt6 PyPI wheels).
        #
        # The OpenSSL DLL naming scheme depends on the version:
        #  - OpenSSL 1.0.x: libeay32.dll, ssleay32.dll
        #  - OpenSSL 1.1.x 32-bit: libssl-1_1.dll, libcrypto-1_1.dll
        #  - OpenSSL 1.1.x 64-bit: libssl-1_1-x64.dll, libcrypto-1_1-x64.dll
        #  - OpenSSL 3.0.x 32-bit: libssl-3.dll, libcrypto-3.dll
        #  - OpenSSL 3.0.x 64-bit: libssl-3-x64.dll, libcrypto-3-x64.dll
        #
        # The official Qt builds (which are used by PySide and PyQt PyPI wheels) seem to be build against:
        #  - OpenSSL 1.1.x starting with Qt5 5.14.2:
        #    https://www.qt.io/blog/2019/06/17/qt-5-12-4-released-support-openssl-1-1-1
        #  - OpenSSL 3.x starting with Qt6 6.5.0:
        #    https://www.qt.io/blog/moving-to-openssl-3-in-binary-builds-starting-from-qt-6.5-beta-2
        #
        # However, a package can build Qt against OpenSSL version of their own choice. For example, at the time of
        # writing, both mingw-w64-x86_64-qt5-base 5.15.11+kde+r138-1 and mingw-w64-x86_64-qt6-base 6.6.0-2 packages
        # depend on mingw-w64-x86_64-openssl 3.1.4-1 (so OpenSSL 3).
        #
        # Luckily, we can query the run-time version of OpenSSL by calling `QSslSocket.sslLibraryVersionNumber()`,
        # and narrow down the search for specific version.
        #
        # The version number is the OpenSSL version-number bitfield (e.g., 0x1010100f for 1.1.1), so the ranges
        # below compare against the major/minor boundaries.
        if openssl_version >= 0x10000000 and openssl_version < 0x10100000:
            # OpenSSL 1.0.x - used by old Qt5 builds
            dll_names = (
                'libeay32.dll',
                'ssleay32.dll',
            )
            logger.debug("%s: QtNetwork: looking for OpenSSL 1.0.x DLLs: %r", self, dll_names)
        elif openssl_version >= 0x10100000 and openssl_version < 0x30000000:
            # OpenSSL 1.1.x
            dll_names = (
                'libssl-1_1-x64.dll' if compat.is_64bits else 'libssl-1_1.dll',
                'libcrypto-1_1-x64.dll' if compat.is_64bits else 'libcrypto-1_1.dll',
            )
            logger.debug("%s: QtNetwork: looking for OpenSSL 1.1.x DLLs: %r", self, dll_names)
        elif openssl_version >= 0x30000000 and openssl_version < 0x40000000:
            # OpenSSL 3.0.x
            dll_names = (
                'libssl-3-x64.dll' if compat.is_64bits else 'libssl-3.dll',
                'libcrypto-3-x64.dll' if compat.is_64bits else 'libcrypto-3.dll',
            )
            logger.debug("%s: QtNetwork: looking for OpenSSL 3.0.x DLLs: %r", self, dll_names)
        else:
            dll_names = []  # Nothing to search for
            logger.warning("%s: QtNetwork: unsupported OpenSSL version: %X", self, openssl_version)

        binaries = []
        found_in_package = False
        for dll in dll_names:
            # Attempt to resolve the DLL path
            dll_file_path = bindepend.resolve_library_path(dll, search_paths=[self.qt_lib_dir])
            if dll_file_path is None:
                continue
            dll_file_path = pathlib.Path(dll_file_path).resolve()
            if package_parent_path in dll_file_path.parents:
                # The DLL is located within python package; preserve the layout
                dst_dll_path = dll_file_path.parent.relative_to(package_parent_path)
                found_in_package = True
            else:
                # The DLL is not located within python package; collect into top-level directory
                dst_dll_path = '.'
            binaries.append((str(dll_file_path), str(dst_dll_path)))

        # If we found at least one OpenSSL DLL in the bindings' python package directory, discard all external
        # OpenSSL DLLs.
        # Prefer DLLs bundled with the bindings package over system-wide copies, to avoid mixing versions.
        if found_in_package:
            binaries = [(dll_src_path, dll_dest_path) for dll_src_path, dll_dest_path in binaries
                        if package_parent_path in pathlib.Path(dll_src_path).parents]

        return binaries

    def _collect_qtnetwork_openssl_macos(self, openssl_version):
        """
        macOS-specific collection of OpenSSL dylibs required by QtNetwork module.
        """
        # The official Qt5 builds on macOS (shipped by PyPI wheels) appear to be built with Apple's SecureTransport API
        # instead of OpenSSL; for example, `QSslSocket.sslLibraryVersionNumber` returns 0, while
        # `sslLibraryVersionString()` returns "Secure Transport, macOS 12.6". So with PySide2 and PyQt5, we do not need
        # to worry about collection of OpenSSL shared libraries.
        #
        # Support for OpenSSL was introduced in Qt 6.1 with `openssl` TLS backend; the official Qt6 builds prior to 6.5
        # seem to be built with OpenSSL 1.1.x, and later versions with 3.0.x. However, PySide6 and PyQt6 PyPI wheels do
        # not ship OpenSSL dynamic libraries at all, so whether `openssl` TLS backend is used or not depends on the
        # presence of externally provided OpenSSL dynamic libraries (for example, provided by Homebrew). It is worth
        # noting that python.org python installers *do* provide OpenSSL shared libraries (1.1.x for python <= 3.10,
        # 3.0.x for python >= 3.12, and both for python 3.11) for its `_ssl` extension - however, these are NOT visible
        # to Qt and its QtNetwork module.
        #
        # When the frozen application is built and we collect python's `_ssl` extension, we also collect the OpenSSL
        # shared libraries shipped by python. So at least in theory, those should be available to QtNetwork module as
        # well (assuming they are of compatible version). However, this is not exactly the case - QtNetwork looks for
        # the libraries in locations given by `DYLD_LIBRARY_PATH` environment variable and in .app/Contents/Frameworks
        # (if the program is an .app bundle):
        #
        # https://github.com/qt/qtbase/blob/6.6.0/src/plugins/tls/openssl/qsslsocket_openssl_symbols.cpp#L590-L599
        #
        # So it works out-of-the box for our .app bundles, because starting with PyInstaller 6.0, `sys._MEIPASS` is in
        # .app/Contents/Frameworks. But it does not with POSIX builds, because bootloader does not modify the
        # `DYLD_LIBRARY_PATH` environment variable to include `sys._MEIPASS` (since we usually do not need that;
        # regular linked library resolution in our macOS builds is done via path rewriting and rpaths). So either we
        # need a run-time hook to add `sys._MEIPASS` to `DYLD_LIBRARY_PATH`, or modify the bootloader to always do that.
        #
        # Collecting the OpenSSL library and making it discoverable by adding `sys._MEIPASS` to `DYLD_LIBRARY_PATH`
        # should also prevent QtNetwork from "accidentally" pulling in Homebrew version at run-time (if Homebrew is
        # installed on the target system and provides compatible OpenSSL version).
        #
        # Therefore, try to resolve OpenSSL library via the version indicated by `QSslSocket.sslLibraryVersionNumber`;
        # however, we first explicitly search only {sys.base_prefix}/lib (which is where python.org builds put their
        # dynamic libs), and only if that fails, perform regular dylib path resolution. This way we ensure that if the
        # OpenSSL dylibs are provided by python itself, we always prefer those over the Homebrew version (since we are
        # very likely going to collect them for python's `_ssl` extension anyway).
        # As per above text, we need to worry only about Qt6, and thus OpenSSL 1.1.x or 3.0.x
        if openssl_version >= 0x10100000 and openssl_version < 0x30000000:
            # OpenSSL 1.1.x
            dylib_names = (
                'libcrypto.1.1.dylib',
                'libssl.1.1.dylib',
            )
            logger.debug("%s: QtNetwork: looking for OpenSSL 1.1.x dylibs: %r", self, dylib_names)
        elif openssl_version >= 0x30000000 and openssl_version < 0x40000000:
            # OpenSSL 3.0.x
            dylib_names = (
                'libcrypto.3.dylib',
                'libssl.3.dylib',
            )
            logger.debug("%s: QtNetwork: looking for OpenSSL 3.0.x dylibs: %r", self, dylib_names)
        else:
            dylib_names = []  # Nothing to search for
            logger.warning("%s: QtNetwork: unsupported OpenSSL version: %X", self, openssl_version)

        # Compared to Windows, we do not have to worry about dylib's path preservation, as these are never part of
        # the package, and are therefore always collected to the top-level application directory.
        binaries = []
        base_prefix_lib_dir = os.path.join(compat.base_prefix, 'lib')
        for dylib in dylib_names:
            # First, attempt to resolve using only {sys.base_prefix}/lib - `bindepend.resolve_library_path` uses
            # standard dyld search semantics and uses the given search paths as fallback (and would therefore
            # favor Homebrew-provided version of the library).
            dylib_path = bindepend._resolve_library_path_in_search_paths(dylib, search_paths=[base_prefix_lib_dir])
            if dylib_path is None:
                dylib_path = bindepend.resolve_library_path(dylib, search_paths=[base_prefix_lib_dir, self.qt_lib_dir])
            if dylib_path is None:
                continue
            binaries.append((str(dylib_path), '.'))

        return binaries

    def _collect_qtnetwork_openssl_linux(self, openssl_version):
        """
        Linux-specific collection of OpenSSL dylibs required by QtNetwork module.
        """
        # Out of the supported OSes, Linux is by far the most straight-forward, because OpenSSL shared libraries are
        # expected to be provided by the system. So we can just use standard library path resolution with library names
        # inferred from the run-time OpenSSL version. At run-time, QtNetwork searches paths from `LD_LIBRARY_PATH`, and
        # on Linux, our bootloader already adds `sys._MEIPASS` to that environment variable.
        if openssl_version >= 0x10000000 and openssl_version < 0x10100000:
            # OpenSSL 1.0.x - used by old Qt5 builds
            shlib_names = (
                'libcrypto.so.10',
                'libssl.so.10',
            )
            logger.debug("%s: QtNetwork: looking for OpenSSL 1.0.x shared libraries: %r", self, shlib_names)
        elif openssl_version >= 0x10100000 and openssl_version < 0x30000000:
            # OpenSSL 1.1.x
            shlib_names = (
                'libcrypto.so.1.1',
                'libssl.so.1.1',
            )
            logger.debug("%s: QtNetwork: looking for OpenSSL 1.1.x shared libraries: %r", self, shlib_names)
        elif openssl_version >= 0x30000000 and openssl_version < 0x40000000:
            # OpenSSL 3.0.x
            shlib_names = (
                'libcrypto.so.3',
                'libssl.so.3',
            )
            logger.debug("%s: QtNetwork: looking for OpenSSL 3.0.x shared libraries: %r", self, shlib_names)
        else:
            shlib_names = []  # Nothing to search for
            logger.warning("%s: QtNetwork: unsupported OpenSSL version: %X", self, openssl_version)

        # System-provided libraries are always collected into the top-level application directory.
        binaries = []
        for shlib in shlib_names:
            shlib_path = bindepend.resolve_library_path(shlib)
            if shlib_path is None:
                continue
            binaries.append((str(shlib_path), '.'))

        return binaries

    def collect_qtqml_files(self):
        """
        Collect additional binaries and data for QtQml module.

        Returns a (binaries, datas) tuple of lists of (src, dest) tuples.
        """
        # No-op if requested Qt-based package is not available.
        if self.version is None:
            return [], []

        # Not all PyQt5/PySide2 installs have QML files. In this case, location['Qml2ImportsPath'] is empty.
        # Furthermore, even if location path is provided, the directory itself may not exist.
        #
        # https://github.com/pyinstaller/pyinstaller/pull/3229#issuecomment-359735031
        # https://github.com/pyinstaller/pyinstaller/issues/3864
        #
        # In Qt 6, Qml2ImportsPath was deprecated in favor of QmlImportsPath. The former is not available in PySide6
        # 6.4.0 anymore (but is in PyQt6 6.4.0). Use the new QmlImportsPath if available.
if 'QmlImportsPath' in self.location: qml_src_dir = self.location['QmlImportsPath'] else: qml_src_dir = self.location['Qml2ImportsPath'] if not qml_src_dir or not os.path.isdir(qml_src_dir): logger.warning('%s: QML directory %r does not exist. QML files not packaged.', self, qml_src_dir) return [], [] qml_src_path = pathlib.Path(qml_src_dir).resolve() qml_dest_path = pathlib.PurePath(self.qt_rel_dir) / 'qml' binaries = [] datas = [] # Helper that computes the destination directory for the given file or directory from a QML plugin directory. def _compute_dest_dir(src_filename): if src_filename.is_dir(): rel_path = src_filename.relative_to(qml_src_path) else: rel_path = src_filename.relative_to(qml_src_path).parent return qml_dest_path / rel_path # Discover all QML plugin sub-directories by searching for `qmldir` files. qmldir_files = qml_src_path.rglob('**/qmldir') for qmldir_file in sorted(qmldir_files): plugin_dir = qmldir_file.parent logger.debug("%s: processing QML plugin directory %s", self, plugin_dir) try: # Obtain lists of source files (separated into binaries and data files). plugin_binaries, plugin_datas = self._process_qml_plugin(qmldir_file) # Convert into (src, dest) tuples. binaries += [(str(src_file), str(_compute_dest_dir(src_file))) for src_file in plugin_binaries] datas += [(str(src_file), str(_compute_dest_dir(src_file))) for src_file in plugin_datas] except Exception: logger.warning("%s: failed to process QML plugin directory %s", self, plugin_dir, exc_info=True) return binaries, datas # https://doc.qt.io/qt-6/qtqml-modules-qmldir.html#plugin-declaration # [optional] plugin <name> [<path]> _qml_plugin_def = re.compile(r"^(?:(?:optional)\s+)?(?:plugin)\s+(?P<name>\w+)(?:\s+(?P<path>\.+))?$") def _process_qml_plugin(self, qmldir_file): """ Processes the QML directory corresponding to the given `qmldir` file. Returns lists of binaries and data files, but only the source file names. It is up to caller to turn these into lists of (src, dest) tuples. 
""" plugin_dir = qmldir_file.parent plugin_binaries = set() # Read the `qmldir` file to determine the names of plugin binaries, if any. contents = qmldir_file.read_text(encoding="utf-8") for line in contents.splitlines(): m = self._qml_plugin_def.match(line) if m is None: continue plugin_name = m.group("name") plugin_path = m.group("path") # We currently do not support custom plugin path - neither relative nor absolute (the latter will never # be supported, because to make it relocatable, we would need to modify the `qmpldir file`). if plugin_path is not None: raise Exception(f"Non-empty plugin path ({plugin_path!r} is not supported yet!") # Turn the plugin base name into actual shared lib name. if compat.is_linux: plugin_file = plugin_dir / f"lib{plugin_name}.so" elif compat.is_win: plugin_file = plugin_dir / f"{plugin_name}.dll" elif compat.is_darwin: plugin_file = plugin_dir / f"lib{plugin_name}.dylib" else: continue # This implicitly disables subsequent validation on unhandled platforms. # Warn if plugin file does not exist if not plugin_file.is_file(): logger.warn("%s: QML plugin binary %r does not exist!", str(plugin_file)) continue plugin_binaries.add(plugin_file) # Exclude plugins with invalid Qt dependencies. invalid_binaries = False for plugin_binary in plugin_binaries: valid, reason = self._validate_plugin_dependencies(plugin_binary) if not valid: logger.debug("%s: excluding QML plugin binary %r! Reason: %s", self, str(plugin_binary), reason) invalid_binaries = True # If there was an invalid binary, discard the plugin. if invalid_binaries: logger.debug("%s: excluding QML plugin directory %r due to invalid plugin binaries!", self, str(plugin_dir)) return [], [] # Generate binaries list. binaries = sorted(plugin_binaries) # Generate list of data files - all content of this directory, except for the plugin binaries. 
Sub-directories # are included if they do not contain a `qmldir` file (we do not recurse into the directory, but instead pass # only its name, leaving the recursion to PyInstaller's built-in expansion of paths returned by hooks). datas = [] for entry in plugin_dir.iterdir(): if entry.is_file(): if entry in plugin_binaries: continue else: if (entry / "qmldir").is_file(): continue datas.append(entry) return binaries, datas def collect_qtwebengine_files(self): """ Collect QtWebEngine helper process executable, translations, and resources. """ binaries = [] datas = [] # Output directory (varies between PyQt and PySide and among OSes; the difference is abstracted by # QtLibraryInfo.qt_rel_dir) rel_data_path = self.qt_rel_dir is_macos_framework = False if compat.is_darwin: # Determine if we are dealing with a framework-based Qt build (e.g., PyPI wheels) or a dylib-based one # (e.g., Anaconda). The former requires special handling, while the latter is handled in the same way as # Windows and Linux builds. is_macos_framework = os.path.exists( os.path.join(self.location['LibrariesPath'], 'QtWebEngineCore.framework') ) if is_macos_framework: # macOS .framework bundle src_framework_path = os.path.join(self.location['LibrariesPath'], 'QtWebEngineCore.framework') # If Qt libraries are bundled with the package, collect the .framework bundle into corresponding package's # subdirectory, because binary dependency analysis will also try to preserve the directory structure. # However, if we are collecting from system-wide Qt installation (e.g., Homebrew-installed Qt), the binary # depndency analysis will attempt to re-create .framework bundle in top-level directory, so we need to # collect the extra files there. 
bundled_qt_libs = pathlib.Path(self.package_location) in pathlib.Path(src_framework_path).parents if bundled_qt_libs: dst_framework_path = os.path.join(rel_data_path, 'lib/QtWebEngineCore.framework') else: dst_framework_path = 'QtWebEngineCore.framework' # In top-level directory # Determine the version directory - for now, we assume we are dealing with single-version framework; # i.e., the Versions directory contains only a single <version> directory, and Current symlink to it. versions = sorted([ version for version in os.listdir(os.path.join(src_framework_path, 'Versions')) if version != 'Current' ]) if len(versions) == 0: raise RuntimeError("Could not determine version of the QtWebEngineCore.framework!") elif len(versions) > 1: logger.warning( "Found multiple versions in QtWebEngineCore.framework (%r) - using the last one!", versions ) version = versions[-1] # Collect the Helpers directory. In well-formed .framework bundles (such as the ones provided by Homebrew), # the Helpers directory is located in the versioned directory, and symlinked to the top-level directory. src_helpers_path = os.path.join(src_framework_path, 'Versions', version, 'Helpers') dst_helpers_path = os.path.join(dst_framework_path, 'Versions', version, 'Helpers') if not os.path.exists(src_helpers_path): # Alas, the .framework bundles shipped with contemporary PyPI PyQt/PySide wheels are not well-formed # (presumably because .whl cannot preserve symlinks?). The Helpers in the top-level directory is in fact # the hard copy, and there is either no Helpers in versioned directory, or there is a duplicate. # So fall back to collecting from the top-level, but collect into versioned directory in order to # be compliant with codesign's expectations. src_helpers_path = os.path.join(src_framework_path, 'Helpers') helper_datas = hooks.collect_system_data_files(src_helpers_path, dst_helpers_path) # Filter out the actual helper executable from datas, and add it to binaries instead. 
This ensures that it # undergoes additional binary processing that rewrites the paths to linked libraries. HELPER_EXE = 'QtWebEngineProcess.app/Contents/MacOS/QtWebEngineProcess' for src_name, dest_name in helper_datas: if src_name.endswith(HELPER_EXE): binaries.append((src_name, dest_name)) else: datas.append((src_name, dest_name)) # Collect the Resources directory; same logic is used as with Helpers directory. src_resources_path = os.path.join(src_framework_path, 'Versions', version, 'Resources') dst_resources_path = os.path.join(dst_framework_path, 'Versions', version, 'Resources') if not os.path.exists(src_resources_path): src_resources_path = os.path.join(src_framework_path, 'Resources') datas += hooks.collect_system_data_files(src_resources_path, dst_resources_path) # NOTE: the QtWebEngineProcess helper is actually sought within the `QtWebEngineCore.framework/Helpers`, # which ought to be a symlink to `QtWebEngineCore.framework/Versions/Current/Helpers`, where `Current` # is also a symlink to the actual version directory, `A`. # # These symlinks are created automatically when the TOC list of collected resources is post-processed # using `PyInstaller.utils.osx.collect_files_from_framework_bundles` helper, so we do not have to # worry about them here... else: # Windows and linux (or Anaconda on macOS) locales = 'qtwebengine_locales' resources = 'resources' # Translations datas.append(( os.path.join(self.location['TranslationsPath'], locales), os.path.join(rel_data_path, 'translations', locales), )) # Resources; ``DataPath`` is the base directory for ``resources``, as per the # `docs <https://doc.qt.io/qt-5.10/qtwebengine-deploying.html#deploying-resources>`_. datas.append((os.path.join(self.location['DataPath'], resources), os.path.join(rel_data_path, resources)),) # Helper process executable (QtWebEngineProcess), located in ``LibraryExecutablesPath``. # The target directory is determined as `LibraryExecutablesPath` relative to `PrefixPath`. 
On Windows, # this should handle the differences between PySide2/PySide6 and PyQt5/PyQt6 PyPI wheel layout. rel_helper_path = os.path.relpath(self.location['LibraryExecutablesPath'], self.location['PrefixPath']) # However, on Linux, we need to account for distribution-packaged Qt, where `LibraryExecutablesPath` might # be nested deeper under `PrefixPath` than anticipated (w.r.t. PyPI wheel layout). For example, in Fedora, # the helper is located under `/usr/lib64/qt5/libexec/QtWebEngineProcess`, with `PrefixPath` being `/usr` # and `LibraryExecutablesPath` being `/usr/lib64/qt5/libexec/`, so the relative path ends up being # `lib64/qt5/libexec` instead of just `libexec`. So on linux, we explicitly force the PyPI-compliant # layout, by overriding relative helper path to just `libexec`. if compat.is_linux and rel_helper_path != "libexec": logger.info( "%s: overriding relative destination path of QtWebEngineProcess helper from %r to %r!", self, rel_helper_path, "libexec" ) rel_helper_path = "libexec" # Similarly, force the relative helper path for PySide2/PySide6 on Windows to `.`. This is already the case # with PyPI PySide Windows wheels. But it is not the case with conda-installed PySide2, where the Qt's # `PrefixPath` is for example `C:/Users/<user>/miniconda3/envs/<env-name>/Library`, while the corresponding # `LibraryExecutablesPath` is `C:/Users/<user>/miniconda3/envs/<env-name>/Library/bin`. if compat.is_win and not self.is_pyqt and rel_helper_path != ".": logger.info( "%s: overriding relative destination path of QtWebEngineProcess helper from %r to %r!", self, rel_helper_path, "." ) rel_helper_path = "." dest = os.path.normpath(os.path.join(rel_data_path, rel_helper_path)) binaries.append((os.path.join(self.location['LibraryExecutablesPath'], 'QtWebEngineProcess*'), dest)) # The helper QtWebEngineProcess executable should have an accompanying qt.conf file that helps it locate the # Qt shared libraries. 
Try collecting it as well qt_conf_file = os.path.join(self.location['LibraryExecutablesPath'], 'qt.conf') if not os.path.isfile(qt_conf_file): # The file seems to have been dropped from Qt 6.3 (and corresponding PySide6 and PyQt6) due to # redundancy; however, we still need it in the frozen application - so generate our own. from PyInstaller.config import CONF # workpath # Relative path to root prefix of bundled Qt - this corresponds to the "inverse" of `rel_helper_path` # variable that we computed earlier. if rel_helper_path == '.': rel_prefix = '.' else: # Replace each directory component in `rel_helper_path` with `..`. rel_prefix = os.path.join(*['..' for _ in range(len(rel_helper_path.split(os.pathsep)))]) # We expect the relative path to be either . or .. depending on PySide/PyQt layout; if that is not the # case, warn about irregular path. if rel_prefix not in ('.', '..'): logger.warning( "%s: unexpected relative Qt prefix path for QtWebEngineProcess qt.conf: %s", self, rel_prefix ) # The Qt docs on qt.conf (https://doc.qt.io/qt-5/qt-conf.html) recommend using forward slashes on # Windows as well, due to backslash having to be escaped. This should not matter as we expect the # relative path to be . or .., but you never know... if os.sep == '\\': rel_prefix = rel_prefix.replace(os.sep, '/') # Create temporary file in workpath qt_conf_file = os.path.join(CONF['workpath'], "qt.conf") with open(qt_conf_file, 'w', encoding='utf-8') as fp: print("[Paths]", file=fp) print("Prefix = {}".format(rel_prefix), file=fp) datas.append((qt_conf_file, dest)) # Add Linux-specific libraries. if compat.is_linux: # The automatic library detection fails for `NSS <https://packages.ubuntu.com/search?keywords=libnss3>`_, # which is used by QtWebEngine. In some distributions, the ``libnss`` supporting libraries are stored in a # subdirectory ``nss``. Since ``libnss`` is not linked against them but loads them dynamically at run-time, # we need to search for and add them. 
# # Specifically, the files we are looking for are # - libfreebl3.so # - libfreeblpriv3.so # - libnssckbi.so # - libnssdbm3.so # - libsoftokn3.so # and they might be in the same directory as ``libnss3.so`` (instead of ``nss`` subdirectory); this is # the case even with contemporary Debian releases. See # https://packages.debian.org/bullseye/amd64/libnss3/filelist # vs. # https://packages.debian.org/bookworm/amd64/libnss3/filelist # Analyze imports of ``QtWebEngineCore`` extension module, and look for ``libnss3.so`` to determine its # parent directory. libnss_dir = None module_file = hooks.get_module_file_attribute(self.namespace + '.QtWebEngineCore') for lib_name, lib_path in bindepend.get_imports(module_file): # (name, fullpath) tuples if lib_path is None: continue # Skip unresolved libraries # Look for ``libnss3.so``. if os.path.basename(lib_path).startswith('libnss3.so'): libnss_dir = os.path.dirname(lib_path) break # Search for NSS libraries logger.debug("%s: QtWebEngineCore is linked against libnss3.so; collecting NSS libraries...", self) if libnss_dir is not None: # Libraries to search for NSS_LIBS = [ 'libfreebl3.so', 'libfreeblpriv3.so', 'libnssckbi.so', 'libnssdbm3.so', 'libsoftokn3.so', ] # Directories (relative to `libnss_dir`) to search in. Also serve as relative destination paths. NSS_LIB_SUBDIRS = [ 'nss', '.', ] for subdir in NSS_LIB_SUBDIRS: for lib_name in NSS_LIBS: lib_file = os.path.normpath(os.path.join(libnss_dir, subdir, lib_name)) if os.path.isfile(lib_file): logger.debug("%s: collecting NSS library: %r", self, lib_file) binaries.append((lib_file, subdir)) return binaries, datas # Provide single instances of this class to avoid each hook constructing its own. pyqt5_library_info = QtLibraryInfo('PyQt5') pyqt6_library_info = QtLibraryInfo('PyQt6') pyside2_library_info = QtLibraryInfo('PySide2') pyside6_library_info = QtLibraryInfo('PySide6') def get_qt_library_info(namespace): """ Return QtLibraryInfo instance for the given namespace. 
""" if namespace == 'PyQt5': return pyqt5_library_info if namespace == 'PyQt6': return pyqt6_library_info elif namespace == 'PySide2': return pyside2_library_info elif namespace == 'PySide6': return pyside6_library_info raise ValueError(f'Invalid namespace: {namespace}!') # add_qt_dependencies # -------------------- # Generic implemnentation that finds the Qt 5/6 dependencies based on the hook name of a PyQt5/PyQt6/PySide2/PySide6 # hook. Returns (hiddenimports, binaries, datas). Typical usage: # ``hiddenimports, binaries, datas = add_qt5_dependencies(__file__)``. def add_qt_dependencies(hook_file): # Find the module underlying this Qt hook: change ``/path/to/hook-PyQt5.blah.py`` to ``PyQt5.blah``. hook_name, hook_ext = os.path.splitext(os.path.basename(hook_file)) assert hook_ext.startswith('.py') assert hook_name.startswith('hook-') module_name = hook_name[5:] namespace = module_name.split('.')[0] # Retrieve Qt library info structure.... qt_info = get_qt_library_info(namespace) # ... and use it to collect module dependencies return qt_info.collect_module(module_name) # add_qt5_dependencies # -------------------- # Find the Qt5 dependencies based on the hook name of a PySide2/PyQt5 hook. Returns (hiddenimports, binaries, datas). # Typical usage: ``hiddenimports, binaries, datas = add_qt5_dependencies(__file__)``. add_qt5_dependencies = add_qt_dependencies # Use generic implementation # add_qt6_dependencies # -------------------- # Find the Qt6 dependencies based on the hook name of a PySide6/PyQt6 hook. Returns (hiddenimports, binaries, datas). # Typical usage: ``hiddenimports, binaries, datas = add_qt6_dependencies(__file__)``. add_qt6_dependencies = add_qt_dependencies # Use generic implementation # A helper for ensuring that only one Qt bindings package is collected into frozen application. Intended to be called # from hooks for top-level bindings packages. 
def ensure_single_qt_bindings_package(qt_bindings): # For the lack of better alternative, use CONF structure. Note that this enforces single bindings for the whole # spec file instead of individual Analysis instances! from PyInstaller.config import CONF seen_qt_bindings = CONF.get("_seen_qt_bindings") if seen_qt_bindings is None: CONF["_seen_qt_bindings"] = qt_bindings elif qt_bindings != seen_qt_bindings: # Raise SystemExit to abort build process raise SystemExit( "ERROR: Aborting build process due to attempt to collect multiple Qt bindings packages: attempting to run " f"hook for {qt_bindings!r}, while hook for {seen_qt_bindings!r} has already been run! PyInstaller does not " "support multiple Qt bindings packages in a frozen application - either ensure that the build environment " "has only one Qt bindings package installed, or exclude the extraneous bindings packages via the module " "exclusion mechanism (--exclude command-line option, or excludes list in the spec file)." ) # A helper for generating exclude rules for extraneous Qt bindings. Intended for use in hooks for packages that pull in # multiple Qt bindings packages due to conditional imports (for example, `matplotlib.backends.qt_compat`, `qtpy`). def exclude_extraneous_qt_bindings(hook_name, qt_bindings_order=None): _QT_BINDINGS = ['PyQt5', 'PySide2', 'PyQt6', 'PySide6'] # Known bindings, and also their preferred order _QT_API_ENV = 'QT_API' def _create_excludes(selected_bindings): return [bindings for bindings in _QT_BINDINGS if bindings != selected_bindings] logger.debug("%s: selecting Qt bindings package...", hook_name) if not qt_bindings_order: qt_bindings_order = _QT_BINDINGS # Use default preference order env_qt_bindings = os.environ.get(_QT_API_ENV) if env_qt_bindings is not None: # Case-normalize the value into capitalized name from _QT_BINDINGS for further processing. 
normalized_name = {name.lower(): name for name in _QT_BINDINGS}.get(env_qt_bindings.lower()) if normalized_name is None: logger.warning( "%s: ignoring unsupported Qt bindings name %r in %s environment variable (supported values: %r)!", hook_name, env_qt_bindings, _QT_API_ENV, _QT_BINDINGS ) env_qt_bindings = normalized_name # First choice: see if a hook for top-level Qt bindings package has already been run; if it has, use that bindings # package. Due to check in the `ensure_single_qt_bindings_package` that these hooks use, only one such hook could # have been run. This should cover cases when the entry-point script explicitly imports one of Qt bindings before # importing a package that supports multiple bindings. from PyInstaller.config import CONF seen_qt_bindings = CONF.get("_seen_qt_bindings") if seen_qt_bindings is not None: # If bindings are also specified via environment variable and they differ, display a warning. if env_qt_bindings is not None and env_qt_bindings != seen_qt_bindings: logger.warning( "%s: ignoring %s environment variable (%r) because hook for %r has been run!", hook_name, _QT_API_ENV, env_qt_bindings, seen_qt_bindings ) logger.info( "%s: selected %r as Qt bindings because hook for %r has been run before.", hook_name, seen_qt_bindings, seen_qt_bindings ) return _create_excludes(seen_qt_bindings) # Second choice: honor the QT_API environment variable, if it specified a valid Qt bindings package. if env_qt_bindings is not None: logger.info( "%s: selected %r as Qt bindings as specified by the %s environment variable.", hook_name, env_qt_bindings, _QT_API_ENV ) return _create_excludes(env_qt_bindings) # Third choice: select first available bindings (sorted by the given preference order), and display a warning if # multiple bindings are available. 
available_qt_bindings = [] for bindings_name in qt_bindings_order: # Check if bindings are available info = get_qt_library_info(bindings_name) if info.version is None: continue available_qt_bindings.append(bindings_name) if not available_qt_bindings: logger.warning("%s: no Qt bindings are available!", hook_name) return [] # No need to generate any excludes... selected_qt_bindings = available_qt_bindings[0] if len(available_qt_bindings) == 1: logger.info("%s: selected %r as the only available Qt bindings.", hook_name, selected_qt_bindings) else: # Warn on multiple bindings, and tell user to use QT_API environment variable logger.warning( "%s: selected %r as Qt bindings, but multiple bindings are available: %r. Use the %s environment variable " "to select different bindings and suppress this warning.", hook_name, selected_qt_bindings, available_qt_bindings, _QT_API_ENV ) return _create_excludes(selected_qt_bindings)
QtLibraryInfo
python
huggingface__transformers
src/transformers/models/layoutlmv3/modeling_layoutlmv3.py
{ "start": 46692, "end": 51442 }
class ____(LayoutLMv3PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.layoutlmv3 = LayoutLMv3Model(config) self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False) self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, bbox: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.LongTensor] = None, ) -> Union[tuple, SequenceClassifierOutput]: r""" bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. 
Examples: ```python >>> from transformers import AutoProcessor, AutoModelForSequenceClassification >>> from datasets import load_dataset >>> import torch >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) >>> model = AutoModelForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base") >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> encoding = processor(image, words, boxes=boxes, return_tensors="pt") >>> sequence_label = torch.tensor([1]) >>> outputs = model(**encoding, labels=sequence_label) >>> loss = outputs.loss >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv3( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, bbox=bbox, pixel_values=pixel_values, ) sequence_output = outputs[0][:, 0, :] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss 
= loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "LayoutLMv3ForQuestionAnswering", "LayoutLMv3ForSequenceClassification", "LayoutLMv3ForTokenClassification", "LayoutLMv3Model", "LayoutLMv3PreTrainedModel", ]
LayoutLMv3ForSequenceClassification
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-twitter/llama_index/readers/twitter/base.py
{ "start": 208, "end": 2267 }
class ____(BasePydanticReader): """ Twitter tweets reader. Read tweets of user twitter handle. Check 'https://developer.twitter.com/en/docs/twitter-api/\ getting-started/getting-access-to-the-twitter-api' \ on how to get access to twitter API. Args: bearer_token (str): bearer_token that you get from twitter API. num_tweets (Optional[int]): Number of tweets for each user twitter handle.\ Default is 100 tweets. """ is_remote: bool = True bearer_token: str num_tweets: Optional[int] def __init__( self, bearer_token: str, num_tweets: Optional[int] = 100, ) -> None: """Initialize with parameters.""" super().__init__( num_tweets=num_tweets, bearer_token=bearer_token, ) @classmethod def class_name(cls) -> str: return "TwitterTweetReader" def load_data( self, twitterhandles: List[str], num_tweets: Optional[int] = None, **load_kwargs: Any, ) -> List[Document]: """ Load tweets of twitter handles. Args: twitterhandles (List[str]): List of user twitter handles to read tweets. """ try: import tweepy except ImportError: raise ImportError( "`tweepy` package not found, please run `pip install tweepy`" ) client = tweepy.Client(bearer_token=self.bearer_token) results = [] for username in twitterhandles: # tweets = api.user_timeline(screen_name=user, count=self.num_tweets) user = client.get_user(username=username) tweets = client.get_users_tweets( user.data.id, max_results=num_tweets or self.num_tweets ) response = " " for tweet in tweets.data: response = response + tweet.text + "\n" results.append(Document(text=response, id_=username)) return results
TwitterTweetReader
python
django-debug-toolbar__django-debug-toolbar
debug_toolbar/panels/settings.py
{ "start": 290, "end": 914 }
class ____(Panel): """ A panel to display all variables in django.conf.settings """ template = "debug_toolbar/panels/settings.html" is_async = True nav_title = _("Settings") def title(self): return _("Settings from %s") % self.get_stats()["settings"].get( "SETTINGS_MODULE" ) def generate_stats(self, request, response): self.record_stats( { "settings": { key: force_str(value) for key, value in sorted(get_safe_settings().items()) } } )
SettingsPanel
python
bokeh__bokeh
tests/unit/bokeh/plotting/test__graph.py
{ "start": 1432, "end": 5965 }
class ____: def test_convert_dataframes_to_sources(self) -> None: pd = pytest.importorskip("pandas") node_source = pd.DataFrame(data=dict(foo=[])) edge_source = pd.DataFrame(data=dict(start=[], end=[], bar=[])) kw = bpg.get_graph_kwargs(node_source, edge_source) # 'index' column is added from pandas df assert set(kw['node_renderer'].data_source.data.keys()) == {"index", "foo"} assert set(kw['edge_renderer'].data_source.data.keys()) == {"index", "start", "end", "bar"} def test_handle_sources(self) -> None: node_source = ColumnDataSource(data=dict(foo=[])) edge_source = ColumnDataSource(data=dict(start=[], end=[], bar=[])) kw = bpg.get_graph_kwargs(node_source, edge_source) assert set(kw['node_renderer'].data_source.data.keys()) == {"foo"} assert set(kw['edge_renderer'].data_source.data.keys()) == {"start", "end", "bar"} def test_handle_node_property_mixins(self) -> None: kwargs = dict(node_fill_color="purple", node_selection_fill_color="blue", node_nonselection_fill_color="yellow", node_hover_fill_color="red", node_muted_fill_color="orange", node_radius=0.6) kw = bpg.get_graph_kwargs({}, {}, **kwargs) r = kw['node_renderer'] assert r.glyph.fill_color == "purple" assert r.selection_glyph.fill_color == "blue" assert r.nonselection_glyph.fill_color == "yellow" assert r.hover_glyph.fill_color == "red" assert r.muted_glyph.fill_color == "orange" assert r.glyph.radius == 0.6 assert r.selection_glyph.radius == 0.6 assert r.nonselection_glyph.radius == 0.6 assert r.hover_glyph.radius == 0.6 assert r.muted_glyph.radius == 0.6 def test_handle_node_marker(self) -> None: kw = bpg.get_graph_kwargs({}, {}, node_marker='x') node_glyph = kw['node_renderer'].glyph assert isinstance(node_glyph, Scatter) and node_glyph.marker == "x" def test_handle_node_marker_dataspec_correctly(self) -> None: node_source = {'marker': ['square', 'circle', 'x']} kw = bpg.get_graph_kwargs(node_source, {}, node_marker='marker') node_glyph = kw['node_renderer'].glyph assert isinstance(node_glyph, Scatter) 
assert node_glyph.marker == field('marker') def test_handle_edge_property_mixins(self) -> None: kwargs = dict(edge_line_color="purple", edge_selection_line_color="blue", edge_nonselection_line_color="yellow", edge_hover_line_color="red", edge_muted_line_color="orange", edge_line_width=23) kw = bpg.get_graph_kwargs({}, {}, **kwargs) r = kw['edge_renderer'] assert r.glyph.line_color == "purple" assert r.selection_glyph.line_color == "blue" assert r.nonselection_glyph.line_color == "yellow" assert r.hover_glyph.line_color == "red" assert r.muted_glyph.line_color == "orange" assert r.glyph.line_width == 23 assert r.selection_glyph.line_width == 23 assert r.nonselection_glyph.line_width == 23 assert r.hover_glyph.line_width == 23 assert r.muted_glyph.line_width == 23 def test_default_muted_glyph(self) -> None: kwargs = dict( edge_line_color="purple", edge_line_alpha=0.7, node_fill_color="red", node_fill_alpha=0.8, node_line_color="blue", ) kw = bpg.get_graph_kwargs({}, {}, **kwargs) r = kw['edge_renderer'] assert isinstance(r.muted_glyph, MultiLine) assert r.muted_glyph.line_color == "purple" assert r.muted_glyph.line_alpha == 0.2 r = kw['node_renderer'] assert isinstance(r.muted_glyph, Scatter) assert r.muted_glyph.fill_color == "red" assert r.muted_glyph.line_alpha == 0.2 assert r.muted_glyph.line_color == "blue" def test_bad_input(self) -> None: msg = """\ Failed to auto-convert <class 'int'> to ColumnDataSource. Original error: expected a dict, dataclass, or eager dataframe support by Narwhals, got 42""" with pytest.raises(ValueError, match=msg): bpg.get_graph_kwargs(42, {}) with pytest.raises(ValueError, match=msg): bpg.get_graph_kwargs({}, 42) #----------------------------------------------------------------------------- # Private API #-----------------------------------------------------------------------------
Test_get_graph_kwargs
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/dataclass4.py
{ "start": 1054, "end": 1356 }
class ____(DC6): # This should generate an error because it is overriding # a field with a default value, but it doesn't have a # default value. a: int # This should generate an error because the default # value for "a" is inherited from the base class. b: str @dataclass
DC7
python
kamyu104__LeetCode-Solutions
Python/redundant-connection.py
{ "start": 534, "end": 857 }
class ____(object): def findRedundantConnection(self, edges): """ :type edges: List[List[int]] :rtype: List[int] """ union_find = UnionFind(len(edges)+1) for edge in edges: if not union_find.union_set(*edge): return edge return []
Solution
python
wandb__wandb
wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/fsevents.py
{ "start": 5131, "end": 6546 }
class ____(BaseObserver): def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): BaseObserver.__init__(self, emitter_class=FSEventsEmitter, timeout=timeout) def schedule(self, event_handler, path, recursive=False): # Python 2/3 compat try: str_class = unicode except NameError: str_class = str # Fix for issue #26: Trace/BPT error when given a unicode path # string. https://github.com/gorakhargosh/watchdog/issues#issue/26 if isinstance(path, str_class): #path = unicode(path, 'utf-8') path = unicodedata.normalize('NFC', path) # We only encode the path in Python 2 for backwards compatibility. # On Python 3 we want the path to stay as unicode if possible for # the sake of path matching not having to be rewritten to use the # bytes API instead of strings. The _watchdog_fsevent.so code for # Python 3 can handle both str and bytes paths, which is why we # do not HAVE to encode it with Python 3. The Python 2 code in # _watchdog_fsevents.so was not changed for the sake of backwards # compatibility. if sys.version_info < (3,): path = path.encode('utf-8') return BaseObserver.schedule(self, event_handler, path, recursive)
FSEventsObserver
python
ray-project__ray
python/ray/serve/_private/common.py
{ "start": 25601, "end": 25665 }
class ____(str, Enum): MULTI_APP = "MULTI_APP"
ServeDeployMode
python
encode__httpx
httpx/_client.py
{ "start": 3418, "end": 3869 }
class ____(enum.Enum): # UNOPENED: # The client has been instantiated, but has not been used to send a request, # or been opened by entering the context of a `with` block. UNOPENED = 1 # OPENED: # The client has either sent a request, or is within a `with` block. OPENED = 2 # CLOSED: # The client has either exited the `with` block, or `close()` has # been called explicitly. CLOSED = 3
ClientState
python
simonw__datasette
datasette/views/row.py
{ "start": 5528, "end": 6487 }
class ____(Exception): def __init__(self, error): self.error = error async def _resolve_row_and_check_permission(datasette, request, permission): from datasette.app import DatabaseNotFound, TableNotFound, RowNotFound try: resolved = await datasette.resolve_row(request) except DatabaseNotFound as e: return False, _error(["Database not found: {}".format(e.database_name)], 404) except TableNotFound as e: return False, _error(["Table not found: {}".format(e.table)], 404) except RowNotFound as e: return False, _error(["Record not found: {}".format(e.pk_values)], 404) # Ensure user has permission to delete this row if not await datasette.allowed( action=permission, resource=TableResource(database=resolved.db.name, table=resolved.table), actor=request.actor, ): return False, _error(["Permission denied"], 403) return True, resolved
RowError
python
Pylons__pyramid
docs/quick_tutorial/authorization/tutorial/resources.py
{ "start": 52, "end": 202 }
class ____: __acl__ = [(Allow, Everyone, 'view'), (Allow, 'group:editors', 'edit')] def __init__(self, request): pass
Root
python
dask__distributed
distributed/protocol/pickle.py
{ "start": 271, "end": 2924 }
class ____(pickle.Pickler): def reducer_override(self, obj): # For some objects this causes segfaults otherwise, see # https://github.com/dask/distributed/pull/7564#issuecomment-1438727339 if _always_use_pickle_for(obj): return NotImplemented try: serialize = dask_serialize.dispatch(type(obj)) deserialize = dask_deserialize.dispatch(type(obj)) return deserialize, serialize(obj) except TypeError: return NotImplemented def _always_use_pickle_for(x): try: mod, _, _ = x.__class__.__module__.partition(".") except Exception: return False if mod == "numpy": import numpy as np return isinstance(x, np.ndarray) elif mod == "pandas": import pandas as pd return isinstance(x, pd.core.generic.NDFrame) elif mod == "builtins": return isinstance(x, (str, bytes)) else: return False def dumps(x, *, buffer_callback=None, protocol=HIGHEST_PROTOCOL): """Manage between cloudpickle and pickle 1. Try pickle 2. If it is short then check if it contains __main__ 3. If it is long, then first check type, then check __main__ """ buffers = [] dump_kwargs = {"protocol": protocol or HIGHEST_PROTOCOL} if dump_kwargs["protocol"] >= 5 and buffer_callback is not None: dump_kwargs["buffer_callback"] = buffers.append try: try: result = pickle.dumps(x, **dump_kwargs) except Exception: f = io.BytesIO() pickler = _DaskPickler(f, **dump_kwargs) buffers.clear() pickler.dump(x) result = f.getvalue() if b"__main__" in result or ( getattr(inspect.getmodule(x), "__name__", None) in cloudpickle.list_registry_pickle_by_value() ): if len(result) < 1000 or not _always_use_pickle_for(x): buffers.clear() result = cloudpickle.dumps(x, **dump_kwargs) except Exception: try: buffers.clear() result = cloudpickle.dumps(x, **dump_kwargs) except Exception: logger.exception("Failed to serialize %s.", x) raise if buffer_callback is not None: for b in buffers: buffer_callback(b) return result def loads(x, *, buffers=()): try: if buffers: return pickle.loads(x, buffers=buffers) else: return pickle.loads(x) except Exception: 
logger.info("Failed to deserialize %s", x[:10000], exc_info=True) raise
_DaskPickler
python
coleifer__peewee
peewee.py
{ "start": 23753, "end": 25224 }
class ____(object): def __init__(self, *args, **kwargs): super(_HashableSource, self).__init__(*args, **kwargs) self._update_hash() @Node.copy def alias(self, name): self._alias = name self._update_hash() def _update_hash(self): self._hash = self._get_hash() def _get_hash(self): return hash((self.__class__, self._path, self._alias)) def __hash__(self): return self._hash def __eq__(self, other): if isinstance(other, _HashableSource): return self._hash == other._hash return Expression(self, OP.EQ, other) def __ne__(self, other): if isinstance(other, _HashableSource): return self._hash != other._hash return Expression(self, OP.NE, other) def _e(op): def inner(self, rhs): return Expression(self, op, rhs) return inner __lt__ = _e(OP.LT) __le__ = _e(OP.LTE) __gt__ = _e(OP.GT) __ge__ = _e(OP.GTE) def __bind_database__(meth): @wraps(meth) def inner(self, *args, **kwargs): result = meth(self, *args, **kwargs) if self._database: return result.bind(self._database) return result return inner def __join__(join_type=JOIN.INNER, inverted=False): def method(self, other): if inverted: self, other = other, self return Join(self, other, join_type=join_type) return method
_HashableSource
python
tensorflow__tensorflow
tensorflow/python/distribute/distribute_coordinator_test.py
{ "start": 2355, "end": 2755 }
class ____(object): def __init__(self, between_graph=False, should_init=None, should_checkpoint=None, should_save_summary=None): self.experimental_between_graph = between_graph self.experimental_should_init = should_init self.should_checkpoint = should_checkpoint self.should_save_summary = should_save_summary
MockExtended
python
dagster-io__dagster
docs/sphinx/_ext/sphinx-mdx-builder/sphinxcontrib/mdxbuilder/writers/mdx.py
{ "start": 3889, "end": 4333 }
class ____(writers.Writer): supported = ("mdx",) settings_spec = ("No options here.", "", ()) settings_defaults = {} output: str def __init__(self, builder: "MdxBuilder") -> None: super().__init__() self.builder = builder def translate(self): self.visitor = MdxTranslator(self.document, self.builder) self.document.walkabout(self.visitor) self.output = self.visitor.body
MdxWriter
python
PrefectHQ__prefect
src/prefect/utilities/asyncutils.py
{ "start": 19024, "end": 19840 }
class ____: def __init__(self, initial_value_func: Callable[[], int]) -> None: self._semaphore: Optional[asyncio.Semaphore] = None self._initial_value_func = initial_value_func async def __aenter__(self) -> asyncio.Semaphore: self._initialize_semaphore() if TYPE_CHECKING: assert self._semaphore is not None await self._semaphore.__aenter__() return self._semaphore async def __aexit__(self, *args: Any) -> None: if TYPE_CHECKING: assert self._semaphore is not None await self._semaphore.__aexit__(*args) def _initialize_semaphore(self) -> None: if self._semaphore is None: initial_value = self._initial_value_func() self._semaphore = asyncio.Semaphore(initial_value)
LazySemaphore
python
pytorch__pytorch
test/dynamo/test_guard_serialization.py
{ "start": 4631, "end": 6880 }
class ____(torch.Tensor): @staticmethod def __new__(cls, a, extra, outer_size=None, outer_stride=None): if outer_size is None: outer_size = a.size() if outer_stride is None: outer_stride = a.stride() shape = outer_size kwargs = {} kwargs["strides"] = outer_stride kwargs["storage_offset"] = a.storage_offset() kwargs["device"] = a.device kwargs["layout"] = a.layout kwargs["requires_grad"] = a.requires_grad kwargs["dtype"] = a.dtype return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) def __init__(self, a, extra, outer_size=None, outer_stride=None): self.a = a self.extra = extra @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if kwargs is None: kwargs = {} args_a = pytree.tree_map_only( SubclassWithCustomMetadataGuard, lambda x: x.a, args ) kwargs_a = pytree.tree_map_only( SubclassWithCustomMetadataGuard, lambda x: x.a, kwargs ) out_a = func(*args_a, **kwargs_a) if isinstance(out_a, torch.Tensor): assert isinstance(args[0], SubclassWithCustomMetadataGuard) return SubclassWithCustomMetadataGuard(out_a, extra=args[0].extra) return out_a @classmethod def __metadata_guard__(cls, meta1, meta2): # Define custom metadata guard logic that only looks at "bar" to determine # metadata equivalence. This is more purposefully more lax than the default # guard behavior. return meta1["extra"]["bar"] == meta2["extra"]["bar"] def __tensor_flatten__(self): # store extra in meta return ["a"], {"extra": self.extra} @staticmethod def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride): assert isinstance(meta, dict) a = inner_tensors["a"] # pull out extra from meta extra = meta["extra"] if type(a) is torch.Tensor: assert outer_size is not None assert outer_stride is not None return SubclassWithCustomMetadataGuard(a, extra, outer_size, outer_stride)
SubclassWithCustomMetadataGuard
python
realpython__materials
python-serialize/http-payload/pydantic-demo/main.py
{ "start": 212, "end": 609 }
class ____(Metadata): name: str @field_validator("name") def check_user_name(cls, name): if name[0].isupper(): return name raise ValueError("name must start with an uppercase letter") if __name__ == "__main__": response = httpx.get("http://localhost:8000/users") for item in response.json(): user = User(**item) print(repr(user))
User
python
mwaskom__seaborn
seaborn/_core/moves.py
{ "start": 6247, "end": 7550 }
class ____(Move): """ Divisive scaling on the value axis after aggregating within groups. Parameters ---------- func : str or callable Function called on each group to define the comparison value. where : str Query string defining the subset used to define the comparison values. by : list of variables Variables used to define aggregation groups. percent : bool If True, multiply the result by 100. Examples -------- .. include:: ../docstrings/objects.Norm.rst """ func: Union[Callable, str] = "max" where: Optional[str] = None by: Optional[list[str]] = None percent: bool = False group_by_orient: ClassVar[bool] = False def _norm(self, df, var): if self.where is None: denom_data = df[var] else: denom_data = df.query(self.where)[var] df[var] = df[var] / denom_data.agg(self.func) if self.percent: df[var] = df[var] * 100 return df def __call__( self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale], ) -> DataFrame: other = {"x": "y", "y": "x"}[orient] return groupby.apply(data, self._norm, other) # TODO # @dataclass # class Ridge(Move): # ...
Norm
python
pytorch__pytorch
torch/distributed/rpc/internal.py
{ "start": 539, "end": 654 }
class ____(Enum): SYNC = "sync" ASYNC = "async" ASYNC_JIT = "async_jit" REMOTE = "remote"
RPCExecMode
python
py-pdf__pypdf
pypdf/errors.py
{ "start": 410, "end": 498 }
class ____(Exception): """Base class for all exceptions raised by pypdf."""
PyPdfError
python
numpy__numpy
benchmarks/benchmarks/bench_manipulate.py
{ "start": 2159, "end": 3178 }
class ____(Benchmark): params = [ [(2, 1, 4), (2, 1), (5, 2, 3, 1)], ] param_names = ['shape'] timeout = 10 def setup(self, shape): self.xarg = np.ones(shape=shape) self.reshaped = deque(shape) self.reshaped.rotate(1) self.reshaped = tuple(self.reshaped) def time_expand_dims(self, shape): np.expand_dims(self.xarg, axis=1) def time_expand_dims_neg(self, shape): np.expand_dims(self.xarg, axis=-1) def time_squeeze_dims(self, shape): np.squeeze(self.xarg) def time_flip_all(self, shape): np.flip(self.xarg, axis=None) def time_flip_one(self, shape): np.flip(self.xarg, axis=1) def time_flip_neg(self, shape): np.flip(self.xarg, axis=-1) def time_moveaxis(self, shape): np.moveaxis(self.xarg, [0, 1], [-1, -2]) def time_roll(self, shape): np.roll(self.xarg, 3) def time_reshape(self, shape): np.reshape(self.xarg, self.reshaped)
DimsManipulations
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/streams.py
{ "start": 21812, "end": 22354 }
class ____(Organizations): """ API docs: https://docs.github.com/en/rest/teams/teams?apiVersion=2022-11-28#list-teams """ use_cache = True def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str: return f"orgs/{stream_slice['organization']}/teams" def parse_response(self, response: requests.Response, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping]: for record in response.json(): yield self.transform(record=record, stream_slice=stream_slice)
Teams
python
has2k1__plotnine
plotnine/geoms/geom_smooth.py
{ "start": 515, "end": 3635 }
class ____(geom): """ A smoothed conditional mean {usage} Parameters ---------- {common_parameters} legend_fill_ratio : float, default=0.5 How much (vertically) of the legend box should be filled by the color that indicates the confidence intervals. Should be in the range [0, 1]. See Also -------- plotnine.stat_smooth : The default `stat` for this `geom`. """ DEFAULT_AES = { "alpha": 0.4, "color": "black", "fill": "#999999", "linetype": "solid", "size": 1, "ymin": None, "ymax": None, } REQUIRED_AES = {"x", "y"} DEFAULT_PARAMS = { "stat": "smooth", "position": "identity", "na_rm": False, "legend_fill_ratio": 0.5, } legend_key_size = staticmethod(geom_path.legend_key_size) def use_defaults( self, data: pd.DataFrame, aes_modifiers: dict[str, Any] ) -> pd.DataFrame: has_ribbon = "ymin" in data and "ymax" in data data = super().use_defaults(data, aes_modifiers) # When there is no ribbon, the default values for 'ymin' # and 'ymax' are None (not numeric). So we remove them # prevent any computations that may use them without checking. 
if not has_ribbon: del data["ymin"] del data["ymax"] return data def setup_data(self, data: pd.DataFrame) -> pd.DataFrame: return data.sort_values(["PANEL", "group", "x"]) @staticmethod def draw_group( data: pd.DataFrame, panel_params: panel_view, coord: coord, ax: Axes, params: dict[str, Any], ): has_ribbon = "ymin" in data and "ymax" in data if has_ribbon: data2 = data.copy() data2["color"] = "none" params2 = params.copy() params2["outline_type"] = "full" geom_ribbon.draw_group(data2, panel_params, coord, ax, params2) data["alpha"] = 1 geom_line.draw_group(data, panel_params, coord, ax, params) @staticmethod def draw_legend( data: pd.Series[Any], da: DrawingArea, lyr: layer ) -> DrawingArea: """ Draw letter 'a' in the box Parameters ---------- data : dataframe Legend data da : DrawingArea Canvas lyr : layer Layer Returns ------- out : DrawingArea """ from matplotlib.patches import Rectangle try: has_se = lyr.stat.params["se"] except KeyError: has_se = False if has_se: fill = to_rgba(data["fill"], data["alpha"]) r = lyr.geom.params["legend_fill_ratio"] bg = Rectangle( (0, (1 - r) * da.height / 2), width=da.width, height=r * da.height, facecolor=fill, linewidth=0, ) da.add_artist(bg) data["alpha"] = 1 return geom_path.draw_legend(data, da, lyr)
geom_smooth
python
conda__conda
conda/models/version.py
{ "start": 23727, "end": 26182 }
class ____(BaseSpec, metaclass=SingleStrArgCachingType): _cache_ = {} def __init__(self, vspec): vspec_str, matcher, is_exact = self.get_matcher(vspec) super().__init__(vspec_str, matcher, is_exact) def get_matcher(self, vspec): try: vspec = int(vspec) except ValueError: pass else: matcher = self.exact_match is_exact = True return vspec, matcher, is_exact vspec_str = str(vspec).strip() if vspec_str == "*": matcher = self.always_true_match is_exact = False elif vspec_str.startswith(("=", "<", ">", "!")): m = version_relation_re.match(vspec_str) if m is None: raise InvalidVersionSpec(vspec_str, "invalid operator") operator_str, vo_str = m.groups() try: self.operator_func = OPERATOR_MAP[operator_str] except KeyError: raise InvalidVersionSpec(vspec_str, f"invalid operator: {operator_str}") self.matcher_vo = VersionOrder(vo_str) matcher = self.operator_match is_exact = operator_str == "==" elif vspec_str[0] == "^" or vspec_str[-1] == "$": if vspec_str[0] != "^" or vspec_str[-1] != "$": raise InvalidVersionSpec( vspec_str, "regex specs must start with '^' and end with '$'" ) self.regex = re.compile(vspec_str) matcher = self.regex_match is_exact = False # if hasattr(spec, 'match'): # self.spec = _spec # self.match = spec.match else: matcher = self.exact_match is_exact = True return vspec_str, matcher, is_exact def merge(self, other): if self.raw_value != other.raw_value: raise ValueError( f"Incompatible component merge:\n - {self.raw_value!r}\n - {other.raw_value!r}" ) return self.raw_value def union(self, other): options = {self.raw_value, other.raw_value} return "|".join(options) @property def exact_value(self) -> int | None: try: return int(self.raw_value) except ValueError: return None def __str__(self): return str(self.spec) def __repr__(self): return str(self.spec)
BuildNumberMatch
python
spack__spack
lib/spack/spack/environment/environment.py
{ "start": 119807, "end": 120039 }
class ____(SpackEnvironmentError): """Class for Spack environment-specific configuration errors.""" def __init__(self, msg, filename): self.filename = filename super().__init__(msg)
SpackEnvironmentConfigError
python
dagster-io__dagster
python_modules/dagster/dagster/_core/secrets/env_file.py
{ "start": 704, "end": 2402 }
class ____(SecretsLoader, ConfigurableClass): """Default secrets loader which loads additional env vars from a per-project .env file. Can be manually configured in the dagster.yaml file or implicitly set via the DAGSTER_PROJECT_ENV_FILE_PATHS environment variable. """ def __init__( self, inst_data: Optional[ConfigurableClassData] = None, location_paths: Optional[Mapping[str, str]] = None, ): self._inst_data = inst_data self._location_paths = location_paths @classmethod def config_type(cls) -> Shape: return Shape( fields={ "location_paths": Map(key_type=str, inner_type=str), }, ) def get_secrets_for_environment(self, location_name: Optional[str]) -> dict[str, str]: inst_data = self._inst_data location_paths = self._location_paths or {} if not inst_data: inst_data_env = os.getenv("DAGSTER_PROJECT_ENV_FILE_PATHS") if inst_data_env: location_paths = json.loads(inst_data_env) else: location_paths = inst_data.config_dict.get("location_paths", {}) if location_name and location_name in location_paths: env_file_path = os.path.join(os.getcwd(), location_paths[location_name]) return get_env_var_dict(env_file_path) else: return {} @property def inst_data(self): return self._inst_data @classmethod def from_config_value( cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any] ) -> Self: return cls(inst_data=inst_data, **config_value)
PerProjectEnvFileLoader
python
PrefectHQ__prefect
src/prefect/server/orchestration/core_policy.py
{ "start": 7312, "end": 7822 }
class ____(FlowRunOrchestrationPolicy): @staticmethod def priority() -> list[ Union[ type[BaseUniversalTransform[orm_models.FlowRun, core.FlowRunPolicy]], type[BaseOrchestrationRule[orm_models.FlowRun, core.FlowRunPolicy]], ] ]: return [ BypassCancellingFlowRunsWithNoInfra, # cancel scheduled or suspended runs from the UI InstrumentFlowRunStateTransitions, ReleaseFlowConcurrencySlots, ]
MinimalFlowPolicy
python
pytorch__pytorch
test/dynamo/test_modules.py
{ "start": 24776, "end": 25395 }
class ____(ModuleAttributePrecedenceBase): def __init__(self) -> None: super().__init__() self.activation = torch.nn.ReLU() self.linear = torch.nn.Linear(10, 10) self.initializer = torch.ones([10, 10]) self.scale = 0.5 def activation(self, x): return x * 1.2 def initializer(self): return torch.zeros([10, 10]) def scale(self): return 2.0 def forward(self, x): # object attribute takes precedence unless it's a nn.Module return self.activation(self.linear(self.initializer + x)) * self.scale
ModuleAttributePrecedence
python
PrefectHQ__prefect
src/prefect/server/api/concurrency_limits_v2.py
{ "start": 6034, "end": 16935 }
class ____(PrefectBaseModel): lease_id: UUID limits: list[MinimalConcurrencyLimitResponse] async def _acquire_concurrency_slots( session: AsyncSession, names: List[str], slots: int, mode: Literal["concurrency", "rate_limit"], ) -> tuple[list[schemas.core.ConcurrencyLimitV2], bool]: limits = [ schemas.core.ConcurrencyLimitV2.model_validate(limit) for limit in ( await models.concurrency_limits_v2.bulk_read_concurrency_limits( session=session, names=names ) ) ] active_limits = [limit for limit in limits if bool(limit.active)] if any(limit.limit < slots for limit in active_limits): raise HTTPException( status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail="Slots requested is greater than the limit", ) non_decaying = [ str(limit.name) for limit in active_limits if limit.slot_decay_per_second == 0.0 ] if mode == "rate_limit" and non_decaying: raise HTTPException( status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail=( "Only concurrency limits with slot decay can be used for " "rate limiting. The following limits do not have a decay " f"configured: {','.join(non_decaying)!r}" ), ) acquired = await models.concurrency_limits_v2.bulk_increment_active_slots( session=session, concurrency_limit_ids=[limit.id for limit in active_limits], slots=slots, ) if not acquired: await session.rollback() return limits, acquired async def _generate_concurrency_locked_response( session: AsyncSession, limits: list[schemas.core.ConcurrencyLimitV2], slots: int, ) -> HTTPException: """ Generate a 423 Locked response when concurrency slots cannot be acquired. Calculates an appropriate Retry-After header value based on the blocking limit's characteristics. 
For limits without slot decay, caps avg_slot_occupancy_seconds at a configured maximum to prevent excessive retry delays from long-running tasks: - Tag-based limits (name starts with "tag:"): Capped at tag_concurrency_slot_wait_seconds (default 30s) to restore V1 behavior that users relied on - Other limits: Capped at maximum_concurrency_slot_wait_seconds (default 30s) to allow more uniform queues while still preventing astronomical delays Low average occupancies are always respected (e.g., 2s stays 2s, not forced higher). Limits with slot decay use the decay rate directly without capping. The final retry value includes jitter via clamped_poisson_interval to prevent thundering herd when many tasks retry simultaneously. """ active_limits = [limit for limit in limits if bool(limit.active)] await models.concurrency_limits_v2.bulk_update_denied_slots( session=session, concurrency_limit_ids=[limit.id for limit in active_limits], slots=slots, ) def num_blocking_slots(limit: schemas.core.ConcurrencyLimitV2) -> float: if limit.slot_decay_per_second > 0: return slots + limit.denied_slots else: return (slots + limit.denied_slots) / limit.limit blocking_limit = max((limit for limit in active_limits), key=num_blocking_slots) blocking_slots = num_blocking_slots(blocking_limit) if blocking_limit.slot_decay_per_second == 0.0: settings = get_current_settings() max_wait = ( settings.server.tasks.tag_concurrency_slot_wait_seconds if blocking_limit.name.startswith("tag:") else settings.server.concurrency.maximum_concurrency_slot_wait_seconds ) wait_time_per_slot = min(blocking_limit.avg_slot_occupancy_seconds, max_wait) else: wait_time_per_slot = 1.0 / blocking_limit.slot_decay_per_second retry_after = clamped_poisson_interval( average_interval=wait_time_per_slot * blocking_slots ) return HTTPException( status_code=status.HTTP_423_LOCKED, headers={ "Retry-After": str(retry_after), }, ) @router.post("/increment", status_code=status.HTTP_200_OK) async def bulk_increment_active_slots( 
slots: int = Body(..., gt=0), names: List[str] = Body(..., min_items=1), mode: Literal["concurrency", "rate_limit"] = Body("concurrency"), create_if_missing: Optional[bool] = Body( None, deprecated="Limits must be explicitly created before acquiring concurrency slots.", ), db: PrefectDBInterface = Depends(provide_database_interface), ) -> List[MinimalConcurrencyLimitResponse]: async with db.session_context(begin_transaction=True) as session: acquired_limits, acquired = await _acquire_concurrency_slots( session=session, names=names, slots=slots, mode=mode, ) if acquired: return [ MinimalConcurrencyLimitResponse( id=limit.id, name=str(limit.name), limit=limit.limit ) for limit in acquired_limits ] else: async with db.session_context(begin_transaction=True) as session: raise await _generate_concurrency_locked_response( session=session, limits=acquired_limits, slots=slots, ) @router.post("/increment-with-lease", status_code=status.HTTP_200_OK) async def bulk_increment_active_slots_with_lease( slots: int = Body(..., gt=0), names: List[str] = Body(..., min_items=1), mode: Literal["concurrency", "rate_limit"] = Body("concurrency"), lease_duration: float = Body( 300, # 5 minutes ge=60, # 1 minute le=60 * 60 * 24, # 1 day description="The duration of the lease in seconds.", ), holder: Optional[ConcurrencyLeaseHolder] = Body( None, description="The holder of the lease with type (flow_run, task_run, or deployment) and id.", ), db: PrefectDBInterface = Depends(provide_database_interface), ) -> ConcurrencyLimitWithLeaseResponse: async with db.session_context(begin_transaction=True) as session: acquired_limits, acquired = await _acquire_concurrency_slots( session=session, names=names, slots=slots, mode=mode, ) if acquired: lease_storage = get_concurrency_lease_storage() lease = await lease_storage.create_lease( resource_ids=[limit.id for limit in acquired_limits], ttl=timedelta(seconds=lease_duration), metadata=ConcurrencyLimitLeaseMetadata( slots=slots, holder=holder, ), ) 
return ConcurrencyLimitWithLeaseResponse( lease_id=lease.id, limits=[ MinimalConcurrencyLimitResponse( id=limit.id, name=str(limit.name), limit=limit.limit ) for limit in acquired_limits ], ) else: async with db.session_context(begin_transaction=True) as session: raise await _generate_concurrency_locked_response( session=session, limits=acquired_limits, slots=slots, ) @router.post("/decrement", status_code=status.HTTP_200_OK) async def bulk_decrement_active_slots( slots: int = Body(..., gt=0), names: List[str] = Body(..., min_items=1), occupancy_seconds: Optional[float] = Body(None, gt=0.0), create_if_missing: bool = Body( None, deprecated="Limits must be explicitly created before decrementing active slots.", ), db: PrefectDBInterface = Depends(provide_database_interface), ) -> List[MinimalConcurrencyLimitResponse]: async with db.session_context(begin_transaction=True) as session: limits = await models.concurrency_limits_v2.bulk_read_concurrency_limits( session=session, names=names ) if not limits: return [] await models.concurrency_limits_v2.bulk_decrement_active_slots( session=session, concurrency_limit_ids=[limit.id for limit in limits if bool(limit.active)], slots=slots, occupancy_seconds=occupancy_seconds, ) return [ MinimalConcurrencyLimitResponse( id=limit.id, name=str(limit.name), limit=limit.limit ) for limit in limits ] @router.post("/decrement-with-lease", status_code=status.HTTP_204_NO_CONTENT) async def bulk_decrement_active_slots_with_lease( lease_id: UUID = Body( ..., description="The ID of the lease corresponding to the concurrency limits to decrement.", embed=True, ), db: PrefectDBInterface = Depends(provide_database_interface), ) -> None: lease_storage = get_concurrency_lease_storage() lease = await lease_storage.read_lease(lease_id) if not lease: return occupancy_seconds = (datetime.now(timezone.utc) - lease.created_at).total_seconds() async with db.session_context(begin_transaction=True) as session: await 
models.concurrency_limits_v2.bulk_decrement_active_slots( session=session, concurrency_limit_ids=lease.resource_ids, slots=lease.metadata.slots if lease.metadata else 0, occupancy_seconds=occupancy_seconds, ) await lease_storage.revoke_lease(lease_id) @router.post("/leases/{lease_id}/renew", status_code=status.HTTP_204_NO_CONTENT) async def renew_concurrency_lease( lease_id: UUID = Path(..., description="The ID of the lease to renew"), lease_duration: float = Body( 300, # 5 minutes ge=60, # 1 minute le=60 * 60 * 24, # 1 day description="The duration of the lease in seconds.", embed=True, ), ) -> None: lease_storage = get_concurrency_lease_storage() # Atomically renew the lease (checks existence and updates index in single operation) renewed = await lease_storage.renew_lease( lease_id=lease_id, ttl=timedelta(seconds=lease_duration), ) # Handle the three possible return values: # - True: lease successfully renewed # - False: lease not found or expired # - None: legacy implementation (check lease existence to determine success) lease = None if renewed is None: # Legacy implementation returned None - check if lease actually exists lease = await lease_storage.read_lease(lease_id) if renewed is False or (renewed is None and lease is None): raise HTTPException( status_code=status.HTTP_410_GONE, detail="Lease not found - it may have expired or been revoked", )
ConcurrencyLimitWithLeaseResponse
python
doocs__leetcode
solution/1200-1299/1235.Maximum Profit in Job Scheduling/Solution2.py
{ "start": 0, "end": 409 }
class ____: def jobScheduling( self, startTime: List[int], endTime: List[int], profit: List[int] ) -> int: jobs = sorted(zip(endTime, startTime, profit)) n = len(profit) dp = [0] * (n + 1) for i, (_, s, p) in enumerate(jobs): j = bisect_right(jobs, s, hi=i, key=lambda x: x[0]) dp[i + 1] = max(dp[i], dp[j] + p) return dp[n]
Solution
python
more-itertools__more-itertools
tests/test_recipes.py
{ "start": 2936, "end": 3746 }
class ____(TestCase): """Tests for ``consume()``""" def test_sanity(self): """Test basic functionality""" r = (x for x in range(10)) mi.consume(r, 3) self.assertEqual(3, next(r)) def test_null_consume(self): """Check the null case""" r = (x for x in range(10)) mi.consume(r, 0) self.assertEqual(0, next(r)) def test_negative_consume(self): """Check that negative consumption throws an error""" r = (x for x in range(10)) self.assertRaises(ValueError, lambda: mi.consume(r, -1)) def test_total_consume(self): """Check that iterator is totally consumed by default""" r = (x for x in range(10)) mi.consume(r) self.assertRaises(StopIteration, lambda: next(r))
ConsumeTests
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/testing/fixtures/sql.py
{ "start": 9477, "end": 14148 }
class ____: def _compare_equal(self, a, b, *, compare_values=False): a_key = a._generate_cache_key() b_key = b._generate_cache_key() if a_key is None: assert a._annotations.get("nocache"), ( "Construct doesn't cache, so test suite should " "add the 'nocache' annotation" ) assert b_key is None else: eq_(a_key.key, b_key.key) eq_(hash(a_key.key), hash(b_key.key)) for a_param, b_param in zip(a_key.bindparams, b_key.bindparams): assert a_param.compare(b_param, compare_values=compare_values) return a_key, b_key def _run_compare_fixture(self, fixture, *, compare_values=False): case_a = fixture() case_b = fixture() for a, b in itertools.combinations_with_replacement( range(len(case_a)), 2 ): if a == b: assert case_a[a].compare( case_b[b], compare_values=compare_values ) else: assert not case_a[a].compare( case_b[b], compare_values=compare_values ) def _run_cache_key_fixture(self, fixture, *, compare_values=False): case_a = fixture() case_b = fixture() for a, b in itertools.combinations_with_replacement( range(len(case_a)), 2 ): if a == b: a_key, b_key = self._compare_equal( case_a[a], case_b[b], compare_values=compare_values ) if a_key is None: continue else: a_key = case_a[a]._generate_cache_key() b_key = case_b[b]._generate_cache_key() if a_key is None or b_key is None: if a_key is None: assert case_a[a]._annotations.get("nocache") if b_key is None: assert case_b[b]._annotations.get("nocache") continue if a_key.key == b_key.key: for a_param, b_param in zip( a_key.bindparams, b_key.bindparams ): if not a_param.compare( b_param, compare_values=compare_values ): break else: # this fails unconditionally since we could not # find bound parameter values that differed. # Usually we intended to get two distinct keys here # so the failure will be more descriptive using the # ne_() assertion. 
ne_(a_key.key, b_key.key) else: ne_(a_key.key, b_key.key) # ClauseElement-specific test to ensure the cache key # collected all the bound parameters that aren't marked # as "literal execute" if isinstance(case_a[a], ClauseElement) and isinstance( case_b[b], ClauseElement ): assert_a_params = [] assert_b_params = [] for elem in visitors.iterate(case_a[a]): if elem.__visit_name__ == "bindparam": assert_a_params.append(elem) for elem in visitors.iterate(case_b[b]): if elem.__visit_name__ == "bindparam": assert_b_params.append(elem) # note we're asserting the order of the params as well as # if there are dupes or not. ordering has to be # deterministic and matches what a traversal would provide. eq_( sorted(a_key.bindparams, key=lambda b: b.key), sorted( util.unique_list(assert_a_params), key=lambda b: b.key ), ) eq_( sorted(b_key.bindparams, key=lambda b: b.key), sorted( util.unique_list(assert_b_params), key=lambda b: b.key ), ) def _run_cache_key_equal_fixture(self, fixture, compare_values): case_a = fixture() case_b = fixture() for a, b in itertools.combinations_with_replacement( range(len(case_a)), 2 ): self._compare_equal( case_a[a], case_b[b], compare_values=compare_values )
CacheKeyFixture
python
PrefectHQ__prefect
tests/_internal/compatibility/test_async_dispatch.py
{ "start": 3368, "end": 5017 }
class ____: async def test_method_binding_works_correctly(self): """Verify that self is properly bound for instance methods""" class Counter: def __init__(self) -> None: self.count = 0 async def increment_async(self) -> None: self.count += 1 @async_dispatch(increment_async) def increment(self) -> None: self.count += 1 counter = Counter() assert counter.count == 0 # Test sync counter.increment(_sync=True) assert counter.count == 1 # Test async await counter.increment(_sync=False) assert counter.count == 2 async def test_method_binding_respects_context(self): """Verify that methods automatically dispatch based on context""" class Counter: def __init__(self) -> None: self.count = 0 self.calls: List[str] = [] async def increment_async(self) -> None: self.calls.append("async") self.count += 1 @async_dispatch(increment_async) def increment(self) -> None: self.calls.append("sync") self.count += 1 counter = Counter() # In sync context def sync_caller() -> None: counter.increment(_sync=True) sync_caller() assert counter.calls == ["sync"] assert counter.count == 1 # In async context await counter.increment() assert counter.calls == ["sync", "async"] assert counter.count == 2
TestMethodBinding
python
PrefectHQ__prefect
src/prefect/settings/base.py
{ "start": 8746, "end": 11012 }
class ____(SettingsConfigDict, total=False): """ Configuration for the behavior of Prefect settings models. """ prefect_toml_table_header: tuple[str, ...] """ Header of the TOML table within a prefect.toml file to use when filling variables. This is supplied as a `tuple[str, ...]` instead of a `str` to accommodate for headers containing a `.`. To use the root table, exclude this config setting or provide an empty tuple. """ def _add_environment_variables( schema: dict[str, Any], model: type[PrefectBaseSettings] ) -> None: for property in schema["properties"]: env_vars: list[str] = [] schema["properties"][property]["supported_environment_variables"] = env_vars field = model.model_fields[property] if inspect.isclass(field.annotation) and issubclass( field.annotation, PrefectBaseSettings ): continue elif field.validation_alias: if isinstance(field.validation_alias, AliasChoices): for alias in field.validation_alias.choices: if isinstance(alias, str): env_vars.append(alias.upper()) else: env_vars.append(f"{model.model_config.get('env_prefix')}{property.upper()}") def build_settings_config( path: tuple[str, ...] = tuple(), frozen: bool = False ) -> PrefectSettingsConfigDict: env_prefix = f"PREFECT_{'_'.join(path).upper()}_" if path else "PREFECT_" return PrefectSettingsConfigDict( env_prefix=env_prefix, env_file=".env", extra="ignore", toml_file="prefect.toml", prefect_toml_table_header=path, pyproject_toml_table_header=("tool", "prefect", *path), json_schema_extra=_add_environment_variables, # type: ignore frozen=frozen, ) _build_settings_config = build_settings_config # noqa # TODO: remove once all usage updated def _to_environment_variable_value( value: list[object] | set[object] | tuple[object] | Any, ) -> str: if isinstance(value, (list, set, tuple)): return ",".join(str(v) for v in sorted(value, key=str)) if isinstance(value, dict): return json.dumps(value) return str(value)
PrefectSettingsConfigDict
python
facelessuser__pymdown-extensions
tests/test_extensions/test_fancylists.py
{ "start": 23805, "end": 25092 }
class ____(util.MdCase): """Test fancy lists.""" extension = ['pymdownx.fancylists', 'pymdownx.saneheaders'] extension_configs = { 'pymdownx.fancylists': { 'additional_ordered_styles': ['roman', 'generic'] } } def test_no_alpha(self): """Test lists when alphabetical lists is disabled.""" self.check_markdown( R''' i. item 1 ii. item 2 a. item 1 V. item 1 ''', R''' <ol type="i"> <li>item 1</li> <li>item 2</li> </ol> <p>a. item 1</p> <ol start="5" type="I"> <li>item 1</li> </ol> ''', True ) def test_no_alpha_force(self): """Test attempting to force alphabetical when they are disabled.""" self.check_markdown( R''' /// fancylists | type=a i. item 1 j. item 2 /// ''', R''' <p>/// fancylists | type=a</p> <ol type="i"> <li>item 1 j. item 2 ///</li> </ol> ''', True )
TestFancyListsDisableAlpha
python
python-markdown__markdown
markdown/extensions/md_in_html.py
{ "start": 18717, "end": 19056 }
class ____(RawHtmlPostprocessor): def stash_to_string(self, text: str | etree.Element) -> str: """ Override default to handle any `etree` elements still in the stash. """ if isinstance(text, etree.Element): return self.md.serializer(text) else: return str(text)
MarkdownInHTMLPostprocessor
python
networkx__networkx
networkx/classes/filters.py
{ "start": 1508, "end": 2817 }
class ____: """Filter class to show specific nodes. Attach the set of nodes as an attribute to speed up this commonly used filter Note that another allowed attribute for filters is to store the number of nodes on the filter as attribute `length` (used in `__len__`). It is a user responsibility to ensure this attribute is accurate if present. """ def __init__(self, nodes): self.nodes = set(nodes) def __call__(self, node): return node in self.nodes def show_diedges(edges): """Returns a filter function that shows specific directed edges.""" edges = {(u, v) for u, v in edges} return lambda u, v: (u, v) in edges def show_edges(edges): """Returns a filter function that shows specific undirected edges.""" alledges = set(edges) | {(v, u) for (u, v) in edges} return lambda u, v: (u, v) in alledges def show_multidiedges(edges): """Returns a filter function that shows specific multi-directed edges.""" edges = {(u, v, k) for u, v, k in edges} return lambda u, v, k: (u, v, k) in edges def show_multiedges(edges): """Returns a filter function that shows specific multi-undirected edges.""" alledges = set(edges) | {(v, u, k) for (u, v, k) in edges} return lambda u, v, k: (u, v, k) in alledges
show_nodes
python
pandas-dev__pandas
pandas/tests/reshape/concat/test_index.py
{ "start": 6427, "end": 16861 }
class ____: def test_concat_multiindex_with_keys(self, multiindex_dataframe_random_data): frame = multiindex_dataframe_random_data index = frame.index result = concat([frame, frame], keys=[0, 1], names=["iteration"]) assert result.index.names == ("iteration",) + index.names tm.assert_frame_equal(result.loc[0], frame) tm.assert_frame_equal(result.loc[1], frame) assert result.index.nlevels == 3 def test_concat_multiindex_with_none_in_index_names(self): # GH 15787 index = MultiIndex.from_product([[1], range(5)], names=["level1", None]) df = DataFrame({"col": range(5)}, index=index, dtype=np.int32) result = concat([df, df], keys=[1, 2], names=["level2"]) index = MultiIndex.from_product( [[1, 2], [1], range(5)], names=["level2", "level1", None] ) expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32) tm.assert_frame_equal(result, expected) result = concat([df, df[:2]], keys=[1, 2], names=["level2"]) level2 = [1] * 5 + [2] * 2 level1 = [1] * 7 no_name = list(range(5)) + list(range(2)) tuples = list(zip(level2, level1, no_name)) index = MultiIndex.from_tuples(tuples, names=["level2", "level1", None]) expected = DataFrame({"col": no_name}, index=index, dtype=np.int32) tm.assert_frame_equal(result, expected) def test_concat_multiindex_rangeindex(self): # GH13542 # when multi-index levels are RangeIndex objects # there is a bug in concat with objects of len 1 df = DataFrame(np.random.default_rng(2).standard_normal((9, 2))) df.index = MultiIndex( levels=[pd.RangeIndex(3), pd.RangeIndex(3)], codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)], ) res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]]) exp = df.iloc[[2, 3, 4, 5], :] tm.assert_frame_equal(res, exp) def test_concat_multiindex_dfs_with_deepcopy(self): # GH 9967 example_multiindex1 = MultiIndex.from_product([["a"], ["b"]]) example_dataframe1 = DataFrame([0], index=example_multiindex1) example_multiindex2 = MultiIndex.from_product([["a"], ["c"]]) example_dataframe2 = DataFrame([1], 
index=example_multiindex2) example_dict = {"s1": example_dataframe1, "s2": example_dataframe2} expected_index = MultiIndex( levels=[["s1", "s2"], ["a"], ["b", "c"]], codes=[[0, 1], [0, 0], [0, 1]], names=["testname", None, None], ) expected = DataFrame([[0], [1]], index=expected_index) result_copy = concat(deepcopy(example_dict), names=["testname"]) tm.assert_frame_equal(result_copy, expected) result_no_copy = concat(example_dict, names=["testname"]) tm.assert_frame_equal(result_no_copy, expected) @pytest.mark.parametrize( "mi1_list", [ [["a"], range(2)], [["b"], np.arange(2.0, 4.0)], [["c"], ["A", "B"]], [["d"], pd.date_range(start="2017", end="2018", periods=2)], ], ) @pytest.mark.parametrize( "mi2_list", [ [["a"], range(2)], [["b"], np.arange(2.0, 4.0)], [["c"], ["A", "B"]], [["d"], pd.date_range(start="2017", end="2018", periods=2)], ], ) def test_concat_with_various_multiindex_dtypes( self, mi1_list: list, mi2_list: list ): # GitHub #23478 mi1 = MultiIndex.from_product(mi1_list) mi2 = MultiIndex.from_product(mi2_list) df1 = DataFrame(np.zeros((1, len(mi1))), columns=mi1) df2 = DataFrame(np.zeros((1, len(mi2))), columns=mi2) if mi1_list[0] == mi2_list[0]: expected_mi = MultiIndex( levels=[mi1_list[0], list(mi1_list[1])], codes=[[0, 0, 0, 0], [0, 1, 0, 1]], ) else: expected_mi = MultiIndex( levels=[ mi1_list[0] + mi2_list[0], list(mi1_list[1]) + list(mi2_list[1]), ], codes=[[0, 0, 1, 1], [0, 1, 2, 3]], ) expected_df = DataFrame(np.zeros((1, len(expected_mi))), columns=expected_mi) with tm.assert_produces_warning(None): result_df = concat((df1, df2), axis=1) tm.assert_frame_equal(expected_df, result_df) def test_concat_multiindex_(self): # GitHub #44786 df = DataFrame({"col": ["a", "b", "c"]}, index=["1", "2", "2"]) df = concat([df], keys=["X"]) iterables = [["X"], ["1", "2", "2"]] result_index = df.index expected_index = MultiIndex.from_product(iterables) tm.assert_index_equal(result_index, expected_index) result_df = df expected_df = DataFrame( {"col": ["a", 
"b", "c"]}, index=MultiIndex.from_product(iterables) ) tm.assert_frame_equal(result_df, expected_df) def test_concat_with_key_not_unique(self, performance_warning): # GitHub #46519 df1 = DataFrame({"name": [1]}) df2 = DataFrame({"name": [2]}) df3 = DataFrame({"name": [3]}) df_a = concat([df1, df2, df3], keys=["x", "y", "x"]) # the warning is caused by indexing unsorted multi-index with tm.assert_produces_warning( performance_warning, match="indexing past lexsort depth" ): out_a = df_a.loc[("x", 0), :] df_b = DataFrame( {"name": [1, 2, 3]}, index=MultiIndex( levels=[["x", "y"], range(1)], codes=[[0, 1, 0], [0, 0, 0]] ), ) with tm.assert_produces_warning( performance_warning, match="indexing past lexsort depth" ): out_b = df_b.loc[("x", 0)] tm.assert_frame_equal(out_a, out_b) df1 = DataFrame({"name": ["a", "a", "b"]}) df2 = DataFrame({"name": ["a", "b"]}) df3 = DataFrame({"name": ["c", "d"]}) df_a = concat([df1, df2, df3], keys=["x", "y", "x"]) with tm.assert_produces_warning( performance_warning, match="indexing past lexsort depth" ): out_a = df_a.loc[("x", 0), :] df_b = DataFrame( { "a": ["x", "x", "x", "y", "y", "x", "x"], "b": [0, 1, 2, 0, 1, 0, 1], "name": list("aababcd"), } ).set_index(["a", "b"]) df_b.index.names = [None, None] with tm.assert_produces_warning( performance_warning, match="indexing past lexsort depth" ): out_b = df_b.loc[("x", 0), :] tm.assert_frame_equal(out_a, out_b) def test_concat_with_duplicated_levels(self): # keyword levels should be unique df1 = DataFrame({"A": [1]}, index=["x"]) df2 = DataFrame({"A": [1]}, index=["y"]) msg = r"Level values not unique: \['x', 'y', 'y'\]" with pytest.raises(ValueError, match=msg): concat([df1, df2], keys=["x", "y"], levels=[["x", "y", "y"]]) @pytest.mark.parametrize("levels", [[["x", "y"]], [["x", "y", "y"]]]) def test_concat_with_levels_with_none_keys(self, levels): df1 = DataFrame({"A": [1]}, index=["x"]) df2 = DataFrame({"A": [1]}, index=["y"]) msg = "levels supported only when keys is not None" with 
pytest.raises(ValueError, match=msg): concat([df1, df2], levels=levels) def test_concat_range_index_result(self): # GH#47501 df1 = DataFrame({"a": [1, 2]}) df2 = DataFrame({"b": [1, 2]}) result = concat([df1, df2], sort=True, axis=1) expected = DataFrame({"a": [1, 2], "b": [1, 2]}) tm.assert_frame_equal(result, expected) expected_index = pd.RangeIndex(0, 2) tm.assert_index_equal(result.index, expected_index, exact=True) def test_concat_index_keep_dtype(self): # GH#47329 df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype="object")) df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype="object")) result = concat([df1, df2], ignore_index=True, join="outer", sort=True) expected = DataFrame( [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype="object") ) tm.assert_frame_equal(result, expected) def test_concat_index_keep_dtype_ea_numeric(self, any_numeric_ea_dtype): # GH#47329 df1 = DataFrame( [[0, 1, 1]], columns=Index([1, 2, 3], dtype=any_numeric_ea_dtype) ) df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype=any_numeric_ea_dtype)) result = concat([df1, df2], ignore_index=True, join="outer", sort=True) expected = DataFrame( [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype=any_numeric_ea_dtype), ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("dtype", ["Int8", "Int16", "Int32"]) def test_concat_index_find_common(self, dtype): # GH#47329 df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype=dtype)) df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype="Int32")) result = concat([df1, df2], ignore_index=True, join="outer", sort=True) expected = DataFrame( [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype="Int32") ) tm.assert_frame_equal(result, expected) def test_concat_axis_1_sort_false_rangeindex(self, using_infer_string): # GH 46675 s1 = Series(["a", "b", "c"]) s2 = Series(["a", "b"]) s3 = Series(["a", "b", "c", "d"]) s4 = Series([], dtype=object if not using_infer_string else "str") result = concat( 
[s1, s2, s3, s4], sort=False, join="outer", ignore_index=False, axis=1 ) expected = DataFrame( [ ["a"] * 3 + [np.nan], ["b"] * 3 + [np.nan], ["c", np.nan] * 2, [np.nan] * 2 + ["d"] + [np.nan], ], dtype=object if not using_infer_string else "str", ) tm.assert_frame_equal( result, expected, check_index_type=True, check_column_type=True )
TestMultiIndexConcat
python
kamyu104__LeetCode-Solutions
Python/simplify-path.py
{ "start": 29, "end": 412 }
class ____(object): # @param path, a string # @return a string def simplifyPath(self, path): stack, tokens = [], path.split("/") for token in tokens: if token == ".." and stack: stack.pop() elif token != ".." and token != "." and token: stack.append(token) return "/" + "/".join(stack)
Solution
python
streamlit__streamlit
lib/streamlit/dataframe_util.py
{ "start": 3883, "end": 4746 }
class ____(Protocol): """Protocol for DBAPI 2.0 Cursor objects (PEP 249). This is a simplified version of the DBAPI Cursor protocol that only contains the methods that are relevant or used for our DB API Integration. Specification: https://peps.python.org/pep-0249/ Inspired by: https://github.com/python/typeshed/blob/main/stdlib/_typeshed/dbapi.pyi """ @property def description( self, ) -> ( Sequence[ tuple[ str, Any | None, int | None, int | None, int | None, int | None, bool | None, ] ] | None ): ... def fetchmany(self, size: int = ..., /) -> Sequence[Sequence[Any]]: ... def fetchall(self) -> Sequence[Sequence[Any]]: ...
DBAPICursor
python
kamyu104__LeetCode-Solutions
Python/move-sub-tree-of-n-ary-tree.py
{ "start": 1880, "end": 3071 }
class ____(object): def moveSubTree(self, root, p, q): """ :type root: Node :type p: Node :type q: Node :rtype: Node """ def find_parents(node, parent, p, q, is_ancestor, lookup): if node in (p, q): lookup[node] = parent if len(lookup) == 2: return True, is_ancestor for child in node.children: found, new_is_ancestor = find_parents(child, node, p, q, is_ancestor or node == p, lookup) if found: return True, new_is_ancestor return False, False lookup = {} is_ancestor = find_parents(root, None, p, q, False, lookup)[1] if p in lookup and lookup[p] == q: return root q.children.append(p) if not is_ancestor: lookup[p].children.remove(p) else: lookup[q].children.remove(q) if p == root: root = q else: lookup[p].children[lookup[p].children.index(p)] = q return root # Time: O(n) # Space: O(h) # two pass solution without recursion
Solution_Recu
python
wandb__wandb
wandb/apis/paginator.py
{ "start": 5247, "end": 5920 }
class ____(RelayPaginator[_NodeT, _WandbT], Sized, ABC): """A Paginator for GQL nodes parsed via Pydantic, with a known total count. <!-- lazydoc-ignore-class: internal --> """ last_response: Connection[_NodeT] | None def __len__(self) -> int: """Returns the total number of objects to expect.""" # If the first page hasn't been fetched yet, do that first if self.last_response is None: self._load_page() if (conn := self.last_response) and (total := conn.total_count) is not None: return total raise NotImplementedError(f"{nameof(type(self))!r} doesn't provide length")
SizedRelayPaginator
python
pytorch__pytorch
test/distributed/fsdp/test_fsdp_fine_tune.py
{ "start": 1975, "end": 15705 }
class ____(FSDPTest): """Tests fine-tuning cases where some parameters are frozen.""" NUM_LINEARS = 6 @property def world_size(self) -> int: return min(_get_device_module(self.device_type).device_count(), 2) def _init_seq_module(self, device) -> nn.Module: torch.manual_seed(42) modules = [] for _ in range(self.NUM_LINEARS): modules += [nn.Linear(5, 5, device=device), nn.ReLU()] seq = nn.Sequential(*modules) self._set_seq_module_requires_grad(seq, False) return seq def _set_seq_module_requires_grad(self, seq: nn.Module, requires_grad: bool): # Assume that the linears are leaf modules, meaning that we can pass # `recurse=True` to have this to work for both pre/post FSDP wrapping for i in range(self.NUM_LINEARS): # Only set for every other linear to test mixing frozen/non-frozen if i % 2 == 0: for param in seq[i * 2].parameters(recurse=True): param.requires_grad = requires_grad @skip_if_lt_x_gpu(2) def test_backward_reshard_hooks(self, device): """ Tests that the post-backward reshard happens even for flat parameters that do not require gradients. 
""" self.run_subtests( { "device_id": [device], "sharding_strategy": [ ShardingStrategy.FULL_SHARD, ShardingStrategy.SHARD_GRAD_OP, ShardingStrategy.NO_SHARD, ], "use_orig_params": [False, True], "inp_requires_grad": [False, True], "unfreeze_params": [False, True], }, self._test_backward_reshard_hooks, ) def _test_backward_reshard_hooks( self, device_id, sharding_strategy: ShardingStrategy, use_orig_params: bool, inp_requires_grad: bool, unfreeze_params: bool, ): seq = self._init_seq_module(device_type) policy = ModuleWrapPolicy({nn.Linear}) fsdp_kwargs = {"device_id": device_type} seq = FSDP( seq, auto_wrap_policy=policy, sharding_strategy=sharding_strategy, use_orig_params=use_orig_params, **fsdp_kwargs, ) orig_post_backward_reshard = ( torch.distributed.fsdp._runtime_utils._post_backward_reshard ) post_backward_reshard_count = 0 def _post_backward_reshard_with_count(*args, **kwargs): nonlocal post_backward_reshard_count post_backward_reshard_count += 1 return orig_post_backward_reshard(*args, **kwargs) def _assert_post_backward_requires_grad(seq): if step_idx == num_steps - 1 and unfreeze_params: self.assertTrue( all(p.requires_grad for p in seq.parameters()), msg="Expected all parameters to require grad but some did not!", ) def _assert_post_backward_reshard_count(step_idx, num_steps): if step_idx < num_steps - 1 or not unfreeze_params: # If the input does not require gradient, then the 0th # frozen linear gets resharded in the catch-all reshard # since we cannot register an autograd hook on it expected_post_backward_reshard_count = ( self.NUM_LINEARS if inp_requires_grad else self.NUM_LINEARS - 1 ) else: # This follows the normal post-backward hook path expected_post_backward_reshard_count = self.NUM_LINEARS self.assertEqual( post_backward_reshard_count, expected_post_backward_reshard_count ) with mock.patch( "torch.distributed.fsdp._runtime_utils._post_backward_reshard", _post_backward_reshard_with_count, ): num_steps = 3 # interleave a `no_grad` step to 
validate post-backward hooks are not registered in that context # and that `requires_grad` is reset appropriately when unfreezing nograd_step_idx = 1 for step_idx in range(num_steps): if unfreeze_params and step_idx == num_steps - 1: # Unfreeze the parameters on the last step to emulate some # kinds of fine-tuning self._set_seq_module_requires_grad(seq, True) inp = torch.randn( (8, 5), device=device_type, requires_grad=inp_requires_grad ) if step_idx == nograd_step_idx: with torch.no_grad(): output = seq(inp) else: output = seq(inp) if step_idx != nograd_step_idx: output.sum().backward() _assert_post_backward_requires_grad(seq) _assert_post_backward_reshard_count(step_idx, num_steps) post_backward_reshard_count = 0 def _init_multi_traversal_module(self, device) -> nn.Module: torch.manual_seed(42) class TestModule(nn.Module): def __init__(self) -> None: super().__init__() self.layer_0 = nn.Linear(5, 5, device=device) self.layer_no_grad = nn.Linear(5, 5, device=device) self.layer_with_grad = nn.Linear(5, 5, device=device) self.layer_no_grad.requires_grad_(False) def forward(self, x): # Layer `layer_no_grad` and `layer_with_grad` are called # multiple times, IOW, their parameters are used multiple times # during forward pass. x = self.layer_0(x) for _ in range(10): x = self.layer_no_grad(self.layer_with_grad(x)) # Make sure calling the same layer multiple times works # regardless whether gradient is enabled. with torch.no_grad(): x += self.layer_with_grad(x) return x return TestModule() @skip_if_lt_x_gpu(2) def test_hooks_multi_traversal(self): """ Tests that the hooks do reshard / unshard correctly in the case of same parameters being used multiple times during forward pass. 
""" self.run_subtests( { "sharding_strategy": [ ShardingStrategy.FULL_SHARD, ShardingStrategy.SHARD_GRAD_OP, ShardingStrategy.NO_SHARD, ], "use_orig_params": [False, True], "inp_requires_grad": [False, True], "forward_prefetch": [False, True], }, self._test_hooks_multi_traversal, ) def _test_hooks_multi_traversal( self, sharding_strategy: ShardingStrategy, use_orig_params: bool, inp_requires_grad: bool, forward_prefetch: bool, ): seq = self._init_multi_traversal_module(device_type.type) policy = ModuleWrapPolicy({nn.Linear}) fsdp_kwargs = {"device_id": device_type} fsdp_seq = FSDP( copy.deepcopy(seq), auto_wrap_policy=policy, sharding_strategy=sharding_strategy, use_orig_params=use_orig_params, forward_prefetch=forward_prefetch, **fsdp_kwargs, ) ddp_seq = DDP(copy.deepcopy(seq), device_ids=[device_type]) fsdp_optim = torch.optim.Adam(fsdp_seq.parameters(), lr=1e-2) ddp_optim = torch.optim.Adam(ddp_seq.parameters(), lr=1e-2) torch.manual_seed(self.rank + 1) losses = [] for _ in range(6): inp = torch.randn( (8, 5), device=device_type, requires_grad=inp_requires_grad ) for seq, optim in ((fsdp_seq, fsdp_optim), (ddp_seq, ddp_optim)): loss = seq(inp).sum() losses.append(loss) loss.backward() optim.step() optim.zero_grad() torch.testing.assert_close(losses[0], losses[1]) losses.clear() @skip_if_lt_x_gpu(2) def test_parity_with_ddp(self): """ Tests parity with DDP when mixing flat parameters that require and do not require gradients. 
""" self.run_subtests( { "sharding_strategy": [ ShardingStrategy.FULL_SHARD, ShardingStrategy.SHARD_GRAD_OP, ShardingStrategy.NO_SHARD, ], "use_orig_params": [False, True], }, self._test_parity_with_ddp, ) def _test_parity_with_ddp( self, sharding_strategy: ShardingStrategy, use_orig_params: bool, ): seq = self._init_seq_module(device_type) policy = ModuleWrapPolicy({nn.Linear}) fsdp_kwargs = {"device_id": device_type} fsdp_seq = FSDP( copy.deepcopy(seq), auto_wrap_policy=policy, sharding_strategy=sharding_strategy, use_orig_params=use_orig_params, **fsdp_kwargs, ) ddp_seq = DDP(copy.deepcopy(seq), device_ids=[device_type]) fsdp_optim = torch.optim.Adam(fsdp_seq.parameters(), lr=1e-2) ddp_optim = torch.optim.Adam(ddp_seq.parameters(), lr=1e-2) torch.manual_seed(self.rank + 1) losses = [] for _ in range(6): inp = torch.randn((8, 5), device=device_type.type) for seq, optim in ((fsdp_seq, fsdp_optim), (ddp_seq, ddp_optim)): loss = seq(inp).sum() losses.append(loss) loss.backward() optim.step() optim.zero_grad() if TEST_CUDA: torch.testing.assert_close(losses[0], losses[1]) else: torch.testing.assert_close(losses[0], losses[1], atol=1e-03, rtol=1e-03) losses.clear() @skip_if_lt_x_gpu(2) def test_parity_with_non_frozen_fsdp(self, device): """ For frozen modules with unused input, reshard could happen without unshard Verify numerical parity between `_post_backward_reshard_only_hook` and `_post_backward_hook` path """ self.run_subtests( { "device_id": [device], "sharding_strategy": [ ShardingStrategy.FULL_SHARD, ShardingStrategy.SHARD_GRAD_OP, ], "use_orig_params": [True, False], "offload_params": [True, False], "mixed_precision": [ MixedPrecision(), MixedPrecision( param_dtype=torch.float16, buffer_dtype=torch.float16, reduce_dtype=torch.float16, ), ], "backward_prefetch": [ BackwardPrefetch.BACKWARD_PRE, BackwardPrefetch.BACKWARD_POST, ], }, self._test_parity_with_non_frozen_fsdp, ) def _test_parity_with_non_frozen_fsdp( self, device_id, sharding_strategy: 
ShardingStrategy, use_orig_params: bool, offload_params: bool, mixed_precision: MixedPrecision, backward_prefetch: BackwardPrefetch, ): torch.manual_seed(42) model = ModelUnusedInput(freeze=True).to(device_type) torch.manual_seed(42) ref_model = ModelUnusedInput(freeze=False).to(device_type) fsdp_kwargs = { "device_id": device_type, "auto_wrap_policy": ModuleWrapPolicy({LinearUnusedInput}), "sharding_strategy": sharding_strategy, "use_orig_params": use_orig_params, "cpu_offload": CPUOffload(offload_params=offload_params), "mixed_precision": mixed_precision, "backward_prefetch": backward_prefetch, } model = FSDP(model, **fsdp_kwargs) ref_model = FSDP(ref_model, **fsdp_kwargs) model_optim = torch.optim.Adam(model.parameters(), lr=1e-2) ref_model_optim = torch.optim.Adam( [ param for name, param in ref_model.named_parameters() if not name.startswith("_fsdp_wrapped_module.layer1_frozen") ], lr=1e-2, ) torch.manual_seed(self.rank + 1) losses = [] for _ in range(6): frozen_input = torch.randn((4, 4), device=device_type, requires_grad=False) for _model, _optim in ((model, model_optim), (ref_model, ref_model_optim)): loss = _model(frozen_input, frozen_input).sum() losses.append(loss) loss.backward() _optim.step() _optim.zero_grad() self.assertEqual(losses[0], losses[1]) losses.clear() with FSDP.summon_full_params(model): with FSDP.summon_full_params(ref_model): for param, ref_param in zip(model.parameters(), ref_model.parameters()): self.assertEqual(param, ref_param) devices = ("cuda", "hpu", "xpu") instantiate_device_type_tests( TestFSDPFineTune, globals(), only_for=devices, allow_xpu=True ) if __name__ == "__main__": run_tests()
TestFSDPFineTune
python
cherrypy__cherrypy
cherrypy/lib/reprconf.py
{ "start": 3935, "end": 5138 }
class ____(dict): """A dict-like set of configuration data, with defaults and namespaces. May take a file, filename, or dict. """ defaults = {} environments = {} namespaces = NamespaceSet() def __init__(self, file=None, **kwargs): """Initialize a CherryPy :class:`Config`.""" self.reset() if file is not None: self.update(file) if kwargs: self.update(kwargs) def reset(self): """Reset self to default values.""" self.clear() dict.update(self, self.defaults) def update(self, config): """Update self from a dict, file, or filename.""" self._apply(Parser.load(config)) def _apply(self, config): """Update self from a dict.""" which_env = config.get('environment') if which_env: env = self.environments[which_env] for k in env: if k not in config: config[k] = env[k] dict.update(self, config) self.namespaces(config) def __setitem__(self, k, v): """Assign a config setting.""" dict.__setitem__(self, k, v) self.namespaces({k: v})
Config
python
facebook__pyre-check
client/language_server/protocol.py
{ "start": 11050, "end": 11538 }
class ____(json_mixins.CamlCaseAndExcludeJsonMixin): capabilities: ClientCapabilities process_id: Optional[int] = None client_info: Optional[Info] = None initialization_options: Optional[InitializationOptions] = None @staticmethod def from_json_rpc_parameters( parameters: json_rpc.Parameters, ) -> "InitializeParameters": return _parse_parameters(parameters, target=InitializeParameters) @dataclasses.dataclass(frozen=True)
InitializeParameters
python
django__django
django/utils/datastructures.py
{ "start": 7409, "end": 8376 }
class ____(dict): """ Wrap accesses to a dictionary so that certain values (those starting with the specified prefix) are passed through a function before being returned. The prefix is removed before looking up the real value. Used by the SQL construction code to ensure that values are correctly quoted before being used. """ def __init__(self, data, func, prefix): super().__init__(data) self.func = func self.prefix = prefix def __getitem__(self, key): """ Retrieve the real value after stripping the prefix string (if present). If the prefix is present, pass the value through self.func before returning, otherwise return the raw value. """ use_func = key.startswith(self.prefix) key = key.removeprefix(self.prefix) value = super().__getitem__(key) if use_func: return self.func(value) return value
DictWrapper
python
allegroai__clearml
clearml/backend_api/services/v2_23/events.py
{ "start": 54151, "end": 56384 }
class ____(Response): """ Response of events.add_batch endpoint. :param added: :type added: int :param errors: :type errors: int :param errors_info: :type errors_info: dict """ _service = "events" _action = "add_batch" _version = "2.23" _schema = { "definitions": {}, "properties": { "added": {"type": ["integer", "null"]}, "errors": {"type": ["integer", "null"]}, "errors_info": {"type": ["object", "null"]}, }, "type": "object", } def __init__( self, added: Optional[int] = None, errors: Optional[int] = None, errors_info: Optional[dict] = None, **kwargs: Any ) -> None: super(AddBatchResponse, self).__init__(**kwargs) self.added = added self.errors = errors self.errors_info = errors_info @schema_property("added") def added(self) -> Optional[int]: return self._property_added @added.setter def added(self, value: Optional[int]) -> None: if value is None: self._property_added = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "added", six.integer_types) self._property_added = value @schema_property("errors") def errors(self) -> Optional[int]: return self._property_errors @errors.setter def errors(self, value: Optional[int]) -> None: if value is None: self._property_errors = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "errors", six.integer_types) self._property_errors = value @schema_property("errors_info") def errors_info(self) -> Optional[dict]: return self._property_errors_info @errors_info.setter def errors_info(self, value: Optional[dict]) -> None: if value is None: self._property_errors_info = None return self.assert_isinstance(value, "errors_info", (dict,)) self._property_errors_info = value
AddBatchResponse
python
zarr-developers__zarr-python
tests/test_dtype/test_wrapper.py
{ "start": 282, "end": 747 }
class ____: # subclasses define the URL for the schema, if available schema_url: ClassVar[str] = "" @pytest.fixture(scope="class") def get_schema(self) -> object: response = requests.get(self.schema_url) response.raise_for_status() return json_schema.loads(response.text) def test_schema(self, schema: json_schema.Schema) -> None: assert schema.is_valid(self.test_cls.to_json(zarr_format=2)) """
_TestZDTypeSchema
python
getsentry__sentry
src/sentry/preprod/migrations/0015_add_preprod_artifact_size_metrics_identifier.py
{ "start": 155, "end": 2351 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = False dependencies = [ ("preprod", "0014_commitcomparisons_fk"), ] operations = [ migrations.AlterUniqueTogether( name="preprodartifactsizemetrics", unique_together=set(), ), migrations.AddField( model_name="preprodartifactsizemetrics", name="identifier", field=models.CharField(max_length=255, null=True), ), migrations.AddConstraint( model_name="preprodartifactsizemetrics", constraint=models.UniqueConstraint( condition=models.Q(("identifier__isnull", False)), fields=("preprod_artifact", "metrics_artifact_type", "identifier"), name="preprod_artifact_size_metrics_unique", ), ), migrations.AddConstraint( model_name="preprodartifactsizemetrics", constraint=models.UniqueConstraint( condition=models.Q(("identifier__isnull", True)), fields=("preprod_artifact", "metrics_artifact_type"), name="preprod_artifact_size_metrics_unique_no_identifier", ), ), ]
Migration
python
arrow-py__arrow
tests/test_locales.py
{ "start": 134972, "end": 136140 }
class ____: def test_plurals(self): assert self.locale._format_timeframe("now", 0) == "just nu" assert self.locale._format_timeframe("second", 1) == "en sekund" assert self.locale._format_timeframe("seconds", 30) == "30 sekunder" assert self.locale._format_timeframe("minute", 1) == "en minut" assert self.locale._format_timeframe("minutes", 40) == "40 minuter" assert self.locale._format_timeframe("hour", 1) == "en timme" assert self.locale._format_timeframe("hours", 23) == "23 timmar" assert self.locale._format_timeframe("day", 1) == "en dag" assert self.locale._format_timeframe("days", 12) == "12 dagar" assert self.locale._format_timeframe("week", 1) == "en vecka" assert self.locale._format_timeframe("weeks", 38) == "38 veckor" assert self.locale._format_timeframe("month", 1) == "en månad" assert self.locale._format_timeframe("months", 11) == "11 månader" assert self.locale._format_timeframe("year", 1) == "ett år" assert self.locale._format_timeframe("years", 12) == "12 år" @pytest.mark.usefixtures("lang_locale")
TestSwedishLocale
python
walkccc__LeetCode
solutions/2275. Largest Combination With Bitwise AND Greater Than Zero/2275.py
{ "start": 0, "end": 149 }
class ____: def largestCombination(self, candidates: list[int]) -> int: return max(sum(c >> i & 1 for c in candidates) for i in range(24))
Solution
python
langchain-ai__langchain
libs/core/langchain_core/output_parsers/openai_tools.py
{ "start": 4103, "end": 6915 }
class ____(BaseCumulativeTransformOutputParser[Any]): """Parse tools from OpenAI response.""" strict: bool = False """Whether to allow non-JSON-compliant strings. See: https://docs.python.org/3/library/json.html#encoders-and-decoders Useful when the parsed output may include unicode characters or new lines. """ return_id: bool = False """Whether to return the tool call id.""" first_tool_only: bool = False """Whether to return only the first tool call. If `False`, the result will be a list of tool calls, or an empty list if no tool calls are found. If true, and multiple tool calls are found, only the first one will be returned, and the other tool calls will be ignored. If no tool calls are found, None will be returned. """ def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: """Parse the result of an LLM call to a list of tool calls. Args: result: The result of the LLM call. partial: Whether to parse partial JSON. If `True`, the output will be a JSON object containing all the keys that have been returned so far. If `False`, the output will be the full JSON object. Returns: The parsed tool calls. Raises: OutputParserException: If the output is not valid JSON. """ generation = result[0] if not isinstance(generation, ChatGeneration): msg = "This output parser can only be used with a chat generation." 
raise OutputParserException(msg) message = generation.message if isinstance(message, AIMessage) and message.tool_calls: tool_calls = [dict(tc) for tc in message.tool_calls] for tool_call in tool_calls: if not self.return_id: _ = tool_call.pop("id") else: try: raw_tool_calls = copy.deepcopy(message.additional_kwargs["tool_calls"]) except KeyError: return [] tool_calls = parse_tool_calls( raw_tool_calls, partial=partial, strict=self.strict, return_id=self.return_id, ) # for backwards compatibility for tc in tool_calls: tc["type"] = tc.pop("name") if self.first_tool_only: return tool_calls[0] if tool_calls else None return tool_calls def parse(self, text: str) -> Any: """Parse the output of an LLM call to a list of tool calls. Args: text: The output of the LLM call. Returns: The parsed tool calls. """ raise NotImplementedError
JsonOutputToolsParser
python
sympy__sympy
sympy/functions/combinatorial/numbers.py
{ "start": 14802, "end": 21905 }
class ____(DefinedFunction):
    r"""
    Bernoulli numbers / Bernoulli polynomials / Bernoulli function

    The Bernoulli numbers are a sequence of rational numbers defined by
    `B_0 = 1` and the recursive relation (`n > 0`):

    .. math :: n+1 = \sum_{k=0}^n \binom{n+1}{k} B_k

    They are also commonly defined by their exponential generating
    function, which is `\frac{x}{1 - e^{-x}}`. For odd indices > 1,
    the Bernoulli numbers are zero.

    The Bernoulli polynomials satisfy the analogous formula:

    .. math :: B_n(x) = \sum_{k=0}^n (-1)^k \binom{n}{k} B_k x^{n-k}

    Bernoulli numbers and Bernoulli polynomials are related as
    `B_n(1) = B_n`.

    The generalized Bernoulli function `\operatorname{B}(s, a)` is defined
    for any complex `s` and `a`, except where `a` is a nonpositive integer
    and `s` is not a nonnegative integer. It is an entire function of `s`
    for fixed `a`, related to the Hurwitz zeta function by

    .. math:: \operatorname{B}(s, a) = \begin{cases}
            -s \zeta(1-s, a) & s \ne 0 \\ 1 & s = 0 \end{cases}

    When `s` is a nonnegative integer this function reduces to the
    Bernoulli polynomials: `\operatorname{B}(n, x) = B_n(x)`. When `a` is
    omitted it is assumed to be 1, yielding the (ordinary) Bernoulli
    function which interpolates the Bernoulli numbers and is related to
    the Riemann zeta function.

    We compute Bernoulli numbers using Ramanujan's formula:

    .. math :: B_n = \frac{A(n) - S(n)}{\binom{n+3}{n}}

    where:

    .. math :: A(n) = \begin{cases} \frac{n+3}{3} &
        n \equiv 0\ \text{or}\ 2 \pmod{6} \\
        -\frac{n+3}{6} & n \equiv 4 \pmod{6} \end{cases}

    and:

    .. math :: S(n) = \sum_{k=1}^{[n/6]} \binom{n+3}{n-6k} B_{n-6k}

    This formula is similar to the sum given in the definition, but cuts
    `\frac{2}{3}` of the terms. For Bernoulli polynomials, we use Appell
    sequences.

    For `n` a nonnegative integer and `s`, `a`, `x` arbitrary complex numbers,

    * ``bernoulli(n)`` gives the nth Bernoulli number, `B_n`
    * ``bernoulli(s)`` gives the Bernoulli function `\operatorname{B}(s)`
    * ``bernoulli(n, x)`` gives the nth Bernoulli polynomial in `x`, `B_n(x)`
    * ``bernoulli(s, a)`` gives the generalized Bernoulli function
      `\operatorname{B}(s, a)`

    .. versionchanged:: 1.12
        ``bernoulli(1)`` gives `+\frac{1}{2}` instead of `-\frac{1}{2}`.
        This choice of value confers several theoretical advantages [5]_,
        including the extension to complex parameters described above
        which this function now implements. The previous behavior, defined
        only for nonnegative integers `n`, can be obtained with
        ``(-1)**n*bernoulli(n)``.

    Examples
    ========

    >>> from sympy import bernoulli
    >>> from sympy.abc import x
    >>> [bernoulli(n) for n in range(11)]
    [1, 1/2, 1/6, 0, -1/30, 0, 1/42, 0, -1/30, 0, 5/66]
    >>> bernoulli(1000001)
    0
    >>> bernoulli(3, x)
    x**3 - 3*x**2/2 + x/2

    See Also
    ========

    andre, bell, catalan, euler, fibonacci, harmonic, lucas, genocchi,
    partition, tribonacci, sympy.polys.appellseqs.bernoulli_poly

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Bernoulli_number
    .. [2] https://en.wikipedia.org/wiki/Bernoulli_polynomial
    .. [3] https://mathworld.wolfram.com/BernoulliNumber.html
    .. [4] https://mathworld.wolfram.com/BernoulliPolynomial.html
    .. [5] Peter Luschny, "The Bernoulli Manifesto",
           https://luschny.de/math/zeta/The-Bernoulli-Manifesto.html
    .. [6] Peter Luschny, "An introduction to the Bernoulli function",
           https://arxiv.org/abs/2009.06743
    """

    # Argument tuple: one entry (Bernoulli number/function) or two
    # (polynomial / generalized function); annotation covers the 1-arg case.
    args: tuple[Integer]

    # Calculates B_n for positive even n using Ramanujan's formula (see the
    # class docstring); recurses into bernoulli() for the smaller indices.
    @staticmethod
    def _calc_bernoulli(n):
        s = 0
        a = int(binomial(n + 3, n - 6))
        for j in range(1, n//6 + 1):
            s += a * bernoulli(n - 6*j)
            # Avoid computing each binomial coefficient from scratch: update
            # the running coefficient incrementally between iterations.
            a *= _product(n - 6 - 6*j + 1, n - 6*j)
            a //= _product(6*j + 4, 6*j + 9)
        if n % 6 == 4:
            s = -Rational(n + 3, 6) - s
        else:
            s = Rational(n + 3, 3) - s
        return s / binomial(n + 3, n)

    # We implement a specialized memoization scheme to handle each
    # case modulo 6 separately: _cache maps n -> B_n, _highest maps
    # (n % 6) -> largest n cached for that residue class.
    _cache = {0: S.One, 1: Rational(1, 2), 2: Rational(1, 6), 4: Rational(-1, 30)}
    _highest = {0: 0, 1: 1, 2: 2, 4: 4}

    @classmethod
    def eval(cls, n, x=None):
        if x is S.One:
            # B_n(1) = B_n, so normalize to the single-argument form.
            return cls(n)
        elif n.is_zero:
            return S.One
        elif n.is_integer is False or n.is_nonnegative is False:
            # Generalized Bernoulli function: undefined (NaN) at nonpositive
            # integer a unless s is a nonnegative integer.
            if x is not None and x.is_Integer and x.is_nonpositive:
                return S.NaN
            return
        # Bernoulli numbers
        elif x is None:
            if n is S.One:
                return S.Half
            elif n.is_odd and (n-1).is_positive:
                return S.Zero
            elif n.is_Number:
                n = int(n)
                # Use mpmath for enormous Bernoulli numbers
                if n > 500:
                    p, q = mp.bernfrac(n)
                    return Rational(int(p), int(q))
                case = n % 6
                highest_cached = cls._highest[case]
                if n <= highest_cached:
                    return cls._cache[n]
                # To avoid excessive recursion when, say, bernoulli(1000) is
                # requested, calculate and cache the entire sequence ... B_988,
                # B_994, B_1000 in increasing order.  The range is nonempty
                # here (n > highest_cached), so `b` is always bound on exit.
                for i in range(highest_cached + 6, n + 6, 6):
                    b = cls._calc_bernoulli(i)
                    cls._cache[i] = b
                    cls._highest[case] = i
                return b
        # Bernoulli polynomials
        elif n.is_Number:
            return bernoulli_poly(n, x)

    def _eval_rewrite_as_zeta(self, n, x=1, **kwargs):
        # B(s, a) = -s*zeta(1-s, a) for s != 0, and 1 at s = 0.
        from sympy.functions.special.zeta_functions import zeta
        return Piecewise((1, Eq(n, 0)), (-n * zeta(1-n, x), True))

    def _eval_evalf(self, prec):
        if not all(x.is_number for x in self.args):
            return
        n = self.args[0]._to_mpmath(prec)
        # Second argument defaults to 1 (ordinary Bernoulli function).
        x = (self.args[1] if len(self.args) > 1 else S.One)._to_mpmath(prec)
        with workprec(prec):
            if n == 0:
                res = mp.mpf(1)
            elif n == 1:
                # B(1, x) = x - 1/2 (matches the +1/2 convention at x = 1).
                res = x - mp.mpf(0.5)
            elif mp.isint(n) and n >= 0:
                res = mp.bernoulli(n) if x == 1 else mp.bernpoly(n, x)
            else:
                res = -n * mp.zeta(1-n, x)
        return Expr._from_mpmath(res, prec)


#----------------------------------------------------------------------------#
#                                                                            #
#                              Bell numbers                                  #
#                                                                            #
#----------------------------------------------------------------------------#
bernoulli
python
django__django
tests/null_fk/tests.py
{ "start": 156, "end": 2952 }
class ____(TestCase):
    def test_null_fk(self):
        """select_related() must LEFT JOIN through nullable FK chains (#7369)."""
        details = SystemDetails.objects.create(details="First details")
        info = SystemInfo.objects.create(system_name="First forum", system_details=details)
        forum = Forum.objects.create(system_info=info, forum_name="First forum")
        post = Post.objects.create(forum=forum, title="First Post")
        attached = Comment.objects.create(post=post, comment_text="My first comment")
        orphan = Comment.objects.create(comment_text="My second comment")

        def summarize(comment):
            return (comment.id, comment.comment_text, repr(comment.post))

        # A bare select_related() must still traverse the NULL chain: data
        # after the NULLs has to survive the LEFT JOINs.
        self.assertEqual(Comment.objects.select_related().get(id=attached.id).post, post)
        self.assertIsNone(Comment.objects.select_related().get(id=orphan.id).post)

        expected = [
            (attached.id, "My first comment", "<Post: First Post>"),
            (orphan.id, "My second comment", "None"),
        ]
        self.assertQuerySetEqual(
            Comment.objects.select_related("post__forum__system_info").all(),
            expected,
            transform=summarize,
        )
        # Regression test for #7530, #7716.
        self.assertIsNone(
            Comment.objects.select_related("post").filter(post__isnull=True)[0].post
        )
        self.assertQuerySetEqual(
            Comment.objects.select_related("post__forum__system_info__system_details"),
            expected,
            transform=summarize,
        )

    def test_combine_isnull(self):
        """Combining filters over nullable relations commutes (#15823)."""
        item = Item.objects.create(title="Some Item")
        value = PropertyValue.objects.create(label="Some Value")
        item.props.create(key="a", value=value)
        item.props.create(key="b")  # value=NULL

        has_a = Q(props__key="a", props__value=value)
        has_null_b = Q(props__key="b", props__value__isnull=True)

        # Each filter on its own should match the item.
        self.assertEqual(Item.objects.get(has_a), item)
        self.assertEqual(Item.objects.get(has_null_b), item)

        # Logically, intersecting or uniting the two querysets must give the
        # same rows regardless of operand order.
        intersect_ab = Item.objects.filter(has_a) & Item.objects.filter(has_null_b)
        intersect_ba = Item.objects.filter(has_null_b) & Item.objects.filter(has_a)
        union_ab = Item.objects.filter(has_a) | Item.objects.filter(has_null_b)
        union_ba = Item.objects.filter(has_null_b) | Item.objects.filter(has_a)
        self.assertEqual(list(intersect_ab), list(intersect_ba))
        self.assertEqual(list(union_ab), list(union_ba))
NullFkTests
python
conda__conda
tests/shell/test_shell.py
{ "start": 2571, "end": 6442 }
class ____:
    """A named environment participating in the activation-stacking matrix.

    Attributes:
        name: Short label used by the fixtures ("sys", "base", "has", "not").
        prefix: Root of the conda environment, or None for the "sys" entry.
        paths: Executables expected on PATH when this env is active; computed
            from ``prefix`` when not given explicitly.
    """

    # NOTE(review): this class relies on being decorated with @dataclass
    # (the decorator sits above this chunk) — __post_init__ is only invoked
    # by the dataclass-generated __init__.
    name: str
    prefix: Path | None = None
    paths: tuple[Path, ...] | None = None

    def __post_init__(self):
        if self.paths is None:
            paths = [
                self.prefix / "Scripts" / "small.bat" if on_win else None,
                self.prefix / "bin" / "small",
            ]
            # Materialize as a tuple. The annotation promises tuple[Path, ...],
            # and these Env objects are shared through long-lived fixtures and
            # iterated once per parametrized test — a bare filter() iterator
            # would be exhausted after the first use, silently skipping the
            # expectations of every later test.
            self.paths = tuple(filter(Path.exists, filter(None, paths)))


@pytest.fixture(scope="module")
def stacking_envs(session_tmp_env: TmpEnvFixture) -> dict[str, Env]:
    """Create the three real environments (plus a paths-less 'sys' entry)."""
    # create envs using full path to avoid solver
    path = TEST_RECIPES_CHANNEL / "noarch" / "small-executable-1.0.0-0.tar.bz2"
    with (
        session_tmp_env(path) as base_env,
        session_tmp_env(path) as has_env,
        # use --offline for empty env to avoid HTTP hit
        session_tmp_env("--offline") as not_env,
    ):
        return {
            "sys": Env("sys", paths=()),
            "base": Env("base", prefix=base_env),
            "has": Env("has", prefix=has_env),
            "not": Env("not", prefix=not_env),
        }


@pytest.fixture
def stack(request: FixtureRequest, stacking_envs: dict[str, Env]) -> tuple[Env, ...]:
    """Resolve a comma-separated env-name string into Env objects to activate."""
    envs = request.param.split(",") if request.param else ()
    return tuple(stacking_envs[env] for env in envs)


@pytest.fixture
def run(request: FixtureRequest, stacking_envs: dict[str, Env]) -> Env:
    """Resolve the env name that `conda run` targets."""
    return stacking_envs[request.param]


@pytest.fixture
def expected(request: FixtureRequest, stacking_envs: dict[str, Env]) -> tuple[Env, ...]:
    """Resolve the comma-separated env names expected on PATH, in order."""
    envs = request.param.split(",") if request.param else ()
    return tuple(stacking_envs[env] for env in envs)


# TODO: test stacking on all shells
# see https://github.com/conda/conda/pull/11257#issuecomment-1050531320
@pytest.mark.parametrize(
    "auto_stack,stack,run,expected",
    [
        # no environments activated
        (0, None, "base", "base,sys"),
        (0, None, "has", "has,sys"),
        (0, None, "not", "sys"),
        # one environment activated, no stacking
        (0, "base", "base", "base,sys"),
        (0, "base", "has", "has,sys"),
        (0, "base", "not", "sys"),
        (0, "has", "base", "base,sys"),
        (0, "has", "has", "has,sys"),
        (0, "has", "not", "sys"),
        (0, "not", "base", "base,sys"),
        (0, "not", "has", "has,sys"),
        (0, "not", "not", "sys"),
        # one environment activated, stacking allowed
        (5, "base", "base", "base,sys"),
        (5, "base", "has", "has,base,sys"),
        (5, "base", "not", "base,sys"),
        (5, "has", "base", "base,has,sys"),
        (5, "has", "has", "has,sys"),
        (5, "has", "not", "has,sys"),
        (5, "not", "base", "base,sys"),
        (5, "not", "has", "has,sys"),
        (5, "not", "not", "sys"),
        # two environments activated, stacking allowed
        (5, "base,has", "base", "base,has,sys" if on_win else "base,has,base,sys"),
        (5, "base,has", "has", "has,base,sys"),
        (5, "base,has", "not", "has,base,sys"),
        (5, "base,not", "base", "base,sys" if on_win else "base,base,sys"),
        (5, "base,not", "has", "has,base,sys"),
        (5, "base,not", "not", "base,sys"),
    ],
    indirect=["stack", "run", "expected"],
)
def test_stacking(
    auto_stack: int,
    stack: tuple[Env, ...],
    run: Env,
    expected: tuple[Env, ...],
    shell: Shell,
) -> None:
    """Activate `stack`, then `conda run` in `run` and check PATH resolution order."""
    which = f"{'where' if on_win else 'which -a'} small"
    with shell.interactive(env={"CONDA_AUTO_STACK": str(auto_stack)}) as sh:
        for env in stack:
            sh.sendline(f'conda activate "{env.prefix}"')
            sh.clear()

        sh.sendline(f'conda run --prefix="{run.prefix}" --dev {which}')
        if not expected:
            # No env provides the executable: conda run itself must fail.
            sh.expect_exact(f"'conda run {which}' failed")
        else:
            # Hits must appear in precedence order, one per expected env.
            for env in expected:
                for path in env.paths:
                    sh.expect_exact(str(path))
        sh.clear()
Env
python
pypa__pipenv
pipenv/patched/pip/_vendor/requests/models.py
{ "start": 9566, "end": 21090 }
class ____(RequestEncodingMixin, RequestHooksMixin):
    """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
    containing the exact bytes that will be sent to the server.

    Instances are generated from a :class:`Request <Request>` object, and
    should not be instantiated manually; doing so may produce undesirable
    effects.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'https://httpbin.org/get')
      >>> r = req.prepare()
      >>> r
      <PreparedRequest [GET]>

      >>> s = requests.Session()
      >>> s.send(r)
      <Response [200]>
    """

    def __init__(self):
        #: HTTP verb to send to the server.
        self.method = None
        #: HTTP URL to send the request to.
        self.url = None
        #: dictionary of HTTP headers.
        self.headers = None
        # The `CookieJar` used to create the Cookie header will be stored here
        # after prepare_cookies is called
        self._cookies = None
        #: request body to send to the server.
        self.body = None
        #: dictionary of callback hooks, for internal usage.
        self.hooks = default_hooks()
        #: integer denoting starting position of a readable file-like body.
        self._body_position = None

    def prepare(
        self,
        method=None,
        url=None,
        headers=None,
        files=None,
        data=None,
        params=None,
        auth=None,
        cookies=None,
        hooks=None,
        json=None,
    ):
        """Prepares the entire request with the given parameters."""

        self.prepare_method(method)
        self.prepare_url(url, params)
        self.prepare_headers(headers)
        self.prepare_cookies(cookies)
        self.prepare_body(data, files, json)
        self.prepare_auth(auth, url)

        # Note that prepare_auth must be last to enable authentication schemes
        # such as OAuth to work on a fully prepared request.

        # This MUST go after prepare_auth. Authenticators could add a hook
        self.prepare_hooks(hooks)

    def __repr__(self):
        return f"<PreparedRequest [{self.method}]>"

    def copy(self):
        """Return a shallow-ish copy: headers and the cookie jar are copied,
        the body object itself is shared."""
        p = PreparedRequest()
        p.method = self.method
        p.url = self.url
        p.headers = self.headers.copy() if self.headers is not None else None
        p._cookies = _copy_cookie_jar(self._cookies)
        p.body = self.body
        p.hooks = self.hooks
        p._body_position = self._body_position
        return p

    def prepare_method(self, method):
        """Prepares the given HTTP method."""
        self.method = method
        if self.method is not None:
            # HTTP methods are case-sensitive per RFC; normalize to upper-case.
            self.method = to_native_string(self.method.upper())

    @staticmethod
    def _get_idna_encoded_host(host):
        """IDNA-encode a non-ASCII hostname; raise UnicodeError on failure."""
        from pipenv.patched.pip._vendor import idna

        try:
            host = idna.encode(host, uts46=True).decode("utf-8")
        except idna.IDNAError:
            raise UnicodeError
        return host

    def prepare_url(self, url, params):
        """Prepares the given HTTP URL."""
        #: Accept objects that have string representations.
        #: We're unable to blindly call unicode/str functions
        #: as this will include the bytestring indicator (b'')
        #: on python 3.x.
        #: https://github.com/psf/requests/pull/2238
        if isinstance(url, bytes):
            url = url.decode("utf8")
        else:
            url = str(url)

        # Remove leading whitespaces from url
        url = url.lstrip()

        # Don't do any URL preparation for non-HTTP schemes like `mailto`,
        # `data` etc to work around exceptions from `url_parse`, which
        # handles RFC 3986 only.
        if ":" in url and not url.lower().startswith("http"):
            self.url = url
            return

        # Support for unicode domain names and paths.
        try:
            scheme, auth, host, port, path, query, fragment = parse_url(url)
        except LocationParseError as e:
            raise InvalidURL(*e.args)

        if not scheme:
            raise MissingSchema(
                f"Invalid URL {url!r}: No scheme supplied. "
                f"Perhaps you meant https://{url}?"
            )

        if not host:
            raise InvalidURL(f"Invalid URL {url!r}: No host supplied")

        # In general, we want to try IDNA encoding the hostname if the string contains
        # non-ASCII characters. This allows users to automatically get the correct IDNA
        # behaviour. For strings containing only ASCII characters, we need to also verify
        # it doesn't start with a wildcard (*), before allowing the unencoded hostname.
        if not unicode_is_ascii(host):
            try:
                host = self._get_idna_encoded_host(host)
            except UnicodeError:
                raise InvalidURL("URL has an invalid label.")
        elif host.startswith(("*", ".")):
            raise InvalidURL("URL has an invalid label.")

        # Carefully reconstruct the network location
        netloc = auth or ""
        if netloc:
            netloc += "@"
        netloc += host
        if port:
            netloc += f":{port}"

        # Bare domains aren't valid URLs.
        if not path:
            path = "/"

        if isinstance(params, (str, bytes)):
            params = to_native_string(params)

        # Merge caller-supplied params into any query string already in the URL.
        enc_params = self._encode_params(params)
        if enc_params:
            if query:
                query = f"{query}&{enc_params}"
            else:
                query = enc_params

        url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
        self.url = url

    def prepare_headers(self, headers):
        """Prepares the given HTTP headers."""

        self.headers = CaseInsensitiveDict()
        if headers:
            for header in headers.items():
                # Raise exception on invalid header value.
                check_header_validity(header)
                name, value = header
                self.headers[to_native_string(name)] = value

    def prepare_body(self, data, files, json=None):
        """Prepares the given HTTP body data."""

        # Check if file, fo, generator, iterator.
        # If not, run through normal process.

        # Nottin' on you.
        body = None
        content_type = None

        # `data` wins over `json` when both are given (json only used if data
        # is falsy).
        if not data and json is not None:
            # urllib3 requires a bytes-like body. Python 2's json.dumps
            # provides this natively, but Python 3 gives a Unicode string.
            content_type = "application/json"

            try:
                body = complexjson.dumps(json, allow_nan=False)
            except ValueError as ve:
                raise InvalidJSONError(ve, request=self)

            if not isinstance(body, bytes):
                body = body.encode("utf-8")

        # "Stream" = any iterable that isn't a string/list/tuple/mapping;
        # such bodies are sent chunked unless their length is knowable.
        is_stream = all(
            [
                hasattr(data, "__iter__"),
                not isinstance(data, (basestring, list, tuple, Mapping)),
            ]
        )

        if is_stream:
            try:
                length = super_len(data)
            except (TypeError, AttributeError, UnsupportedOperation):
                length = None

            body = data

            if getattr(body, "tell", None) is not None:
                # Record the current file position before reading.
                # This will allow us to rewind a file in the event
                # of a redirect.
                try:
                    self._body_position = body.tell()
                except OSError:
                    # This differentiates from None, allowing us to catch
                    # a failed `tell()` later when trying to rewind the body
                    self._body_position = object()

            if files:
                raise NotImplementedError(
                    "Streamed bodies and files are mutually exclusive."
                )

            if length:
                self.headers["Content-Length"] = builtin_str(length)
            else:
                self.headers["Transfer-Encoding"] = "chunked"
        else:
            # Multi-part file uploads.
            if files:
                (body, content_type) = self._encode_files(files, data)
            else:
                if data:
                    body = self._encode_params(data)
                    if isinstance(data, basestring) or hasattr(data, "read"):
                        # Raw string / file-like data: let the caller's
                        # Content-Type (if any) stand.
                        content_type = None
                    else:
                        content_type = "application/x-www-form-urlencoded"

            self.prepare_content_length(body)

            # Add content-type if it wasn't explicitly provided.
            if content_type and ("content-type" not in self.headers):
                self.headers["Content-Type"] = content_type

        self.body = body

    def prepare_content_length(self, body):
        """Prepare Content-Length header based on request method and body"""
        if body is not None:
            length = super_len(body)
            if length:
                # If length exists, set it. Otherwise, we fallback
                # to Transfer-Encoding: chunked.
                self.headers["Content-Length"] = builtin_str(length)
        elif (
            self.method not in ("GET", "HEAD")
            and self.headers.get("Content-Length") is None
        ):
            # Set Content-Length to 0 for methods that can have a body
            # but don't provide one. (i.e. not GET or HEAD)
            self.headers["Content-Length"] = "0"

    def prepare_auth(self, auth, url=""):
        """Prepares the given HTTP auth data."""

        # If no Auth is explicitly provided, extract it from the URL first.
        if auth is None:
            url_auth = get_auth_from_url(self.url)
            auth = url_auth if any(url_auth) else None

        if auth:
            if isinstance(auth, tuple) and len(auth) == 2:
                # special-case basic HTTP auth
                auth = HTTPBasicAuth(*auth)

            # Allow auth to make its changes.
            r = auth(self)

            # Update self to reflect the auth changes.
            self.__dict__.update(r.__dict__)

            # Recompute Content-Length
            self.prepare_content_length(self.body)

    def prepare_cookies(self, cookies):
        """Prepares the given HTTP cookie data.

        This function eventually generates a ``Cookie`` header from the
        given cookies using cookielib. Due to cookielib's design, the header
        will not be regenerated if it already exists, meaning this function
        can only be called once for the life of the
        :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
        to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
        header is removed beforehand.
        """
        if isinstance(cookies, cookielib.CookieJar):
            self._cookies = cookies
        else:
            self._cookies = cookiejar_from_dict(cookies)

        cookie_header = get_cookie_header(self._cookies, self)
        if cookie_header is not None:
            self.headers["Cookie"] = cookie_header

    def prepare_hooks(self, hooks):
        """Prepares the given hooks."""
        # hooks can be passed as None to the prepare method and to this
        # method. To prevent iterating over None, simply use an empty list
        # if hooks is False-y
        hooks = hooks or []
        for event in hooks:
            self.register_hook(event, hooks[event])
PreparedRequest
python
python__mypy
mypy/types.py
{ "start": 73941, "end": 97301 }
class ____(FunctionLike):
    """Type of a non-overloaded callable object (such as function)."""

    __slots__ = (
        "arg_types",  # Types of function arguments
        "arg_kinds",  # ARG_ constants
        "arg_names",  # Argument names; None if not a keyword argument
        "min_args",  # Minimum number of arguments; derived from arg_kinds
        "ret_type",  # Return value type
        "name",  # Name (may be None; for error messages and plugins)
        "definition",  # For error messages. May be None.
        "variables",  # Type variables for a generic function
        "is_ellipsis_args",  # Is this Callable[..., t] (with literal '...')?
        "implicit",  # Was this type implicitly generated instead of explicitly
        # specified by the user?
        "special_sig",  # Non-None for signatures that require special handling
        # (currently only values are 'dict' for a signature similar to
        # 'dict' and 'partial' for a `functools.partial` evaluation)
        "from_type_type",  # Was this callable generated by analyzing Type[...]
        # instantiation?
        "is_bound",  # Is this a bound method?
        "type_guard",  # T, if -> TypeGuard[T] (ret_type is bool in this case).
        "type_is",  # T, if -> TypeIs[T] (ret_type is bool in this case).
        "from_concatenate",  # whether this callable is from a concatenate object
        # (this is used for error messages)
        "imprecise_arg_kinds",
        "unpack_kwargs",  # Was an Unpack[...] with **kwargs used to define this callable?
    )

    def __init__(
        self,
        # maybe this should be refactored to take a Parameters object
        arg_types: Sequence[Type],
        arg_kinds: list[ArgKind],
        arg_names: Sequence[str | None],
        ret_type: Type,
        fallback: Instance,
        name: str | None = None,
        definition: SymbolNode | None = None,
        variables: Sequence[TypeVarLikeType] | None = None,
        line: int = -1,
        column: int = -1,
        is_ellipsis_args: bool = False,
        implicit: bool = False,
        special_sig: str | None = None,
        from_type_type: bool = False,
        is_bound: bool = False,
        type_guard: Type | None = None,
        type_is: Type | None = None,
        from_concatenate: bool = False,
        imprecise_arg_kinds: bool = False,
        unpack_kwargs: bool = False,
    ) -> None:
        super().__init__(line, column)
        # The three parallel argument sequences must stay in lockstep.
        assert len(arg_types) == len(arg_kinds) == len(arg_names)
        self.arg_types = list(arg_types)
        for t in self.arg_types:
            if isinstance(t, ParamSpecType):
                assert not t.prefix.arg_types
                # TODO: should we assert that only ARG_STAR contain ParamSpecType?
                # See testParamSpecJoin, that relies on passing e.g `P.args` as plain argument.
        self.arg_kinds = arg_kinds
        self.arg_names = list(arg_names)
        self.min_args = arg_kinds.count(ARG_POS)
        self.ret_type = ret_type
        self.fallback = fallback
        assert not name or "<bound method" not in name
        self.name = name
        # The rules for what exactly is considered a definition:
        # * If it is a non-decorated function, FuncDef is the definition
        # * If it is a decorated function, enclosing Decorator is the definition
        self.definition = definition
        self.variables: tuple[TypeVarLikeType, ...]
        if variables is None:
            self.variables = ()
        else:
            self.variables = tuple(variables)
        self.is_ellipsis_args = is_ellipsis_args
        self.implicit = implicit
        self.special_sig = special_sig
        self.from_type_type = from_type_type
        self.from_concatenate = from_concatenate
        self.imprecise_arg_kinds = imprecise_arg_kinds
        self.is_bound = is_bound
        self.type_guard = type_guard
        self.type_is = type_is
        self.unpack_kwargs = unpack_kwargs

    def copy_modified(
        self: CT,
        arg_types: Bogus[Sequence[Type]] = _dummy,
        arg_kinds: Bogus[list[ArgKind]] = _dummy,
        arg_names: Bogus[Sequence[str | None]] = _dummy,
        ret_type: Bogus[Type] = _dummy,
        fallback: Bogus[Instance] = _dummy,
        name: Bogus[str | None] = _dummy,
        definition: Bogus[SymbolNode] = _dummy,
        variables: Bogus[Sequence[TypeVarLikeType]] = _dummy,
        line: int = _dummy_int,
        column: int = _dummy_int,
        is_ellipsis_args: Bogus[bool] = _dummy,
        implicit: Bogus[bool] = _dummy,
        special_sig: Bogus[str | None] = _dummy,
        from_type_type: Bogus[bool] = _dummy,
        is_bound: Bogus[bool] = _dummy,
        type_guard: Bogus[Type | None] = _dummy,
        type_is: Bogus[Type | None] = _dummy,
        from_concatenate: Bogus[bool] = _dummy,
        imprecise_arg_kinds: Bogus[bool] = _dummy,
        unpack_kwargs: Bogus[bool] = _dummy,
    ) -> CT:
        """Return a copy with the given attributes replaced.

        Uses the `_dummy` sentinel (not None) so that None itself is a valid
        replacement value for optional attributes.
        """
        modified = CallableType(
            arg_types=arg_types if arg_types is not _dummy else self.arg_types,
            arg_kinds=arg_kinds if arg_kinds is not _dummy else self.arg_kinds,
            arg_names=arg_names if arg_names is not _dummy else self.arg_names,
            ret_type=ret_type if ret_type is not _dummy else self.ret_type,
            fallback=fallback if fallback is not _dummy else self.fallback,
            name=name if name is not _dummy else self.name,
            definition=definition if definition is not _dummy else self.definition,
            variables=variables if variables is not _dummy else self.variables,
            line=line if line != _dummy_int else self.line,
            column=column if column != _dummy_int else self.column,
            is_ellipsis_args=(
                is_ellipsis_args if is_ellipsis_args is not _dummy else self.is_ellipsis_args
            ),
            implicit=implicit if implicit is not _dummy else self.implicit,
            special_sig=special_sig if special_sig is not _dummy else self.special_sig,
            from_type_type=from_type_type if from_type_type is not _dummy else self.from_type_type,
            is_bound=is_bound if is_bound is not _dummy else self.is_bound,
            type_guard=type_guard if type_guard is not _dummy else self.type_guard,
            type_is=type_is if type_is is not _dummy else self.type_is,
            from_concatenate=(
                from_concatenate if from_concatenate is not _dummy else self.from_concatenate
            ),
            imprecise_arg_kinds=(
                imprecise_arg_kinds
                if imprecise_arg_kinds is not _dummy
                else self.imprecise_arg_kinds
            ),
            unpack_kwargs=unpack_kwargs if unpack_kwargs is not _dummy else self.unpack_kwargs,
        )
        # Optimization: Only NewTypes are supported as subtypes since
        # the class is effectively final, so we can use a cast safely.
        return cast(CT, modified)

    def var_arg(self) -> FormalArgument | None:
        """The formal argument for *args."""
        for position, (type, kind) in enumerate(zip(self.arg_types, self.arg_kinds)):
            if kind == ARG_STAR:
                return FormalArgument(None, position, type, False)
        return None

    def kw_arg(self) -> FormalArgument | None:
        """The formal argument for **kwargs."""
        for position, (type, kind) in enumerate(zip(self.arg_types, self.arg_kinds)):
            if kind == ARG_STAR2:
                return FormalArgument(None, position, type, False)
        return None

    @property
    def is_var_arg(self) -> bool:
        """Does this callable have a *args argument?"""
        return ARG_STAR in self.arg_kinds

    @property
    def is_kw_arg(self) -> bool:
        """Does this callable have a **kwargs argument?"""
        return ARG_STAR2 in self.arg_kinds

    def is_type_obj(self) -> bool:
        # A callable is a type object if its fallback is a metaclass instance
        # and its return type is inhabited.
        return self.fallback.type.is_metaclass() and not isinstance(
            get_proper_type(self.ret_type), UninhabitedType
        )

    def type_object(self) -> mypy.nodes.TypeInfo:
        """Return the TypeInfo this type object constructs (requires is_type_obj())."""
        assert self.is_type_obj()
        ret = get_proper_type(self.ret_type)
        # Peel wrappers until we reach a plain Instance whose TypeInfo we can return.
        if isinstance(ret, TypeVarType):
            ret = get_proper_type(ret.upper_bound)
        if isinstance(ret, TupleType):
            ret = ret.partial_fallback
        if isinstance(ret, TypedDictType):
            ret = ret.fallback
        if isinstance(ret, LiteralType):
            ret = ret.fallback
        assert isinstance(ret, Instance)
        return ret.type

    def accept(self, visitor: TypeVisitor[T]) -> T:
        return visitor.visit_callable_type(self)

    def with_name(self, name: str) -> CallableType:
        """Return a copy of this type with the specified name."""
        return self.copy_modified(ret_type=self.ret_type, name=name)

    def get_name(self) -> str | None:
        return self.name

    def max_possible_positional_args(self) -> int:
        """Returns maximum number of positional arguments this method could possibly accept.

        This takes into account *arg and **kwargs but excludes keyword-only args."""
        if self.is_var_arg or self.is_kw_arg:
            return sys.maxsize
        return sum(kind.is_positional() for kind in self.arg_kinds)

    def formal_arguments(self, include_star_args: bool = False) -> list[FormalArgument]:
        """Return a list of the formal arguments of this callable, ignoring *arg and **kwargs.

        To handle *args and **kwargs, use the 'callable.var_args' and 'callable.kw_args' fields,
        if they are not None.

        If you really want to include star args in the yielded output, set the
        'include_star_args' parameter to 'True'."""
        args = []
        done_with_positional = False
        for i in range(len(self.arg_types)):
            kind = self.arg_kinds[i]
            if kind.is_named() or kind.is_star():
                done_with_positional = True
            if not include_star_args and kind.is_star():
                continue
            required = kind.is_required()
            pos = None if done_with_positional else i
            arg = FormalArgument(self.arg_names[i], pos, self.arg_types[i], required)
            args.append(arg)
        return args

    def argument_by_name(self, name: str | None) -> FormalArgument | None:
        """Find the formal argument with the given name, or synthesize one from **kwargs."""
        if name is None:
            return None
        seen_star = False
        for i, (arg_name, kind, typ) in enumerate(
            zip(self.arg_names, self.arg_kinds, self.arg_types)
        ):
            # No more positional arguments after these.
            if kind.is_named() or kind.is_star():
                seen_star = True
            if kind.is_star():
                continue
            if arg_name == name:
                position = None if seen_star else i
                return FormalArgument(name, position, typ, kind.is_required())
        return self.try_synthesizing_arg_from_kwarg(name)

    def argument_by_position(self, position: int | None) -> FormalArgument | None:
        """Find the formal argument at the given position, or synthesize one from *args."""
        if position is None:
            return None
        if position >= len(self.arg_names):
            return self.try_synthesizing_arg_from_vararg(position)
        name, kind, typ = (
            self.arg_names[position],
            self.arg_kinds[position],
            self.arg_types[position],
        )
        if kind.is_positional():
            return FormalArgument(name, position, typ, kind == ARG_POS)
        else:
            return self.try_synthesizing_arg_from_vararg(position)

    def try_synthesizing_arg_from_kwarg(self, name: str | None) -> FormalArgument | None:
        # If there is a **kwargs, any keyword name can be bound to its type.
        kw_arg = self.kw_arg()
        if kw_arg is not None:
            return FormalArgument(name, None, kw_arg.typ, False)
        else:
            return None

    def try_synthesizing_arg_from_vararg(self, position: int | None) -> FormalArgument | None:
        # If there is a *args, any position can be bound to its type.
        var_arg = self.var_arg()
        if var_arg is not None:
            return FormalArgument(None, position, var_arg.typ, False)
        else:
            return None

    @property
    def items(self) -> list[CallableType]:
        # FunctionLike interface: a plain callable has a single item.
        return [self]

    def is_generic(self) -> bool:
        return bool(self.variables)

    def type_var_ids(self) -> list[TypeVarId]:
        a: list[TypeVarId] = []
        for tv in self.variables:
            a.append(tv.id)
        return a

    def param_spec(self) -> ParamSpecType | None:
        """Return ParamSpec if callable can be called with one.

        A Callable accepting ParamSpec P args (*args, **kwargs) must have the
        two final parameters like this: *args: P.args, **kwargs: P.kwargs.
        """
        if len(self.arg_types) < 2:
            return None
        if self.arg_kinds[-2] != ARG_STAR or self.arg_kinds[-1] != ARG_STAR2:
            return None
        arg_type = self.arg_types[-2]
        if not isinstance(arg_type, ParamSpecType):
            return None
        # Prepend prefix for def f(prefix..., *args: P.args, **kwargs: P.kwargs) -> ...
        # TODO: confirm that all arg kinds are positional
        prefix = Parameters(self.arg_types[:-2], self.arg_kinds[:-2], self.arg_names[:-2])
        return arg_type.copy_modified(flavor=ParamSpecFlavor.BARE, prefix=prefix)

    def normalize_trivial_unpack(self) -> None:
        # Normalize trivial unpack in var args as *args: *tuple[X, ...] -> *args: X in place.
        if self.is_var_arg:
            star_index = self.arg_kinds.index(ARG_STAR)
            star_type = self.arg_types[star_index]
            if isinstance(star_type, UnpackType):
                p_type = get_proper_type(star_type.type)
                if isinstance(p_type, Instance):
                    assert p_type.type.fullname == "builtins.tuple"
                    self.arg_types[star_index] = p_type.args[0]

    def with_unpacked_kwargs(self) -> NormalizedCallableType:
        """Expand an Unpack[TypedDict] **kwargs into individual named arguments."""
        if not self.unpack_kwargs:
            return cast(NormalizedCallableType, self)
        last_type = get_proper_type(self.arg_types[-1])
        assert isinstance(last_type, TypedDictType)
        # Required TypedDict keys become ARG_NAMED, optional ones ARG_NAMED_OPT.
        extra_kinds = [
            ArgKind.ARG_NAMED if name in last_type.required_keys else ArgKind.ARG_NAMED_OPT
            for name in last_type.items
        ]
        new_arg_kinds = self.arg_kinds[:-1] + extra_kinds
        new_arg_names = self.arg_names[:-1] + list(last_type.items)
        new_arg_types = self.arg_types[:-1] + list(last_type.items.values())
        return NormalizedCallableType(
            self.copy_modified(
                arg_kinds=new_arg_kinds,
                arg_names=new_arg_names,
                arg_types=new_arg_types,
                unpack_kwargs=False,
            )
        )

    def with_normalized_var_args(self) -> Self:
        """Normalize a *args: *Tuple[...] argument into positional args + a trailing unpack."""
        var_arg = self.var_arg()
        if not var_arg or not isinstance(var_arg.typ, UnpackType):
            return self
        unpacked = get_proper_type(var_arg.typ.type)
        if not isinstance(unpacked, TupleType):
            # Note that we don't normalize *args: *tuple[X, ...] -> *args: X,
            # this should be done once in semanal_typeargs.py for user-defined types,
            # and we ourselves rarely construct such type.
            return self
        unpack_index = find_unpack_in_list(unpacked.items)
        if unpack_index == 0 and len(unpacked.items) > 1:
            # Already normalized.
            return self

        # Boilerplate:
        var_arg_index = self.arg_kinds.index(ARG_STAR)
        types_prefix = self.arg_types[:var_arg_index]
        kinds_prefix = self.arg_kinds[:var_arg_index]
        names_prefix = self.arg_names[:var_arg_index]
        types_suffix = self.arg_types[var_arg_index + 1 :]
        kinds_suffix = self.arg_kinds[var_arg_index + 1 :]
        names_suffix = self.arg_names[var_arg_index + 1 :]
        no_name: str | None = None  # to silence mypy

        # Now we have something non-trivial to do.
        if unpack_index is None:
            # Plain *Tuple[X, Y, Z] -> replace with ARG_POS completely
            types_middle = unpacked.items
            kinds_middle = [ARG_POS] * len(unpacked.items)
            names_middle = [no_name] * len(unpacked.items)
        else:
            # *Tuple[X, *Ts, Y, Z] or *Tuple[X, *tuple[T, ...], X, Z], here
            # we replace the prefix by ARG_POS (this is how some places expect
            # Callables to be represented)
            nested_unpack = unpacked.items[unpack_index]
            assert isinstance(nested_unpack, UnpackType)
            nested_unpacked = get_proper_type(nested_unpack.type)
            if unpack_index == len(unpacked.items) - 1:
                # Normalize also single item tuples like
                # *args: *Tuple[*tuple[X, ...]] -> *args: X
                # *args: *Tuple[*Ts] -> *args: *Ts
                # This may be not strictly necessary, but these are very verbose.
                if isinstance(nested_unpacked, Instance):
                    assert nested_unpacked.type.fullname == "builtins.tuple"
                    new_unpack = nested_unpacked.args[0]
                else:
                    if not isinstance(nested_unpacked, TypeVarTupleType):
                        # We found a non-normalized tuple type, this means this method
                        # is called during semantic analysis (e.g. from get_proper_type())
                        # there is no point in normalizing callables at this stage.
                        return self
                    new_unpack = nested_unpack
            else:
                new_unpack = UnpackType(
                    unpacked.copy_modified(items=unpacked.items[unpack_index:])
                )
            types_middle = unpacked.items[:unpack_index] + [new_unpack]
            kinds_middle = [ARG_POS] * unpack_index + [ARG_STAR]
            names_middle = [no_name] * unpack_index + [self.arg_names[var_arg_index]]
        return self.copy_modified(
            arg_types=types_prefix + types_middle + types_suffix,
            arg_kinds=kinds_prefix + kinds_middle + kinds_suffix,
            arg_names=names_prefix + names_middle + names_suffix,
        )

    def __hash__(self) -> int:
        # Must hash the same fields that __eq__ compares (fallback included).
        return hash(
            (
                self.ret_type,
                self.is_ellipsis_args,
                self.name,
                tuple(self.arg_types),
                tuple(self.arg_names),
                tuple(self.arg_kinds),
                self.fallback,
            )
        )

    def __eq__(self, other: object) -> bool:
        if isinstance(other, CallableType):
            return (
                self.ret_type == other.ret_type
                and self.arg_types == other.arg_types
                and self.arg_names == other.arg_names
                and self.arg_kinds == other.arg_kinds
                and self.name == other.name
                and self.is_ellipsis_args == other.is_ellipsis_args
                and self.type_guard == other.type_guard
                and self.type_is == other.type_is
                and self.fallback == other.fallback
            )
        else:
            return NotImplemented

    def serialize(self) -> JsonDict:
        """Serialize to a JSON-compatible dict (inverse of deserialize)."""
        # TODO: As an optimization, leave out everything related to
        # generic functions for non-generic functions.
        return {
            ".class": "CallableType",
            "arg_types": [t.serialize() for t in self.arg_types],
            "arg_kinds": [int(x.value) for x in self.arg_kinds],
            "arg_names": self.arg_names,
            "ret_type": self.ret_type.serialize(),
            "fallback": self.fallback.serialize(),
            "name": self.name,
            # We don't serialize the definition (only used for error messages).
            "variables": [v.serialize() for v in self.variables],
            "is_ellipsis_args": self.is_ellipsis_args,
            "implicit": self.implicit,
            "is_bound": self.is_bound,
            "type_guard": self.type_guard.serialize() if self.type_guard is not None else None,
            "type_is": (self.type_is.serialize() if self.type_is is not None else None),
            "from_concatenate": self.from_concatenate,
            "imprecise_arg_kinds": self.imprecise_arg_kinds,
            "unpack_kwargs": self.unpack_kwargs,
        }

    @classmethod
    def deserialize(cls, data: JsonDict) -> CallableType:
        """Reconstruct a CallableType from its serialize() output."""
        assert data[".class"] == "CallableType"
        # The .definition link is set in fixup.py.
        return CallableType(
            [deserialize_type(t) for t in data["arg_types"]],
            [ARG_KINDS[x] for x in data["arg_kinds"]],
            data["arg_names"],
            deserialize_type(data["ret_type"]),
            Instance.deserialize(data["fallback"]),
            name=data["name"],
            variables=[cast(TypeVarLikeType, deserialize_type(v)) for v in data["variables"]],
            is_ellipsis_args=data["is_ellipsis_args"],
            implicit=data["implicit"],
            is_bound=data["is_bound"],
            type_guard=(
                deserialize_type(data["type_guard"]) if data["type_guard"] is not None else None
            ),
            type_is=(deserialize_type(data["type_is"]) if data["type_is"] is not None else None),
            from_concatenate=data["from_concatenate"],
            imprecise_arg_kinds=data["imprecise_arg_kinds"],
            unpack_kwargs=data["unpack_kwargs"],
        )

    def write(self, data: WriteBuffer) -> None:
        """Write to the binary cache buffer; field order must match read() exactly."""
        write_tag(data, CALLABLE_TYPE)
        self.fallback.write(data)
        write_type_list(data, self.arg_types)
        write_int_list(data, [int(x.value) for x in self.arg_kinds])
        write_str_opt_list(data, self.arg_names)
        self.ret_type.write(data)
        write_str_opt(data, self.name)
        write_type_list(data, self.variables)
        write_bool(data, self.is_ellipsis_args)
        write_bool(data, self.implicit)
        write_bool(data, self.is_bound)
        write_type_opt(data, self.type_guard)
        write_type_opt(data, self.type_is)
        write_bool(data, self.from_concatenate)
        write_bool(data, self.imprecise_arg_kinds)
        write_bool(data, self.unpack_kwargs)
        write_tag(data, END_TAG)

    @classmethod
    def read(cls, data: ReadBuffer) -> CallableType:
        """Read from the binary cache buffer; field order must match write() exactly."""
        # The INSTANCE tag here belongs to the fallback written first by write().
        assert read_tag(data) == INSTANCE
        fallback = Instance.read(data)
        ret = CallableType(
            read_type_list(data),
            [ARG_KINDS[ak] for ak in read_int_list(data)],
            read_str_opt_list(data),
            read_type(data),
            fallback,
            name=read_str_opt(data),
            variables=read_type_var_likes(data),
            is_ellipsis_args=read_bool(data),
            implicit=read_bool(data),
            is_bound=read_bool(data),
            type_guard=read_type_opt(data),
            type_is=read_type_opt(data),
            from_concatenate=read_bool(data),
            imprecise_arg_kinds=read_bool(data),
            unpack_kwargs=read_bool(data),
        )
        assert read_tag(data) == END_TAG
        return ret


# This is a little safety net to prevent reckless special-casing of callables
# that can potentially break Unpack[...] with **kwargs.
# TODO: use this in more places in checkexpr.py etc?
NormalizedCallableType = NewType("NormalizedCallableType", CallableType)
CallableType
python
getsentry__sentry
src/sentry/preprod/producer.py
{ "start": 515, "end": 2209 }
class ____(Enum):
    """Feature flags that can be requested for a preprod artifact."""

    SIZE_ANALYSIS = "size_analysis"
    BUILD_DISTRIBUTION = "build_distribution"


def _get_preprod_producer() -> KafkaProducer:
    """Construct the Kafka producer for preprod artifact events."""
    return get_arroyo_producer(
        "sentry.preprod.producer",
        Topic.PREPROD_ARTIFACT_EVENTS,
        exclude_config_keys=["compression.type", "message.max.bytes"],
    )


# Lazily-created, process-wide producer shared by all calls below.
_preprod_producer = SingletonProducer(
    _get_preprod_producer, max_futures=settings.SENTRY_PREPROD_ARTIFACT_EVENTS_FUTURES_MAX_LIMIT
)


def produce_preprod_artifact_to_kafka(
    project_id: int,
    organization_id: int,
    artifact_id: int,
    requested_features: list[PreprodFeature] | None = None,
) -> None:
    """Publish a preprod-artifact event to Kafka.

    Re-raises on Kafka failure so the calling task can retry.
    """
    if requested_features is None:
        # TODO(preprod): wire up to quota system and remove this default
        requested_features = [
            PreprodFeature.SIZE_ANALYSIS,
            PreprodFeature.BUILD_DISTRIBUTION,
        ]

    message = {
        "artifact_id": str(artifact_id),
        "project_id": str(project_id),
        "organization_id": str(organization_id),
        "requested_features": [feature.value for feature in requested_features],
    }
    # Key by project + artifact so events for one artifact stay on one partition.
    partition_key = f"{project_id}-{artifact_id}".encode()
    kafka_payload = KafkaPayload(partition_key, json.dumps(message).encode("utf-8"), [])

    try:
        real_topic = get_topic_definition(Topic.PREPROD_ARTIFACT_EVENTS)["real_topic_name"]
        _preprod_producer.produce(ArroyoTopic(real_topic), kafka_payload)
    except KafkaException:
        logger.exception(
            "Failed to send preprod artifact message to Kafka",
            extra={"artifact_id": artifact_id, "project_id": project_id},
        )
        raise  # Re-raise to trigger task retry
PreprodFeature
python
readthedocs__readthedocs.org
readthedocs/builds/migrations/0016_add_mkdocs_html_doctype.py
{ "start": 149, "end": 1014 }
class ____(migrations.Migration):
    # Adds the "mkdocs_html" choice to Version.documentation_type.
    # NOTE(review): marked safe to run after deploy — presumably because a
    # choices-only change alters validation, not the column definition; confirm
    # against the project's Safe tooling.
    safe = Safe.after_deploy()

    dependencies = [
        ("builds", "0015_uploading_build_state"),
    ]

    operations = [
        # Re-declare the field with the extended choice list; existing rows
        # keep their stored values.
        migrations.AlterField(
            model_name="version",
            name="documentation_type",
            field=models.CharField(
                choices=[
                    ("sphinx", "Sphinx Html"),
                    ("mkdocs", "Mkdocs (Markdown)"),
                    ("sphinx_htmldir", "Sphinx HtmlDir"),
                    ("sphinx_singlehtml", "Sphinx Single Page HTML"),
                    ("mkdocs_html", "Mkdocs Html Pages"),
                ],
                default="sphinx",
                help_text="Type of documentation the version was built with.",
                max_length=20,
                verbose_name="Documentation type",
            ),
        ),
    ]
Migration
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py
{ "start": 13278, "end": 14983 }
class ____(test.TestCase):
  """Tests for the _AddAndReturnDiag addition strategy."""

  def setUp(self):
    self._adder = linear_operator_addition._AddAndReturnDiag()

  @test_util.run_deprecated_v1
  def test_identity_plus_identity_returns_diag(self):
    ident_plain = linalg.LinearOperatorIdentity(num_rows=2)
    ident_batched = linalg.LinearOperatorIdentity(num_rows=2, batch_shape=[3])
    hints = linear_operator_addition._Hints(
        is_positive_definite=True, is_non_singular=True)

    self.assertTrue(self._adder.can_add(ident_plain, ident_batched))
    operator = self._adder.add(ident_plain, ident_batched, "my_operator", hints)
    self.assertIsInstance(operator, linalg.LinearOperatorDiag)

    with self.cached_session():
      # Identity + batched identity should sum to 2 * I with the batch shape.
      expected = 2 * linalg_ops.eye(num_rows=2, batch_shape=[3])
      self.assertAllClose(expected, operator.to_dense())
      self.assertTrue(operator.is_positive_definite)
      self.assertTrue(operator.is_non_singular)
      self.assertEqual("my_operator", operator.name)

  @test_util.run_deprecated_v1
  def test_diag_plus_diag(self):
    diag_batched = rng.rand(2, 3, 4)
    diag_flat = rng.rand(4)
    op_batched = linalg.LinearOperatorDiag(diag_batched)
    op_flat = linalg.LinearOperatorDiag(diag_flat)
    hints = linear_operator_addition._Hints(
        is_positive_definite=True, is_non_singular=True)

    self.assertTrue(self._adder.can_add(op_batched, op_flat))
    operator = self._adder.add(op_batched, op_flat, "my_operator", hints)
    self.assertIsInstance(operator, linalg.LinearOperatorDiag)

    with self.cached_session():
      # The sum of two diagonal operators is the operator of the summed diags.
      expected = linalg.LinearOperatorDiag(diag_batched + diag_flat).to_dense()
      self.assertAllClose(expected, operator.to_dense())
      self.assertTrue(operator.is_positive_definite)
      self.assertTrue(operator.is_non_singular)
      self.assertEqual("my_operator", operator.name)
AddAndReturnDiagTest
python
altair-viz__altair
altair/vegalite/v6/schema/_config.py
{ "start": 74292, "end": 89178 }
class ____(TypedDict, total=False):
    """
    :class:`altair.Config` ``TypedDict`` wrapper.

    Every key is optional (``total=False``) and mirrors a property of the
    Vega-Lite top-level ``config`` object: per-mark-type defaults (``arc``,
    ``bar``, ``line``, ...), axis/legend/header/title/scale configuration,
    number and time formatting options, and view-composition defaults.  For
    the authoritative description of each property, see the Vega-Lite
    configuration documentation:
    https://vega.github.io/vega-lite/docs/config.html#config
    """

    # --- Per-mark-type default configs (interleaved alphabetically below).
    arc: RectConfigKwds
    area: AreaConfigKwds
    # Include ARIA default attributes in SVG output (default: true).
    aria: bool
    # How visualization size is determined: "pad" (default), "fit", "none",
    # or a parameter object for content sizing / automatic resizing.
    autosize: AutoSizeParamsKwds | AutosizeType_T
    # Base axis config plus per-orientation / per-scale-type overrides.
    axis: AxisConfigKwds
    axisBand: AxisConfigKwds
    axisBottom: AxisConfigKwds
    axisDiscrete: AxisConfigKwds
    axisLeft: AxisConfigKwds
    axisPoint: AxisConfigKwds
    axisQuantitative: AxisConfigKwds
    axisRight: AxisConfigKwds
    axisTemporal: AxisConfigKwds
    axisTop: AxisConfigKwds
    axisX: AxisConfigKwds
    axisXBand: AxisConfigKwds
    axisXDiscrete: AxisConfigKwds
    axisXPoint: AxisConfigKwds
    axisXQuantitative: AxisConfigKwds
    axisXTemporal: AxisConfigKwds
    axisY: AxisConfigKwds
    axisYBand: AxisConfigKwds
    axisYDiscrete: AxisConfigKwds
    axisYPoint: AxisConfigKwds
    axisYQuantitative: AxisConfigKwds
    axisYTemporal: AxisConfigKwds
    # CSS background color of the entire view (default: "white").
    background: ColorHex | ColorName_T
    bar: BarConfigKwds
    boxplot: BoxPlotConfigKwds
    circle: MarkConfigKwds
    # Defaults for concatenation/repeat view composition operators.
    concat: CompositionConfigKwds
    # Default axis/legend title for count fields (default: "Count of Records").
    countTitle: str
    # Allow custom formatter functions registered as Vega expressions.
    customFormatTypes: bool
    errorband: ErrorBandConfigKwds
    errorbar: ErrorBarConfigKwds
    facet: CompositionConfigKwds
    # Field title style: "verbal" (default), "function"-style, or "plain".
    fieldTitle: Literal["verbal", "functional", "plain"]
    # Default font for all text marks, titles, and labels.
    font: str
    geoshape: MarkConfigKwds
    # Header config plus row/column/facet-specific overrides.
    header: HeaderConfigKwds
    headerColumn: HeaderConfigKwds
    headerFacet: HeaderConfigKwds
    headerRow: HeaderConfigKwds
    image: RectConfigKwds
    legend: LegendConfigKwds
    line: LineConfigKwds
    # Delimiter (e.g. newline) to break text strings into multiple lines.
    lineBreak: str
    # Locale definitions for number/date parsing and formatting.
    locale: LocaleKwds
    mark: MarkConfigKwds
    # --- D3-style number/time format strings and custom format-type hooks.
    normalizedNumberFormat: str
    normalizedNumberFormatType: str
    numberFormat: str
    numberFormatType: str
    # Padding in pixels from canvas edge to data rectangle (default: 5).
    padding: float | PaddingKwds
    # Dynamic variables or selections parameterizing the visualization.
    params: Sequence[VariableParameterKwds | TopLevelSelectionParameterKwds]
    point: MarkConfigKwds
    projection: ProjectionConfigKwds
    # Default range arrays or schemes for scales.
    range: RangeConfigKwds
    rect: RectConfigKwds
    rule: MarkConfigKwds
    scale: ScaleConfigKwds
    selection: SelectionConfigKwds
    square: MarkConfigKwds
    # Named style -> mark-config mappings.
    style: StyleConfigIndexKwds
    text: MarkConfigKwds
    tick: TickConfigKwds
    # Default time format for raw time values (default: "%b %d, %Y").
    timeFormat: str
    timeFormatType: str
    title: TitleConfigKwds
    tooltipFormat: FormatConfigKwds
    trail: LineConfigKwds
    # Defaults for single view plots.
    view: ViewConfigKwds
ConfigKwds
python
google__pytype
pytype/pattern_matching.py
{ "start": 9137, "end": 9249 }
class ____:
    """A list of uncovered cases, for error reporting."""

    # Line number associated with the incomplete match (where the error is
    # reported).
    line: int
    # String descriptions of the match cases left uncovered.
    cases: set[str]
IncompleteMatch
python
HypothesisWorks__hypothesis
hypothesis-python/tests/django/toystore/models.py
{ "start": 1591, "end": 1664 }
class ____(models.Model):
    """Toy model exposing a nullable custom ``CharmField``."""

    charm = CharmField(null=True)
CouldBeCharming
python
bokeh__bokeh
tests/unit/bokeh/plotting/test_contour.py
{ "start": 1621, "end": 4426 }
class ____:
    """Tests for ``contour_data``: fill/line extraction and argument validation."""

    @pytest.mark.parametrize("xy_dim", [0, 1, 2])
    def test_xy_dim(self, xyz_levels: XYZ_Levels, xy_dim: int) -> None:
        # The same contours must come out whether x/y are omitted (0),
        # passed as 1D coordinate arrays (1), or as full 2D meshgrids (2).
        x, y, z, levels = xyz_levels
        if xy_dim == 0:
            x = y = None
        elif xy_dim == 2:
            x, y = np.meshgrid(x, y)
        data = contour_data(x, y, z, levels)
        fill, line = data.fill_data, data.line_data

        # Expected fill polygons for the two level bands.
        assert isinstance(fill, FillData)
        assert np.allclose(fill.lower_levels, [-0.5, 1.5])
        assert np.allclose(fill.upper_levels, [1.5, 3.5])
        assert len(fill.xs) == 2
        assert len(fill.ys) == 2
        assert np.allclose(fill.xs[0], [0, 1, 1.5, 1, 0.5, 0, 0])
        assert np.allclose(fill.ys[0], [0, 0, 0, 0.5, 1, 1, 0])
        assert np.allclose(fill.xs[1], [0.5, 1, 1.5, 2, 2, 1, 0.5])
        assert np.allclose(fill.ys[1], [1, 0.5, 0, 0, 1, 1, 1])

        # Expected contour lines; only the middle level crosses the data.
        assert isinstance(line, LineData)
        assert np.allclose(line.levels, [-0.5, 1.5, 3.5])
        assert len(line.xs) == 3
        assert len(line.ys) == 3
        assert np.allclose(line.xs[0], [])
        assert np.allclose(line.ys[0], [])
        assert np.allclose(line.xs[1], [0.5, 1, 1.5])
        assert np.allclose(line.ys[1], [1, 0.5, 0])
        assert np.allclose(line.xs[2], [])
        assert np.allclose(line.ys[2], [])

    @pytest.mark.parametrize("want_fill,want_line", [(True, True), (True, False), (False, True)])
    def test_fill_line(self, xyz_levels: XYZ_Levels, want_fill: bool, want_line: bool) -> None:
        # fill_data/line_data are populated only when requested; the other
        # attribute must be None.
        x, y, z, levels = xyz_levels
        data = contour_data(x, y, z, levels, want_fill=want_fill, want_line=want_line)
        fill, line = data.fill_data, data.line_data
        if want_fill:
            assert isinstance(fill, FillData)
        else:
            assert fill is None
        if want_line:
            assert isinstance(line, LineData)
        else:
            assert line is None

    def test_neither(self, xyz_levels: XYZ_Levels) -> None:
        # Requesting neither fill nor line output is an error.
        _, _, z, levels = xyz_levels
        with pytest.raises(ValueError, match="Neither fill nor line requested in contour_data"):
            contour_data(z=z, levels=levels, want_fill=False, want_line=False)

    def test_invalid_args(self) -> None:
        # Missing levels.
        with pytest.raises(ValueError, match="No contour levels specified"):
            contour_data(z=[[0, 1], [2, 3]])
        # Levels must be strictly increasing: decreasing and repeated both fail.
        with pytest.raises(ValueError, match="Contour levels must be increasing"):
            contour_data(z=[[0, 1], [2, 3]], levels=[2, 1, 0])
        with pytest.raises(ValueError, match="Contour levels must be increasing"):
            contour_data(z=[[0, 1], [2, 3]], levels=[0, 1, 1])
        with pytest.raises(TypeError):
            # No z, not matching exception string as originates in ContourPy
            contour_data(levels=[1])
Test_contour_data
python
imageio__imageio
imageio/core/v3_plugin_api.py
{ "start": 1475, "end": 15560 }
class ____:
    """Abstract ImageIO v3 plugin interface.

    A plugin adapts a backend library to the ImageIO v3 API: it receives a
    :class:`Request` describing the user's intent and translates ``read``,
    ``write``, ``iter``, ``properties``, and ``metadata`` calls into backend
    instructions.  Plugin authors may subclass this class, but any class
    implementing the same methods is treated as a plugin by the ImageIO core.

    Parameters
    ----------
    request : Request
        Represents the user's intent and wraps the ImageResource being
        accessed.  Plugins must access the resource through this object —
        via ``request.get_file()`` (FileLike access) or, as a slower
        fallback, ``request.get_local_filename()`` (a temporary copy).
    **kwargs : Any
        Plugin/backend-specific configuration, usually forwarded verbatim
        to the backend.

    Raises
    ------
    InitializationError
        Raised during ``__init__`` if the plugin cannot fulfill the request
        (e.g. the resource is in the wrong format); the reason may be
        reported to the user.
    ImportError
        Raised at module import or construction time if the plugin's
        backend is not installed.

    Notes
    -----
    Upon successful construction the plugin owns the request and is
    responsible for calling ``request.finish()`` when done; the
    context-manager protocol below does this through :meth:`close`.
    """

    def __init__(self, request: Request) -> None:
        """Store the request.

        Subclasses inheriting from this class must remember to call
        ``super().__init__(request)``; the stored request is exposed via
        the :attr:`request` property.
        """
        self._request = request

    def read(self, *, index: int = 0) -> np.ndarray:
        """Read and return a single ndimage located at ``index``.

        What constitutes "a single ndimage" is at the plugin's discretion
        and should be documented per format (e.g. a volume for an ImageJ
        TIFF hyperstack, one frame for an MP4).  ``index=None`` is special:
        the plugin should load *all* ndimages and stack them along a new
        first axis; plugins that do not support this must pick a sensible
        default and raise ``ValueError`` for ``index=None``.  An
        out-of-bounds index raises ``ValueError``.

        Parameters
        ----------
        index : int
            Position of the ndimage inside the ImageResource; an ellipsis
            (...) stacks all ndimages along a new batch dimension, and
            None lets the plugin decide.
        **kwargs : Any
            Plugin-specific read options, usually forwarded to the backend.

        Returns
        -------
        ndimage : np.ndarray
            The decoded pixel data (sometimes called bitmap).
        """
        raise NotImplementedError()

    def write(self, ndimage: Union[ArrayLike, List[ArrayLike]]) -> Optional[bytes]:
        """Encode ``ndimage`` and write it to the ImageResource.

        Any previously stored content is overwritten.  Single-format
        backends must verify that the resource matches their format
        (ideally raising ``InitializationError`` at construction);
        multi-format backends must determine the desired format (e.g. from
        ``request.extension``) and must raise — not write — if it cannot be
        determined.  ``ndimage`` may be a list of (possibly non-stackable)
        ndimages or a batch stacked along the first axis; how ambiguity is
        resolved must be documented by the plugin.

        Parameters
        ----------
        ndimage : ArrayLike
            The ndimage (or list/batch of ndimages) to encode.
        **kwargs : Any
            Plugin-specific write options, usually forwarded to the backend.

        Returns
        -------
        encoded_image : bytes or None
            The encoded bytes when the target is the special ``"<bytes>"``
            resource; otherwise None.
        """
        raise NotImplementedError()

    def iter(self) -> Iterator[np.ndarray]:
        """Yield the ndimages in the ImageResource in file order.

        Roughly equivalent to calling ``self.read(index=idx)`` for
        ``idx = 0, 1, ...`` until a ``ValueError`` occurs; see :meth:`read`
        for the intended semantics.

        Parameters
        ----------
        **kwargs : Any
            Plugin-specific iteration options, usually forwarded to the
            backend.

        Yields
        ------
        ndimage : np.ndarray
            The decoded pixel data (sometimes called bitmap).

        See Also
        --------
        PluginV3.read
        """
        raise NotImplementedError()

    def properties(self, index: int = 0) -> ImageProperties:
        """Return standardized metadata for the ndimage at ``index``.

        Parameters
        ----------
        index : int
            Position of the ndimage; an ellipsis (...) describes all
            ndimages stacked along a new batch dimension, and None lets the
            plugin pick its default.

        Returns
        -------
        properties : ImageProperties
            A dataclass filled with standardized image metadata.
        """
        raise NotImplementedError()

    def metadata(self, index: int = 0, exclude_applied: bool = True) -> Dict[str, Any]:
        """Return format-specific metadata as a dict.

        Field names should match the format's own naming where possible.
        When ``exclude_applied`` is True, metadata that the plugin already
        applies to the pixel data during ``read``/``iter`` (e.g. an EXIF
        rotation it performs) must be omitted to avoid double-application
        confusion.  Unlike :meth:`read`, ``index=None`` requests *global*
        metadata; if there is none, return an empty dict or raise.

        Parameters
        ----------
        index : int
            Position of the ndimage; an ellipsis (...) requests global
            metadata, and None lets the plugin pick its default.
        exclude_applied : bool
            If True (default), omit fields the plugin consumes while
            reading.

        Returns
        -------
        metadata : dict
            Format-specific metadata fields and their values.
        """
        raise NotImplementedError()

    def close(self) -> None:
        """Release the ImageResource, mirroring the built-in ``open``'s
        ``close``.

        Used by the context manager and destructor below to avoid leaking
        resources; plugins with no extra cleanup can rely on this default
        implementation.
        """
        self.request.finish()

    @property
    def request(self) -> Request:
        # The request handed to __init__; owned by this plugin.
        return self._request

    def __enter__(self) -> "PluginV3":
        return self

    def __exit__(self, type, value, traceback) -> None:
        self.close()

    def __del__(self) -> None:
        self.close()
PluginV3
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/airflow/AIR301_airflow_plugin.py
{ "start": 52, "end": 925 }
class ____(AirflowPlugin): name = "test_plugin" # --- Invalid extensions start operators = [PluginOperator] sensors = [PluginSensorOperator] hooks = [PluginHook] executors = [PluginExecutor] # --- Invalid extensions end macros = [plugin_macro] flask_blueprints = [bp] appbuilder_views = [v_appbuilder_package] appbuilder_menu_items = [appbuilder_mitem, appbuilder_mitem_toplevel] global_operator_extra_links = [ AirflowLink(), GithubLink(), ] operator_extra_links = [ GoogleLink(), AirflowLink2(), CustomOpLink(), CustomBaseIndexOpLink(1), ] timetables = [CustomCronDataIntervalTimetable] listeners = [empty_listener, ClassBasedListener()] ti_deps = [CustomTestTriggerRule()] priority_weight_strategies = [CustomPriorityWeightStrategy]
AirflowTestPlugin
python
django__django
tests/model_regress/models.py
{ "start": 612, "end": 801 }
class ____(models.Model):
    """Model with an explicit (non-default) ``AutoField`` primary key."""

    # Test models with non-default primary keys / AutoFields #5218
    movie_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
Movie
python
lxml__lxml
src/lxml/tests/test_dtd.py
{ "start": 266, "end": 16675 }
class ____(HelperTestCase): def test_dtd(self): pass def test_dtd_file(self): parse = etree.parse tree = parse(fileInTestDir("test.xml")) root = tree.getroot() dtd = etree.DTD(fileInTestDir("test.dtd")) self.assertTrue(dtd.validate(root)) def test_dtd_file_pathlike(self): parse = etree.parse tree = parse(fileInTestDir("test.xml")) root = tree.getroot() dtd = etree.DTD(SimpleFSPath(fileInTestDir("test.dtd"))) self.assertTrue(dtd.validate(root)) def test_dtd_external_id(self): # Only test that the 'external_id' option passes. # Don't fail if catalogues aren't available. try: etree.DTD(external_id="-//W3C//DTD HTML 4.01//EN") except etree.DTDParseError: pass def test_dtd_stringio(self): root = etree.XML(b"<b/>") dtd = etree.DTD(BytesIO(b"<!ELEMENT b EMPTY>")) self.assertTrue(dtd.validate(root)) def test_dtd_parse_invalid(self): fromstring = etree.fromstring parser = etree.XMLParser(dtd_validation=True) xml = ('<!DOCTYPE b SYSTEM "%s"><b><a/></b>' % fileInTestDir("test.dtd")).encode('utf-8') self.assertRaises(etree.XMLSyntaxError, fromstring, xml, parser=parser) def test_dtd_parse_file_not_found(self): fromstring = etree.fromstring dtd_filename = fileUrlInTestDir("__nosuch.dtd") parser = etree.XMLParser(dtd_validation=True) xml = '<!DOCTYPE b SYSTEM "%s"><b><a/></b>' % dtd_filename self.assertRaises(etree.XMLSyntaxError, fromstring, xml, parser=parser) errors = None try: fromstring(xml, parser=parser) except etree.XMLSyntaxError: e = sys.exc_info()[1] self.assertTrue(e.error_log) self.assertTrue(parser.error_log) errors = [entry.message for entry in e.error_log if dtd_filename in entry.message] self.assertTrue(errors) def test_dtd_parse_valid(self): parser = etree.XMLParser(dtd_validation=True) xml = ('<!DOCTYPE a SYSTEM "%s"><a><b/></a>' % fileUrlInTestDir("test.dtd")) root = etree.fromstring(xml, parser=parser) def test_dtd_parse_valid_file_url(self): parser = etree.XMLParser(dtd_validation=True) xml = ('<!DOCTYPE a SYSTEM "%s"><a><b/></a>' % 
fileUrlInTestDir("test.dtd")) root = etree.fromstring(xml, parser=parser) def test_dtd_parse_valid_relative(self): parser = etree.XMLParser(dtd_validation=True) xml = '<!DOCTYPE a SYSTEM "test.dtd"><a><b/></a>' root = etree.fromstring( xml, parser=parser, base_url=fileUrlInTestDir("test.xml")) def test_dtd_parse_valid_relative_file_url(self): parser = etree.XMLParser(dtd_validation=True) xml = '<!DOCTYPE a SYSTEM "test.dtd"><a><b/></a>' root = etree.fromstring( xml, parser=parser, base_url=fileUrlInTestDir("test.xml")) def test_dtd_invalid(self): root = etree.XML("<b><a/></b>") dtd = etree.DTD(BytesIO(b"<!ELEMENT b EMPTY>")) self.assertRaises(etree.DocumentInvalid, dtd.assertValid, root) def test_dtd_assertValid(self): root = etree.XML("<b><a/></b>") dtd = etree.DTD(BytesIO(b"<!ELEMENT b (a)><!ELEMENT a EMPTY>")) dtd.assertValid(root) def test_dtd_internal(self): root = etree.XML(b''' <!DOCTYPE b SYSTEM "none" [ <!ELEMENT b (a)> <!ELEMENT a EMPTY> ]> <b><a/></b> ''') dtd = etree.ElementTree(root).docinfo.internalDTD self.assertTrue(dtd) dtd.assertValid(root) def test_dtd_internal_invalid(self): root = etree.XML(b''' <!DOCTYPE b SYSTEM "none" [ <!ELEMENT b (a)> <!ELEMENT a (c)> <!ELEMENT c EMPTY> ]> <b><a/></b> ''') dtd = etree.ElementTree(root).docinfo.internalDTD self.assertTrue(dtd) self.assertFalse(dtd.validate(root)) def test_dtd_invalid_duplicate_id(self): root = etree.XML(b''' <a><b id="id1"/><b id="id2"/><b id="id1"/></a> ''') dtd = etree.DTD(BytesIO(b""" <!ELEMENT a (b*)> <!ATTLIST b id ID #REQUIRED > <!ELEMENT b EMPTY> """)) self.assertFalse(dtd.validate(root)) self.assertTrue(dtd.error_log) self.assertTrue([error for error in dtd.error_log if 'id1' in error.message]) def test_dtd_api_internal(self): root = etree.XML(b''' <!DOCTYPE b SYSTEM "none" [ <!ATTLIST a attr1 (x | y | z) "z" attr2 CDATA #FIXED "X" > <!ELEMENT b (a)> <!ELEMENT a EMPTY> ]> <b><a/></b> ''') dtd = etree.ElementTree(root).docinfo.internalDTD self.assertTrue(dtd) dtd.assertValid(root) 
seen = [] for el in dtd.iterelements(): if el.name == 'a': self.assertEqual(2, len(el.attributes())) for attr in el.iterattributes(): if attr.name == 'attr1': self.assertEqual('enumeration', attr.type) self.assertEqual('none', attr.default) self.assertEqual('z', attr.default_value) values = attr.values() values.sort() self.assertEqual(['x', 'y', 'z'], values) else: self.assertEqual('attr2', attr.name) self.assertEqual('cdata', attr.type) self.assertEqual('fixed', attr.default) self.assertEqual('X', attr.default_value) else: self.assertEqual('b', el.name) self.assertEqual(0, len(el.attributes())) seen.append(el.name) seen.sort() self.assertEqual(['a', 'b'], seen) self.assertEqual(2, len(dtd.elements())) def test_internal_dtds(self): for el_count in range(2, 5): for attr_count in range(4): root = etree.XML(''' <!DOCTYPE el0 SYSTEM "none" [ ''' + ''.join([''' <!ATTLIST el%d attr%d (x | y | z) "z" > ''' % (e, a) for a in range(attr_count) for e in range(el_count) ]) + ''.join([''' <!ELEMENT el%d EMPTY> ''' % e for e in range(1, el_count) ]) + ''' ''' + '<!ELEMENT el0 (%s)>' % '|'.join([ 'el%d' % e for e in range(1, el_count)]) + ''' ]> <el0><el1 %s /></el0> ''' % ' '.join(['attr%d="x"' % a for a in range(attr_count)])) dtd = etree.ElementTree(root).docinfo.internalDTD self.assertTrue(dtd) dtd.assertValid(root) e = -1 for e, el in enumerate(dtd.iterelements()): self.assertEqual(attr_count, len(el.attributes())) a = -1 for a, attr in enumerate(el.iterattributes()): self.assertEqual('enumeration', attr.type) self.assertEqual('none', attr.default) self.assertEqual('z', attr.default_value) values = sorted(attr.values()) self.assertEqual(['x', 'y', 'z'], values) self.assertEqual(attr_count - 1, a) self.assertEqual(el_count - 1, e) self.assertEqual(el_count, len(dtd.elements())) def test_dtd_broken(self): self.assertRaises(etree.DTDParseError, etree.DTD, BytesIO(b"<!ELEMENT b HONKEY>")) def test_parse_file_dtd(self): parser = etree.XMLParser(attribute_defaults=True) tree = 
etree.parse(fileInTestDir('test.xml'), parser) root = tree.getroot() self.assertEqual( "valueA", root.get("default")) self.assertEqual( "valueB", root[0].get("default")) @skipIf(etree.LIBXML_VERSION == (2, 9, 0), "DTD loading is broken for incremental parsing in libxml2 2.9.0") def test_iterparse_file_dtd_start(self): iterparse = etree.iterparse iterator = iterparse(fileInTestDir("test.xml"), events=('start',), attribute_defaults=True) attributes = [ element.get("default") for event, element in iterator ] self.assertEqual( ["valueA", "valueB"], attributes) @skipIf(etree.LIBXML_VERSION == (2, 9, 0), "DTD loading is broken for incremental parsing in libxml2 2.9.0") def test_iterparse_file_dtd_end(self): iterparse = etree.iterparse iterator = iterparse(fileInTestDir("test.xml"), events=('end',), attribute_defaults=True) attributes = [ element.get("default") for event, element in iterator ] self.assertEqual( ["valueB", "valueA"], attributes) def test_dtd_attrs(self): dtd = etree.DTD(fileUrlInTestDir("test.dtd")) # Test DTD.system_url attribute self.assertTrue(dtd.system_url.endswith("test.dtd")) # Test elements and their attributes a = dtd.elements()[0] self.assertEqual(a.name, "a") self.assertEqual(a.type, "element") self.assertEqual(a.content.name, "b") self.assertEqual(a.content.type, "element") self.assertEqual(a.content.occur, "once") aattr = a.attributes()[0] self.assertEqual(aattr.name, "default") self.assertEqual(aattr.type, "enumeration") self.assertEqual(aattr.values(), ["valueA", "valueB"]) self.assertEqual(aattr.default_value, "valueA") b = dtd.elements()[1] self.assertEqual(b.name, "b") self.assertEqual(b.type, "empty") self.assertEqual(b.content, None) # Test entities and their attributes c = dtd.entities()[0] self.assertEqual(c.name, "c") self.assertEqual(c.orig, "&#42;") self.assertEqual(c.content, "*") # Test DTD.name attribute root = etree.XML(b''' <!DOCTYPE a SYSTEM "none" [ <!ELEMENT a EMPTY> ]> <a/> ''') dtd = 
etree.ElementTree(root).docinfo.internalDTD self.assertEqual(dtd.name, "a") # Test DTD.name and DTD.systemID attributes parser = etree.XMLParser(dtd_validation=True) xml = '<!DOCTYPE a SYSTEM "test.dtd"><a><b/></a>' root = etree.fromstring(xml, parser=parser, base_url=fileUrlInTestDir("test.xml")) dtd = root.getroottree().docinfo.internalDTD self.assertEqual(dtd.name, "a") self.assertEqual(dtd.system_url, "test.dtd") def test_declaration_escape_quote_pid(self): # Standard allows quotes in systemliteral, but in that case # systemliteral must be escaped with single quotes. # See http://www.w3.org/TR/REC-xml/#sec-prolog-dtd. root = etree.XML('''<!DOCTYPE a PUBLIC 'foo' '"'><a/>''') doc = root.getroottree() self.assertEqual(doc.docinfo.doctype, '''<!DOCTYPE a PUBLIC "foo" '"'>''') self.assertEqual(etree.tostring(doc), b'''<!DOCTYPE a PUBLIC "foo" '"'>\n<a/>''') def test_declaration_quote_withoutpid(self): root = etree.XML('''<!DOCTYPE a SYSTEM '"'><a/>''') doc = root.getroottree() self.assertEqual(doc.docinfo.doctype, '''<!DOCTYPE a SYSTEM '"'>''') self.assertEqual(etree.tostring(doc), b'''<!DOCTYPE a SYSTEM '"'>\n<a/>''') def test_declaration_apos(self): root = etree.XML('''<!DOCTYPE a SYSTEM "'"><a/>''') doc = root.getroottree() self.assertEqual(doc.docinfo.doctype, '''<!DOCTYPE a SYSTEM "'">''') self.assertEqual(etree.tostring(doc), b'''<!DOCTYPE a SYSTEM "'">\n<a/>''') def test_ietf_decl(self): html_data = ( '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">\n' '<html></html>') root = etree.HTML(html_data) doc = root.getroottree() self.assertEqual(doc.docinfo.doctype, '<!DOCTYPE html PUBLIC "-//IETF//DTD HTML//EN">') self.assertEqual(etree.tostring(doc, method='html'), html_data.encode('utf-8')) def test_set_decl_public(self): doc = etree.Element('test').getroottree() doc.docinfo.public_id = 'bar' doc.docinfo.system_url = 'baz' self.assertEqual(doc.docinfo.doctype, '<!DOCTYPE test PUBLIC "bar" "baz">') self.assertEqual(etree.tostring(doc), b'<!DOCTYPE test PUBLIC 
"bar" "baz">\n<test/>') def test_html_decl(self): # Slightly different to one above: when we create an html element, # we do not start with a blank slate. doc = html.Element('html').getroottree() doc.docinfo.public_id = 'bar' doc.docinfo.system_url = 'baz' self.assertEqual(doc.docinfo.doctype, '<!DOCTYPE html PUBLIC "bar" "baz">') self.assertEqual(etree.tostring(doc), b'<!DOCTYPE html PUBLIC "bar" "baz">\n<html/>') def test_clean_doctype(self): doc = html.Element('html').getroottree() self.assertTrue(doc.docinfo.doctype != '') doc.docinfo.clear() self.assertTrue(doc.docinfo.doctype == '') def test_set_decl_system(self): doc = etree.Element('test').getroottree() doc.docinfo.system_url = 'baz' self.assertEqual(doc.docinfo.doctype, '<!DOCTYPE test SYSTEM "baz">') self.assertEqual(etree.tostring(doc), b'<!DOCTYPE test SYSTEM "baz">\n<test/>') def test_empty_decl(self): doc = etree.Element('test').getroottree() doc.docinfo.public_id = None self.assertEqual(doc.docinfo.doctype, '<!DOCTYPE test>') self.assertTrue(doc.docinfo.public_id is None) self.assertTrue(doc.docinfo.system_url is None) self.assertEqual(etree.tostring(doc), b'<!DOCTYPE test>\n<test/>') def test_invalid_decl_1(self): docinfo = etree.Element('test').getroottree().docinfo def set_public_id(value): docinfo.public_id = value self.assertRaises(ValueError, set_public_id, 'ä') self.assertRaises(ValueError, set_public_id, 'qwerty ä asdf') def test_invalid_decl_2(self): docinfo = etree.Element('test').getroottree().docinfo def set_system_url(value): docinfo.system_url = value self.assertRaises(ValueError, set_system_url, '\'"') self.assertRaises(ValueError, set_system_url, '"\'') self.assertRaises(ValueError, set_system_url, ' " \' ') def test_comment_before_dtd(self): data = '<!--comment--><!DOCTYPE test>\n<!-- --><test/>' doc = etree.fromstring(data).getroottree() self.assertEqual(etree.tostring(doc), data.encode('utf-8')) def test_entity_system_url(self): xml = etree.parse(BytesIO(b'<!DOCTYPE test [ <!ENTITY 
TestReference SYSTEM "./foo.bar"> ]><a/>')) self.assertEqual(xml.docinfo.internalDTD.entities()[0].system_url, "./foo.bar") def test_entity_system_url_none(self): xml = etree.parse(BytesIO(b'<!DOCTYPE test [ <!ENTITY TestReference "testvalue"> ]><a/>')) self.assertEqual(xml.docinfo.internalDTD.entities()[0].system_url, None) def test_suite(): suite = unittest.TestSuite() suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(ETreeDtdTestCase)]) suite.addTests( [make_doctest('validation.txt')]) return suite if __name__ == '__main__': print('to test use test.py %s' % __file__)
ETreeDtdTestCase
python
doocs__leetcode
solution/0500-0599/0518.Coin Change II/Solution.py
{ "start": 0, "end": 387 }
class ____: def change(self, amount: int, coins: List[int]) -> int: m, n = len(coins), amount f = [[0] * (n + 1) for _ in range(m + 1)] f[0][0] = 1 for i, x in enumerate(coins, 1): for j in range(n + 1): f[i][j] = f[i - 1][j] if j >= x: f[i][j] += f[i][j - x] return f[m][n]
Solution
python
sympy__sympy
sympy/integrals/manualintegrate.py
{ "start": 7273, "end": 7424 }
class ____(TrigRule): """integrate(sec(x)*tan(x), x) -> sec(x)""" def eval(self) -> Expr: return sec(self.variable) @dataclass
SecTanRule
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-qdrant/unit_tests/test_indexer.py
{ "start": 486, "end": 8383 }
class ____(unittest.TestCase): def setUp(self): self.mock_config = QdrantIndexingConfigModel( **{ "url": "https://client-url.io", "auth_method": {"mode": "api_key_auth", "api_key": "api_key"}, "prefer_grpc": False, "collection": "dummy-collection", "distance_metric": "dot", "text_field": "text", } ) self.qdrant_indexer = QdrantIndexer(self.mock_config, 100) self.qdrant_indexer._create_client = Mock() self.qdrant_indexer._client = Mock() def test_check_gets_existing_collection(self): mock_collections = Mock(collections=[Mock()]) mock_collections.collections[0].name = "dummy-collection" self.qdrant_indexer._client.get_collections.return_value = mock_collections self.qdrant_indexer._client.get_collection.return_value = Mock( config=Mock(params=Mock(vectors=Mock(size=100, distance=models.Distance.DOT))) ) check_result = self.qdrant_indexer.check() self.assertIsNone(check_result) self.qdrant_indexer._create_client.assert_called() self.qdrant_indexer._client.get_collections.assert_called() self.qdrant_indexer._client.get_collection.assert_called() self.qdrant_indexer._client.close.assert_called() def test_check_creates_new_collection_if_not_exists(self): self.qdrant_indexer._client.get_collections.return_value = Mock(collections=[]) check_result = self.qdrant_indexer.check() self.assertIsNone(check_result) self.qdrant_indexer._create_client.assert_called() self.qdrant_indexer._client.get_collections.assert_called() self.qdrant_indexer._client.recreate_collection.assert_called() self.qdrant_indexer._client.close.assert_called() def test_check_handles_failure_conditions(self): # Test 1: url starts with https:// self.qdrant_indexer.config.url = "client-url.io" result = self.qdrant_indexer.check() self.assertEqual(result, "Host must start with https://") # Test 2: random exception self.qdrant_indexer.config.url = "https://client-url.io" self.qdrant_indexer._create_client.side_effect = Exception("Random exception") result = self.qdrant_indexer.check() self.assertTrue("Random 
exception" in result) # Test 3: client server is not alive self.qdrant_indexer._create_client.side_effect = None self.qdrant_indexer._client = None result = self.qdrant_indexer.check() self.assertEqual(result, "Qdrant client is not alive.") # Test 4: Test vector size does not match mock_collections = Mock(collections=[Mock()]) mock_collections.collections[0].name = "dummy-collection" self.qdrant_indexer._client = Mock() self.qdrant_indexer._client.get_collections.return_value = mock_collections self.qdrant_indexer._client.get_collection.return_value = Mock( config=Mock(params=Mock(vectors=Mock(size=10, distance=models.Distance.DOT))) ) result = self.qdrant_indexer.check() self.assertTrue("The collection's vector's size must match the embedding dimensions" in result) # Test 5: Test distance metric does not match self.qdrant_indexer._client.get_collection.return_value = Mock( config=Mock(params=Mock(vectors=Mock(size=100, distance=models.Distance.COSINE))) ) result = self.qdrant_indexer.check() self.assertTrue("The colection's vector's distance metric must match the selected distance metric option" in result) def test_pre_sync_calls_delete(self): self.qdrant_indexer.pre_sync( Mock( streams=[ Mock( destination_sync_mode=DestinationSyncMode.overwrite, stream=AirbyteStream(name="some_stream", json_schema={}, supported_sync_modes=[SyncMode.full_refresh]), ), Mock( destination_sync_mode=DestinationSyncMode.overwrite, stream=AirbyteStream(name="another_stream", json_schema={}, supported_sync_modes=[SyncMode.full_refresh]), ), Mock( destination_sync_mode=DestinationSyncMode.append, stream=AirbyteStream(name="incremental_stream", json_schema={}, supported_sync_modes=[SyncMode.full_refresh]), ), ] ) ) self.qdrant_indexer._client.delete.assert_called_with( collection_name=self.mock_config.collection, points_selector=models.FilterSelector( filter=models.Filter( should=[ models.FieldCondition(key="_ab_stream", match=models.MatchValue(value="some_stream")), 
models.FieldCondition(key="_ab_stream", match=models.MatchValue(value="another_stream")), ] ) ), ) def test_pre_sync_does_not_call_delete(self): self.qdrant_indexer.pre_sync( Mock(streams=[Mock(destination_sync_mode=DestinationSyncMode.append, stream=Mock(name="some_stream"))]) ) self.qdrant_indexer._client.delete.assert_not_called() def test_pre_sync_calls_create_payload_index(self): self.qdrant_indexer.pre_sync(Mock(streams=[])) calls = [ call(collection_name=self.mock_config.collection, field_name="_ab_record_id", field_schema="keyword"), call(collection_name=self.mock_config.collection, field_name="_ab_stream", field_schema="keyword"), ] self.qdrant_indexer._client.create_payload_index.assert_has_calls(calls) def test_index_calls_insert(self): self.qdrant_indexer.index( [ Mock(metadata={"key": "value1"}, page_content="some content", embedding=[1.0, 2.0, 3.0]), Mock(metadata={"key": "value2"}, page_content="some other content", embedding=[4.0, 5.0, 6.0]), ], None, "some_stream", ) self.qdrant_indexer._client.upload_records.assert_called_once() def test_index_calls_delete(self): self.qdrant_indexer.delete(["some_id", "another_id"], None, "some_stream") self.qdrant_indexer._client.delete.assert_called_with( collection_name=self.mock_config.collection, points_selector=models.FilterSelector( filter=models.Filter( should=[ models.FieldCondition(key="_ab_record_id", match=models.MatchValue(value="some_id")), models.FieldCondition(key="_ab_record_id", match=models.MatchValue(value="another_id")), ] ) ), ) def test_post_sync_calls_close(self): result = self.qdrant_indexer.post_sync() self.qdrant_indexer._client.close.assert_called_once() self.assertEqual( result, [ AirbyteMessage( type=Type.LOG, log=AirbyteLogMessage(level=Level.INFO, message="Qdrant Database Client has been closed successfully") ) ], ) def test_post_sync_handles_failure(self): exception = Exception("Random exception") self.qdrant_indexer._client.close.side_effect = exception result = 
self.qdrant_indexer.post_sync() self.qdrant_indexer._client.close.assert_called_once() self.assertEqual( result, [AirbyteMessage(type=Type.LOG, log=AirbyteLogMessage(level=Level.ERROR, message=format_exception(exception)))] )
TestQdrantIndexer
python
huggingface__transformers
src/transformers/models/markuplm/modeling_markuplm.py
{ "start": 8879, "end": 9535 }
class ____(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->MarkupLM
MarkupLMIntermediate
python
instagram__MonkeyType
tests/test_stubs.py
{ "start": 1741, "end": 4214 }
class ____: def test_single_import(self): """Single imports should be on one line""" imports = ImportMap() imports['a.module'] = {'AClass'} imports['another.module'] = {'AnotherClass'} stub = ImportBlockStub(imports) expected = "\n".join([ 'from a.module import AClass', 'from another.module import AnotherClass', ]) assert stub.render() == expected def test_io_import_single(self): """Single _io imports should convert to io""" imports = ImportMap() imports['_io'] = {'BytesIO'} stub = ImportBlockStub(imports) expected = "\n".join([ 'from io import BytesIO', ]) assert stub.render() == expected def test_multiple_imports(self): """Multiple imports from a single module should each be on their own line""" imports = ImportMap() imports['a.module'] = {'AClass', 'AnotherClass', 'AThirdClass'} stub = ImportBlockStub(imports) expected = "\n".join([ 'from a.module import (', ' AClass,', ' AThirdClass,', ' AnotherClass,', ')', ]) assert stub.render() == expected def test_multiple_io_imports(self): """Multiple imports from single _io module should be convert to io import""" imports = ImportMap() imports['_io'] = {'BytesIO', 'FileIO'} stub = ImportBlockStub(imports) expected = "\n".join([ 'from io import (', ' BytesIO,', ' FileIO,', ')', ]) assert stub.render() == expected def simple_add(a: int, b: int) -> int: return a + b def strip_modules_helper(d1: Dummy, d2: Dummy) -> None: pass def has_optional_param(x: Optional[int] = None) -> None: pass def has_optional_union_param(x: Optional[Union[int, float]]) -> None: pass def has_optional_return() -> Optional[int]: return None def default_none_parameter(x: int = None) -> None: pass def has_length_exceeds_120_chars( very_long_name_parameter_1: float, very_long_name_parameter_2: float ) -> Optional[float]: return None def has_newtype_param(user_id: UserId) -> None: pass def has_forward_ref() -> Optional["TestFunctionStub"]: pass def has_forward_ref_within_generator() -> Generator['TestFunctionStub', None, int]: pass
TestImportBlockStub
python
instagram__MonkeyType
tests/test_encoding.py
{ "start": 846, "end": 8558 }
class ____: @pytest.mark.parametrize( 'typ', [ # Non-generics NoneType, NotImplementedType, mappingproxy, int, Outer, Outer.Inner, Any, # Simple generics Dict, Dict[Any, Any], Dict[int, str], List, List[str], Optional[str], Set[int], Tuple[int, str, str], Tuple, Tuple[()], # empty tuple Type[Outer], Union[Outer.Inner, str, None], # Nested generics Dict[str, Union[str, int]], List[Optional[str]], # Let's get craaaazy Dict[ str, Union[ Dict[str, int], Set[Outer.Inner], Optional[Dict[str, int]] ] ], ], ) def test_type_round_trip(self, typ): assert type_from_dict(type_to_dict(typ)) == typ assert type_from_json(type_to_json(typ)) == typ @pytest.mark.parametrize( 'typ, expected', [ ( Dict[str, int], { 'elem_types': [ {'module': 'builtins', 'qualname': 'str'}, {'module': 'builtins', 'qualname': 'int'}, ], 'module': 'typing', 'qualname': 'Dict', }, ), ( TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': str}), { 'elem_types': { 'a': {'module': 'builtins', 'qualname': 'int'}, 'b': {'module': 'builtins', 'qualname': 'str'}, }, 'is_typed_dict': True, 'module': 'tests.test_encoding', 'qualname': DUMMY_TYPED_DICT_NAME, }, ), ( TypedDict(DUMMY_TYPED_DICT_NAME, {'a': TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': str})}), { 'elem_types': { 'a': { 'elem_types': { 'a': {'module': 'builtins', 'qualname': 'int'}, 'b': {'module': 'builtins', 'qualname': 'str'}, }, 'is_typed_dict': True, 'module': 'tests.test_encoding', 'qualname': DUMMY_TYPED_DICT_NAME, }, }, 'is_typed_dict': True, 'module': 'tests.test_encoding', 'qualname': DUMMY_TYPED_DICT_NAME, }, ), ], ) def test_type_to_dict(self, typ, expected): assert type_to_dict(typ) == expected @pytest.mark.parametrize( 'type_dict, expected', [ ( { 'elem_types': { 'a': {'module': 'builtins', 'qualname': 'int'}, 'b': {'module': 'builtins', 'qualname': 'str'}, }, 'is_typed_dict': True, 'module': 'tests.test_encoding', 'qualname': DUMMY_TYPED_DICT_NAME, }, TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': str}), ), ], ) def 
test_type_from_dict(self, type_dict, expected): assert type_from_dict(type_dict) == expected @pytest.mark.parametrize( 'type_dict, expected', [ ( { 'elem_types': { 'a': { 'elem_types': { 'a': {'module': 'builtins', 'qualname': 'int'}, 'b': {'module': 'builtins', 'qualname': 'str'}, }, 'is_typed_dict': True, 'module': 'tests.test_encoding', 'qualname': DUMMY_TYPED_DICT_NAME, }, }, 'is_typed_dict': True, 'module': 'tests.test_encoding', 'qualname': DUMMY_TYPED_DICT_NAME, }, TypedDict(DUMMY_TYPED_DICT_NAME, {'a': TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': str})}), ), ], ) def test_type_from_dict_nested(self, type_dict, expected): assert type_from_dict(type_dict) == expected @pytest.mark.parametrize( 'type_dict, expected', [ ( TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': str}), '{"elem_types": {"a": {"module": "builtins", "qualname": "int"},' + ' "b": {"module": "builtins", "qualname": "str"}},' + ' "is_typed_dict": true, "module": "tests.test_encoding", "qualname": "DUMMY_NAME"}', ), ], ) def test_type_to_json(self, type_dict, expected): assert type_to_json(type_dict) == expected @pytest.mark.parametrize( 'type_dict_string, expected', [ ( '{"elem_types": {"a": {"module": "builtins", "qualname": "int"},' + ' "b": {"module": "builtins", "qualname": "str"}},' + ' "is_typed_dict": true, "module": "tests.test_encoding", "qualname": "DUMMY_NAME"}', TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': str}), ), ], ) def test_type_from_json(self, type_dict_string, expected): assert type_from_json(type_dict_string) == expected @pytest.mark.parametrize( 'type_dict', [ (TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': str})), ], ) def test_type_round_trip_typed_dict(self, type_dict): assert type_from_dict(type_to_dict(type_dict)) == type_dict assert type_from_json(type_to_json(type_dict)) == type_dict def test_trace_round_trip(self): trace = CallTrace(dummy_func, {'a': int, 'b': int}, int) assert CallTraceRow.from_trace(trace).to_trace() == trace def 
test_convert_non_type(self): with pytest.raises(InvalidTypeError): type_from_dict({ 'module': Outer.Inner.f.__module__, 'qualname': Outer.Inner.f.__qualname__, }) @pytest.mark.parametrize( 'encoder, typ, expected, should_call_encoder', [ (Mock(), None, None, False), (Mock(return_value='foo'), str, 'foo', True), ] ) def test_maybe_encode_type(self, encoder, typ, expected, should_call_encoder): ret = maybe_encode_type(encoder, typ) if should_call_encoder: encoder.assert_called_with(typ) else: encoder.assert_not_called() assert ret == expected @pytest.mark.parametrize( 'encoder, typ, expected, should_call_encoder', [ (Mock(), None, None, False), (Mock(), 'null', None, False), (Mock(return_value='foo'), 'str', 'foo', True), ] ) def test_maybe_decode_type(self, encoder, typ, expected, should_call_encoder): ret = maybe_decode_type(encoder, typ) if should_call_encoder: encoder.assert_called_with(typ) else: encoder.assert_not_called() assert ret == expected
TestTypeConversion
python
getsentry__sentry
src/sentry/integrations/perforce/client.py
{ "start": 523, "end": 5208 }
class ____(RepositoryClient, CommitContextClient): """ Client for interacting with Perforce server. Uses P4Python library to execute P4 commands. Supports both plaintext and SSL connections. For production use over public internet, SSL is strongly recommended. """ def __init__( self, p4port: str | None = None, user: str | None = None, password: str | None = None, client: str | None = None, ssl_fingerprint: str | None = None, ): """ Initialize Perforce client. Args: p4port: P4PORT string (e.g., 'ssl:host:port', 'tcp:host:port', or 'host:port') user: Perforce username password: Perforce password OR P4 ticket (both are supported) client: Client/workspace name ssl_fingerprint: SSL trust fingerprint for secure connections """ self.p4port = p4port self.ssl_fingerprint = ssl_fingerprint self.user = user or "" self.password = password self.client_name = client self.P4 = P4 self.P4Exception = P4Exception def _connect(self): """Create and connect a P4 instance with SSL support.""" pass def _disconnect(self, p4): """Disconnect P4 instance.""" pass def check_file(self, repo: Repository, path: str, version: str | None) -> object | None: """ Check if a file exists in the depot. Args: repo: Repository object containing depot path (includes stream if specified) path: File path relative to depot version: Not used (streams are part of depot_path) Returns: File info dict if exists, None otherwise """ return None def get_depots(self) -> list[dict[str, Any]]: """ List all depots accessible to the user. Returns: List of depot info dictionaries """ return [] def get_changes( self, depot_path: str, max_changes: int = 20, start_cl: str | None = None ) -> list[dict[str, Any]]: """ Get changelists for a depot path. Args: depot_path: Depot path (e.g., //depot/main/...) 
max_changes: Maximum number of changes to return start_cl: Starting changelist number Returns: List of changelist dictionaries """ return [] def get_blame_for_files( self, files: Sequence[SourceLineInfo], extra: dict[str, Any] ) -> list[FileBlameInfo]: """ Get blame information for multiple files using p4 filelog. Uses 'p4 filelog' + 'p4 describe' which is much faster than 'p4 annotate'. Returns the most recent changelist that modified each file. Note: This does not provide line-specific blame. It returns the most recent changelist for the entire file, which is sufficient for suspect commit detection. Returns a list of FileBlameInfo objects containing commit details for each file. """ return [] def get_file( self, repo: Repository, path: str, ref: str | None, codeowners: bool = False ) -> str: """ Get file contents from Perforce depot. Required by abstract base class but not used (CODEOWNERS feature removed). """ raise NotImplementedError("get_file is not supported for Perforce") def create_comment(self, repo: str, issue_id: str, data: dict[str, Any]) -> Any: """Create comment. Not applicable for Perforce.""" raise NotImplementedError("Perforce does not support issue comments") def update_comment( self, repo: str, issue_id: str, comment_id: str, data: dict[str, Any] ) -> Any: """Update comment. Not applicable for Perforce.""" raise NotImplementedError("Perforce does not support issue comments") def create_pr_comment(self, repo: Repository, pr: PullRequest, data: dict[str, Any]) -> Any: """Create PR comment. Not applicable for Perforce.""" raise NotImplementedError("Perforce does not have native pull requests") def update_pr_comment( self, repo: Repository, pr: PullRequest, pr_comment: PullRequestComment, data: dict[str, Any], ) -> Any: """Update PR comment. Not applicable for Perforce.""" raise NotImplementedError("Perforce does not have native pull requests") def get_merge_commit_sha_from_commit(self, repo: Repository, sha: str) -> str | None: """Get merge commit. 
Not applicable for Perforce.""" return None
PerforceClient
python
realpython__materials
queue/src/queues.py
{ "start": 666, "end": 1033 }
class ____(IterableMixin): def __init__(self): self._elements = [] self._counter = count() def enqueue_with_priority(self, priority, value): element = (-priority, next(self._counter), value) heappush(self._elements, element) def dequeue(self): return heappop(self._elements)[-1] @dataclass(order=True)
PriorityQueue
python
sympy__sympy
sympy/physics/quantum/tests/test_represent.py
{ "start": 1321, "end": 1577 }
class ____(Ket): @classmethod def dual_class(self): return ABra def _represent_default_basis(self, **options): return self._represent_AOp(None, **options) def _represent_AOp(self, basis, **options): return Avec
AKet
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI063.py
{ "start": 2087, "end": 2244 }
class ____(type): @classmethod def __new__(metacls, __name: str, __bases: tuple[type, ...], __namespace: dict, **kwds) -> Self: ... # PYI063
Metaclass2
python
huggingface__transformers
src/transformers/models/vaultgemma/modular_vaultgemma.py
{ "start": 923, "end": 8791 }
class ____(Gemma2Config): r""" This is the configuration class to store the configuration of a [`VaultGemmaModel`]. It is used to instantiate an VaultGemma model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the VaultGemma-7B. e.g. [google/vaultgemma-7b](https://huggingface.co/google/vaultgemma-7b) Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 256000): Vocabulary size of the VaultGemma model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`VaultGemmaModel`] hidden_size (`int`, *optional*, defaults to 2304): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 9216): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 26): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 4): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. head_dim (`int`, *optional*, defaults to 256): The attention head dimension. 
hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"` if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function. max_position_embeddings (`int`, *optional*, defaults to 8192): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. eos_token_id (`int`, *optional*, defaults to 1): End of stream token id. bos_token_id (`int`, *optional*, defaults to 2): Beginning of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `True`): Whether to tie weight embeddings rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. query_pre_attn_scalar (`float`, *optional*, defaults to 256): scaling factor used on the attention scores sliding_window (`int`, *optional*, defaults to 4096): in VaultGemma, every other layer uses sliding window attention. 
This is the size of the sliding window. layer_types (`list`, *optional*): Attention pattern for each layer. final_logit_softcapping (`float`, *optional*, defaults to 30.0): scaling factor when applying tanh softcapping on the logits. attn_logit_softcapping (`float`, *optional*, defaults to 50.0): scaling factor when applying tanh softcapping on the attention scores. ```python >>> from transformers import VaultGemmaModel, VaultGemmaConfig >>> # Initializing a VaultGemma vaultgemma-7b style configuration >>> configuration = VaultGemmaConfig() >>> # Initializing a model from the vaultgemma-7b style configuration >>> model = VaultGemmaModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" def __init__( self, vocab_size: Optional[int] = 256000, hidden_size: Optional[int] = 2304, intermediate_size: Optional[int] = 9216, num_hidden_layers: Optional[int] = 26, num_attention_heads: Optional[int] = 8, num_key_value_heads: Optional[int] = 4, head_dim: Optional[int] = 256, hidden_activation: Optional[str] = "gelu_pytorch_tanh", max_position_embeddings: Optional[int] = 8192, initializer_range: Optional[float] = 0.02, rms_norm_eps: Optional[int] = 1e-6, use_cache: Optional[bool] = True, pad_token_id: Optional[int] = 0, eos_token_id: Optional[int] = 1, bos_token_id: Optional[int] = 2, tie_word_embeddings: Optional[bool] = True, rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None, attention_bias: Optional[bool] = False, attention_dropout: Optional[float] = 0.0, query_pre_attn_scalar: Optional[int] = 256, sliding_window: Optional[int] = 4096, layer_types: Optional[list[str]] = None, final_logit_softcapping: Optional[float] = 30.0, attn_logit_softcapping: Optional[float] = 50.0, **kwargs, ): super().__init__( vocab_size=vocab_size, hidden_size=hidden_size, intermediate_size=intermediate_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, num_key_value_heads=num_key_value_heads, 
head_dim=head_dim, hidden_activation=hidden_activation, max_position_embeddings=max_position_embeddings, initializer_range=initializer_range, rms_norm_eps=rms_norm_eps, use_cache=use_cache, pad_token_id=pad_token_id, eos_token_id=eos_token_id, bos_token_id=bos_token_id, tie_word_embeddings=tie_word_embeddings, rope_parameters=rope_parameters, attention_bias=attention_bias, attention_dropout=attention_dropout, query_pre_attn_scalar=query_pre_attn_scalar, sliding_window=sliding_window, layer_types=layer_types, final_logit_softcapping=final_logit_softcapping, attn_logit_softcapping=attn_logit_softcapping, **kwargs, ) del self.use_bidirectional_attention
VaultGemmaConfig
python
eth-brownie__brownie
brownie/utils/docopt.py
{ "start": 10606, "end": 12237 }
class ____(_LeafPattern): def __init__( self, short: str | None = None, longer: str | None = None, argcount: int = 0, value: list[str] | str | int | None = False, ) -> None: assert argcount in {0, 1} self.short, self.longer, self.argcount = short, longer, argcount self.value = None if value is False and argcount else value @classmethod def parse(cls, option_description: str) -> _Option: short, longer, argcount, value = None, None, 0, False options, description = re.split( r"(?: )|$", option_description.strip(), flags=re.M, maxsplit=1 ) options = options.replace(",", " ").replace("=", " ") for s in options.split(): if s.startswith("--"): longer = s elif s.startswith("-"): short = s else: argcount = 1 if argcount: matched = regex_findall(r"\[default: (.*)\]", description, flags=re.I) value = matched[0] if matched else None return cls(short, longer, argcount, value) def single_match(self, left: list[_LeafPattern]) -> _SingleMatch: for n, pattern in enumerate(left): if self.name == pattern.name: return n, pattern return None, None @property def name(self) -> str | None: return self.longer or self.short def __repr__(self) -> str: return "Option(%r, %r, %r, %r)" % ( self.short, self.longer, self.argcount, self.value, )
_Option
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/genericType28.py
{ "start": 2617, "end": 2680 }
class ____(Contra_TA[Contra_TA[T_co]]): ...
ContraToContra_WithTA
python
pyca__cryptography
tests/x509/test_x509.py
{ "start": 220228, "end": 228005 }
class ____: def test_load_ecdsa_cert(self, backend): _skip_curve_unsupported(backend, ec.SECP384R1()) cert = _load_cert( os.path.join("x509", "ecdsa_root.pem"), x509.load_pem_x509_certificate, ) assert isinstance(cert.signature_hash_algorithm, hashes.SHA384) public_key = cert.public_key() assert isinstance(public_key, ec.EllipticCurvePublicKey) num = public_key.public_numbers() assert num.x == int( "dda7d9bb8ab80bfb0b7f21d2f0bebe73f3335d1abc34eadec69bbcd095f" "6f0ccd00bba615b51467e9e2d9fee8e630c17", 16, ) assert num.y == int( "ec0770f5cf842e40839ce83f416d3badd3a4145936789d0343ee10136c7" "2deae88a7a16bb543ce67dc23ff031ca3e23e", 16, ) assert isinstance(num.curve, ec.SECP384R1) assert isinstance(cert.signature_algorithm_parameters, ec.ECDSA) assert isinstance( cert.signature_algorithm_parameters.algorithm, hashes.SHA384 ) public_key.verify( cert.signature, cert.tbs_certificate_bytes, cert.signature_algorithm_parameters, ) def test_load_ecdsa_cert_null_alg_params(self, backend): """ This test verifies that we successfully load certificates with encoded null parameters in the signature AlgorithmIdentifier. This is invalid, but Java 11 (up to at least 11.0.19) generates certificates with this encoding so we need to tolerate it at the moment. 
""" with pytest.warns(utils.DeprecatedIn41): cert = _load_cert( os.path.join("x509", "custom", "ecdsa_null_alg.pem"), x509.load_pem_x509_certificate, ) assert isinstance(cert.signature_hash_algorithm, hashes.SHA256) assert isinstance(cert.public_key(), ec.EllipticCurvePublicKey) def test_load_bitstring_dn(self): cert = _load_cert( os.path.join("x509", "scottishpower-bitstring-dn.pem"), x509.load_pem_x509_certificate, ) assert cert.subject == x509.Name( [ x509.NameAttribute(x509.NameOID.COMMON_NAME, "ScottishPower"), x509.NameAttribute( x509.NameOID.ORGANIZATIONAL_UNIT_NAME, "02" ), x509.NameAttribute( NameOID.X500_UNIQUE_IDENTIFIER, b"\x00\x70\xb3\xd5\x1f\x30\x5f\x00\x01", _ASN1Type.BitString, ), ] ) assert repr(cert.subject) == ( "<Name(2.5.4.45=#0070b3d51f305f0001,OU=02,CN=ScottishPower)>" ) def test_load_name_attribute_long_form_asn1_tag(self, backend): cert = _load_cert( os.path.join("x509", "custom", "long-form-name-attribute.pem"), x509.load_pem_x509_certificate, ) with pytest.raises(ValueError, match="Long-form"): cert.subject with pytest.raises(ValueError, match="Long-form"): cert.issuer def test_ms_certificate_template(self, backend): cert = _load_cert( os.path.join("x509", "custom", "ms-certificate-template.pem"), x509.load_pem_x509_certificate, ) ext = cert.extensions.get_extension_for_class( x509.MSCertificateTemplate ) tpl = ext.value assert isinstance(tpl, x509.MSCertificateTemplate) assert tpl == x509.MSCertificateTemplate( template_id=x509.ObjectIdentifier("1.2.3.4.5.6.7.8.9.0"), major_version=1, minor_version=None, ) def test_signature(self, backend): cert = _load_cert( os.path.join("x509", "ecdsa_root.pem"), x509.load_pem_x509_certificate, ) assert cert.signature == binascii.unhexlify( b"3065023100adbcf26c3f124ad12d39c30a099773f488368c8827bbe6888d5085" b"a763f99e32de66930ff1ccb1098fdd6cabfa6b7fa0023039665bc2648db89e50" b"dca8d549a2edc7dcd1497f1701b8c8868f4e8c882ba89aa98ac5d100bdf854e2" b"9ae55b7cb32717" ) r, s = 
decode_dss_signature(cert.signature) assert r == int( "adbcf26c3f124ad12d39c30a099773f488368c8827bbe6888d5085a763f99e32" "de66930ff1ccb1098fdd6cabfa6b7fa0", 16, ) assert s == int( "39665bc2648db89e50dca8d549a2edc7dcd1497f1701b8c8868f4e8c882ba89a" "a98ac5d100bdf854e29ae55b7cb32717", 16, ) def test_tbs_certificate_bytes(self, backend): _skip_curve_unsupported(backend, ec.SECP384R1()) cert = _load_cert( os.path.join("x509", "ecdsa_root.pem"), x509.load_pem_x509_certificate, ) assert cert.tbs_certificate_bytes == binascii.unhexlify( b"308201c5a0030201020210055556bcf25ea43535c3a40fd5ab4572300a06082" b"a8648ce3d0403033061310b300906035504061302555331153013060355040a" b"130c446967694365727420496e6331193017060355040b13107777772e64696" b"769636572742e636f6d3120301e06035504031317446967694365727420476c" b"6f62616c20526f6f74204733301e170d3133303830313132303030305a170d3" b"338303131353132303030305a3061310b300906035504061302555331153013" b"060355040a130c446967694365727420496e6331193017060355040b1310777" b"7772e64696769636572742e636f6d3120301e06035504031317446967694365" b"727420476c6f62616c20526f6f742047333076301006072a8648ce3d0201060" b"52b8104002203620004dda7d9bb8ab80bfb0b7f21d2f0bebe73f3335d1abc34" b"eadec69bbcd095f6f0ccd00bba615b51467e9e2d9fee8e630c17ec0770f5cf8" b"42e40839ce83f416d3badd3a4145936789d0343ee10136c72deae88a7a16bb5" b"43ce67dc23ff031ca3e23ea3423040300f0603551d130101ff040530030101f" b"f300e0603551d0f0101ff040403020186301d0603551d0e04160414b3db48a4" b"f9a1c5d8ae3641cc1163696229bc4bc6" ) assert cert.signature_hash_algorithm is not None public_key = cert.public_key() assert isinstance(public_key, ec.EllipticCurvePublicKey) public_key.verify( cert.signature, cert.tbs_certificate_bytes, ec.ECDSA(cert.signature_hash_algorithm), ) def test_load_ecdsa_no_named_curve(self, backend): _skip_curve_unsupported(backend, ec.SECP256R1()) cert = _load_cert( os.path.join("x509", "custom", "ec_no_named_curve.pem"), x509.load_pem_x509_certificate, ) # We map explicit parameters to 
known curves and this cert # contains explicit params for P256, so it should load. pk = cert.public_key() assert isinstance(pk, ec.EllipticCurvePublicKey) assert isinstance(pk.curve, ec.SECP256R1) def test_verify_directly_issued_by_ec(self): issuer_private_key = ec.generate_private_key(ec.SECP256R1()) subject_private_key = ec.generate_private_key(ec.SECP256R1()) ca, cert = _generate_ca_and_leaf( issuer_private_key, subject_private_key ) cert.verify_directly_issued_by(ca) def test_verify_directly_issued_by_ec_bad_sig(self): issuer_private_key = ec.generate_private_key(ec.SECP256R1()) subject_private_key = ec.generate_private_key(ec.SECP256R1()) ca, cert = _generate_ca_and_leaf( issuer_private_key, subject_private_key ) cert_bad_sig = _break_cert_sig(cert) with pytest.raises(InvalidSignature): cert_bad_sig.verify_directly_issued_by(ca)
TestECDSACertificate
python
pallets__werkzeug
src/werkzeug/exceptions.py
{ "start": 25235, "end": 26888 }
class ____: """When passed a dict of code -> exception items it can be used as callable that raises exceptions. If the first argument to the callable is an integer it will be looked up in the mapping, if it's a WSGI application it will be raised in a proxy exception. The rest of the arguments are forwarded to the exception constructor. """ def __init__( self, mapping: dict[int, type[HTTPException]] | None = None, extra: dict[int, type[HTTPException]] | None = None, ) -> None: if mapping is None: mapping = default_exceptions self.mapping = dict(mapping) if extra is not None: self.mapping.update(extra) def __call__( self, code: int | SansIOResponse, *args: t.Any, **kwargs: t.Any ) -> t.NoReturn: from .sansio.response import Response if isinstance(code, Response): raise HTTPException(response=code) if code not in self.mapping: raise LookupError(f"no exception for {code!r}") raise self.mapping[code](*args, **kwargs) def abort(status: int | SansIOResponse, *args: t.Any, **kwargs: t.Any) -> t.NoReturn: """Raises an :py:exc:`HTTPException` for the given status code or WSGI application. If a status code is given, it will be looked up in the list of exceptions and will raise that exception. If passed a WSGI application, it will wrap it in a proxy WSGI exception and raise that:: abort(404) # 404 Not Found abort(Response('Hello World')) """ _aborter(status, *args, **kwargs) _aborter: Aborter = Aborter()
Aborter
python
Pylons__pyramid
src/pyramid/util.py
{ "start": 1708, "end": 2134 }
class ____: # this is just like reify but does not store the computed result on # the class such that subsequent invocations invoke the callable again def __init__(self, wrapped): self.wrapped = wrapped functools.update_wrapper(self, wrapped) def __get__(self, obj, type=None): if obj is None: # pragma: no cover return self return self.wrapped(obj)
SettableProperty
python
django__django
tests/contenttypes_tests/models.py
{ "start": 1012, "end": 1243 }
class ____(models.Model): """ Fake model not defining ``get_absolute_url`` for ContentTypesTests.test_shortcut_view_without_get_absolute_url() """ name = models.CharField(max_length=30, unique=True)
FooWithoutUrl
python
streamlit__streamlit
lib/tests/streamlit/elements/alert_test.py
{ "start": 2370, "end": 4335 }
class ____(DeltaGeneratorTestCase): """Test ability to marshall Alert proto.""" def test_st_error(self): """Test st.error.""" st.error("some error") el = self.get_delta_from_queue().new_element assert el.alert.body == "some error" assert el.alert.format == Alert.ERROR assert ( el.alert.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert el.alert.width_config.use_stretch def test_st_error_with_icon(self): """Test st.error with icon.""" st.error("some error", icon="😱") el = self.get_delta_from_queue().new_element assert el.alert.body == "some error" assert el.alert.icon == "😱" assert el.alert.format == Alert.ERROR assert ( el.alert.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert el.alert.width_config.use_stretch def test_st_error_with_width_pixels(self): """Test st.error with width in pixels.""" st.error("some error", width=500) el = self.get_delta_from_queue().new_element assert el.alert.body == "some error" assert el.alert.format == Alert.ERROR assert ( el.alert.width_config.WhichOneof("width_spec") == WidthConfigFields.PIXEL_WIDTH.value ) assert el.alert.width_config.pixel_width == 500 def test_st_error_with_width_stretch(self): """Test st.error with width set to stretch.""" st.error("some error", width="stretch") el = self.get_delta_from_queue().new_element assert el.alert.body == "some error" assert el.alert.format == Alert.ERROR assert ( el.alert.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert el.alert.width_config.use_stretch
StErrorAPITest
python
doocs__leetcode
solution/1300-1399/1317.Convert Integer to the Sum of Two No-Zero Integers/Solution.py
{ "start": 0, "end": 186 }
class ____: def getNoZeroIntegers(self, n: int) -> List[int]: for a in count(1): b = n - a if "0" not in f"{a}{b}": return [a, b]
Solution