language: stringclasses (1 value)
repo: stringclasses (346 values)
path: stringlengths (6 to 201)
class_span: dict
source: stringlengths (21 to 2.38M)
target: stringlengths (1 to 96)
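The columns above describe a masked-class-name dataset: each record carries a repo, a file path, a class_span giving character offsets of the class within that file, a source snippet whose class name is blanked out as ____, and the target name to recover. As a minimal sketch, assuming the records were exported as JSON Lines under a hypothetical filename, they could be loaded and inspected like this:

import json

# Hypothetical local export of the records shown below, one JSON object per line.
# The filename is an assumption; point it at wherever the data actually lives.
DATA_FILE = "class_name_records.jsonl"

with open(DATA_FILE, encoding="utf-8") as f:
    records = [json.loads(line) for line in f]

for rec in records[:3]:
    span = rec["class_span"]  # e.g. {"start": 1552, "end": 4462}
    length = span["end"] - span["start"]  # size of the extracted class in characters
    print(rec["repo"], rec["path"], rec["target"], length)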
python
mlflow__mlflow
mlflow/genai/judges/tools/list_spans.py
{ "start": 1552, "end": 4462 }
class ____(JudgeTool): """ Tool for listing and analyzing spans within a trace. This tool provides functionality to extract and analyze span information from MLflow traces, including span names, types, durations, and metadata. """ @property def name(self) -> str: return "list_spans" def get_definition(self) -> ToolDefinition: return ToolDefinition( function=FunctionToolDefinition( name="list_spans", description=( "List information about spans within a trace with pagination support. " "Returns span metadata including span_id, name, span_type, timing data " "(start_time_ms, end_time_ms, duration_ms), parent_id, status, and " "attribute_names (list of attribute keys). This provides an overview of " "all spans but does not fetch full span content." ), parameters=ToolParamsSchema( type="object", properties={ "max_results": ParamProperty( type="integer", description="Maximum number of spans to return (default: 100)", ), "page_token": ParamProperty( type="string", description="Token for retrieving the next page of results", ), }, required=[], ), ), type="function", ) def invoke( self, trace: Trace, max_results: int = 100, page_token: str | None = None ) -> ListSpansResult: """ List spans from a trace with pagination support. Args: trace: The MLflow trace object to analyze max_results: Maximum number of spans to return (default: 100) page_token: Token for retrieving the next page of results Returns: ListSpansResult containing spans list and optional next page token """ if not trace or not trace.data or not trace.data.spans: return ListSpansResult(spans=[]) start_index = parse_page_token(page_token) # Get the slice of spans for this page all_spans = trace.data.spans end_index = start_index + max_results page_spans = all_spans[start_index:end_index] # Build span info for this page spans_info = [_create_span_info(span) for span in page_spans] # Determine next page token - only include if there are more pages next_page_token = None if end_index < len(all_spans): next_page_token = create_page_token(end_index) return ListSpansResult(spans=spans_info, next_page_token=next_page_token)
ListSpansTool
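Each source snippet masks exactly one class name as ____, and the record's target (here ListSpansTool) is the string that fills it. A minimal sketch of restoring the original definition, assuming that single-mask convention holds for every record:

def restore_class_name(source: str, target: str) -> str:
    """Fill the ____ placeholder in a masked snippet with its target class name."""
    # Replace only the first occurrence, since the mask appears once per record.
    return source.replace("____", target, 1)

# For the record above:
# restore_class_name("class ____(JudgeTool): ...", "ListSpansTool")
# yields a snippet starting with "class ListSpansTool(JudgeTool): ..."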
python
pypa__pip
src/pip/_vendor/rich/progress.py
{ "start": 19207, "end": 20964 }
class ____(ProgressColumn): """A column with a 'spinner' animation. Args: spinner_name (str, optional): Name of spinner animation. Defaults to "dots". style (StyleType, optional): Style of spinner. Defaults to "progress.spinner". speed (float, optional): Speed factor of spinner. Defaults to 1.0. finished_text (TextType, optional): Text used when task is finished. Defaults to " ". """ def __init__( self, spinner_name: str = "dots", style: Optional[StyleType] = "progress.spinner", speed: float = 1.0, finished_text: TextType = " ", table_column: Optional[Column] = None, ): self.spinner = Spinner(spinner_name, style=style, speed=speed) self.finished_text = ( Text.from_markup(finished_text) if isinstance(finished_text, str) else finished_text ) super().__init__(table_column=table_column) def set_spinner( self, spinner_name: str, spinner_style: Optional[StyleType] = "progress.spinner", speed: float = 1.0, ) -> None: """Set a new spinner. Args: spinner_name (str): Spinner name, see python -m rich.spinner. spinner_style (Optional[StyleType], optional): Spinner style. Defaults to "progress.spinner". speed (float, optional): Speed factor of spinner. Defaults to 1.0. """ self.spinner = Spinner(spinner_name, style=spinner_style, speed=speed) def render(self, task: "Task") -> RenderableType: text = ( self.finished_text if task.finished else self.spinner.render(task.get_time()) ) return text
SpinnerColumn
python
getsentry__sentry
src/sentry/testutils/cases.py
{ "start": 35150, "end": 35846 }
class ____(TestCase): @cached_property def runner(self) -> CliRunner: return CliRunner() @property def command(self): raise NotImplementedError(f"implement for {type(self).__module__}.{type(self).__name__}") default_args: list[str] = [] def invoke(self, *args, **kwargs): args += tuple(self.default_args) return self.runner.invoke(self.command, args, obj={}, **kwargs) @pytest.mark.usefixtures("browser") # Assume acceptance tests are not using self-hosted, since most devs are developing for SaaS and # generally self-hosted specific pages should not appear during acceptance tests @override_settings(SENTRY_SELF_HOSTED=False)
CliTestCase
python
squidfunk__mkdocs-material
material/plugins/blog/author.py
{ "start": 1425, "end": 1672 }
class ____(Config): name = Type(str) description = Type(str) avatar = Type(str) slug = Optional(Type(str)) url = Optional(Type(str)) # ----------------------------------------------------------------------------- # Authors
Author
python
streamlit__streamlit
lib/tests/streamlit/components_test.py
{ "start": 2618, "end": 10306 }
class ____(unittest.TestCase): """Test component declaration.""" def setUp(self) -> None: config = RuntimeConfig( script_path="mock/script/path.py", command_line=None, component_registry=LocalComponentRegistry(), media_file_storage=MemoryMediaFileStorage("/mock/media"), uploaded_file_manager=MemoryUploadedFileManager("/mock/upload"), ) self.runtime = Runtime(config) # declare_component needs a script_run_ctx to be set add_script_run_ctx(threading.current_thread(), create_mock_script_run_ctx()) def tearDown(self) -> None: Runtime._instance = None def mock_isdir(self, path: str) -> bool: return path == PATH or path == os.path.abspath(PATH) def test_name(self): """Test component name generation""" # Test a component defined in a module with no package component = components.declare_component("foo", url=URL) assert component.name == "tests.streamlit.components_test.foo" # Test a component defined in __init__.py from tests.streamlit.component_test_data import component as init_component assert init_component.name == "tests.streamlit.component_test_data.foo" # Test a component defined in a module within a package from tests.streamlit.component_test_data.outer_module import ( component as outer_module_component, ) assert ( outer_module_component.name == "tests.streamlit.component_test_data.outer_module.foo" ) # Test a component defined in module within a nested package from tests.streamlit.component_test_data.nested.inner_module import ( component as inner_module_component, ) assert ( inner_module_component.name == "tests.streamlit.component_test_data.nested.inner_module.foo" ) def test_only_path_str(self): """Succeed when a path is provided via str.""" with mock.patch( "streamlit.components.v1.component_registry.os.path.isdir", side_effect=self.mock_isdir, ): component = components.declare_component("test", path=PATH) assert component.path == PATH assert component.url is None assert ( ComponentRegistry.instance().get_component_path(component.name) == component.abspath ) def test_only_path_pathlib(self): """Succeed when a path is provided via Path.""" with mock.patch( "streamlit.components.v1.component_registry.os.path.isdir", side_effect=self.mock_isdir, ): component = components.declare_component("test", path=Path(PATH)) assert component.path == PATH assert component.url is None assert ( ComponentRegistry.instance().get_component_path(component.name) == component.abspath ) def test_only_url(self): """Succeed when a URL is provided.""" component = components.declare_component("test", url=URL) assert component.url == URL assert component.path is None assert ( ComponentRegistry.instance().get_component_path("components_test") == component.abspath ) def test_both_path_and_url_ok(self): with mock.patch( "streamlit.components.v1.component_registry.os.path.isdir", side_effect=self.mock_isdir, ): component = components.declare_component("test", path=PATH, url=URL) assert component.url == URL assert component.path == PATH @patch_config_options( {"server.customComponentBaseUrlPath": "https://example.com/my/custom/component"} ) def test_url_via_base_path_config(self): with mock.patch( "streamlit.components.v1.component_registry.os.path.isdir", side_effect=self.mock_isdir, ): component = components.declare_component("test", path=PATH) assert ( component.url == "https://example.com/my/custom/component/tests.streamlit.components_test.test/" ) assert component.path == PATH def test_no_path_and_no_url(self): """Fail if neither path nor url is provided.""" with pytest.raises(StreamlitAPIException) as 
exception_message: components.declare_component("test", path=None, url=None) assert str(exception_message.value) == "Either 'path' or 'url' must be set." def test_module_name_not_none(self): caller_frame = inspect.currentframe() assert caller_frame is not None module_name = _get_module_name(caller_frame=caller_frame) component = components.declare_component("test", url=URL) assert ( ComponentRegistry.instance().get_module_name(component.name) == module_name ) def test_get_registered_components(self): component1 = components.declare_component("test1", url=URL) component2 = components.declare_component("test2", url=URL) component3 = components.declare_component("test3", url=URL) expected_registered_component_names = { component1.name, component2.name, component3.name, } registered_components = ComponentRegistry.instance().get_components() assert len(registered_components) == 3 registered_component_names = { component.name for component in registered_components } assert registered_component_names == expected_registered_component_names def test_when_registry_not_explicitly_initialized_return_defaultregistry(self): ComponentRegistry._instance = None components.declare_component("test", url=URL) assert isinstance(ComponentRegistry.instance(), LocalComponentRegistry) @patch("streamlit.components.v1.component_registry.inspect.currentframe") def test_declare_component_raises_runtime_error_if_current_frame_is_none( self, mock_currentframe ): """Test that declare_component raises RuntimeError if inspect.currentframe returns None.""" mock_currentframe.return_value = None with pytest.raises( RuntimeError, match=r"current_frame is None. This should never happen." ): components.declare_component("test_component", url="http://example.com") @patch("streamlit.components.v1.component_registry.inspect.currentframe") def test_declare_component_raises_runtime_error_if_caller_frame_is_none( self, mock_currentframe ): """Test that declare_component raises RuntimeError if inspect.currentframe().f_back is None.""" mock_frame = MagicMock() mock_frame.f_back = None mock_currentframe.return_value = mock_frame with pytest.raises( RuntimeError, match=r"caller_frame is None. This should never happen." ): components.declare_component("test_component", url="http://example.com") @patch("streamlit.components.v1.component_registry.inspect.getmodule") def test_declare_component_raises_runtime_error_if_module_is_none( self, mock_getmodule ): """Test that declare_component raises RuntimeError if inspect.getmodule returns None.""" mock_getmodule.return_value = None with pytest.raises( RuntimeError, match=r"module is None. This should never happen." ): components.declare_component("test_component", url="http://example.com")
DeclareComponentTest
python
pytorch__pytorch
torch/_dynamo/variables/lists.py
{ "start": 1672, "end": 12635 }
class ____(VariableTracker): @staticmethod def cls_for_instance(obj: Any) -> type["BaseListVariable"]: return BaseListVariable.cls_for(type(obj)) @staticmethod def cls_for(obj: Any) -> type: return { iter: ListIteratorVariable, list: ListVariable, slice: SliceVariable, torch.Size: SizeVariable, tuple: TupleVariable, odict_values: ListVariable, torch.nn.ParameterList: ListVariable, torch.nn.ModuleList: ListVariable, collections.deque: DequeVariable, }[obj] def __init__( self, items: list[VariableTracker], **kwargs: Any, ) -> None: super().__init__(**kwargs) assert isinstance(items, list) assert all(isinstance(x, VariableTracker) for x in items) self.items: list[VariableTracker] = items def _as_proxy(self) -> list[Any]: return [x.as_proxy() for x in self.items] def modified( self, items: list[VariableTracker], **kwargs: Any ) -> "BaseListVariable": return type(self)(items, **kwargs) @property def value(self) -> Any: return self.as_python_constant() def debug_repr_helper(self, prefix: str, suffix: str) -> str: return prefix + ", ".join(i.debug_repr() for i in self.items) + suffix def as_python_constant(self) -> Any: return self.python_type()([x.as_python_constant() for x in self.items]) def as_proxy(self) -> Any: assert self.python_type() is not SizeVariable return self.python_type()(self._as_proxy()) def getitem_const( self, tx: "InstructionTranslator", arg: VariableTracker ) -> VariableTracker: from .tensor import SymNodeVariable if isinstance(arg, SymNodeVariable): index = arg.sym_num else: index = arg.as_python_constant() if isinstance(index, slice): if index.step == 0: msg = ConstantVariable.create("slice step cannot be zero") raise_observed_exception(ValueError, tx, args=[msg]) # Set source to None because slicing a list gives a new local return self.clone( items=self.items[index], source=None, mutation_type=ValueMutationNew() if self.mutation_type else None, ) else: assert isinstance(index, (int, torch.SymInt)) try: return self.items[index] except IndexError: raise_observed_exception( IndexError, tx, args=["list index out of range"] ) def unpack_var_sequence(self, tx: "InstructionTranslator") -> list[VariableTracker]: return list(self.items) def call_tree_map_branch( self, tx: "InstructionTranslator", tree_map_fn: UserFunctionVariable, map_fn: VariableTracker, rest: Sequence[VariableTracker], tree_map_kwargs: dict[str, VariableTracker], ) -> VariableTracker: if not isinstance(self, (ListVariable, TupleVariable)): return self._tree_map_fallback( tx, tree_map_fn, map_fn, rest, tree_map_kwargs ) other_lists: list[BaseListVariable] = [] for candidate in rest: if ( not isinstance(candidate, BaseListVariable) or len(candidate.items) != len(self.items) or self.python_type() != candidate.python_type() ): return self._tree_map_fallback( tx, tree_map_fn, map_fn, rest, tree_map_kwargs ) other_lists.append(candidate) new_items: list[VariableTracker] = [] for idx, item in enumerate(self.items): sibling_leaves = [candidate.items[idx] for candidate in other_lists] new_items.append( item.call_tree_map( tx, tree_map_fn, map_fn, sibling_leaves, tree_map_kwargs, ) ) return self.clone( items=new_items, source=None, mutation_type=ValueMutationNew(), ) def call_method( self, tx: "InstructionTranslator", name: str, args: list[VariableTracker], kwargs: dict[str, VariableTracker], ) -> VariableTracker: if name == "__getitem__": from .tensor import TensorVariable if kwargs or len(args) != 1: raise_args_mismatch( tx, name, "1 args and 0 kwargs", f"{len(args)} args and {len(kwargs)} kwargs", ) if 
isinstance(args[0], TensorVariable): value = get_fake_value(args[0].as_proxy().node, tx) if value.constant is not None and value.constant.numel() == 1: value = variables.ConstantVariable.create(value.constant.item()) else: unimplemented( gb_type="Indexing list with non-scalar tensor", context=f"call_method {self} {name} {args} {kwargs}", explanation=( "Attempted to index list-like object with tensor with > 1 element." ), hints=[*graph_break_hints.USER_ERROR], ) else: value = args[0] if value.python_type() not in (int, slice): msg = f"indices must be integers or slices, not {value.python_type()}" raise_observed_exception(TypeError, tx, args=[ConstantVariable(msg)]) return self.getitem_const(tx, value) elif name == "__contains__": if kwargs or len(args) != 1: raise_args_mismatch( tx, name, "1 args and 0 kwargs", f"{len(args)} args and {len(kwargs)} kwargs", ) return iter_contains(self.unpack_var_sequence(tx), args[0], tx) elif name == "index": if not len(args): raise_args_mismatch( tx, name, "0 args and 0 kwargs", f"{len(args)} args and {len(kwargs)} kwargs", ) return tx.inline_user_function_return( VariableTracker.build(tx, polyfills.index), [self] + list(args), kwargs, ) elif name == "count": if len(args) != 1: raise_args_mismatch( tx, name, "1 args and 0 kwargs", f"{len(args)} args and {len(kwargs)} kwargs", ) return VariableTracker.build(tx, operator.countOf).call_function( tx, [self, args[0]], kwargs, ) elif name in ("__add__", "__iadd__"): if kwargs or len(args) != 1: raise_args_mismatch( tx, name, "1 args and 0 kwargs", f"{len(args)} args and {len(kwargs)} kwargs", ) if type(self) is not type(args[0]): tp_name = self.python_type_name() other = args[0].python_type_name() msg_vt = ConstantVariable.create( f'can only concatenate {tp_name} (not "{other}") to {tp_name}' ) raise_observed_exception(TypeError, tx, args=[msg_vt]) if name == "__add__": return type(self)(self.items + args[0].items, source=self.source) # type: ignore[attr-defined] else: self.items += args[0].items # type: ignore[attr-defined] return self elif name in ("__mul__", "__imul__"): if kwargs or len(args) != 1: raise_args_mismatch( tx, name, "1 args and 0 kwargs", f"{len(args)} args and {len(kwargs)} kwargs", ) if not (args[0].is_python_constant() and args[0].python_type() is int): msg_vt = ConstantVariable.create( f"can't multiply sequence by non-int type of '{args[0].python_type_name()}'" ) raise_observed_exception(TypeError, tx, args=[msg_vt]) val = args[0].as_python_constant() if name == "__mul__": return type(self)(self.items * val, source=self.source) else: self.items *= val return self elif name in cmp_name_to_op_mapping: if len(args) != 1: raise_args_mismatch( tx, name, "1 args and 0 kwargs", f"{len(args)} args and {len(kwargs)} kwargs", ) left = self right = args[0] # TODO this type check logic mirrors the following # https://github.com/python/cpython/blob/a1c52d1265c65bcf0d9edf87e143843ad54f9b8f/Objects/object.c#L991-L1007 # But we should probably move it up the stack to so that we don't # need to duplicate it for different VTs. 
if not isinstance(left, BaseListVariable) or not isinstance( right, BaseListVariable ): if name == "__eq__": return variables.BuiltinVariable(operator.is_).call_function( tx, (left, right), {} ) elif name == "__ne__": return variables.BuiltinVariable(operator.is_not).call_function( tx, (left, right), {} ) else: op_str = cmp_name_to_op_str_mapping[name] left_ty = left.python_type_name() right_ty = right.python_type_name() msg = f"{op_str} not supported between instances of '{left_ty}' and '{right_ty}'" raise_observed_exception(TypeError, tx, args=[msg]) return variables.UserFunctionVariable(polyfills.list_cmp).call_function( tx, [variables.BuiltinVariable(cmp_name_to_op_mapping[name]), left, right], {}, ) elif name == "__iter__": return ListIteratorVariable(self.items, mutation_type=ValueMutationNew()) return super().call_method(tx, name, args, kwargs)
BaseListVariable
python
dagster-io__dagster
python_modules/automation/automation/dagster_docs/watcher.py
{ "start": 2733, "end": 5060 }
class ____(FileSystemEventHandler): """File handler that respects .gitignore patterns.""" def __init__(self, root_path: Path, parent_watcher: "ChangedFilesWatcher"): self.root_path = root_path self.parent_watcher = parent_watcher self.gitignore_spec = self._load_gitignore_patterns() def _load_gitignore_patterns(self) -> pathspec.PathSpec: """Load .gitignore patterns using pathspec library.""" gitignore_path = self.root_path / ".gitignore" if not gitignore_path.exists(): return pathspec.PathSpec.from_lines("gitwildmatch", []) with open(gitignore_path) as f: return pathspec.PathSpec.from_lines("gitwildmatch", f.readlines()) def _should_ignore_path(self, file_path: Path) -> bool: """Check if path should be ignored based on .gitignore.""" # Get relative path from repo root try: rel_path = file_path.relative_to(self.root_path) return self.gitignore_spec.match_file(str(rel_path)) except ValueError: return True # Outside repo, ignore def on_modified(self, event) -> None: """Handle file modification events.""" if event.is_directory: return file_path = Path(str(event.src_path)) # Only watch .py files that aren't gitignored if file_path.suffix == ".py" and not self._should_ignore_path(file_path): self.parent_watcher._on_file_changed(file_path) # noqa: SLF001 def on_moved(self, event) -> None: """Handle file move events (common with atomic saves).""" if event.is_directory: return # Check if the destination is a .py file and not gitignored dest_path = Path(str(event.dest_path)) if dest_path.suffix == ".py" and not self._should_ignore_path(dest_path): self.parent_watcher._on_file_changed(dest_path) # noqa: SLF001 def on_created(self, event) -> None: """Handle file creation events (some editors recreate files).""" if event.is_directory: return file_path = Path(str(event.src_path)) if file_path.suffix == ".py" and not self._should_ignore_path(file_path): self.parent_watcher._on_file_changed(file_path) # noqa: SLF001
GitignoreAwareHandler
python
doocs__leetcode
solution/2000-2099/2031.Count Subarrays With More Ones Than Zeros/Solution.py
{ "start": 0, "end": 390 }
class ____: __slots__ = ["n", "c"] def __init__(self, n: int): self.n = n self.c = [0] * (n + 1) def update(self, x: int, v: int): while x <= self.n: self.c[x] += v x += x & -x def query(self, x: int) -> int: s = 0 while x: s += self.c[x] x -= x & -x return s
BinaryIndexedTree
python
django__django
tests/model_fields/models.py
{ "start": 3149, "end": 3250 }
class ____(models.Model): s = models.SlugField(max_length=255, allow_unicode=True)
UnicodeSlugField
python
davidhalter__parso
parso/python/tree.py
{ "start": 29563, "end": 29692 }
class ____(KeywordStatement): __slots__ = () @property def assertion(self): return self.children[1]
AssertStmt
python
walkccc__LeetCode
solutions/527. Word Abbreviation/527.py
{ "start": 0, "end": 925 }
class ____: def wordsAbbreviation(self, words: list[str]) -> list[str]: n = len(words) def getAbbrev(s: str, prefixIndex: int) -> str: n = len(s) num = n - (prefixIndex + 1) - 1 numLength = 1 if num < 10 else (2 if num < 100 else 3) abbrevLength = (prefixIndex + 1) + numLength + 1 if abbrevLength >= n: return s return s[:prefixIndex + 1] + str(num) + s[-1] ans = [getAbbrev(word, 0) for word in words] # prefix[i] := ans[i] takes words[i][0..prefix[i]] prefix = [0] * n for i in range(n): while True: dupeIndices = [] for j in range(i + 1, n): if ans[i] == ans[j]: dupeIndices.append(j) if not dupeIndices: break dupeIndices.append(i) for index in dupeIndices: prefix[index] += 1 ans[index] = getAbbrev(words[index], prefix[index]) return ans
Solution
python
sphinx-doc__sphinx
sphinx/builders/linkcheck.py
{ "start": 1986, "end": 2144 }
class ____: def __repr__(self) -> str: return '_SENTINEL_LAR' def __reduce__(self) -> str: return self.__class__.__name__
_SENTINEL_LAR
python
numpy__numpy
numpy/lib/tests/test_recfunctions.py
{ "start": 32751, "end": 39991 }
class ____: def _create_arrays(self): a = np.array(list(zip(np.arange(10), np.arange(50, 60), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('c', int)]) b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('d', int)]) return a, b def test_inner_join(self): # Basic test of join_by a, b = self._create_arrays() test = join_by('a', a, b, jointype='inner') control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), (9, 59, 69, 109, 104)], dtype=[('a', int), ('b1', int), ('b2', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_join(self): a, b = self._create_arrays() # Fixme, this test is broken #test = join_by(('a', 'b'), a, b) #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), # (7, 57, 107, 102), (8, 58, 108, 103), # (9, 59, 109, 104)], # dtype=[('a', int), ('b', int), # ('c', int), ('d', int)]) #assert_equal(test, control) join_by(('a', 'b'), a, b) np.array([(5, 55, 105, 100), (6, 56, 106, 101), (7, 57, 107, 102), (8, 58, 108, 103), (9, 59, 109, 104)], dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) def test_join_subdtype(self): # tests the bug in https://stackoverflow.com/q/44769632/102441 foo = np.array([(1,)], dtype=[('key', int)]) bar = np.array([(1, np.array([1, 2, 3]))], dtype=[('key', int), ('value', 'uint16', 3)]) res = join_by('key', foo, bar) assert_equal(res, bar.view(ma.MaskedArray)) def test_outer_join(self): a, b = self._create_arrays() test = join_by(('a', 'b'), a, b, 'outer') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), (4, 54, 104, -1), (5, 55, 105, -1), (5, 65, -1, 100), (6, 56, 106, -1), (6, 66, -1, 101), (7, 57, 107, -1), (7, 67, -1, 102), (8, 58, 108, -1), (8, 68, -1, 103), (9, 59, 109, -1), (9, 69, -1, 104), (10, 70, -1, 105), (11, 71, -1, 106), (12, 72, -1, 107), (13, 73, -1, 108), (14, 74, -1, 109)], mask=[(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 0, 1), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0), (0, 0, 1, 0)], dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_leftouter_join(self): a, b = self._create_arrays() test = join_by(('a', 'b'), a, b, 'leftouter') control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), (2, 52, 102, -1), (3, 53, 103, -1), (4, 54, 104, -1), (5, 55, 105, -1), (6, 56, 106, -1), (7, 57, 107, -1), (8, 58, 108, -1), (9, 59, 109, -1)], mask=[(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1)], dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) assert_equal(test, control) def test_different_field_order(self): # gh-8940 a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) # this should not give a FutureWarning: j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False) assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2']) def test_duplicate_keys(self): a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b) def test_same_name_different_dtypes_key(self): a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')]) b_dtype = np.dtype([('key', 'S10'), ('value', 
'<f4')]) expected_dtype = np.dtype([ ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')]) a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype) b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype) res = join_by('key', a, b) assert_equal(res.dtype, expected_dtype) def test_same_name_different_dtypes(self): # gh-9338 a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')]) b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')]) expected_dtype = np.dtype([ ('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')]) a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype) b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype) res = join_by('key', a, b) assert_equal(res.dtype, expected_dtype) def test_subarray_key(self): a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')]) a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype) b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')]) b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype) expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')]) expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype) res = join_by('pos', a, b) assert_equal(res.dtype, expected_dtype) assert_equal(res, expected) def test_padded_dtype(self): dt = np.dtype('i1,f4', align=True) dt.names = ('k', 'v') assert_(len(dt.descr), 3) # padding field is inserted a = np.array([(1, 3), (3, 2)], dt) b = np.array([(1, 1), (2, 2)], dt) res = join_by('k', a, b) # no padding fields remain expected_dtype = np.dtype([ ('k', 'i1'), ('v1', 'f4'), ('v2', 'f4') ]) assert_equal(res.dtype, expected_dtype)
TestJoinBy
python
doocs__leetcode
solution/0000-0099/0090.Subsets II/Solution.py
{ "start": 0, "end": 462 }
class ____: def subsetsWithDup(self, nums: List[int]) -> List[List[int]]: def dfs(i: int): if i == len(nums): ans.append(t[:]) return t.append(nums[i]) dfs(i + 1) x = t.pop() while i + 1 < len(nums) and nums[i + 1] == x: i += 1 dfs(i + 1) nums.sort() ans = [] t = [] dfs(0) return ans
Solution
python
pennersr__django-allauth
allauth/headless/mfa/inputs.py
{ "start": 1802, "end": 1886 }
class ____(AuthenticateWebAuthnForm, inputs.Input): pass
AuthenticateWebAuthnInput
python
spack__spack
lib/spack/spack/vendor/ruamel/yaml/events.py
{ "start": 5314, "end": 5379 }
class ____(CollectionEndEvent): __slots__ = ()
SequenceEndEvent
python
pytorch__pytorch
torch/testing/_internal/common_quantization.py
{ "start": 100585, "end": 100827 }
class ____(torch.nn.Module): def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12) def forward(self, indices): return self.emb(indices)
EmbeddingModule
python
fastapi__sqlmodel
docs_src/tutorial/fastapi/teams/tutorial001_py39.py
{ "start": 1058, "end": 4871 }
class ____(SQLModel): name: Optional[str] = None secret_name: Optional[str] = None age: Optional[int] = None team_id: Optional[int] = None sqlite_file_name = "database.db" sqlite_url = f"sqlite:///{sqlite_file_name}" connect_args = {"check_same_thread": False} engine = create_engine(sqlite_url, echo=True, connect_args=connect_args) def create_db_and_tables(): SQLModel.metadata.create_all(engine) def get_session(): with Session(engine) as session: yield session app = FastAPI() @app.on_event("startup") def on_startup(): create_db_and_tables() @app.post("/heroes/", response_model=HeroPublic) def create_hero(*, session: Session = Depends(get_session), hero: HeroCreate): db_hero = Hero.model_validate(hero) session.add(db_hero) session.commit() session.refresh(db_hero) return db_hero @app.get("/heroes/", response_model=list[HeroPublic]) def read_heroes( *, session: Session = Depends(get_session), offset: int = 0, limit: int = Query(default=100, le=100), ): heroes = session.exec(select(Hero).offset(offset).limit(limit)).all() return heroes @app.get("/heroes/{hero_id}", response_model=HeroPublic) def read_hero(*, session: Session = Depends(get_session), hero_id: int): hero = session.get(Hero, hero_id) if not hero: raise HTTPException(status_code=404, detail="Hero not found") return hero @app.patch("/heroes/{hero_id}", response_model=HeroPublic) def update_hero( *, session: Session = Depends(get_session), hero_id: int, hero: HeroUpdate ): db_hero = session.get(Hero, hero_id) if not db_hero: raise HTTPException(status_code=404, detail="Hero not found") hero_data = hero.model_dump(exclude_unset=True) db_hero.sqlmodel_update(hero_data) session.add(db_hero) session.commit() session.refresh(db_hero) return db_hero @app.delete("/heroes/{hero_id}") def delete_hero(*, session: Session = Depends(get_session), hero_id: int): hero = session.get(Hero, hero_id) if not hero: raise HTTPException(status_code=404, detail="Hero not found") session.delete(hero) session.commit() return {"ok": True} @app.post("/teams/", response_model=TeamPublic) def create_team(*, session: Session = Depends(get_session), team: TeamCreate): db_team = Team.model_validate(team) session.add(db_team) session.commit() session.refresh(db_team) return db_team @app.get("/teams/", response_model=list[TeamPublic]) def read_teams( *, session: Session = Depends(get_session), offset: int = 0, limit: int = Query(default=100, le=100), ): teams = session.exec(select(Team).offset(offset).limit(limit)).all() return teams @app.get("/teams/{team_id}", response_model=TeamPublic) def read_team(*, team_id: int, session: Session = Depends(get_session)): team = session.get(Team, team_id) if not team: raise HTTPException(status_code=404, detail="Team not found") return team @app.patch("/teams/{team_id}", response_model=TeamPublic) def update_team( *, session: Session = Depends(get_session), team_id: int, team: TeamUpdate, ): db_team = session.get(Team, team_id) if not db_team: raise HTTPException(status_code=404, detail="Team not found") team_data = team.model_dump(exclude_unset=True) db_team.sqlmodel_update(team_data) session.add(db_team) session.commit() session.refresh(db_team) return db_team @app.delete("/teams/{team_id}") def delete_team(*, session: Session = Depends(get_session), team_id: int): team = session.get(Team, team_id) if not team: raise HTTPException(status_code=404, detail="Team not found") session.delete(team) session.commit() return {"ok": True}
HeroUpdate
python
GoogleCloudPlatform__python-docs-samples
dialogflow/streaming_transcription.py
{ "start": 1740, "end": 9488 }
class ____: """Opens a recording stream as a generator yielding the audio chunks.""" def __init__(self, rate, chunk_size): self._rate = rate self.chunk_size = chunk_size self._num_channels = 1 self._buff = queue.Queue() self.is_final = False self.closed = True # Count the number of times the stream analyze content restarts. self.restart_counter = 0 self.last_start_time = 0 # Time end of the last is_final in millisec since last_start_time. self.is_final_offset = 0 # Save the audio chunks generated from the start of the audio stream for # replay after restart. self.audio_input_chunks = [] self.new_stream = True self._audio_interface = pyaudio.PyAudio() self._audio_stream = self._audio_interface.open( format=pyaudio.paInt16, channels=self._num_channels, rate=self._rate, input=True, frames_per_buffer=self.chunk_size, # Run the audio stream asynchronously to fill the buffer object. # This is necessary so that the input device's buffer doesn't # overflow while the calling thread makes network requests, etc. stream_callback=self._fill_buffer, ) def __enter__(self): self.closed = False return self def __exit__(self, type, value, traceback): self._audio_stream.stop_stream() self._audio_stream.close() self.closed = True # Signal the generator to terminate so that the client's # streaming_recognize method will not block the process termination. self._buff.put(None) self._audio_interface.terminate() def _fill_buffer(self, in_data, *args, **kwargs): """Continuously collect data from the audio stream, into the buffer in chunksize.""" self._buff.put(in_data) return None, pyaudio.paContinue def generator(self): """Stream Audio from microphone to API and to local buffer""" try: # Handle restart. print("restart generator") # Flip the bit of is_final so it can continue stream. self.is_final = False total_processed_time = self.last_start_time + self.is_final_offset processed_bytes_length = ( int(total_processed_time * SAMPLE_RATE * 16 / 8) / 1000 ) self.last_start_time = total_processed_time # Send out bytes stored in self.audio_input_chunks that is after the # processed_bytes_length. if processed_bytes_length != 0: audio_bytes = b"".join(self.audio_input_chunks) # Lookback for unprocessed audio data. need_to_process_length = min( int(len(audio_bytes) - processed_bytes_length), int(MAX_LOOKBACK * SAMPLE_RATE * 16 / 8), ) # Note that you need to explicitly use `int` type for substring. need_to_process_bytes = audio_bytes[(-1) * need_to_process_length :] yield need_to_process_bytes while not self.closed and not self.is_final: data = [] # Use a blocking get() to ensure there's at least one chunk of # data, and stop iteration if the chunk is None, indicating the # end of the audio stream. chunk = self._buff.get() if chunk is None: return data.append(chunk) # Now try to the rest of chunks if there are any left in the _buff. while True: try: chunk = self._buff.get(block=False) if chunk is None: return data.append(chunk) except queue.Empty: break self.audio_input_chunks.extend(data) if data: yield b"".join(data) finally: print("Stop generator") def main(): """start bidirectional streaming from microphone input to Dialogflow API""" # Create conversation. conversation = conversation_management.create_conversation( project_id=PROJECT_ID, conversation_profile_id=CONVERSATION_PROFILE_ID ) conversation_id = conversation.name.split("conversations/")[1].rstrip() # Create end user participant. 
end_user = participant_management.create_participant( project_id=PROJECT_ID, conversation_id=conversation_id, role="END_USER" ) participant_id = end_user.name.split("participants/")[1].rstrip() mic_manager = ResumableMicrophoneStream(SAMPLE_RATE, CHUNK_SIZE) print(mic_manager.chunk_size) sys.stdout.write(YELLOW) sys.stdout.write('\nListening, say "Quit" or "Exit" to stop.\n\n') sys.stdout.write("End (ms) Transcript Results/Status\n") sys.stdout.write("=====================================================\n") with mic_manager as stream: while not stream.closed: terminate = False while not terminate: try: print(f"New Streaming Analyze Request: {stream.restart_counter}") stream.restart_counter += 1 # Send request to streaming and get response. responses = participant_management.analyze_content_audio_stream( conversation_id=conversation_id, participant_id=participant_id, sample_rate_herz=SAMPLE_RATE, stream=stream, timeout=RESTART_TIMEOUT, language_code="en-US", single_utterance=False, ) # Now, print the final transcription responses to user. for response in responses: if response.message: print(response) if response.recognition_result.is_final: print(response) # offset return from recognition_result is relative # to the beginning of audio stream. offset = response.recognition_result.speech_end_offset stream.is_final_offset = int( offset.seconds * 1000 + offset.microseconds / 1000 ) transcript = response.recognition_result.transcript # Half-close the stream with gRPC (in Python just stop yielding requests) stream.is_final = True # Exit recognition if any of the transcribed phrase could be # one of our keywords. if re.search(r"\b(exit|quit)\b", transcript, re.I): sys.stdout.write(YELLOW) sys.stdout.write("Exiting...\n") terminate = True stream.closed = True break except DeadlineExceeded: print("Deadline Exceeded, restarting.") if terminate: conversation_management.complete_conversation( project_id=PROJECT_ID, conversation_id=conversation_id ) break if __name__ == "__main__": main()
ResumableMicrophoneStream
python
pytorch__pytorch
torch/nn/modules/pooling.py
{ "start": 48874, "end": 51461 }
class ____(_LPPoolNd): r"""Applies a 3D power-average pooling over an input signal composed of several input planes. On each window, the function computed is: .. math:: f(X) = \sqrt[p]{\sum_{x \in X} x^{p}} - At p = :math:`\infty`, one gets Max Pooling - At p = 1, one gets Sum Pooling (which is proportional to average pooling) The parameters :attr:`kernel_size`, :attr:`stride` can either be: - a single ``int`` -- in which case the same value is used for the height, width and depth dimension - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension, the second `int` for the height dimension and the third `int` for the width dimension .. note:: If the sum to the power of `p` is zero, the gradient of this function is not defined. This implementation will set the gradient to zero in this case. Args: kernel_size: the size of the window stride: the stride of the window. Default value is :attr:`kernel_size` ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape Note: When :attr:`ceil_mode` is ``True``, sliding windows may go off-bounds if they start within the left padding or the input. Sliding windows that would start in the right padded region are ignored. Shape: - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where .. math:: D_{out} = \left\lfloor\frac{D_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor .. math:: H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor .. math:: W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[2]}{\text{stride}[2]} + 1\right\rfloor Examples:: >>> # power-2 pool of square window of size=3, stride=2 >>> m = nn.LPPool3d(2, 3, stride=2) >>> # pool of non-square window of power 1.2 >>> m = nn.LPPool3d(1.2, (3, 2, 2), stride=(2, 1, 2)) >>> input = torch.randn(20, 16, 50, 44, 31) >>> output = m(input) """ kernel_size: _size_3_t stride: _size_3_t def forward(self, input: Tensor) -> Tensor: """Runs the forward pass.""" return F.lp_pool3d( input, float(self.norm_type), self.kernel_size, self.stride, self.ceil_mode )
LPPool3d
python
django__django
tests/postgres_tests/test_hstore.py
{ "start": 9945, "end": 12163 }
class ____(PostgreSQLSimpleTestCase): field_values = [ ({"a": "b"}, [{"a": "b"}, {"b": "a"}]), ( {"все": "Трурль и Клапауций"}, [{"Трурль": "Клапауций"}, {"Клапауций": "Трурль"}], ), ] @staticmethod def create_json_data(field_value, array_field_value): fields = { "field": json.dumps(field_value, ensure_ascii=False), "array_field": json.dumps( [json.dumps(item, ensure_ascii=False) for item in array_field_value], ensure_ascii=False, ), } return json.dumps( [{"model": "postgres_tests.hstoremodel", "pk": None, "fields": fields}] ) def test_dumping(self): for field_value, array_field_value in self.field_values: with self.subTest(field_value=field_value, array_value=array_field_value): instance = HStoreModel(field=field_value, array_field=array_field_value) data = serializers.serialize("json", [instance]) json_data = self.create_json_data(field_value, array_field_value) self.assertEqual(json.loads(data), json.loads(json_data)) def test_loading(self): for field_value, array_field_value in self.field_values: with self.subTest(field_value=field_value, array_value=array_field_value): json_data = self.create_json_data(field_value, array_field_value) instance = list(serializers.deserialize("json", json_data))[0].object self.assertEqual(instance.field, field_value) self.assertEqual(instance.array_field, array_field_value) def test_roundtrip_with_null(self): for field_value in [ {"a": "b", "c": None}, {"Енеїда": "Ти знаєш, він який суціга", "Зефір": None}, ]: with self.subTest(field_value=field_value): instance = HStoreModel(field=field_value) data = serializers.serialize("json", [instance]) new_instance = list(serializers.deserialize("json", data))[0].object self.assertEqual(instance.field, new_instance.field)
TestSerialization
python
facebook__pyre-check
client/command_arguments.py
{ "start": 9299, "end": 9916 }
class ____: watchman_root: Optional[Path] = None project_name: Optional[str] = None preset: Optional[str] = None cache_critical_files: List[str] = field(default_factory=list) def serialize(self) -> Dict[str, Any]: return { "watchman_root": ( str(self.watchman_root) if self.watchman_root is not None else None ), "project_name": self.project_name, "preset": self.preset, "cache_critical_files": self.cache_critical_files, } @dataclasses_merge.dataclass_merge @dataclass(frozen=True)
PysaSavedStateArguments
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 392798, "end": 393440 }
class ____(sgqlc.types.Interface): """Metadata for an audit entry containing enterprise account information. """ __schema__ = github_schema __field_names__ = ("enterprise_resource_path", "enterprise_slug", "enterprise_url") enterprise_resource_path = sgqlc.types.Field(URI, graphql_name="enterpriseResourcePath") """The HTTP path for this enterprise.""" enterprise_slug = sgqlc.types.Field(String, graphql_name="enterpriseSlug") """The slug of the enterprise.""" enterprise_url = sgqlc.types.Field(URI, graphql_name="enterpriseUrl") """The HTTP URL for this enterprise."""
EnterpriseAuditEntryData
python
Textualize__textual
src/textual/widgets/_markdown.py
{ "start": 22573, "end": 22647 }
class ____(MarkdownBlock): """A table data Markdown block."""
MarkdownTD
python
sympy__sympy
sympy/core/tests/test_constructor_postprocessor.py
{ "start": 155, "end": 711 }
class ____(Symbol): # Test class for a symbol that can only appear once in a `Mul` expression. pass Basic._constructor_postprocessor_mapping[SymbolInMulOnce] = { "Mul": [lambda x: x], "Pow": [lambda x: x.base if isinstance(x.base, SymbolInMulOnce) else x], "Add": [lambda x: x], } def _postprocess_SymbolRemovesOtherSymbols(expr): args = tuple(i for i in expr.args if not isinstance(i, Symbol) or isinstance(i, SymbolRemovesOtherSymbols)) if args == expr.args: return expr return Mul.fromiter(args)
SymbolInMulOnce
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_embed_image10.py
{ "start": 381, "end": 1398 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("embed_image10.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.embed_image( 0, 0, self.image_dir + "red.png", {"url": "http://www.cpan.org/"} ) workbook.close() self.assertExcelEqual() def test_create_file_with_image_and_url_objects(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() image = Image(self.image_dir + "red.png") image.url = Url("http://www.cpan.org/") worksheet.embed_image(0, 0, image) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
sqlalchemy__sqlalchemy
test/typing/plain_files/orm/traditional_relationship.py
{ "start": 1114, "end": 3158 }
class ____(Base): __tablename__ = "address" id = mapped_column(Integer, primary_key=True) user_id = mapped_column(ForeignKey("user.id")) email = mapped_column(String, nullable=False) user_style_one = relationship(User) user_style_one_typed: Mapped[User] = relationship(User) user_style_two = relationship("User") user_style_two_typed: Mapped["User"] = relationship("User") # this is obviously not correct relationally but want to see the typing # work out user_style_three: Mapped[List[User]] = relationship(User) user_style_four: Mapped[List[User]] = relationship("User") user_style_five = relationship(User, collection_class=set) user_fk_style_one: Mapped[List[User]] = relationship( foreign_keys="Address.user_id" ) user_fk_style_two: Mapped[List[User]] = relationship( foreign_keys=lambda: Address.user_id ) user_fk_style_three: Mapped[List[User]] = relationship( foreign_keys=[user_id] ) user_pj_style_one: Mapped[List[User]] = relationship( primaryjoin=user_id == User.id ) user_pj_style_two: Mapped[List[User]] = relationship( primaryjoin=lambda: Address.user_id == User.id ) user_pj_style_three: Mapped[List[User]] = relationship( primaryjoin="Address.user_id == User.id" ) if typing.TYPE_CHECKING: assert_type(User.addresses_style_one, InstrumentedAttribute[list[Address]]) assert_type(User.addresses_style_two, InstrumentedAttribute[set[Address]]) assert_type(Address.user_style_one, InstrumentedAttribute[Any]) assert_type(Address.user_style_one_typed, InstrumentedAttribute[User]) assert_type(Address.user_style_two, InstrumentedAttribute[Any]) assert_type(Address.user_style_two_typed, InstrumentedAttribute[User]) assert_type(Address.user_style_three, InstrumentedAttribute[list[User]]) assert_type(Address.user_style_four, InstrumentedAttribute[list[User]]) assert_type(Address.user_style_five, InstrumentedAttribute[Any])
Address
python
python-pillow__Pillow
Tests/test_imagefile.py
{ "start": 8113, "end": 8396 }
class ____(ImageFile.ImageFile): def _open(self) -> None: self.rawmode = "RGBA" self._mode = "RGBA" self._size = (200, 200) self.tile = [ ImageFile._Tile("MOCK", (xoff, yoff, xoff + xsize, yoff + ysize), 32, None) ]
MockImageFile
python
walkccc__LeetCode
solutions/3298. Count Substrings That Can Be Rearranged to Contain a String II/3298.py
{ "start": 0, "end": 661 }
class ____: # Same as 3297. Count Substrings That Can Be Rearranged to Contain a String I def validSubstringCount(self, word1: str, word2: str) -> int: ans = 0 count = collections.Counter(word2) required = len(word2) l = 0 for r, c in enumerate(word1): count[c] -= 1 if count[c] >= 0: required -= 1 while required == 0: # Add valid substrings containing word1[l..r] to the answer. They are # word1[l..r], word1[l..r + 1], ..., word1[l..n - 1]. ans += len(word1) - r count[word1[l]] += 1 if count[word1[l]] > 0: required += 1 l += 1 return ans
Solution
python
encode__django-rest-framework
tests/test_validators.py
{ "start": 581, "end": 681 }
class ____(models.Model): username = models.CharField(unique=True, max_length=100)
UniquenessModel
python
scrapy__scrapy
tests/test_addons.py
{ "start": 658, "end": 1032 }
class ____: def __init__(self, crawler: Crawler) -> None: super().__init__() self.crawler = crawler self.config = crawler.settings.getdict("MYADDON") @classmethod def from_crawler(cls, crawler: Crawler): return cls(crawler) def update_settings(self, settings): settings.update(self.config, "addon")
CreateInstanceAddon
python
scipy__scipy
scipy/stats/tests/test_multivariate.py
{ "start": 167354, "end": 179328 }
class ____: @pytest.mark.parametrize("dim", [2, 3, 4, 6]) @pytest.mark.parametrize("size", [None, 1, 5, (5, 4)]) def test_samples(self, dim, size): # test that samples have correct shape and norm 1 rng = np.random.default_rng(2777937887058094419) mu = np.full((dim, ), 1/np.sqrt(dim)) vmf_dist = vonmises_fisher(mu, 1, seed=rng) samples = vmf_dist.rvs(size) mean, cov = np.zeros(dim), np.eye(dim) expected_shape = rng.multivariate_normal(mean, cov, size=size).shape assert samples.shape == expected_shape norms = np.linalg.norm(samples, axis=-1) assert_allclose(norms, 1.) @pytest.mark.parametrize("dim", [5, 8]) @pytest.mark.parametrize("kappa", [1e15, 1e20, 1e30]) def test_sampling_high_concentration(self, dim, kappa): # test that no warnings are encountered for high values rng = np.random.default_rng(2777937887058094419) mu = np.full((dim, ), 1/np.sqrt(dim)) vmf_dist = vonmises_fisher(mu, kappa, seed=rng) vmf_dist.rvs(10) def test_two_dimensional_mu(self): mu = np.ones((2, 2)) msg = "'mu' must have one-dimensional shape." with pytest.raises(ValueError, match=msg): vonmises_fisher(mu, 1) def test_wrong_norm_mu(self): mu = np.ones((2, )) msg = "'mu' must be a unit vector of norm 1." with pytest.raises(ValueError, match=msg): vonmises_fisher(mu, 1) def test_one_entry_mu(self): mu = np.ones((1, )) msg = "'mu' must have at least two entries." with pytest.raises(ValueError, match=msg): vonmises_fisher(mu, 1) @pytest.mark.parametrize("kappa", [-1, (5, 3)]) def test_kappa_validation(self, kappa): msg = "'kappa' must be a positive scalar." with pytest.raises(ValueError, match=msg): vonmises_fisher([1, 0], kappa) @pytest.mark.parametrize("kappa", [0, 0.]) def test_kappa_zero(self, kappa): msg = ("For 'kappa=0' the von Mises-Fisher distribution " "becomes the uniform distribution on the sphere " "surface. Consider using 'scipy.stats.uniform_direction' " "instead.") with pytest.raises(ValueError, match=msg): vonmises_fisher([1, 0], kappa) @pytest.mark.parametrize("method", [vonmises_fisher.pdf, vonmises_fisher.logpdf]) def test_invalid_shapes_pdf_logpdf(self, method): x = np.array([1., 0., 0]) msg = ("The dimensionality of the last axis of 'x' must " "match the dimensionality of the von Mises Fisher " "distribution.") with pytest.raises(ValueError, match=msg): method(x, [1, 0], 1) @pytest.mark.parametrize("method", [vonmises_fisher.pdf, vonmises_fisher.logpdf]) def test_unnormalized_input(self, method): x = np.array([0.5, 0.]) msg = "'x' must be unit vectors of norm 1 along last dimension." 
with pytest.raises(ValueError, match=msg): method(x, [1, 0], 1) # Expected values of the vonmises-fisher logPDF were computed via mpmath # from mpmath import mp # import numpy as np # mp.dps = 50 # def logpdf_mpmath(x, mu, kappa): # dim = mu.size # halfdim = mp.mpf(0.5 * dim) # kappa = mp.mpf(kappa) # const = (kappa**(halfdim - mp.one)/((2*mp.pi)**halfdim * \ # mp.besseli(halfdim -mp.one, kappa))) # return float(const * mp.exp(kappa*mp.fdot(x, mu))) @pytest.mark.parametrize('x, mu, kappa, reference', [(np.array([1., 0., 0.]), np.array([1., 0., 0.]), 1e-4, 0.0795854295583605), (np.array([1., 0., 0]), np.array([0., 0., 1.]), 1e-4, 0.07957747141331854), (np.array([1., 0., 0.]), np.array([1., 0., 0.]), 100, 15.915494309189533), (np.array([1., 0., 0]), np.array([0., 0., 1.]), 100, 5.920684802611232e-43), (np.array([1., 0., 0.]), np.array([np.sqrt(0.98), np.sqrt(0.02), 0.]), 2000, 5.930499050746588e-07), (np.array([1., 0., 0]), np.array([1., 0., 0.]), 2000, 318.3098861837907), (np.array([1., 0., 0., 0., 0.]), np.array([1., 0., 0., 0., 0.]), 2000, 101371.86957712633), (np.array([1., 0., 0., 0., 0.]), np.array([np.sqrt(0.98), np.sqrt(0.02), 0., 0, 0.]), 2000, 0.00018886808182653578), (np.array([1., 0., 0., 0., 0.]), np.array([np.sqrt(0.8), np.sqrt(0.2), 0., 0, 0.]), 2000, 2.0255393314603194e-87)]) def test_pdf_accuracy(self, x, mu, kappa, reference): pdf = vonmises_fisher(mu, kappa).pdf(x) assert_allclose(pdf, reference, rtol=1e-13) # Expected values of the vonmises-fisher logPDF were computed via mpmath # from mpmath import mp # import numpy as np # mp.dps = 50 # def logpdf_mpmath(x, mu, kappa): # dim = mu.size # halfdim = mp.mpf(0.5 * dim) # kappa = mp.mpf(kappa) # two = mp.mpf(2.) # const = (kappa**(halfdim - mp.one)/((two*mp.pi)**halfdim * \ # mp.besseli(halfdim - mp.one, kappa))) # return float(mp.log(const * mp.exp(kappa*mp.fdot(x, mu)))) @pytest.mark.parametrize('x, mu, kappa, reference', [(np.array([1., 0., 0.]), np.array([1., 0., 0.]), 1e-4, -2.5309242486359573), (np.array([1., 0., 0]), np.array([0., 0., 1.]), 1e-4, -2.5310242486359575), (np.array([1., 0., 0.]), np.array([1., 0., 0.]), 100, 2.767293119578746), (np.array([1., 0., 0]), np.array([0., 0., 1.]), 100, -97.23270688042125), (np.array([1., 0., 0.]), np.array([np.sqrt(0.98), np.sqrt(0.02), 0.]), 2000, -14.337987284534103), (np.array([1., 0., 0]), np.array([1., 0., 0.]), 2000, 5.763025393132737), (np.array([1., 0., 0., 0., 0.]), np.array([1., 0., 0., 0., 0.]), 2000, 11.526550911307156), (np.array([1., 0., 0., 0., 0.]), np.array([np.sqrt(0.98), np.sqrt(0.02), 0., 0, 0.]), 2000, -8.574461766359684), (np.array([1., 0., 0., 0., 0.]), np.array([np.sqrt(0.8), np.sqrt(0.2), 0., 0, 0.]), 2000, -199.61906708886113)]) def test_logpdf_accuracy(self, x, mu, kappa, reference): logpdf = vonmises_fisher(mu, kappa).logpdf(x) assert_allclose(logpdf, reference, rtol=1e-14) # Expected values of the vonmises-fisher entropy were computed via mpmath # from mpmath import mp # import numpy as np # mp.dps = 50 # def entropy_mpmath(dim, kappa): # mu = np.full((dim, ), 1/np.sqrt(dim)) # kappa = mp.mpf(kappa) # halfdim = mp.mpf(0.5 * dim) # logconstant = (mp.log(kappa**(halfdim - mp.one) # /((2*mp.pi)**halfdim # * mp.besseli(halfdim -mp.one, kappa))) # return float(-logconstant - kappa * mp.besseli(halfdim, kappa)/ # mp.besseli(halfdim -1, kappa)) @pytest.mark.parametrize('dim, kappa, reference', [(3, 1e-4, 2.531024245302624), (3, 100, -1.7672931195787458), (5, 5000, -11.359032310024453), (8, 1, 3.4189526482545527)]) def test_entropy_accuracy(self, dim, kappa, 
reference): mu = np.full((dim, ), 1/np.sqrt(dim)) entropy = vonmises_fisher(mu, kappa).entropy() assert_allclose(entropy, reference, rtol=2e-14) @pytest.mark.parametrize("method", [vonmises_fisher.pdf, vonmises_fisher.logpdf]) def test_broadcasting(self, method): # test that pdf and logpdf values are correctly broadcasted testshape = (2, 2) rng = np.random.default_rng(2777937887058094419) x = uniform_direction(3).rvs(testshape, random_state=rng) mu = np.full((3, ), 1/np.sqrt(3)) kappa = 5 result_all = method(x, mu, kappa) assert result_all.shape == testshape for i in range(testshape[0]): for j in range(testshape[1]): current_val = method(x[i, j, :], mu, kappa) assert_allclose(current_val, result_all[i, j], rtol=1e-15) def test_vs_vonmises_2d(self): # test that in 2D, von Mises-Fisher yields the same results # as the von Mises distribution rng = np.random.default_rng(2777937887058094419) mu = np.array([0, 1]) mu_angle = np.arctan2(mu[1], mu[0]) kappa = 20 vmf = vonmises_fisher(mu, kappa) vonmises_dist = vonmises(loc=mu_angle, kappa=kappa) vectors = uniform_direction(2).rvs(10, random_state=rng) angles = np.arctan2(vectors[:, 1], vectors[:, 0]) assert_allclose(vonmises_dist.entropy(), vmf.entropy()) assert_allclose(vonmises_dist.pdf(angles), vmf.pdf(vectors)) assert_allclose(vonmises_dist.logpdf(angles), vmf.logpdf(vectors)) @pytest.mark.parametrize("dim", [2, 3, 6]) @pytest.mark.parametrize("kappa, mu_tol, kappa_tol", [(1, 5e-2, 5e-2), (10, 1e-2, 1e-2), (100, 5e-3, 2e-2), (1000, 1e-3, 2e-2)]) def test_fit_accuracy(self, dim, kappa, mu_tol, kappa_tol): mu = np.full((dim, ), 1/np.sqrt(dim)) vmf_dist = vonmises_fisher(mu, kappa) rng = np.random.default_rng(2777937887058094419) n_samples = 10000 samples = vmf_dist.rvs(n_samples, random_state=rng) mu_fit, kappa_fit = vonmises_fisher.fit(samples) angular_error = np.arccos(mu.dot(mu_fit)) assert_allclose(angular_error, 0., atol=mu_tol, rtol=0) assert_allclose(kappa, kappa_fit, rtol=kappa_tol) def test_fit_error_one_dimensional_data(self): x = np.zeros((3, )) msg = "'x' must be two dimensional." with pytest.raises(ValueError, match=msg): vonmises_fisher.fit(x) def test_fit_error_unnormalized_data(self): x = np.ones((3, 3)) msg = "'x' must be unit vectors of norm 1 along last dimension." with pytest.raises(ValueError, match=msg): vonmises_fisher.fit(x) def test_frozen_distribution(self): mu = np.array([0, 0, 1]) kappa = 5 frozen = vonmises_fisher(mu, kappa) frozen_seed = vonmises_fisher(mu, kappa, seed=514) rvs1 = frozen.rvs(random_state=514) rvs2 = vonmises_fisher.rvs(mu, kappa, random_state=514) rvs3 = frozen_seed.rvs() assert_equal(rvs1, rvs2) assert_equal(rvs1, rvs3)
TestVonMises_Fisher
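The mpmath reference quoted in the comments of the test source above reads more easily as a runnable snippet. This is an illustrative reconstruction, not part of the dataset row; the check value is simply the first entry of the test's own parametrization (note the first comment block says logPDF although the values it produces are plain PDF values):

from mpmath import mp

mp.dps = 50

def pdf_mpmath(x, mu, kappa):
    # von Mises-Fisher density C_p(kappa) * exp(kappa * <x, mu>), where
    # C_p(kappa) = kappa**(p/2 - 1) / ((2*pi)**(p/2) * I_{p/2 - 1}(kappa)).
    dim = len(mu)
    halfdim = mp.mpf(0.5 * dim)
    kappa = mp.mpf(kappa)
    const = (kappa ** (halfdim - mp.one)
             / ((2 * mp.pi) ** halfdim * mp.besseli(halfdim - mp.one, kappa)))
    return float(const * mp.exp(kappa * mp.fdot(x, mu)))

# First case of test_pdf_accuracy: x = mu = (1, 0, 0), kappa = 1e-4.
print(pdf_mpmath([1.0, 0.0, 0.0], [1.0, 0.0, 0.0], 1e-4))  # ~0.0795854295583605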
python
getsentry__sentry
src/sentry/testutils/cases.py
{ "start": 92704, "end": 94325 }
class ____(APITestCase): def setUp(self): super().setUp() self.user = self.create_user(is_staff=False, is_superuser=False) self.org = self.create_organization() self.team = self.create_team(organization=self.org) self.project = self.create_project(name="foo", organization=self.org, teams=[self.team]) self.create_member(teams=[self.team], user=self.user, organization=self.org) self.login_as(user=self.user) self.group = self.create_group(project=self.project) self.repo = Repository.objects.create(organization_id=self.org.id, name="test/repo") def assert_fetch_commits(self, mock_fetch_commit, prev_release_id, release_id, refs): assert len(mock_fetch_commit.method_calls) == 1 kwargs = mock_fetch_commit.method_calls[0][2]["kwargs"] assert kwargs == { "prev_release_id": prev_release_id, "refs": refs, "release_id": release_id, "user_id": self.user.id, } def assert_head_commit(self, head_commit, commit_key, release_id=None): assert self.org.id == head_commit.organization_id assert self.repo.id == head_commit.repository_id if release_id: assert release_id == head_commit.release_id else: assert self.release.id == head_commit.release_id self.assert_commit(head_commit.commit, commit_key) def assert_commit(self, commit, key): assert self.org.id == commit.organization_id assert self.repo.id == commit.repository_id assert commit.key == key
SetRefsTestCase
python
scipy__scipy
scipy/stats/_discrete_distns.py
{ "start": 55357, "end": 57762 }
class ____(rv_discrete): r"""A Yule-Simon discrete random variable. %(before_notes)s Notes ----- The probability mass function for the `yulesimon` is: .. math:: f(k) = \alpha B(k, \alpha+1) for :math:`k=1,2,3,...`, where :math:`\alpha>0`. Here :math:`B` refers to the `scipy.special.beta` function. The sampling of random variates is based on pg 553, Section 6.3 of [1]_. Our notation maps to the referenced logic via :math:`\alpha=a-1`. For details see the wikipedia entry [2]_. References ---------- .. [1] Devroye, Luc. "Non-uniform Random Variate Generation", (1986) Springer, New York. .. [2] https://en.wikipedia.org/wiki/Yule-Simon_distribution %(after_notes)s %(example)s """ def _shape_info(self): return [_ShapeInfo("alpha", False, (0, np.inf), (False, False))] def _rvs(self, alpha, size=None, random_state=None): E1 = random_state.standard_exponential(size) E2 = random_state.standard_exponential(size) ans = ceil(-E1 / log1p(-exp(-E2 / alpha))) return ans def _pmf(self, x, alpha): return alpha * special.beta(x, alpha + 1) def _argcheck(self, alpha): return (alpha > 0) def _logpmf(self, x, alpha): return log(alpha) + special.betaln(x, alpha + 1) def _cdf(self, x, alpha): return 1 - x * special.beta(x, alpha + 1) def _sf(self, x, alpha): return x * special.beta(x, alpha + 1) def _logsf(self, x, alpha): return log(x) + special.betaln(x, alpha + 1) def _stats(self, alpha): mu = np.where(alpha <= 1, np.inf, alpha / (alpha - 1)) mu2 = np.where(alpha > 2, alpha**2 / ((alpha - 2.0) * (alpha - 1)**2), np.inf) mu2 = np.where(alpha <= 1, np.nan, mu2) g1 = np.where(alpha > 3, sqrt(alpha - 2) * (alpha + 1)**2 / (alpha * (alpha - 3)), np.inf) g1 = np.where(alpha <= 2, np.nan, g1) g2 = np.where(alpha > 4, alpha + 3 + ((11 * alpha**3 - 49 * alpha - 22) / (alpha * (alpha - 4) * (alpha - 3))), np.inf) g2 = np.where(alpha <= 2, np.nan, g2) return mu, mu2, g1, g2 yulesimon = yulesimon_gen(name='yulesimon', a=1)
yulesimon_gen
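The docstring above gives the closed-form pmf f(k) = alpha * B(k, alpha + 1) and points at Devroye's exponential-based sampler (with alpha = a - 1). A minimal sketch of both, using numpy and scipy.special directly; alpha = 3.0 is an arbitrary illustrative choice:

import numpy as np
from scipy import special

rng = np.random.default_rng(0)
alpha = 3.0

# Devroye-style sampler mirrored from _rvs: two standard exponentials per draw.
e1 = rng.standard_exponential(100_000)
e2 = rng.standard_exponential(100_000)
samples = np.ceil(-e1 / np.log1p(-np.exp(-e2 / alpha)))

# For alpha > 1 the mean is alpha / (alpha - 1) = 1.5; the sample mean should be close.
print(samples.mean())

# Closed-form pmf from the docstring, evaluated at k = 1..5.
k = np.arange(1, 6)
print(alpha * special.beta(k, alpha + 1))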
python
sqlalchemy__sqlalchemy
test/orm/test_options.py
{ "start": 25754, "end": 27301 }
class ____(PathTest, fixtures.DeclarativeMappedTest): # test for regression to #3963 run_setup_mappers = "once" run_inserts = "once" run_deletes = None @classmethod def setup_mappers(cls): Base = cls.DeclarativeBasic class BaseCls(Base): __tablename__ = "basecls" id = Column(Integer, primary_key=True) type = Column(String(30)) related_id = Column(ForeignKey("related.id")) related = relationship("Related") class SubClass(BaseCls): __tablename__ = "subcls" id = Column(ForeignKey("basecls.id"), primary_key=True) class Related(Base): __tablename__ = "related" id = Column(Integer, primary_key=True) sub_related_id = Column(ForeignKey("sub_related.id")) sub_related = relationship("SubRelated") class SubRelated(Base): __tablename__ = "sub_related" id = Column(Integer, primary_key=True) def test_with_current_nonmatching_entity_subclasses(self): BaseCls, SubClass, Related, SubRelated = self.classes( "BaseCls", "SubClass", "Related", "SubRelated" ) sess = fixture_session() q = sess.query(Related)._with_current_path( self._make_path_registry([inspect(SubClass), "related"]) ) opt = subqueryload(SubClass.related).subqueryload(Related.sub_related) self._assert_path_result(opt, q, [(Related, "sub_related")])
FromSubclassOptionsTest
python
django__django
tests/model_fields/models.py
{ "start": 19126, "end": 19389 }
class ____(models.Model): a = models.IntegerField() a_squared = models.GeneratedField( expression=F("a") * F("a"), output_field=models.IntegerField(), db_persist=True, ) class Meta: abstract = True
GeneratedModelBase
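GeneratedModelBase is abstract, so the GeneratedField only materializes on a concrete subclass. A hypothetical sketch (the model name and app_label are made up, and a migrated test database is assumed):

class Squared(GeneratedModelBase):
    class Meta:
        app_label = "model_fields"  # assumed label of the test app

# With db_persist=True the database stores a_squared as a generated column,
# so the backend computes it from a rather than Python code:
obj = Squared.objects.create(a=4)
obj.refresh_from_db()
assert obj.a_squared == 16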
python
doocs__leetcode
solution/2200-2299/2234.Maximum Total Beauty of the Gardens/Solution.py
{ "start": 0, "end": 951 }
class ____: def maximumBeauty( self, flowers: List[int], newFlowers: int, target: int, full: int, partial: int ) -> int: flowers.sort() n = len(flowers) s = list(accumulate(flowers, initial=0)) ans, i = 0, n - bisect_left(flowers, target) for x in range(i, n + 1): newFlowers -= 0 if x == 0 else max(target - flowers[n - x], 0) if newFlowers < 0: break l, r = 0, n - x - 1 while l < r: mid = (l + r + 1) >> 1 if flowers[mid] * (mid + 1) - s[mid + 1] <= newFlowers: l = mid else: r = mid - 1 y = 0 if r != -1: cost = flowers[l] * (l + 1) - s[l + 1] y = min(flowers[l] + (newFlowers - cost) // (l + 1), target - 1) ans = max(ans, x * full + y * partial) return ans
Solution
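A hypothetical usage check for the solution above, assuming the masked class is named Solution (per the target field) and that the usual imports (from typing import List, from itertools import accumulate, from bisect import bisect_left) accompany it; the input is the published example for LeetCode 2234, whose expected answer is 14:

# Complete one garden (3 -> 6 costs 3 flowers), then raise the minimum of the
# remaining gardens to 2 with the 4 flowers left: 1 * 12 + 2 * 1 = 14.
print(Solution().maximumBeauty([1, 3, 1, 1], 7, 6, 12, 1))  # -> 14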
python
allegroai__clearml
clearml/backend_api/services/v2_20/projects.py
{ "start": 117203, "end": 119426 }
class ____(Response): """ Response of projects.get_project_tags endpoint. :param tags: The list of unique tag values :type tags: Sequence[str] :param system_tags: The list of unique system tag values. Returned only if 'include_system' is set to 'true' in the request :type system_tags: Sequence[str] """ _service = "projects" _action = "get_project_tags" _version = "2.20" _schema = { "definitions": {}, "properties": { "system_tags": { "description": "The list of unique system tag values. Returned only if 'include_system' is set to 'true' in the request", "items": {"type": "string"}, "type": ["array", "null"], }, "tags": { "description": "The list of unique tag values", "items": {"type": "string"}, "type": ["array", "null"], }, }, "type": "object", } def __init__( self, tags: Optional[List[str]] = None, system_tags: Optional[List[str]] = None, **kwargs: Any ) -> None: super(GetProjectTagsResponse, self).__init__(**kwargs) self.tags = tags self.system_tags = system_tags @schema_property("tags") def tags(self) -> Optional[List[str]]: return self._property_tags @tags.setter def tags(self, value: Optional[List[str]]) -> None: if value is None: self._property_tags = None return self.assert_isinstance(value, "tags", (list, tuple)) self.assert_isinstance(value, "tags", six.string_types, is_array=True) self._property_tags = value @schema_property("system_tags") def system_tags(self) -> Optional[List[str]]: return self._property_system_tags @system_tags.setter def system_tags(self, value: Optional[List[str]]) -> None: if value is None: self._property_system_tags = None return self.assert_isinstance(value, "system_tags", (list, tuple)) self.assert_isinstance(value, "system_tags", six.string_types, is_array=True) self._property_system_tags = value
GetProjectTagsResponse
python
numba__numba
numba/tests/test_parfors.py
{ "start": 4828, "end": 24725 }
class ____(TestCase): """ Base class for testing parfors. Provides functions for compilation and three way comparison between python functions, njit'd functions and parfor njit'd functions. """ _numba_parallel_test_ = False def _compile_this(self, func, sig, **flags): # This method originally used `compile_isolated` which returns a # "CompileResult", hence this does the same. return njit(sig, **flags)(func).overloads[sig] def compile_parallel(self, func, sig): return self._compile_this(func, sig, parallel=True) def compile_parallel_fastmath(self, func, sig): return self._compile_this(func, sig, parallel=True, fastmath=True) def compile_njit(self, func, sig): return self._compile_this(func, sig) def compile_all(self, pyfunc, *args, **kwargs): sig = tuple([numba.typeof(x) for x in args]) # compile the prange injected function cpfunc = self.compile_parallel(pyfunc, sig) # compile a standard njit of the original function cfunc = self.compile_njit(pyfunc, sig) return cfunc, cpfunc def check_parfors_vs_others(self, pyfunc, cfunc, cpfunc, *args, **kwargs): """ Checks python, njit and parfor impls produce the same result. Arguments: pyfunc - the python function to test cfunc - CompilerResult from njit of pyfunc cpfunc - CompilerResult from njit(parallel=True) of pyfunc args - arguments for the function being tested Keyword Arguments: scheduler_type - 'signed', 'unsigned' or None, default is None. Supply in cases where the presence of a specific scheduler is to be asserted. fastmath_pcres - a fastmath parallel compile result, if supplied will be run to make sure the result is correct check_arg_equality - some functions need to check that a parameter is modified rather than a certain value returned. If this keyword argument is supplied, it should be a list of comparison functions such that the i'th function in the list is used to compare the i'th parameter of the njit and parallel=True functions against the i'th parameter of the standard Python function, asserting if they differ. The length of this list must be equal to the number of parameters to the function. The null comparator is available for use when you do not desire to test if some particular parameter is changed. 
Remaining kwargs are passed to np.testing.assert_almost_equal """ scheduler_type = kwargs.pop('scheduler_type', None) check_fastmath = kwargs.pop('check_fastmath', None) fastmath_pcres = kwargs.pop('fastmath_pcres', None) check_scheduling = kwargs.pop('check_scheduling', True) check_args_for_equality = kwargs.pop('check_arg_equality', None) def copy_args(*args): if not args: return tuple() new_args = [] for x in args: if isinstance(x, np.ndarray): new_args.append(x.copy('k')) elif isinstance(x, np.number): new_args.append(x.copy()) elif isinstance(x, numbers.Number): new_args.append(x) elif x is None: new_args.append(x) elif isinstance(x, tuple): new_args.append(copy.deepcopy(x)) elif isinstance(x, list): new_args.append(x[:]) elif isinstance(x, Dict): new_args.append(copy.copy(x)) elif isinstance(x, List): new_args.append(copy.copy(x)) else: raise ValueError('Unsupported argument type encountered') return tuple(new_args) # python result py_args = copy_args(*args) py_expected = pyfunc(*py_args) # njit result njit_args = copy_args(*args) njit_output = cfunc.entry_point(*njit_args) # parfor result parfor_args = copy_args(*args) parfor_output = cpfunc.entry_point(*parfor_args) if check_args_for_equality is None: np.testing.assert_almost_equal(njit_output, py_expected, **kwargs) np.testing.assert_almost_equal(parfor_output, py_expected, **kwargs) self.assertEqual(type(njit_output), type(parfor_output)) else: assert(len(py_args) == len(check_args_for_equality)) for pyarg, njitarg, parforarg, argcomp in zip( py_args, njit_args, parfor_args, check_args_for_equality): argcomp(njitarg, pyarg, **kwargs) argcomp(parforarg, pyarg, **kwargs) if check_scheduling: self.check_scheduling(cpfunc, scheduler_type) # if requested check fastmath variant if fastmath_pcres is not None: parfor_fastmath_output = fastmath_pcres.entry_point(*copy_args(*args)) np.testing.assert_almost_equal(parfor_fastmath_output, py_expected, **kwargs) def check(self, pyfunc, *args, **kwargs): """Checks that pyfunc compiles for *args under parallel=True and njit and asserts that all version execute and produce the same result""" cfunc, cpfunc = self.compile_all(pyfunc, *args) self.check_parfors_vs_others(pyfunc, cfunc, cpfunc, *args, **kwargs) def check_variants(self, impl, arg_gen, **kwargs): """Run self.check(impl, ...) on array data generated from arg_gen. """ for args in arg_gen(): with self.subTest(list(map(typeof, args))): self.check(impl, *args, **kwargs) def count_parfors_variants(self, impl, arg_gen, **kwargs): """Run self.countParfors(impl, ...) on array types generated from arg_gen. """ for args in arg_gen(): with self.subTest(list(map(typeof, args))): argtys = tuple(map(typeof, args)) # At least one parfors self.assertGreaterEqual(countParfors(impl, argtys), 1) def check_scheduling(self, cres, scheduler_type): # make sure parfor set up scheduling scheduler_str = '@do_scheduling' if scheduler_type is not None: if scheduler_type in ['signed', 'unsigned']: scheduler_str += '_' + scheduler_type else: msg = "Unknown scheduler_type specified: %s" raise ValueError(msg % scheduler_type) self.assertIn(scheduler_str, cres.library.get_llvm_str()) def gen_linspace(self, n, ct): """Make *ct* sample 1D arrays of length *n* using np.linspace(). 
""" def gen(): yield np.linspace(0, 1, n) yield np.linspace(2, 1, n) yield np.linspace(1, 2, n) src = cycle(gen()) return [next(src) for i in range(ct)] def gen_linspace_variants(self, ct): """Make 1D, 2D, 3D variants of the data in C and F orders """ # 1D yield self.gen_linspace(10, ct=ct) # 2D arr2ds = [x.reshape((2, 3)) for x in self.gen_linspace(n=2 * 3, ct=ct)] yield arr2ds # Fortran order yield [np.asfortranarray(x) for x in arr2ds] # 3D arr3ds = [x.reshape((2, 3, 4)) for x in self.gen_linspace(n=2 * 3 * 4, ct=ct)] yield arr3ds # Fortran order yield [np.asfortranarray(x) for x in arr3ds] def _filter_mod(self, mod, magicstr, checkstr=None): """ helper function to filter out modules by name""" filt = [x for x in mod if magicstr in x.name] if checkstr is not None: for x in filt: assert checkstr in str(x) return filt def _get_gufunc_modules(self, cres, magicstr, checkstr=None): """ gets the gufunc LLVM Modules""" _modules = [x for x in cres.library._codegen._engine._ee._modules] # make sure to only use modules that are actually used by cres and # aren't just in the EE by virtue of shared compilation context. potential_matches = self._filter_mod(_modules, magicstr, checkstr=checkstr) lib_asm = cres.library.get_asm_str() ret = [] for mod in potential_matches: if mod.name in lib_asm: ret.append(mod) return ret def _get_gufunc_info(self, cres, fn): """ helper for gufunc IR/asm generation""" # get the gufunc modules magicstr = '__numba_parfor_gufunc' gufunc_mods = self._get_gufunc_modules(cres, magicstr) x = dict() for mod in gufunc_mods: x[mod.name] = fn(mod) return x def _get_gufunc_ir(self, cres): """ Returns the IR of the gufuncs used as parfor kernels as a dict mapping the gufunc name to its IR. Arguments: cres - a CompileResult from `njit(parallel=True, ...)` """ return self._get_gufunc_info(cres, str) def _get_gufunc_asm(self, cres): """ Returns the assembly of the gufuncs used as parfor kernels as a dict mapping the gufunc name to its assembly. Arguments: cres - a CompileResult from `njit(parallel=True, ...)` """ tm = cres.library._codegen._tm def emit_asm(mod): return str(tm.emit_assembly(mod)) return self._get_gufunc_info(cres, emit_asm) def assert_fastmath(self, pyfunc, sig): """ Asserts that the fastmath flag has some effect in that suitable instructions are now labelled as `fast`. Whether LLVM can actually do anything to optimise better now the derestrictions are supplied is another matter! 
Arguments: pyfunc - a function that contains operations with parallel semantics sig - the type signature of pyfunc """ cres = self.compile_parallel_fastmath(pyfunc, sig) _ir = self._get_gufunc_ir(cres) def _get_fast_instructions(ir): splitted = ir.splitlines() fast_inst = [] for x in splitted: m = re.search(r'\bfast\b', x) # \b for wholeword if m is not None: fast_inst.append(x) return fast_inst def _assert_fast(instrs): ops = ('fadd', 'fsub', 'fmul', 'fdiv', 'frem', 'fcmp', 'call') for inst in instrs: count = 0 for op in ops: match = op + ' fast' if match in inst: count += 1 self.assertTrue(count > 0) for name, guir in _ir.items(): inst = _get_fast_instructions(guir) _assert_fast(inst) def blackscholes_impl(sptprice, strike, rate, volatility, timev): # blackscholes example logterm = np.log(sptprice / strike) powterm = 0.5 * volatility * volatility den = volatility * np.sqrt(timev) d1 = (((rate + powterm) * timev) + logterm) / den d2 = d1 - den NofXd1 = 0.5 + 0.5 * 2.0 * d1 NofXd2 = 0.5 + 0.5 * 2.0 * d2 futureValue = strike * np.exp(- rate * timev) c1 = futureValue * NofXd2 call = sptprice * NofXd1 - c1 put = call - futureValue + sptprice return put def lr_impl(Y, X, w, iterations): # logistic regression example for i in range(iterations): w -= np.dot(((1.0 / (1.0 + np.exp(-Y * np.dot(X, w))) - 1.0) * Y), X) return w def example_kmeans_test(A, numCenter, numIter, init_centroids): centroids = init_centroids N, D = A.shape for l in range(numIter): dist = np.array([[math.sqrt(np.sum((A[i,:]-centroids[j,:])**2)) for j in range(numCenter)] for i in range(N)]) labels = np.array([dist[i,:].argmin() for i in range(N)]) centroids = np.array([[np.sum(A[labels==i, j])/np.sum(labels==i) for j in range(D)] for i in range(numCenter)]) return centroids def get_optimized_numba_ir(test_func, args, **kws): typingctx = cpu_target.typing_context targetctx = cpu_target.target_context test_ir = compiler.run_frontend(test_func) if kws: options = cpu.ParallelOptions(kws) else: options = cpu.ParallelOptions(True) tp = TestPipeline(typingctx, targetctx, args, test_ir) typingctx.refresh() targetctx.refresh() inline_pass = inline_closurecall.InlineClosureCallPass(tp.state.func_ir, options, typed=True) inline_pass.run() rewrites.rewrite_registry.apply('before-inference', tp.state) tp.state.typemap, tp.state.return_type, tp.state.calltypes, _ = \ typed_passes.type_inference_stage(tp.state.typingctx, tp.state.targetctx, tp.state.func_ir, tp.state.args, None) type_annotations.TypeAnnotation( func_ir=tp.state.func_ir, typemap=tp.state.typemap, calltypes=tp.state.calltypes, lifted=(), lifted_from=None, args=tp.state.args, return_type=tp.state.return_type, html_output=config.HTML) diagnostics = numba.parfors.parfor.ParforDiagnostics() preparfor_pass = numba.parfors.parfor.PreParforPass( tp.state.func_ir, tp.state.typemap, tp.state.calltypes, tp.state.typingctx, tp.state.targetctx, options, swapped=diagnostics.replaced_fns) preparfor_pass.run() rewrites.rewrite_registry.apply('after-inference', tp.state) flags = compiler.Flags() parfor_pass = numba.parfors.parfor.ParforPass( tp.state.func_ir, tp.state.typemap, tp.state.calltypes, tp.state.return_type, tp.state.typingctx, tp.state.targetctx, options, flags, tp.state.metadata, diagnostics=diagnostics) parfor_pass.run() parfor_pass = numba.parfors.parfor.ParforFusionPass( tp.state.func_ir, tp.state.typemap, tp.state.calltypes, tp.state.return_type, tp.state.typingctx, tp.state.targetctx, options, flags, tp.state.metadata, diagnostics=diagnostics) parfor_pass.run() parfor_pass = 
numba.parfors.parfor.ParforPreLoweringPass( tp.state.func_ir, tp.state.typemap, tp.state.calltypes, tp.state.return_type, tp.state.typingctx, tp.state.targetctx, options, flags, tp.state.metadata, diagnostics=diagnostics) parfor_pass.run() test_ir._definitions = build_definitions(test_ir.blocks) return test_ir, tp def countParfors(test_func, args, **kws): test_ir, tp = get_optimized_numba_ir(test_func, args, **kws) ret_count = 0 for label, block in test_ir.blocks.items(): for i, inst in enumerate(block.body): if isinstance(inst, numba.parfors.parfor.Parfor): ret_count += 1 return ret_count def countArrays(test_func, args, **kws): test_ir, tp = get_optimized_numba_ir(test_func, args, **kws) return _count_arrays_inner(test_ir.blocks, tp.state.typemap) def get_init_block_size(test_func, args, **kws): test_ir, tp = get_optimized_numba_ir(test_func, args, **kws) blocks = test_ir.blocks ret_count = 0 for label, block in blocks.items(): for i, inst in enumerate(block.body): if isinstance(inst, numba.parfors.parfor.Parfor): ret_count += len(inst.init_block.body) return ret_count def _count_arrays_inner(blocks, typemap): ret_count = 0 arr_set = set() for label, block in blocks.items(): for i, inst in enumerate(block.body): if isinstance(inst, numba.parfors.parfor.Parfor): parfor_blocks = inst.loop_body.copy() parfor_blocks[0] = inst.init_block ret_count += _count_arrays_inner(parfor_blocks, typemap) if (isinstance(inst, ir.Assign) and isinstance(typemap[inst.target.name], types.ArrayCompatible)): arr_set.add(inst.target.name) ret_count += len(arr_set) return ret_count def countArrayAllocs(test_func, args, **kws): test_ir, tp = get_optimized_numba_ir(test_func, args, **kws) ret_count = 0 for block in test_ir.blocks.values(): ret_count += _count_array_allocs_inner(test_ir, block) return ret_count def _count_array_allocs_inner(func_ir, block): ret_count = 0 for inst in block.body: if isinstance(inst, numba.parfors.parfor.Parfor): ret_count += _count_array_allocs_inner(func_ir, inst.init_block) for b in inst.loop_body.values(): ret_count += _count_array_allocs_inner(func_ir, b) if (isinstance(inst, ir.Assign) and isinstance(inst.value, ir.Expr) and inst.value.op == 'call' and (guard(find_callname, func_ir, inst.value) == ('empty', 'numpy') or guard(find_callname, func_ir, inst.value) == ('empty_inferred', 'numba.np.unsafe.ndarray'))): ret_count += 1 return ret_count def countNonParforArrayAccesses(test_func, args, **kws): test_ir, tp = get_optimized_numba_ir(test_func, args, **kws) return _count_non_parfor_array_accesses_inner(test_ir, test_ir.blocks, tp.state.typemap) def _count_non_parfor_array_accesses_inner(f_ir, blocks, typemap, parfor_indices=None): ret_count = 0 if parfor_indices is None: parfor_indices = set() for label, block in blocks.items(): for stmt in block.body: if isinstance(stmt, numba.parfors.parfor.Parfor): parfor_indices.add(stmt.index_var.name) parfor_blocks = stmt.loop_body.copy() parfor_blocks[0] = stmt.init_block ret_count += _count_non_parfor_array_accesses_inner( f_ir, parfor_blocks, typemap, parfor_indices) # getitem elif (is_getitem(stmt) and isinstance(typemap[stmt.value.value.name], types.ArrayCompatible) and not _uses_indices( f_ir, index_var_of_get_setitem(stmt), parfor_indices)): ret_count += 1 # setitem elif (is_setitem(stmt) and isinstance(typemap[stmt.target.name], types.ArrayCompatible) and not _uses_indices( f_ir, index_var_of_get_setitem(stmt), parfor_indices)): ret_count += 1 # find parfor_index aliases elif (isinstance(stmt, ir.Assign) and isinstance(stmt.value, 
ir.Var) and stmt.value.name in parfor_indices): parfor_indices.add(stmt.target.name) return ret_count def _uses_indices(f_ir, index, index_set): if index.name in index_set: return True ind_def = guard(get_definition, f_ir, index) if isinstance(ind_def, ir.Expr) and ind_def.op == 'build_tuple': varnames = set(v.name for v in ind_def.items) return len(varnames & index_set) != 0 return False
TestParforsBase
python
numba__numba
numba/core/annotations/type_annotations.py
{ "start": 984, "end": 11184 }
class ____(object): # func_data dict stores annotation data for all functions that are # compiled. We store the data in the TypeAnnotation class since a new # TypeAnnotation instance is created for each function that is compiled. # For every function that is compiled, we add the type annotation data to # this dict and write the html annotation file to disk (rewrite the html # file for every function since we don't know if this is the last function # to be compiled). func_data = OrderedDict() def __init__(self, func_ir, typemap, calltypes, lifted, lifted_from, args, return_type, html_output=None): self.func_id = func_ir.func_id self.blocks = func_ir.blocks self.typemap = typemap self.calltypes = calltypes self.filename = func_ir.loc.filename self.linenum = str(func_ir.loc.line) self.signature = str(args) + ' -> ' + str(return_type) # lifted loop information self.lifted = lifted self.num_lifted_loops = len(lifted) # If this is a lifted loop function that is being compiled, lifted_from # points to annotation data from function that this loop lifted function # was lifted from. This is used to stick lifted loop annotations back # into original function. self.lifted_from = lifted_from def prepare_annotations(self): # Prepare annotations groupedinst = defaultdict(list) found_lifted_loop = False #for blkid, blk in self.blocks.items(): for blkid in sorted(self.blocks.keys()): blk = self.blocks[blkid] groupedinst[blk.loc.line].append("label %s" % blkid) for inst in blk.body: lineno = inst.loc.line if isinstance(inst, ir.Assign): if found_lifted_loop: atype = 'XXX Lifted Loop XXX' found_lifted_loop = False elif (isinstance(inst.value, ir.Expr) and inst.value.op == 'call'): atype = self.calltypes[inst.value] elif (isinstance(inst.value, ir.Const) and isinstance(inst.value.value, numba.core.dispatcher.LiftedLoop)): atype = 'XXX Lifted Loop XXX' found_lifted_loop = True else: # TODO: fix parfor lowering so that typemap is valid. 
atype = self.typemap.get(inst.target.name, "<missing>") aline = "%s = %s :: %s" % (inst.target, inst.value, atype) elif isinstance(inst, ir.SetItem): atype = self.calltypes[inst] aline = "%s :: %s" % (inst, atype) else: aline = "%s" % inst groupedinst[lineno].append(" %s" % aline) return groupedinst def annotate(self): source = SourceLines(self.func_id.func) # if not source.avail: # return "Source code unavailable" groupedinst = self.prepare_annotations() # Format annotations io = StringIO() with closing(io): if source.avail: print("# File: %s" % self.filename, file=io) for num in source: srcline = source[num] ind = _getindent(srcline) print("%s# --- LINE %d --- " % (ind, num), file=io) for inst in groupedinst[num]: print('%s# %s' % (ind, inst), file=io) print(file=io) print(srcline, file=io) print(file=io) if self.lifted: print("# The function contains lifted loops", file=io) for loop in self.lifted: print("# Loop at line %d" % loop.get_source_location(), file=io) print("# Has %d overloads" % len(loop.overloads), file=io) for cres in loop.overloads.values(): print(cres.type_annotation, file=io) else: print("# Source code unavailable", file=io) for num in groupedinst: for inst in groupedinst[num]: print('%s' % (inst,), file=io) print(file=io) return io.getvalue() def html_annotate(self, outfile): # ensure that annotation information is assembled self.annotate_raw() # make a deep copy ahead of the pending mutations func_data = copy.deepcopy(self.func_data) key = 'python_indent' for this_func in func_data.values(): if key in this_func: idents = {} for line, amount in this_func[key].items(): idents[line] = '&nbsp;' * amount this_func[key] = idents key = 'ir_indent' for this_func in func_data.values(): if key in this_func: idents = {} for line, ir_id in this_func[key].items(): idents[line] = ['&nbsp;' * amount for amount in ir_id] this_func[key] = idents try: from jinja2 import Template except ImportError: raise ImportError("please install the 'jinja2' package") root = os.path.join(os.path.dirname(__file__)) template_filename = os.path.join(root, 'template.html') with open(template_filename, 'r') as template: html = template.read() template = Template(html) rendered = template.render(func_data=func_data) outfile.write(rendered) def annotate_raw(self): """ This returns "raw" annotation information i.e. it has no output format specific markup included. """ python_source = SourceLines(self.func_id.func) ir_lines = self.prepare_annotations() line_nums = [num for num in python_source] lifted_lines = [l.get_source_location() for l in self.lifted] def add_ir_line(func_data, line): line_str = line.strip() line_type = '' if line_str.endswith('pyobject'): line_str = line_str.replace('pyobject', '') line_type = 'pyobject' func_data['ir_lines'][num].append((line_str, line_type)) indent_len = len(_getindent(line)) func_data['ir_indent'][num].append(indent_len) func_key = (self.func_id.filename + ':' + str(self.func_id.firstlineno + 1), self.signature) if self.lifted_from is not None and self.lifted_from[1]['num_lifted_loops'] > 0: # This is a lifted loop function that is being compiled. Get the # numba ir for lines in loop function to use for annotating # original python function that the loop was lifted from. 
func_data = self.lifted_from[1] for num in line_nums: if num not in ir_lines.keys(): continue func_data['ir_lines'][num] = [] func_data['ir_indent'][num] = [] for line in ir_lines[num]: add_ir_line(func_data, line) if line.strip().endswith('pyobject'): func_data['python_tags'][num] = 'object_tag' # If any pyobject line is found, make sure original python # line that was marked as a lifted loop start line is tagged # as an object line instead. Lifted loop start lines should # only be marked as lifted loop lines if the lifted loop # was successfully compiled in nopython mode. func_data['python_tags'][self.lifted_from[0]] = 'object_tag' # We're done with this lifted loop, so decrement lifted loop counter. # When lifted loop counter hits zero, that means we're ready to write # out annotations to html file. self.lifted_from[1]['num_lifted_loops'] -= 1 elif func_key not in TypeAnnotation.func_data.keys(): TypeAnnotation.func_data[func_key] = {} func_data = TypeAnnotation.func_data[func_key] for i, loop in enumerate(self.lifted): # Make sure that when we process each lifted loop function later, # we'll know where it originally came from. loop.lifted_from = (lifted_lines[i], func_data) func_data['num_lifted_loops'] = self.num_lifted_loops func_data['filename'] = self.filename func_data['funcname'] = self.func_id.func_name func_data['python_lines'] = [] func_data['python_indent'] = {} func_data['python_tags'] = {} func_data['ir_lines'] = {} func_data['ir_indent'] = {} for num in line_nums: func_data['python_lines'].append((num, python_source[num].strip())) indent_len = len(_getindent(python_source[num])) func_data['python_indent'][num] = indent_len func_data['python_tags'][num] = '' func_data['ir_lines'][num] = [] func_data['ir_indent'][num] = [] for line in ir_lines[num]: add_ir_line(func_data, line) if num in lifted_lines: func_data['python_tags'][num] = 'lifted_tag' elif line.strip().endswith('pyobject'): func_data['python_tags'][num] = 'object_tag' return self.func_data def __str__(self): return self.annotate() re_longest_white_prefix = re.compile(r'^\s*') def _getindent(text): m = re_longest_white_prefix.match(text) if not m: return '' else: return ' ' * len(m.group(0))
TypeAnnotation
python
doocs__leetcode
solution/1900-1999/1952.Three Divisors/Solution.py
{ "start": 0, "end": 112 }
class ____: def isThree(self, n: int) -> bool: return sum(n % i == 0 for i in range(2, n)) == 1
Solution
python
huggingface__transformers
src/transformers/models/florence2/modeling_florence2.py
{ "start": 19684, "end": 21703 }
class ____(Florence2VisionPreTrainedModel): def __init__(self, config: Florence2VisionConfig): super().__init__(config) self.config = config self.embed_dim = config.embed_dim self.num_heads = config.num_heads self.num_groups = config.num_groups self.num_stages = len(self.embed_dim) if not (self.num_stages == len(self.num_heads) == len(self.num_groups)): raise ValueError( f"Expected self.num_stages ({self.num_stages}) == " f"len(self.num_heads) ({len(self.num_heads)}) == " f"len(self.num_groups) ({len(self.num_groups)})" ) dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths) * 2, device="cpu")] depth_offset = 0 convs = [] blocks = [] for stage_idx in range(self.num_stages): conv_embed = Florence2VisionConvEmbed( config=config, stage_idx=stage_idx, ) convs.append(conv_embed) block = nn.ModuleList( Florence2VisionBlock( config=config, stage_idx=stage_idx, spatial_drop_path_rate=dpr[depth_offset + block_idx * 2], channel_drop_path_rate=dpr[depth_offset + block_idx * 2 + 1], ) for block_idx in range(config.depths[stage_idx]) ) blocks.append(block) depth_offset += config.depths[stage_idx] * 2 self.convs = nn.ModuleList(convs) self.blocks = nn.ModuleList(blocks) # Initialize weights and apply final processing self.post_init() def forward(self, hidden_states: torch.Tensor): for conv, block in zip(self.convs, self.blocks): hidden_states = conv(hidden_states) for layer in block: hidden_states = layer(hidden_states) return hidden_states
Florence2VisionBackbone
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/ruff/RUF053.py
{ "start": 2194, "end": 2268 }
class ____[T](Generic[*_As, _A]): ... from somewhere import APublicTypeVar
C
python
ray-project__ray
python/ray/_private/runtime_env/image_uri.py
{ "start": 4808, "end": 5923 }
class ____(RuntimeEnvPlugin): """Starts worker in a container of a custom image.""" name = "image_uri" @staticmethod def get_compatible_keys(): return {"image_uri", "config", "env_vars"} def __init__(self, ray_tmp_dir: str): self._ray_tmp_dir = ray_tmp_dir async def create( self, uri: Optional[str], runtime_env: "RuntimeEnv", # noqa: F821 context: RuntimeEnvContext, logger: logging.Logger, ) -> float: if not runtime_env.image_uri(): return self.worker_path = await _create_impl(runtime_env.image_uri(), logger) def modify_context( self, uris: List[str], runtime_env: "RuntimeEnv", # noqa: F821 context: RuntimeEnvContext, logger: Optional[logging.Logger] = default_logger, ): if not runtime_env.image_uri(): return _modify_context_impl( runtime_env.image_uri(), self.worker_path, [], context, logger, self._ray_tmp_dir, )
ImageURIPlugin
python
doocs__leetcode
solution/3500-3599/3503.Longest Palindrome After Substring Concatenation I/Solution.py
{ "start": 0, "end": 968 }
class ____: def longestPalindrome(self, s: str, t: str) -> int: def expand(s: str, g: List[int], l: int, r: int): while l >= 0 and r < len(s) and s[l] == s[r]: g[l] = max(g[l], r - l + 1) l, r = l - 1, r + 1 def calc(s: str) -> List[int]: n = len(s) g = [0] * n for i in range(n): expand(s, g, i, i) expand(s, g, i, i + 1) return g m, n = len(s), len(t) t = t[::-1] g1, g2 = calc(s), calc(t) ans = max(*g1, *g2) f = [[0] * (n + 1) for _ in range(m + 1)] for i, a in enumerate(s, 1): for j, b in enumerate(t, 1): if a == b: f[i][j] = f[i - 1][j - 1] + 1 ans = max(ans, f[i][j] * 2 + (0 if i >= m else g1[i])) ans = max(ans, f[i][j] * 2 + (0 if j >= n else g2[j])) return ans
Solution
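A small hand-traced usage example for the solution above (assuming the masked class is named Solution and from typing import List is in scope): with s = "ab" and t = "a", taking "ab" from s and "a" from t yields "aba", so the expected answer is 3.

print(Solution().longestPalindrome("ab", "a"))  # -> 3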
python
doocs__leetcode
solution/1800-1899/1886.Determine Whether Matrix Can Be Obtained By Rotation/Solution2.py
{ "start": 0, "end": 262 }
class ____: def findRotation(self, mat: List[List[int]], target: List[List[int]]) -> bool: for _ in range(4): mat = [list(col) for col in zip(*mat[::-1])] if mat == target: return True return False
Solution
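The core of the check above is the zip(*mat[::-1]) idiom, which rotates a matrix 90 degrees clockwise; a standalone illustration:

mat = [[1, 2],
       [3, 4]]
rotated = [list(col) for col in zip(*mat[::-1])]  # reverse the rows, then transpose
print(rotated)  # [[3, 1], [4, 2]]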
python
doocs__leetcode
solution/3600-3699/3690.Split and Merge Array Transformation/Solution.py
{ "start": 0, "end": 854 }
class ____: def minSplitMerge(self, nums1: List[int], nums2: List[int]) -> int: n = len(nums1) target = tuple(nums2) start = tuple(nums1) q = [start] vis = set() vis.add(start) for ans in count(0): t = q q = [] for cur in t: if cur == target: return ans for l in range(n): for r in range(l, n): remain = list(cur[:l]) + list(cur[r + 1 :]) sub = cur[l : r + 1] for i in range(len(remain) + 1): nxt = tuple(remain[:i] + list(sub) + remain[i:]) if nxt not in vis: vis.add(nxt) q.append(nxt)
Solution
python
chroma-core__chroma
chromadb/execution/expression/operator.py
{ "start": 11758, "end": 12003 }
class ____(Where): """Negative regular expression matching""" key: str pattern: str def to_dict(self) -> Dict[str, Any]: return {self.key: {"$not_regex": self.pattern}} # Field proxy for building Where conditions
NotRegex
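Illustrative only, assuming NotRegex is declared as a dataclass like the other Where leaves in this module (the decorator sits outside the extracted span): it serializes to a "$not_regex" filter document.

print(NotRegex(key="title", pattern="^draft").to_dict())
# {'title': {'$not_regex': '^draft'}}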
python
apache__airflow
providers/google/tests/unit/google/cloud/triggers/test_vertex_ai.py
{ "start": 32868, "end": 39624 }
class ____: def test_serialize(self, custom_python_package_training_job_trigger): actual_data = custom_python_package_training_job_trigger.serialize() expected_data = ( "airflow.providers.google.cloud.triggers.vertex_ai.CustomPythonPackageTrainingJobTrigger", { "conn_id": TEST_CONN_ID, "project_id": TEST_PROJECT_ID, "location": TEST_LOCATION, "job_id": TEST_HPT_JOB_ID, "poll_interval": TEST_POLL_INTERVAL, "impersonation_chain": TEST_IMPERSONATION_CHAIN, }, ) actual_data == expected_data @pytest.mark.parametrize( "pipeline_state_value", [ PipelineState.PIPELINE_STATE_SUCCEEDED, PipelineState.PIPELINE_STATE_PAUSED, ], ) @pytest.mark.asyncio @mock.patch("google.cloud.aiplatform_v1.types.TrainingPipeline.to_dict") @mock.patch(VERTEX_AI_TRIGGER_PATH.format("CustomJobAsyncHook.get_training_pipeline")) @mock.patch(VERTEX_AI_TRIGGER_PATH.format("CustomJobAsyncHook.get_pipeline_service_client")) async def test_run_yields_success_event_on_successful_pipeline_state( self, mock_get_pipeline_service_client, mock_get_training_pipeline, mock_pipeline_job_dict, custom_python_package_training_job_trigger, pipeline_state_value, test_training_pipeline_name, ): mock_get_training_pipeline.return_value = types.TrainingPipeline( state=pipeline_state_value, name=test_training_pipeline_name, ) mock_pipeline_job_dict.return_value = {} expected_event = TriggerEvent( { "status": "success", "message": ( f"{custom_python_package_training_job_trigger.job_type_verbose_name} {test_training_pipeline_name} " f"completed with status {pipeline_state_value.name}" ), "job": {}, } ) actual_event = await custom_python_package_training_job_trigger.run().asend(None) mock_get_pipeline_service_client.assert_awaited_once_with(region=TEST_LOCATION) assert actual_event == expected_event @pytest.mark.parametrize( "pipeline_state_value", [ PipelineState.PIPELINE_STATE_FAILED, PipelineState.PIPELINE_STATE_CANCELLED, ], ) @pytest.mark.asyncio @mock.patch("google.cloud.aiplatform_v1.types.TrainingPipeline.to_dict") @mock.patch(VERTEX_AI_TRIGGER_PATH.format("CustomJobAsyncHook.get_training_pipeline")) @mock.patch(VERTEX_AI_TRIGGER_PATH.format("CustomJobAsyncHook.get_pipeline_service_client")) async def test_run_yields_error_event_on_failed_pipeline_state( self, mock_get_pipeline_service_client, mock_get_training_pipeline, mock_pipeline_job_dict, pipeline_state_value, custom_python_package_training_job_trigger, test_training_pipeline_name, ): mock_get_training_pipeline.return_value = types.TrainingPipeline( state=pipeline_state_value, name=test_training_pipeline_name, ) mock_pipeline_job_dict.return_value = {} expected_event = TriggerEvent( { "status": "error", "message": ( f"{custom_python_package_training_job_trigger.job_type_verbose_name} {test_training_pipeline_name} " f"completed with status {pipeline_state_value.name}" ), "job": {}, } ) actual_event = await custom_python_package_training_job_trigger.run().asend(None) mock_get_pipeline_service_client.assert_awaited_once_with(region=TEST_LOCATION) assert actual_event == expected_event @pytest.mark.parametrize( "pipeline_state_value", [ PipelineState.PIPELINE_STATE_CANCELLING, PipelineState.PIPELINE_STATE_PENDING, PipelineState.PIPELINE_STATE_QUEUED, PipelineState.PIPELINE_STATE_RUNNING, PipelineState.PIPELINE_STATE_UNSPECIFIED, ], ) @pytest.mark.asyncio @mock.patch(VERTEX_AI_TRIGGER_PATH.format("CustomJobAsyncHook.get_training_pipeline")) @mock.patch(VERTEX_AI_TRIGGER_PATH.format("CustomJobAsyncHook.get_pipeline_service_client")) async def 
test_run_test_run_loop_is_still_running_if_pipeline_is_running( self, mock_get_pipeline_service_client, mock_get_training_pipeline, pipeline_state_value, custom_python_package_training_job_trigger, ): mock_get_training_pipeline.return_value = types.TrainingPipeline(state=pipeline_state_value) task = asyncio.create_task(custom_python_package_training_job_trigger.run().__anext__()) await asyncio.sleep(0.5) mock_get_pipeline_service_client.assert_awaited_once_with(region=TEST_LOCATION) assert task.done() is False task.cancel() @pytest.mark.asyncio @mock.patch(VERTEX_AI_TRIGGER_PATH.format("CustomJobAsyncHook.get_training_pipeline")) @mock.patch(VERTEX_AI_TRIGGER_PATH.format("CustomJobAsyncHook.get_pipeline_service_client")) async def test_run_raises_exception( self, mock_get_pipeline_service_client, mock_get_training_pipeline, custom_python_package_training_job_trigger, ): mock_get_training_pipeline.side_effect = mock.AsyncMock(side_effect=Exception("Test exception")) expected_event = TriggerEvent( { "status": "error", "message": "Test exception", } ) actual_event = await custom_python_package_training_job_trigger.run().asend(None) mock_get_pipeline_service_client.assert_awaited_once_with(region=TEST_LOCATION) assert expected_event == actual_event @pytest.mark.asyncio @mock.patch(VERTEX_AI_TRIGGER_PATH.format("CustomJobAsyncHook.wait_for_training_pipeline")) async def test_wait_training_pipeline( self, mock_wait_for_training_pipeline, custom_python_package_training_job_trigger ): await custom_python_package_training_job_trigger._wait_job() mock_wait_for_training_pipeline.assert_awaited_once_with( project_id=custom_python_package_training_job_trigger.project_id, location=custom_python_package_training_job_trigger.location, pipeline_id=custom_python_package_training_job_trigger.job_id, poll_interval=custom_python_package_training_job_trigger.poll_interval, )
TestCustomPythonPackageTrainingJobTrigger
python
walkccc__LeetCode
solutions/145. Binary Tree Postorder Traversal/145.py
{ "start": 0, "end": 301 }
class ____: def postorderTraversal(self, root: TreeNode | None) -> list[int]: ans = [] def postorder(root: TreeNode | None) -> None: if not root: return postorder(root.left) postorder(root.right) ans.append(root.val) postorder(root) return ans
Solution
python
ray-project__ray
doc/source/serve/doc_code/aws_neuron_core_inference_serve_stable_diffusion.py
{ "start": 940, "end": 2049 }
class ____: def __init__(self): from optimum.neuron import NeuronStableDiffusionXLPipeline compiled_model_id = "aws-neuron/stable-diffusion-xl-base-1-0-1024x1024" self.pipe = NeuronStableDiffusionXLPipeline.from_pretrained( compiled_model_id, device_ids=[0, 1] ) async def generate(self, prompt: str): assert len(prompt), "prompt parameter cannot be empty" image = self.pipe(prompt).images[0] return image entrypoint = APIIngress.bind(StableDiffusionV2.bind()) # __neuron_serve_code_end__ if __name__ == "__main__": import requests import ray # On inf2.8xlarge instance, there are 2 Neuron cores. ray.init(resources={"neuron_cores": 2}) serve.run(entrypoint) prompt = "a zebra is dancing in the grass, river, sunlit" input = "%20".join(prompt.split(" ")) resp = requests.get(f"http://127.0.0.1:8000/imagine?prompt={input}") print("Write the response to `output.png`.") with open("output.png", "wb") as f: f.write(resp.content) assert resp.status_code == 200
StableDiffusionV2
python
jina-ai__jina
jina/helper.py
{ "start": 30200, "end": 31016 }
class ____: """The decorator to cache property of a class.""" def __init__(self, func): """ Create the :class:`cached_property`. :param func: Cached function. """ self.func = func def __get__(self, obj, cls): cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None) if cached_value is not None: return cached_value value = obj.__dict__[f'CACHED_{self.func.__name__}'] = self.func(obj) return value def __delete__(self, obj): cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None) if cached_value is not None: if hasattr(cached_value, 'close'): cached_value.close() del obj.__dict__[f'CACHED_{self.func.__name__}']
cached_property
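A minimal usage sketch for the descriptor above (the Example class and its slow_value method are hypothetical). The value is stored on the instance under the CACHED_ prefix, so the function body runs only once per instance until the attribute is deleted:

class Example:
    @cached_property
    def slow_value(self):
        print("computing once")
        return 42

e = Example()
print(e.slow_value)  # prints "computing once", then 42
print(e.slow_value)  # 42, served from e.__dict__["CACHED_slow_value"]
del e.slow_value     # __delete__ closes the cached value if it has close(), then drops it
print(e.slow_value)  # recomputed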
python
pytorch__pytorch
test/functorch/test_eager_transforms.py
{ "start": 110334, "end": 122506 }
class ____(TestCase): def test_deprecation_vmap(self, device): # functorch version of the API is deprecated with self.assertWarnsRegex(FutureWarning, "Please use `torch.vmap`"): vmap(torch.sin) # the non-functorch version is not deprecated with warnings.catch_warnings(): warnings.simplefilter("error") torch.vmap(torch.sin) # Some of these pass, some of these don't @parametrize( "transform", ["grad", "jacrev", "jacfwd", "grad_and_value", "hessian", "functionalize"], ) def test_deprecation_transforms(self, device, transform): api = getattr(functorch, transform) new_api = getattr(torch.func, transform) # functorch version of the API is deprecated with self.assertWarnsRegex( FutureWarning, f"Please use `torch.func.{transform}`" ): api(torch.sin) # the non-functorch version is not deprecated with warnings.catch_warnings(): warnings.simplefilter("error") new_api(torch.sin) def test_grad_grad(self, device): x = torch.randn([], device=device) y = grad(grad(torch.sin))(x) self.assertEqual(y, -x.sin()) def test_grad_vmap(self, device): def foo(x): y = vmap(torch.sin)(x) return y.sum() x = torch.randn(3, device=device) y = grad(foo)(x) self.assertEqual(y, x.cos()) def test_grad_vjp(self, device): x = torch.randn(3, device=device) def foo(x): _, vjp_fn = vjp(torch.sin, x) return vjp_fn(x)[0].sum() y = grad(foo)(x) expected = grad(lambda x: (x * x.cos()).sum())(x) self.assertEqual(y, expected) def test_vmap_grad(self, device): x = torch.randn(3, device=device) y = vmap(grad(torch.sin))(x) self.assertEqual(y, x.cos()) def test_vmap_vmap(self, device): x = torch.randn(2, 3, device=device) y = vmap(vmap(torch.sin))(x) self.assertEqual(y, x.sin()) def test_vmap_vjp(self, device): x = torch.randn(3, device=device) _, vjp_fn = vjp(torch.sin, x) def foo(x): _, vjp_fn = vjp(torch.sin, x) return vjp_fn(x) y = vmap(foo)(x) self.assertEqual(y, vjp_fn(x)) # TODO: there's a very interesting error message when the following # is on CPU xs = torch.randn(5, 3, device=device) expected = torch.stack([vjp_fn(x)[0] for x in xs]) result = vmap(lambda x: vjp_fn(x)[0])(xs) self.assertEqual(result, expected) def test_vjp_grad(self, device): x = torch.randn([], device=device) y, vjp_fn = vjp(grad(torch.sin), x) self.assertEqual(y, x.cos()) v = torch.randn([]) self.assertEqual(vjp_fn(v)[0], -x.sin() * v) def test_vjp_vmap(self, device): x = torch.randn(3, device=device) y, vjp_fn = vjp(vmap(torch.sin), x) self.assertEqual(y, x.sin()) v = torch.randn(3, device=device) self.assertEqual(vjp_fn(v)[0], x.cos() * v) def test_vjp_vjp(self, device): x = torch.randn(3, device=device) y, vjp_fn = vjp(torch.sin, x) self.assertEqual(y, x.sin()) y, vjp_fn = vjp(lambda x: vjp_fn(x)[0], x) self.assertEqual(y, x * x.cos()) y = vjp_fn(x)[0] # Honestly IDK what the result here is... 
but at least it runs def test_make_fx_vmap(self, device): def f(x): return torch.sin(x) inp = torch.randn(5, 3) f = vmap(f) fx_f = make_fx(f)(inp) new_inp = torch.randn(5, 3) self.assertEqual(fx_f(new_inp), f(new_inp)) def test_make_fx_jacrev(self, device): def f(x): return x.sin().sum() inp = torch.randn(3) f = jacrev(jacrev(f)) fx_f = make_fx(f)(inp) new_inp = torch.randn(3) self.assertEqual(fx_f(new_inp), f(new_inp)) def test_make_fx_vjp(self, device): def f(x): return torch.sin(x).sum() primals = torch.randn(3) _, vjp_fn = vjp(f, primals) cotangent = torch.randn(()) fx_f = make_fx(vjp_fn)(cotangent, True, True) new_cotangent = torch.randn(()) self.assertEqual(fx_f(new_cotangent, True, True), vjp_fn(new_cotangent)) # FIXME: test fails in Windows @unittest.skipIf(IS_WINDOWS, "fails in Windows; needs investigation") @unittest.skipIf(IS_FBCODE, "can't subprocess in fbcode") # it is redundant to run this test twice on a machine that has GPUs @onlyCPU def test_no_warning_on_import_functorch(self, device): out = subprocess.check_output( [sys.executable, "-W", "always", "-c", "import functorch"], stderr=subprocess.STDOUT, cwd=os.path.dirname(os.path.realpath(__file__)), ).decode("utf-8") self.assertEqual(out, "") def test_requires_grad_inside_transform(self, device): def f(x): x.requires_grad_() return x.sin().sum() x = torch.randn(3) with self.assertRaisesRegex(RuntimeError, "Tensor.requires_grad_()"): vmap(f)(x) with self.assertRaisesRegex(RuntimeError, "Tensor.requires_grad_()"): grad(f)(x) with self.assertRaisesRegex(RuntimeError, "Tensor.requires_grad_()"): vmap(grad(f))(x) x = torch.randn([]) with self.assertRaisesRegex(RuntimeError, "Tensor.requires_grad_()"): grad(grad(f))(x) def test_retain_grad_inside_transform(self, device): def f(x): y = x.sin() y.retain_grad() return y.sum() x = torch.randn(3) with self.assertRaisesRegex(RuntimeError, "Tensor.retain_grad()"): grad(f)(x) def test_autograd_functional_jacrev_inside_transform(self, device): def f(x): y = torch.autograd.functional.jacobian(lambda x: x.sin().sum(), x) return y B = 5 x = torch.randn(B, 3) with self.assertRaisesRegex(RuntimeError, "torch.autograd.functional"): vmap(f)(x) x = torch.randn([]) with self.assertRaisesRegex(RuntimeError, "torch.autograd.functional"): grad(f)(x) def test_autograd_functional_vjp_inside_transform(self, device): def f(x): y = torch.autograd.functional.vjp(lambda x: x.sin().sum(), x) return y B = 5 x = torch.randn(B, 3) with self.assertRaisesRegex(RuntimeError, "torch.autograd.functional"): vmap(f)(x) x = torch.randn([]) with self.assertRaisesRegex(RuntimeError, "torch.autograd.functional"): grad(f)(x) def test_autograd_functional_jvp_inside_transform(self, device): def f(x): t = torch.ones_like(x) y = torch.autograd.functional.jvp(lambda x: x.sin().sum(), (x,), (t,)) return y B = 5 x = torch.randn(B, 3) with self.assertRaisesRegex(RuntimeError, "torch.autograd.functional"): vmap(f)(x) x = torch.randn([]) with self.assertRaisesRegex(RuntimeError, "torch.autograd.functional"): grad(f)(x) def test_autograd_functional_jacfwd_inside_transform(self, device): def f(x): y = torch.autograd.functional.jacobian( lambda x: x.sin().sum(), x, strategy="forward-mode", vectorize=True ) return y B = 5 x = torch.randn(B, 3) with self.assertRaisesRegex( RuntimeError, "Batching rule not implemented for aten::_make_dual" ): vmap(f)(x) @parametrize( "transform", [ "vmap", "grad", "jacrev", "jacfwd", "grad_and_value", "hessian", "functionalize", ], ) def test_autograd_function_no_setup_context(self, device, transform): 
class MySin(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return x.sin() @staticmethod def backward(ctx, gy): (x,) = ctx.saved_tensors return gy * x.cos() x = torch.randn(3, device=device) transform = getattr(functorch, transform) with self.assertRaisesRegex(RuntimeError, "must override the setup_context"): transform(MySin.apply)(x) # Some of these pass, some of these don't @parametrize( "transform", [ "grad", "jacrev", "grad_and_value", "hessian", ], ) def test_transforms_dont_support_saved_tensor_hooks(self, device, transform): def f(x): return torch.sin(x).sum() def g(x): with torch.autograd.graph.save_on_cpu(): return f(x) x = torch.randn(3, device=device) if transform == "functionalize": transform = functorch.experimental.functionalize else: transform = getattr(functorch, transform) with self.assertRaisesRegex(RuntimeError, "saved tensor hooks"): with torch.autograd.graph.save_on_cpu(): transform(f)(x) with self.assertRaisesRegex(RuntimeError, "saved tensor hooks"): transform(g)(x) def test_vjp_doesnt_support_saved_tensor_hooks(self, device): def f(x): return torch.sin(x).sum() def g(x): with torch.autograd.graph.save_on_cpu(): return f(x) x = torch.randn(3, device=device) with self.assertRaisesRegex(RuntimeError, "saved tensor hooks"): with torch.autograd.graph.save_on_cpu(): vjp(f, x) with self.assertRaisesRegex(RuntimeError, "saved tensor hooks"): vjp(g, x) def test_jvp_supports_saved_tensor_hooks(self, device): def f(x): return torch.sin(x).sum() def g(x): with torch.autograd.graph.save_on_cpu(): return f(x) x = torch.randn(3, device=device) t = torch.randn(3, device=device) # smoke tests with torch.autograd.graph.save_on_cpu(): jvp(f, (x,), (t,)) # smoke tests jvp(g, (x,), (t,)) def test_can_use_functionalize_when_key_is_excluded(self, device): def f(x): y = x.clone() y.sin_() return y x = torch.randn([], device=device) expected = f(x) with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)): gm = make_fx(functorch.functionalize(f))(x) self.assertTrue("sin_" not in gm.code) self.assertEqual(gm(x), expected) local_exclude_set = torch._C._dispatch_tls_local_exclude_set() self.assertTrue(local_exclude_set.has(DispatchKey.Functionalize)) def test_can_use_vmap_when_key_is_excluded(self, device): def f(x): return x.sum(0) x = torch.randn(3, device=device) expected = vmap(f)(x) with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.FuncTorchBatched)): result = vmap(f)(x) self.assertEqual(result, expected) local_exclude_set = torch._C._dispatch_tls_local_exclude_set() self.assertTrue(local_exclude_set.has(DispatchKey.FuncTorchBatched)) def test_can_use_grad_when_key_is_excluded(self, device): def f(x): return x.sin() x = torch.randn([], device=device) expected = grad(f)(x) with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Autograd)): result = grad(f)(x) self.assertEqual(result, expected) local_exclude_set = torch._C._dispatch_tls_local_exclude_set() self.assertTrue(local_exclude_set.has(DispatchKey.Autograd)) @markDynamoStrictTest
TestComposability
python
pypa__warehouse
tests/unit/test_views.py
{ "start": 13699, "end": 14518 }
class ____: def test_index(self, db_request): project = ProjectFactory.create() release1 = ReleaseFactory.create(project=project) release1.created = datetime.date(2011, 1, 1) release2 = ReleaseFactory.create(project=project) release2.created = datetime.date(2012, 1, 1) FileFactory.create( release=release1, filename=f"{project.name}-{release1.version}.tar.gz", python_version="source", ) UserFactory.create() # Make sure that the task to update the database counts has been # called. compute_row_counts(db_request) assert index(db_request) == { "num_projects": 1, "num_users": 3, "num_releases": 2, "num_files": 1, }
TestIndex
python
kamyu104__LeetCode-Solutions
Python/check-if-all-the-integers-in-a-range-are-covered.py
{ "start": 1035, "end": 1325 }
class ____(object): def isCovered(self, ranges, left, right): """ :type ranges: List[List[int]] :type left: int :type right: int :rtype: bool """ return all(any(l <= i <= r for l, r in ranges) for i in xrange(left, right+1))
Solution3
python
tiangolo__fastapi
tests/test_pydantic_v1_v2_multifile/modelsv2b.py
{ "start": 65, "end": 115 }
class ____(BaseModel): dup_sub_name: str
SubItem
python
facebookresearch__faiss
tests/test_refine.py
{ "start": 4613, "end": 6261 }
class ____(unittest.TestCase): def do_test(self, factory_string): d = 32 radius = 8 ds = datasets.SyntheticDataset(d, 1024, 512, 256) index = faiss.index_factory(d, factory_string) index.train(ds.get_train()) index.add(ds.get_database()) xq = ds.get_queries() xb = ds.get_database() # perform a range_search lims_1, D1, I1 = index.range_search(xq, radius) # create a baseline (FlatL2) index_flat = faiss.IndexFlatL2(d) index_flat.train(ds.get_train()) index_flat.add(ds.get_database()) lims_ref, Dref, Iref = index_flat.range_search(xq, radius) # add a refine index on top of the index index_r = faiss.IndexRefine(index, index_flat) lims_2, D2, I2 = index_r.range_search(xq, radius) # validate: refined range_search() keeps indices untouched precision_1, recall_1 = evaluation.range_PR(lims_ref, Iref, lims_1, I1) precision_2, recall_2 = evaluation.range_PR(lims_ref, Iref, lims_2, I2) self.assertAlmostEqual(recall_1, recall_2) # validate: refined range_search() updates distances, and new distances are correct L2 distances for iq in range(0, ds.nq): start_lim = lims_2[iq] end_lim = lims_2[iq + 1] for i_lim in range(start_lim, end_lim): idx = I2[i_lim] l2_dis = np.sum(np.square(xq[iq : iq + 1,] - xb[idx : idx + 1,])) self.assertAlmostEqual(l2_dis, D2[i_lim], places=4) def test_refine_1(self): self.do_test("SQ4")
TestIndexRefineRangeSearch
python
doocs__leetcode
lcof2/剑指 Offer II 119. 最长连续序列/Solution2.py
{ "start": 0, "end": 306 }
class ____: def longestConsecutive(self, nums: List[int]) -> int: s = set(nums) ans = 0 for x in nums: if x - 1 not in s: y = x + 1 while y in s: y += 1 ans = max(ans, y - x) return ans
Solution
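A quick usage check for the set-based approach above (assuming the masked class is named Solution and from typing import List is imported): only numbers whose predecessor is absent start a scan, which keeps the whole pass linear.

print(Solution().longestConsecutive([100, 4, 200, 1, 3, 2]))  # -> 4 (the run 1, 2, 3, 4)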
python
run-llama__llama_index
llama-index-integrations/vector_stores/llama-index-vector-stores-lancedb/tests/test_vector_stores_lancedb.py
{ "start": 888, "end": 12714 }
class ____(MockEmbedding): async def _aget_text_embedding(self, text: str) -> list[float]: if text == "test1": return [0.1, 0.2, 0.3] elif text == "test2": return [0.4, 0.5, 0.7] elif text == "test3": return [0.6, 0.2, 0.1] return self._get_vector() async def _aget_query_embedding(self, query: str) -> list[float]: return await self._aget_text_embedding(text=query) def _get_query_embedding(self, query: str) -> list[float]: """Get query embedding.""" return self._get_text_embedding(text=query) def _get_text_embedding(self, text: str) -> list[float]: """Get text embedding.""" if text == "test1": return [0.1, 0.2, 0.3] elif text == "test2": return [0.4, 0.5, 0.6] elif text == "test3": return [0.6, 0.2, 0.1] return self._get_vector() @pytest.fixture(scope="module") def embed_model() -> BaseEmbedding: embed_model = TmpMockEmbedding(embed_dim=3) embed_model.callback_manager = Settings.callback_manager return embed_model @pytest.fixture(scope="module") def text_node_list(embed_model) -> list[TextNode]: nodes = [ TextNode( text="test1", id_="11111111-1111-1111-1111-111111111111", relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")}, ), TextNode( text="test2", id_="22222222-2222-2222-2222-222222222222", relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-1")}, ), TextNode( text="test3", id_="33333333-3333-3333-3333-333333333333", relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-2")}, ), ] for node in nodes: node_embedding = embed_model.get_text_embedding(node.text) node.embedding = node_embedding return nodes def test_class(): names_of_base_classes = [b.__name__ for b in LanceDBVectorStore.__mro__] assert BasePydanticVectorStore.__name__ in names_of_base_classes @pytest.mark.skipif( deps is None, reason="Need to install lancedb locally to run this test.", ) @pytest.mark.parametrize("mode", ["overwrite", "create"]) def test_vector_store_init_create_pass(tmp_path: Path, mode: str) -> None: # given # when vector_store = LanceDBVectorStore(uri=str(tmp_path / "test_lancedb"), mode=mode) # then assert vector_store._table is None with pytest.raises(TableNotFoundError): vector_store.table @pytest.mark.skipif( deps is None, reason="Need to install lancedb locally to run this test.", ) @pytest.mark.parametrize("mode", ["overwrite", "create", "append"]) def test_vector_store_init_table_exists(tmp_path: Path, mode: str) -> None: # given connection = lancedb.connect(str(tmp_path / "test_lancedb")) connection.create_table(name="test_table", schema=TestModel) # when vector_store = LanceDBVectorStore( mode=mode, table_name="test_table", connection=connection ) # then assert isinstance(vector_store._table, lancedb.db.LanceTable) assert isinstance(vector_store.table, lancedb.db.LanceTable) @pytest.mark.skipif( deps is None, reason="Need to install lancedb locally to run this test.", ) def test_vector_store_init_append_error(tmp_path: Path) -> None: # given connection = lancedb.connect(str(tmp_path / "test_lancedb")) # when & then with pytest.raises(TableNotFoundError): LanceDBVectorStore( mode="append", table_name="test_table", connection=connection ) @pytest.mark.skipif( deps is None, reason="Need to install lancedb locally to run this test.", ) def test_vector_store_from_table(tmp_path: Path) -> None: # given connection = lancedb.connect(str(tmp_path / "test_lancedb")) table = connection.create_table(name="test_table", schema=TestModel) # when vector_store = LanceDBVectorStore.from_table(table=table) # then assert isinstance(vector_store, 
LanceDBVectorStore) assert isinstance(vector_store._table, lancedb.db.LanceTable) assert vector_store._table.name == table.name assert connection == table._conn @pytest.mark.skipif( deps is None, reason="Need to install lancedb locally to run this test.", ) def test_table_exists(tmp_path: Path) -> None: # given connection = lancedb.connect(str(tmp_path / "test_lancedb")) table = connection.create_table(name="test_table", schema=TestModel) # when vector_store = LanceDBVectorStore.from_table(table=table) # then assert vector_store._table_exists(tbl_name=table.name) assert not vector_store._table_exists(tbl_name="non_existent_table") @pytest.mark.skipif( deps is None, reason="Need to install lancedb locally to run this test.", ) def test_create_index_pass(tmp_path: Path, text_node_list: list[TextNode]) -> None: # given vector_store = LanceDBVectorStore( uri=str(tmp_path / "test_lancedb"), mode="overwrite" ) vector_store.add(text_node_list) # when vector_store.create_index(num_partitions=2, index_type="IVF_FLAT", sample_rate=2) # then assert isinstance(vector_store._table, lancedb.db.LanceTable) assert vector_store._table.list_indices() @pytest.mark.skipif( deps is None, reason="Need to install lancedb locally to run this test.", ) def test_add(tmp_path: Path, text_node_list: list[TextNode]) -> None: # given vector_store = LanceDBVectorStore( uri=str(tmp_path / "test_lancedb"), mode="overwrite" ) # when vector_store.add(text_node_list) # then assert vector_store._table_exists() assert vector_store._table.count_rows() == len(text_node_list) @pytest.mark.skipif( deps is None, reason="Need to install lancedb locally to run this test.", ) def test_delete(tmp_path: Path, text_node_list: list[TextNode]) -> None: # given vector_store = LanceDBVectorStore( uri=str(tmp_path / "test_lancedb"), mode="overwrite" ) vector_store.add(text_node_list) # when vector_store.delete(ref_doc_id="test-0") # then assert vector_store._table.count_rows() == len(text_node_list) - 1 @pytest.mark.skipif( deps is None, reason="Need to install lancedb locally to run this test.", ) def test_delete_nodes(tmp_path: Path, text_node_list: list[TextNode]) -> None: # given vector_store = LanceDBVectorStore( uri=str(tmp_path / "test_lancedb"), mode="overwrite" ) vector_store.add(text_node_list) # when vector_store.delete_nodes(node_ids=[text_node_list[0].id_, text_node_list[1].id_]) # then assert vector_store._table.count_rows() == len(text_node_list) - 2 @pytest.mark.skipif( deps is None, reason="Need to install lancedb locally to run this test.", ) def test_get_nodes(tmp_path: Path, text_node_list: list[TextNode]) -> None: # given vector_store = LanceDBVectorStore( uri=str(tmp_path / "test_lancedb"), mode="overwrite" ) vector_store.add(text_node_list) # when retieved_nodes = vector_store.get_nodes( node_ids=[text_node_list[0].id_, text_node_list[1].id_] ) # then assert len(retieved_nodes) == 2 assert retieved_nodes[0].id_ == text_node_list[0].id_ assert retieved_nodes[1].id_ == text_node_list[1].id_ @pytest.mark.skipif( deps is None, reason="Need to install lancedb locally to run this test.", ) def test_vector_query( tmp_path: Path, text_node_list: list[TextNode], embed_model ) -> None: # given vector_store = LanceDBVectorStore( uri=str(tmp_path / "test_lancedb"), mode="overwrite" ) vector_store.add(text_node_list) index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model) retriever = index.as_retriever(similarity_top_k=1) # when response = retriever.retrieve("test1") # then assert len(response) == 1 assert 
response[0].id_ == "11111111-1111-1111-1111-111111111111" @pytest.mark.skipif( deps is None, reason="Need to install lancedb and huggingface locally to run this test.", ) def test_fts_query(tmp_path: Path, text_node_list: list[TextNode], embed_model) -> None: # given vector_store = LanceDBVectorStore( uri=str(tmp_path / "test_lancedb"), mode="overwrite" ) vector_store.add(text_node_list) index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model) # when response = index.as_retriever(vector_store_kwargs={"query_type": "fts"}).retrieve( "test1" ) # then assert len(response) == 1 assert response[0].id_ == "11111111-1111-1111-1111-111111111111" @pytest.mark.skipif( deps is None, reason="Need to install lancedb and huggingface locally to run this test.", ) def test_hybrid_query( tmp_path: Path, text_node_list: list[TextNode], embed_model ) -> None: # given vector_store = LanceDBVectorStore( uri=str(tmp_path / "test_lancedb"), mode="overwrite" ) vector_store.add(text_node_list) index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model) # when response = index.as_retriever( vector_store_kwargs={"query_type": "hybrid"}, similarity_top_k=3 ).retrieve("test1") # then assert len(response) == 3 assert response[0].id_ == "11111111-1111-1111-1111-111111111111" @pytest.mark.skipif( deps is None, reason="Need to install lancedb locally to run this test.", ) @pytest.mark.parametrize( ("method", "kwargs"), [ ("create_index", {}), ("delete", {"ref_doc_id": "test-0"}), ("delete_nodes", {"node_ids": []}), ("get_nodes", {}), ("query", {"query": VectorStoreQuery()}), ], ) def test_method_table_error( tmp_path: Path, method: str, kwargs: dict[str, Any] ) -> None: # given vector_store = LanceDBVectorStore( uri=str(tmp_path / "test_lancedb"), mode="overwrite" ) # when with pytest.raises(TableNotFoundError): getattr(vector_store, method)(**kwargs) @pytest.mark.skipif( deps is None, reason="Need to install lancedb and huggingface locally to run this test.", ) @pytest.mark.parametrize("query_type", ["fts", "hybrid"]) def test_fts_index_ready_flag( tmp_path: Path, text_node_list: list[TextNode], embed_model, query_type: str ) -> None: # given vector_store = LanceDBVectorStore( uri=str(tmp_path / "test_lancedb"), mode="overwrite", ) vector_store.add(text_node_list) index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model) # then - flag should be False initially assert vector_store._fts_index_ready is False # when - perform FTS | Hybrid query index.as_retriever(vector_store_kwargs={"query_type": query_type}).retrieve("test1") # then - flag should be True after FTS index is created assert vector_store._fts_index_ready is True # when - add more data new_node = TextNode( text="test4", id_="44444444-4444-4444-4444-444444444444", relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-3")}, ) new_node.embedding = embed_model.get_text_embedding(new_node.text) vector_store.add([new_node]) # then - flag should be False after adding data assert vector_store._fts_index_ready is False # when - perform another FTS | Hybrid query index.as_retriever(vector_store_kwargs={"query_type": query_type}).retrieve("test4") # then - flag should be True again after FTS index is recreated assert vector_store._fts_index_ready is True
TmpMockEmbedding
python
readthedocs__readthedocs.org
readthedocs/config/models.py
{ "start": 426, "end": 789 }
class ____(BaseModel):

    """
    Base class for all the models used in the configuration object.

    Useful to define common configuration options for all the models.
    """

    model_config = ConfigDict(
        # Don't allow extra fields in the models.
        # It will raise an error if there are extra fields.
        extra="forbid",
    )
ConfigBaseModel
python
redis__redis-py
redis/multidb/healthcheck.py
{ "start": 4124, "end": 5356 }
class ____(AbstractHealthCheckPolicy):
    """
    Policy that returns True if at least one health check probe is successful.
    """

    def __init__(self, health_check_probes: int, health_check_delay: float):
        super().__init__(health_check_probes, health_check_delay)

    def execute(self, health_checks: List[HealthCheck], database) -> bool:
        is_healthy = False

        for health_check in health_checks:
            exception = None

            for attempt in range(self.health_check_probes):
                try:
                    if health_check.check_health(database):
                        is_healthy = True
                        break
                    else:
                        is_healthy = False
                except Exception as e:
                    exception = UnhealthyDatabaseException(
                        "Unhealthy database", database, e
                    )

                if attempt < self.health_check_probes - 1:
                    sleep(self._health_check_delay)

            if not is_healthy and not exception:
                return is_healthy
            elif not is_healthy and exception:
                raise exception

        return is_healthy
HealthyAnyPolicy
python
bottlepy__bottle
test/test_importhook.py
{ "start": 70, "end": 1290 }
class ____(unittest.TestCase):

    def make_module(self, name, **args):
        mod = sys.modules.setdefault(name, bottle.new_module(name))
        mod.__file__ = '<virtual %s>' % name
        mod.__dict__.update(**args)
        return mod

    def test_direkt_import(self):
        mod = self.make_module('bottle_test')
        import bottle.ext.test
        self.assertEqual(bottle.ext.test, mod)

    def test_from_import(self):
        mod = self.make_module('bottle_test')
        from bottle.ext import test
        self.assertEqual(test, mod)

    def test_data_import(self):
        mod = self.make_module('bottle_test', item='value')
        from bottle.ext.test import item
        self.assertEqual(item, 'value')

    def test_import_fail(self):
        ''' Test a simple static page with this server adapter. '''
        def test():
            import bottle.ext.doesnotexist
        self.assertRaises(ImportError, test)

    def test_ext_isfile(self):
        ''' The virtual module needs a valid __file__ attribute.
            If not, the Google app engine development server crashes on windows. '''
        from bottle import ext
        self.assertTrue(os.path.isfile(ext.__file__))
TestImportHooks
python
huggingface__transformers
tests/models/eomt/test_modeling_eomt.py
{ "start": 3741, "end": 9222 }
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (EomtForUniversalSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": EomtForUniversalSegmentation} if is_torch_available() else {}

    is_encoder_decoder = False
    test_missing_keys = False
    test_torch_exportable = False

    def setUp(self):
        self.model_tester = EomtForUniversalSegmentationTester(self)
        self.config_tester = ConfigTester(self, config_class=EomtConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model_with_labels(self):
        size = (self.model_tester.image_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        config = self.model_tester.get_config()

        model = EomtForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # Check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="EoMT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EoMT does not have a get_input_embeddings method")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="EoMT is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="EoMT does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            self.skipTest(reason="ModelTester is not configured to run training tests")

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_training()
            config.return_dict = True

            model = model_class(config)
            model.to(torch_device)
            model.train()

            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

            loss = model(**inputs).loss
            loss.backward()


@require_torch
EomtForUniversalSegmentationTest
python
python__mypy
mypy/checker.py
{ "start": 392623, "end": 403043 }
class ____(TransformVisitor): def __init__(self, map: dict[TypeVarId, Type]) -> None: super().__init__() self.map = map def type(self, type: Type) -> Type: return expand_type(type, self.map) def are_argument_counts_overlapping(t: CallableType, s: CallableType) -> bool: """Can a single call match both t and s, based just on positional argument counts?""" min_args = max(t.min_args, s.min_args) max_args = min(t.max_possible_positional_args(), s.max_possible_positional_args()) return min_args <= max_args def expand_callable_variants(c: CallableType) -> list[CallableType]: """Expand a generic callable using all combinations of type variables' values/bounds.""" for tv in c.variables: # We need to expand self-type before other variables, because this is the only # type variable that can have other type variables in the upper bound. if tv.id.is_self(): c = expand_type(c, {tv.id: tv.upper_bound}).copy_modified( variables=[v for v in c.variables if not v.id.is_self()] ) break if not c.is_generic(): # Fast path. return [c] tvar_values = [] for tvar in c.variables: if isinstance(tvar, TypeVarType) and tvar.values: tvar_values.append(tvar.values) else: tvar_values.append([tvar.upper_bound]) variants = [] for combination in itertools.product(*tvar_values): tvar_map = {tv.id: subst for (tv, subst) in zip(c.variables, combination)} variants.append(expand_type(c, tvar_map).copy_modified(variables=[])) return variants def is_unsafe_overlapping_overload_signatures( signature: CallableType, other: CallableType, class_type_vars: list[TypeVarLikeType], partial_only: bool = True, ) -> bool: """Check if two overloaded signatures are unsafely overlapping or partially overlapping. We consider two functions 's' and 't' to be unsafely overlapping if three conditions hold: 1. s's parameters are partially overlapping with t's. i.e. there are calls that are valid for both signatures. 2. for these common calls, some of t's parameters types are wider that s's. 3. s's return type is NOT a subset of t's. Note that we use subset rather than subtype relationship in these checks because: * Overload selection happens at runtime, not statically. * This results in more lenient behavior. This can cause false negatives (e.g. if overloaded function returns an externally visible attribute with invariant type), but such situations are rare. In general, overloads in Python are generally unsafe, so we intentionally try to avoid giving non-actionable errors (see more details in comments below). Assumes that 'signature' appears earlier in the list of overload alternatives then 'other' and that their argument counts are overlapping. """ # Try detaching callables from the containing class so that all TypeVars # are treated as being free, i.e. the signature is as seen from inside the class, # where "self" is not yet bound to anything. signature = detach_callable(signature, class_type_vars) other = detach_callable(other, class_type_vars) # Note: We repeat this check twice in both directions compensate for slight # asymmetries in 'is_callable_compatible'. for sig_variant in expand_callable_variants(signature): for other_variant in expand_callable_variants(other): # Using only expanded callables may cause false negatives, we can add # more variants (e.g. using inference between callables) in the future. 
if is_subset_no_promote(sig_variant.ret_type, other_variant.ret_type): continue if not ( is_callable_compatible( sig_variant, other_variant, is_compat=is_overlapping_types_for_overload, check_args_covariantly=False, is_proper_subtype=False, is_compat_return=lambda l, r: not is_subset_no_promote(l, r), allow_partial_overlap=True, ) or is_callable_compatible( other_variant, sig_variant, is_compat=is_overlapping_types_for_overload, check_args_covariantly=True, is_proper_subtype=False, is_compat_return=lambda l, r: not is_subset_no_promote(r, l), allow_partial_overlap=True, ) ): continue # Using the same `allow_partial_overlap` flag as before, can cause false # negatives in case where star argument is used in a catch-all fallback overload. # But again, practicality beats purity here. if not partial_only or not is_callable_compatible( other_variant, sig_variant, is_compat=is_subset_no_promote, check_args_covariantly=True, is_proper_subtype=False, ignore_return=True, allow_partial_overlap=True, ): return True return False def detach_callable(typ: CallableType, class_type_vars: list[TypeVarLikeType]) -> CallableType: """Ensures that the callable's type variables are 'detached' and independent of the context. A callable normally keeps track of the type variables it uses within its 'variables' field. However, if the callable is from a method and that method is using a class type variable, the callable will not keep track of that type variable since it belongs to the class. """ if not class_type_vars: # Fast path, nothing to update. return typ return typ.copy_modified(variables=list(typ.variables) + class_type_vars) def overload_can_never_match(signature: CallableType, other: CallableType) -> bool: """Check if the 'other' method can never be matched due to 'signature'. This can happen if signature's parameters are all strictly broader then other's parameters. Assumes that both signatures have overlapping argument counts. """ # The extra erasure is needed to prevent spurious errors # in situations where an `Any` overload is used as a fallback # for an overload with type variables. The spurious error appears # because the type variables turn into `Any` during unification in # the below subtype check and (surprisingly?) `is_proper_subtype(Any, Any)` # returns `True`. # TODO: find a cleaner solution instead of this ad-hoc erasure. exp_signature = expand_type( signature, {tvar.id: erase_def_to_union_or_bound(tvar) for tvar in signature.variables} ) return is_callable_compatible( exp_signature, other, is_compat=is_more_precise, is_proper_subtype=True, ignore_return=True ) def is_more_general_arg_prefix(t: FunctionLike, s: FunctionLike) -> bool: """Does t have wider arguments than s?""" # TODO should an overload with additional items be allowed to be more # general than one with fewer items (or just one item)? 
if isinstance(t, CallableType): if isinstance(s, CallableType): return is_callable_compatible( t, s, is_compat=is_proper_subtype, is_proper_subtype=True, ignore_return=True ) elif isinstance(t, FunctionLike): if isinstance(s, FunctionLike): if len(t.items) == len(s.items): return all( is_same_arg_prefix(items, itemt) for items, itemt in zip(t.items, s.items) ) return False def is_same_arg_prefix(t: CallableType, s: CallableType) -> bool: return is_callable_compatible( t, s, is_compat=is_same_type, is_proper_subtype=True, ignore_return=True, check_args_covariantly=True, ignore_pos_arg_names=True, ) def infer_operator_assignment_method(typ: Type, operator: str) -> tuple[bool, str]: """Determine if operator assignment on given value type is in-place, and the method name. For example, if operator is '+', return (True, '__iadd__') or (False, '__add__') depending on which method is supported by the type. """ typ = get_proper_type(typ) method = operators.op_methods[operator] existing_method = None if isinstance(typ, Instance): existing_method = _find_inplace_method(typ, method, operator) elif isinstance(typ, TypedDictType): existing_method = _find_inplace_method(typ.fallback, method, operator) if existing_method is not None: return True, existing_method return False, method def _find_inplace_method(inst: Instance, method: str, operator: str) -> str | None: if operator in operators.ops_with_inplace_method: inplace_method = "__i" + method[2:] if inst.type.has_readable_member(inplace_method): return inplace_method return None def is_valid_inferred_type( typ: Type, options: Options, is_lvalue_final: bool = False, is_lvalue_member: bool = False ) -> bool: """Is an inferred type valid and needs no further refinement? Examples of invalid types include the None type (when we are not assigning None to a final lvalue) or List[<uninhabited>]. When not doing strict Optional checking, all types containing None are invalid. When doing strict Optional checking, only None and types that are incompletely defined (i.e. contain UninhabitedType) are invalid. """ proper_type = get_proper_type(typ) if isinstance(proper_type, NoneType): # If the lvalue is final, we may immediately infer NoneType when the # initializer is None. # # If not, we want to defer making this decision. The final inferred # type could either be NoneType or an Optional type, depending on # the context. This resolution happens in leave_partial_types when # we pop a partial types scope. return is_lvalue_final or (not is_lvalue_member and options.allow_redefinition_new) elif isinstance(proper_type, UninhabitedType): return False return not typ.accept(InvalidInferredTypes())
TypeTransformVisitor
python
eth-brownie__brownie
brownie/network/middlewares/ganache7.py
{ "start": 195, "end": 2056 }
class ____(BrownieMiddlewareABC):
    @classmethod
    def get_layer(cls, w3: Web3, network_type: str) -> Optional[int]:
        return -100 if w3.client_version.lower().startswith("ganache/v7") else None

    def process_request(
        self,
        make_request: Callable,
        method: RPCEndpoint,
        params: Sequence[Any],
    ) -> Dict[str, Any]:
        result = make_request(method, params)

        # reformat failed eth_call / eth_sendTransaction output to mimic that of Ganache 6.x
        # yes, this is hacky and awful and in the future we should stop supporting
        # the older version of ganache. but doing so will cause unexpected issues
        # in projects that are still pinned to the old version, so for now we support
        # both and simply raise a warning of a pending deprecation.
        data: dict
        error: dict

        if (
            method in {"eth_sendTransaction", "eth_sendRawTransaction"}
            and "error" in result
            and "data" in (error := result["error"])
        ):
            data = error["data"]
            data["error"] = data.pop("message")
            data["program_counter"] = data.pop("programCounter")
            error["data"] = {data.pop("hash"): data}

        if method == "eth_call" and "error" in result:
            error = result["error"]
            if error.get("message", "").startswith("VM Exception"):
                # "VM Exception while processing transaction: {reason} {message}"
                msg: str = error["message"]
                msg = msg.split(": ", maxsplit=1)[-1]
                if msg.startswith("revert"):
                    data = {"error": "revert", "reason": error["data"]}
                else:
                    data = {"error": msg, "reason": None}
                error["data"] = {"0x": data}

        return result
Ganache7MiddleWare
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/elements.py
{ "start": 177693, "end": 177840 }
class ____(Executable, ClauseElement):
    __visit_name__ = "identified"

    def __init__(self, ident):
        self.ident = ident
_IdentifiedClause
python
keras-team__keras
keras/src/ops/nn.py
{ "start": 16125, "end": 17126 }
class ____(Operation):
    def __init__(self, alpha=1.0, *, name=None):
        super().__init__(name=name)
        self.alpha = alpha

    def call(self, x):
        return backend.nn.celu(x, self.alpha)

    def compute_output_spec(self, x):
        return KerasTensor(x.shape, dtype=x.dtype)


@keras_export(["keras.ops.celu", "keras.ops.nn.celu"])
def celu(x, alpha=1.0):
    """Continuously-differentiable exponential linear unit.

    It is defined as:

    `f(x) = alpha * (exp(x / alpha) - 1) for x < 0`, `f(x) = x for x >= 0`.

    Args:
        x: Input tensor.
        alpha: the α value for the CELU formulation. Defaults to `1.0`.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = np.array([-1., 0., 1.])
    >>> x_celu = keras.ops.celu(x)
    >>> print(x_celu)
    array([-0.63212056, 0. , 1. ], shape=(3,), dtype=float64)

    """
    if any_symbolic_tensors((x,)):
        return Celu(alpha).symbolic_call(x)
    return backend.nn.celu(x, alpha)
Celu
python
allegroai__clearml
clearml/backend_api/services/v2_23/queues.py
{ "start": 57426, "end": 58854 }
class ____(Response):
    """
    Response of queues.get_default endpoint.

    :param id: Queue id
    :type id: str
    :param name: Queue name
    :type name: str
    """

    _service = "queues"
    _action = "get_default"
    _version = "2.23"
    _schema = {
        "definitions": {},
        "properties": {
            "id": {"description": "Queue id", "type": ["string", "null"]},
            "name": {"description": "Queue name", "type": ["string", "null"]},
        },
        "type": "object",
    }

    def __init__(self, id: Optional[str] = None, name: Optional[str] = None, **kwargs: Any) -> None:
        super(GetDefaultResponse, self).__init__(**kwargs)
        self.id = id
        self.name = name

    @schema_property("id")
    def id(self) -> Optional[str]:
        return self._property_id

    @id.setter
    def id(self, value: Optional[str]) -> None:
        if value is None:
            self._property_id = None
            return
        self.assert_isinstance(value, "id", six.string_types)
        self._property_id = value

    @schema_property("name")
    def name(self) -> Optional[str]:
        return self._property_name

    @name.setter
    def name(self, value: Optional[str]) -> None:
        if value is None:
            self._property_name = None
            return
        self.assert_isinstance(value, "name", six.string_types)
        self._property_name = value
GetDefaultResponse
python
wandb__wandb
wandb/vendor/watchdog_0_9_0/wandb_watchdog/tricks/__init__.py
{ "start": 3586, "end": 5198 }
class ____(Trick):
    """Starts a long-running subprocess and restarts it on matched events.

    The command parameter is a list of command arguments, such as
    ['bin/myserver', '-c', 'etc/myconfig.ini']. Call start() after creating
    the Trick. Call stop() when stopping the process.
    """

    def __init__(self, command, patterns=None, ignore_patterns=None,
                 ignore_directories=False, stop_signal=signal.SIGINT,
                 kill_after=10):
        super(AutoRestartTrick, self).__init__(
            patterns, ignore_patterns, ignore_directories)
        self.command = command
        self.stop_signal = stop_signal
        self.kill_after = kill_after
        self.process = None

    def start(self):
        self.process = subprocess.Popen(self.command, preexec_fn=os.setsid)

    def stop(self):
        if self.process is None:
            return
        try:
            os.killpg(os.getpgid(self.process.pid), self.stop_signal)
        except OSError:
            # Process is already gone
            pass
        else:
            kill_time = time.time() + self.kill_after
            while time.time() < kill_time:
                if self.process.poll() is not None:
                    break
                time.sleep(0.25)
            else:
                try:
                    os.killpg(os.getpgid(self.process.pid), 9)
                except OSError:
                    # Process is already gone
                    pass
        self.process = None

    @echo.echo
    def on_any_event(self, event):
        self.stop()
        self.start()
AutoRestartTrick
python
huggingface__transformers
src/transformers/models/sam/modeling_sam.py
{ "start": 46308, "end": 47126 }
class ____(SamPreTrainedModel):
    config: SamVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: SamVisionConfig):
        super().__init__(config)
        self.vision_encoder = SamVisionEncoder(config)

        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_encoder.patch_embed

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, SamVisionEncoderOutput]:
        return self.vision_encoder(pixel_values, **kwargs)


@auto_docstring(
    custom_intro="""
    Segment Anything Model (SAM) for generating segmentation masks, given an input image and input points and labels, boxes, or masks.
    """
)
SamVisionModel
python
RaRe-Technologies__gensim
gensim/test/test_text_analysis.py
{ "start": 292, "end": 3735 }
class ____:
    class TextAnalyzerTestBase(unittest.TestCase):
        texts = [
            ['this', 'is', 'a'],
            ['test', 'document'],
            ['this', 'test', 'document'],
            ['test', 'test', 'this']
        ]
        token2id = {
            'this': 10,
            'is': 15,
            'a': 20,
            'test': 21,
            'document': 17
        }
        dictionary = Dictionary(texts)
        dictionary.token2id = token2id
        dictionary.id2token = {v: k for k, v in token2id.items()}
        top_ids = set(token2id.values())

        texts2 = common_texts + [['user', 'user']]
        dictionary2 = Dictionary(texts2)
        dictionary2.id2token = {v: k for k, v in dictionary2.token2id.items()}
        top_ids2 = set(dictionary2.token2id.values())

        accumulator_cls = None

        def init_accumulator(self):
            return self.accumulator_cls(self.top_ids, self.dictionary)

        def init_accumulator2(self):
            return self.accumulator_cls(self.top_ids2, self.dictionary2)

        def test_occurrence_counting(self):
            accumulator = self.init_accumulator().accumulate(self.texts, 3)
            self.assertEqual(3, accumulator.get_occurrences("this"))
            self.assertEqual(1, accumulator.get_occurrences("is"))
            self.assertEqual(1, accumulator.get_occurrences("a"))

            self.assertEqual(2, accumulator.get_co_occurrences("test", "document"))
            self.assertEqual(2, accumulator.get_co_occurrences("test", "this"))
            self.assertEqual(1, accumulator.get_co_occurrences("is", "a"))

        def test_occurrence_counting2(self):
            accumulator = self.init_accumulator2().accumulate(self.texts2, 110)
            self.assertEqual(2, accumulator.get_occurrences("human"))
            self.assertEqual(4, accumulator.get_occurrences("user"))
            self.assertEqual(3, accumulator.get_occurrences("graph"))
            self.assertEqual(3, accumulator.get_occurrences("trees"))

            cases = [
                (1, ("human", "interface")),
                (2, ("system", "user")),
                (2, ("graph", "minors")),
                (2, ("graph", "trees")),
                (4, ("user", "user")),
                (3, ("graph", "graph")),
                (0, ("time", "eps"))
            ]
            for expected_count, (word1, word2) in cases:
                # Verify co-occurrence counts are correct, regardless of word order.
                self.assertEqual(expected_count, accumulator.get_co_occurrences(word1, word2))
                self.assertEqual(expected_count, accumulator.get_co_occurrences(word2, word1))

                # Also verify that using token ids instead of tokens works the same.
                word_id1 = self.dictionary2.token2id[word1]
                word_id2 = self.dictionary2.token2id[word2]
                self.assertEqual(expected_count, accumulator.get_co_occurrences(word_id1, word_id2))
                self.assertEqual(expected_count, accumulator.get_co_occurrences(word_id2, word_id1))

        def test_occurences_for_irrelevant_words(self):
            accumulator = self.init_accumulator().accumulate(self.texts, 2)
            with self.assertRaises(KeyError):
                accumulator.get_occurrences("irrelevant")
            with self.assertRaises(KeyError):
                accumulator.get_co_occurrences("test", "irrelevant")
BaseTestCases
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/methodOverride3.py
{ "start": 446, "end": 490 }
class ____:
    def func1(self) -> int: ...
B1
python
run-llama__llama_index
llama-index-core/llama_index/core/node_parser/text/code.py
{ "start": 525, "end": 7102 }
class ____(TextSplitter):
    """
    Split code using a AST parser.

    Thank you to Kevin Lu / SweepAI for suggesting this elegant code splitting solution.
    https://docs.sweep.dev/blogs/chunking-2m-files
    """

    language: str = Field(
        description="The programming language of the code being split."
    )
    chunk_lines: int = Field(
        default=DEFAULT_CHUNK_LINES,
        description="The number of lines to include in each chunk.",
        gt=0,
    )
    chunk_lines_overlap: int = Field(
        default=DEFAULT_LINES_OVERLAP,
        description="How many lines of code each chunk overlaps with.",
        gt=0,
    )
    max_chars: int = Field(
        default=DEFAULT_MAX_CHARS,
        description="Maximum number of characters per chunk.",
        gt=0,
    )
    _parser: Any = PrivateAttr()

    def __init__(
        self,
        language: str,
        chunk_lines: int = DEFAULT_CHUNK_LINES,
        chunk_lines_overlap: int = DEFAULT_LINES_OVERLAP,
        max_chars: int = DEFAULT_MAX_CHARS,
        parser: Any = None,
        callback_manager: Optional[CallbackManager] = None,
        include_metadata: bool = True,
        include_prev_next_rel: bool = True,
        id_func: Optional[Callable[[int, Document], str]] = None,
    ) -> None:
        """Initialize a CodeSplitter."""
        from tree_sitter import Parser  # pants: no-infer-dep

        callback_manager = callback_manager or CallbackManager([])
        id_func = id_func or default_id_func

        super().__init__(
            language=language,
            chunk_lines=chunk_lines,
            chunk_lines_overlap=chunk_lines_overlap,
            max_chars=max_chars,
            callback_manager=callback_manager,
            include_metadata=include_metadata,
            include_prev_next_rel=include_prev_next_rel,
            id_func=id_func,
        )

        if parser is None:
            try:
                import tree_sitter_language_pack  # pants: no-infer-dep

                parser = tree_sitter_language_pack.get_parser(language)  # type: ignore
            except ImportError:
                raise ImportError(
                    "Please install tree_sitter_language_pack to use CodeSplitter."
                    "Or pass in a parser object."
                )
            except Exception:
                print(
                    f"Could not get parser for language {language}. Check "
                    "https://github.com/Goldziher/tree-sitter-language-pack?tab=readme-ov-file#available-languages "
                    "for a list of valid languages."
                )
                raise

        if not isinstance(parser, Parser):
            raise ValueError("Parser must be a tree-sitter Parser object.")

        self._parser = parser

    @classmethod
    def from_defaults(
        cls,
        language: str,
        chunk_lines: int = DEFAULT_CHUNK_LINES,
        chunk_lines_overlap: int = DEFAULT_LINES_OVERLAP,
        max_chars: int = DEFAULT_MAX_CHARS,
        callback_manager: Optional[CallbackManager] = None,
        parser: Any = None,
    ) -> "CodeSplitter":
        """Create a CodeSplitter with default values."""
        return cls(
            language=language,
            chunk_lines=chunk_lines,
            chunk_lines_overlap=chunk_lines_overlap,
            max_chars=max_chars,
            callback_manager=callback_manager,
            parser=parser,
        )

    @classmethod
    def class_name(cls) -> str:
        return "CodeSplitter"

    def _chunk_node(self, node: Any, text_bytes: bytes, last_end: int = 0) -> List[str]:
        """
        Recursively chunk a node into smaller pieces based on character limits.

        Args:
            node (Any): The AST node to chunk.
            text_bytes (bytes): The original source code text as bytes.
            last_end (int, optional): The ending position of the last processed chunk. Defaults to 0.

        Returns:
            List[str]: A list of code chunks that respect the max_chars limit.
        """
        new_chunks = []
        current_chunk = ""
        for child in node.children:
            if child.end_byte - child.start_byte > self.max_chars:
                # Child is too big, recursively chunk the child
                if len(current_chunk) > 0:
                    new_chunks.append(current_chunk)
                current_chunk = ""
                new_chunks.extend(self._chunk_node(child, text_bytes, last_end))
            elif (
                len(current_chunk) + child.end_byte - child.start_byte > self.max_chars
            ):
                # Child would make the current chunk too big, so start a new chunk
                new_chunks.append(current_chunk)
                current_chunk = text_bytes[last_end : child.end_byte].decode("utf-8")
            else:
                current_chunk += text_bytes[last_end : child.end_byte].decode("utf-8")
            last_end = child.end_byte
        if len(current_chunk) > 0:
            new_chunks.append(current_chunk)
        return new_chunks

    def split_text(self, text: str) -> List[str]:
        """
        Split incoming code into chunks using the AST parser.

        This method parses the input code into an AST and then chunks it while
        preserving syntactic structure. It handles error cases and ensures the code
        can be properly parsed.

        Args:
            text (str): The source code text to split.

        Returns:
            List[str]: A list of code chunks.

        Raises:
            ValueError: If the code cannot be parsed for the specified language.
        """
        """Split incoming code and return chunks using the AST."""
        with self.callback_manager.event(
            CBEventType.CHUNKING, payload={EventPayload.CHUNKS: [text]}
        ) as event:
            text_bytes = bytes(text, "utf-8")
            tree = self._parser.parse(text_bytes)

            if (
                not tree.root_node.children
                or tree.root_node.children[0].type != "ERROR"
            ):
                chunks = [
                    chunk.strip()
                    for chunk in self._chunk_node(tree.root_node, text_bytes)
                ]
                event.on_end(
                    payload={EventPayload.CHUNKS: chunks},
                )

                return chunks
            else:
                raise ValueError(f"Could not parse code with language {self.language}.")


# TODO: set up auto-language detection using something like https://github.com/yoeo/guesslang.
CodeSplitter
python
airbytehq__airbyte
airbyte-integrations/connectors/source-outbrain-amplify/source_outbrain_amplify/source.py
{ "start": 6673, "end": 8798 }
class ____(OutbrainAmplifyStream, HttpSubStream):
    primary_key = None

    def __init__(self, authenticator, config, parent: CampaignsByMarketers, **kwargs):
        super().__init__(parent=parent, **kwargs)
        self.config = config
        self._authenticator = authenticator
        self._session = requests.sessions.Session()

    def request_params(
        self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
    ) -> MutableMapping[str, Any]:
        return {}

    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        return None

    def stream_slices(
        self, sync_mode: SyncMode.full_refresh, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
    ) -> Iterable[Optional[Mapping[str, Any]]]:
        parent_stream_slices = self.parent.stream_slices(
            sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state
        )
        for stream_slice in parent_stream_slices:
            parent_records = self.parent.read_records(
                sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state
            )
            for record in parent_records:
                yield {"campaign_id": record.get("id")}

    def parse_response(
        self,
        response: requests.Response,
        stream_state: Mapping[str, Any],
        stream_slice: Mapping[str, Any] = None,
        next_page_token: Mapping[str, Any] = None,
    ) -> Iterable[Mapping]:
        if response.json():
            for x in response.json().get("geoLocations"):
                x["campaign_id"] = stream_slice["campaign_id"]
                yield x

    def path(
        self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
    ) -> str:
        return f"campaigns/{stream_slice['campaign_id']}/locations"


# List PromotedLinks for Campaign.
# Collection of all PromotedLinks for the specified Campaign.
CampaignsGeoLocation
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol16.py
{ "start": 212, "end": 475 }
class ____(Protocol):
    def execute(self, stmt: Any, *args: Any, **kwargs: Any) -> None: ...


def func1(arg: Session) -> None: ...


def func2(x: CoolSession):
    # This should generate an error because "statement" and "stmt" don't match.
    func1(x)
CoolSession
python
pytorch__pytorch
torch/cuda/_sanitizer.py
{ "start": 22374, "end": 24179 }
class ____:
    """Manages the lifetime of a CUDASanitizer dispatch mode object.

    The CUDASanitizer class wraps the entering/exiting functions of the dispatch
    mode context manager in the enable function/destructor, respectively. This
    is to explicitly set the lifetime of the dispatch mode object to that of the
    application. This approach was deemed more elegant than using the atexit
    module.
    """

    def __init__(self) -> None:
        self.dispatch = CUDASanitizerDispatchMode()
        self.enabled = False

    def enable(self):
        self.dispatch.__enter__()
        self.enabled = True

    def disable(self):
        self.dispatch.__exit__(None, None, None)
        self.enabled = False

    def __del__(self):
        # Since this object lifetime is linked to the `torch.cuda._sanitizer` python
        # module, it often gets deleted as part of the overall `torch` module cleanup
        # At that time, depending on CPython version, the torch.* module might be in
        # different states of being already cleaned up.
        # Similarly other imports might already have been cleaned up so `sys` might
        # be already gone as well.
        # Skip exiting the mode if it outlived the runtime.
        if (sys is not None) and (not sys.is_finalizing()) and self.enabled:
            self.disable()


def enable_cuda_sanitizer():
    """Enable CUDA Sanitizer.

    The sanitizer will begin to analyze low-level CUDA calls invoked by torch
    functions for synchronization errors. All data races found will be printed
    to the standard error output along with stack traces of suspected causes.
    For best results, the sanitizer should be enabled at the very beginning of
    the program.
    """
    cuda_sanitizer.enable()


cuda_sanitizer = CUDASanitizer()
CUDASanitizer
python
walkccc__LeetCode
solutions/770. Basic Calculator IV/770.py
{ "start": 1848, "end": 3775 }
class ____:
  def basicCalculatorIV(
      self,
      expression: str,
      evalvars: list[str],
      evalints: list[int],
  ) -> list[str]:
    tokens = list(self._getTokens(expression))
    evalMap = {a: b for a, b in zip(evalvars, evalints)}

    for i, token in enumerate(tokens):
      if token in evalMap:
        tokens[i] = str(evalMap[token])

    postfix = self._infixToPostfix(tokens)
    return self._evaluate(postfix).toList()

  def _getTokens(self, s: str) -> Iterator[str]:
    i = 0
    for j, c in enumerate(s):
      if c == ' ':
        if i < j:
          yield s[i:j]
        i = j + 1
      elif c in '()+-*':
        if i < j:
          yield s[i:j]
        yield c
        i = j + 1
    if i < len(s):
      yield s[i:]

  def _infixToPostfix(self, tokens: list[str]) -> list[str]:
    postfix = []
    ops = []

    def precedes(prevOp: str, currOp: str) -> bool:
      if prevOp == '(':
        return False
      return prevOp == '*' or currOp in '+-'

    for token in tokens:
      if token == '(':
        ops.append(token)
      elif token == ')':
        while ops[-1] != '(':
          postfix.append(ops.pop())
        ops.pop()
      elif token in '+-*':  # isOperator(token)
        while ops and precedes(ops[-1], token):
          postfix.append(ops.pop())
        ops.append(token)
      else:  # isOperand(token)
        postfix.append(token)

    return postfix + ops[::-1]

  def _evaluate(self, postfix: list[str]) -> Poly:
    polys: list[Poly] = []
    for token in postfix:
      if token in '+-*':
        b = polys.pop()
        a = polys.pop()
        if token == '+':
          polys.append(a + b)
        elif token == '-':
          polys.append(a - b)
        else:  # token == '*'
          polys.append(a * b)
      elif token.lstrip('-').isnumeric():
        polys.append(Poly("1", int(token)))
      else:
        polys.append(Poly(token, 1))
    return polys[0]
Solution
python
numpy__numpy
numpy/_core/_exceptions.py
{ "start": 945, "end": 1446 }
class ____(UFuncTypeError):
    """ Thrown when a ufunc loop cannot be found """
    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc)
        self.dtypes = tuple(dtypes)

    def __str__(self):
        return (
            f"ufunc {self.ufunc.__name__!r} did not contain a loop with signature "
            f"matching types {_unpack_tuple(self.dtypes[:self.ufunc.nin])!r} "
            f"-> {_unpack_tuple(self.dtypes[self.ufunc.nin:])!r}"
        )


@_display_as_base
_UFuncNoLoopError
python
Netflix__metaflow
metaflow/exception.py
{ "start": 4059, "end": 4137 }
class ____(MetaflowException):
    headline = "Invalid command"
CommandException
python
openai__openai-python
tests/api_resources/beta/threads/test_runs.py
{ "start": 21999, "end": 44806 }
class ____: parametrize = pytest.mark.parametrize( "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] ) @parametrize async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): run = await async_client.beta.threads.runs.create( thread_id="thread_id", assistant_id="assistant_id", ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): run = await async_client.beta.threads.runs.create( thread_id="thread_id", assistant_id="assistant_id", include=["step_details.tool_calls[*].file_search.results[*].content"], additional_instructions="additional_instructions", additional_messages=[ { "content": "string", "role": "user", "attachments": [ { "file_id": "file_id", "tools": [{"type": "code_interpreter"}], } ], "metadata": {"foo": "string"}, } ], instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, metadata={"foo": "string"}, model="string", parallel_tool_calls=True, reasoning_effort="none", response_format="auto", stream=False, temperature=1, tool_choice="none", tools=[{"type": "code_interpreter"}], top_p=1, truncation_strategy={ "type": "auto", "last_messages": 1, }, ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): response = await async_client.beta.threads.runs.with_raw_response.create( thread_id="thread_id", assistant_id="assistant_id", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): async with async_client.beta.threads.runs.with_streaming_response.create( thread_id="thread_id", assistant_id="assistant_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = await response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_create_overload_1(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.create( thread_id="", assistant_id="assistant_id", ) @parametrize async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): run_stream = await async_client.beta.threads.runs.create( thread_id="thread_id", assistant_id="assistant_id", stream=True, ) await run_stream.response.aclose() @parametrize async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): run_stream = await async_client.beta.threads.runs.create( thread_id="thread_id", assistant_id="assistant_id", stream=True, include=["step_details.tool_calls[*].file_search.results[*].content"], additional_instructions="additional_instructions", additional_messages=[ { "content": "string", "role": "user", "attachments": [ { "file_id": "file_id", "tools": [{"type": "code_interpreter"}], 
} ], "metadata": {"foo": "string"}, } ], instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, metadata={"foo": "string"}, model="string", parallel_tool_calls=True, reasoning_effort="none", response_format="auto", temperature=1, tool_choice="none", tools=[{"type": "code_interpreter"}], top_p=1, truncation_strategy={ "type": "auto", "last_messages": 1, }, ) await run_stream.response.aclose() @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): response = await async_client.beta.threads.runs.with_raw_response.create( thread_id="thread_id", assistant_id="assistant_id", stream=True, ) assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = response.parse() await stream.close() @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): async with async_client.beta.threads.runs.with_streaming_response.create( thread_id="thread_id", assistant_id="assistant_id", stream=True, ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = await response.parse() await stream.close() assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_create_overload_2(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.create( thread_id="", assistant_id="assistant_id", stream=True, ) @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): run = await async_client.beta.threads.runs.retrieve( run_id="run_id", thread_id="thread_id", ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): response = await async_client.beta.threads.runs.with_raw_response.retrieve( run_id="run_id", thread_id="thread_id", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): async with async_client.beta.threads.runs.with_streaming_response.retrieve( run_id="run_id", thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = await response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.retrieve( run_id="run_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.retrieve( run_id="", thread_id="thread_id", ) @parametrize async def test_method_update(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): run = await 
async_client.beta.threads.runs.update( run_id="run_id", thread_id="thread_id", ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): run = await async_client.beta.threads.runs.update( run_id="run_id", thread_id="thread_id", metadata={"foo": "string"}, ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): response = await async_client.beta.threads.runs.with_raw_response.update( run_id="run_id", thread_id="thread_id", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): async with async_client.beta.threads.runs.with_streaming_response.update( run_id="run_id", thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = await response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_update(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.update( run_id="run_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.update( run_id="", thread_id="thread_id", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): run = await async_client.beta.threads.runs.list( thread_id="thread_id", ) assert_matches_type(AsyncCursorPage[Run], run, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): run = await async_client.beta.threads.runs.list( thread_id="thread_id", after="after", before="before", limit=0, order="asc", ) assert_matches_type(AsyncCursorPage[Run], run, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): response = await async_client.beta.threads.runs.with_raw_response.list( thread_id="thread_id", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(AsyncCursorPage[Run], run, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): async with async_client.beta.threads.runs.with_streaming_response.list( thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = await response.parse() assert_matches_type(AsyncCursorPage[Run], run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): with pytest.raises(ValueError, match=r"Expected a 
non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.list( thread_id="", ) @parametrize async def test_method_cancel(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): run = await async_client.beta.threads.runs.cancel( run_id="run_id", thread_id="thread_id", ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): response = await async_client.beta.threads.runs.with_raw_response.cancel( run_id="run_id", thread_id="thread_id", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): async with async_client.beta.threads.runs.with_streaming_response.cancel( run_id="run_id", thread_id="thread_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = await response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.cancel( run_id="run_id", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.cancel( run_id="", thread_id="thread_id", ) @parametrize async def test_method_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): run = await async_client.beta.threads.runs.submit_tool_outputs( run_id="run_id", thread_id="thread_id", tool_outputs=[{}], ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_method_submit_tool_outputs_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): run = await async_client.beta.threads.runs.submit_tool_outputs( run_id="run_id", thread_id="thread_id", tool_outputs=[ { "output": "output", "tool_call_id": "tool_call_id", } ], stream=False, ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_raw_response_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): response = await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( run_id="run_id", thread_id="thread_id", tool_outputs=[{}], ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize async def test_streaming_response_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): async with async_client.beta.threads.runs.with_streaming_response.submit_tool_outputs( run_id="run_id", thread_id="thread_id", tool_outputs=[{}], ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = await response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, 
response.is_closed) is True @parametrize async def test_path_params_submit_tool_outputs_overload_1(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( run_id="run_id", thread_id="", tool_outputs=[{}], ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( run_id="", thread_id="thread_id", tool_outputs=[{}], ) @parametrize async def test_method_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): run_stream = await async_client.beta.threads.runs.submit_tool_outputs( run_id="run_id", thread_id="thread_id", stream=True, tool_outputs=[{}], ) await run_stream.response.aclose() @parametrize async def test_raw_response_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): response = await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( run_id="run_id", thread_id="thread_id", stream=True, tool_outputs=[{}], ) assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = response.parse() await stream.close() @parametrize async def test_streaming_response_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): async with async_client.beta.threads.runs.with_streaming_response.submit_tool_outputs( run_id="run_id", thread_id="thread_id", stream=True, tool_outputs=[{}], ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = await response.parse() await stream.close() assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_submit_tool_outputs_overload_2(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( run_id="run_id", thread_id="", stream=True, tool_outputs=[{}], ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( run_id="", thread_id="thread_id", stream=True, tool_outputs=[{}], )
TestAsyncRuns
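As a quick illustration of the call shape exercised by the record above (a sketch only: the beta threads API is deprecated, and the client setup, IDs and tool-output values are placeholders copied from the tests, not part of the original record), submitting tool outputs with streaming from an async client looks roughly like this:

import asyncio
from openai import AsyncOpenAI

async def main() -> None:
    client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set in the environment
    # Placeholder IDs; real values come from an existing thread and an
    # in-progress run that is waiting for tool output.
    stream = await client.beta.threads.runs.submit_tool_outputs(
        run_id="run_id",
        thread_id="thread_id",
        stream=True,
        tool_outputs=[{"tool_call_id": "tool_call_id", "output": "output"}],
    )
    async for event in stream:
        print(event)

asyncio.run(main())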
python
django-crispy-forms__django-crispy-forms
crispy_forms/layout.py
{ "start": 3583, "end": 4833 }
class ____(LayoutObject):
    """
    Form Layout. It is composed of Layout objects: `Fieldset`, `Row`, `Column`,
    `MultiField`, `HTML`, `ButtonHolder`, `Button`, `Hidden`, `Reset`, `Submit`
    and fields. Form fields have to be strings.
    Layout objects `Fieldset`, `Row`, `Column`, `MultiField` and `ButtonHolder`
    can hold other Layout objects within. However, `ButtonHolder` should only
    hold `HTML` and BaseInput inherited classes: `Button`, `Hidden`, `Reset`
    and `Submit`.

    Example::

        helper.layout = Layout(
            Fieldset('Company data',
                'is_company'
            ),
            Fieldset(_('Contact details'),
                'email',
                Row('password1', 'password2'),
                'first_name',
                'last_name',
                HTML('<img src="/media/somepicture.jpg"/>'),
                'company'
            ),
            ButtonHolder(
                Submit('Save', 'Save', css_class='button white'),
            ),
        )
    """

    def __init__(self, *fields):
        self.fields = list(fields)

    def render(self, form, context, template_pack=TEMPLATE_PACK, **kwargs):
        return self.get_rendered_fields(form, context, template_pack, **kwargs)
Layout
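A minimal sketch of how a layout like the one in the docstring above is attached to a Django form through a FormHelper; the form and field names here are hypothetical and not part of the record:

from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Fieldset, Layout, Submit

class ContactForm(forms.Form):
    email = forms.EmailField()
    first_name = forms.CharField()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        # Field names are plain strings referring to the form's fields.
        self.helper.layout = Layout(
            Fieldset("Contact details", "email", "first_name"),
            Submit("save", "Save", css_class="button white"),
        )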
python
getsentry__sentry
src/sentry/api/bases/project.py
{ "start": 4039, "end": 4341 }
class ____(ProjectPermission):
    scope_map = {
        "GET": ["project:read", "project:write", "project:admin"],
        "POST": ["project:write", "project:admin"],
        "PUT": ["project:read", "project:write", "project:admin"],
        "DELETE": ["project:admin"],
    }
ProjectOwnershipPermission
python
python-pillow__Pillow
src/PIL/ImageMode.py
{ "start": 369, "end": 2395 }
class ____(NamedTuple):
    """Wrapper for mode strings."""

    mode: str
    bands: tuple[str, ...]
    basemode: str
    basetype: str
    typestr: str

    def __str__(self) -> str:
        return self.mode


@lru_cache
def getmode(mode: str) -> ModeDescriptor:
    """Gets a mode descriptor for the given mode."""
    endian = "<" if sys.byteorder == "little" else ">"

    modes = {
        # core modes
        # Bits need to be extended to bytes
        "1": ("L", "L", ("1",), "|b1"),
        "L": ("L", "L", ("L",), "|u1"),
        "I": ("L", "I", ("I",), f"{endian}i4"),
        "F": ("L", "F", ("F",), f"{endian}f4"),
        "P": ("P", "L", ("P",), "|u1"),
        "RGB": ("RGB", "L", ("R", "G", "B"), "|u1"),
        "RGBX": ("RGB", "L", ("R", "G", "B", "X"), "|u1"),
        "RGBA": ("RGB", "L", ("R", "G", "B", "A"), "|u1"),
        "CMYK": ("RGB", "L", ("C", "M", "Y", "K"), "|u1"),
        "YCbCr": ("RGB", "L", ("Y", "Cb", "Cr"), "|u1"),
        # UNDONE - unsigned |u1i1i1
        "LAB": ("RGB", "L", ("L", "A", "B"), "|u1"),
        "HSV": ("RGB", "L", ("H", "S", "V"), "|u1"),
        # extra experimental modes
        "RGBa": ("RGB", "L", ("R", "G", "B", "a"), "|u1"),
        "LA": ("L", "L", ("L", "A"), "|u1"),
        "La": ("L", "L", ("L", "a"), "|u1"),
        "PA": ("RGB", "L", ("P", "A"), "|u1"),
    }
    if mode in modes:
        base_mode, base_type, bands, type_str = modes[mode]
        return ModeDescriptor(mode, bands, base_mode, base_type, type_str)

    mapping_modes = {
        # I;16 == I;16L, and I;32 == I;32L
        "I;16": "<u2",
        "I;16S": "<i2",
        "I;16L": "<u2",
        "I;16LS": "<i2",
        "I;16B": ">u2",
        "I;16BS": ">i2",
        "I;16N": f"{endian}u2",
        "I;16NS": f"{endian}i2",
        "I;32": "<u4",
        "I;32B": ">u4",
        "I;32L": "<u4",
        "I;32S": "<i4",
        "I;32BS": ">i4",
        "I;32LS": "<i4",
    }

    type_str = mapping_modes[mode]
    return ModeDescriptor(mode, ("I",), "L", "L", type_str)
ModeDescriptor
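A short usage sketch for the descriptor above, reading band and type information back from getmode; the printed values follow directly from the tables in the record:

from PIL import ImageMode

# Descriptors expose the band names, base mode/type and numpy-style type string.
rgb = ImageMode.getmode("RGB")
print(rgb.bands, rgb.basemode, rgb.basetype, rgb.typestr)  # ('R', 'G', 'B') RGB L |u1
print(ImageMode.getmode("I;16").typestr)                   # <u2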
python
tensorflow__tensorflow
tensorflow/python/eager/monitoring.py
{ "start": 12898, "end": 13508 }
class ____(Buckets):
  """Exponential bucketing strategy.

  Sets up buckets of the form:
  [-DBL_MAX, ..., scale * growth_factor^i,
   scale * growth_factor^(i + 1), ..., DBL_MAX].
  """

  __slots__ = []

  def __init__(self, scale, growth_factor, bucket_count):
    """Creates a new exponential Buckets.

    Args:
      scale: float
      growth_factor: float
      bucket_count: integer
    """
    super(ExponentialBuckets, self).__init__(
        pywrap_tfe.TFE_MonitoringNewExponentialBuckets(scale, growth_factor,
                                                       bucket_count))
ExponentialBuckets
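The bucket layout described in the docstring above can be illustrated with a small pure-Python sketch; this is only an approximation of the scheme, not the real pywrap_tfe implementation, and the helper name is made up:

# Illustrative sketch of the finite bucket boundaries: scale * growth_factor**i.
def exponential_bounds(scale: float, growth_factor: float, bucket_count: int) -> list[float]:
    return [scale * growth_factor ** i for i in range(bucket_count)]

print(exponential_bounds(1.0, 2.0, 5))  # [1.0, 2.0, 4.0, 8.0, 16.0]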
python
dagster-io__dagster
python_modules/dagster/dagster/_core/execution/asset_backfill.py
{ "start": 3342, "end": 3465 }
class ____(Enum):
    IN_PROGRESS = "IN_PROGRESS"
    MATERIALIZED = "MATERIALIZED"
    FAILED = "FAILED"
AssetBackfillStatus
python
pikepdf__pikepdf
src/pikepdf/models/image.py
{ "start": 29697, "end": 32339 }
class ____(PdfImage):
    """Support class for JPEG 2000 images. Implements the same API as :class:`PdfImage`.

    If you call PdfImage(object_that_is_actually_jpeg2000_image), pikepdf will return
    this class instead, due to the check in PdfImage.__new__.
    """

    def __init__(self, obj):
        """Initialize a JPEG 2000 image."""
        super().__init__(obj)
        self._jpxpil = self.as_pil_image()

    def __eq__(self, other):
        if not isinstance(other, PdfImageBase):
            return NotImplemented
        return (
            self.obj == other.obj
            and isinstance(other, PdfJpxImage)
            and self._jpxpil == other._jpxpil
        )

    def _extract_direct(self, *, stream: BinaryIO) -> str | None:
        data, filters = self._remove_simple_filters()
        if filters != ['/JPXDecode']:
            return None
        stream.write(data)
        return '.jp2'

    def _extract_transcoded(self) -> Image.Image:
        return super()._extract_transcoded()

    @property
    def _colorspaces(self):
        """Return the effective colorspace of a JPEG 2000 image.

        If the ColorSpace dictionary is present, the colorspace embedded in the
        JPEG 2000 data will be ignored, as required by the specification.
        """
        # (PDF 1.7 Table 89) If ColorSpace is present, any colour space
        # specifications in the JPEG2000 data shall be ignored.
        super_colorspaces = super()._colorspaces
        if super_colorspaces:
            return super_colorspaces
        if self._jpxpil.mode == 'L':
            return ['/DeviceGray']
        if self._jpxpil.mode == 'RGB':
            return ['/DeviceRGB']
        raise NotImplementedError('Complex JP2 colorspace')

    @property
    def _bpc(self) -> int:
        """Return 8, since bpc is not meaningful for JPEG 2000 encoding."""
        # (PDF 1.7 Table 89) If the image stream uses the JPXDecode filter, this
        # entry is optional and shall be ignored if present. The bit depth is
        # determined by the conforming reader in the process of decoding the
        # JPEG2000 image.
        return 8

    @property
    def indexed(self) -> bool:
        """Return False, since JPEG 2000 should not be indexed."""
        # Nothing in the spec precludes an Indexed JPXDecode image, except for
        # the fact that doing so is madness. Let's assume no one is that insane.
        return False

    def __repr__(self):
        return (
            f'<pikepdf.PdfJpxImage JPEG2000 image mode={self.mode} '
            f'size={self.width}x{self.height} at {hex(id(self))}>'
        )
PdfJpxImage
python
sympy__sympy
sympy/stats/crv_types.py
{ "start": 94149, "end": 96337 }
class ____(SingleContinuousDistribution):
    _argnames = ('a', 'b')

    @property
    def set(self):
        return Interval(self.a, self.b)

    @staticmethod
    def check(a, b):
        _value_check(b > a, "Parameter b must be in range (%s, oo)."%(a))

    def pdf(self, x):
        a, b = self.a, self.b
        alpha = 12 / (b-a)**3
        beta = (a+b) / 2
        return Piecewise(
                  (alpha * (x-beta)**2, And(a<=x, x<=b)),
                  (S.Zero, True))

    def _moment_generating_function(self, t):
        a, b = self.a, self.b
        return -3 * (exp(a*t) * (4 + (a**2 + 2*a*(-2 + b) + b**2) * t) \
               - exp(b*t) * (4 + (-4*b + (a + b)**2) * t)) / ((a-b)**3 * t**2)

    def _characteristic_function(self, t):
        a, b = self.a, self.b
        return -3*I*(exp(I*a*t*exp(I*b*t)) * (4*I - (-4*b + (a+b)**2)*t)) \
                / ((a-b)**3 * t**2)


def QuadraticU(name, a, b):
    r"""
    Create a Continuous Random Variable with a U-quadratic distribution.

    Explanation
    ===========

    The density of the U-quadratic distribution is given by

    .. math::
        f(x) := \alpha (x-\beta)^2

    with :math:`x \in [a,b]`.

    Parameters
    ==========

    a : Real number
    b : Real number, :math:`a < b`

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import QuadraticU, density
    >>> from sympy import Symbol, pprint

    >>> a = Symbol("a", real=True)
    >>> b = Symbol("b", real=True)
    >>> z = Symbol("z")

    >>> X = QuadraticU("x", a, b)

    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
        /                2
        |   /  a   b    \
        |12*|- - - - + z|
        |   \  2   2    /
        <-----------------  for And(b >= z, a <= z)
        |             3
        |     (-a + b)
        |
        \        0                 otherwise

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/U-quadratic_distribution
    """
    return rv(name, QuadraticUDistribution, (a, b))

#-------------------------------------------------------------------------------
# RaisedCosine distribution ----------------------------------------------------
QuadraticUDistribution
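A brief check of the density defined above using sympy.stats; the concrete endpoints 3 and 5 are chosen only to keep the integral simple:

from sympy import Symbol, integrate, simplify
from sympy.stats import QuadraticU, density

z = Symbol("z")
X = QuadraticU("x", 3, 5)  # U-quadratic variable on [3, 5]
# The density alpha*(z - beta)**2 should integrate to 1 over [a, b].
print(simplify(integrate(density(X)(z), (z, 3, 5))))  # 1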
python
python__mypy
mypy/nodes.py
{ "start": 69655, "end": 70307 }
class ____(RefExpr):
    """Member access expression x.y"""

    __slots__ = ("expr", "name", "def_var")

    __match_args__ = ("expr", "name", "node")

    def __init__(self, expr: Expression, name: str) -> None:
        super().__init__()
        self.expr = expr
        self.name = name
        # The variable node related to a definition through 'self.x = <initializer>'.
        # The nodes of other kinds of member expressions are resolved during type checking.
        self.def_var: Var | None = None

    def accept(self, visitor: ExpressionVisitor[T]) -> T:
        return visitor.visit_member_expr(self)


# Kinds of arguments
@unique
MemberExpr
python
astropy__astropy
astropy/modeling/core.py
{ "start": 117240, "end": 183841 }
class ____(Model): """ Base class for compound models. While it can be used directly, the recommended way to combine models is through the model operators. """ def __init__(self, op, left, right, name=None, *, unit_change_composition=False): self.__dict__["_param_names"] = None self._n_submodels = None self.op = op self.left = left self.right = right self._bounding_box = None self._user_bounding_box = None self._leaflist = None self._tdict = None self._parameters = None self._parameters_ = None self._param_metrics = None self._unit_change_composition = unit_change_composition if op != "fix_inputs" and len(left) != len(right): raise ValueError("Both operands must have equal values for n_models") self._n_models = len(left) if op != "fix_inputs" and ( (left.model_set_axis != right.model_set_axis) or left.model_set_axis ): # not False and not 0 raise ValueError( "model_set_axis must be False or 0 and consistent for operands" ) self._model_set_axis = left.model_set_axis if op in ["+", "-", "*", "/", "**"] or op in SPECIAL_OPERATORS: if left.n_inputs != right.n_inputs or left.n_outputs != right.n_outputs: raise ModelDefinitionError( "Both operands must match numbers of inputs and outputs" ) self.n_inputs = left.n_inputs self.n_outputs = left.n_outputs self.inputs = left.inputs self.outputs = left.outputs elif op == "&": self.n_inputs = left.n_inputs + right.n_inputs self.n_outputs = left.n_outputs + right.n_outputs self.inputs = combine_labels(left.inputs, right.inputs) self.outputs = combine_labels(left.outputs, right.outputs) elif op == "|": if left.n_outputs != right.n_inputs: raise ModelDefinitionError( "Unsupported operands for |:" f" {left.name} (n_inputs={left.n_inputs}," f" n_outputs={left.n_outputs}) and" f" {right.name} (n_inputs={right.n_inputs}," f" n_outputs={right.n_outputs}); n_outputs for the left-hand model" " must match n_inputs for the right-hand model." ) self.n_inputs = left.n_inputs self.n_outputs = right.n_outputs self.inputs = left.inputs self.outputs = right.outputs elif op == "fix_inputs": if not isinstance(left, Model): raise ValueError( 'First argument to "fix_inputs" must be an instance of ' "an astropy Model." ) if not isinstance(right, dict): raise ValueError( 'Expected a dictionary for second argument of "fix_inputs".' ) # Dict keys must match either possible indices # for model on left side, or names for inputs. self.n_inputs = left.n_inputs - len(right) # Assign directly to the private attribute (instead of using the setter) # to avoid asserting the new number of outputs matches the old one. self._outputs = left.outputs self.n_outputs = left.n_outputs newinputs = list(left.inputs) keys = right.keys() input_ind = [] for key in keys: if np.issubdtype(type(key), np.integer): if key >= left.n_inputs or key < 0: raise ValueError( "Substitution key integer value " "not among possible input choices." ) if key in input_ind: raise ValueError( "Duplicate specification of same input (index/name)." ) input_ind.append(key) elif isinstance(key, str): if key not in left.inputs: raise ValueError( "Substitution key string not among possible input choices." ) # Check to see it doesn't match positional # specification. ind = left.inputs.index(key) if ind in input_ind: raise ValueError( "Duplicate specification of same input (index/name)." ) input_ind.append(ind) # Remove substituted inputs input_ind.sort() input_ind.reverse() for ind in input_ind: del newinputs[ind] self.inputs = tuple(newinputs) # Now check to see if the input model has bounding_box defined. 
# If so, remove the appropriate dimensions and set it for this # instance. try: self.bounding_box = self.left.bounding_box.fix_inputs(self, right) except NotImplementedError: pass else: raise ModelDefinitionError("Illegal operator: ", self.op) self.name = name self._fittable = None if op in ("|", "+", "-"): self.linear = left.linear and right.linear else: self.linear = False self.eqcons = [] self.ineqcons = [] self.n_left_params = len(self.left.parameters) self._map_parameters() # Initialize the cache for the constraints (used primarily when # sync_constraints is False) self._constraints_cache = {} def _get_left_inputs_from_args(self, args): return args[: self.left.n_inputs] def _get_right_inputs_from_args(self, args): op = self.op if op == "&": # Args expected to look like (*left inputs, *right inputs, *left params, *right params) return args[self.left.n_inputs : self.left.n_inputs + self.right.n_inputs] elif op == "|" or op == "fix_inputs": return None else: return args[: self.left.n_inputs] def _get_left_params_from_args(self, args): op = self.op if op == "&": # Args expected to look like (*left inputs, *right inputs, *left params, *right params) n_inputs = self.left.n_inputs + self.right.n_inputs return args[n_inputs : n_inputs + self.n_left_params] else: return args[self.left.n_inputs : self.left.n_inputs + self.n_left_params] def _get_right_params_from_args(self, args): op = self.op if op == "fix_inputs": return None if op == "&": # Args expected to look like (*left inputs, *right inputs, *left params, *right params) return args[self.left.n_inputs + self.right.n_inputs + self.n_left_params :] else: return args[self.left.n_inputs + self.n_left_params :] def _get_kwarg_model_parameters_as_positional(self, args, kwargs): # could do it with inserts but rebuilding seems like simpilist way # TODO: Check if any param names are in kwargs maybe as an intersection of sets? 
if self.op == "&": new_args = list(args[: self.left.n_inputs + self.right.n_inputs]) args_pos = self.left.n_inputs + self.right.n_inputs else: new_args = list(args[: self.left.n_inputs]) args_pos = self.left.n_inputs for param_name in self.param_names: kw_value = kwargs.pop(param_name, None) if kw_value is not None: value = kw_value else: try: value = args[args_pos] except IndexError: raise IndexError("Missing parameter or input") args_pos += 1 new_args.append(value) return new_args, kwargs def _apply_operators_to_value_lists(self, leftval, rightval, **kw): op = self.op if op == "+": return binary_operation(operator.add, leftval, rightval) elif op == "-": return binary_operation(operator.sub, leftval, rightval) elif op == "*": return binary_operation(operator.mul, leftval, rightval) elif op == "/": return binary_operation(operator.truediv, leftval, rightval) elif op == "**": return binary_operation(operator.pow, leftval, rightval) elif op == "&": if not isinstance(leftval, tuple): leftval = (leftval,) if not isinstance(rightval, tuple): rightval = (rightval,) return leftval + rightval elif op in SPECIAL_OPERATORS: return binary_operation(SPECIAL_OPERATORS[op], leftval, rightval) else: raise ModelDefinitionError("Unrecognized operator {op}") def evaluate(self, *args, **kw): op = self.op args, kw = self._get_kwarg_model_parameters_as_positional(args, kw) left_inputs = self._get_left_inputs_from_args(args) left_params = self._get_left_params_from_args(args) if op == "fix_inputs": pos_index = dict(zip(self.left.inputs, range(self.left.n_inputs))) fixed_inputs = { key if np.issubdtype(type(key), np.integer) else pos_index[key]: value for key, value in self.right.items() } left_inputs = [ fixed_inputs[ind] if ind in fixed_inputs.keys() else inp for ind, inp in enumerate(left_inputs) ] leftval = self.left.evaluate(*left_inputs, *left_params) if op == "fix_inputs": return leftval right_inputs = self._get_right_inputs_from_args(args) right_params = self._get_right_params_from_args(args) if op == "|": if isinstance(leftval, tuple): return self.right.evaluate(*leftval, *right_params) else: return self.right.evaluate(leftval, *right_params) else: rightval = self.right.evaluate(*right_inputs, *right_params) return self._apply_operators_to_value_lists(leftval, rightval, **kw) @property def fit_deriv(self): # If either side of the model is missing analytical derivative then we can't compute one if self.left.fit_deriv is None or self.right.fit_deriv is None: return None # Only the following operators are supported op = self.op if op not in ["-", "+", "*", "/"]: return None def _calc_compound_deriv(*args, **kwargs): args, kw = self._get_kwarg_model_parameters_as_positional(args, kwargs) left_inputs = self._get_left_inputs_from_args(args) left_params = self._get_left_params_from_args(args) right_inputs = self._get_right_inputs_from_args(args) right_params = self._get_right_params_from_args(args) left_deriv = self.left.fit_deriv(*left_inputs, *left_params) right_deriv = self.right.fit_deriv(*right_inputs, *right_params) # Not all fit_deriv methods return consistent types, some return # single arrays, some return lists of arrays, etc. We now convert # this to a single array. left_deriv = np.asanyarray(left_deriv) right_deriv = np.asanyarray(right_deriv) if not self.left.col_fit_deriv: left_deriv = np.moveaxis(left_deriv, -1, 0) if not self.right.col_fit_deriv: right_deriv = np.moveaxis(right_deriv, -1, 0) # Some models preserve the shape of the input in the output of # fit_deriv whereas some do not. 
For example for a 6-parameter model, # passing input with shape (5, 3) might produce a deriv array with # shape (6, 5, 3) or (6, 15). We therefore normalize this to always # ravel all but the first dimension left_deriv = left_deriv.reshape((left_deriv.shape[0], -1)) right_deriv = right_deriv.reshape((right_deriv.shape[0], -1)) # Convert the arrays back to lists over the first dimension so as to # be able to concatenate them (we don't use .tolist() which would # convert to a list of lists instead of a list of arrays) left_deriv = list(left_deriv) right_deriv = list(right_deriv) # We now have to use various differentiation rules to apply the # arithmetic operators to the derivatives. # If we consider an example of a compound model # h(x, a, b, c) made up of two models g(x, a) # and h(x, b, c), one with one parameter and # the other with two parameters, the derivatives # are evaluated as follows: # Addition # h(x, a, b, c) = f(x, a) + g(x, b, c) # fit_deriv = [df/da, dg/db, dg/dc] # Subtraction # h(x, a, b, c) = f(x, a) - g(x, b, c) # fit_deriv = [df/da, -dg/db, -dg/dc] # Multiplication # h(x, a, b, c) = f(x, a) * g(x, b, c) # fit_deriv = [g(x, b, c) * df/da, # f(x, a) * dg/db, # f(x, a) * dg/dc] # Division - Quotient rule # h(x, a, b, c) = f(x, a) / g(x, b, c) # fit_deriv = [df/da / g(x, b, c), # -f(x, a) * dg/db / g(x, b, c)**2, # -f(x, a) * dg/dc / g(x, b, c)**2] if op in ["+", "-"]: if op == "-": right_deriv = [-x for x in right_deriv] return np.array(left_deriv + right_deriv) leftval = self.left.evaluate(*left_inputs, *left_params).ravel() rightval = self.right.evaluate(*right_inputs, *right_params).ravel() if op == "*": return np.array( [rightval * dparam for dparam in left_deriv] + [leftval * dparam for dparam in right_deriv] ) # fmt: skip if op == "/": return np.array( [dparam / rightval for dparam in left_deriv] + [-leftval * (dparam / rightval**2) for dparam in right_deriv] ) # fmt: skip return _calc_compound_deriv @property def col_fit_deriv(self): return True @property def n_submodels(self): if self._leaflist is None: self._make_leaflist() return len(self._leaflist) @property def submodel_names(self): """Return the names of submodels in a ``CompoundModel``.""" if self._leaflist is None: self._make_leaflist() names = [item.name for item in self._leaflist] nonecount = 0 newnames = [] for item in names: if item is None: newnames.append(f"None_{nonecount}") nonecount += 1 else: newnames.append(item) return tuple(newnames) def _pre_evaluate(self, *args, **kwargs): """ CompoundModel specific input setup that needs to occur prior to model evaluation. Note ---- All of the _pre_evaluate for each component model will be performed at the time that the individual model is evaluated. """ # If equivalencies are provided, necessary to map parameters and pass # the leaflist as a keyword input for use by model evaluation so that # the compound model input names can be matched to the model input # names. if "equivalencies" in kwargs: # Restructure to be useful for the individual model lookup kwargs["inputs_map"] = [ (value[0], (value[1], key)) for key, value in self.inputs_map().items() ] # Setup actual model evaluation method def evaluate(_inputs): return self._evaluate(*_inputs, **kwargs) return evaluate, args, None, kwargs @property def _argnames(self): """ No inputs should be used to determine input_shape when handling compound models. 
""" return () def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs): """ CompoundModel specific post evaluation processing of outputs. Note ---- All of the _post_evaluate for each component model will be performed at the time that the individual model is evaluated. """ if self.get_bounding_box(with_bbox) is not None and self.n_outputs == 1: return outputs[0] return outputs def _evaluate(self, *args, **kw): op = self.op if op != "fix_inputs": if op != "&": leftval = self.left(*args, **kw) if op != "|": rightval = self.right(*args, **kw) else: rightval = None else: leftval = self.left(*(args[: self.left.n_inputs]), **kw) rightval = self.right(*(args[self.left.n_inputs :]), **kw) if op != "|": return self._apply_operators_to_value_lists(leftval, rightval, **kw) elif op == "|": if isinstance(leftval, tuple): return self.right(*leftval, **kw) else: return self.right(leftval, **kw) else: subs = self.right newargs = list(args) subinds = [] subvals = [] for key in subs.keys(): if np.issubdtype(type(key), np.integer): subinds.append(key) elif isinstance(key, str): ind = self.left.inputs.index(key) subinds.append(ind) subvals.append(subs[key]) # Turn inputs specified in kw into positional indices. # Names for compound inputs do not propagate to sub models. kwind = [] kwval = [] for kwkey in list(kw.keys()): if kwkey in self.inputs: ind = self.inputs.index(kwkey) if ind < len(args): raise ValueError( "Keyword argument duplicates positional value supplied." ) kwind.append(ind) kwval.append(kw[kwkey]) del kw[kwkey] # Build new argument list # Append keyword specified args first if kwind: kwargs = list(zip(kwind, kwval)) kwargs.sort() kwindsorted, kwvalsorted = list(zip(*kwargs)) newargs = newargs + list(kwvalsorted) if subinds: subargs = list(zip(subinds, subvals)) subargs.sort() # subindsorted, subvalsorted = list(zip(*subargs)) # The substitutions must be inserted in order for ind, val in subargs: newargs.insert(ind, val) return self.left(*newargs, **kw) @property def param_names(self): """An ordered list of parameter names.""" return self._param_names def _make_leaflist(self): tdict = {} leaflist = [] make_subtree_dict(self, "", tdict, leaflist) self._leaflist = leaflist self._tdict = tdict def __getattr__(self, name): """ If someone accesses an attribute not already defined, map the parameters, and then see if the requested attribute is one of the parameters. """ # The following test is needed to avoid infinite recursion # caused by deepcopy. There may be other such cases discovered. 
if name == "__setstate__": raise AttributeError if name in self._param_names: return self.__dict__[name] else: raise AttributeError(f'Attribute "{name}" not found') def __getitem__(self, index): if self._leaflist is None: self._make_leaflist() leaflist = self._leaflist tdict = self._tdict if isinstance(index, slice): if index.step: raise ValueError("Steps in slices not supported for compound models") if index.start is not None: if isinstance(index.start, str): start = self._str_index_to_int(index.start) else: start = index.start else: start = 0 if index.stop is not None: if isinstance(index.stop, str): stop = self._str_index_to_int(index.stop) else: stop = index.stop - 1 else: stop = len(leaflist) - 1 if index.stop == 0: raise ValueError("Slice endpoint cannot be 0") if start < 0: start = len(leaflist) + start if stop < 0: stop = len(leaflist) + stop # now search for matching node: if stop == start: # only single value, get leaf instead in code below index = start else: for key in tdict: node, leftind, rightind = tdict[key] if leftind == start and rightind == stop: return node raise IndexError("No appropriate subtree matches slice") if np.issubdtype(type(index), np.integer): return leaflist[index] elif isinstance(index, str): return leaflist[self._str_index_to_int(index)] else: raise TypeError("index must be integer, slice, or model name string") def _str_index_to_int(self, str_index): # Search through leaflist for item with that name found = [] for nleaf, leaf in enumerate(self._leaflist): if getattr(leaf, "name", None) == str_index: found.append(nleaf) if len(found) == 0: raise IndexError(f"No component with name '{str_index}' found") if len(found) > 1: raise IndexError( f"Multiple components found using '{str_index}' as name\n" f"at indices {found}" ) return found[0] @property def n_inputs(self): """The number of inputs of a model.""" return self._n_inputs @n_inputs.setter def n_inputs(self, value): self._n_inputs = value @property def n_outputs(self): """The number of outputs of a model.""" return self._n_outputs @n_outputs.setter def n_outputs(self, value): self._n_outputs = value @property def eqcons(self): return self._eqcons @eqcons.setter def eqcons(self, value): self._eqcons = value @property def ineqcons(self): return self._eqcons @ineqcons.setter def ineqcons(self, value): self._eqcons = value def traverse_postorder(self, include_operator=False): """Postorder traversal of the CompoundModel tree.""" res = [] if isinstance(self.left, CompoundModel): res = res + self.left.traverse_postorder(include_operator) else: res = res + [self.left] if isinstance(self.right, CompoundModel): res = res + self.right.traverse_postorder(include_operator) else: res = res + [self.right] if include_operator: res.append(self.op) else: res.append(self) return res def _format_expression(self, format_leaf=None): leaf_idx = 0 operands = deque() if format_leaf is None: format_leaf = lambda i, l: f"[{i}]" for node in self.traverse_postorder(): if not isinstance(node, CompoundModel): operands.append(format_leaf(leaf_idx, node)) leaf_idx += 1 continue right = operands.pop() left = operands.pop() if node.op in OPERATOR_PRECEDENCE: oper_order = OPERATOR_PRECEDENCE[node.op] if isinstance(node, CompoundModel): if ( isinstance(node.left, CompoundModel) and OPERATOR_PRECEDENCE[node.left.op] < oper_order ): left = f"({left})" if ( isinstance(node.right, CompoundModel) and OPERATOR_PRECEDENCE[node.right.op] < oper_order ): right = f"({right})" operands.append(f"{left} {node.op} {right}") else: left = f"(({left})," 
right = f"({right}))" operands.append(" ".join((node.op[0], left, right))) return "".join(operands) def _format_components(self): if self._parameters_ is None: self._map_parameters() return "\n\n".join(f"[{idx}]: {m!r}" for idx, m in enumerate(self._leaflist)) def __str__(self): expression = self._format_expression() components = self._format_components() keywords = [ ("Expression", expression), ("Components", "\n" + indent(components, 4 * " ")), ] return super()._format_str(keywords=keywords) def rename(self, name): self.name = name return self @property def isleaf(self): return False @property def inverse(self): if self.op == "|": return self.right.inverse | self.left.inverse elif self.op == "&": return self.left.inverse & self.right.inverse else: return NotImplemented @property def fittable(self): """Set the fittable attribute on a compound model.""" if self._fittable is None: if self._leaflist is None: self._map_parameters() self._fittable = all(m.fittable for m in self._leaflist) return self._fittable __add__ = _model_oper("+") __sub__ = _model_oper("-") __mul__ = _model_oper("*") __truediv__ = _model_oper("/") __pow__ = _model_oper("**") __or__ = _model_oper("|") __and__ = _model_oper("&") def _map_parameters(self): """ Map all the constituent model parameters to the compound object, renaming as necessary by appending a suffix number. This can be an expensive operation, particularly for a complex expression tree. All the corresponding parameter attributes are created that one expects for the Model class. The parameter objects that the attributes point to are the same objects as in the constiutent models. Changes made to parameter values to either are seen by both. Prior to calling this, none of the associated attributes will exist. This method must be called to make the model usable by fitting engines. If oldnames=True, then parameters are named as in the original implementation of compound models. """ if self._parameters is not None: # do nothing return if self._leaflist is None: self._make_leaflist() self._parameters_ = {} param_map = {} self._param_names = [] for lindex, leaf in enumerate(self._leaflist): if not isinstance(leaf, dict): for param_name in leaf.param_names: param = getattr(leaf, param_name) new_param_name = f"{param_name}_{lindex}" self.__dict__[new_param_name] = param self._parameters_[new_param_name] = param self._param_names.append(new_param_name) param_map[new_param_name] = (lindex, param_name) self._param_metrics = defaultdict(dict) self._param_map = param_map self._param_map_inverse = {v: k for k, v in param_map.items()} self._initialize_slices() self._param_names = tuple(self._param_names) @staticmethod def _recursive_lookup(branch, adict, key): if isinstance(branch, CompoundModel): return adict[key] return branch, key def inputs_map(self): """ Map the names of the inputs to this ExpressionTree to the inputs to the leaf models. 
""" inputs_map = {} if not isinstance( self.op, str ): # If we don't have an operator the mapping is trivial return {inp: (self, inp) for inp in self.inputs} elif self.op == "|": if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() for inp in self.inputs: if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[inp] else: inputs_map[inp] = self.left, inp elif self.op == "&": if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() if isinstance(self.right, CompoundModel): r_inputs_map = self.right.inputs_map() for i, inp in enumerate(self.inputs): if i < len(self.left.inputs): # Get from left if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[self.left.inputs[i]] else: inputs_map[inp] = self.left, self.left.inputs[i] else: # Get from right if isinstance(self.right, CompoundModel): inputs_map[inp] = r_inputs_map[ self.right.inputs[i - len(self.left.inputs)] ] else: inputs_map[inp] = ( self.right, self.right.inputs[i - len(self.left.inputs)], ) elif self.op == "fix_inputs": fixed_ind = list(self.right.keys()) ind = [ list(self.left.inputs).index(i) if isinstance(i, str) else i for i in fixed_ind ] inp_ind = list(range(self.left.n_inputs)) for i in ind: inp_ind.remove(i) for i in inp_ind: inputs_map[self.left.inputs[i]] = self.left, self.left.inputs[i] else: if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() for inp in self.left.inputs: if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[inp] else: inputs_map[inp] = self.left, inp return inputs_map @property def unit_change_composition(self): """ A flag indicating whether or not the unit change composition has been set for this model. """ return self._unit_change_composition @unit_change_composition.setter def unit_change_composition(self, value): self._unit_change_composition = value def _parameter_units_for_data_units(self, input_units, output_units): if self._leaflist is None: self._map_parameters() units_for_data = {} for imodel, model in enumerate(self._leaflist): if self.unit_change_composition: input_units = model.input_units or input_units output_units = model.output_units or output_units units_for_data_leaf = model._parameter_units_for_data_units( input_units, output_units ) for param_leaf in units_for_data_leaf: param = self._param_map_inverse[(imodel, param_leaf)] units_for_data[param] = units_for_data_leaf[param_leaf] return units_for_data @property def input_units(self): inputs_map = self.inputs_map() input_units_dict = { key: inputs_map[key][0].input_units[orig_key] for key, (mod, orig_key) in inputs_map.items() if inputs_map[key][0].input_units is not None } if input_units_dict: return input_units_dict return None @property def input_units_equivalencies(self): inputs_map = self.inputs_map() input_units_equivalencies_dict = { key: inputs_map[key][0].input_units_equivalencies[orig_key] for key, (mod, orig_key) in inputs_map.items() if inputs_map[key][0].input_units_equivalencies is not None } if not input_units_equivalencies_dict: return None return input_units_equivalencies_dict @property def input_units_allow_dimensionless(self): inputs_map = self.inputs_map() return { key: inputs_map[key][0].input_units_allow_dimensionless[orig_key] for key, (mod, orig_key) in inputs_map.items() } @property def input_units_strict(self): inputs_map = self.inputs_map() return { key: inputs_map[key][0].input_units_strict[orig_key] for key, (mod, orig_key) in inputs_map.items() } @property def return_units(self): 
outputs_map = self.outputs_map() return { key: outputs_map[key][0].return_units[orig_key] for key, (mod, orig_key) in outputs_map.items() if outputs_map[key][0].return_units is not None } def outputs_map(self): """ Map the names of the outputs to this ExpressionTree to the outputs to the leaf models. """ outputs_map = {} if not isinstance( self.op, str ): # If we don't have an operator the mapping is trivial return {out: (self, out) for out in self.outputs} elif self.op == "|": if isinstance(self.right, CompoundModel): r_outputs_map = self.right.outputs_map() for out in self.outputs: if isinstance(self.right, CompoundModel): outputs_map[out] = r_outputs_map[out] else: outputs_map[out] = self.right, out elif self.op == "&": if isinstance(self.left, CompoundModel): l_outputs_map = self.left.outputs_map() if isinstance(self.right, CompoundModel): r_outputs_map = self.right.outputs_map() for i, out in enumerate(self.outputs): if i < len(self.left.outputs): # Get from left if isinstance(self.left, CompoundModel): outputs_map[out] = l_outputs_map[self.left.outputs[i]] else: outputs_map[out] = self.left, self.left.outputs[i] else: # Get from right if isinstance(self.right, CompoundModel): outputs_map[out] = r_outputs_map[ self.right.outputs[i - len(self.left.outputs)] ] else: outputs_map[out] = ( self.right, self.right.outputs[i - len(self.left.outputs)], ) elif self.op == "fix_inputs": return self.left.outputs_map() else: if isinstance(self.left, CompoundModel): l_outputs_map = self.left.outputs_map() for out in self.left.outputs: if isinstance(self.left, CompoundModel): outputs_map[out] = l_outputs_map()[out] else: outputs_map[out] = self.left, out return outputs_map @property def has_user_bounding_box(self): """ A flag indicating whether or not a custom bounding_box has been assigned to this model by a user, via assignment to ``model.bounding_box``. """ return self._user_bounding_box is not None def render(self, out=None, coords=None): """ Evaluate a model at fixed positions, respecting the ``bounding_box``. The key difference relative to evaluating the model directly is that this method is limited to a bounding box if the `~astropy.modeling.Model.bounding_box` attribute is set. Parameters ---------- out : `numpy.ndarray`, optional An array that the evaluated model will be added to. If this is not given (or given as ``None``), a new array will be created. coords : array-like, optional An array to be used to translate from the model's input coordinates to the ``out`` array. It should have the property that ``self(coords)`` yields the same shape as ``out``. If ``out`` is not specified, ``coords`` will be used to determine the shape of the returned array. If this is not provided (or None), the model will be evaluated on a grid determined by `~astropy.modeling.Model.bounding_box`. Returns ------- out : `numpy.ndarray` The model added to ``out`` if ``out`` is not ``None``, or else a new array from evaluating the model over ``coords``. If ``out`` and ``coords`` are both `None`, the returned array is limited to the `~astropy.modeling.Model.bounding_box` limits. If `~astropy.modeling.Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. Raises ------ ValueError If ``coords`` are not given and the `~astropy.modeling.Model.bounding_box` of this model is not set. 
Examples -------- :ref:`astropy:bounding-boxes` """ bbox = self.get_bounding_box() ndim = self.n_inputs if (coords is None) and (out is None) and (bbox is None): raise ValueError("If no bounding_box is set, coords or out must be input.") # for consistent indexing if ndim == 1: if coords is not None: coords = [coords] if bbox is not None: bbox = [bbox] if coords is not None: coords = np.asanyarray(coords, dtype=float) # Check dimensions match out and model assert len(coords) == ndim if out is not None: if coords[0].shape != out.shape: raise ValueError("inconsistent shape of the output.") else: out = np.zeros(coords[0].shape) if out is not None: out = np.asanyarray(out) if out.ndim != ndim: raise ValueError( "the array and model must have the same number of dimensions." ) if bbox is not None: # Assures position is at center pixel, important when using # add_array. pd = ( np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox]) .astype(int) .T ) pos, delta = pd if coords is not None: sub_shape = tuple(delta * 2 + 1) sub_coords = np.array( [extract_array(c, sub_shape, pos) for c in coords] ) else: limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T] sub_coords = np.mgrid[limits] sub_coords = sub_coords[::-1] if out is None: out = self(*sub_coords) else: try: out = add_array(out, self(*sub_coords), pos) except ValueError: raise ValueError( "The `bounding_box` is larger than the input out in " "one or more dimensions. Set " "`model.bounding_box = None`." ) else: if coords is None: im_shape = out.shape limits = [slice(i) for i in im_shape] coords = np.mgrid[limits] coords = coords[::-1] out += self(*coords) return out def replace_submodel(self, name, model): """ Construct a new `~astropy.modeling.CompoundModel` instance from an existing CompoundModel, replacing the named submodel with a new model. In order to ensure that inverses and names are kept/reconstructed, it's necessary to rebuild the CompoundModel from the replaced node all the way back to the base. The original CompoundModel is left untouched. Parameters ---------- name : str name of submodel to be replaced model : `~astropy.modeling.Model` replacement model """ submodels = [ m for m in self.traverse_postorder() if getattr(m, "name", None) == name ] if submodels: if len(submodels) > 1: raise ValueError(f"More than one submodel named {name}") old_model = submodels.pop() if len(old_model) != len(model): raise ValueError( "New and old models must have equal values for n_models" ) # Do this check first in order to raise a more helpful Exception, # although it would fail trying to construct the new CompoundModel if ( old_model.n_inputs != model.n_inputs or old_model.n_outputs != model.n_outputs ): raise ValueError( "New model must match numbers of inputs and " "outputs of existing model" ) tree = _get_submodel_path(self, name) while tree: branch = self.copy() for node in tree[:-1]: branch = getattr(branch, node) setattr(branch, tree[-1], model) model = CompoundModel( branch.op, branch.left, branch.right, name=branch.name ) tree = tree[:-1] return model else: raise ValueError(f"No submodels found named {name}") def without_units_for_data(self, **kwargs): r""" See `~astropy.modeling.Model.without_units_for_data` for overview of this method. Notes ----- This modifies the behavior of the base method to account for the case where the sub-models of a compound model have different output units. 
This is only valid for compound \*, / and | (only if ``unit_change_composition`` is ``True`` on the instance) compound models as in that case it is reasonable to mix the output units. It does this by modifying the output units of each sub model by using the output units of the other sub model so that we can apply the original function and get the desired result. Additional data has to be output in the mixed output unit case so that the units can be properly rebuilt by `~astropy.modeling.CompoundModel.with_units_from_data`. Outside the mixed output units, this method is identical to the base method. """ if self.op in ["*", "/"]: inputs = {inp: kwargs[inp] for inp in self.inputs} left_units = self.left.output_units(**kwargs) right_units = self.right.output_units(**kwargs) if self.op == "*": left_kwargs = { out: kwargs[out] / right_units[out] for out in self.left.outputs if kwargs[out] is not None } right_kwargs = { out: kwargs[out] / left_units[out] for out in self.right.outputs if kwargs[out] is not None } else: left_kwargs = { out: kwargs[out] * right_units[out] for out in self.left.outputs if kwargs[out] is not None } right_kwargs = { out: 1 / kwargs[out] * left_units[out] for out in self.right.outputs if kwargs[out] is not None } left_kwargs.update(inputs.copy()) right_kwargs.update(inputs.copy()) left = self.left.without_units_for_data(**left_kwargs) if isinstance(left, tuple): left_kwargs["_left_kwargs"] = left[1] left_kwargs["_right_kwargs"] = left[2] left = left[0] right = self.right.without_units_for_data(**right_kwargs) if isinstance(right, tuple): right_kwargs["_left_kwargs"] = right[1] right_kwargs["_right_kwargs"] = right[2] right = right[0] model = CompoundModel(self.op, left, right, name=self.name) return model, left_kwargs, right_kwargs elif self.op == "|" and self.unit_change_composition: left_out = self.left(**{inp: kwargs[inp] for inp in self.inputs}) left = self.left.without_units_for_data(x=kwargs["x"], y=left_out) left_kwargs = {"x": kwargs["x"], "y": left_out} right = self.right.without_units_for_data( x=self.left(**{inp: kwargs[inp] for inp in self.inputs}), y=kwargs["y"] ) right_kwargs = {"x": left_out, "y": kwargs["y"]} model = CompoundModel( self.op, left, right, name=self.name, unit_change_composition=self.unit_change_composition, ) return model, left_kwargs, right_kwargs else: return super().without_units_for_data(**kwargs) def with_units_from_data(self, **kwargs): """ See `~astropy.modeling.Model.with_units_from_data` for overview of this method. Notes ----- This modifies the behavior of the base method to account for the case where the sub-models of a compound model have different output units. This is only valid for compound * and / compound models as in that case it is reasonable to mix the output units. In order to do this it requires some additional information output by `~astropy.modeling.CompoundModel.without_units_for_data` passed as keyword arguments under the keywords ``_left_kwargs`` and ``_right_kwargs``. Outside the mixed output units, this method is identical to the base method. 
""" if self.op in ["*", "/"] or (self.op == "|" and self.unit_change_composition): left_kwargs = kwargs.pop("_left_kwargs") right_kwargs = kwargs.pop("_right_kwargs") left = self.left.with_units_from_data(**left_kwargs) right = self.right.with_units_from_data(**right_kwargs) return CompoundModel( self.op, left, right, name=self.name, unit_change_composition=self.unit_change_composition, ) else: return super().with_units_from_data(**kwargs) def _get_submodel_path(model, name): """Find the route down a CompoundModel's tree to the model with the specified name (whether it's a leaf or not). """ if getattr(model, "name", None) == name: return [] try: return ["left"] + _get_submodel_path(model.left, name) except (AttributeError, TypeError): pass try: return ["right"] + _get_submodel_path(model.right, name) except (AttributeError, TypeError): pass def binary_operation(binoperator, left, right): """ Perform binary operation. Operands may be matching tuples of operands. """ if isinstance(left, tuple) and isinstance(right, tuple): return tuple(binoperator(item[0], item[1]) for item in zip(left, right)) return binoperator(left, right) def get_ops(tree, opset): """ Recursive function to collect operators used. """ if isinstance(tree, CompoundModel): opset.add(tree.op) get_ops(tree.left, opset) get_ops(tree.right, opset) else: return def make_subtree_dict(tree, nodepath, tdict, leaflist): """Traverse a tree noting each node by a key. The key indicates all the left/right choices necessary to reach that node. Each key will reference a tuple that contains: - reference to the compound model for that node. - left most index contained within that subtree (relative to all indices for the whole tree) - right most index contained within that subtree """ # if this is a leaf, just append it to the leaflist if not hasattr(tree, "isleaf"): leaflist.append(tree) else: leftmostind = len(leaflist) make_subtree_dict(tree.left, nodepath + "l", tdict, leaflist) make_subtree_dict(tree.right, nodepath + "r", tdict, leaflist) rightmostind = len(leaflist) - 1 tdict[nodepath] = (tree, leftmostind, rightmostind) _ORDER_OF_OPERATORS = [("fix_inputs",), ("|",), ("&",), ("+", "-"), ("*", "/"), ("**",)] OPERATOR_PRECEDENCE = {} for idx, ops in enumerate(_ORDER_OF_OPERATORS): for op in ops: OPERATOR_PRECEDENCE[op] = idx del idx, op, ops def fix_inputs(modelinstance, values, bounding_boxes=None, selector_args=None): """ This function creates a compound model with one or more of the input values of the input model assigned fixed values (scalar or array). Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that one or more of the model input values will be fixed to some constant value. values : dict A dictionary where the key identifies which input to fix and its value is the value to fix it at. The key may either be the name of the input or a number reflecting its order in the inputs. 
Examples -------- >>> from astropy.modeling.models import Gaussian2D >>> g = Gaussian2D(1, 2, 3, 4, 5) >>> gv = fix_inputs(g, {0: 2.5}) Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y) """ model = CompoundModel("fix_inputs", modelinstance, values) if bounding_boxes is not None: if selector_args is None: selector_args = tuple((key, True) for key in values.keys()) bbox = CompoundBoundingBox.validate( modelinstance, bounding_boxes, selector_args ) _selector = bbox.selector_args.get_fixed_values(modelinstance, values) new_bbox = bbox[_selector] new_bbox = new_bbox.__class__.validate(model, new_bbox) model.bounding_box = new_bbox return model def bind_bounding_box(modelinstance, bounding_box, ignored=None, order="C"): """ Set a validated bounding box to a model instance. Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that the validated bounding box will be set on. bounding_box : tuple A bounding box tuple, see :ref:`astropy:bounding-boxes` for details ignored : list List of the inputs to be ignored by the bounding box. order : str, optional The ordering of the bounding box tuple, can be either ``'C'`` or ``'F'``. """ modelinstance.bounding_box = ModelBoundingBox.validate( modelinstance, bounding_box, ignored=ignored, order=order ) def bind_compound_bounding_box( modelinstance, bounding_boxes, selector_args, create_selector=None, ignored=None, order="C", ): """ Add a validated compound bounding box to a model instance. Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that the validated compound bounding box will be set on. bounding_boxes : dict A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes` for details. selector_args : list List of selector argument tuples to define selection for compound bounding box, see :ref:`astropy:bounding-boxes` for details. create_selector : callable, optional An optional callable with interface (selector_value, model) which can generate a bounding box based on a selector value and model if there is no bounding box in the compound bounding box listed under that selector value. Default is ``None``, meaning new bounding box entries will not be automatically generated. ignored : list List of the inputs to be ignored by the bounding box. order : str, optional The ordering of the bounding box tuple, can be either ``'C'`` or ``'F'``. """ modelinstance.bounding_box = CompoundBoundingBox.validate( modelinstance, bounding_boxes, selector_args, create_selector=create_selector, ignored=ignored, order=order, ) def custom_model(*args, fit_deriv=None): """ Create a model from a user defined function. The inputs and parameters of the model will be inferred from the arguments of the function. This can be used either as a function or as a decorator. See below for examples of both usages. The model is separable only if there is a single input. .. note:: All model parameters have to be defined as keyword arguments with default values in the model function. Use `None` as a default argument value if you do not want to have a default value for that parameter. The standard settable model properties can be configured by default using keyword arguments matching the name of the property; however, these values are not set as model "parameters". Moreover, users cannot use keyword arguments matching non-settable model properties, with the exception of ``n_outputs`` which should be set to the number of outputs of your function. 
Parameters ---------- func : function Function which defines the model. It should take N positional arguments where ``N`` is dimensions of the model (the number of independent variable in the model), and any number of keyword arguments (the parameters). It must return the value of the model (typically as an array, but can also be a scalar for scalar inputs). This corresponds to the `~astropy.modeling.Model.evaluate` method. fit_deriv : function, optional Function which defines the Jacobian derivative of the model. I.e., the derivative with respect to the *parameters* of the model. It should have the same argument signature as ``func``, but should return a sequence where each element of the sequence is the derivative with respect to the corresponding argument. This corresponds to the :meth:`~astropy.modeling.FittableModel.fit_deriv` method. Examples -------- Define a sinusoidal model function as a custom 1D model:: >>> from astropy.modeling.models import custom_model >>> import numpy as np >>> def sine_model(x, amplitude=1., frequency=1.): ... return amplitude * np.sin(2 * np.pi * frequency * x) >>> def sine_deriv(x, amplitude=1., frequency=1.): ... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x) >>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv) Create an instance of the custom model and evaluate it:: >>> model = SineModel() >>> model(0.25) # doctest: +FLOAT_CMP 1.0 This model instance can now be used like a usual astropy model. The next example demonstrates a 2D Moffat function model, and also demonstrates the support for docstrings (this example could also include a derivative, but it has been omitted for simplicity):: >>> @custom_model ... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0, ... alpha=1.0): ... \"\"\"Two dimensional Moffat function.\"\"\" ... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2 ... return amplitude * (1 + rr_gg) ** (-alpha) ... >>> print(Moffat2D.__doc__) Two dimensional Moffat function. >>> model = Moffat2D() >>> model(1, 1) # doctest: +FLOAT_CMP 0.3333333333333333 """ if len(args) == 1 and callable(args[0]): return _custom_model_wrapper(args[0], fit_deriv=fit_deriv) elif not args: return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv) else: raise TypeError( f"{__name__} takes at most one positional argument (the callable/" "function to be turned into a model. When used as a decorator " "it should be passed keyword arguments only (if " "any)." ) def _custom_model_inputs(func): """ Processes the inputs to the `~astropy.modeling.custom_model`'s function into the appropriate categories. 
Parameters ---------- func : callable Returns ------- inputs : list list of evaluation inputs special_params : dict dictionary of model properties which require special treatment settable_params : dict dictionary of defaults for settable model properties params : dict dictionary of model parameters set by `~astropyl.modeling.custom_model`'s function """ inputs, parameters = get_inputs_and_params(func) special = ["n_outputs"] settable = [ attr for attr, value in vars(Model).items() if isinstance(value, property) and value.fset is not None ] properties = [ attr for attr, value in vars(Model).items() if isinstance(value, property) and value.fset is None and attr not in special ] special_params = {} settable_params = {} params = {} for param in parameters: if param.name in special: special_params[param.name] = param.default elif param.name in settable: settable_params[param.name] = param.default elif param.name in properties: raise ValueError( f"Parameter '{param.name}' cannot be a model property: {properties}." ) else: params[param.name] = param.default return inputs, special_params, settable_params, params def _custom_model_wrapper(func, fit_deriv=None): """ Internal implementation `~astropy.modeling.custom_model`. When `~astropy.modeling.custom_model` is called as a function its arguments are passed to this function, and the result of this function is returned. When `~astropy.modeling.custom_model` is used as a decorator a partial evaluation of this function is returned by `~astropy.modeling.custom_model`. """ if not callable(func): raise ModelDefinitionError( "func is not callable; it must be a function or other callable object" ) if fit_deriv is not None and not callable(fit_deriv): raise ModelDefinitionError( "fit_deriv not callable; it must be a function or other callable object" ) model_name = func.__name__ inputs, special_params, settable_params, params = _custom_model_inputs(func) if fit_deriv is not None and len(fit_deriv.__defaults__) != len(params): raise ModelDefinitionError( "derivative function should accept same number of parameters as func." ) params = { param: Parameter(param, default=default) for param, default in params.items() } mod = find_current_module(2) if mod: modname = mod.__name__ else: modname = "__main__" members = { "__module__": str(modname), "__doc__": func.__doc__, "n_inputs": len(inputs), "n_outputs": special_params.pop("n_outputs", 1), "evaluate": staticmethod(func), "_settable_properties": settable_params, } if fit_deriv is not None: members["fit_deriv"] = staticmethod(fit_deriv) members.update(params) cls = type(model_name, (FittableModel,), members) cls._separable = len(inputs) == 1 return cls def render_model(model, arr=None, coords=None): """ Evaluates a model on an input array. Evaluation is limited to a bounding box if the `~astropy.modeling.Model.bounding_box` attribute is set. Parameters ---------- model : `~astropy.modeling.Model` Model to be evaluated. arr : `numpy.ndarray`, optional Array on which the model is evaluated. coords : array-like, optional Coordinate arrays mapping to ``arr``, such that ``arr[coords] == arr``. Returns ------- array : `numpy.ndarray` The model evaluated on the input ``arr`` or a new array from ``coords``. If ``arr`` and ``coords`` are both `None`, the returned array is limited to the `~astropy.modeling.Model.bounding_box` limits. If `~astropy.modeling.Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. 
Examples -------- :ref:`astropy:bounding-boxes` """ bbox = model.bounding_box if (coords is None) & (arr is None) & (bbox is None): raise ValueError("If no bounding_box is set, coords or arr must be input.") # for consistent indexing if model.n_inputs == 1: if coords is not None: coords = [coords] if bbox is not None: bbox = [bbox] if arr is not None: arr = arr.copy() # Check dimensions match model if arr.ndim != model.n_inputs: raise ValueError( "number of array dimensions inconsistent with number of model inputs." ) if coords is not None: # Check dimensions match arr and model coords = np.array(coords) if len(coords) != model.n_inputs: raise ValueError( "coordinate length inconsistent with the number of model inputs." ) if arr is not None: if coords[0].shape != arr.shape: raise ValueError("coordinate shape inconsistent with the array shape.") else: arr = np.zeros(coords[0].shape) if bbox is not None: # assures position is at center pixel, important when using add_array pd = pos, delta = ( np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox]) .astype(int) .T ) if coords is not None: sub_shape = tuple(delta * 2 + 1) sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords]) else: limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T] sub_coords = np.mgrid[limits] sub_coords = sub_coords[::-1] if arr is None: arr = model(*sub_coords) else: try: arr = add_array(arr, model(*sub_coords), pos) except ValueError: raise ValueError( "The `bounding_box` is larger than the input" " arr in one or more dimensions. Set " "`model.bounding_box = None`." ) else: if coords is None: im_shape = arr.shape limits = [slice(i) for i in im_shape] coords = np.mgrid[limits] arr += model(*coords[::-1]) return arr def hide_inverse(model): """ This is a convenience function intended to disable automatic generation of the inverse in compound models by disabling one of the constituent model's inverse. This is to handle cases where user provided inverse functions are not compatible within an expression. For example:: compound_model.inverse = hide_inverse(m1) + m2 + m3 This will ensure that the defined inverse itself won't attempt to build its own inverse, which would otherwise fail in this example (e.g., m = m1 + m2 + m3 happens to raise an exception for this reason.) Note that this permanently disables it. To prevent that, either copy the model or restore the inverse later. """ del model.inverse return model def compose_models_with_units(left, right): """ This function is a convenience function to compose two models with units such that unit changes are possible. This performs left | right, but with the added ability to handle unit changes. Parameters ---------- left: `~astropy.modeling.Model` The model to the left of the ``|`` operator. right: `~astropy.modeling.Model` The model to the right of the ``|`` operator. Returns ------- model: `~astropy.modeling.CompoundModel` The composed left ``|`` right, with unit change through ``|`` enabled. """ model = left | right model.unit_change_composition = True return model
CompoundModel
python
coleifer__peewee
tests/sqlite.py
{ "start": 87587, "end": 90038 }
class ____(ModelTestCase): database = get_in_memory_db() requires = [Datum] def test_collated_fields(self): rows = ( (1, 'abc', 'abc', 'abc ', 'abc'), (2, 'abc', 'abc', 'abc', 'ABC'), (3, 'abc', 'abc', 'abc ', 'Abc'), (4, 'abc', 'abc ', 'ABC', 'abc')) for pk, a, b, c, d in rows: Datum.create(id=pk, a=a, b=b, c=c, d=d) def assertC(query, expected): self.assertEqual([r.id for r in query], expected) base = Datum.select().order_by(Datum.id) # Text comparison a=b is performed using binary collating sequence. assertC(base.where(Datum.a == Datum.b), [1, 2, 3]) # Text comparison a=b is performed using the RTRIM collating sequence. assertC(base.where(Datum.a == Datum.b.collate('RTRIM')), [1, 2, 3, 4]) # Text comparison d=a is performed using the NOCASE collating sequence. assertC(base.where(Datum.d == Datum.a), [1, 2, 3, 4]) # Text comparison a=d is performed using the BINARY collating sequence. assertC(base.where(Datum.a == Datum.d), [1, 4]) # Text comparison 'abc'=c is performed using RTRIM collating sequence. assertC(base.where('abc' == Datum.c), [1, 2, 3]) # Text comparison c='abc' is performed using RTRIM collating sequence. assertC(base.where(Datum.c == 'abc'), [1, 2, 3]) # Grouping is performed using the NOCASE collating sequence (Values # 'abc', 'ABC', and 'Abc' are placed in the same group). query = Datum.select(fn.COUNT(Datum.id)).group_by(Datum.d) self.assertEqual(query.scalar(), 4) # Grouping is performed using the BINARY collating sequence. 'abc' and # 'ABC' and 'Abc' form different groups. query = Datum.select(fn.COUNT(Datum.id)).group_by(Datum.d.concat('')) self.assertEqual([r[0] for r in query.tuples()], [1, 1, 2]) # Sorting of column c is performed using the RTRIM collating sequence. assertC(base.order_by(Datum.c, Datum.id), [4, 1, 2, 3]) # Sorting of (c||'') is performed using the BINARY collating sequence. assertC(base.order_by(Datum.c.concat(''), Datum.id), [4, 2, 3, 1]) # Sorting of column c is performed using the NOCASE collating sequence. assertC(base.order_by(Datum.c.collate('NOCASE'), Datum.id), [2, 4, 3, 1])
TestCollatedFieldDefinitions
python
doocs__leetcode
solution/2900-2999/2974.Minimum Number Game/Solution2.py
{ "start": 0, "end": 209 }
class ____: def numberGame(self, nums: List[int]) -> List[int]: nums.sort() for i in range(0, len(nums), 2): nums[i], nums[i + 1] = nums[i + 1], nums[i] return nums
Solution
python
huggingface__transformers
tests/test_tokenizers_backend_mixin.py
{ "start": 506, "end": 24576 }
class ____: """ Tests that specifically test the tokenizers-backend. These tests don't need to be run for every model, just once to verify the backend works correctly. """ tokenizer_class = None rust_tokenizer_class = None from_pretrained_id = None from_pretrained_kwargs = None @classmethod def setUpClass(cls) -> None: cls.from_pretrained_id = ( [cls.from_pretrained_id] if isinstance(cls.from_pretrained_id, str) else cls.from_pretrained_id ) # Use rust_tokenizer_class if set, otherwise fall back to tokenizer_class tokenizer_class = getattr(cls, "rust_tokenizer_class", None) or getattr(cls, "tokenizer_class", None) cls.tokenizers_list = [ ( tokenizer_class, pretrained_id, cls.from_pretrained_kwargs if cls.from_pretrained_kwargs is not None else {}, ) for pretrained_id in (cls.from_pretrained_id or []) ] cls.tmpdirname = tempfile.mkdtemp() # save the first pretrained tokenizer to tmpdirname for tests to use if cls.from_pretrained_id and tokenizer_class is not None: try: from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained( cls.from_pretrained_id[0], **(cls.from_pretrained_kwargs if cls.from_pretrained_kwargs is not None else {}), ) tokenizer.save_pretrained(cls.tmpdirname) except Exception: pass @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname, ignore_errors=True) @classmethod def get_rust_tokenizer(cls, pretrained_name=None, **kwargs) -> TokenizersBackend: pretrained_name = pretrained_name or cls.tmpdirname tokenizer_class = getattr(cls, "rust_tokenizer_class", None) or getattr(cls, "tokenizer_class", None) return tokenizer_class.from_pretrained(pretrained_name, **kwargs) def test_alignment_methods(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs) words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"] text = " ".join(words) batch_size = 3 encoding = tokenizer_r(text, add_special_tokens=False) batch_encoding = tokenizer_r([text] * batch_size, add_special_tokens=False) num_tokens = len(encoding["input_ids"]) last_word_index = len(words) - 1 last_token_index = num_tokens - 1 last_batch_index = batch_size - 1 last_char_index = len(text) - 1 # words, tokens self.assertEqual(len(encoding.word_ids(0)), num_tokens) word_ids = [w for w in encoding.word_ids(0) if w is not None] self.assertEqual(max(word_ids), last_word_index) self.assertEqual(min(word_ids), 0) batch_word_ids = [w for w in batch_encoding.word_ids(last_batch_index) if w is not None] self.assertEqual(len(batch_encoding.word_ids(last_batch_index)), num_tokens) self.assertEqual(max(batch_word_ids), last_word_index) self.assertEqual(min(batch_word_ids), 0) self.assertEqual(len(encoding.tokens(0)), num_tokens) # Assert token_to_word self.assertEqual(encoding.token_to_word(0), 0) self.assertEqual(encoding.token_to_word(0, 0), 0) self.assertEqual(encoding.token_to_word(last_token_index), last_word_index) self.assertEqual(encoding.token_to_word(0, last_token_index), last_word_index) self.assertEqual(batch_encoding.token_to_word(1, 0), 0) self.assertEqual(batch_encoding.token_to_word(0, last_token_index), last_word_index) self.assertEqual(batch_encoding.token_to_word(last_batch_index, last_token_index), last_word_index) # Assert word_to_tokens self.assertEqual(encoding.word_to_tokens(0).start, 0) self.assertEqual(encoding.word_to_tokens(0, 0).start, 0) self.assertEqual(encoding.word_to_tokens(last_word_index).end, 
last_token_index + 1) self.assertEqual(encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1) self.assertEqual(batch_encoding.word_to_tokens(1, 0).start, 0) self.assertEqual(batch_encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1) self.assertEqual( batch_encoding.word_to_tokens(last_batch_index, last_word_index).end, last_token_index + 1 ) # Assert token_to_chars self.assertEqual(encoding.token_to_chars(0).start, 0) self.assertEqual(encoding.token_to_chars(0, 0).start, 0) self.assertEqual(encoding.token_to_chars(last_token_index).end, last_char_index + 1) self.assertEqual(encoding.token_to_chars(0, last_token_index).end, last_char_index + 1) self.assertEqual(batch_encoding.token_to_chars(1, 0).start, 0) self.assertEqual(batch_encoding.token_to_chars(0, last_token_index).end, last_char_index + 1) self.assertEqual( batch_encoding.token_to_chars(last_batch_index, last_token_index).end, last_char_index + 1 ) # Assert char_to_token self.assertEqual(encoding.char_to_token(0), 0) self.assertEqual(encoding.char_to_token(0, 0), 0) self.assertEqual(encoding.char_to_token(last_char_index), last_token_index) self.assertEqual(encoding.char_to_token(0, last_char_index), last_token_index) self.assertEqual(batch_encoding.char_to_token(1, 0), 0) self.assertEqual(batch_encoding.char_to_token(0, last_char_index), last_token_index) self.assertEqual(batch_encoding.char_to_token(last_batch_index, last_char_index), last_token_index) # Assert char_to_word self.assertEqual(encoding.char_to_word(0), 0) self.assertEqual(encoding.char_to_word(0, 0), 0) self.assertEqual(encoding.char_to_word(last_char_index), last_word_index) self.assertEqual(encoding.char_to_word(0, last_char_index), last_word_index) self.assertEqual(batch_encoding.char_to_word(1, 0), 0) self.assertEqual(batch_encoding.char_to_word(0, last_char_index), last_word_index) self.assertEqual(batch_encoding.char_to_word(last_batch_index, last_char_index), last_word_index) # Assert word_to_chars self.assertEqual(encoding.word_to_chars(0).start, 0) self.assertEqual(encoding.word_to_chars(0, 0).start, 0) self.assertEqual(encoding.word_to_chars(last_word_index).end, last_char_index + 1) self.assertEqual(encoding.word_to_chars(0, last_word_index).end, last_char_index + 1) self.assertEqual(batch_encoding.word_to_chars(1, 0).start, 0) self.assertEqual(batch_encoding.word_to_chars(0, last_word_index).end, last_char_index + 1) self.assertEqual( batch_encoding.word_to_chars(last_batch_index, last_word_index).end, last_char_index + 1 ) # Assert token_to_sequence self.assertEqual(encoding.token_to_sequence(num_tokens // 2), 0) self.assertEqual(encoding.token_to_sequence(0, num_tokens // 2), 0) self.assertEqual(batch_encoding.token_to_sequence(1, num_tokens // 2), 0) self.assertEqual(batch_encoding.token_to_sequence(0, num_tokens // 2), 0) self.assertEqual(batch_encoding.token_to_sequence(last_batch_index, num_tokens // 2), 0) # Pair of input sequences words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"] text = " ".join(words) pair_words = ["Amazing", "example", "full", "of", "inspiration"] pair_text = " ".join(pair_words) batch_size = 3 index_word_in_first_seq = words.index("inspiration") index_word_in_pair_seq = pair_words.index("inspiration") index_char_in_first_seq = text.find("inspiration") index_char_in_pair_seq = pair_text.find("inspiration") pair_encoding = tokenizer_r(text, pair_text, add_special_tokens=False) pair_batch_encoding = tokenizer_r( [text] * batch_size, [pair_text] * batch_size, 
add_special_tokens=False ) num_tokens = len(encoding["input_ids"]) last_word_index = len(words) - 1 last_token_index = num_tokens - 1 last_batch_index = batch_size - 1 last_char_index = len(text) - 1 # Assert word_to_tokens self.assertNotEqual( pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start, pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start, ) self.assertEqual( pair_encoding["input_ids"][ pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start ], pair_encoding["input_ids"][ pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start ], ) self.assertNotEqual( pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start, pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start, ) self.assertEqual( pair_batch_encoding["input_ids"][1][ pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start ], pair_batch_encoding["input_ids"][1][ pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start ], ) # Assert char_to_token self.assertNotEqual( pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0), pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1), ) self.assertEqual( pair_encoding["input_ids"][pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0)], pair_encoding["input_ids"][pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1)], ) self.assertNotEqual( pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0), pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1), ) self.assertEqual( pair_batch_encoding["input_ids"][1][ pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0) ], pair_batch_encoding["input_ids"][1][ pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1) ], ) # Assert char_to_word self.assertNotEqual( pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0), pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1), ) self.assertEqual( words[pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0)], pair_words[pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1)], ) self.assertNotEqual( pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0), pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1), ) self.assertEqual( words[pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0)], pair_words[pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1)], ) # Assert word_to_chars self.assertNotEqual( pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start, pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start, ) self.assertEqual( text[pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start], pair_text[pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start], ) self.assertNotEqual( pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start, pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start, ) self.assertEqual( text[pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start], pair_text[pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start], ) # Assert token_to_sequence pair_encoding = 
tokenizer_r(text, pair_text, add_special_tokens=True) pair_sequence_ids = [ pair_encoding.token_to_sequence(i) for i in range(len(pair_encoding["input_ids"])) ] self.assertIn(0, pair_sequence_ids) self.assertIn(1, pair_sequence_ids) if tokenizer_r.num_special_tokens_to_add(pair=True): self.assertIn(None, pair_sequence_ids) pair_batch_encoding = tokenizer_r( [text] * batch_size, [pair_text] * batch_size, add_special_tokens=True ) pair_batch_sequence_ids = [ pair_batch_encoding.token_to_sequence(1, i) for i in range(len(pair_batch_encoding["input_ids"][0])) ] self.assertIn(0, pair_batch_sequence_ids) self.assertIn(1, pair_batch_sequence_ids) if tokenizer_r.num_special_tokens_to_add(pair=True): self.assertIn(None, pair_batch_sequence_ids) def test_offsets_mapping(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs) text = "Wonderful no inspiration example with subtoken" pair = "Along with an awesome pair" # No pair tokens_with_offsets = tokenizer_r( text, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True ) added_tokens = tokenizer_r.num_special_tokens_to_add(False) offsets = tokens_with_offsets["offset_mapping"] # Assert there is the same number of tokens and offsets self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"])) # Assert there is online added_tokens special_tokens self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens) # Pairs tokens_with_offsets = tokenizer_r( text, pair, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True ) added_tokens = tokenizer_r.num_special_tokens_to_add(True) offsets = tokens_with_offsets["offset_mapping"] # Assert there is the same number of tokens and offsets self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"])) # Assert there is online added_tokens special_tokens self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens) def test_training_new_tokenizer(self): # This feature only exists for fast tokenizers tokenizer = self.get_rust_tokenizer() new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100) # Test we can use the new tokenizer with something not seen during training inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."]) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "This is the first sentence" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) # We check that the parameters of the tokenizer remained the same # Check we have the same number of added_tokens for both pair and non-pair inputs. self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False)) self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True)) # Check we have the correct max_length for both pair and non-pair inputs. 
self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence) self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair) # Assert the set of special tokens match as we didn't ask to change them self.assertSequenceEqual( tokenizer.all_special_tokens, new_tokenizer.all_special_tokens, ) self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map) def test_training_new_tokenizer_with_special_tokens_change(self): # This feature only exists for fast tokenizers tokenizer = self.get_rust_tokenizer() # Test with a special tokens map class_signature = inspect.signature(tokenizer.__class__) if "cls_token" in class_signature.parameters: new_tokenizer = tokenizer.train_new_from_iterator( SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"} ) cls_id = new_tokenizer.get_vocab()["<cls>"] self.assertEqual(new_tokenizer.cls_token, "<cls>") self.assertEqual(new_tokenizer.cls_token_id, cls_id) # Create a new mapping from the special tokens defined in the original tokenizer special_tokens_list = PreTrainedTokenizerBase.SPECIAL_TOKENS_ATTRIBUTES.copy() if "additional_special_tokens" in special_tokens_list: special_tokens_list.remove("additional_special_tokens") special_tokens_map = {} for token in special_tokens_list: if getattr(tokenizer, token) is not None: special_token = getattr(tokenizer, token) special_tokens_map[special_token] = f"{special_token}a" # Train new tokenizer new_tokenizer = tokenizer.train_new_from_iterator( SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map ) # Check the changes for token in special_tokens_list: # Get the private one to avoid unnecessary warnings. if getattr(tokenizer, token) is None: continue special_token = getattr(tokenizer, token) if special_token in special_tokens_map: new_special_token = getattr(new_tokenizer, token) self.assertEqual(special_tokens_map[special_token], new_special_token) new_id = new_tokenizer.get_vocab()[new_special_token] self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id) # Check if the special tokens have been kept (all_special_tokens returns strings) for special_token in tokenizer.all_special_tokens: if special_token not in special_tokens_map: # The special token must appear identically in the list of the new tokenizer. self.assertTrue( special_token in new_tokenizer.all_special_tokens, f"'{special_token}' should be in {new_tokenizer.all_special_tokens}", ) else: # The special token must appear in the list of the new tokenizer with the new mapping. 
self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens) # Test we can use the new tokenizer with something not seen during training inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."]) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "This is the first sentence" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) @parameterized.expand([(True,), (False,)]) def test_rust_tokenizer_add_prefix_space(self, add_prefix_space): for tokenizer, pretrained_name, _ in self.tokenizers_list: fast_tokenizer = tokenizer.from_pretrained(pretrained_name, add_prefix_space=add_prefix_space) self.assertEqual(fast_tokenizer.add_prefix_space, add_prefix_space) # Only the ByteLevel pre-tokenizer has the `add_prefix_space` attribute, we have to ensure that it's set correctly if hasattr(fast_tokenizer.backend_tokenizer.pre_tokenizer, "add_prefix_space"): self.assertEqual(fast_tokenizer.backend_tokenizer.pre_tokenizer.add_prefix_space, add_prefix_space) def test_local_files_only(self): from transformers import AutoTokenizer pretrained_list = getattr(self, "from_pretrained_id", []) or [] for pretrained_name in pretrained_list: with self.subTest(f"AutoTokenizer ({pretrained_name})"): # First cache the tokenizer files try: tokenizer_cached = AutoTokenizer.from_pretrained(pretrained_name) # Now load with local_files_only=True tokenizer_local = AutoTokenizer.from_pretrained(pretrained_name, local_files_only=True) # Check that the two tokenizers are identical self.assertEqual(tokenizer_cached.get_vocab(), tokenizer_local.get_vocab()) self.assertEqual( tokenizer_cached.all_special_tokens_extended, tokenizer_local.all_special_tokens_extended, ) except Exception as _: pass # if the pretrained model is not loadable how could it pass locally :)
TokenizersBackendTesterMixin
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_U.py
{ "start": 80, "end": 1090 }
class ____(Benchmark): r""" Ursem 1 objective function. This class defines the Ursem 1 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\text{Ursem01}}(x) = - \sin(2x_1 - 0.5 \pi) - 3 \cos(x_2) - 0.5 x_1 with :math:`x_1 \in [-2.5, 3]` and :math:`x_2 \in [-2, 2]`. *Global optimum*: :math:`f(x) = -4.81681406371` for :math:`x = [1.69714, 0.0]` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = [(-2.5, 3.0), (-2.0, 2.0)] self.global_optimum = [[1.69714, 0.0]] self.fglob = -4.81681406371 def fun(self, x, *args): self.nfev += 1 return -sin(2 * x[0] - 0.5 * pi) - 3.0 * cos(x[1]) - 0.5 * x[0]
Ursem01
python
cython__cython
Cython/Compiler/PyrexTypes.py
{ "start": 15606, "end": 25277 }
class ____(BaseType): # # Pseudo-type defined with a ctypedef statement in a # 'cdef extern from' block. # Delegates most attribute lookups to the base type. # (Anything not defined here or in the BaseType is delegated.) # # qualified_name string # typedef_name string # typedef_cname string # typedef_base_type PyrexType # typedef_is_external bool is_typedef = 1 typedef_is_external = 0 to_py_utility_code = None from_py_utility_code = None subtypes = ['typedef_base_type'] def __init__(self, name, base_type, cname, is_external=0, namespace=None): assert not base_type.is_complex self.typedef_name = name self.typedef_cname = cname self.typedef_base_type = base_type self.typedef_is_external = is_external self.typedef_namespace = namespace def resolve(self): return self.typedef_base_type.resolve() def resolve_known_type(self): """Resolve the typedef unless it is external (and thus not safely known). """ if self.typedef_is_external: return self tp = self.typedef_base_type while tp.is_typedef and not tp.typedef_is_external: tp = tp.typedef_base_type return tp def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0): if pyrex or for_display: base_code = self.typedef_name else: base_code = public_decl(self.typedef_cname, dll_linkage) if self.typedef_namespace is not None and not pyrex: base_code = "%s::%s" % (self.typedef_namespace.empty_declaration_code(), base_code) return self.base_declaration_code(base_code, entity_code) def as_argument_type(self): return self def cast_code(self, expr_code): # If self is really an array (rather than pointer), we can't cast. # For example, the gmp mpz_t. if self.typedef_base_type.is_array: base_type = self.typedef_base_type.base_type return CPtrType(base_type).cast_code(expr_code) else: return BaseType.cast_code(self, expr_code) def specialize(self, values): base_type = self.typedef_base_type.specialize(values) namespace = self.typedef_namespace.specialize(values) if self.typedef_namespace else None if base_type is self.typedef_base_type and namespace is self.typedef_namespace: return self else: return create_typedef_type(self.typedef_name, base_type, self.typedef_cname, 0, namespace) def __repr__(self): return "<CTypedefType %s>" % self.typedef_cname def __str__(self): return self.typedef_name def _create_utility_code(self, template_utility_code, template_function_name): type_name = type_identifier(self.typedef_cname) utility_code = template_utility_code.specialize( type = self.typedef_cname, TypeName = type_name) function_name = template_function_name % type_name return utility_code, function_name def create_to_py_utility_code(self, env): if self.typedef_is_external: if not self.to_py_utility_code: base_type = self.typedef_base_type if type(base_type) is CIntType: self.to_py_function = "__Pyx_PyLong_From_" + self.specialization_name() env.use_utility_code(TempitaUtilityCode.load_cached( "CIntToPy", "TypeConversion.c", context={"TYPE": self.empty_declaration_code(), "TO_PY_FUNCTION": self.to_py_function})) return True elif base_type.is_float: pass # XXX implement! elif base_type.is_complex: pass # XXX implement! 
pass elif base_type.is_cpp_string: cname = "__pyx_convert_PyObject_string_to_py_%s" % type_identifier(self) context = { 'cname': cname, 'type': self.typedef_cname, } from .UtilityCode import CythonUtilityCode env.use_utility_code(CythonUtilityCode.load( "string.to_py", "CppConvert.pyx", context=context)) self.to_py_function = cname return True if self.to_py_utility_code: env.use_utility_code(self.to_py_utility_code) return True # delegation return self.typedef_base_type.create_to_py_utility_code(env) def create_from_py_utility_code(self, env): if self.typedef_is_external: if not self.from_py_utility_code: base_type = self.typedef_base_type if type(base_type) is CIntType: self.from_py_function = "__Pyx_PyLong_As_" + self.specialization_name() env.use_utility_code(TempitaUtilityCode.load_cached( "CIntFromPy", "TypeConversion.c", context={ "TYPE": self.empty_declaration_code(), "FROM_PY_FUNCTION": self.from_py_function, "IS_ENUM": base_type.is_enum, })) return True elif base_type.is_float: pass # XXX implement! elif base_type.is_complex: pass # XXX implement! elif base_type.is_cpp_string: cname = '__pyx_convert_string_from_py_%s' % type_identifier(self) context = { 'cname': cname, 'type': self.typedef_cname, } from .UtilityCode import CythonUtilityCode env.use_utility_code(CythonUtilityCode.load( "string.from_py", "CppConvert.pyx", context=context)) self.from_py_function = cname return True if self.from_py_utility_code: env.use_utility_code(self.from_py_utility_code) return True # delegation return self.typedef_base_type.create_from_py_utility_code(env) def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): if to_py_function is None: to_py_function = self.to_py_function return self.typedef_base_type.to_py_call_code( source_code, result_code, result_type, to_py_function) def from_py_call_code(self, source_code, result_code, error_pos, code, from_py_function=None, error_condition=None, special_none_cvalue=None): return self.typedef_base_type.from_py_call_code( source_code, result_code, error_pos, code, from_py_function or self.from_py_function, error_condition or self.error_condition(result_code), special_none_cvalue=special_none_cvalue, ) def overflow_check_binop(self, binop, env, const_rhs=False): env.use_utility_code(UtilityCode.load("Common", "Overflow.c")) type = self.empty_declaration_code() name = self.specialization_name() if binop == "lshift": env.use_utility_code(TempitaUtilityCode.load_cached( "LeftShift", "Overflow.c", context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed})) else: if const_rhs: binop += "_const" _load_overflow_base(env) env.use_utility_code(TempitaUtilityCode.load_cached( "SizeCheck", "Overflow.c", context={'TYPE': type, 'NAME': name})) env.use_utility_code(TempitaUtilityCode.load_cached( "Binop", "Overflow.c", context={'TYPE': type, 'NAME': name, 'BINOP': binop})) return "__Pyx_%s_%s_checking_overflow" % (binop, name) def error_condition(self, result_code): if self.typedef_is_external: if self.exception_value is not None: condition = "(%s == %s)" % ( result_code, self.cast_code(self.exception_value)) if self.exception_check: condition += " && PyErr_Occurred()" return condition # delegation return self.typedef_base_type.error_condition(result_code) def __getattr__(self, name): return getattr(self.typedef_base_type, name) def py_type_name(self): return self.typedef_base_type.py_type_name() def can_coerce_to_pyobject(self, env): return self.typedef_base_type.can_coerce_to_pyobject(env) def can_coerce_from_pyobject(self, env): 
return self.typedef_base_type.can_coerce_from_pyobject(env) def can_coerce_to_pystring(self, env, format_spec=None): return self.typedef_base_type.can_coerce_to_pystring(env, format_spec) def convert_to_pystring(self, cvalue, code, format_spec=None, name_type=None): if self.typedef_is_external and name_type is None: # The declared base type of external typedefs may not be exact, so use the typedef type name. name_type = self return self.typedef_base_type.convert_to_pystring(cvalue, code, format_spec, name_type)
CTypedefType
python
django__django
tests/utils_tests/test_encoding.py
{ "start": 4909, "end": 8768 }
class ____(unittest.TestCase): def test_filepath_to_uri(self): self.assertIsNone(filepath_to_uri(None)) self.assertEqual( filepath_to_uri("upload\\чубака.mp4"), "upload/%D1%87%D1%83%D0%B1%D0%B0%D0%BA%D0%B0.mp4", ) self.assertEqual(filepath_to_uri(Path("upload/test.png")), "upload/test.png") self.assertEqual(filepath_to_uri(Path("upload\\test.png")), "upload/test.png") def test_iri_to_uri(self): cases = [ # Valid UTF-8 sequences are encoded. ("red%09rosé#red", "red%09ros%C3%A9#red"), ("/blog/for/Jürgen Münster/", "/blog/for/J%C3%BCrgen%20M%C3%BCnster/"), ( "locations/%s" % quote_plus("Paris & Orléans"), "locations/Paris+%26+Orl%C3%A9ans", ), # Reserved chars remain unescaped. ("%&", "%&"), ("red&♥ros%#red", "red&%E2%99%A5ros%#red"), (gettext_lazy("red&♥ros%#red"), "red&%E2%99%A5ros%#red"), ] for iri, uri in cases: with self.subTest(iri): self.assertEqual(iri_to_uri(iri), uri) # Test idempotency. self.assertEqual(iri_to_uri(iri_to_uri(iri)), uri) def test_uri_to_iri(self): cases = [ (None, None), # Valid UTF-8 sequences are decoded. ("/%e2%89%Ab%E2%99%a5%E2%89%aB/", "/≫♥≫/"), ("/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93", "/♥♥/?utf8=✓"), ("/%41%5a%6B/", "/AZk/"), # Reserved and non-URL valid ASCII chars are not decoded. ("/%25%20%02%41%7b/", "/%25%20%02A%7b/"), # Broken UTF-8 sequences remain escaped. ("/%AAd%AAj%AAa%AAn%AAg%AAo%AA/", "/%AAd%AAj%AAa%AAn%AAg%AAo%AA/"), ("/%E2%99%A5%E2%E2%99%A5/", "/♥%E2♥/"), ("/%E2%99%A5%E2%99%E2%99%A5/", "/♥%E2%99♥/"), ("/%E2%E2%99%A5%E2%99%A5%99/", "/%E2♥♥%99/"), ( "/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93", "/♥♥/?utf8=%9C%93✓%9C%93", ), ] for uri, iri in cases: with self.subTest(uri): self.assertEqual(uri_to_iri(uri), iri) # Test idempotency. self.assertEqual(uri_to_iri(uri_to_iri(uri)), iri) def test_complementarity(self): cases = [ ( "/blog/for/J%C3%BCrgen%20M%C3%BCnster/", "/blog/for/J\xfcrgen%20M\xfcnster/", ), ("%&", "%&"), ("red&%E2%99%A5ros%#red", "red&♥ros%#red"), ("/%E2%99%A5%E2%99%A5/", "/♥♥/"), ("/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93", "/♥♥/?utf8=✓"), ("/%25%20%02%7b/", "/%25%20%02%7b/"), ("/%AAd%AAj%AAa%AAn%AAg%AAo%AA/", "/%AAd%AAj%AAa%AAn%AAg%AAo%AA/"), ("/%E2%99%A5%E2%E2%99%A5/", "/♥%E2♥/"), ("/%E2%99%A5%E2%99%E2%99%A5/", "/♥%E2%99♥/"), ("/%E2%E2%99%A5%E2%99%A5%99/", "/%E2♥♥%99/"), ( "/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93", "/♥♥/?utf8=%9C%93✓%9C%93", ), ] for uri, iri in cases: with self.subTest(uri): self.assertEqual(iri_to_uri(uri_to_iri(uri)), uri) self.assertEqual(uri_to_iri(iri_to_uri(iri)), iri) def test_escape_uri_path(self): cases = [ ( "/;some/=awful/?path/:with/@lots/&of/+awful/chars", "/%3Bsome/%3Dawful/%3Fpath/:with/@lots/&of/+awful/chars", ), ("/foo#bar", "/foo%23bar"), ("/foo?bar", "/foo%3Fbar"), ] for uri, expected in cases: with self.subTest(uri): self.assertEqual(escape_uri_path(uri), expected)
TestRFC3987IEncodingUtils
python
doocs__leetcode
solution/3500-3599/3556.Sum of Largest Prime Substrings/Solution.py
{ "start": 0, "end": 475 }
class ____: def sumOfLargestPrimes(self, s: str) -> int: def is_prime(x: int) -> bool: if x < 2: return False return all(x % i for i in range(2, int(sqrt(x)) + 1)) st = set() n = len(s) for i in range(n): x = 0 for j in range(i, n): x = x * 10 + int(s[j]) if is_prime(x): st.add(x) return sum(sorted(st)[-3:])
Solution
python
django__django
tests/forms_tests/field_tests/test_typedmultiplechoicefield.py
{ "start": 158, "end": 3696 }
class ____(SimpleTestCase): def test_typedmultiplechoicefield_1(self): f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int) self.assertEqual([1], f.clean(["1"])) msg = "'Select a valid choice. 2 is not one of the available choices.'" with self.assertRaisesMessage(ValidationError, msg): f.clean(["2"]) def test_typedmultiplechoicefield_2(self): # Different coercion, same validation. f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float) self.assertEqual([1.0], f.clean(["1"])) def test_typedmultiplechoicefield_3(self): # This can also cause weirdness: be careful (bool(-1) == True, # remember) f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool) self.assertEqual([True], f.clean(["-1"])) def test_typedmultiplechoicefield_4(self): f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int) self.assertEqual([1, -1], f.clean(["1", "-1"])) msg = "'Select a valid choice. 2 is not one of the available choices.'" with self.assertRaisesMessage(ValidationError, msg): f.clean(["1", "2"]) def test_typedmultiplechoicefield_5(self): # Even more weirdness: if you have a valid choice but your coercion # function can't coerce, you'll still get a validation error. Don't do # this! f = TypedMultipleChoiceField(choices=[("A", "A"), ("B", "B")], coerce=int) msg = "'Select a valid choice. B is not one of the available choices.'" with self.assertRaisesMessage(ValidationError, msg): f.clean(["B"]) # Required fields require values with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean([]) def test_typedmultiplechoicefield_6(self): # Non-required fields aren't required f = TypedMultipleChoiceField( choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False ) self.assertEqual([], f.clean([])) def test_typedmultiplechoicefield_7(self): # If you want cleaning an empty value to return a different type, tell # the field f = TypedMultipleChoiceField( choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None, ) self.assertIsNone(f.clean([])) def test_typedmultiplechoicefield_has_changed(self): # has_changed should not trigger required validation f = TypedMultipleChoiceField( choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True ) self.assertFalse(f.has_changed(None, "")) def test_typedmultiplechoicefield_special_coerce(self): """ A coerce function which results in a value not present in choices should raise an appropriate error (#21397). """ def coerce_func(val): return decimal.Decimal("1.%s" % val) f = TypedMultipleChoiceField( choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True ) self.assertEqual([decimal.Decimal("1.2")], f.clean(["2"])) with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean([]) msg = "'Select a valid choice. 3 is not one of the available choices.'" with self.assertRaisesMessage(ValidationError, msg): f.clean(["3"])
TypedMultipleChoiceFieldTest
python
lxml__lxml
src/lxml/tests/dummy_http_server.py
{ "start": 1126, "end": 1624 }
class ____(wsgiserver.WSGIRequestHandler): def get_stderr(self): # don't write to stderr return sys.stdout def log_message(self, format, *args): # message = "wsmock(%s) %s" % (self.address_string(), format % args) pass # don't log messages def build_web_server(app, port, host=None): server = wsgiserver.make_server( host or '', port, app, server_class=WebServer, handler_class=_RequestHandler) return server
_RequestHandler
python
Textualize__textual
src/textual/widgets/_footer.py
{ "start": 3646, "end": 3755 }
class ____(Label): """Text displayed in the footer (used by binding groups).""" @rich.repr.auto
FooterLabel
python
kamyu104__LeetCode-Solutions
Python/sum-of-beautiful-subsequences.py
{ "start": 2093, "end": 2957 }
class ____(object): def totalBeauty(self, nums): """ :type nums: List[int] :rtype: int """ def count(arr): val_to_idx = {x:i for i, x in enumerate(sorted(set(arr)))} # coordinate compression bit = BIT(len(val_to_idx)) for x in arr: bit.add(val_to_idx[x], bit.query(val_to_idx[x]-1)+1) return bit.query(len(val_to_idx)-1) mx = max(nums) lookup = [[] for _ in xrange(mx+1)] for x in nums: for d in FACTORS[x]: lookup[d].append(x) result = 0 cnt = [0]*(mx+1) for g in reversed(xrange(1, mx+1)): cnt[g] = count(lookup[g]) for ng in xrange(g+g, mx+1, g): cnt[g] -= cnt[ng] result = (result+g*cnt[g])%MOD return result
Solution2