Columns:

language: stringclasses (1 value)
repo: stringclasses (346 values)
path: stringlengths (6 to 201)
class_span: dict
source: stringlengths (21 to 2.38M)
target: stringlengths (1 to 96)
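Each record below repeats these six fields in order (language, repo, path, class_span, source, target): the class body from the file at path, with the class name masked as ____, and the masked name as the prediction target. A minimal sketch of how a record could be put back together, assuming the rows are already available as Python dicts (this preview does not say how they are loaded, so that part is left out):

# Minimal sketch: rebuild the unmasked class source from one record.
# Assumption: each row is a plain dict with the six fields listed above;
# the loading mechanism (parquet, CSV, a datasets library, ...) is not
# specified by this preview.
def unmask(record: dict) -> str:
    # Substitute the target class name back into the masked source.
    return record["source"].replace("____", record["target"], 1)

# Example using one of the records shown further down in this preview.
record = {
    "language": "python",
    "repo": "numba__numba",
    "path": "numba/tests/test_datamodel.py",
    "class_span": {"start": 1010, "end": 1077},
    "source": "class ____(test_factory()): fe_type = types.complex64",
    "target": "TestComplex",
}
print(unmask(record))
# class TestComplex(test_factory()): fe_type = types.complex64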
python
astropy__astropy
astropy/io/fits/hdu/compressed/tests/test_compression_failures.py
{ "start": 369, "end": 7131 }
class ____(FitsTestCase): def test_wrong_argument_number(self): with pytest.raises(TypeError): compress_image_data(1, 2) def test_unknown_compression_type(self): hdu = fits.CompImageHDU(np.ones((10, 10))) bintable = hdu._get_bintable_without_data() bintable.header["ZCMPTYPE"] = "fun" with pytest.raises(ValueError) as exc: compress_image_data( hdu.data, hdu.compression_type, bintable.header, bintable.columns ) assert "Unrecognized compression type: fun" in str(exc.value) def test_zbitpix_unknown(self): hdu = fits.CompImageHDU(np.ones((10, 10))) bintable = hdu._get_bintable_without_data() bintable.header["ZBITPIX"] = 13 with pytest.raises(ValueError) as exc: compress_image_data( hdu.data, hdu.compression_type, bintable.header, bintable.columns ) assert "Invalid value for BITPIX: 13" in str(exc.value) def test_data_none(self): hdu = fits.CompImageHDU(np.ones((10, 10))) hdu.data = None hdu.tile_shape = None bintable = hdu._get_bintable_without_data() with pytest.raises(TypeError) as exc: compress_image_data( hdu.data, hdu.compression_type, bintable.header, bintable.columns ) assert "Image data must be a numpy.ndarray" in str(exc.value) def test_invalid_tform(self): hdu = fits.CompImageHDU(np.ones((10, 10))) bintable = hdu._get_bintable_without_data() bintable.header["TFORM1"] = "TX" with pytest.raises(RuntimeError) as exc: compress_image_data( hdu.data, hdu.compression_type, bintable.header, bintable.columns ) assert "TX" in str(exc.value) and "TFORM" in str(exc.value) def test_invalid_zdither(self): hdu = fits.CompImageHDU(np.ones((10, 10)), quantize_method=1) bintable = hdu._get_bintable_without_data() bintable.header["ZDITHER0"] = "a" with pytest.raises(TypeError): compress_image_data( hdu.data, hdu.compression_type, bintable.header, bintable.columns ) @pytest.mark.parametrize("kw", ["ZNAXIS", "ZBITPIX"]) def test_header_missing_keyword(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) bintable = hdu._get_bintable_without_data() del bintable.header[kw] with pytest.raises(KeyError) as exc: compress_image_data( hdu.data, hdu.compression_type, bintable.header, bintable.columns ) assert kw in str(exc.value) @pytest.mark.parametrize("kw", ["ZNAXIS", "ZVAL1", "ZVAL2", "ZBLANK", "BLANK"]) def test_header_value_int_overflow(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) bintable = hdu._get_bintable_without_data() bintable.header[kw] = MAX_INT + 1 with pytest.raises(OverflowError): compress_image_data( hdu.data, hdu.compression_type, bintable.header, bintable.columns ) @pytest.mark.parametrize("kw", ["ZTILE1", "ZNAXIS1"]) def test_header_value_long_overflow(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) bintable = hdu._get_bintable_without_data() bintable.header[kw] = MAX_LONG + 1 with pytest.raises(OverflowError): compress_image_data( hdu.data, hdu.compression_type, bintable.header, bintable.columns ) @pytest.mark.parametrize("kw", ["NAXIS1", "NAXIS2", "TNULL1", "PCOUNT", "THEAP"]) def test_header_value_longlong_overflow(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) bintable = hdu._get_bintable_without_data() bintable.header[kw] = MAX_LONGLONG + 1 with pytest.raises(OverflowError): compress_image_data( hdu.data, hdu.compression_type, bintable.header, bintable.columns ) @pytest.mark.parametrize("kw", ["ZVAL3"]) def test_header_value_float_overflow(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) bintable = hdu._get_bintable_without_data() bintable.header[kw] = 1e300 with pytest.raises(OverflowError): compress_image_data( hdu.data, hdu.compression_type, bintable.header, 
bintable.columns ) @pytest.mark.parametrize("kw", ["NAXIS1", "NAXIS2", "TFIELDS", "PCOUNT"]) def test_header_value_negative(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) bintable = hdu._get_bintable_without_data() bintable.header[kw] = -1 with pytest.raises(ValueError) as exc: compress_image_data( hdu.data, hdu.compression_type, bintable.header, bintable.columns ) assert f"{kw} should not be negative." in str(exc.value) @pytest.mark.parametrize(("kw", "limit"), [("ZNAXIS", 999), ("TFIELDS", 999)]) def test_header_value_exceeds_custom_limit(self, kw, limit): hdu = fits.CompImageHDU(np.ones((10, 10))) bintable = hdu._get_bintable_without_data() bintable.header[kw] = limit + 1 with pytest.raises(ValueError) as exc: compress_image_data( hdu.data, hdu.compression_type, bintable.header, bintable.columns ) assert kw in str(exc.value) @pytest.mark.parametrize( "kw", ["TTYPE1", "TFORM1", "ZCMPTYPE", "ZNAME1", "ZQUANTIZ"] ) def test_header_value_no_string(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) bintable = hdu._get_bintable_without_data() bintable.header[kw] = 1 with pytest.raises(TypeError): compress_image_data( hdu.data, hdu.compression_type, bintable.header, bintable.columns ) @pytest.mark.parametrize("kw", ["TZERO1", "TSCAL1"]) def test_header_value_no_double(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10))) bintable = hdu._get_bintable_without_data() bintable.header[kw] = "1" with pytest.raises(TypeError): compress_image_data( hdu.data, hdu.compression_type, bintable.header, bintable.columns ) @pytest.mark.parametrize("kw", ["ZSCALE", "ZZERO"]) def test_header_value_no_double_int_image(self, kw): hdu = fits.CompImageHDU(np.ones((10, 10), dtype=np.int32)) bintable = hdu._get_bintable_without_data() bintable.header[kw] = "1" with pytest.raises(TypeError): compress_image_data( hdu.data, hdu.compression_type, bintable.header, bintable.columns )
TestCompressionFunction
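The class_span values look like character offsets of the class within the file at path. A hedged check against a local checkout of the repository named in the record above (the offset convention and the checkout layout are assumptions, not stated in this preview):

# Assumption: class_span gives 0-based character offsets into the file at
# `path`, relative to the root of a local clone of astropy__astropy.
from pathlib import Path

repo_root = Path("astropy")  # hypothetical location of the local clone
rel_path = "astropy/io/fits/hdu/compressed/tests/test_compression_failures.py"
text = (repo_root / rel_path).read_text()
snippet = text[369:7131]  # start/end taken from the record above
# If the assumption holds, `snippet` matches this record's source, with the
# real class name (TestCompressionFunction) where ____ appears here.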
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/events.py
{ "start": 1874, "end": 2129 }
class ____(NamedTuple):
    """An AssetKey with a partition range."""

    asset_key: AssetKey
    partition_range: Optional[PartitionKeyRange]


DynamicAssetKey = Callable[["OutputContext"], Optional[AssetKey]]


@whitelist_for_serdes
AssetPartitionWipeRange
python
redis__redis-py
redis/commands/search/query.py
{ "start": 11466, "end": 11939 }
class ____(Filter):
    INF = "+inf"
    NEG_INF = "-inf"

    def __init__(
        self,
        field: str,
        minval: Union[int, str],
        maxval: Union[int, str],
        minExclusive: bool = False,
        maxExclusive: bool = False,
    ) -> None:
        args = [
            minval if not minExclusive else f"({minval}",
            maxval if not maxExclusive else f"({maxval}",
        ]

        Filter.__init__(self, "FILTER", field, *args)
NumericFilter
python
huggingface__transformers
tests/models/gpt_oss/test_modeling_gpt_oss.py
{ "start": 1306, "end": 1437 }
class ____(CausalLMModelTester):
    if is_torch_available():
        base_model_class = GptOssModel


@require_torch
GptOssModelTester
python
huggingface__transformers
src/transformers/tokenization_mistral_common.py
{ "start": 8018, "end": 8190 }
class ____(str, Enum):
    """Enum for the different type of tokenizer."""

    spm = "spm"
    tekken = "tekken"


@requires(backends=("mistral-common",))
MistralTokenizerType
python
scikit-learn__scikit-learn
examples/miscellaneous/plot_metadata_routing.py
{ "start": 24849, "end": 26413 }
class ____(MetaEstimatorMixin, RegressorMixin, BaseEstimator):
    # show warning to remind user to explicitly set the value with
    # `.set_{method}_request(sample_weight={boolean})`
    __metadata_request__fit = {"sample_weight": metadata_routing.WARN}

    def __init__(self, estimator):
        self.estimator = estimator

    def fit(self, X, y, sample_weight=None, **fit_params):
        routed_params = process_routing(
            self, "fit", sample_weight=sample_weight, **fit_params
        )
        check_metadata(self, sample_weight=sample_weight)
        self.estimator_ = clone(self.estimator).fit(X, y, **routed_params.estimator.fit)

    def get_metadata_routing(self):
        router = (
            MetadataRouter(owner=self)
            .add_self_request(self)
            .add(
                estimator=self.estimator,
                method_mapping=MethodMapping().add(caller="fit", callee="fit"),
            )
        )
        return router


# %%
# The above implementation is almost the same as ``MetaRegressor``, and
# because of the default request value defined in ``__metadata_request__fit``
# there is a warning raised when fitted.
with warnings.catch_warnings(record=True) as record:
    WeightedMetaRegressor(
        estimator=LinearRegression().set_fit_request(sample_weight=False)
    ).fit(X, y, sample_weight=my_weights)
for w in record:
    print(w.message)

# %%
# When an estimator consumes a metadata which it didn't consume before, the
# following pattern can be used to warn the users about it.
WeightedMetaRegressor
python
pytorch__pytorch
torch/nn/modules/padding.py
{ "start": 14075, "end": 15659 }
class ____(_ReflectionPadNd):
    r"""Pads the input tensor using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
            Note that padding size should be less than the corresponding input dimension.

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReflectionPad1d(2)
        >>> # xdoctest: +IGNORE_WANT("other tests seem to modify printing styles")
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> input
        tensor([[[0., 1., 2., 3.],
                 [4., 5., 6., 7.]]])
        >>> m(input)
        tensor([[[2., 1., 0., 1., 2., 3., 2., 1.],
                 [6., 5., 4., 5., 6., 7., 6., 5.]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReflectionPad1d((3, 1))
        >>> m(input)
        tensor([[[3., 2., 1., 0., 1., 2., 3., 2.],
                 [7., 6., 5., 4., 5., 6., 7., 6.]]])
    """

    # pyrefly: ignore [bad-override]
    padding: tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        self.padding = _pair(padding)
ReflectionPad1d
python
Textualize__textual
tests/selection_list/test_selection_values.py
{ "start": 193, "end": 3216 }
class ____(App[None]): def __init__(self, default_state: bool = False) -> None: super().__init__() self._default_state = default_state def compose(self) -> ComposeResult: yield SelectionList[int](*[(str(n), n, self._default_state) for n in range(50)]) async def test_empty_selected() -> None: """Selected should be empty when nothing is selected.""" async with SelectionListApp().run_test() as pilot: assert pilot.app.query_one(SelectionList).selected == [] async def test_programatic_select() -> None: """Selected should contain a selected value.""" async with SelectionListApp().run_test() as pilot: selection = pilot.app.query_one(SelectionList) selection.select(0) assert pilot.app.query_one(SelectionList).selected == [0] async def test_programatic_select_all() -> None: """Selected should contain all selected values.""" async with SelectionListApp().run_test() as pilot: selection = pilot.app.query_one(SelectionList) selection.select_all() assert pilot.app.query_one(SelectionList).selected == list(range(50)) async def test_programatic_deselect() -> None: """Selected should not contain a deselected value.""" async with SelectionListApp(True).run_test() as pilot: selection = pilot.app.query_one(SelectionList) selection.deselect(0) assert pilot.app.query_one(SelectionList).selected == list(range(50)[1:]) async def test_programatic_deselect_all() -> None: """Selected should not contain anything after deselecting all values.""" async with SelectionListApp(True).run_test() as pilot: selection = pilot.app.query_one(SelectionList) selection.deselect_all() assert pilot.app.query_one(SelectionList).selected == [] async def test_programatic_toggle() -> None: """Selected should reflect a toggle.""" async with SelectionListApp().run_test() as pilot: selection = pilot.app.query_one(SelectionList) for n in range(25, 50): selection.select(n) for n in range(50): selection.toggle(n) assert pilot.app.query_one(SelectionList).selected == list(range(50)[:25]) async def test_programatic_toggle_all() -> None: """Selected should contain all values after toggling all on.""" async with SelectionListApp().run_test() as pilot: selection = pilot.app.query_one(SelectionList) selection.toggle_all() assert pilot.app.query_one(SelectionList).selected == list(range(50)) async def test_removal_of_selected_item() -> None: """Removing a selected selection should remove its value from the selected set.""" async with SelectionListApp().run_test() as pilot: selection = pilot.app.query_one(SelectionList) selection.toggle(0) assert pilot.app.query_one(SelectionList).selected == [0] selection.remove_option_at_index(0) assert pilot.app.query_one(SelectionList).selected == []
SelectionListApp
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-google/llama_index/readers/google/maps/base.py
{ "start": 889, "end": 5282 }
class ____(BaseReader): def __init__( self, api_key: Optional[str] = None, ): self.api_key = api_key or os.getenv("GOOGLE_MAPS_API_KEY") if not self.api_key: raise ValueError( "API key must be provided or set in the environment variables as 'GOOGLE_MAPS_API_KEY'" ) def load_data( self, text: str, number_of_results: Optional[int] = DEFAULT_NUMBER_OF_RESULTS, ) -> List[Document]: """ Load data from Google Maps. Args: text (str): the text to search for. number_of_results (Optional[int]): the number of results to return. Defaults to 20. """ response = self._search_text_request(text, MAX_RESULTS_PER_PAGE) documents = [] while "nextPageToken" in response: next_page_token = response["nextPageToken"] places = response.get("places", []) if len(places) == 0: break for place in places: formatted_address = place["formattedAddress"] average_rating = place["rating"] display_name = place["displayName"] if isinstance(display_name, dict): display_name = display_name["text"] number_of_ratings = place["userRatingCount"] reviews = [] for review in place["reviews"]: review_text = review["text"]["text"] author_name = review["authorAttribution"]["displayName"] relative_publish_time = review["relativePublishTimeDescription"] rating = review["rating"] reviews.append( Review( author_name=author_name, rating=rating, text=review_text, relative_publish_time=relative_publish_time, ) ) place = Place( reviews=reviews, address=formatted_address, average_rating=average_rating, display_name=display_name, number_of_ratings=number_of_ratings, ) reviews_text = "\n".join( [ f"Author: {review.author_name}, Rating: {review.rating}, Text: {review.text}, Relative Publish Time: {review.relative_publish_time}" for review in reviews ] ) place_text = f"Place: {place.display_name}, Address: {place.address}, Average Rating: {place.average_rating}, Number of Ratings: {place.number_of_ratings}" document_text = f"{place_text}\n{reviews_text}" if len(documents) == number_of_results: return documents documents.append(Document(text=document_text, extra_info=place.dict())) response = self._search_text_request( text, MAX_RESULTS_PER_PAGE, next_page_token ) return documents def lazy_load_data(self, *args: Any, **load_kwargs: Any) -> Iterable[Document]: """Load data lazily from Google Maps.""" yield from self.load_data(*args, **load_kwargs) def _search_text_request( self, text, number_of_results, next_page_token: Optional[str] = None ) -> dict: """ Send a request to the Google Maps Places API to search for text. Args: text (str): the text to search for. number_of_results (int): the number of results to return. next_page_token (Optional[str]): the next page token to get the next page of results. """ headers = { "Content-Type": "application/json", "X-Goog-Api-Key": self.api_key, "X-Goog-FieldMask": FIELD_MASK, } payload = json.dumps( { "textQuery": text, "pageSize": number_of_results, "pageToken": next_page_token, } ) response = requests.post(SEARCH_TEXT_BASE_URL, headers=headers, data=payload) response.raise_for_status() return response.json()
GoogleMapsTextSearchReader
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_checkbox07.py
{ "start": 315, "end": 1243 }
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename("checkbox07.xlsx")

    def test_create_file_with_checkboxes_in_table(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        cell_format = workbook.add_format({"checkbox": True})

        data = [
            [1, True],
            [2, False],
            [3, False],
            [4, True],
        ]

        worksheet.add_table(
            "A1:B5",
            {
                "data": data,
                "columns": [
                    {"header": "Col1"},
                    {"header": "Col2", "format": cell_format},
                ],
            },
        )

        workbook.close()

        self.assertExcelEqual()
TestCompareXLSXFiles
python
pytorch__pytorch
torch/_guards.py
{ "start": 18258, "end": 18721 }
class ____(Checkpointable[ModuleContextCheckpointState]):
    def __init__(self) -> None:
        self.nn_modules: dict[str, Any] = {}

    def copy_graphstate(self) -> ModuleContextCheckpointState:
        return ModuleContextCheckpointState(dict(self.nn_modules))

    def restore_graphstate(self, state: ModuleContextCheckpointState) -> None:
        assert isinstance(state, ModuleContextCheckpointState)
        self.nn_modules = state.nn_modules
ModuleContext
python
pytorch__pytorch
test/inductor/test_codecache.py
{ "start": 87604, "end": 87952 }
class ____(CustomPartitionerFn):
    def __init__(self):
        self._uuid = None

    def __call__(
        self, gm, joint_inputs, **kwargs
    ) -> tuple[torch.fx.GraphModule, torch.fx.GraphModule]:
        return gm, gm  # Dummy implementation

    def uuid(self) -> Optional[Union[bytes, str]]:
        return self._uuid
TestCustomPartitionerFn
python
google__jax
tests/version_test.py
{ "start": 2503, "end": 8844 }
class ____(unittest.TestCase): def assertValidVersion(self, version): self.assertIsNotNone(VERSION_PATTERN.match(version)) def testVersionValidity(self): self.assertValidVersion(jax.__version__) self.assertValidVersion(jax._src.lib.version_str) @patch_jax_version("1.2.3", "1.2.3.dev4567") def testVersionInRelease(self): # If the release version is set, subprocess should not be called. with assert_no_subprocess_call(): version = jax.version._get_version_string() self.assertEqual(version, "1.2.3.dev4567") self.assertValidVersion(version) @patch_jax_version("1.2.3", None) def testVersionInNonRelease(self): # If the release version is not set, we expect subprocess to be called # in order to attempt accessing git commit information. with assert_subprocess_call(): version = jax.version._get_version_string() self.assertTrue(version.startswith("1.2.3.dev")) self.assertValidVersion(version) @patch_jax_version("1.2.3", "1.2.3.dev4567") def testBuildVersionInRelease(self): # If building from a source tree with release version set, subprocess # should not be called. with assert_no_subprocess_call(): version = jax.version._get_version_for_build() self.assertEqual(version, "1.2.3.dev4567") self.assertValidVersion(version) @jtu.thread_unsafe_test() # Setting environment variables is not thread-safe. @patch_jax_version("1.2.3", None) def testBuildVersionFromEnvironment(self): # This test covers build-time construction of version strings in the # presence of several environment variables. base_version = "1.2.3" with jtu.set_env(JAX_RELEASE=None, JAXLIB_RELEASE=None, JAX_NIGHTLY=None, JAXLIB_NIGHTLY=None): with assert_subprocess_call(): version = jax.version._get_version_for_build() # TODO(jakevdp): confirm that this includes a date string & commit hash? self.assertTrue(version.startswith(f"{base_version}.dev")) self.assertValidVersion(version) with jtu.set_env(JAX_RELEASE=None, JAXLIB_RELEASE=None, JAX_NIGHTLY="1", JAXLIB_NIGHTLY=None): with assert_no_subprocess_call(): version = jax.version._get_version_for_build() datestring = datetime.date.today().strftime("%Y%m%d") self.assertEqual(version, f"{base_version}.dev{datestring}") self.assertValidVersion(version) with jtu.set_env(JAX_RELEASE=None, JAXLIB_RELEASE=None, JAX_NIGHTLY=None, JAXLIB_NIGHTLY="1"): with assert_no_subprocess_call(): version = jax.version._get_version_for_build() datestring = datetime.date.today().strftime("%Y%m%d") self.assertEqual(version, f"{base_version}.dev{datestring}") self.assertValidVersion(version) with jtu.set_env(JAX_RELEASE="1", JAXLIB_RELEASE=None, JAX_NIGHTLY=None, JAXLIB_NIGHTLY=None): with assert_no_subprocess_call(): version = jax.version._get_version_for_build() self.assertFalse(jax.version._is_prerelease()) self.assertEqual(version, base_version) self.assertValidVersion(version) with jtu.set_env(JAX_RELEASE=None, JAXLIB_RELEASE="1", JAX_NIGHTLY=None, JAXLIB_NIGHTLY=None): with assert_no_subprocess_call(): version = jax.version._get_version_for_build() self.assertFalse(jax.version._is_prerelease()) self.assertEqual(version, base_version) self.assertValidVersion(version) with jtu.set_env(JAX_RELEASE=None, JAXLIB_RELEASE=None, JAX_NIGHTLY=None, JAXLIB_NIGHTLY=None, JAX_CUSTOM_VERSION_SUFFIX="test"): with assert_subprocess_call(stdout=b"1731433958-1c0f1076e"): version = jax.version._get_version_for_build() self.assertTrue(version.startswith(f"{base_version}.dev")) self.assertTrue(version.endswith("test")) self.assertValidVersion(version) with jtu.set_env( JAX_RELEASE=None, JAXLIB_RELEASE=None, JAX_NIGHTLY=None, 
JAXLIB_NIGHTLY="1", WHEEL_VERSION_SUFFIX=".dev20250101+1c0f1076erc1", ): with assert_no_subprocess_call(): version = jax.version._get_version_for_build() self.assertEqual(version, f"{base_version}.dev20250101+1c0f1076erc1") self.assertValidVersion(version) with jtu.set_env( JAX_RELEASE="1", JAXLIB_RELEASE=None, JAX_NIGHTLY=None, JAXLIB_NIGHTLY=None, WHEEL_VERSION_SUFFIX="rc0", ): with assert_no_subprocess_call(): version = jax.version._get_version_for_build() self.assertTrue(jax.version._is_prerelease()) self.assertEqual(version, f"{base_version}rc0") self.assertValidVersion(version) with jtu.set_env( JAX_RELEASE=None, JAXLIB_RELEASE="1", JAX_NIGHTLY=None, JAXLIB_NIGHTLY=None, WHEEL_VERSION_SUFFIX="rc0", ): with assert_no_subprocess_call(): version = jax.version._get_version_for_build() self.assertTrue(jax.version._is_prerelease()) self.assertEqual(version, f"{base_version}rc0") self.assertValidVersion(version) def testVersions(self): check_jaxlib_version(jax_version="1.2.3", jaxlib_version="1.2.3", minimum_jaxlib_version="1.2.3") check_jaxlib_version(jax_version="1.2.3.4", jaxlib_version="1.2.3", minimum_jaxlib_version="1.2.3") check_jaxlib_version(jax_version="2.5.dev234", jaxlib_version="1.2.3", minimum_jaxlib_version="1.2.3") with self.assertRaisesRegex(RuntimeError, ".*jax requires version >=.*"): check_jaxlib_version(jax_version="1.2.3", jaxlib_version="1.0", minimum_jaxlib_version="1.2.3") with self.assertRaisesRegex(RuntimeError, ".*jax requires version >=.*"): check_jaxlib_version(jax_version="1.2.3", jaxlib_version="1.0", minimum_jaxlib_version="1.0.1") with self.assertRaisesRegex(RuntimeError, ".incompatible with jax version.*"): check_jaxlib_version(jax_version="1.2.3", jaxlib_version="1.2.4", minimum_jaxlib_version="1.0.5") if __name__ == "__main__": absltest.main()
JaxVersionTest
python
spyder-ide__spyder
spyder/utils/stylesheet.py
{ "start": 3756, "end": 11722 }
class ____(SpyderStyleSheet, SpyderConfigurationAccessor): """ Class to build and access the stylesheet we use in the entire application. """ # Don't create the stylesheet here so that Spyder gets the app font from # the system when it starts for the first time. This also allows us to # display the splash screen more quickly because the stylesheet is then # computed only when it's going to be applied to the app, not when this # object is imported. SET_STYLESHEET_AT_INIT = False def __init__(self): super().__init__() self._stylesheet_as_string = None def to_string(self): """Save stylesheet as a string for quick access.""" if self._stylesheet_as_string is None: self.set_stylesheet() self._stylesheet_as_string = self._stylesheet.toString() return self._stylesheet_as_string def set_stylesheet(self): """ This takes the stylesheet from QDarkstyle and applies our customizations to it. """ stylesheet = qdarkstyle.load_stylesheet(palette=SpyderPalette) self._stylesheet = parse_stylesheet(stylesheet) # Add our customizations self._customize_stylesheet() def _customize_stylesheet(self): """Apply our customizations to the stylesheet.""" css = self._stylesheet # App font properties font_family = self.get_conf('app_font/family', section='appearance') font_size = int(self.get_conf('app_font/size', section='appearance')) # Remove padding and border for QStackedWidget (used in Plots # and the Variable Explorer) css['QStackedWidget'].setValues( border='0px', padding='0px', ) # Remove margin when pressing buttons css["QToolButton:pressed"].setValues( margin='0px' ) # Remove border, padding and spacing for main toolbar css.QToolBar.setValues( borderBottom='0px', padding='0px', spacing='0px', ) # Remove margins around separators and decrease size a bit css['QMainWindow::separator:horizontal'].setValues( marginTop='0px', marginBottom='0px', # This is summed to the separator padding (2px) width="3px", # Hide image because the default image is not visible at this size image="none" ) css['QMainWindow::separator:vertical'].setValues( marginLeft='0px', marginRight='0px', # This is summed to the separator padding (2px) height='3px', # Hide image because the default image is not visible at this size image="none" ) # Increase padding and fix disabled color for QPushButton's css.QPushButton.setValues(padding=AppStyle.QPushButtonPadding) for state in ['disabled', 'checked', 'checked:disabled']: css[f'QPushButton:{state}'].setValues( padding=AppStyle.QPushButtonPadding, ) # This is especially necessary in the light theme because the # contrast between the background and text colors is too small if state in ['disabled', 'checked:disabled']: css[f"QPushButton:{state}"].setValues( color=SpyderPalette.COLOR_TEXT_3, ) # Adjust QToolButton style to our needs. # This affects not only the pane toolbars but also the # find/replace widget, the finder in the Variable Explorer, # and all QToolButton's that are not part of the main toolbar. 
for element in ['QToolButton', 'QToolButton:disabled']: css[f'{element}'].setValues( backgroundColor='transparent' ) for state in ['hover', 'pressed', 'checked', 'checked:hover']: if state == 'hover': color = SpyderPalette.COLOR_BACKGROUND_2 else: color = SpyderPalette.COLOR_BACKGROUND_3 css[f'QToolButton:{state}'].setValues( backgroundColor=color ) # Adjust padding of QPushButton's in QDialog's for widget in ["QPushButton", "QPushButton:disabled"]: css[f"QDialogButtonBox {widget}"].setValues( padding=( AppStyle.QPushButtonPadding if (MAC or WIN) else f"{AppStyle.MarginSize + 1}px {AppStyle.MarginSize}px" ), # This width comes from QDarkstyle but it's too big on Mac minWidth="50px" if WIN else ("60px" if MAC else "80px"), ) css["QDialogButtonBox QPushButton:!default"].setValues( padding=( AppStyle.QPushButtonPadding if (MAC or WIN) else f"{AppStyle.MarginSize + 1}px {AppStyle.MarginSize}px" ), # This width comes from QDarkstyle but it's too big on Mac minWidth="50px" if WIN else ("60px" if MAC else "80px"), ) # Remove icons in QMessageBoxes css["QDialogButtonBox"]["dialogbuttonbox-buttons-have-icons"].setValue( "0" ) # Set font for widgets that don't inherit it from the application # This is necessary for spyder-ide/spyder#5942. for widget in ['QToolTip', 'QDialog', 'QListView', 'QTreeView', 'QHeaderView::section', 'QTableView']: css[f'{widget}'].setValues( fontFamily=font_family, fontSize=f'{font_size}pt' ) # Make lineedits have *almost* the same height as our comboboxes. This # is not perfect because (oddly enough) Qt doesn't set the same height # for both when using the same value, but it's close enough. css.QLineEdit.setValues( minHeight=f'{AppStyle.ComboBoxMinHeight - 0.25}em' ) # Do the same for spinboxes css.QSpinBox.setValues( minHeight=f'{AppStyle.ComboBoxMinHeight - 0.25}em' ) # Remove border in QGroupBox to avoid the "boxes within boxes" # antipattern. Also, increase its title font in one point to make it # more relevant. css.QGroupBox.setValues( border='0px', fontSize=f'{font_size + 1}pt', ) # Increase separation between title and content of QGroupBoxes and fix # its alignment. css['QGroupBox::title'].setValues( paddingTop='-0.3em', left='0px', ) # Decrease splitter handle size to be a bit smaller than QMainWindow # separators. css['QSplitter::handle'].setValues( padding="0px", ) css['QSplitter::handle:horizontal'].setValues( width="5px", image="none" ) css['QSplitter::handle:vertical'].setValues( height="5px", image="none" ) # Make splitter handle color match the one of QMainWindow separators css['QSplitter::handle:hover'].setValues( backgroundColor=SpyderPalette.COLOR_BACKGROUND_6, ) # Add padding to tooltips css.QToolTip.setValues( padding="1px 2px", ) # Add padding to tree widget items to make them look better css["QTreeWidget::item"].setValues( padding=f"{AppStyle.MarginSize - 1}px 0px", ) css["QTreeView::item"].setValues( padding=f"{AppStyle.MarginSize - 1}px 0px", ) APP_STYLESHEET = AppStylesheet() # ============================================================================= # ---- Toolbar stylesheets # =============================================================================
AppStylesheet
python
run-llama__llama_index
llama-index-core/tests/prompts/test_base.py
{ "start": 403, "end": 10115 }
class ____(BaseOutputParser): """Mock output parser.""" def __init__(self, format_string: str) -> None: self._format_string = format_string def parse(self, output: str) -> Any: return {"output": output} def format(self, query: str) -> str: return query + "\n" + self._format_string @pytest.fixture() def output_parser() -> BaseOutputParser: return MockOutputParser(format_string="output_instruction") def test_template() -> None: """Test partial format.""" prompt_txt = "hello {text} {foo}" prompt = PromptTemplate(prompt_txt) prompt_fmt = prompt.partial_format(foo="bar") assert isinstance(prompt_fmt, PromptTemplate) assert prompt_fmt.format(text="world") == "hello world bar" assert prompt_fmt.format_messages(text="world") == [ ChatMessage(content="hello world bar", role=MessageRole.USER) ] def test_template_output_parser(output_parser: BaseOutputParser) -> None: prompt_txt = "hello {text} {foo}" prompt = PromptTemplate(prompt_txt, output_parser=output_parser) prompt_fmt = prompt.format(text="world", foo="bar") assert prompt_fmt == "hello world bar\noutput_instruction" def test_chat_template() -> None: chat_template = ChatPromptTemplate( message_templates=[ ChatMessage( content="This is a system message with a {sys_param}", role=MessageRole.SYSTEM, ), ChatMessage(content="hello {text} {foo}", role=MessageRole.USER), ], prompt_type=PromptType.CONVERSATION, ) partial_template = chat_template.partial_format(sys_param="sys_arg") messages = partial_template.format_messages(text="world", foo="bar") assert messages[0] == ChatMessage( content="This is a system message with a sys_arg", role=MessageRole.SYSTEM ) assert partial_template.format(text="world", foo="bar") == ( "system: This is a system message with a sys_arg\n" "user: hello world bar\n" "assistant: " ) def test_chat_template_output_parser(output_parser: BaseOutputParser) -> None: chat_template = ChatPromptTemplate( message_templates=[ ChatMessage( content="This is a system message with a {sys_param}", role=MessageRole.SYSTEM, ), ChatMessage(content="hello {text} {foo}", role=MessageRole.USER), ], prompt_type=PromptType.CONVERSATION, output_parser=output_parser, ) messages = chat_template.format_messages( text="world", foo="bar", sys_param="sys_arg" ) assert ( messages[0].content == "This is a system message with a sys_arg\noutput_instruction" ) def test_selector_template() -> None: default_template = PromptTemplate("hello {text} {foo}") chat_template = ChatPromptTemplate( message_templates=[ ChatMessage( content="This is a system message with a {sys_param}", role=MessageRole.SYSTEM, ), ChatMessage(content="hello {text} {foo}", role=MessageRole.USER), ], prompt_type=PromptType.CONVERSATION, ) selector_template = SelectorPromptTemplate( default_template=default_template, conditionals=[ (lambda llm: isinstance(llm, MockLLM), chat_template), ], ) partial_template = selector_template.partial_format(text="world", foo="bar") prompt = partial_template.format() assert prompt == "hello world bar" messages = partial_template.format_messages(llm=MockLLM(), sys_param="sys_arg") assert messages[0] == ChatMessage( content="This is a system message with a sys_arg", role=MessageRole.SYSTEM ) def test_template_var_mappings() -> None: """Test template variable mappings.""" qa_prompt_tmpl = """\ Here's some context: {foo} Given the context, please answer the final question: {bar} """ template_var_mappings = { "context_str": "foo", "query_str": "bar", } # try regular prompt template qa_prompt = PromptTemplate( qa_prompt_tmpl, 
template_var_mappings=template_var_mappings ) fmt_prompt = qa_prompt.format(query_str="abc", context_str="def") assert ( fmt_prompt == """\ Here's some context: def Given the context, please answer the final question: abc """ ) # try partial format qa_prompt_partial = qa_prompt.partial_format(query_str="abc2") fmt_prompt_partial = qa_prompt_partial.format(context_str="def2") assert ( fmt_prompt_partial == """\ Here's some context: def2 Given the context, please answer the final question: abc2 """ ) # try chat prompt template # partial template var mapping template_var_mappings = { "context_str": "foo", "query_str": "bar", } chat_template = ChatPromptTemplate( message_templates=[ ChatMessage( content="This is a system message with a {sys_param}", role=MessageRole.SYSTEM, ), ChatMessage(content="hello {foo} {bar}", role=MessageRole.USER), ], prompt_type=PromptType.CONVERSATION, template_var_mappings=template_var_mappings, ) fmt_prompt = chat_template.format( query_str="abc", context_str="def", sys_param="sys_arg" ) assert fmt_prompt == ( "system: This is a system message with a sys_arg\n" "user: hello def abc\n" "assistant: " ) def test_function_mappings() -> None: """Test function mappings.""" test_prompt_tmpl = """foo bar {abc} {xyz}""" ## PROMPT 1 # test a format function that uses values of both abc and def def _format_abc(**kwargs: Any) -> str: """Given kwargs, output formatted variable.""" return f"{kwargs['abc']}-{kwargs['xyz']}" test_prompt = PromptTemplate( test_prompt_tmpl, function_mappings={"abc": _format_abc} ) assert test_prompt.format(abc="123", xyz="456") == "foo bar 123-456 456" # test partial test_prompt_partial = test_prompt.partial_format(xyz="456") assert test_prompt_partial.format(abc="789") == "foo bar 789-456 456" ## PROMPT 2 # test a format function that only depends on values of xyz def _format_abc_2(**kwargs: Any) -> str: """Given kwargs, output formatted variable.""" return f"{kwargs['xyz']}" test_prompt_2 = PromptTemplate( test_prompt_tmpl, function_mappings={"abc": _format_abc_2} ) assert test_prompt_2.format(xyz="456") == "foo bar 456 456" # test that formatting abc itself will throw an error with pytest.raises(KeyError): test_prompt_2.format(abc="123") ## PROMPT 3 - test prompt with template var mappings def _format_prompt_key1(**kwargs: Any) -> str: """Given kwargs, output formatted variable.""" return f"{kwargs['prompt_key1']}-{kwargs['prompt_key2']}" template_var_mappings = { "prompt_key1": "abc", "prompt_key2": "xyz", } test_prompt_3 = PromptTemplate( test_prompt_tmpl, template_var_mappings=template_var_mappings, # NOTE: with template mappings, needs to use the source variable names, # not the ones being mapped to in the template function_mappings={"prompt_key1": _format_prompt_key1}, ) assert ( test_prompt_3.format(prompt_key1="678", prompt_key2="789") == "foo bar 678-789 789" ) ### PROMPT 4 - test chat prompt template chat_template = ChatPromptTemplate( message_templates=[ ChatMessage( content="This is a system message with a {sys_param}", role=MessageRole.SYSTEM, ), ChatMessage(content="hello {abc} {xyz}", role=MessageRole.USER), ], prompt_type=PromptType.CONVERSATION, function_mappings={"abc": _format_abc}, ) fmt_prompt = chat_template.format(abc="tmp1", xyz="tmp2", sys_param="sys_arg") assert fmt_prompt == ( "system: This is a system message with a sys_arg\n" "user: hello tmp1-tmp2 tmp2\n" "assistant: " ) def test_template_with_json() -> None: """Test partial format.""" prompt_txt = 'hello {text} {foo} {"bar": "baz"}' prompt = 
PromptTemplate(prompt_txt) assert prompt.format(foo="foo2", text="world") == 'hello world foo2 {"bar": "baz"}' assert prompt.format_messages(foo="foo2", text="world") == [ ChatMessage(content='hello world foo2 {"bar": "baz"}', role=MessageRole.USER) ] test_case_2 = PromptTemplate("test {message} {test}") assert test_case_2.format(message="message") == "test message {test}" test_case_3 = PromptTemplate("test {{message}} {{test}}") assert test_case_3.format(message="message", test="test") == "test {message} {test}" def test_template_has_json() -> None: """Test partial format.""" prompt_txt = ( 'hello {text} {foo} \noutput format:\n```json\n{"name": "llamaindex"}\n```' ) except_prompt = ( 'hello world bar \noutput format:\n```json\n{"name": "llamaindex"}\n```' ) prompt_template = PromptTemplate(prompt_txt) template_vars = prompt_template.template_vars prompt_fmt = prompt_template.partial_format(foo="bar") prompt = prompt_fmt.format(text="world") assert isinstance(prompt_fmt, PromptTemplate) assert template_vars == ["text", "foo"] assert prompt == except_prompt assert prompt_fmt.format_messages(text="world") == [ ChatMessage(content=except_prompt, role=MessageRole.USER) ]
MockOutputParser
python
modin-project__modin
modin/core/io/text/json_dispatcher.py
{ "start": 1131, "end": 4910 }
class ____(TextFileDispatcher): """Class handles utils for reading `.json` files.""" @classmethod def _read(cls, path_or_buf, **kwargs): """ Read data from `path_or_buf` according to the passed `read_json` `kwargs` parameters. Parameters ---------- path_or_buf : str, path object or file-like object `path_or_buf` parameter of `read_json` function. **kwargs : dict Parameters of `read_json` function. Returns ------- BaseQueryCompiler Query compiler with imported data for further processing. """ path_or_buf = stringify_path(path_or_buf) path_or_buf = cls.get_path_or_buffer(path_or_buf) if isinstance(path_or_buf, str): if not cls.file_exists( path_or_buf, storage_options=kwargs.get("storage_options") ): return cls.single_worker_read( path_or_buf, reason=cls._file_not_found_msg(path_or_buf), **kwargs ) path_or_buf = cls.get_path(path_or_buf) elif not cls.pathlib_or_pypath(path_or_buf): return cls.single_worker_read( path_or_buf, reason=cls.BUFFER_UNSUPPORTED_MSG, **kwargs ) if not kwargs.get("lines", False): return cls.single_worker_read( path_or_buf, reason="`lines` argument not supported", **kwargs ) with OpenFile( path_or_buf, "rb", **(kwargs.get("storage_options", None) or {}), ) as f: columns = pandas.read_json(BytesIO(b"" + f.readline()), lines=True).columns kwargs["columns"] = columns empty_pd_df = pandas.DataFrame(columns=columns) with OpenFile( path_or_buf, "rb", kwargs.get("compression", "infer"), **(kwargs.get("storage_options", None) or {}), ) as f: column_widths, num_splits = cls._define_metadata(empty_pd_df, columns) args = {"fname": path_or_buf, "num_splits": num_splits, **kwargs} splits, _ = cls.partitioned_file( f, num_partitions=NPartitions.get(), ) partition_ids = [None] * len(splits) index_ids = [None] * len(splits) dtypes_ids = [None] * len(splits) for idx, (start, end) in enumerate(splits): args.update({"start": start, "end": end}) *partition_ids[idx], index_ids[idx], dtypes_ids[idx], _ = cls.deploy( func=cls.parse, f_kwargs=args, num_returns=num_splits + 3, ) # partition_id[-1] contains the columns for each partition, which will be useful # for implementing when `lines=False`. row_lengths = cls.materialize(index_ids) new_index = pandas.RangeIndex(sum(row_lengths)) partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths) # Compute dtypes by getting collecting and combining all of the partitions. The # reported dtypes from differing rows can be different based on the inference in # the limited data seen by each worker. We use pandas to compute the exact dtype # over the whole column for each column. The index is set below. dtypes = cls.get_dtypes(dtypes_ids, columns) new_frame = cls.frame_cls( np.array(partition_ids), new_index, columns, row_lengths, column_widths, dtypes=dtypes, ) new_frame.synchronize_labels(axis=0) return cls.query_compiler_cls(new_frame)
JSONDispatcher
python
tensorflow__tensorflow
tensorflow/python/checkpoint/checkpoint_management_test.py
{ "start": 13441, "end": 31251 }
class ____(test.TestCase): @test_util.run_in_graph_and_eager_modes def testDeletion(self): checkpoint = util.Checkpoint() manager = checkpoint_management.CheckpointManager( checkpoint, self.get_temp_dir(), max_to_keep=3) first_path = manager.save() second_path = manager.save() third_path = manager.save() fourth_path = manager.save() self.assertTrue(checkpoint_management.checkpoint_exists(fourth_path)) self.assertTrue(checkpoint_management.checkpoint_exists(third_path)) self.assertTrue(checkpoint_management.checkpoint_exists(second_path)) self.assertFalse(checkpoint_management.checkpoint_exists(first_path)) @test_util.run_in_graph_and_eager_modes def testKeepAll(self): checkpoint = util.Checkpoint() directory = os.path.join( self.get_temp_dir(), # Avoid sharing directories between eager and graph # TODO(allenl): stop run_in_graph_and_eager_modes reusing directories str(context.executing_eagerly())) manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=None) first_path = manager.save() second_path = manager.save() third_path = manager.save() self.assertTrue(checkpoint_management.checkpoint_exists(third_path)) self.assertTrue(checkpoint_management.checkpoint_exists(second_path)) self.assertTrue(checkpoint_management.checkpoint_exists(first_path)) self.assertEqual(third_path, manager.latest_checkpoint) self.assertEqual([first_path, second_path, third_path], manager.checkpoints) del manager manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=None) fourth_path = manager.save() self.assertEqual([first_path, second_path, third_path, fourth_path], manager.checkpoints) del manager manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=3) self.assertEqual([first_path, second_path, third_path, fourth_path], manager.checkpoints) self.assertTrue(checkpoint_management.checkpoint_exists(fourth_path)) self.assertTrue(checkpoint_management.checkpoint_exists(third_path)) self.assertTrue(checkpoint_management.checkpoint_exists(second_path)) self.assertTrue(checkpoint_management.checkpoint_exists(first_path)) fifth_path = manager.save() self.assertEqual([third_path, fourth_path, fifth_path], manager.checkpoints) self.assertTrue(checkpoint_management.checkpoint_exists(fifth_path)) self.assertTrue(checkpoint_management.checkpoint_exists(fourth_path)) self.assertTrue(checkpoint_management.checkpoint_exists(third_path)) self.assertFalse(checkpoint_management.checkpoint_exists(second_path)) self.assertFalse(checkpoint_management.checkpoint_exists(first_path)) @test_util.run_in_graph_and_eager_modes @test.mock.patch.object(checkpoint_management, "time") def testSaveRestoreState(self, mock_time): directory = self.get_temp_dir() mock_time.time.return_value = 3. checkpoint = util.Checkpoint() first_manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=2) first_time = 10000. first_name = os.path.join(directory, "ckpt-1") mock_time.time.return_value = first_time first_manager.save() state = checkpoint_management.get_checkpoint_state(directory) second_time = first_time + 3610. 
second_name = os.path.join(directory, "ckpt-2") mock_time.time.return_value = second_time first_manager.save() state = checkpoint_management.get_checkpoint_state(directory) self.assertEqual([first_time, second_time], state.all_model_checkpoint_timestamps) self.assertEqual([first_name, second_name], first_manager.checkpoints) self.assertEqual(second_name, first_manager.latest_checkpoint) del first_manager second_manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=2, keep_checkpoint_every_n_hours=1.5) self.assertEqual([first_name, second_name], second_manager.checkpoints) self.assertEqual(second_name, second_manager.latest_checkpoint) third_name = os.path.join(directory, "ckpt-3") third_time = second_time + 3600. * 0.2 mock_time.time.return_value = third_time second_manager.save() self.assertTrue(checkpoint_management.checkpoint_exists(first_name)) self.assertTrue(checkpoint_management.checkpoint_exists(second_name)) self.assertEqual([second_name, third_name], second_manager.checkpoints) state = checkpoint_management.get_checkpoint_state(directory) self.assertEqual(first_time, state.last_preserved_timestamp) fourth_time = third_time + 3600. * 0.5 mock_time.time.return_value = fourth_time fourth_name = os.path.join(directory, "ckpt-4") second_manager.save() self.assertTrue(checkpoint_management.checkpoint_exists(first_name)) self.assertFalse(checkpoint_management.checkpoint_exists(second_name)) self.assertEqual([third_name, fourth_name], second_manager.checkpoints) fifth_time = fourth_time + 3600. * 0.5 mock_time.time.return_value = fifth_time fifth_name = os.path.join(directory, "ckpt-5") second_manager.save() self.assertEqual([fourth_name, fifth_name], second_manager.checkpoints) state = checkpoint_management.get_checkpoint_state(directory) self.assertEqual(first_time, state.last_preserved_timestamp) del second_manager third_manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=2, keep_checkpoint_every_n_hours=1.5) self.assertEqual(fifth_name, third_manager.latest_checkpoint) mock_time.time.return_value += 10. 
third_manager.save() sixth_name = os.path.join(directory, "ckpt-6") state = checkpoint_management.get_checkpoint_state(directory) self.assertEqual(fourth_time, state.last_preserved_timestamp) self.assertTrue(checkpoint_management.checkpoint_exists(first_name)) self.assertTrue(checkpoint_management.checkpoint_exists(fourth_name)) self.assertTrue(checkpoint_management.checkpoint_exists(fifth_name)) self.assertTrue(checkpoint_management.checkpoint_exists(sixth_name)) self.assertFalse(checkpoint_management.checkpoint_exists(second_name)) self.assertFalse(checkpoint_management.checkpoint_exists(third_name)) self.assertEqual([fifth_name, sixth_name], third_manager.checkpoints) @test_util.run_in_graph_and_eager_modes def testContinueFromUnmanaged(self): directory = self.get_temp_dir() prefix = os.path.join(directory, "unusual_prefix") checkpoint = util.Checkpoint() first_path = checkpoint.save(prefix) second_path = checkpoint.save(prefix) del checkpoint checkpoint = util.Checkpoint() manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=2) checkpoint.restore(manager.latest_checkpoint).run_restore_ops() self.assertEqual(2, self.evaluate(checkpoint.save_counter)) third_path = manager.save() self.assertEqual([third_path], manager.checkpoints) fourth_path = manager.save() self.assertEqual([third_path, fourth_path], manager.checkpoints) fifth_path = manager.save() self.assertEqual([fourth_path, fifth_path], manager.checkpoints) self.assertTrue(checkpoint_management.checkpoint_exists(first_path)) self.assertTrue(checkpoint_management.checkpoint_exists(second_path)) self.assertFalse(checkpoint_management.checkpoint_exists(third_path)) self.assertTrue(checkpoint_management.checkpoint_exists(fourth_path)) self.assertTrue(checkpoint_management.checkpoint_exists(fifth_path)) @test_util.run_in_graph_and_eager_modes @test.mock.patch.object(checkpoint_management, "time") def testClockReset(self, mock_time): directory = self.get_temp_dir() mock_time.time.return_value = 10000. checkpoint = util.Checkpoint() first_manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=1, keep_checkpoint_every_n_hours=1.) first_path = first_manager.save() mock_time.time.return_value += 3600. second_path = first_manager.save() mock_time.time.return_value += 3600. third_path = first_manager.save() self.assertFalse(checkpoint_management.checkpoint_exists(first_path)) self.assertTrue(checkpoint_management.checkpoint_exists(second_path)) self.assertTrue(checkpoint_management.checkpoint_exists(third_path)) self.assertEqual([third_path], first_manager.checkpoints) state = checkpoint_management.get_checkpoint_state(directory) self.assertEqual(13600., state.last_preserved_timestamp) # Set the clock back in time mock_time.time.return_value = 5000. del first_manager with test.mock.patch.object(logging, "warning") as mock_log: second_manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=1) self.assertRegex( str(mock_log.call_args), "behind the last preserved checkpoint timestamp") # We should err on the side of keeping checkpoints around when we're not # sure whether they were preserved or not due to clock funkiness. self.assertTrue(checkpoint_management.checkpoint_exists(second_path)) # We know about the existing checkpoints, but they'll never be deleted and # so won't go in the CheckpointState proto on save. 
self.assertEqual(third_path, second_manager.latest_checkpoint) self.assertEqual([], second_manager.checkpoints) mock_time.time.return_value += 10. fourth_path = second_manager.save() self.assertTrue(checkpoint_management.checkpoint_exists(second_path)) self.assertTrue(checkpoint_management.checkpoint_exists(third_path)) self.assertEqual(fourth_path, second_manager.latest_checkpoint) self.assertEqual([fourth_path], second_manager.checkpoints) mock_time.time.return_value += 10. fifth_path = second_manager.save() self.assertTrue(checkpoint_management.checkpoint_exists(second_path)) self.assertTrue(checkpoint_management.checkpoint_exists(third_path)) self.assertEqual([fifth_path], second_manager.checkpoints) state = checkpoint_management.get_checkpoint_state(directory) self.assertEqual(5000., state.last_preserved_timestamp) self.assertEqual([5020.], state.all_model_checkpoint_timestamps) @test_util.run_in_graph_and_eager_modes def testCustomNumbering(self): directory = self.get_temp_dir() step = variables.Variable(0, dtype=dtypes.int64) checkpoint = util.Checkpoint(step=step) manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=2) self.evaluate(step.initializer) for i in range(5): path = manager.save(checkpoint_number=step) expected_suffix = "-%d" % (2 * i,) if not path.endswith(expected_suffix): self.fail("%s should have suffix %s" % (path, expected_suffix)) self.evaluate(step.assign_add(2)) self.assertEqual(5, self.evaluate(checkpoint.save_counter)) # Test regular integers last_path = manager.save(checkpoint_number=32) self.assertIn("-32", last_path) self.assertEqual(last_path, manager.latest_checkpoint) self.assertEqual( last_path, checkpoint_management.latest_checkpoint(directory)) state = checkpoint_management.get_checkpoint_state(directory) # Only the most recent two checkpoints are saved self.assertEqual([path, last_path], state.all_model_checkpoint_paths) @test_util.run_in_graph_and_eager_modes def testCustomCheckpointPrefix(self): directory = self.get_temp_dir() checkpoint = util.Checkpoint() manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=2, checkpoint_name="ckpt_name") path = manager.save(checkpoint_number=5) self.assertEqual(os.path.basename(path), "ckpt_name-5") manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=2) path = manager.save(checkpoint_number=5) self.assertEqual(os.path.basename(path), "ckpt-5") @test_util.run_in_graph_and_eager_modes def testRestoreOrInitialize(self): directory = self.get_temp_dir() # Create a checkpoint for initializing. init_prefix = os.path.join(directory, "init") init_v = variables.Variable(2.0) init_ckpt = util.Checkpoint(v=init_v) self.evaluate(init_v.initializer) init_path = init_ckpt.save(init_prefix) # Create the checkpoint manager. ckpt_dir = os.path.join(directory, "ckpt") v = variables.Variable(1.0) checkpoint = util.Checkpoint(v=v) manager = checkpoint_management.CheckpointManager( checkpoint, ckpt_dir, max_to_keep=None, init_fn=lambda: checkpoint.restore(init_path).run_restore_ops()) self.evaluate(v.initializer) # First call should call `init_fn`. self.assertIsNone(manager.restore_or_initialize()) self.assertEqual(2.0, self.evaluate(v)) # Save a checkpoint and second call should restore from the checkpoints. 
manager.save() self.assertIsNotNone(manager.restore_or_initialize()) @test_util.run_in_graph_and_eager_modes def testCheckpointManagerFSpathDirectory(self): directory = pathlib.Path(self.get_temp_dir()) v = variables.Variable(0.0) checkpoint = util.Checkpoint(v=v) self.evaluate(v.initializer) manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=2, checkpoint_name="ckpt_name") save_path = manager.save() expected = str(directory / "ckpt_name-1") self.assertEqual(expected, save_path) restore_path = manager.restore_or_initialize() self.assertEqual(str(directory / "ckpt_name-1"), restore_path) @test_util.run_in_graph_and_eager_modes def testLatestCheckpointFSpathDirectory(self): directory = pathlib.Path(self.get_temp_dir()) checkpoint = util.Checkpoint() manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=2, checkpoint_name="ckpt_name") manager.save() cp_dir = checkpoint_management.latest_checkpoint(directory) self.assertEqual(str(directory / "ckpt_name-1"), cp_dir) @test_util.run_in_graph_and_eager_modes def testCheckpointInterval(self): v = variables.Variable(1.0) step_counter = variables.Variable(0) self.evaluate([v.initializer, step_counter.initializer]) checkpoint = util.Checkpoint(v=v) manager = checkpoint_management.CheckpointManager( checkpoint, self.get_temp_dir(), max_to_keep=None, step_counter=step_counter, checkpoint_interval=2) # step_counter: 0, save an initial checkpoint. path = manager.save(check_interval=True) self.assertTrue(checkpoint_management.checkpoint_exists(path)) # step_counter: 1, no checkpoint saved. self.evaluate(step_counter.assign_add(1)) path = manager.save(check_interval=True) self.assertIsNone(path) # step_counter: 2, checkpoint saved. self.evaluate(step_counter.assign_add(1)) path = manager.save(check_interval=True) self.assertTrue(checkpoint_management.checkpoint_exists(path)) # no checkpoint saved when calling `save` with the same step counter. path = manager.save(check_interval=True) self.assertIsNone(path) # step_counter: 3, no checkpoint saved. self.evaluate(step_counter.assign_add(1)) path = manager.save(check_interval=True) self.assertIsNone(path) # Always save the checkpoint. path = manager.save(check_interval=False) self.assertTrue(checkpoint_management.checkpoint_exists(path)) @test_util.run_in_graph_and_eager_modes def testCheckpointIntervalWithLastCheckpointStep(self): v = variables.Variable(1.0) step_counter = variables.Variable(1) self.evaluate([v.initializer, step_counter.initializer]) checkpoint = util.Checkpoint(v=v) manager = checkpoint_management.CheckpointManager( checkpoint, self.get_temp_dir(), max_to_keep=None, step_counter=step_counter, checkpoint_interval=2, last_checkpoint_step=1) # step_counter: 1, no checkpoint saved. path = manager.save(check_interval=True) self.assertIsNone(path) # step_counter: 2, no checkpoint saved since the interval starts at 1. self.evaluate(step_counter.assign_add(1)) path = manager.save(check_interval=True) self.assertIsNone(path) # step_counter: 3, checkpoint saved. self.evaluate(step_counter.assign_add(1)) path = manager.save(check_interval=True) self.assertTrue(checkpoint_management.checkpoint_exists(path)) @test_util.run_in_graph_and_eager_modes def testCheckpointIntervalWithRestore(self): directory = self.get_temp_dir() v = variables.Variable(1.0) step_counter = variables.Variable(0) self.evaluate([v.initializer, step_counter.initializer]) # Prepare a checkpoint. 
checkpoint = util.Checkpoint(v=v) checkpoint.save(os.path.join(directory, "ckpt")) manager = checkpoint_management.CheckpointManager( checkpoint, directory, max_to_keep=None, step_counter=step_counter, checkpoint_interval=2) # Restore from the checkpoint. self.assertIsNotNone(manager.restore_or_initialize()) # step_counter: 0, no checkpoint saved because it is restored from the # checkpoint with the same step. path = manager.save() self.assertIsNone(path) if __name__ == "__main__": test.main()
CheckpointManagerTest
python
neetcode-gh__leetcode
python/1609-even-odd-tree.py
{ "start": 0, "end": 723 }
class ____:
    def isEvenOddTree(self, root: Optional[TreeNode]) -> bool:
        even = True
        q = deque([root])
        while q:
            prev = float("-inf") if even else float("inf")
            for _ in range(len(q)):
                node = q.popleft()
                if even and (node.val % 2 == 0 or node.val <= prev):
                    return False
                elif not even and (node.val % 2 == 1 or node.val >= prev):
                    return False
                if node.left:
                    q.append(node.left)
                if node.right:
                    q.append(node.right)
                prev = node.val
            even = not even
        return True
Solution
python
numba__numba
numba/tests/test_new_type_system.py
{ "start": 94, "end": 898 }
class ____(TestCase): def setUp(self) -> None: if config.USE_LEGACY_TYPE_SYSTEM: self.skipTest("This test is only for the new type system") return super().setUp() def test_return_types(self): @njit def foo(x): return x cases = [ # Python types 1, 1.2, (1 + 2j), True, # NumPy types np.int32(1), np.float64(1.2), np.complex64(1 + 2j), np.complex128(1 + 2j), np.bool_(True), np.datetime64('2020-01-01'), np.timedelta64(1, 'D'), ] for case in cases: self.assertEqual(foo(case), case) self.assertEqual(type(foo(case)), type(case))
TestTypes
python
ansible__ansible
lib/ansible/galaxy/token.py
{ "start": 5890, "end": 6780 }
class ____(object): token_type = 'Basic' def __init__(self, username, password=None): self.username = username self.password = password self._token = None @staticmethod def _encode_token(username, password): token = "%s:%s" % (to_text(username, errors='surrogate_or_strict'), to_text(password, errors='surrogate_or_strict', nonstring='passthru') or '') b64_val = base64.b64encode(to_bytes(token, encoding='utf-8', errors='surrogate_or_strict')) return to_text(b64_val) def get(self): if self._token: return self._token self._token = self._encode_token(self.username, self.password) return self._token def headers(self): headers = {} headers['Authorization'] = '%s %s' % (self.token_type, self.get()) return headers
BasicAuthToken
python
python-markdown__markdown
markdown/extensions/wikilinks.py
{ "start": 1809, "end": 3285 }
class ____(InlineProcessor): """ Build link from `wikilink`. """ def __init__(self, pattern: str, config: dict[str, Any]): super().__init__(pattern) self.config = config def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | str, int, int]: if m.group(1).strip(): base_url, end_url, html_class = self._getMeta() label = m.group(1).strip() url = self.config['build_url'](label, base_url, end_url) a = etree.Element('a') a.text = label a.set('href', url) if html_class: a.set('class', html_class) else: a = '' return a, m.start(0), m.end(0) def _getMeta(self) -> tuple[str, str, str]: """ Return meta data or `config` data. """ base_url = self.config['base_url'] end_url = self.config['end_url'] html_class = self.config['html_class'] if hasattr(self.md, 'Meta'): if 'wiki_base_url' in self.md.Meta: base_url = self.md.Meta['wiki_base_url'][0] if 'wiki_end_url' in self.md.Meta: end_url = self.md.Meta['wiki_end_url'][0] if 'wiki_html_class' in self.md.Meta: html_class = self.md.Meta['wiki_html_class'][0] return base_url, end_url, html_class def makeExtension(**kwargs): # pragma: no cover return WikiLinkExtension(**kwargs)
WikiLinksInlineProcessor
python
numba__numba
numba/tests/test_datamodel.py
{ "start": 1010, "end": 1077 }
class ____(test_factory()): fe_type = types.complex64
TestComplex
python
huggingface__transformers
src/transformers/models/segformer/modeling_segformer.py
{ "start": 17383, "end": 19084 }
class ____(SegformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config # hierarchical Transformer encoder self.encoder = SegformerEncoder(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden states) e.g. for ImageNet. """ )
SegformerModel
python
apache__airflow
airflow-core/src/airflow/api_fastapi/common/router.py
{ "start": 959, "end": 1508 }
class ____(APIRouter): """Extends the FastAPI default router.""" def api_route( self, path: str, operation_id: str | None = None, **kwargs: Any, ) -> Callable[[DecoratedCallable], DecoratedCallable]: def decorator(func: DecoratedCallable) -> DecoratedCallable: self.add_api_route( path, func, operation_id=operation_id or func.__name__, **kwargs, ) return func return decorator
AirflowRouter
python
pypa__pipenv
pipenv/vendor/pipdeptree/_models/package.py
{ "start": 5642, "end": 8429 }
class ____(Package): """ Wrapper class for Requirement instance. :param obj: The `Requirement` instance to wrap over :param dist: optional `importlib.metadata.Distribution` instance for this requirement """ UNKNOWN_VERSION = "?" def __init__(self, obj: Requirement, dist: DistPackage | None = None) -> None: super().__init__(obj.name) self._obj = obj self.dist = dist def render_as_root(self, *, frozen: bool) -> str: if not frozen: return f"{self.project_name}=={self.installed_version}" if self.dist: return self.as_frozen_repr(self.dist.unwrap()) return self.project_name def render_as_branch(self, *, frozen: bool) -> str: if not frozen: req_ver = self.version_spec or "Any" return f"{self.project_name} [required: {req_ver}, installed: {self.installed_version}]" return self.render_as_root(frozen=frozen) @property def version_spec(self) -> str | None: result = None specs = sorted(map(str, self._obj.specifier), reverse=True) # `reverse` makes '>' prior to '<' if specs: result = ",".join(specs) return result @property def installed_version(self) -> str: if not self.dist: try: return version(self.key) except PackageNotFoundError: pass # Avoid AssertionError with setuptools, see https://github.com/tox-dev/pipdeptree/issues/162 if self.key == "setuptools": return self.UNKNOWN_VERSION try: m = import_module(self.key) except ImportError: return self.UNKNOWN_VERSION else: v = getattr(m, "__version__", self.UNKNOWN_VERSION) if ismodule(v): return getattr(v, "__version__", self.UNKNOWN_VERSION) return v return self.dist.version def is_conflicting(self) -> bool: """If installed version conflicts with required version.""" # unknown installed version is also considered conflicting if self.is_missing: return True return not self._obj.specifier.contains(self.installed_version, prereleases=True) @property def is_missing(self) -> bool: return self.installed_version == self.UNKNOWN_VERSION def as_dict(self) -> dict[str, str]: return { "key": self.key, "package_name": self.project_name, "installed_version": self.installed_version, "required_version": self.version_spec if self.version_spec is not None else "Any", } __all__ = [ "DistPackage", "ReqPackage", ]
ReqPackage
python
pypa__warehouse
warehouse/accounts/forms.py
{ "start": 13956, "end": 15555 }
class ____( # type: ignore[misc] # Both `NewEmailMixin` and `NewPasswordMixin` declare an `email` field, # we ignore the difference in implementation. NewUsernameMixin, NewEmailMixin, NewPasswordMixin, HoneypotMixin, wtforms.Form, ): full_name = wtforms.StringField( validators=[ wtforms.validators.Length( max=100, message=_( "The name is too long. " "Choose a name with 100 characters or less." ), ), wtforms.validators.Regexp( r"(?i)(?:(?!:\/\/).)*$", message=_("URLs are not allowed in the name field."), ), PreventNullBytesValidator(), ] ) g_recaptcha_response = wtforms.StringField() def __init__(self, *args, captcha_service, user_service, **kwargs): super().__init__(*args, **kwargs) self.user_service = user_service self.user_id = None self.captcha_service = captcha_service def validate_g_recaptcha_response(self, field): # do required data validation here due to enabled flag being required if self.captcha_service.enabled and not field.data: raise wtforms.validators.ValidationError("Captcha error.") try: self.captcha_service.verify_response(field.data) except CaptchaError: # TODO: log error # don't want to provide the user with any detail raise wtforms.validators.ValidationError("Captcha error.")
RegistrationForm
python
cython__cython
Cython/Compiler/ParseTreeTransforms.py
{ "start": 171457, "end": 174751 }
class ____(VisitorTransform): """ This is not a transform in the pipeline. It is invoked on the specific versions of a cdef function with fused argument types. It filters out any type branches that don't match. e.g. if fused_t is mytype: ... elif fused_t in other_fused_type: ... """ def __init__(self, local_scope): super().__init__() self.local_scope = local_scope # defer the import until now to avoid circular import time dependencies from .Optimize import ConstantFolding self.transform = ConstantFolding(reevaluate=True) def visit_IfStatNode(self, node): """ Filters out any if clauses with false compile time type check expression. """ self.visitchildren(node) return self.transform(node) def visit_GILStatNode(self, node): """ Fold constant condition of GILStatNode. """ self.visitchildren(node) return self.transform(node) def visit_PrimaryCmpNode(self, node): with Errors.local_errors(ignore=True): type1 = node.operand1.analyse_as_type(self.local_scope) type2 = node.operand2.analyse_as_type(self.local_scope) if type1 and type2: false_node = ExprNodes.BoolNode(node.pos, value=False) true_node = ExprNodes.BoolNode(node.pos, value=True) type1 = self.specialize_type(type1, node.operand1.pos) op = node.operator if op in ('is', 'is_not', '==', '!='): type2 = self.specialize_type(type2, node.operand2.pos) is_same = type1.same_as(type2) eq = op in ('is', '==') if (is_same and eq) or (not is_same and not eq): return true_node elif op in ('in', 'not_in'): # We have to do an instance check directly, as operand2 # needs to be a fused type and not a type with a subtype # that is fused. First unpack the typedef if isinstance(type2, PyrexTypes.CTypedefType): type2 = type2.typedef_base_type if type1.is_fused: error(node.operand1.pos, "Type is fused") elif not type2.is_fused: error(node.operand2.pos, "Can only use 'in' or 'not in' on a fused type") else: types = PyrexTypes.get_specialized_types(type2) for specialized_type in types: if type1.same_as(specialized_type): if op == 'in': return true_node else: return false_node if op == 'not_in': return true_node return false_node return node def specialize_type(self, type, pos): try: return type.specialize(self.local_scope.fused_to_specific) except KeyError: error(pos, "Type is not specific") return type def visit_Node(self, node): self.visitchildren(node) return node
ReplaceFusedTypeChecks
python
networkx__networkx
networkx/algorithms/bipartite/tests/test_link_analysis.py
{ "start": 129, "end": 6914 }
class ____: @classmethod def setup_class(cls): cls.davis_southern_women_graph = nx.davis_southern_women_graph() cls.women_bipartite_set = { node for node, bipartite in cls.davis_southern_women_graph.nodes( data="bipartite" ) if bipartite == 0 } cls.gnmk_random_graph = nx.bipartite.generators.gnmk_random_graph( 5 * 10**2, 10**2, 5 * 10**2, seed=27 ) cls.gnmk_random_graph_top_nodes = { node for node, bipartite in cls.gnmk_random_graph.nodes(data="bipartite") if bipartite == 0 } def test_collaborative_filtering_birank(self): elist = [ ("u1", "p1", 5), ("u2", "p1", 5), ("u2", "p2", 4), ("u3", "p1", 3), ("u3", "p3", 2), ] item_recommendation_graph = nx.DiGraph() item_recommendation_graph.add_weighted_edges_from(elist, weight="rating") product_nodes = ("p1", "p2", "p3") u1_query = { product: rating for _, product, rating in item_recommendation_graph.edges( nbunch="u1", data="rating" ) } u1_birank_results = bipartite.birank( item_recommendation_graph, product_nodes, alpha=0.8, beta=1.0, top_personalization=u1_query, weight="rating", ) assert u1_birank_results["p2"] > u1_birank_results["p3"] u1_birank_results_unweighted = bipartite.birank( item_recommendation_graph, product_nodes, alpha=0.8, beta=1.0, top_personalization=u1_query, weight=None, ) assert u1_birank_results_unweighted["p2"] == pytest.approx( u1_birank_results_unweighted["p3"], rel=2e-6 ) def test_davis_birank(self): scores = bipartite.birank( self.davis_southern_women_graph, self.women_bipartite_set ) answer = { "Laura Mandeville": 0.07, "Olivia Carleton": 0.04, "Frances Anderson": 0.05, "Pearl Oglethorpe": 0.04, "Katherina Rogers": 0.06, "Flora Price": 0.04, "Dorothy Murchison": 0.04, "Helen Lloyd": 0.06, "Theresa Anderson": 0.07, "Eleanor Nye": 0.05, "Evelyn Jefferson": 0.07, "Sylvia Avondale": 0.07, "Charlotte McDowd": 0.05, "Verne Sanderson": 0.05, "Myra Liddel": 0.05, "Brenda Rogers": 0.07, "Ruth DeSand": 0.05, "Nora Fayette": 0.07, "E8": 0.11, "E7": 0.09, "E10": 0.07, "E9": 0.1, "E13": 0.05, "E3": 0.07, "E12": 0.07, "E11": 0.06, "E2": 0.05, "E5": 0.08, "E6": 0.08, "E14": 0.05, "E4": 0.06, "E1": 0.05, } for node, value in answer.items(): assert scores[node] == pytest.approx(value, abs=1e-2) def test_davis_birank_with_personalization(self): women_personalization = {"Laura Mandeville": 1} scores = bipartite.birank( self.davis_southern_women_graph, self.women_bipartite_set, top_personalization=women_personalization, ) answer = { "Laura Mandeville": 0.29, "Olivia Carleton": 0.02, "Frances Anderson": 0.06, "Pearl Oglethorpe": 0.04, "Katherina Rogers": 0.04, "Flora Price": 0.02, "Dorothy Murchison": 0.03, "Helen Lloyd": 0.04, "Theresa Anderson": 0.08, "Eleanor Nye": 0.05, "Evelyn Jefferson": 0.09, "Sylvia Avondale": 0.05, "Charlotte McDowd": 0.06, "Verne Sanderson": 0.04, "Myra Liddel": 0.03, "Brenda Rogers": 0.08, "Ruth DeSand": 0.05, "Nora Fayette": 0.05, "E8": 0.11, "E7": 0.1, "E10": 0.04, "E9": 0.07, "E13": 0.03, "E3": 0.11, "E12": 0.04, "E11": 0.03, "E2": 0.1, "E5": 0.11, "E6": 0.1, "E14": 0.03, "E4": 0.06, "E1": 0.1, } for node, value in answer.items(): assert scores[node] == pytest.approx(value, abs=1e-2) def test_birank_empty_bipartite_set(self): G = nx.Graph() all_nodes = [1, 2, 3] G.add_nodes_from(all_nodes) # Test with empty bipartite set with pytest.raises(nx.NetworkXAlgorithmError): bipartite.birank(G, all_nodes) @pytest.mark.parametrize( "damping_factor,value", itertools.product(["alpha", "beta"], [-0.1, 1.1]) ) def test_birank_invalid_alpha_beta(self, damping_factor, value): kwargs = {damping_factor: value} with 
pytest.raises(nx.NetworkXAlgorithmError): bipartite.birank( self.davis_southern_women_graph, self.women_bipartite_set, **kwargs ) def test_birank_power_iteration_failed_convergence(self): with pytest.raises(nx.PowerIterationFailedConvergence): bipartite.birank( self.davis_southern_women_graph, self.women_bipartite_set, max_iter=1 ) @pytest.mark.parametrize( "personalization,alpha,beta", itertools.product( [ # Concentrated case lambda x: 1000 if x == 0 else 0, # Uniform case lambda x: 5, # Zero case lambda x: 0, ], [i / 2 for i in range(3)], [i / 2 for i in range(3)], ), ) def test_gnmk_convergence_birank(self, personalization, alpha, beta): top_personalization_dict = { node: personalization(node) for node in self.gnmk_random_graph_top_nodes } bipartite.birank( self.gnmk_random_graph, self.gnmk_random_graph_top_nodes, top_personalization=top_personalization_dict, alpha=alpha, beta=beta, ) def test_negative_personalization(self): top_personalization_dict = {0: -1} with pytest.raises(nx.NetworkXAlgorithmError): bipartite.birank( self.gnmk_random_graph, self.gnmk_random_graph_top_nodes, top_personalization=top_personalization_dict, )
TestBipartiteLinkAnalysis
python
realpython__materials
python-copy/rectangle.py
{ "start": 246, "end": 786 }
class ____: def __init__(self, x, y): self.x = x self.y = y def __repr__(self): return f"Point(x={self.x}, y={self.y})" if __name__ == "__main__": bounding_box = Rectangle( top_left := Point(10, 20), bottom_right := Point(30, 40) ) shallow_copy = copy.copy(bounding_box) deep_copy = copy.deepcopy(bounding_box) bounding_box.bottom_right = Point(500, 700) bottom_right.x += 100 print(f"{bounding_box = }") print(f"{shallow_copy = }") print(f"{deep_copy = }")
Point
python
getsentry__sentry
src/sentry/integrations/slack/threads/activity_notifications.py
{ "start": 4417, "end": 5423 }
class ____(_ExternalIssueCreatedActivity): """ Override class for Asana as, at this time, the label, or ticket number, does not exist and has to be derived. If plausible, this could be removed if the activity object itself properly has the correct data, but side effects for that change are not yet known. """ _DEFAULT_ASANA_LABEL_VALUE = "Asana Issue" def get_ticket_number(self) -> str: # Try to use the base logic if it works as a just in-case stored_value = super().get_ticket_number() if stored_value != "" and stored_value != self._DEFAULT_ASANA_LABEL_VALUE: return stored_value link = self.get_link() if not link: return "" # Remove any trailing slashes if link.endswith("/"): link = link[:-1] # Split the URL by "/" parts = link.split("/") # Get the last part last_part = parts[-1] return last_part
_AsanaExternalIssueCreatedActivity
python
eriklindernoren__ML-From-Scratch
mlfromscratch/deep_learning/layers.py
{ "start": 16829, "end": 17213 }
class ____(PoolingLayer): def _pool_forward(self, X_col): output = np.mean(X_col, axis=0) return output def _pool_backward(self, accum_grad): accum_grad_col = np.zeros((np.prod(self.pool_shape), accum_grad.size)) accum_grad_col[:, range(accum_grad.size)] = 1. / accum_grad_col.shape[0] * accum_grad return accum_grad_col
AveragePooling2D
python
kamyu104__LeetCode-Solutions
Python/number-of-sub-arrays-of-size-k-and-average-greater-than-or-equal-to-threshold.py
{ "start": 505, "end": 935 }
class ____(object): def numOfSubarrays(self, arr, k, threshold): """ :type arr: List[int] :type k: int :type threshold: int :rtype: int """ accu = [0] for x in arr: accu.append(accu[-1]+x) result = 0 for i in xrange(len(accu)-k): if accu[i+k]-accu[i] >= threshold*k: result += 1 return result
Solution2
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeVarTuple7.py
{ "start": 676, "end": 727 }
class ____(dict[tuple[Unpack[_Ys]], _T1]): ...
Class3
python
nedbat__coveragepy
coverage/types.py
{ "start": 742, "end": 1237 }
class ____(Protocol): """A Python trace function.""" def __call__( self, frame: FrameType, event: str, arg: Any, lineno: TLineNo | None = None, # Our own twist, see collector.py ) -> TTraceFn | None: ... ## Coverage.py tracing # Line numbers are pervasive enough that they deserve their own type. TLineNo = int # Bytecode offsets are pervasive enough that they deserve their own type. TOffset = int TArc = tuple[TLineNo, TLineNo]
TTraceFn
python
apache__airflow
airflow-core/src/airflow/security/permissions.py
{ "start": 2872, "end": 4338 }
class ____(TypedDict): """Details of a resource (actions and prefix).""" actions: set[str] prefix: str # Keeping DAG_ACTIONS to keep the compatibility with outdated versions of FAB provider DAG_ACTIONS = {ACTION_CAN_READ, ACTION_CAN_EDIT, ACTION_CAN_DELETE} RESOURCE_DETAILS_MAP = { RESOURCE_DAG: ResourceDetails( actions={ACTION_CAN_READ, ACTION_CAN_EDIT, ACTION_CAN_DELETE}, prefix=RESOURCE_DAG_PREFIX ), RESOURCE_DAG_RUN: ResourceDetails( actions={ACTION_CAN_READ, ACTION_CAN_CREATE, ACTION_CAN_DELETE, ACTION_CAN_ACCESS_MENU}, prefix="DAG Run:", ), } PREFIX_LIST = [details["prefix"] for details in RESOURCE_DETAILS_MAP.values()] PREFIX_RESOURCES_MAP = {details["prefix"]: resource for resource, details in RESOURCE_DETAILS_MAP.items()} def resource_name(dag_id: str, resource: str) -> str: """Return the resource name for a DAG id.""" if dag_id in RESOURCE_DETAILS_MAP.keys(): return dag_id if dag_id.startswith(tuple(PREFIX_RESOURCES_MAP.keys())): return dag_id return f"{RESOURCE_DETAILS_MAP[resource]['prefix']}{dag_id}" def resource_name_for_dag(dag_id: str) -> str: """ Return the resource name for a DAG id. Note: This function is kept for backwards compatibility. """ if dag_id == RESOURCE_DAG: return dag_id if dag_id.startswith(RESOURCE_DAG_PREFIX): return dag_id return f"{RESOURCE_DAG_PREFIX}{dag_id}"
ResourceDetails
python
altair-viz__altair
altair/vegalite/v6/schema/channels.py
{ "start": 221310, "end": 250825 }
class ____( FieldChannelMixin, core.FieldOrDatumDefWithConditionMarkPropFieldDefnumber ): r""" FillOpacity schema wrapper. Parameters ---------- shorthand : str, dict, Sequence[str], :class:`RepeatRef` shorthand for field, aggregate, and type aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb'] Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``, ``"min"``, ``"max"``, ``"count"``). **Default value:** ``undefined`` (None) **See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__ documentation. bandPosition : float Relative position on a band of a stacked, binned, time unit, or band scale. For example, the marks will be positioned at the beginning of the band if set to ``0``, and at the middle of the band if set to ``0.5``. bin : bool, dict, :class:`BinParams`, None A flag for binning a ``quantitative`` field, `an object defining binning parameters <https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating that the data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (``"binned"``). * If ``true``, default `binning parameters <https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be applied. * If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are already binned. You can map the bin-start field to ``x`` (or ``y``) and the bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also set the axis's `tickMinStep <https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property. **Default value:** ``false`` **See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__ documentation. condition : dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`] One or more value definition(s) with `a parameter or a test predicate <https://vega.github.io/vega-lite/docs/condition.html>`__. **Note:** A field definition's ``condition`` property can only contain `conditional value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__ since Vega-Lite only allows at most one encoded field per encoding channel. field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef` **Required.** A string defining the name of the field from which to pull a data value or an object defining iterated values from the `repeat <https://vega.github.io/vega-lite/docs/repeat.html>`__ operator. **See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__ documentation. **Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If field names contain dots or brackets but are not nested, you can use ``\\`` to escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details about escaping in the `field documentation <https://vega.github.io/vega-lite/docs/field.html>`__. 
2) ``field`` is not required if ``aggregate`` is ``count``. legend : dict, :class:`Legend`, None An object defining properties of the legend. If ``null``, the legend for the encoding channel will be removed. **Default value:** If undefined, default `legend properties <https://vega.github.io/vega-lite/docs/legend.html>`__ are applied. **See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__ documentation. scale : dict, :class:`Scale`, None An object defining properties of the channel's scale, which is the function that transforms values in the data domain (numbers, dates, strings, etc) to visual values (pixels, colors, sizes) of the encoding channels. If ``null``, the scale will be `disabled and the data value will be directly encoded <https://vega.github.io/vega-lite/docs/scale.html#disable>`__. **Default value:** If undefined, default `scale properties <https://vega.github.io/vega-lite/docs/scale.html>`__ are applied. **See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__ documentation. sort : dict, :class:`Sort`, Sequence[str], Sequence[bool], Sequence[float], :class:`SortArray`, :class:`SortOrder`, :class:`AllSortString`, :class:`SortByChannel`, :class:`SortByEncoding`, :class:`EncodingSortField`, :class:`SortByChannelDesc`, Sequence[dict, :class:`DateTime`], Literal['-x', '-y', '-color', '-fill', '-stroke', '-strokeWidth', '-size', '-shape', '-fillOpacity', '-strokeOpacity', '-opacity', '-text', 'ascending', 'descending', 'x', 'y', 'color', 'fill', 'stroke', 'strokeWidth', 'size', 'shape', 'fillOpacity', 'strokeOpacity', 'opacity', 'text'], None Sort order for the encoded field. For continuous fields (quantitative or temporal), ``sort`` can be either ``"ascending"`` or ``"descending"``. For discrete fields, ``sort`` can be one of the following: * ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in JavaScript. * `A string indicating an encoding channel name to sort by <https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ (e.g., ``"x"`` or ``"y"``) with an optional minus prefix for descending sort (e.g., ``"-x"`` to sort by x-field, descending). This channel string is short-form of `a sort-by-encoding definition <https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__. For example, ``"sort": "-x"`` is equivalent to ``"sort": {"encoding": "x", "order": "descending"}``. * `A sort field definition <https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by another field. * `An array specifying the field values in preferred order <https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the sort order will obey the values in the array, followed by any unspecified values in their original order. For discrete time field, values in the sort array can be `date-time definition objects <https://vega.github.io/vega-lite/docs/datetime.html>`__. In addition, for time units ``"month"`` and ``"day"``, the values can be the month or day names (case insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"``). * ``null`` indicating no sort. **Default value:** ``"ascending"`` **Note:** ``null`` and sorting by another channel is not supported for ``row`` and ``column``. **See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__ documentation. 
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds'] Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal field. or `a temporal field that gets casted as ordinal <https://vega.github.io/vega-lite/docs/type.html#cast>`__. **Default value:** ``undefined`` (None) **See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__ documentation. title : str, :class:`Text`, Sequence[str], None A title for the field. If ``null``, the title will be removed. **Default value:** derived from the field's name and transformation function (``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function, the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the field is binned or has a time unit applied, the applied function is shown in parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``). Otherwise, the title is simply the field name. 
**Notes**: 1) You can customize the default field title format by providing the `fieldTitle <https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle function via the compile function's options <https://vega.github.io/vega-lite/usage/compile.html#field-title>`__. 2) If both field definition's ``title`` and axis, header, or legend ``title`` are defined, axis/header/legend title will be used. type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal'] The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or ``"nominal"``) for the encoded field or constant value (``datum``). It can also be a ``"geojson"`` type for encoding `'geoshape' <https://vega.github.io/vega-lite/docs/geoshape.html>`__. Vega-Lite automatically infers data types in many cases as discussed below. However, type is required for a field if: (1) the field is not nominal and the field encoding has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal scale for a field with ``bin`` or ``timeUnit``. **Default value:** 1) For a data ``field``, ``"nominal"`` is the default data type unless the field encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or ``timeUnit`` that satisfies the following criteria: * ``"quantitative"`` is the default type if (1) the encoded field contains ``bin`` or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is ``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__. * ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit`` or (2) the specified scale type is a time or utc scale * ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort order <https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__, (2) the specified scale type is an ordinal/point/band scale, or (3) the encoding channel is ``order``. 2) For a constant value in data domain (``datum``): * ``"quantitative"`` if the datum is a number * ``"nominal"`` if the datum is a string * ``"temporal"`` if the datum is `a date time object <https://vega.github.io/vega-lite/docs/datetime.html>`__ **Note:** * Data ``type`` describes the semantics of the data rather than the primitive data types (number, string, etc.). The same primitive data type can have different types of measurement. For example, numeric data can represent quantitative, ordinal, or nominal data. * Data values for a temporal field can be either a date-time string (e.g., ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a timestamp number (e.g., ``1552199579097``). * When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the ``type`` property can be either ``"quantitative"`` (for using a linear bin scale) or `"ordinal" (for using an ordinal bin scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__. * When using with `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal" (for using an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__. 
* When using with `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property refers to the post-aggregation data type. For example, we can calculate count ``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct", "field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``. * Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have ``type`` as they must have exactly the same type as their primary channels (e.g., ``x``, ``y``). **See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__ documentation. """ _class_is_valid_at_instantiation = False _encoding_name = "fillOpacity" @overload def aggregate(self, _: NonArgAggregateOp_T, /) -> FillOpacity: ... @overload def aggregate( self, *, argmax: Optional[str | SchemaBase] = Undefined ) -> FillOpacity: ... @overload def aggregate( self, *, argmin: Optional[str | SchemaBase] = Undefined ) -> FillOpacity: ... @overload def bandPosition(self, _: float, /) -> FillOpacity: ... @overload def bin(self, _: bool | Bin | None, /) -> FillOpacity: ... @overload def bin( self, *, anchor: Optional[float] = Undefined, base: Optional[float] = Undefined, binned: Optional[bool] = Undefined, divide: Optional[Sequence[float]] = Undefined, extent: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined, maxbins: Optional[float] = Undefined, minstep: Optional[float] = Undefined, nice: Optional[bool] = Undefined, step: Optional[float] = Undefined, steps: Optional[Sequence[float]] = Undefined, ) -> FillOpacity: ... @overload def condition( self, *, test: Optional[str | SchemaBase | Map] = Undefined, value: Optional[float | Parameter | SchemaBase | Map] = Undefined, ) -> FillOpacity: ... @overload def condition( self, *, empty: Optional[bool] = Undefined, param: Optional[str | SchemaBase] = Undefined, value: Optional[float | Parameter | SchemaBase | Map] = Undefined, ) -> FillOpacity: ... @overload def condition( self, _: list[core.ConditionalValueDefnumberExprRef], / ) -> FillOpacity: ... @overload def field(self, _: str | RepeatRef, /) -> FillOpacity: ... @overload def field( self, *, repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined, ) -> FillOpacity: ... @overload def legend(self, _: Legend | None, /) -> FillOpacity: ... 
@overload def legend( self, *, aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined, clipHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined, columnPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined, columns: Optional[float | Parameter | SchemaBase | Map] = Undefined, cornerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined, description: Optional[str | Parameter | SchemaBase | Map] = Undefined, direction: Optional[SchemaBase | Orientation_T] = Undefined, fillColor: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, format: Optional[str | SchemaBase | Map] = Undefined, formatType: Optional[str] = Undefined, gradientLength: Optional[float | Parameter | SchemaBase | Map] = Undefined, gradientOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined, gradientStrokeColor: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, gradientStrokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined, gradientThickness: Optional[float | Parameter | SchemaBase | Map] = Undefined, gridAlign: Optional[Parameter | SchemaBase | Map | LayoutAlign_T] = Undefined, labelAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined, labelBaseline: Optional[ Parameter | SchemaBase | Map | TextBaseline_T ] = Undefined, labelColor: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, labelExpr: Optional[str] = Undefined, labelFont: Optional[str | Parameter | SchemaBase | Map] = Undefined, labelFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined, labelFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined, labelFontWeight: Optional[ Parameter | SchemaBase | Map | FontWeight_T ] = Undefined, labelLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined, labelOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined, labelOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined, labelOverlap: Optional[ bool | Parameter | SchemaBase | Literal["greedy", "parity"] | Map ] = Undefined, labelPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined, labelSeparation: Optional[float | Parameter | SchemaBase | Map] = Undefined, legendX: Optional[float | Parameter | SchemaBase | Map] = Undefined, legendY: Optional[float | Parameter | SchemaBase | Map] = Undefined, offset: Optional[float | Parameter | SchemaBase | Map] = Undefined, orient: Optional[SchemaBase | LegendOrient_T] = Undefined, padding: Optional[float | Parameter | SchemaBase | Map] = Undefined, rowPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined, strokeColor: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, symbolDash: Optional[ Parameter | SchemaBase | Sequence[float] | Map ] = Undefined, symbolDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined, symbolFillColor: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, symbolLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined, symbolOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined, symbolOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined, symbolSize: Optional[float | Parameter | SchemaBase | Map] = Undefined, symbolStrokeColor: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, symbolStrokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined, symbolType: Optional[str | Parameter | SchemaBase | 
Map] = Undefined, tickCount: Optional[ float | Parameter | SchemaBase | Map | TimeInterval_T ] = Undefined, tickMinStep: Optional[float | Parameter | SchemaBase | Map] = Undefined, title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined, titleAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined, titleAnchor: Optional[Parameter | SchemaBase | Map | TitleAnchor_T] = Undefined, titleBaseline: Optional[ Parameter | SchemaBase | Map | TextBaseline_T ] = Undefined, titleColor: Optional[ str | Parameter | SchemaBase | Map | ColorName_T | None ] = Undefined, titleFont: Optional[str | Parameter | SchemaBase | Map] = Undefined, titleFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined, titleFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined, titleFontWeight: Optional[ Parameter | SchemaBase | Map | FontWeight_T ] = Undefined, titleLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined, titleLineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined, titleOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined, titleOrient: Optional[Parameter | SchemaBase | Map | Orient_T] = Undefined, titlePadding: Optional[float | Parameter | SchemaBase | Map] = Undefined, type: Optional[Literal["symbol", "gradient"]] = Undefined, values: Optional[ Parameter | SchemaBase | Sequence[str] | Sequence[bool] | Sequence[float] | Sequence[Temporal | SchemaBase | Map] | Map ] = Undefined, zindex: Optional[float] = Undefined, ) -> FillOpacity: ... @overload def scale(self, _: Scale | None, /) -> FillOpacity: ... @overload def scale( self, *, align: Optional[float | Parameter | SchemaBase | Map] = Undefined, base: Optional[float | Parameter | SchemaBase | Map] = Undefined, bins: Optional[SchemaBase | Sequence[float] | Map] = Undefined, clamp: Optional[bool | Parameter | SchemaBase | Map] = Undefined, constant: Optional[float | Parameter | SchemaBase | Map] = Undefined, domain: Optional[ Parameter | SchemaBase | Literal["unaggregated"] | Sequence[ str | bool | float | Temporal | Parameter | SchemaBase | Map | None ] | Map ] = Undefined, domainMax: Optional[ float | Temporal | Parameter | SchemaBase | Map ] = Undefined, domainMid: Optional[float | Parameter | SchemaBase | Map] = Undefined, domainMin: Optional[ float | Temporal | Parameter | SchemaBase | Map ] = Undefined, domainRaw: Optional[Parameter | SchemaBase | Map] = Undefined, exponent: Optional[float | Parameter | SchemaBase | Map] = Undefined, interpolate: Optional[ Parameter | SchemaBase | Map | ScaleInterpolateEnum_T ] = Undefined, nice: Optional[ bool | float | Parameter | SchemaBase | Map | TimeInterval_T ] = Undefined, padding: Optional[float | Parameter | SchemaBase | Map] = Undefined, paddingInner: Optional[float | Parameter | SchemaBase | Map] = Undefined, paddingOuter: Optional[float | Parameter | SchemaBase | Map] = Undefined, range: Optional[ SchemaBase | Sequence[str | float | Parameter | SchemaBase | Sequence[float] | Map] | Map | RangeEnum_T ] = Undefined, rangeMax: Optional[str | float | Parameter | SchemaBase | Map] = Undefined, rangeMin: Optional[str | float | Parameter | SchemaBase | Map] = Undefined, reverse: Optional[bool | Parameter | SchemaBase | Map] = Undefined, round: Optional[bool | Parameter | SchemaBase | Map] = Undefined, scheme: Optional[Parameter | SchemaBase | Map | ColorScheme_T] = Undefined, type: Optional[SchemaBase | ScaleType_T] = Undefined, zero: Optional[bool | Parameter | SchemaBase | Map] = Undefined, ) -> FillOpacity: ... 
@overload def sort( self, _: Sequence[str] | Sequence[bool] | Sequence[float] | Sequence[DateTime | Temporal] | AllSortString_T | None, /, ) -> FillOpacity: ... @overload def sort( self, *, field: Optional[str | SchemaBase | Map] = Undefined, op: Optional[SchemaBase | NonArgAggregateOp_T] = Undefined, order: Optional[SchemaBase | SortOrder_T | None] = Undefined, ) -> FillOpacity: ... @overload def sort( self, *, encoding: Optional[SchemaBase | SortByChannel_T] = Undefined, order: Optional[SchemaBase | SortOrder_T | None] = Undefined, ) -> FillOpacity: ... @overload def timeUnit( self, _: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T, /, ) -> FillOpacity: ... @overload def timeUnit( self, *, binned: Optional[bool] = Undefined, maxbins: Optional[float] = Undefined, step: Optional[float] = Undefined, unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined, utc: Optional[bool] = Undefined, ) -> FillOpacity: ... @overload def title(self, _: str | Sequence[str] | None, /) -> FillOpacity: ... @overload def type(self, _: StandardType_T, /) -> FillOpacity: ... def __init__( self, shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined, aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined, bandPosition: Optional[float] = Undefined, bin: Optional[bool | SchemaBase | Map | None] = Undefined, condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined, field: Optional[str | SchemaBase | Map] = Undefined, legend: Optional[SchemaBase | Map | None] = Undefined, scale: Optional[SchemaBase | Map | None] = Undefined, sort: Optional[ SchemaBase | Sequence[str] | Sequence[bool] | Sequence[float] | Sequence[Temporal | SchemaBase | Map] | Map | AllSortString_T | None ] = Undefined, timeUnit: Optional[ SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T ] = Undefined, title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined, type: Optional[SchemaBase | StandardType_T] = Undefined, **kwds, ): super().__init__( shorthand=shorthand, aggregate=aggregate, bandPosition=bandPosition, bin=bin, condition=condition, field=field, legend=legend, scale=scale, sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds, ) @with_property_setters
FillOpacity
python
ijl__orjson
test/test_dict.py
{ "start": 79, "end": 5436 }
class ____: def test_dict(self): """ dict """ obj = {"key": "value"} ref = '{"key":"value"}' assert orjson.dumps(obj) == ref.encode("utf-8") assert orjson.loads(ref) == obj def test_dict_duplicate_loads(self): assert orjson.loads(b'{"1":true,"1":false}') == {"1": False} def test_dict_empty(self): obj = [{"key": [{}] * 4096}] * 4096 # type:ignore assert orjson.loads(orjson.dumps(obj)) == obj def test_dict_large_dict(self): """ dict with >512 keys """ obj = {f"key_{idx}": [{}, {"a": [{}, {}, {}]}, {}] for idx in range(513)} # type: ignore assert len(obj) == 513 assert orjson.loads(orjson.dumps(obj)) == obj def test_dict_large_4096(self): """ dict with >4096 keys """ obj = {f"key_{idx}": f"value_{idx}" for idx in range(4097)} assert len(obj) == 4097 assert orjson.loads(orjson.dumps(obj)) == obj def test_dict_large_65536(self): """ dict with >65536 keys """ obj = {f"key_{idx}": f"value_{idx}" for idx in range(65537)} assert len(obj) == 65537 assert orjson.loads(orjson.dumps(obj)) == obj def test_dict_large_keys(self): """ dict with keys too large to cache """ obj = { "keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey": "value", } ref = '{"keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey":"value"}' assert orjson.dumps(obj) == ref.encode("utf-8") assert orjson.loads(ref) == obj def test_dict_unicode(self): """ dict unicode keys """ obj = {"🐈": "value"} ref = b'{"\xf0\x9f\x90\x88":"value"}' assert orjson.dumps(obj) == ref assert orjson.loads(ref) == obj assert orjson.loads(ref)["🐈"] == "value" def test_dict_invalid_key_dumps(self): """ dict invalid key dumps() """ with pytest.raises(orjson.JSONEncodeError): orjson.dumps({1: "value"}) with pytest.raises(orjson.JSONEncodeError): orjson.dumps({b"key": "value"}) def test_dict_invalid_key_loads(self): """ dict invalid key loads() """ with pytest.raises(orjson.JSONDecodeError): orjson.loads('{1:"value"}') with pytest.raises(orjson.JSONDecodeError): orjson.loads('{{"a":true}:true}') def test_dict_similar_keys(self): """ loads() similar keys This was a regression in 3.4.2 caused by using the implementation in wy instead of wyhash. """ assert orjson.loads( '{"cf_status_firefox67": "---", "cf_status_firefox57": "verified"}', ) == {"cf_status_firefox57": "verified", "cf_status_firefox67": "---"} def test_dict_pop_replace_first(self): "Test pop and replace a first key in a dict with other keys." data = {"id": "any", "other": "any"} data.pop("id") assert orjson.dumps(data) == b'{"other":"any"}' data["id"] = "new" assert orjson.dumps(data) == b'{"other":"any","id":"new"}' def test_dict_pop_replace_last(self): "Test pop and replace a last key in a dict with other keys." data = {"other": "any", "id": "any"} data.pop("id") assert orjson.dumps(data) == b'{"other":"any"}' data["id"] = "new" assert orjson.dumps(data) == b'{"other":"any","id":"new"}' def test_dict_pop(self): "Test pop and replace a key in a dict with no other keys." 
data = {"id": "any"} data.pop("id") assert orjson.dumps(data) == b"{}" data["id"] = "new" assert orjson.dumps(data) == b'{"id":"new"}' def test_in_place(self): "Mutate dict in-place" data = {"id": "any", "static": "msg"} data["id"] = "new" assert orjson.dumps(data) == b'{"id":"new","static":"msg"}' def test_dict_0xff(self): "dk_size <= 0xff" data = {str(idx): idx for idx in range(0xFF)} data.pop("112") data["112"] = 1 data["113"] = 2 assert orjson.loads(orjson.dumps(data)) == data def test_dict_0xff_repeated(self): "dk_size <= 0xff repeated" for _ in range(100): data = {str(idx): idx for idx in range(0xFF)} data.pop("112") data["112"] = 1 data["113"] = 2 assert orjson.loads(orjson.dumps(data)) == data def test_dict_0xffff(self): "dk_size <= 0xffff" data = {str(idx): idx for idx in range(0xFFFF)} data.pop("112") data["112"] = 1 data["113"] = 2 assert orjson.loads(orjson.dumps(data)) == data def test_dict_0xffff_repeated(self): "dk_size <= 0xffff repeated" for _ in range(100): data = {str(idx): idx for idx in range(0xFFFF)} data.pop("112") data["112"] = 1 data["113"] = 2 assert orjson.loads(orjson.dumps(data)) == data def test_dict_dict(self): class C: def __init__(self): self.a = 0 self.b = 1 assert orjson.dumps(C().__dict__) == b'{"a":0,"b":1}'
TestDict
python
pennersr__django-allauth
allauth/socialaccount/providers/twentythreeandme/provider.py
{ "start": 307, "end": 827 }
class ____(OAuth2Provider): id = "twentythreeandme" slug = "23andme" name = "23andMe" account_class = TwentyThreeAndMeAccount oauth2_adapter_class = TwentyThreeAndMeOAuth2Adapter def extract_uid(self, data): return data["id"] def get_default_scope(self): scope = ["basic"] return scope def extract_common_fields(self, data): return dict( email=data.get("email"), ) provider_classes = [TwentyThreeAndMeProvider]
TwentyThreeAndMeProvider
python
numba__numba
numba/core/typeconv/typeconv.py
{ "start": 953, "end": 2840 }
class ____(object): # The character codes used by the C/C++ API (_typeconv.cpp) _conversion_codes = {Conversion.safe: ord("s"), Conversion.unsafe: ord("u"), Conversion.promote: ord("p"),} def __init__(self): self._ptr = _typeconv.new_type_manager() self._types = set() def select_overload(self, sig, overloads, allow_unsafe, exact_match_required): sig = [t._code for t in sig] overloads = [[t._code for t in s] for s in overloads] return _typeconv.select_overload(self._ptr, sig, overloads, allow_unsafe, exact_match_required) def check_compatible(self, fromty, toty): if not isinstance(toty, types.Type): raise ValueError("Specified type '%s' (%s) is not a Numba type" % (toty, type(toty))) name = _typeconv.check_compatible(self._ptr, fromty._code, toty._code) conv = Conversion[name] if name is not None else None assert conv is not Conversion.nil return conv def set_compatible(self, fromty, toty, by): code = self._conversion_codes[by] _typeconv.set_compatible(self._ptr, fromty._code, toty._code, code) # Ensure the types don't die, otherwise they may be recreated with # other type codes and pollute the hash table. self._types.add(fromty) self._types.add(toty) def set_promote(self, fromty, toty): self.set_compatible(fromty, toty, Conversion.promote) def set_unsafe_convert(self, fromty, toty): self.set_compatible(fromty, toty, Conversion.unsafe) def set_safe_convert(self, fromty, toty): self.set_compatible(fromty, toty, Conversion.safe) def get_pointer(self): return _typeconv.get_pointer(self._ptr)
TypeManager
python
jina-ai__jina
jina/parsers/orchestrate/pod.py
{ "start": 363, "end": 11003 }
class ____: """Data Class representing possible parameters for each pod type""" runtime_cls: str role_type: PodRoleType POD_PARAMS_MAPPING: Dict[str, PodTypeParams] = { 'worker': PodTypeParams(runtime_cls='WorkerRuntime', role_type=PodRoleType.WORKER), 'head': PodTypeParams(runtime_cls='HeadRuntime', role_type=PodRoleType.HEAD), 'gateway': PodTypeParams( runtime_cls='GatewayRuntime', role_type=PodRoleType.GATEWAY ), } def mixin_pod_parser(parser, pod_type: str = 'worker'): """Mixing in arguments required by :class:`Pod` into the given parser. :param parser: the parser instance to which we add arguments :param pod_type: the pod_type configured by the parser. Can be either 'worker' for WorkerRuntime or 'gateway' for GatewayRuntime """ gp = add_arg_group(parser, title='Pod') gp.add_argument( '--runtime-cls', type=str, default=POD_PARAMS_MAPPING[pod_type].runtime_cls, help='The runtime class to run inside the Pod', ) gp.add_argument( '--timeout-ready', type=int, default=600000, help='The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting ' 'forever', ) gp.add_argument( '--env', action=KVAppendAction, metavar='KEY: VALUE', nargs='*', help='The map of environment variables that are available inside runtime', ) gp.add_argument( '--env-from-secret', action=KVAppendAction, metavar='KEY: VALUE', nargs='*', help=( 'The map of environment variables that are read from kubernetes cluster secrets' if _SHOW_ALL_ARGS else argparse.SUPPRESS ), ) gp.add_argument( '--image-pull-secrets', type=str, nargs='+', default=None, help=( 'List of ImagePullSecrets that the Kubernetes Pods need to have access to in order to pull the image. Used in `to_kubernetes_yaml`' if _SHOW_ALL_ARGS else argparse.SUPPRESS ), ) # hidden CLI used for internal only gp.add_argument( '--shard-id', type=int, default=0, help=( 'defines the shard identifier for the executor. It is used as suffix for the workspace path of the executor`' if _SHOW_ALL_ARGS else argparse.SUPPRESS ), ) gp.add_argument( '--pod-role', type=PodRoleType.from_string, choices=list(PodRoleType), default=POD_PARAMS_MAPPING[pod_type].role_type, help=( 'The role of this Pod in a Deployment' if _SHOW_ALL_ARGS else argparse.SUPPRESS ), ) gp.add_argument( '--noblock-on-start', action='store_true', default=False, help=( 'If set, starting a Pod/Deployment does not block the thread/process. It then relies on ' '`wait_start_success` at outer function for the postpone check.' if _SHOW_ALL_ARGS else argparse.SUPPRESS ), ) gp.add_argument( '--floating', action='store_true', default=False, help='If set, the current Pod/Deployment can not be further chained, ' 'and the next `.add()` will chain after the last Pod/Deployment not this current one.', ) gp.add_argument( '--replica-id', type=int, default=0, help=( 'defines the replica identifier for the executor. It is used when `stateful` is set to true' if _SHOW_ALL_ARGS else argparse.SUPPRESS ), ) if pod_type != 'gateway': gp.add_argument( '--reload', action='store_true', default=False, help='If set, the Executor will restart while serving if YAML configuration source or Executor modules ' 'are changed. If YAML configuration is changed, the whole deployment is reloaded and new ' 'processes will be restarted. 
If only Python modules of the Executor have changed, they will be ' 'reloaded to the interpreter without restarting process.', ) gp.add_argument( '--install-requirements', action='store_true', default=False, help='If set, try to install `requirements.txt` from the local Executor if exists in the Executor folder. If using Hub, install `requirements.txt` in the Hub Executor bundle to local.', ) else: gp.add_argument( '--reload', action='store_true', default=False, help='If set, the Gateway will restart while serving if YAML configuration source is changed.', ) mixin_pod_runtime_args_parser(gp, pod_type=pod_type) mixin_stateful_parser(gp) def mixin_pod_runtime_args_parser(arg_group, pod_type='worker'): """Mixin for runtime arguments of pods :param arg_group: the parser instance or args group to which we add arguments :param pod_type: the pod_type configured by the parser. Can be either 'worker' for WorkerRuntime or 'gateway' for GatewayRuntime """ alias = ['--port', '--ports'] if pod_type != 'gateway': port_description = ( 'The port for input data to bind to, default is a random port between [49152, 65535]. ' 'In the case of an external Executor (`--external` or `external=True`) this can be a list of ports. ' 'Then, every resulting address will be considered as one replica of the Executor.' ) else: port_description = ( 'The port for input data to bind the gateway server to, by default, random ports between range [49152, 65535] will be assigned. ' 'The port argument can be either 1 single value in case only 1 protocol is used or multiple values when ' 'many protocols are used.' ) alias.extend(['--port-expose', '--port-in']) arg_group.add_argument( *alias, action=CastToIntAction, type=str, nargs='+', default=[random_port()], help=port_description, ) server_name = 'Gateway' if pod_type == 'gateway' else 'Executor' arg_group.add_argument( '--protocol', '--protocols', nargs='+', type=ProtocolType.from_string, choices=list(ProtocolType), default=[ProtocolType.GRPC], help=f'Communication protocol of the server exposed by the {server_name}. This can be a single value or a list of protocols, depending on your chosen Gateway. Choose the convenient protocols from: {[protocol.to_string() for protocol in list(ProtocolType)]}.', ) arg_group.add_argument( '--provider', type=ProviderType.from_string, choices=list(ProviderType), default=[ProviderType.NONE], help=f'If set, Executor is translated to a custom container compatible with the chosen provider. Choose the convenient providers from: {[provider.to_string() for provider in list(ProviderType)]}.', ) arg_group.add_argument( '--provider-endpoint', type=str, default=None, help=f'If set, Executor endpoint will be explicitly chosen and used in the custom container operated by the provider.', ) arg_group.add_argument( '--monitoring', action='store_true', default=False, help='If set, spawn an http server with a prometheus endpoint to expose metrics', ) arg_group.add_argument( '--port-monitoring', type=str, nargs='+', default=[random_port()], action=CastToIntAction, dest='port_monitoring', help=f'The port on which the prometheus server is exposed, default is a random port between [49152, 65535]', ) arg_group.add_argument( '--retries', type=int, default=-1, dest='retries', help=f'Number of retries per gRPC call. 
If <0 it defaults to max(3, num_replicas)', ) arg_group.add_argument( '--tracing', action='store_true', default=False, help='If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. ' 'Otherwise a no-op implementation will be provided.', ) arg_group.add_argument( '--traces-exporter-host', type=str, default=None, help='If tracing is enabled, this hostname will be used to configure the trace exporter agent.', ) arg_group.add_argument( '--traces-exporter-port', type=int, default=None, help='If tracing is enabled, this port will be used to configure the trace exporter agent.', ) arg_group.add_argument( '--metrics', action='store_true', default=False, help='If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. ' 'Otherwise a no-op implementation will be provided.', ) arg_group.add_argument( '--metrics-exporter-host', type=str, default=None, help='If tracing is enabled, this hostname will be used to configure the metrics exporter agent.', ) arg_group.add_argument( '--metrics-exporter-port', type=int, default=None, help='If tracing is enabled, this port will be used to configure the metrics exporter agent.', ) def mixin_stateful_parser(parser): """Mixing in arguments required to work with Stateful Executors into the given parser. :param parser: the parser instance to which we add arguments """ gp = add_arg_group(parser, title='Stateful Executor') gp.add_argument( '--stateful', action='store_true', default=False, help='If set, start consensus module to make sure write operations are properly replicated between all the replicas', ) gp.add_argument( '--peer-ports', type=str, default=None, help='When using --stateful option, it is required to tell the cluster what are the cluster configuration. This is important' 'when the Deployment is restarted. It indicates the ports to which each replica of the cluster binds.' ' It is expected to be a single list if shards == 1 or a dictionary if shards > 1.', action=CastPeerPorts, nargs='+', )
PodTypeParams
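The record above keys runtime defaults by pod type ('worker', 'head', 'gateway') and mixes the corresponding arguments into an argparse parser. Below is a minimal standalone sketch of that same pattern using only the standard library: the runtime class names, the `--runtime-cls`/`--timeout-ready` flags, and the 600000 ms default are taken from the record, while the single-field dataclass and the plain `add_argument_group` call are simplifications for illustration, not the project's actual helpers.

```python
import argparse
from dataclasses import dataclass


# Simplified stand-in for the PodTypeParams mapping shown in the record above.
@dataclass
class PodTypeParams:
    runtime_cls: str


POD_PARAMS_MAPPING = {
    'worker': PodTypeParams(runtime_cls='WorkerRuntime'),
    'gateway': PodTypeParams(runtime_cls='GatewayRuntime'),
}


def mixin_pod_parser(parser: argparse.ArgumentParser, pod_type: str = 'worker') -> None:
    """Add pod-related arguments whose defaults depend on the pod type."""
    gp = parser.add_argument_group('Pod')
    gp.add_argument(
        '--runtime-cls',
        type=str,
        default=POD_PARAMS_MAPPING[pod_type].runtime_cls,
        help='The runtime class to run inside the Pod',
    )
    gp.add_argument('--timeout-ready', type=int, default=600000)


parser = argparse.ArgumentParser()
mixin_pod_parser(parser, pod_type='gateway')
args = parser.parse_args([])
print(args.runtime_cls)  # -> GatewayRuntime
```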
python
facelessuser__soupsieve
tests/test_level4/test_local_link.py
{ "start": 55, "end": 729 }
class ____(util.TestCase): """Test local link selectors.""" MARKUP = """ <a id="1" href="./somelink/index.html">Link</a> <a id="2" href="http://somelink.com/somelink/index.html">Another link</a> """ def test_local_link(self): """Test local link (matches nothing).""" self.assert_selector( self.MARKUP, "a:local-link", [], flags=util.HTML ) def test_not_local_link(self): """Test not local link.""" self.assert_selector( self.MARKUP, "a:not(:local-link)", ["1", "2"], flags=util.HTML )
TestLocalLink
python
Lightning-AI__lightning
tests/tests_pytorch/test_cli.py
{ "start": 43932, "end": 48609 }
class ____(BoringDataModule): def __init__(self, batch_size: int = 32, num_workers: int = 4): super().__init__() self.save_hyperparameters() self.batch_size = batch_size self.num_workers = num_workers def test_lightning_cli_save_hyperparameters_merge(cleandir): config = { "model": { "class_path": f"{__name__}.TestModelSaveHparams", }, "data": { "class_path": f"{__name__}.TestDataSaveHparams", }, } with mock.patch("sys.argv", ["any.py", "fit", f"--config={json.dumps(config)}", "--trainer.max_epochs=1"]): cli = LightningCLI(auto_configure_optimizers=False) assert set(cli.model.hparams) == {"optimizer", "scheduler", "activation", "_instantiator", "_class_path"} assert set(cli.datamodule.hparams) == {"batch_size", "num_workers", "_instantiator", "_class_path"} @pytest.mark.parametrize("fn", [fn.value for fn in TrainerFn]) def test_lightning_cli_trainer_fn(fn): class TestCLI(LightningCLI): def __init__(self, *args, **kwargs): self.called = [] super().__init__(*args, **kwargs) def before_fit(self): self.called.append("before_fit") def fit(self, **_): self.called.append("fit") def after_fit(self): self.called.append("after_fit") def before_validate(self): self.called.append("before_validate") def validate(self, **_): self.called.append("validate") def after_validate(self): self.called.append("after_validate") def before_test(self): self.called.append("before_test") def test(self, **_): self.called.append("test") def after_test(self): self.called.append("after_test") def before_predict(self): self.called.append("before_predict") def predict(self, **_): self.called.append("predict") def after_predict(self): self.called.append("after_predict") with mock.patch("sys.argv", ["any.py", fn]): cli = TestCLI(BoringModel) assert cli.called == [f"before_{fn}", fn, f"after_{fn}"] def test_lightning_cli_subcommands(): subcommands = LightningCLI.subcommands() trainer = Trainer() for subcommand, exclude in subcommands.items(): fn = getattr(trainer, subcommand) parameters = list(inspect.signature(fn).parameters) for e in exclude: # if this fails, it's because the parameter has been removed from the associated `Trainer` function # and the `LightningCLI` subcommand exclusion list needs to be updated assert e in parameters @pytest.mark.skipif(compare_version("jsonargparse", operator.lt, "4.21.3"), reason="vulnerability with failing imports") def test_lightning_cli_custom_subcommand(): class TestTrainer(Trainer): def foo(self, model: LightningModule, x: int, y: float = 1.0): """Sample extra function. Args: model: A model x: The x y: The y """ class TestCLI(LightningCLI): @staticmethod def subcommands(): subcommands = LightningCLI.subcommands() subcommands["foo"] = {"model"} return subcommands out = StringIO() with mock.patch("sys.argv", ["any.py", "-h"]), redirect_stdout(out), pytest.raises(SystemExit): TestCLI(BoringModel, trainer_class=TestTrainer) out = out.getvalue() assert "Sample extra function." 
in out assert "{fit,validate,test,predict,foo}" in out out = StringIO() with mock.patch("sys.argv", ["any.py", "foo", "-h"]), redirect_stdout(out), pytest.raises(SystemExit): TestCLI(BoringModel, trainer_class=TestTrainer) out = out.getvalue() assert "A model" not in out assert "Sample extra function:" in out assert "--x X" in out assert "The x (required, type: int)" in out assert "--y Y" in out assert "The y (type: float, default: 1.0)" in out def test_lightning_cli_run(cleandir): with mock.patch("sys.argv", ["any.py"]): cli = LightningCLI(BoringModel, run=False) assert cli.trainer.global_step == 0 assert isinstance(cli.trainer, Trainer) assert isinstance(cli.model, LightningModule) with mock.patch("sys.argv", ["any.py", "fit"]): cli = LightningCLI(BoringModel, trainer_defaults={"max_steps": 1, "max_epochs": 1}) assert cli.trainer.global_step == 1 assert isinstance(cli.trainer, Trainer) assert isinstance(cli.model, LightningModule)
TestDataSaveHparams
python
doocs__leetcode
solution/1300-1399/1395.Count Number of Teams/Solution2.py
{ "start": 0, "end": 362 }
class ____: def __init__(self, n: int): self.n = n self.c = [0] * (n + 1) def update(self, x: int, v: int): while x <= self.n: self.c[x] += v x += x & -x def query(self, x: int) -> int: s = 0 while x: s += self.c[x] x -= x & -x return s
BinaryIndexedTree
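The record above is a Fenwick (binary indexed) tree: point updates and prefix-sum queries both run in O(log n) by repeatedly adding or stripping the lowest set bit `x & -x`. A small self-contained usage sketch of that record's class follows; the index/value pairs are made up purely for illustration.

```python
class BinaryIndexedTree:
    # Same structure as the record above (masked name restored).
    def __init__(self, n: int):
        self.n = n
        self.c = [0] * (n + 1)

    def update(self, x: int, v: int):
        # climb to parents by adding lowbit(x) = x & -x
        while x <= self.n:
            self.c[x] += v
            x += x & -x

    def query(self, x: int) -> int:
        # accumulate the prefix sum over [1..x] by stripping lowbit(x)
        s = 0
        while x:
            s += self.c[x]
            x -= x & -x
        return s


tree = BinaryIndexedTree(8)
for i, v in [(1, 3), (4, 2), (6, 5)]:  # point updates (1-indexed)
    tree.update(i, v)
print(tree.query(4))                   # prefix sum over [1..4] -> 5
print(tree.query(8) - tree.query(3))   # range sum over [4..8] -> 7
```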
python
sqlalchemy__sqlalchemy
test/sql/test_compare.py
{ "start": 5064, "end": 5242 }
class ____: x = 10 y = 15 dml.Insert.argument_for("sqlite", "foo", None) dml.Update.argument_for("sqlite", "foo", None) dml.Delete.argument_for("sqlite", "foo", None)
Foo
python
sanic-org__sanic
sanic/pages/base.py
{ "start": 212, "end": 2301 }
class ____(ABC, metaclass=CSS): # no cov """Base page for Sanic pages.""" TITLE = "Sanic" HEADING = None CSS: str doc: Builder def __init__(self, debug: bool = True) -> None: self.debug = debug @property def style(self) -> str: """Returns the CSS for the page. Returns: str: The CSS for the page. """ return self.CSS def render(self) -> str: """Renders the page. Returns: str: The rendered page. """ self.doc = Document(self.TITLE, lang="en", id="sanic") self._head() self._body() self._foot() return str(self.doc) def _head(self) -> None: self.doc.style(HTML(self.style)) with self.doc.header: self.doc.div(self.HEADING or self.TITLE) def _foot(self) -> None: with self.doc.footer: self.doc.div("powered by") with self.doc.div: self._sanic_logo() if self.debug: self.doc.div(f"Version {VERSION}") with self.doc.div: for idx, (title, href) in enumerate( ( ("Docs", "https://sanic.dev"), ("Help", "https://sanic.dev/en/help.html"), ("GitHub", "https://github.com/sanic-org/sanic"), ) ): if idx > 0: self.doc(" | ") self.doc.a( title, href=href, target="_blank", referrerpolicy="no-referrer", ) self.doc.div("DEBUG mode") @abstractmethod def _body(self) -> None: ... def _sanic_logo(self) -> None: self.doc.a( HTML(SVG_LOGO_SIMPLE), href="https://sanic.dev", target="_blank", referrerpolicy="no-referrer", )
BasePage
python
facebook__pyre-check
client/language_server/protocol.py
{ "start": 1341, "end": 1468 }
class ____(json_rpc.JSONRPCException): @override def error_code(self) -> int: return -32800
RequestCancelledError
python
huggingface__transformers
src/transformers/models/glm4/configuration_glm4.py
{ "start": 786, "end": 8112 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Glm4Model`]. It is used to instantiate an Glm4 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Glm4-4-9b-chat. e.g. [THUDM/GLM-4-9B-0414](https://huggingface.co/THUDM/GLM-4-9B-0414) Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 151552): Vocabulary size of the Glm4 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Glm4Model`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 13696): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 40): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 2): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. head_dim (`int`, *optional*, defaults to 128): The attention head dimension. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The legacy activation function. It is overwritten by the `hidden_activation`. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 131072): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1.5625e-07): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_parameters (`RopeParameters`, *optional*): Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE with longer `max_position_embeddings`. pad_token_id (`int`, *optional*, defaults to 151329): Padding token id. eos_token_id (`int` | `list`, *optional*, defaults to `[151329, 151336, 151338]`): End of stream token id. bos_token_id (`int`, *optional*): Beginning of stream token id. 
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `True`): Whether to use a bias in the query, key, value and output projection layers during self-attention. ```python >>> from transformers import Glm4Model, Glm4Config >>> # Initializing a Glm4 glm4-4-9b-chat style configuration >>> configuration = Glm4Config() >>> # Initializing a model from the glm4-4-9b-chat style configuration >>> model = Glm4Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "glm4" keys_to_ignore_at_inference = ["past_key_values"] base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_up_proj": "colwise_rep", # we need to replicate here due to the `chunk` operation "layers.*.mlp.down_proj": "rowwise_rep", # we need to replicate here due to the `chunk` operation } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size: Optional[int] = 151552, hidden_size: Optional[int] = 4096, intermediate_size: Optional[int] = 13696, num_hidden_layers: Optional[int] = 40, num_attention_heads: Optional[int] = 32, num_key_value_heads: Optional[int] = 2, head_dim: Optional[int] = 128, hidden_act: Optional[str] = "silu", attention_dropout: Optional[float] = 0.0, max_position_embeddings: Optional[int] = 131072, initializer_range: Optional[float] = 0.02, rms_norm_eps: Optional[float] = 0.00000015625, use_cache: Optional[bool] = True, tie_word_embeddings: Optional[bool] = False, rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None, pad_token_id: Optional[int] = 151329, eos_token_id: Optional[list[int]] = [151329, 151336, 151338], bos_token_id: Optional[int] = None, attention_bias: Optional[bool] = True, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.head_dim = head_dim self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.rope_parameters = rope_parameters kwargs.setdefault("partial_rotary_factor", 0.5) # assign default for BC super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) __all__ = ["Glm4Config"]
Glm4Config
python
scrapy__scrapy
tests/test_request_dict.py
{ "start": 6211, "end": 6328 }
class ____: def __mixin_callback(self, response): # pylint: disable=unused-private-member pass
SpiderMixin
python
huggingface__transformers
src/transformers/models/esm/modeling_esmfold.py
{ "start": 9511, "end": 10665 }
class ____(nn.Module): def __init__(self, c_in, eps=1e-5): super().__init__() self.c_in = (c_in,) self.eps = eps self.weight = nn.Parameter(torch.ones(c_in)) self.bias = nn.Parameter(torch.zeros(c_in)) def forward(self, x): d = x.dtype if d is torch.bfloat16 and not is_deepspeed_initialized(): with torch.autocast(device_type="cuda", enabled=False): out = nn.functional.layer_norm(x, self.c_in, self.weight.to(dtype=d), self.bias.to(dtype=d), self.eps) else: out = nn.functional.layer_norm(x, self.c_in, self.weight, self.bias, self.eps) return out @torch.jit.ignore def softmax_no_cast(t: torch.Tensor, dim: int = -1) -> torch.Tensor: """ Softmax, but without automatic casting to fp32 when the input is of type bfloat16 """ d = t.dtype if d is torch.bfloat16 and not is_deepspeed_initialized(): with torch.autocast(device_type="cuda", enabled=False): s = torch.nn.functional.softmax(t, dim=dim) else: s = torch.nn.functional.softmax(t, dim=dim) return s
EsmFoldLayerNorm
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/raiser/package.py
{ "start": 234, "end": 1218 }
class ____(Package): """A package that can raise a built-in exception of any kind with any message """ homepage = "http://www.example.com" url = "http://www.example.com/a-1.0.tar.gz" version("1.0", md5="0123456789abcdef0123456789abcdef") version("2.0", md5="abcdef0123456789abcdef0123456789") variant( "exc_type", values=lambda x: isinstance(x, str), default="RuntimeError", description="type of the exception to be raised", multi=False, ) variant( "msg", values=lambda x: isinstance(x, str), default="Unknown Exception", description="message that will be tied to the exception", multi=False, ) def install(self, spec, prefix): print("Raiser will raise ") exc_typename = self.spec.variants["exc_type"].value exc_type = getattr(builtins, exc_typename) msg = self.spec.variants["msg"].value raise exc_type(msg)
Raiser
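The core trick in the Raiser record is resolving a built-in exception class from its name with `getattr(builtins, name)` and instantiating it with a configured message. A minimal sketch of just that lookup follows; the `make_exception` helper is hypothetical, while the default name and message come from the record's variants.

```python
import builtins


def make_exception(exc_typename: str, msg: str) -> BaseException:
    # Same lookup the record's install() performs: resolve a built-in
    # exception class by name, then instantiate it with the given message.
    exc_type = getattr(builtins, exc_typename)
    return exc_type(msg)


err = make_exception("RuntimeError", "Unknown Exception")
print(type(err).__name__, err)  # RuntimeError Unknown Exception
```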
python
tornadoweb__tornado
tornado/test/auth_test.py
{ "start": 5656, "end": 5749 }
class ____(RequestHandler): def get(self): self.write("{}")
FacebookServerMeHandler
python
huggingface__transformers
src/transformers/models/deepseek_v3/modular_deepseek_v3.py
{ "start": 3891, "end": 4052 }
class ____(MixtralExperts): def __init__(self, config): super().__init__(config) self.num_experts = config.num_local_experts
DeepseekV3NaiveMoe
python
google__pytype
pytype/analyze.py
{ "start": 639, "end": 4433 }
class ____: context: context.Context ast: pytd.TypeDeclUnit | None ast_deps: pytd.TypeDeclUnit | None def check_types( src, options, loader, init_maximum_depth=INIT_MAXIMUM_DEPTH, maximum_depth=None, ): """Verify the Python code.""" ctx = context.Context(options, loader, src=src) loc, defs = ctx.vm.run_program(src, options.input, init_maximum_depth) snapshotter = metrics.get_metric("memory", metrics.Snapshot) snapshotter.take_snapshot("analyze:check_types:tracer") if maximum_depth is None: maximum_depth = ( QUICK_CHECK_MAXIMUM_DEPTH if options.quick else MAXIMUM_DEPTH ) ctx.vm.analyze(loc, defs, maximum_depth=maximum_depth) snapshotter.take_snapshot("analyze:check_types:post") _maybe_output_debug(options, ctx.program) return Analysis(ctx, None, None) def infer_types( src, options, loader, init_maximum_depth=INIT_MAXIMUM_DEPTH, maximum_depth=None, ): """Given Python source return its types. Args: src: A string containing Python source code. options: config.Options object loader: A load_pytd.Loader instance to load PYI information. init_maximum_depth: Depth of analysis during module loading. maximum_depth: Depth of the analysis. Default: unlimited. Returns: A tuple of (ast: TypeDeclUnit, builtins: TypeDeclUnit) Raises: AssertionError: In case of a bad parameter combination. """ ctx = context.Context(options, loader, src=src) loc, defs = ctx.vm.run_program(src, options.input, init_maximum_depth) log.info("===Done running definitions and module-level code===") snapshotter = metrics.get_metric("memory", metrics.Snapshot) snapshotter.take_snapshot("analyze:infer_types:tracer") if maximum_depth is None: if not options.quick: maximum_depth = MAXIMUM_DEPTH elif options.analyze_annotated: # Since there's no point in analyzing annotated functions for inference, # the presence of this option means that the user wants checking, too. maximum_depth = QUICK_CHECK_MAXIMUM_DEPTH else: maximum_depth = QUICK_INFER_MAXIMUM_DEPTH ctx.exitpoint = ctx.vm.analyze(loc, defs, maximum_depth) snapshotter.take_snapshot("analyze:infer_types:post") ast = ctx.vm.compute_types(defs) ast = ctx.loader.resolve_ast(ast) if ctx.vm.has_unknown_wildcard_imports or any( a in defs for a in abstract_utils.DYNAMIC_ATTRIBUTE_MARKERS ): if "__getattr__" not in ast: ast = pytd_utils.Concat(ast, ctx.loader.get_default_ast()) # If merged with other if statement, triggers a ValueError: Unresolved class # when attempts to load from the protocols file if options.protocols: protocols_pytd = ctx.loader.import_name("protocols") else: protocols_pytd = None deps_pytd = ctx.loader.concat_all() # Insert type parameters, where appropriate ast = ast.Visit(visitors.CreateTypeParametersForSignatures()) if options.protocols: log.info( "=========== PyTD to solve =============\n%s", pytd_utils.Print(ast) ) ast = convert_structural.convert_pytd(ast, deps_pytd, protocols_pytd) else: log.info("Solving is turned off. Discarding call traces.") # Rename remaining "~unknown" to "?" ast = ast.Visit(visitors.RemoveUnknownClasses()) # Remove "~list" etc.: ast = convert_structural.extract_local(ast) _maybe_output_debug(options, ctx.program) return Analysis(ctx, ast, deps_pytd) def _maybe_output_debug(options, program): """Maybe emit debugging output.""" if options.output_debug: text = debug.program_to_text(program) if options.output_debug == "-": log.info("=========== Program Dump =============\n%s", text) else: with options.open_function(options.output_debug, "w") as fi: fi.write(text)
Analysis
python
ray-project__ray
rllib/algorithms/ppo/ppo.py
{ "start": 1899, "end": 14444 }
class ____(AlgorithmConfig): """Defines a configuration class from which a PPO Algorithm can be built. .. testcode:: from ray.rllib.algorithms.ppo import PPOConfig config = PPOConfig() config.environment("CartPole-v1") config.env_runners(num_env_runners=1) config.training( gamma=0.9, lr=0.01, kl_coeff=0.3, train_batch_size_per_learner=256 ) # Build a Algorithm object from the config and run 1 training iteration. algo = config.build() algo.train() .. testcode:: from ray.rllib.algorithms.ppo import PPOConfig from ray import tune config = ( PPOConfig() # Set the config object's env. .environment(env="CartPole-v1") # Update the config object's training parameters. .training( lr=0.001, clip_param=0.2 ) ) tune.Tuner( "PPO", run_config=tune.RunConfig(stop={"training_iteration": 1}), param_space=config, ).fit() .. testoutput:: :hide: ... """ def __init__(self, algo_class=None): """Initializes a PPOConfig instance.""" self.exploration_config = { # The Exploration class to use. In the simplest case, this is the name # (str) of any class present in the `rllib.utils.exploration` package. # You can also provide the python class directly or the full location # of your class (e.g. "ray.rllib.utils.exploration.epsilon_greedy. # EpsilonGreedy"). "type": "StochasticSampling", # Add constructor kwargs here (if any). } super().__init__(algo_class=algo_class or PPO) # fmt: off # __sphinx_doc_begin__ self.lr = 5e-5 self.rollout_fragment_length = "auto" self.train_batch_size = 4000 # PPO specific settings: self.use_critic = True self.use_gae = True self.num_epochs = 30 self.minibatch_size = 128 self.shuffle_batch_per_epoch = True self.lambda_ = 1.0 self.use_kl_loss = True self.kl_coeff = 0.2 self.kl_target = 0.01 self.vf_loss_coeff = 1.0 self.entropy_coeff = 0.0 self.clip_param = 0.3 self.vf_clip_param = 10.0 self.grad_clip = None # Override some of AlgorithmConfig's default values with PPO-specific values. self.num_env_runners = 2 # __sphinx_doc_end__ # fmt: on self.model["vf_share_layers"] = False # @OldAPIStack self.entropy_coeff_schedule = None # @OldAPIStack self.lr_schedule = None # @OldAPIStack # Deprecated keys. self.sgd_minibatch_size = DEPRECATED_VALUE self.vf_share_layers = DEPRECATED_VALUE @override(AlgorithmConfig) def get_default_rl_module_spec(self) -> RLModuleSpec: if self.framework_str == "torch": from ray.rllib.algorithms.ppo.torch.default_ppo_torch_rl_module import ( DefaultPPOTorchRLModule, ) return RLModuleSpec(module_class=DefaultPPOTorchRLModule) else: raise ValueError( f"The framework {self.framework_str} is not supported. " "Use either 'torch' or 'tf2'." ) @override(AlgorithmConfig) def get_default_learner_class(self) -> Union[Type["Learner"], str]: if self.framework_str == "torch": from ray.rllib.algorithms.ppo.torch.ppo_torch_learner import ( PPOTorchLearner, ) return PPOTorchLearner elif self.framework_str in ["tf2", "tf"]: raise ValueError( "TensorFlow is no longer supported on the new API stack! " "Use `framework='torch'`." ) else: raise ValueError( f"The framework {self.framework_str} is not supported. " "Use `framework='torch'`." 
) @override(AlgorithmConfig) def training( self, *, use_critic: Optional[bool] = NotProvided, use_gae: Optional[bool] = NotProvided, lambda_: Optional[float] = NotProvided, use_kl_loss: Optional[bool] = NotProvided, kl_coeff: Optional[float] = NotProvided, kl_target: Optional[float] = NotProvided, vf_loss_coeff: Optional[float] = NotProvided, entropy_coeff: Optional[float] = NotProvided, entropy_coeff_schedule: Optional[List[List[Union[int, float]]]] = NotProvided, clip_param: Optional[float] = NotProvided, vf_clip_param: Optional[float] = NotProvided, grad_clip: Optional[float] = NotProvided, # @OldAPIStack lr_schedule: Optional[List[List[Union[int, float]]]] = NotProvided, # Deprecated. vf_share_layers=DEPRECATED_VALUE, **kwargs, ) -> Self: """Sets the training related configuration. Args: use_critic: Should use a critic as a baseline (otherwise don't use value baseline; required for using GAE). use_gae: If true, use the Generalized Advantage Estimator (GAE) with a value function, see https://arxiv.org/pdf/1506.02438.pdf. lambda_: The lambda parameter for General Advantage Estimation (GAE). Defines the exponential weight used between actually measured rewards vs value function estimates over multiple time steps. Specifically, `lambda_` balances short-term, low-variance estimates against long-term, high-variance returns. A `lambda_` of 0.0 makes the GAE rely only on immediate rewards (and vf predictions from there on, reducing variance, but increasing bias), while a `lambda_` of 1.0 only incorporates vf predictions at the truncation points of the given episodes or episode chunks (reducing bias but increasing variance). use_kl_loss: Whether to use the KL-term in the loss function. kl_coeff: Initial coefficient for KL divergence. kl_target: Target value for KL divergence. vf_loss_coeff: Coefficient of the value function loss. IMPORTANT: you must tune this if you set vf_share_layers=True inside your model's config. entropy_coeff: The entropy coefficient (float) or entropy coefficient schedule in the format of [[timestep, coeff-value], [timestep, coeff-value], ...] In case of a schedule, intermediary timesteps will be assigned to linearly interpolated coefficient values. A schedule config's first entry must start with timestep 0, i.e.: [[0, initial_value], [...]]. clip_param: The PPO clip parameter. vf_clip_param: Clip param for the value function. Note that this is sensitive to the scale of the rewards. If your expected V is large, increase this. grad_clip: If specified, clip the global norm of gradients by this amount. Returns: This updated AlgorithmConfig object. """ # Pass kwargs onto super's `training()` method. super().training(**kwargs) if use_critic is not NotProvided: self.use_critic = use_critic # TODO (Kourosh) This is experimental. # Don't forget to remove .use_critic from algorithm config. if use_gae is not NotProvided: self.use_gae = use_gae if lambda_ is not NotProvided: self.lambda_ = lambda_ if use_kl_loss is not NotProvided: self.use_kl_loss = use_kl_loss if kl_coeff is not NotProvided: self.kl_coeff = kl_coeff if kl_target is not NotProvided: self.kl_target = kl_target if vf_loss_coeff is not NotProvided: self.vf_loss_coeff = vf_loss_coeff if entropy_coeff is not NotProvided: self.entropy_coeff = entropy_coeff if clip_param is not NotProvided: self.clip_param = clip_param if vf_clip_param is not NotProvided: self.vf_clip_param = vf_clip_param if grad_clip is not NotProvided: self.grad_clip = grad_clip # TODO (sven): Remove these once new API stack is only option for PPO. 
if lr_schedule is not NotProvided: self.lr_schedule = lr_schedule if entropy_coeff_schedule is not NotProvided: self.entropy_coeff_schedule = entropy_coeff_schedule return self @override(AlgorithmConfig) def validate(self) -> None: # Call super's validation method. super().validate() # Synchronous sampling, on-policy/PPO algos -> Check mismatches between # `rollout_fragment_length` and `train_batch_size_per_learner` to avoid user # confusion. # TODO (sven): Make rollout_fragment_length a property and create a private # attribute to store (possibly) user provided value (or "auto") in. Deprecate # `self.get_rollout_fragment_length()`. self.validate_train_batch_size_vs_rollout_fragment_length() # SGD minibatch size must be smaller than train_batch_size (b/c # we subsample a batch of `minibatch_size` from the train-batch for # each `num_epochs`). if ( not self.enable_rl_module_and_learner and self.minibatch_size > self.train_batch_size ): self._value_error( f"`minibatch_size` ({self.minibatch_size}) must be <= " f"`train_batch_size` ({self.train_batch_size}). In PPO, the train batch" f" will be split into {self.minibatch_size} chunks, each of which " f"is iterated over (used for updating the policy) {self.num_epochs} " "times." ) elif self.enable_rl_module_and_learner: mbs = self.minibatch_size tbs = self.train_batch_size_per_learner or self.train_batch_size if isinstance(mbs, int) and isinstance(tbs, int) and mbs > tbs: self._value_error( f"`minibatch_size` ({mbs}) must be <= " f"`train_batch_size_per_learner` ({tbs}). In PPO, the train batch" f" will be split into {mbs} chunks, each of which is iterated over " f"(used for updating the policy) {self.num_epochs} times." ) # Episodes may only be truncated (and passed into PPO's # `postprocessing_fn`), iff generalized advantage estimation is used # (value function estimate at end of truncated episode to estimate # remaining value). if ( not self.in_evaluation and self.batch_mode == "truncate_episodes" and not self.use_gae ): self._value_error( "Episode truncation is not supported without a value " "function (to estimate the return at the end of the truncated" " trajectory). Consider setting " "batch_mode=complete_episodes." ) # New API stack checks. if self.enable_rl_module_and_learner: # `lr_schedule` checking. if self.lr_schedule is not None: self._value_error( "`lr_schedule` is deprecated and must be None! Use the " "`lr` setting to setup a schedule." ) if self.entropy_coeff_schedule is not None: self._value_error( "`entropy_coeff_schedule` is deprecated and must be None! Use the " "`entropy_coeff` setting to setup a schedule." ) Scheduler.validate( fixed_value_or_schedule=self.entropy_coeff, setting_name="entropy_coeff", description="entropy coefficient", ) if isinstance(self.entropy_coeff, float) and self.entropy_coeff < 0.0: self._value_error("`entropy_coeff` must be >= 0.0") @property @override(AlgorithmConfig) def _model_config_auto_includes(self) -> Dict[str, Any]: return super()._model_config_auto_includes | {"vf_share_layers": False}
PPOConfig
python
pennersr__django-allauth
allauth/headless/spec/views.py
{ "start": 1186, "end": 1315 }
class ____(TemplateView): def get_template_names(self): return [app_settings.SPECIFICATION_TEMPLATE_NAME]
OpenAPIHTMLView
python
pytorch__pytorch
test/inductor/test_torchinductor_strided_blocks.py
{ "start": 3390, "end": 6912 }
class ____(InductorTestCase): block_descriptor_constructor_str = "tl.make_block_ptr" def _discontiguous_tensor( self, view_size: tuple[int, ...], device: Union[torch.device, str] ) -> torch.Tensor: """ Create a padded tensor of the given size. The strides correspond to a tensor that is twice as large in each dimension. """ if isinstance(device, str): device = torch.device(device) full_size = tuple(2 * dim for dim in view_size) full = torch.randn(full_size).to(device) view = torch.as_strided(full, view_size, full.stride()) return view def _assert_pointwise_ndims(self, code, num_dims: int) -> None: pointwise_blocks = ["XBLOCK", "YBLOCK", "ZBLOCK"] return self._assert_tiling_ndims(code, pointwise_blocks, num_dims) def _assert_reduction_ndims(self, code, num_dims: int) -> None: reduction_blocks = ["R0_BLOCK", "R1_BLOCK"] return self._assert_tiling_ndims(code, reduction_blocks, num_dims) def _assert_tiling_ndims(self, code, blocks: list[str], num_dims: int) -> None: for expected_block in blocks[:num_dims]: self.assertIn(expected_block, code) for unexpected_block in blocks[num_dims:]: self.assertNotIn(unexpected_block, code) def _get_lines_containing_substr(self, code: str, substr: str) -> str: return "\n".join(line for line in code.split("\n") if substr in line) def _run_and_compare( self: InductorTestCase, func: Callable[..., Any], *args, compile_kwargs: Optional[dict] = None, expected_num_block_pointers: Optional[int] = None, expected_num_programs: int = 1, expected_num_triton_kernels: int = 1, config_patches: Optional[dict] = None, rtol: Optional[float] = None, atol: Optional[float] = None, ): """ Runs the module through Inductor, comparing to eager reference. """ if compile_kwargs is None: compile_kwargs = {} if config_patches is None: config_patches = {} def flatten_tensors(tensors): flat, spec = pytree.tree_flatten(tensors) return flat with config.patch(config_patches): compiled = torch.compile(func, backend="inductor", **compile_kwargs) result, code = run_and_get_code(compiled, *args) # Check numerical accuracy ref_tensors = flatten_tensors(func(*args)) actual_tensors = flatten_tensors(result) for ref, actual in zip(ref_tensors, actual_tensors): # Don't clobber the default tolerance values tol = { t: v for t, v in {"rtol": rtol, "atol": atol}.items() if v is not None } self.assertTrue(torch.allclose(ref, actual, **tol)) def count_code(substr: str, expected: Optional[int]): count = sum(prog.count(substr) for prog in code) if expected is not None: self.assertEqual(count, expected) # Check the code self.assertEqual(len(code), expected_num_programs) count_code("@triton.jit", expected_num_triton_kernels) count_code(self.block_descriptor_constructor_str, expected_num_block_pointers) # Verify that 1D shapes aren't being transposed for the TMA store. count_code("tl.trans", 0) return result, code @instantiate_parametrized_tests
BlockDescriptorTestBase
python
pytorch__pytorch
test/export/test_experimental.py
{ "start": 2012, "end": 31064 }
class ____(TestCase): def test_joint_basic(self) -> None: class Module(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(3, 3) self.loss = torch.nn.CrossEntropyLoss() def forward(self, x): return self.loss( self.linear(x).softmax(dim=0), torch.tensor([1.0, 0.0, 0.0]) ) m = Module() example_inputs = (torch.randn(3),) m(*example_inputs) with torch._export.config.patch(use_new_tracer_experimental=True): ep = torch.export.export(m, example_inputs, strict=True) joint_ep = _export_forward_backward(ep) self.assertExpectedInline( str(joint_ep.graph_module.code).strip(), """\ def forward(self, p_linear_weight, p_linear_bias, c_lifted_tensor_0, x): view = torch.ops.aten.view.default(x, [1, 3]); x = None permute = torch.ops.aten.permute.default(p_linear_weight, [1, 0]); p_linear_weight = None addmm = torch.ops.aten.addmm.default(p_linear_bias, view, permute); p_linear_bias = permute = None view_1 = torch.ops.aten.view.default(addmm, [3]); addmm = None _softmax = torch.ops.aten._softmax.default(view_1, 0, False); view_1 = None alias = torch.ops.aten.alias.default(_softmax) clone = torch.ops.aten.clone.default(c_lifted_tensor_0); c_lifted_tensor_0 = None _log_softmax = torch.ops.aten._log_softmax.default(_softmax, 0, False); _softmax = None alias_1 = torch.ops.aten.alias.default(_log_softmax) mul = torch.ops.aten.mul.Tensor(_log_softmax, clone); _log_softmax = None sum_1 = torch.ops.aten.sum.dim_IntList(mul, []); mul = None neg = torch.ops.aten.neg.default(sum_1); sum_1 = None div = torch.ops.aten.div.Scalar(neg, 1); neg = None full_like = torch.ops.aten.full_like.default(div, 1, pin_memory = False, memory_format = torch.preserve_format) div_1 = torch.ops.aten.div.Scalar(full_like, 1); full_like = None neg_1 = torch.ops.aten.neg.default(div_1); div_1 = None expand = torch.ops.aten.expand.default(neg_1, [3]); neg_1 = None mul_1 = torch.ops.aten.mul.Tensor(expand, clone); expand = clone = None alias_2 = torch.ops.aten.alias.default(alias_1); alias_1 = None exp = torch.ops.aten.exp.default(alias_2); alias_2 = None sum_2 = torch.ops.aten.sum.dim_IntList(mul_1, [0], True) mul_2 = torch.ops.aten.mul.Tensor(exp, sum_2); exp = sum_2 = None sub = torch.ops.aten.sub.Tensor(mul_1, mul_2); mul_1 = mul_2 = None alias_3 = torch.ops.aten.alias.default(alias); alias = None mul_3 = torch.ops.aten.mul.Tensor(sub, alias_3); sub = None sum_3 = torch.ops.aten.sum.dim_IntList(mul_3, [0], True) mul_4 = torch.ops.aten.mul.Tensor(alias_3, sum_3); alias_3 = sum_3 = None sub_1 = torch.ops.aten.sub.Tensor(mul_3, mul_4); mul_3 = mul_4 = None view_2 = torch.ops.aten.view.default(sub_1, [1, 3]); sub_1 = None permute_1 = torch.ops.aten.permute.default(view_2, [1, 0]) mm = torch.ops.aten.mm.default(permute_1, view); permute_1 = view = None permute_2 = torch.ops.aten.permute.default(mm, [1, 0]); mm = None sum_4 = torch.ops.aten.sum.dim_IntList(view_2, [0], True); view_2 = None view_3 = torch.ops.aten.view.default(sum_4, [3]); sum_4 = None permute_3 = torch.ops.aten.permute.default(permute_2, [1, 0]); permute_2 = None return (div, permute_3, view_3)""", ) ep = joint_ep.run_decompositions() self.assertExpectedInline( str(ep.graph_module.code).strip(), """\ def forward(self, p_linear_weight, p_linear_bias, c_lifted_tensor_0, x): view = torch.ops.aten.view.default(x, [1, 3]); x = None permute = torch.ops.aten.permute.default(p_linear_weight, [1, 0]); p_linear_weight = None addmm = torch.ops.aten.addmm.default(p_linear_bias, view, permute); p_linear_bias = permute = None view_1 = 
torch.ops.aten.view.default(addmm, [3]); addmm = None _softmax = torch.ops.aten._softmax.default(view_1, 0, False); view_1 = None alias = torch.ops.aten.alias.default(_softmax) clone = torch.ops.aten.clone.default(c_lifted_tensor_0); c_lifted_tensor_0 = None _log_softmax = torch.ops.aten._log_softmax.default(_softmax, 0, False); _softmax = None alias_1 = torch.ops.aten.alias.default(_log_softmax) mul = torch.ops.aten.mul.Tensor(_log_softmax, clone); _log_softmax = None sum_1 = torch.ops.aten.sum.dim_IntList(mul, []); mul = None neg = torch.ops.aten.neg.default(sum_1); sum_1 = None div = torch.ops.aten.div.Scalar(neg, 1); neg = None full_like = torch.ops.aten.full_like.default(div, 1, pin_memory = False, memory_format = torch.preserve_format) div_1 = torch.ops.aten.div.Scalar(full_like, 1); full_like = None neg_1 = torch.ops.aten.neg.default(div_1); div_1 = None expand = torch.ops.aten.expand.default(neg_1, [3]); neg_1 = None mul_1 = torch.ops.aten.mul.Tensor(expand, clone); expand = clone = None alias_2 = torch.ops.aten.alias.default(alias_1); alias_1 = None exp = torch.ops.aten.exp.default(alias_2); alias_2 = None sum_2 = torch.ops.aten.sum.dim_IntList(mul_1, [0], True) mul_2 = torch.ops.aten.mul.Tensor(exp, sum_2); exp = sum_2 = None sub = torch.ops.aten.sub.Tensor(mul_1, mul_2); mul_1 = mul_2 = None alias_3 = torch.ops.aten.alias.default(alias); alias = None mul_3 = torch.ops.aten.mul.Tensor(sub, alias_3); sub = None sum_3 = torch.ops.aten.sum.dim_IntList(mul_3, [0], True) mul_4 = torch.ops.aten.mul.Tensor(alias_3, sum_3); alias_3 = sum_3 = None sub_1 = torch.ops.aten.sub.Tensor(mul_3, mul_4); mul_3 = mul_4 = None view_2 = torch.ops.aten.view.default(sub_1, [1, 3]); sub_1 = None permute_1 = torch.ops.aten.permute.default(view_2, [1, 0]) mm = torch.ops.aten.mm.default(permute_1, view); permute_1 = view = None permute_2 = torch.ops.aten.permute.default(mm, [1, 0]); mm = None sum_4 = torch.ops.aten.sum.dim_IntList(view_2, [0], True); view_2 = None view_3 = torch.ops.aten.view.default(sum_4, [3]); sum_4 = None permute_3 = torch.ops.aten.permute.default(permute_2, [1, 0]); permute_2 = None return (div, permute_3, view_3)""", ) def test_joint_dynamic(self) -> None: from torch.export import Dim class Module(torch.nn.Module): def __init__(self) -> None: super().__init__() self.y = torch.nn.Parameter(torch.randn(3)) def forward(self, x): x = torch.ones(x.shape[0], 3) return (self.y + x).sum() m = Module() example_inputs = (torch.randn(3),) m(*example_inputs) ep = torch.export.export( m, example_inputs, dynamic_shapes={"x": {0: Dim("x0")}}, strict=True ) _export_forward_backward(ep) def test_joint_cifar10_backwards(self) -> None: import torch.nn as nn import torch.nn.functional as F # From Pytorch's CIFAR10 example: # https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html class Net(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) self.loss = nn.CrossEntropyLoss() def forward(self, x, labels): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = torch.flatten(x, 1) # flatten all dimensions except batch x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return self.loss(x, labels) net = Net() x = torch.randn(4, 3, 32, 32) labels = torch.ones(4, dtype=torch.int64) inputs = (x, labels) ep = export(net, inputs, strict=True) ep = _export_forward_backward(ep) def 
test_joint_loss_index(self): class Foo(torch.nn.Module): def __init__(self, index): super().__init__() self.l = torch.nn.Linear(4, 4) self.index = index def forward(self, x): x = self.l(x) x = x.sum() if self.index == 0: return x, -x.detach() else: return x.detach(), x inputs = (torch.randn(4, 4),) for i in [0, 1]: ep = export(Foo(i), inputs, strict=True) ep_joint = _export_forward_backward(ep, joint_loss_index=i) for j, spec in enumerate(ep_joint.graph_signature.output_specs): if i == j: self.assertTrue(spec.kind == OutputKind.LOSS_OUTPUT) else: self.assertTrue(spec.kind != OutputKind.LOSS_OUTPUT) def test_joint_buffer_input_mutations(self): class Foo(torch.nn.Module): def __init__(self): super().__init__() self.l = torch.nn.Linear(4, 4) self.register_buffer("buf", torch.randn(4)) self.loss = torch.nn.CrossEntropyLoss() def forward(self, x, label): x.add_(self.buf) x = self.l(x) self.buf.add_(2.0) return self.loss(x, label) inputs = ( torch.randn(4, 4), torch.randint(0, 4, (4,)), ) ep = export(Foo(), inputs) ep_joint = _export_forward_backward(ep) self.assertEqual(len(ep_joint.graph_signature.output_specs), 5) self.assertEqual( ep_joint.graph_signature.output_specs[0].kind, OutputKind.BUFFER_MUTATION, ) self.assertEqual( ep_joint.graph_signature.output_specs[0].target, "buf", ) self.assertEqual( ep_joint.graph_signature.output_specs[1].kind, OutputKind.USER_INPUT_MUTATION, ) self.assertEqual( ep_joint.graph_signature.output_specs[1].target, "x", ) self.assertEqual( ep_joint.graph_signature.output_specs[2].kind, OutputKind.LOSS_OUTPUT, ) def test_sticky_export(self): class Model(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(4, 4) def forward(self, x): return self.linear(x) class Pipeline: def __init__(self, model): self.model = model def generate(self, *args, **kwargs): return self.model(*args, **kwargs) inp = torch.randn(4, 4) p = Pipeline(Model()) orig_forward = p.model.forward p.model.forward = _sticky_export(p.model.forward) res = p.generate(inp) p.model.forward = orig_forward res2 = p.generate(inp) self.assertTrue(torch.allclose(res, res2)) def test_sticky_export_dynamic(self): class Model(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(4, 4) def forward(self, x): if x.shape[0] < 5: return self.linear(x) return x.sin() class Pipeline: def __init__(self, model): self.model = model def generate(self, *args, **kwargs): return self.model(*args, **kwargs) inp = torch.randn(4, 4) def callback(*args, **kwargs): # I think it is bit weird to use the forward arg name here, so # lets just use ShapeCollections flat_args, _ = torch.utils._pytree.tree_flatten((args, kwargs)) collections = torch.export.ShapesCollection() for arg in flat_args: if isinstance(arg, torch.Tensor): collections[arg] = { i: torch.export.Dim.AUTO for i in range(len(arg.shape)) } return collections p = Pipeline(Model()) p.model.forward = _sticky_export( p.model.forward, dynamic_shapes_callback=callback ) _ = p.generate(inp) self.assertExpectedInline( str(p.model.forward._exported_artifact.code).strip(), """\ def forward(self, x): x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec) linear_weight = self.linear.weight linear_bias = self.linear.bias _guards_fn = self._guards_fn(x); _guards_fn = None linear = torch.ops.aten.linear.default(x, linear_weight, linear_bias); x = linear_weight = linear_bias = None return pytree.tree_unflatten((linear,), self._out_spec)""", ) def test_sticky_export_nested_inp(self): class Model(torch.nn.Module): def 
__init__(self): super().__init__() self.linear = torch.nn.Linear(4, 4) def forward(self, *, inputs): return self.linear(inputs[0]) + self.linear(inputs[1]) class Pipeline: def __init__(self, model): self.model = model def generate(self, *, input_tensor, input_tensor2): inputs = [input_tensor, input_tensor2] return self.model(inputs=inputs) inp = torch.randn(4, 4) inp2 = torch.randn(4, 4) p = Pipeline(Model()) orig_forward = p.model.forward p.model.forward = _sticky_export(p.model.forward) res = p.generate(input_tensor=inp, input_tensor2=inp2) p.model.forward = orig_forward res2 = p.generate(input_tensor=inp, input_tensor2=inp2) self.assertTrue(torch.allclose(res, res2)) def test_side_effect(self): global_env = [] class Foo(torch.nn.Module): def forward(self, x): global_env.append(x) return x.sin() with torch._dynamo.config.patch(replay_side_effects=False): _ = dynamo_graph_capture_for_export(Foo())(torch.randn(4, 4)) self.assertEqual(len(global_env), 0) def test_export_add_in_out_info(self): class Foo(torch.nn.Module): def forward(self, dct, lst, bleh): x = dct["a"] * lst[1][0] y = dct["b"] * lst[0] out_dict = {} # Mutate and get a new entry in there lst_copy = lst.copy() lst_copy.append(lst[0]) out_dict["a"] = x out_dict["b"] = y return ( dct["a"], out_dict["b"], bleh, lst_copy[-1], out_dict["a"], [5, 6], ) dct = {"a": torch.randn(2, 3), "b": torch.randn(2, 3)} lst = [torch.randn(2, 3), [torch.randn(2, 3), torch.randn(2, 3)]] export_inputs = ((dct, lst, 56), {}) eager_inputs = copy.deepcopy(export_inputs) from torch._dynamo.functional_export import dynamo_graph_capture_for_export graph_module = dynamo_graph_capture_for_export(Foo())( *export_inputs[0], **export_inputs[1] ) res_export = graph_module(*export_inputs[0], **export_inputs[1]) res_eager = Foo()(*eager_inputs[0], **eager_inputs[1]) self.assertEqual(res_export, res_eager) def test_export_leaf(self): class Foo(torch.nn.Module): def forward(self, x): return x.sin() export_inputs = ((torch.randn(4, 4),), {}) eager_inputs = copy.deepcopy(export_inputs) from torch._dynamo.functional_export import dynamo_graph_capture_for_export graph_module = dynamo_graph_capture_for_export(Foo())( *export_inputs[0], **export_inputs[1] ) res_export = graph_module(*export_inputs[0], **export_inputs[1]) res_eager = Foo()(*eager_inputs[0], **eager_inputs[1]) self.assertEqual(res_export, res_eager) def test_dynamo_graph_capture(self): class Foo(torch.nn.Module): def forward(self, dct, lst, bleh): x = dct["a"] * lst[1][0] y = dct["b"] * lst[0] out_dict = {} # Mutate and get a new entry in there lst_copy = lst.copy() lst_copy.append(lst[0]) out_dict["a"] = x out_dict["b"] = y return ( dct["a"], out_dict["b"], bleh, lst_copy[-1], out_dict["a"], [5, 6], ) foo = Foo() def make_inputs(): return ( {"a": torch.randn(2, 3), "b": torch.randn(2, 3)}, [torch.randn(2, 3), (torch.randn(2, 3),)], torch.randn(2, 3), ) trace_inputs = make_inputs() gm = dynamo_graph_capture_for_export(foo)(*trace_inputs) test_inputs = make_inputs() self.assertEqual(gm(*test_inputs), foo(*test_inputs)) def test_dynamo_graph_capture_with_call_override(self): class _InterestingModule(torch.nn.Module): def __init__(self, module): super().__init__() self._module = module def __call__(self, *args, **kwargs): return self._module(*args, **kwargs) class MyModel(torch.nn.Module): def forward(self, x): return x + 1 foo = _InterestingModule(MyModel()) def make_inputs(): return (torch.randn(2, 3),) trace_inputs = make_inputs() gm = dynamo_graph_capture_for_export(foo)(*trace_inputs) test_inputs = 
make_inputs() self.assertEqual(gm(*test_inputs), foo(*test_inputs)) self.assertEqual(len(list(gm.buffers())), len(list(foo.buffers()))) self.assertEqual(len(list(gm.parameters())), len(list(foo.parameters()))) def test_dynamo_graph_capture_custom_pytree_type(self): import torch.utils._pytree as pytree @dataclass class Bar: x: torch.Tensor y: torch.Tensor class Foo(torch.nn.Module): def forward(self, bar: Bar): return bar.x + bar.y foo = Foo() def make_inputs(): return (Bar(torch.randn(2, 3), torch.randn(2, 3)),) pytree.register_dataclass(Bar) try: trace_inputs = make_inputs() gm = dynamo_graph_capture_for_export(foo)(*trace_inputs) test_inputs = make_inputs() self.assertExpectedInline( gm._in_shuffle_graph.code.strip("\r\n "), """\ def forward(self, arg0_1, arg1_1, arg2_1): return (arg1_1, arg2_1)""", ) self.assertExpectedInline( gm.code.strip("\r\n "), """\ def forward(self, args_0): _tree_leaf_0, _tree_leaf_1, _tree_leaf_2, = pytree.tree_leaves((self, args_0,)) L_bar_x , L_bar_y , = self._in_shuffle_graph(_tree_leaf_0, _tree_leaf_1, _tree_leaf_2) l_bar_x = L_bar_x l_bar_y = L_bar_y add = l_bar_x + l_bar_y; l_bar_x = l_bar_y = None return pytree.tree_unflatten(self._out_shuffle_graph(_tree_leaf_0, _tree_leaf_1, _tree_leaf_2, add), self._out_spec)""", ) self.assertEqual(gm(*test_inputs), foo(*test_inputs)) finally: pytree._deregister_pytree_node(Bar) def test_dynamo_graph_capture_closure(self): from torch.export import Dim N = 3 outer = torch.randn(10, 32) class MyModel(torch.nn.Module): def forward(self, x): z = x + outer y = z[:-1, :] # [s0 - 1, 32] stacked = torch.stack([y] * N, dim=0) # [N * (s0 - 1), 32] reshaped = stacked.reshape(-1, N, 32) # [(s0 - 1), N, 32] return reshaped inps = (torch.randn(10, 32),) ep = dynamo_graph_capture_for_export(MyModel())(*inps) self.assertExpectedInline( ep._in_shuffle_graph.code.strip("\r\n "), """\ def forward(self, arg0_1, arg1_1): _tensor_constant0 = self._tensor_constant0 return (arg1_1, _tensor_constant0)""", ) self.assertExpectedInline( ep.code.strip("\r\n "), """\ def forward(self, args_0): _tree_leaf_0, _tree_leaf_1, = pytree.tree_leaves((self, args_0,)) L_x_ , L_outer_ , = self._in_shuffle_graph(_tree_leaf_0, _tree_leaf_1) l_x_ = L_x_ l_outer_ = L_outer_ z = l_x_ + l_outer_; l_x_ = l_outer_ = None y = z[(slice(None, -1, None), slice(None, None, None))]; z = None stacked = torch.stack([y, y, y], dim = 0); y = None reshaped = stacked.reshape(-1, 3, 32); stacked = None return pytree.tree_unflatten(self._out_shuffle_graph(_tree_leaf_0, _tree_leaf_1, reshaped), self._out_spec)""", ) self.assertEqual(ep(*inps), MyModel()(*inps)) def test_dynamo_graph_capture_full_tracing_context(self) -> None: class Foo(torch.nn.Module): def forward(self, x): return x + x.shape[0] foo = Foo() def make_inputs(b: int): ret = (torch.randn(b, 3),) torch._dynamo.mark_dynamic(ret[0], 0) return ret trace_inputs = make_inputs(2) gm = dynamo_graph_capture_for_export(foo)(*trace_inputs) test_inputs = make_inputs(3) self.assertEqual(gm(*test_inputs), foo(*test_inputs)) self.assertIsNotNone(gm.meta["tracing_context"].fake_mode) self.assertEqual(len(gm.meta["tracing_context"].tensor_to_context), 1) def test_dynamo_graph_capture_ctx_return(self): class Module(torch.nn.Module): def forward(self, x): with GlobalContext() as ctx: z = x + 1 ctx._tensors["6"] = x + 2 return z, ctx def make_inputs(): return (torch.randn(2, 3),) try: pytree.register_pytree_node( GlobalContext, lambda x: x.__flatten__(), GlobalContext.__unflatten__, ) mod = Module() gm = 
dynamo_graph_capture_for_export(mod)(*make_inputs()) test_inputs = make_inputs() actual_outputs = pytree.tree_leaves(gm(*test_inputs)) expected_outputs = pytree.tree_leaves(mod(*test_inputs)) self.assertEqual(actual_outputs, expected_outputs) finally: pytree._deregister_pytree_node(GlobalContext) def test_dynamo_graph_capture_dict_keys_getitem(self): class Module(torch.nn.Module): def forward(self, x): return x * 2 foo = Module() class BlockMask: def __init__(self, d): self.d = d block_mask = BlockMask(torch.randn(4)) def pre_hook_function(m, input): block_mask.d = input[0] + 1 return input # Return a tuple of modified inputs foo.register_forward_pre_hook(pre_hook_function) def make_inputs(): return (torch.randn(4),) trace_inputs = make_inputs() gm = dynamo_graph_capture_for_export(foo)(*trace_inputs) test_inputs = make_inputs() self.assertExpectedInline( gm.code.strip("\r\n "), """\ def forward(self, args_0): _tree_leaf_0, _tree_leaf_1, = pytree.tree_leaves((self, args_0,)) L_args_0_ , = self._in_shuffle_graph(_tree_leaf_0, _tree_leaf_1) l_args_0_ = L_args_0_ add = l_args_0_ + 1 mul = l_args_0_ * 2; l_args_0_ = None return pytree.tree_unflatten(self._out_shuffle_graph(_tree_leaf_0, _tree_leaf_1, mul, add), self._out_spec)""", ) self.assertEqual(gm(*test_inputs), foo(*test_inputs)) def test_dynamo_graph_capture_with_tensor_constant(self): outer = torch.randn(2, 3) class MyModel(torch.nn.Module): def forward(self, x): z = x + outer return z foo = MyModel() def make_inputs(): return (torch.randn(2, 3),) trace_inputs = make_inputs() gm = dynamo_graph_capture_for_export(foo)(*trace_inputs) test_inputs = make_inputs() self.assertEqual(gm(*test_inputs), foo(*test_inputs)) self.assertEqual(len(list(gm.buffers())), len(list(foo.buffers()))) self.assertEqual(len(list(gm.parameters())), len(list(foo.parameters()))) def test_dynamo_graph_capture_side_effects(self): GLOBAL_LIST.clear() def foo(x): z = x + 1 GLOBAL_LIST.append(z) return z def make_inputs(): return (torch.randn(2, 3),) trace_inputs = make_inputs() with ( torch._dynamo.config.patch(replay_side_effects=False), warnings.catch_warnings(record=True) as w, ): gm = dynamo_graph_capture_for_export(foo)(*trace_inputs) cnt = 0 for entry in w: if "While compiling, we found certain side effects happened" in str( entry.message ): cnt += 1 self.assertEqual(cnt, 1) self.assertEqual(len(GLOBAL_LIST), 0) test_inputs = make_inputs() gm_results = gm(*test_inputs) self.assertEqual(len(GLOBAL_LIST), 0) self.assertEqual(gm_results, foo(*test_inputs)) self.assertEqual(len(GLOBAL_LIST), 1) @unittest.skipIf(not TEST_CUDA, "CUDA not available") def test_dynamo_graph_capture_fx_graph_annotate_overlap_pass(self): class DummyOp(torch.autograd.Function): @staticmethod def forward(ctx, x, scalar): ctx.save_for_backward(x) return x + scalar @staticmethod def backward(ctx, grad_out): return grad_out, None def mock_fw_compute(x): with fx_traceback.annotate({"compute": 0}): return DummyOp.apply(x, 10) def mock_bw_comm(x): with fx_traceback.annotate({"comm": 0}): return DummyOp.apply(x, 20) def mock_bw_compute(x): return DummyOp.apply(x, 30) class Model(torch.nn.Module): def forward(self, fw_in, bw_in): fw_out = mock_fw_compute(fw_in) # bw_in blocks bw_out bw_in = mock_bw_comm(bw_in) bw_out = mock_bw_compute(bw_in) return fw_out, bw_out def input_fn(): inputs = (torch.rand(2, 128, device="cuda", requires_grad=True),) grad_ins = (torch.rand(2, 128, device="cuda"),) return ( *inputs, *grad_ins, ) with torch.device("meta"): model = Model() import torch.fx.traceback as 
fx_traceback with fx_traceback.preserve_node_meta(): gm = dynamo_graph_capture_for_export(model)(*input_fn()) """ def forward(self, args_0, args_1): _tree_leaf_0, _tree_leaf_1, _tree_leaf_2, = pytree.tree_leaves((self, args_0, args_1,)) L_fw_in_ , L_bw_in_ , = self._in_shuffle_graph(_tree_leaf_0, _tree_leaf_1, _tree_leaf_2) l_fw_in_ = L_fw_in_ l_bw_in_ = L_bw_in_ fwd_body_0 = self.fwd_body_0 bwd_body_0 = self.bwd_body_0 fw_out = torch.ops.higher_order.autograd_function_apply(fwd_body_0, bwd_body_0, l_fw_in_, args_tensor_mask = [True, False], non_differentiable_idx = []); fwd_body_0 = bwd_body_0 = l_fw_in_ = None bw_in = l_bw_in_ + 20; l_bw_in_ = None bw_out = bw_in + 30; bw_in = None return pytree.tree_unflatten(self._out_shuffle_graph(_tree_leaf_0, _tree_leaf_1, _tree_leaf_2, fw_out, bw_out), self._out_spec) """ test_inputs = input_fn() self.assertEqual(gm(*test_inputs), model(*test_inputs)) def test_dynamo_graph_capture_default_args(self): class Module(torch.nn.Module): def forward(self, x, y=1): return x + y m = Module() ep = dynamo_graph_capture_for_export(m)(torch.randn(2, 3)) test_inputs = (torch.randn(2, 3),) self.assertEqual(ep(*test_inputs), m(*test_inputs)) if __name__ == "__main__": run_tests()
TestExperiment
python
modin-project__modin
modin/tests/config/docs_module_with_just_base/classes.py
{ "start": 784, "end": 914 }
class ____: def astype(): """This is a test of the documentation module for BasePandasDataSet.astype."""
BasePandasDataset
python
doocs__leetcode
solution/1000-1099/1058.Minimize Rounding Error to Meet Target/Solution.py
{ "start": 0, "end": 452 }
class ____: def minimizeError(self, prices: List[str], target: int) -> str: mi = 0 arr = [] for p in prices: p = float(p) mi += int(p) if d := p - int(p): arr.append(d) if not mi <= target <= mi + len(arr): return "-1" d = target - mi arr.sort(reverse=True) ans = d - sum(arr[:d]) + sum(arr[d:]) return f'{ans:.3f}'
Solution
python
PyCQA__pyflakes
pyflakes/test/test_other.py
{ "start": 170, "end": 26588 }
class ____(TestCase): def test_duplicateArgs(self): self.flakes('def fu(bar, bar): pass', m.DuplicateArgument) def test_localReferencedBeforeAssignment(self): self.flakes(''' a = 1 def f(): a; a=1 f() ''', m.UndefinedLocal, m.UnusedVariable) def test_redefinedInGenerator(self): """ Test that reusing a variable in a generator does not raise a warning. """ self.flakes(''' a = 1 (1 for a, b in [(1, 2)]) ''') self.flakes(''' class A: a = 1 list(1 for a, b in [(1, 2)]) ''') self.flakes(''' def f(): a = 1 (1 for a, b in [(1, 2)]) ''', m.UnusedVariable) self.flakes(''' (1 for a, b in [(1, 2)]) (1 for a, b in [(1, 2)]) ''') self.flakes(''' for a, b in [(1, 2)]: pass (1 for a, b in [(1, 2)]) ''') def test_redefinedInSetComprehension(self): """ Test that reusing a variable in a set comprehension does not raise a warning. """ self.flakes(''' a = 1 {1 for a, b in [(1, 2)]} ''') self.flakes(''' class A: a = 1 {1 for a, b in [(1, 2)]} ''') self.flakes(''' def f(): a = 1 {1 for a, b in [(1, 2)]} ''', m.UnusedVariable) self.flakes(''' {1 for a, b in [(1, 2)]} {1 for a, b in [(1, 2)]} ''') self.flakes(''' for a, b in [(1, 2)]: pass {1 for a, b in [(1, 2)]} ''') def test_redefinedInDictComprehension(self): """ Test that reusing a variable in a dict comprehension does not raise a warning. """ self.flakes(''' a = 1 {1: 42 for a, b in [(1, 2)]} ''') self.flakes(''' class A: a = 1 {1: 42 for a, b in [(1, 2)]} ''') self.flakes(''' def f(): a = 1 {1: 42 for a, b in [(1, 2)]} ''', m.UnusedVariable) self.flakes(''' {1: 42 for a, b in [(1, 2)]} {1: 42 for a, b in [(1, 2)]} ''') self.flakes(''' for a, b in [(1, 2)]: pass {1: 42 for a, b in [(1, 2)]} ''') def test_redefinedFunction(self): """ Test that shadowing a function definition with another one raises a warning. """ self.flakes(''' def a(): pass def a(): pass ''', m.RedefinedWhileUnused) def test_redefined_function_shadows_variable(self): self.flakes(''' x = 1 def x(): pass ''', m.RedefinedWhileUnused) def test_redefinedUnderscoreFunction(self): """ Test that shadowing a function definition named with underscore doesn't raise anything. """ self.flakes(''' def _(): pass def _(): pass ''') def test_redefinedUnderscoreImportation(self): """ Test that shadowing an underscore importation raises a warning. """ self.flakes(''' from .i18n import _ def _(): pass ''', m.RedefinedWhileUnused) def test_redefinedClassFunction(self): """ Test that shadowing a function definition in a class suite with another one raises a warning. """ self.flakes(''' class A: def a(): pass def a(): pass ''', m.RedefinedWhileUnused) def test_redefinedIfElseFunction(self): """ Test that shadowing a function definition twice in an if and else block does not raise a warning. """ self.flakes(''' if True: def a(): pass else: def a(): pass ''') def test_redefinedIfFunction(self): """ Test that shadowing a function definition within an if block raises a warning. """ self.flakes(''' if True: def a(): pass def a(): pass ''', m.RedefinedWhileUnused) def test_redefinedTryExceptFunction(self): """ Test that shadowing a function definition twice in try and except block does not raise a warning. """ self.flakes(''' try: def a(): pass except: def a(): pass ''') def test_redefinedTryFunction(self): """ Test that shadowing a function definition within a try block raises a warning. 
""" self.flakes(''' try: def a(): pass def a(): pass except: pass ''', m.RedefinedWhileUnused) def test_redefinedIfElseInListComp(self): """ Test that shadowing a variable in a list comprehension in an if and else block does not raise a warning. """ self.flakes(''' if False: a = 1 else: [a for a in '12'] ''') def test_functionDecorator(self): """ Test that shadowing a function definition with a decorated version of that function does not raise a warning. """ self.flakes(''' from somewhere import somedecorator def a(): pass a = somedecorator(a) ''') def test_classFunctionDecorator(self): """ Test that shadowing a function definition in a class suite with a decorated version of that function does not raise a warning. """ self.flakes(''' class A: def a(): pass a = classmethod(a) ''') def test_modernProperty(self): self.flakes(""" class A: @property def t(self): pass @t.setter def t(self, value): pass @t.deleter def t(self): pass """) def test_unaryPlus(self): """Don't die on unary +.""" self.flakes('+1') def test_undefinedBaseClass(self): """ If a name in the base list of a class definition is undefined, a warning is emitted. """ self.flakes(''' class foo(foo): pass ''', m.UndefinedName) def test_classNameUndefinedInClassBody(self): """ If a class name is used in the body of that class's definition and the name is not already defined, a warning is emitted. """ self.flakes(''' class foo: foo ''', m.UndefinedName) def test_classNameDefinedPreviously(self): """ If a class name is used in the body of that class's definition and the name was previously defined in some other way, no warning is emitted. """ self.flakes(''' foo = None class foo: foo ''') def test_classRedefinition(self): """ If a class is defined twice in the same module, a warning is emitted. """ self.flakes(''' class Foo: pass class Foo: pass ''', m.RedefinedWhileUnused) def test_functionRedefinedAsClass(self): """ If a function is redefined as a class, a warning is emitted. """ self.flakes(''' def Foo(): pass class Foo: pass ''', m.RedefinedWhileUnused) def test_classRedefinedAsFunction(self): """ If a class is redefined as a function, a warning is emitted. """ self.flakes(''' class Foo: pass def Foo(): pass ''', m.RedefinedWhileUnused) def test_classWithReturn(self): """ If a return is used inside a class, a warning is emitted. """ self.flakes(''' class Foo(object): return ''', m.ReturnOutsideFunction) def test_moduleWithReturn(self): """ If a return is used at the module level, a warning is emitted. """ self.flakes(''' return ''', m.ReturnOutsideFunction) def test_classWithYield(self): """ If a yield is used inside a class, a warning is emitted. """ self.flakes(''' class Foo(object): yield ''', m.YieldOutsideFunction) def test_moduleWithYield(self): """ If a yield is used at the module level, a warning is emitted. """ self.flakes(''' yield ''', m.YieldOutsideFunction) def test_classWithYieldFrom(self): """ If a yield from is used inside a class, a warning is emitted. """ self.flakes(''' class Foo(object): yield from range(10) ''', m.YieldOutsideFunction) def test_moduleWithYieldFrom(self): """ If a yield from is used at the module level, a warning is emitted. 
""" self.flakes(''' yield from range(10) ''', m.YieldOutsideFunction) def test_continueOutsideLoop(self): self.flakes(''' continue ''', m.ContinueOutsideLoop) self.flakes(''' def f(): continue ''', m.ContinueOutsideLoop) self.flakes(''' while True: pass else: continue ''', m.ContinueOutsideLoop) self.flakes(''' while True: pass else: if 1: if 2: continue ''', m.ContinueOutsideLoop) self.flakes(''' while True: def f(): continue ''', m.ContinueOutsideLoop) self.flakes(''' while True: class A: continue ''', m.ContinueOutsideLoop) def test_continueInsideLoop(self): self.flakes(''' while True: continue ''') self.flakes(''' for i in range(10): continue ''') self.flakes(''' while True: if 1: continue ''') self.flakes(''' for i in range(10): if 1: continue ''') self.flakes(''' while True: while True: pass else: continue else: pass ''') self.flakes(''' while True: try: pass finally: while True: continue ''') def test_breakOutsideLoop(self): self.flakes(''' break ''', m.BreakOutsideLoop) self.flakes(''' def f(): break ''', m.BreakOutsideLoop) self.flakes(''' while True: pass else: break ''', m.BreakOutsideLoop) self.flakes(''' while True: pass else: if 1: if 2: break ''', m.BreakOutsideLoop) self.flakes(''' while True: def f(): break ''', m.BreakOutsideLoop) self.flakes(''' while True: class A: break ''', m.BreakOutsideLoop) self.flakes(''' try: pass finally: break ''', m.BreakOutsideLoop) def test_breakInsideLoop(self): self.flakes(''' while True: break ''') self.flakes(''' for i in range(10): break ''') self.flakes(''' while True: if 1: break ''') self.flakes(''' for i in range(10): if 1: break ''') self.flakes(''' while True: while True: pass else: break else: pass ''') self.flakes(''' while True: try: pass finally: while True: break ''') self.flakes(''' while True: try: pass finally: break ''') self.flakes(''' while True: try: pass finally: if 1: if 2: break ''') def test_defaultExceptLast(self): """ A default except block should be last. YES: try: ... except Exception: ... except: ... NO: try: ... except: ... except Exception: ... 
""" self.flakes(''' try: pass except ValueError: pass ''') self.flakes(''' try: pass except ValueError: pass except: pass ''') self.flakes(''' try: pass except: pass ''') self.flakes(''' try: pass except ValueError: pass else: pass ''') self.flakes(''' try: pass except: pass else: pass ''') self.flakes(''' try: pass except ValueError: pass except: pass else: pass ''') def test_defaultExceptNotLast(self): self.flakes(''' try: pass except: pass except ValueError: pass ''', m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except: pass ''', m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except ValueError: pass except: pass ''', m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except ValueError: pass except: pass except ValueError: pass ''', m.DefaultExceptNotLast, m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except ValueError: pass else: pass ''', m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except: pass else: pass ''', m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except ValueError: pass except: pass else: pass ''', m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except ValueError: pass except: pass except ValueError: pass else: pass ''', m.DefaultExceptNotLast, m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except ValueError: pass finally: pass ''', m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except: pass finally: pass ''', m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except ValueError: pass except: pass finally: pass ''', m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except ValueError: pass except: pass except ValueError: pass finally: pass ''', m.DefaultExceptNotLast, m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except ValueError: pass else: pass finally: pass ''', m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except: pass else: pass finally: pass ''', m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except ValueError: pass except: pass else: pass finally: pass ''', m.DefaultExceptNotLast) self.flakes(''' try: pass except: pass except ValueError: pass except: pass except ValueError: pass else: pass finally: pass ''', m.DefaultExceptNotLast, m.DefaultExceptNotLast) def test_starredAssignmentNoError(self): """ Python 3 extended iterable unpacking """ self.flakes(''' a, *b = range(10) ''') self.flakes(''' *a, b = range(10) ''') self.flakes(''' a, *b, c = range(10) ''') self.flakes(''' (a, *b) = range(10) ''') self.flakes(''' (*a, b) = range(10) ''') self.flakes(''' (a, *b, c) = range(10) ''') self.flakes(''' [a, *b] = range(10) ''') self.flakes(''' [*a, b] = range(10) ''') self.flakes(''' [a, *b, c] = range(10) ''') # Taken from test_unpack_ex.py in the cPython source s = ", ".join("a%d" % i for i in range(1 << 8 - 1)) + \ ", *rest = range(1<<8)" self.flakes(s) s = "(" + ", ".join("a%d" % i for i in range(1 << 8 - 1)) + \ ", *rest) = range(1<<8)" self.flakes(s) s = "[" + ", ".join("a%d" % i for i in range(1 << 8 - 1)) + \ ", *rest] = range(1<<8)" self.flakes(s) def test_starredAssignmentErrors(self): """ SyntaxErrors (not encoded in the ast) surrounding Python 3 extended iterable unpacking """ # Taken from test_unpack_ex.py in the cPython source s = ", ".join("a%d" % i for i in range(1 << 8)) + \ ", *rest = range(1<<8 + 1)" self.flakes(s, m.TooManyExpressionsInStarredAssignment) s = "(" + ", ".join("a%d" % i for i in range(1 << 8)) + \ ", *rest) = range(1<<8 + 1)" 
self.flakes(s, m.TooManyExpressionsInStarredAssignment) s = "[" + ", ".join("a%d" % i for i in range(1 << 8)) + \ ", *rest] = range(1<<8 + 1)" self.flakes(s, m.TooManyExpressionsInStarredAssignment) s = ", ".join("a%d" % i for i in range(1 << 8 + 1)) + \ ", *rest = range(1<<8 + 2)" self.flakes(s, m.TooManyExpressionsInStarredAssignment) s = "(" + ", ".join("a%d" % i for i in range(1 << 8 + 1)) + \ ", *rest) = range(1<<8 + 2)" self.flakes(s, m.TooManyExpressionsInStarredAssignment) s = "[" + ", ".join("a%d" % i for i in range(1 << 8 + 1)) + \ ", *rest] = range(1<<8 + 2)" self.flakes(s, m.TooManyExpressionsInStarredAssignment) # No way we can actually test this! # s = "*rest, " + ", ".join("a%d" % i for i in range(1<<24)) + \ # ", *rest = range(1<<24 + 1)" # self.flakes(s, m.TooManyExpressionsInStarredAssignment) self.flakes(''' a, *b, *c = range(10) ''', m.TwoStarredExpressions) self.flakes(''' a, *b, c, *d = range(10) ''', m.TwoStarredExpressions) self.flakes(''' *a, *b, *c = range(10) ''', m.TwoStarredExpressions) self.flakes(''' (a, *b, *c) = range(10) ''', m.TwoStarredExpressions) self.flakes(''' (a, *b, c, *d) = range(10) ''', m.TwoStarredExpressions) self.flakes(''' (*a, *b, *c) = range(10) ''', m.TwoStarredExpressions) self.flakes(''' [a, *b, *c] = range(10) ''', m.TwoStarredExpressions) self.flakes(''' [a, *b, c, *d] = range(10) ''', m.TwoStarredExpressions) self.flakes(''' [*a, *b, *c] = range(10) ''', m.TwoStarredExpressions) @skip("todo: Too hard to make this warn but other cases stay silent") def test_doubleAssignment(self): """ If a variable is re-assigned to without being used, no warning is emitted. """ self.flakes(''' x = 10 x = 20 ''', m.RedefinedWhileUnused) def test_doubleAssignmentConditionally(self): """ If a variable is re-assigned within a conditional, no warning is emitted. """ self.flakes(''' x = 10 if True: x = 20 ''') def test_doubleAssignmentWithUse(self): """ If a variable is re-assigned to after being used, no warning is emitted. """ self.flakes(''' x = 10 y = x * 2 x = 20 ''') def test_comparison(self): """ If a defined name is used on either side of any of the six comparison operators, no warning is emitted. """ self.flakes(''' x = 10 y = 20 x < y x <= y x == y x != y x >= y x > y ''') def test_identity(self): """ If a defined name is used on either side of an identity test, no warning is emitted. """ self.flakes(''' x = 10 y = 20 x is y x is not y ''') def test_containment(self): """ If a defined name is used on either side of a containment test, no warning is emitted. """ self.flakes(''' x = 10 y = 20 x in y x not in y ''') def test_loopControl(self): """ break and continue statements are supported. """ self.flakes(''' for x in [1, 2]: break ''') self.flakes(''' for x in [1, 2]: continue ''') def test_ellipsis(self): """ Ellipsis in a slice is supported. """ self.flakes(''' [1, 2][...] ''') def test_extendedSlice(self): """ Extended slices are supported. """ self.flakes(''' x = 3 [1, 2][x,:] ''') def test_varAugmentedAssignment(self): """ Augmented assignment of a variable is supported. We don't care about var refs. """ self.flakes(''' foo = 0 foo += 1 ''') def test_attrAugmentedAssignment(self): """ Augmented assignment of attributes is supported. We don't care about attr refs. """ self.flakes(''' foo = None foo.bar += foo.baz ''') def test_globalDeclaredInDifferentScope(self): """ A 'global' can be declared in one scope and reused in another. 
""" self.flakes(''' def f(): global foo def g(): foo = 'anything'; foo.is_used() ''', m.UnusedIndirectAssignment) def test_unused_global_statement(self): self.flakes(''' g = 0 def f1(): global g g = 1 def f2(): global g # this is unused! return g ''', m.UnusedIndirectAssignment) def test_unused_nonlocal_statement(self): self.flakes(''' def f(): x = 1 def set_x(): nonlocal x x = 2 def get_x(): nonlocal x return x set_x() return get_x() ''', m.UnusedIndirectAssignment) def test_unused_global_statement_not_marked_as_used_by_nested_scope(self): self.flakes(''' g = 0 def f(): global g def f2(): g = 2 ''', m.UnusedIndirectAssignment, m.UnusedVariable) def test_global_nonlocal_in_class_bodies(self): self.flakes(''' g = 0 class C: global g g = 1 def f(): nl = 0 class C: nonlocal nl nl = 1 ''') def test_unused_global_in_class(self): self.flakes(''' g = 0 class C: global g u = g ''', m.UnusedIndirectAssignment) def test_unused_nonlocal_in_clas(self): self.flakes(''' def f(): nl = 1 class C: nonlocal nl u = nl ''', m.UnusedIndirectAssignment) def test_function_arguments(self): """ Test to traverse ARG and ARGUMENT handler """ self.flakes(''' def foo(a, b): pass ''') self.flakes(''' def foo(a, b, c=0): pass ''') self.flakes(''' def foo(a, b, c=0, *args): pass ''') self.flakes(''' def foo(a, b, c=0, *args, **kwargs): pass ''') def test_function_arguments_python3(self): self.flakes(''' def foo(a, b, c=0, *args, d=0, **kwargs): pass ''')
Test
python
PyCQA__pylint
tests/functional/t/typing_generic.py
{ "start": 591, "end": 627 }
class ____(A[MoreSpecific]): pass
B
python
getsentry__sentry
src/sentry/integrations/api/serializers/models/integration.py
{ "start": 799, "end": 1626 }
class ____(TypedDict): id: str name: str icon: str | None domainName: str | None accountType: str | None scopes: list[str] | None status: str provider: Any configOrganization: Any configData: Any externalId: str organizationId: int organizationIntegrationStatus: str gracePeriodEnd: str | None # converts the provider to JSON def serialize_provider(provider: IntegrationProvider) -> Mapping[str, Any]: return { "key": provider.key, "slug": provider.key, "name": provider.name, "canAdd": provider.can_add, "canDisable": provider.can_disable, "features": sorted(f.value for f in provider.features), "aspects": getattr(provider.metadata, "aspects", {}), } @register(Integration)
OrganizationIntegrationResponse
python
python__mypy
mypy/types.py
{ "start": 17523, "end": 17922 }
class ____(Type): """ReadOnly[T] Only usable at top-level of a TypedDict definition.""" def __init__(self, item: Type) -> None: super().__init__(line=item.line, column=item.column) self.item = item def __repr__(self) -> str: return f"ReadOnly[{self.item}]" def accept(self, visitor: TypeVisitor[T]) -> T: return self.item.accept(visitor)
ReadOnlyType
python
realpython__materials
hashtable/03_autoresize/hashtable.py
{ "start": 127, "end": 3302 }
class ____: @classmethod def from_dict(cls, dictionary, capacity=None): hash_table = cls(capacity or len(dictionary)) for key, value in dictionary.items(): hash_table[key] = value return hash_table def __init__(self, capacity=8): if capacity < 1: raise ValueError("Capacity must be a positive number") self._slots = capacity * [None] def __len__(self): return len(self.pairs) def __iter__(self): yield from self.keys def __delitem__(self, key): for index, pair in self._probe(key): if pair is None: raise KeyError(key) if pair is DELETED: continue if pair.key == key: self._slots[index] = DELETED break else: raise KeyError(key) def __setitem__(self, key, value): for index, pair in self._probe(key): if pair is DELETED: continue if pair is None or pair.key == key: self._slots[index] = Pair(key, value) break else: self._resize_and_rehash() self[key] = value def __getitem__(self, key): for _, pair in self._probe(key): if pair is None: raise KeyError(key) if pair is DELETED: continue if pair.key == key: return pair.value raise KeyError(key) def __contains__(self, key): try: self[key] except KeyError: return False else: return True def __eq__(self, other): if self is other: return True if type(self) is not type(other): return False return set(self.pairs) == set(other.pairs) def __str__(self): pairs = [] for key, value in self.pairs: pairs.append(f"{key!r}: {value!r}") return "{" + ", ".join(pairs) + "}" def __repr__(self): cls = self.__class__.__name__ return f"{cls}.from_dict({str(self)})" def copy(self): return HashTable.from_dict(dict(self.pairs), self.capacity) def get(self, key, default=None): try: return self[key] except KeyError: return default @property def pairs(self): return {pair for pair in self._slots if pair not in (None, DELETED)} @property def values(self): return [pair.value for pair in self.pairs] @property def keys(self): return {pair.key for pair in self.pairs} @property def capacity(self): return len(self._slots) def _index(self, key): return hash(key) % self.capacity def _probe(self, key): index = self._index(key) for _ in range(self.capacity): yield index, self._slots[index] index = (index + 1) % self.capacity def _resize_and_rehash(self): copy = HashTable(capacity=self.capacity * 2) for key, value in self.pairs: copy[key] = value self._slots = copy._slots
HashTable
python
cython__cython
Cython/Compiler/Tests/TestFlowControl.py
{ "start": 204, "end": 356 }
class ____: pos = ('filename.pyx', 1, 2) cf_state = None type = FakeType() def infer_type(self, scope): return self.type
FakeNode
python
fluentpython__example-code-2e
18-with-match/lispy/py3.10/lis.py
{ "start": 1746, "end": 5403 }
class ____(ChainMap[Symbol, Any]): "A ChainMap that allows changing an item in-place." def change(self, key: Symbol, value: Any) -> None: "Find where key is defined and change the value there." for map in self.maps: if key in map: map[key] = value # type: ignore[index] return raise KeyError(key) # end::ENV_CLASS[] def standard_env() -> Environment: "An environment with some Scheme standard procedures." env = Environment() env.update(vars(math)) # sin, cos, sqrt, pi, ... env.update({ '+': op.add, '-': op.sub, '*': op.mul, '/': op.truediv, 'quotient': op.floordiv, '>': op.gt, '<': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq, 'abs': abs, 'append': lambda *args: list(chain(*args)), 'apply': lambda proc, args: proc(*args), 'begin': lambda *x: x[-1], 'car': lambda x: x[0], 'cdr': lambda x: x[1:], 'cons': lambda x, y: [x] + y, 'display': lambda x: print(lispstr(x)), 'eq?': op.is_, 'equal?': op.eq, 'filter': lambda *args: list(filter(*args)), 'length': len, 'list': lambda *x: list(x), 'list?': lambda x: isinstance(x, list), 'map': lambda *args: list(map(*args)), 'max': max, 'min': min, 'not': op.not_, 'null?': lambda x: x == [], 'number?': lambda x: isinstance(x, (int, float)), 'procedure?': callable, 'round': round, 'symbol?': lambda x: isinstance(x, Symbol), }) return env ################ Interaction: A REPL # tag::REPL[] def repl(prompt: str = 'lis.py> ') -> NoReturn: "A prompt-read-eval-print loop." global_env = Environment({}, standard_env()) while True: ast = parse(input(prompt)) val = evaluate(ast, global_env) if val is not None: print(lispstr(val)) def lispstr(exp: object) -> str: "Convert a Python object back into a Lisp-readable string." if isinstance(exp, list): return '(' + ' '.join(map(lispstr, exp)) + ')' else: return str(exp) # end::REPL[] ################ Evaluator # tag::EVALUATE[] KEYWORDS = ['quote', 'if', 'lambda', 'define', 'set!'] def evaluate(exp: Expression, env: Environment) -> Any: "Evaluate an expression in an environment." match exp: case int(x) | float(x): return x case Symbol(var): return env[var] case ['quote', x]: return x case ['if', test, consequence, alternative]: if evaluate(test, env): return evaluate(consequence, env) else: return evaluate(alternative, env) case ['lambda', [*parms], *body] if body: return Procedure(parms, body, env) case ['define', Symbol(name), value_exp]: env[name] = evaluate(value_exp, env) case ['define', [Symbol(name), *parms], *body] if body: env[name] = Procedure(parms, body, env) case ['set!', Symbol(name), value_exp]: env.change(name, evaluate(value_exp, env)) case [func_exp, *args] if func_exp not in KEYWORDS: proc = evaluate(func_exp, env) values = [evaluate(arg, env) for arg in args] return proc(*values) case _: raise SyntaxError(lispstr(exp)) # end::EVALUATE[] # tag::PROCEDURE[]
Environment
python
pytorch__pytorch
test/test_jiterator.py
{ "start": 930, "end": 6673 }
class ____(TestCase): @parametrize("shape_strides", [ (([3, 3], [3, 1]), ([3, 3], [3, 1])), # contiguous ]) @dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16), all_types_and_complex_and(torch.half, torch.bfloat16))) def test_all_dtype_contiguous(self, device, dtypes, shape_strides): a_buffer = torch.rand(9, device=device).mul(10).type(dtypes[0]) b_buffer = torch.rand(9, device=device).mul(10).type(dtypes[1]) a = a_buffer.as_strided(*shape_strides[0]) b = b_buffer.as_strided(*shape_strides[1]) expected = ref_fn(a, b) result = jitted_fn(a, b) self.assertEqual(expected, result) # See https://github.com/pytorch/pytorch/pull/76394#issuecomment-1118018287 for details # On cuda 11.3, nvrtcCompileProgram is taking too long to # compile jiterator generated kernels for non-contiguous input that requires dynamic-casting. @skipCUDAIfVersionLessThan((11, 6)) @parametrize("shape_strides", [ (([3, 3], [1, 3]), ([3, 1], [1, 3])), # non-contiguous ]) @dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16), all_types_and_complex_and(torch.half, torch.bfloat16))) def test_all_dtype_noncontiguous(self, device, dtypes, shape_strides): a_buffer = torch.rand(9, device=device).mul(10).type(dtypes[0]) b_buffer = torch.rand(9, device=device).mul(10).type(dtypes[1]) a = a_buffer.as_strided(*shape_strides[0]) b = b_buffer.as_strided(*shape_strides[1]) expected = ref_fn(a, b) result = jitted_fn(a, b) self.assertEqual(expected, result) @dtypes(torch.float, torch.double, torch.float16, torch.bfloat16) @parametrize("alpha", [-1, 2.0, None]) @parametrize("beta", [3, -4.2, None]) @toleranceOverride({torch.float16 : tol(atol=1e-2, rtol=1e-3)}) def test_extra_args(self, device, dtype, alpha, beta): a = torch.rand(3, device=device).mul(10).type(dtype) b = torch.rand(3, device=device).mul(10).type(dtype) extra_args = {} if alpha is not None: extra_args["alpha"] = alpha if beta is not None: extra_args["beta"] = beta expected = ref_fn(a, b, **extra_args) result = jitted_fn(a, b, **extra_args) self.assertEqual(expected, result) @parametrize("is_train", [True, False]) def test_bool_extra_args(self, device, is_train): code_string = "template <typename T> T conditional(T x, T mask, bool is_train) { return is_train ? 
x * mask : x; }" jitted_fn = create_jit_fn(code_string, is_train=False) def ref_fn(x, mask, is_train): return x * mask if is_train else x a = torch.rand(3, device=device) b = torch.rand(3, device=device) expected = ref_fn(a, b, is_train=is_train) result = jitted_fn(a, b, is_train=is_train) self.assertEqual(expected, result) def test_multiple_functors(self, device): code_string = ''' template <typename T> T fn(T x, T mask) { return x * mask; } template <typename T> T main_fn(T x, T mask, T y) { return fn(x, mask) + y; } ''' jitted_fn = create_jit_fn(code_string) def ref_fn(x, mask, y): return x * mask + y a = torch.rand(3, device=device) b = torch.rand(3, device=device) c = torch.rand(3, device=device) expected = ref_fn(a, b, c) result = jitted_fn(a, b, c) self.assertEqual(expected, result) @parametrize("num_inputs", [1, 5, 8]) def test_various_num_inputs(self, num_inputs): inputs = [] for _ in range(num_inputs): inputs.append(torch.rand(3, device='cuda').mul(10)) input_string = ",".join([f"T i{i}" for i in range(num_inputs)]) function_body = "+".join([f"i{i}" for i in range(num_inputs)]) code_string = f"template <typename T> T my_kernel({input_string}) {{ return {function_body}; }}" jitted_fn = create_jit_fn(code_string) def ref_fn(*inputs): return torch.sum(torch.stack(inputs), dim=0) expected = ref_fn(*inputs) result = jitted_fn(*inputs) self.assertEqual(expected, result) @parametrize("num_outputs", [1, 4, 8]) def test_various_num_outputs(self, num_outputs): input = torch.rand(3, device='cuda') output_string = ", ".join([f"T& out{i}" for i in range(num_outputs)]) function_body = "" for i in range(num_outputs): function_body += f"out{i} = input + {i};\n" # NB: return type must be void, otherwise ROCm silently fails code_string = f"template <typename T> void my_kernel(T input, {output_string}) {{ {function_body} }}" jitted_fn = create_multi_output_jit_fn(code_string, num_outputs) def ref_fn(input): outputs = [] for i in range(num_outputs): outputs.append(input + i) if num_outputs == 1: return outputs[0] return tuple(outputs) expected = ref_fn(input) result = jitted_fn(input) for i in range(num_outputs): self.assertEqual(expected[i], result[i]) @parametrize("code_string", [ "template <typename T> T my _kernel(T x) { return x; }", "template <typename T> Tmy_kernel(T x) { return x; }", ]) def test_invalid_function_name(self, code_string): with self.assertRaises(Exception): create_jit_fn(code_string) instantiate_device_type_tests(TestPythonJiterator, globals(), only_for="cuda") if __name__ == '__main__': run_tests()
TestPythonJiterator
python
scikit-learn__scikit-learn
sklearn/neural_network/_rbm.py
{ "start": 623, "end": 14998 }
class ____(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): """Bernoulli Restricted Boltzmann Machine (RBM). A Restricted Boltzmann Machine with binary visible units and binary hidden units. Parameters are estimated using Stochastic Maximum Likelihood (SML), also known as Persistent Contrastive Divergence (PCD) [2]. The time complexity of this implementation is ``O(d ** 2)`` assuming d ~ n_features ~ n_components. Read more in the :ref:`User Guide <rbm>`. Parameters ---------- n_components : int, default=256 Number of binary hidden units. learning_rate : float, default=0.1 The learning rate for weight updates. It is *highly* recommended to tune this hyper-parameter. Reasonable values are in the 10**[0., -3.] range. batch_size : int, default=10 Number of examples per minibatch. n_iter : int, default=10 Number of iterations/sweeps over the training dataset to perform during training. verbose : int, default=0 The verbosity level. The default, zero, means silent mode. Range of values is [0, inf]. random_state : int, RandomState instance or None, default=None Determines random number generation for: - Gibbs sampling from visible and hidden layers. - Initializing components, sampling from layers during fit. - Corrupting the data when scoring samples. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. Attributes ---------- intercept_hidden_ : array-like of shape (n_components,) Biases of the hidden units. intercept_visible_ : array-like of shape (n_features,) Biases of the visible units. components_ : array-like of shape (n_components, n_features) Weight matrix, where `n_features` is the number of visible units and `n_components` is the number of hidden units. h_samples_ : array-like of shape (batch_size, n_components) Hidden Activation sampled from the model distribution, where `batch_size` is the number of examples per minibatch and `n_components` is the number of hidden units. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- sklearn.neural_network.MLPRegressor : Multi-layer Perceptron regressor. sklearn.neural_network.MLPClassifier : Multi-layer Perceptron classifier. sklearn.decomposition.PCA : An unsupervised linear dimensionality reduction model. References ---------- [1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for deep belief nets. Neural Computation 18, pp 1527-1554. https://www.cs.toronto.edu/~hinton/absps/fastnc.pdf [2] Tieleman, T. Training Restricted Boltzmann Machines using Approximations to the Likelihood Gradient. International Conference on Machine Learning (ICML) 2008 Examples -------- >>> import numpy as np >>> from sklearn.neural_network import BernoulliRBM >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]) >>> model = BernoulliRBM(n_components=2) >>> model.fit(X) BernoulliRBM(n_components=2) For a more detailed example usage, see :ref:`sphx_glr_auto_examples_neural_networks_plot_rbm_logistic_classification.py`. 
""" _parameter_constraints: dict = { "n_components": [Interval(Integral, 1, None, closed="left")], "learning_rate": [Interval(Real, 0, None, closed="neither")], "batch_size": [Interval(Integral, 1, None, closed="left")], "n_iter": [Interval(Integral, 0, None, closed="left")], "verbose": ["verbose"], "random_state": ["random_state"], } def __init__( self, n_components=256, *, learning_rate=0.1, batch_size=10, n_iter=10, verbose=0, random_state=None, ): self.n_components = n_components self.learning_rate = learning_rate self.batch_size = batch_size self.n_iter = n_iter self.verbose = verbose self.random_state = random_state def transform(self, X): """Compute the hidden layer activation probabilities, P(h=1|v=X). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to be transformed. Returns ------- h : ndarray of shape (n_samples, n_components) Latent representations of the data. """ check_is_fitted(self) X = validate_data( self, X, accept_sparse="csr", reset=False, dtype=(np.float64, np.float32) ) return self._mean_hiddens(X) def _mean_hiddens(self, v): """Computes the probabilities P(h=1|v). Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer. Returns ------- h : ndarray of shape (n_samples, n_components) Corresponding mean field values for the hidden layer. """ p = safe_sparse_dot(v, self.components_.T) p += self.intercept_hidden_ return expit(p, out=p) def _sample_hiddens(self, v, rng): """Sample from the distribution P(h|v). Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer to sample from. rng : RandomState instance Random number generator to use. Returns ------- h : ndarray of shape (n_samples, n_components) Values of the hidden layer. """ p = self._mean_hiddens(v) return rng.uniform(size=p.shape) < p def _sample_visibles(self, h, rng): """Sample from the distribution P(v|h). Parameters ---------- h : ndarray of shape (n_samples, n_components) Values of the hidden layer to sample from. rng : RandomState instance Random number generator to use. Returns ------- v : ndarray of shape (n_samples, n_features) Values of the visible layer. """ p = np.dot(h, self.components_) p += self.intercept_visible_ expit(p, out=p) return rng.uniform(size=p.shape) < p def _free_energy(self, v): """Computes the free energy F(v) = - log sum_h exp(-E(v,h)). Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer. Returns ------- free_energy : ndarray of shape (n_samples,) The value of the free energy. """ return -safe_sparse_dot(v, self.intercept_visible_) - np.logaddexp( 0, safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_ ).sum(axis=1) def gibbs(self, v): """Perform one Gibbs sampling step. Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer to start from. Returns ------- v_new : ndarray of shape (n_samples, n_features) Values of the visible layer after one Gibbs step. """ check_is_fitted(self) if not hasattr(self, "random_state_"): self.random_state_ = check_random_state(self.random_state) h_ = self._sample_hiddens(v, self.random_state_) v_ = self._sample_visibles(h_, self.random_state_) return v_ @_fit_context(prefer_skip_nested_validation=True) def partial_fit(self, X, y=None): """Fit the model to the partial segment of the data X. Parameters ---------- X : ndarray of shape (n_samples, n_features) Training data. 
y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). Returns ------- self : BernoulliRBM The fitted model. """ first_pass = not hasattr(self, "components_") X = validate_data( self, X, accept_sparse="csr", dtype=np.float64, reset=first_pass ) if not hasattr(self, "random_state_"): self.random_state_ = check_random_state(self.random_state) if not hasattr(self, "components_"): self.components_ = np.asarray( self.random_state_.normal(0, 0.01, (self.n_components, X.shape[1])), order="F", ) self._n_features_out = self.components_.shape[0] if not hasattr(self, "intercept_hidden_"): self.intercept_hidden_ = np.zeros( self.n_components, ) if not hasattr(self, "intercept_visible_"): self.intercept_visible_ = np.zeros( X.shape[1], ) if not hasattr(self, "h_samples_"): self.h_samples_ = np.zeros((self.batch_size, self.n_components)) self._fit(X, self.random_state_) def _fit(self, v_pos, rng): """Inner fit for one mini-batch. Adjust the parameters to maximize the likelihood of v using Stochastic Maximum Likelihood (SML). Parameters ---------- v_pos : ndarray of shape (n_samples, n_features) The data to use for training. rng : RandomState instance Random number generator to use for sampling. """ h_pos = self._mean_hiddens(v_pos) v_neg = self._sample_visibles(self.h_samples_, rng) h_neg = self._mean_hiddens(v_neg) lr = float(self.learning_rate) / v_pos.shape[0] update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T update -= np.dot(h_neg.T, v_neg) self.components_ += lr * update self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0)) self.intercept_visible_ += lr * ( np.asarray(v_pos.sum(axis=0)).squeeze() - v_neg.sum(axis=0) ) h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial self.h_samples_ = np.floor(h_neg, h_neg) def score_samples(self, X): """Compute the pseudo-likelihood of X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Values of the visible layer. Must be all-boolean (not checked). Returns ------- pseudo_likelihood : ndarray of shape (n_samples,) Value of the pseudo-likelihood (proxy for likelihood). Notes ----- This method is not deterministic: it computes a quantity called the free energy on X, then on a randomly corrupted version of X, and returns the log of the logistic function of the difference. """ check_is_fitted(self) v = validate_data(self, X, accept_sparse="csr", reset=False) rng = check_random_state(self.random_state) # Randomly corrupt one feature in each sample in v. ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0])) if sp.issparse(v): data = -2 * v[ind] + 1 if isinstance(data, np.matrix): # v is a sparse matrix v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape) else: # v is a sparse array v_ = v + sp.csr_array((data.ravel(), ind), shape=v.shape) else: v_ = v.copy() v_[ind] = 1 - v_[ind] fe = self._free_energy(v) fe_ = self._free_energy(v_) # log(expit(x)) = log(1 / (1 + exp(-x)) = -np.logaddexp(0, -x) return -v.shape[1] * np.logaddexp(0, -(fe_ - fe)) @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """Fit the model to the data X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). Returns ------- self : BernoulliRBM The fitted model. 
""" X = validate_data(self, X, accept_sparse="csr", dtype=(np.float64, np.float32)) n_samples = X.shape[0] rng = check_random_state(self.random_state) self.components_ = np.asarray( rng.normal(0, 0.01, (self.n_components, X.shape[1])), order="F", dtype=X.dtype, ) self._n_features_out = self.components_.shape[0] self.intercept_hidden_ = np.zeros(self.n_components, dtype=X.dtype) self.intercept_visible_ = np.zeros(X.shape[1], dtype=X.dtype) self.h_samples_ = np.zeros((self.batch_size, self.n_components), dtype=X.dtype) n_batches = int(np.ceil(float(n_samples) / self.batch_size)) batch_slices = list( gen_even_slices(n_batches * self.batch_size, n_batches, n_samples=n_samples) ) verbose = self.verbose begin = time.time() for iteration in range(1, self.n_iter + 1): for batch_slice in batch_slices: self._fit(X[batch_slice], rng) if verbose: end = time.time() print( "[%s] Iteration %d, pseudo-likelihood = %.2f, time = %.2fs" % ( type(self).__name__, iteration, self.score_samples(X).mean(), end - begin, ) ) begin = end return self def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = True tags.transformer_tags.preserves_dtype = ["float64", "float32"] return tags
BernoulliRBM
python
cython__cython
Cython/Compiler/ExprNodes.py
{ "start": 459035, "end": 460041 }
class ____(AtomicExprNode): def __init__(self, pos, pyclass_dict): AtomicExprNode.__init__(self, pos) self.pyclass_dict = pyclass_dict def analyse_types(self, env): self.type = self.pyclass_dict.type self.is_temp = False return self def may_be_none(self): return False def result(self): return self.pyclass_dict.result() def generate_result_code(self, code): pass def LocalsExprNode(pos, scope_node, env): if env.is_module_scope: return GlobalsExprNode(pos) if env.is_py_class_scope: return PyClassLocalsExprNode(pos, scope_node.dict) return FuncLocalsExprNode(pos, env) #------------------------------------------------------------------- # # Unary operator nodes # #------------------------------------------------------------------- compile_time_unary_operators = { 'not': operator.not_, '~': operator.inv, '-': operator.neg, '+': operator.pos, }
PyClassLocalsExprNode
python
getsentry__sentry-python
tests/test_ai_monitoring.py
{ "start": 8983, "end": 12796 }
class ____: def test_no_truncation_returns_list(self, sample_messages): class MockSpan: def __init__(self): self.span_id = "test_span_id" self.data = {} def set_data(self, key, value): self.data[key] = value class MockScope: def __init__(self): self._gen_ai_original_message_count = {} span = MockSpan() scope = MockScope() result = truncate_and_annotate_messages(sample_messages, span, scope) assert isinstance(result, list) assert not isinstance(result, AnnotatedValue) assert len(result) == len(sample_messages) assert result == sample_messages assert span.span_id not in scope._gen_ai_original_message_count def test_truncation_sets_metadata_on_scope(self, large_messages): class MockSpan: def __init__(self): self.span_id = "test_span_id" self.data = {} def set_data(self, key, value): self.data[key] = value class MockScope: def __init__(self): self._gen_ai_original_message_count = {} small_limit = 3000 span = MockSpan() scope = MockScope() original_count = len(large_messages) result = truncate_and_annotate_messages( large_messages, span, scope, max_bytes=small_limit ) assert isinstance(result, list) assert not isinstance(result, AnnotatedValue) assert len(result) < len(large_messages) assert scope._gen_ai_original_message_count[span.span_id] == original_count def test_scope_tracks_original_message_count(self, large_messages): class MockSpan: def __init__(self): self.span_id = "test_span_id" self.data = {} def set_data(self, key, value): self.data[key] = value class MockScope: def __init__(self): self._gen_ai_original_message_count = {} small_limit = 3000 original_count = len(large_messages) span = MockSpan() scope = MockScope() result = truncate_and_annotate_messages( large_messages, span, scope, max_bytes=small_limit ) assert scope._gen_ai_original_message_count[span.span_id] == original_count assert len(result) == 1 def test_empty_messages_returns_none(self): class MockSpan: def __init__(self): self.span_id = "test_span_id" self.data = {} def set_data(self, key, value): self.data[key] = value class MockScope: def __init__(self): self._gen_ai_original_message_count = {} span = MockSpan() scope = MockScope() result = truncate_and_annotate_messages([], span, scope) assert result is None result = truncate_and_annotate_messages(None, span, scope) assert result is None def test_truncated_messages_newest_first(self, large_messages): class MockSpan: def __init__(self): self.span_id = "test_span_id" self.data = {} def set_data(self, key, value): self.data[key] = value class MockScope: def __init__(self): self._gen_ai_original_message_count = {} small_limit = 3000 span = MockSpan() scope = MockScope() result = truncate_and_annotate_messages( large_messages, span, scope, max_bytes=small_limit ) assert isinstance(result, list) assert result[0] == large_messages[-len(result)]
TestTruncateAndAnnotateMessages
python
simonw__datasette
datasette/database.py
{ "start": 627, "end": 24815 }
class ____: # For table counts stop at this many rows: count_limit = 10000 _thread_local_id_counter = 1 def __init__( self, ds, path=None, is_mutable=True, is_memory=False, memory_name=None, mode=None, ): self.name = None self._thread_local_id = f"x{self._thread_local_id_counter}" Database._thread_local_id_counter += 1 self.route = None self.ds = ds self.path = path self.is_mutable = is_mutable self.is_memory = is_memory self.memory_name = memory_name if memory_name is not None: self.is_memory = True self.cached_hash = None self.cached_size = None self._cached_table_counts = None self._write_thread = None self._write_queue = None # These are used when in non-threaded mode: self._read_connection = None self._write_connection = None # This is used to track all file connections so they can be closed self._all_file_connections = [] self.mode = mode @property def cached_table_counts(self): if self._cached_table_counts is not None: return self._cached_table_counts # Maybe use self.ds.inspect_data to populate cached_table_counts if self.ds.inspect_data and self.ds.inspect_data.get(self.name): self._cached_table_counts = { key: value["count"] for key, value in self.ds.inspect_data[self.name]["tables"].items() } return self._cached_table_counts @property def color(self): if self.hash: return self.hash[:6] return md5_not_usedforsecurity(self.name)[:6] def suggest_name(self): if self.path: return Path(self.path).stem elif self.memory_name: return self.memory_name else: return "db" def connect(self, write=False): extra_kwargs = {} if write: extra_kwargs["isolation_level"] = "IMMEDIATE" if self.memory_name: uri = "file:{}?mode=memory&cache=shared".format(self.memory_name) conn = sqlite3.connect( uri, uri=True, check_same_thread=False, **extra_kwargs ) if not write: conn.execute("PRAGMA query_only=1") return conn if self.is_memory: return sqlite3.connect(":memory:", uri=True) # mode=ro or immutable=1? 
if self.is_mutable: qs = "?mode=ro" if self.ds.nolock: qs += "&nolock=1" else: qs = "?immutable=1" assert not (write and not self.is_mutable) if write: qs = "" if self.mode is not None: qs = f"?mode={self.mode}" conn = sqlite3.connect( f"file:{self.path}{qs}", uri=True, check_same_thread=False, **extra_kwargs ) self._all_file_connections.append(conn) return conn def close(self): # Close all connections - useful to avoid running out of file handles in tests for connection in self._all_file_connections: connection.close() async def execute_write(self, sql, params=None, block=True): def _inner(conn): return conn.execute(sql, params or []) with trace("sql", database=self.name, sql=sql.strip(), params=params): results = await self.execute_write_fn(_inner, block=block) return results async def execute_write_script(self, sql, block=True): def _inner(conn): return conn.executescript(sql) with trace("sql", database=self.name, sql=sql.strip(), executescript=True): results = await self.execute_write_fn( _inner, block=block, transaction=False ) return results async def execute_write_many(self, sql, params_seq, block=True): def _inner(conn): count = 0 def count_params(params): nonlocal count for param in params: count += 1 yield param return conn.executemany(sql, count_params(params_seq)), count with trace( "sql", database=self.name, sql=sql.strip(), executemany=True ) as kwargs: results, count = await self.execute_write_fn(_inner, block=block) kwargs["count"] = count return results async def execute_isolated_fn(self, fn): # Open a new connection just for the duration of this function # blocking the write queue to avoid any writes occurring during it if self.ds.executor is None: # non-threaded mode isolated_connection = self.connect(write=True) try: result = fn(isolated_connection) finally: isolated_connection.close() try: self._all_file_connections.remove(isolated_connection) except ValueError: # Was probably a memory connection pass return result else: # Threaded mode - send to write thread return await self._send_to_write_thread(fn, isolated_connection=True) async def execute_write_fn(self, fn, block=True, transaction=True): if self.ds.executor is None: # non-threaded mode if self._write_connection is None: self._write_connection = self.connect(write=True) self.ds._prepare_connection(self._write_connection, self.name) if transaction: with self._write_connection: return fn(self._write_connection) else: return fn(self._write_connection) else: return await self._send_to_write_thread( fn, block=block, transaction=transaction ) async def _send_to_write_thread( self, fn, block=True, isolated_connection=False, transaction=True ): if self._write_queue is None: self._write_queue = queue.Queue() if self._write_thread is None: self._write_thread = threading.Thread( target=self._execute_writes, daemon=True ) self._write_thread.name = "_execute_writes for database {}".format( self.name ) self._write_thread.start() task_id = uuid.uuid5(uuid.NAMESPACE_DNS, "datasette.io") reply_queue = janus.Queue() self._write_queue.put( WriteTask(fn, task_id, reply_queue, isolated_connection, transaction) ) if block: result = await reply_queue.async_q.get() if isinstance(result, Exception): raise result else: return result else: return task_id def _execute_writes(self): # Infinite looping thread that protects the single write connection # to this database conn_exception = None conn = None try: conn = self.connect(write=True) self.ds._prepare_connection(conn, self.name) except Exception as e: conn_exception = e while True: task = 
self._write_queue.get() if conn_exception is not None: result = conn_exception else: if task.isolated_connection: isolated_connection = self.connect(write=True) try: result = task.fn(isolated_connection) except Exception as e: sys.stderr.write("{}\n".format(e)) sys.stderr.flush() result = e finally: isolated_connection.close() try: self._all_file_connections.remove(isolated_connection) except ValueError: # Was probably a memory connection pass else: try: if task.transaction: with conn: result = task.fn(conn) else: result = task.fn(conn) except Exception as e: sys.stderr.write("{}\n".format(e)) sys.stderr.flush() result = e task.reply_queue.sync_q.put(result) async def execute_fn(self, fn): if self.ds.executor is None: # non-threaded mode if self._read_connection is None: self._read_connection = self.connect() self.ds._prepare_connection(self._read_connection, self.name) return fn(self._read_connection) # threaded mode def in_thread(): conn = getattr(connections, self._thread_local_id, None) if not conn: conn = self.connect() self.ds._prepare_connection(conn, self.name) setattr(connections, self._thread_local_id, conn) return fn(conn) return await asyncio.get_event_loop().run_in_executor( self.ds.executor, in_thread ) async def execute( self, sql, params=None, truncate=False, custom_time_limit=None, page_size=None, log_sql_errors=True, ): """Executes sql against db_name in a thread""" page_size = page_size or self.ds.page_size def sql_operation_in_thread(conn): time_limit_ms = self.ds.sql_time_limit_ms if custom_time_limit and custom_time_limit < time_limit_ms: time_limit_ms = custom_time_limit with sqlite_timelimit(conn, time_limit_ms): try: cursor = conn.cursor() cursor.execute(sql, params if params is not None else {}) max_returned_rows = self.ds.max_returned_rows if max_returned_rows == page_size: max_returned_rows += 1 if max_returned_rows and truncate: rows = cursor.fetchmany(max_returned_rows + 1) truncated = len(rows) > max_returned_rows rows = rows[:max_returned_rows] else: rows = cursor.fetchall() truncated = False except (sqlite3.OperationalError, sqlite3.DatabaseError) as e: if e.args == ("interrupted",): raise QueryInterrupted(e, sql, params) if log_sql_errors: sys.stderr.write( "ERROR: conn={}, sql = {}, params = {}: {}\n".format( conn, repr(sql), params, e ) ) sys.stderr.flush() raise if truncate: return Results(rows, truncated, cursor.description) else: return Results(rows, False, cursor.description) with trace("sql", database=self.name, sql=sql.strip(), params=params): results = await self.execute_fn(sql_operation_in_thread) return results @property def hash(self): if self.cached_hash is not None: return self.cached_hash elif self.is_mutable or self.is_memory: return None elif self.ds.inspect_data and self.ds.inspect_data.get(self.name): self.cached_hash = self.ds.inspect_data[self.name]["hash"] return self.cached_hash else: p = Path(self.path) self.cached_hash = inspect_hash(p) return self.cached_hash @property def size(self): if self.cached_size is not None: return self.cached_size elif self.is_memory: return 0 elif self.is_mutable: return Path(self.path).stat().st_size elif self.ds.inspect_data and self.ds.inspect_data.get(self.name): self.cached_size = self.ds.inspect_data[self.name]["size"] return self.cached_size else: self.cached_size = Path(self.path).stat().st_size return self.cached_size async def table_counts(self, limit=10): if not self.is_mutable and self.cached_table_counts is not None: return self.cached_table_counts # Try to get counts for each table, $limit 
timeout for each count counts = {} for table in await self.table_names(): try: table_count = ( await self.execute( f"select count(*) from (select * from [{table}] limit {self.count_limit + 1})", custom_time_limit=limit, ) ).rows[0][0] counts[table] = table_count # In some cases I saw "SQL Logic Error" here in addition to # QueryInterrupted - so we catch that too: except (QueryInterrupted, sqlite3.OperationalError, sqlite3.DatabaseError): counts[table] = None if not self.is_mutable: self._cached_table_counts = counts return counts @property def mtime_ns(self): if self.is_memory: return None return Path(self.path).stat().st_mtime_ns async def attached_databases(self): # This used to be: # select seq, name, file from pragma_database_list() where seq > 0 # But SQLite prior to 3.16.0 doesn't support pragma functions results = await self.execute("PRAGMA database_list;") # {'seq': 0, 'name': 'main', 'file': ''} return [ AttachedDatabase(*row) for row in results.rows # Filter out the SQLite internal "temp" database, refs #2557 if row["seq"] > 0 and row["name"] != "temp" ] async def table_exists(self, table): results = await self.execute( "select 1 from sqlite_master where type='table' and name=?", params=(table,) ) return bool(results.rows) async def view_exists(self, table): results = await self.execute( "select 1 from sqlite_master where type='view' and name=?", params=(table,) ) return bool(results.rows) async def table_names(self): results = await self.execute( "select name from sqlite_master where type='table'" ) return [r[0] for r in results.rows] async def table_columns(self, table): return await self.execute_fn(lambda conn: table_columns(conn, table)) async def table_column_details(self, table): return await self.execute_fn(lambda conn: table_column_details(conn, table)) async def primary_keys(self, table): return await self.execute_fn(lambda conn: detect_primary_keys(conn, table)) async def fts_table(self, table): return await self.execute_fn(lambda conn: detect_fts(conn, table)) async def label_column_for_table(self, table): explicit_label_column = (await self.ds.table_config(self.name, table)).get( "label_column" ) if explicit_label_column: return explicit_label_column def column_details(conn): # Returns {column_name: (type, is_unique)} db = sqlite_utils.Database(conn) columns = db[table].columns_dict indexes = db[table].indexes details = {} for name in columns: is_unique = any( index for index in indexes if index.columns == [name] and index.unique ) details[name] = (columns[name], is_unique) return details column_details = await self.execute_fn(column_details) # Is there just one unique column that's text? unique_text_columns = [ name for name, (type_, is_unique) in column_details.items() if is_unique and type_ is str ] if len(unique_text_columns) == 1: return unique_text_columns[0] column_names = list(column_details.keys()) # Is there a name or title column? 
name_or_title = [c for c in column_names if c.lower() in ("name", "title")] if name_or_title: return name_or_title[0] # If a table has two columns, one of which is ID, then label_column is the other one if ( column_names and len(column_names) == 2 and ("id" in column_names or "pk" in column_names) and not set(column_names) == {"id", "pk"} ): return [c for c in column_names if c not in ("id", "pk")][0] # Couldn't find a label: return None async def foreign_keys_for_table(self, table): return await self.execute_fn( lambda conn: get_outbound_foreign_keys(conn, table) ) async def hidden_table_names(self): hidden_tables = [] # Add any tables marked as hidden in config db_config = self.ds.config.get("databases", {}).get(self.name, {}) if "tables" in db_config: hidden_tables += [ t for t in db_config["tables"] if db_config["tables"][t].get("hidden") ] if sqlite_version()[1] >= 37: hidden_tables += [ x[0] for x in await self.execute( """ with shadow_tables as ( select name from pragma_table_list where [type] = 'shadow' order by name ), core_tables as ( select name from sqlite_master WHERE name in ('sqlite_stat1', 'sqlite_stat2', 'sqlite_stat3', 'sqlite_stat4') OR substr(name, 1, 1) == '_' ), combined as ( select name from shadow_tables union all select name from core_tables ) select name from combined order by 1 """ ) ] else: hidden_tables += [ x[0] for x in await self.execute( """ WITH base AS ( SELECT name FROM sqlite_master WHERE name IN ('sqlite_stat1', 'sqlite_stat2', 'sqlite_stat3', 'sqlite_stat4') OR substr(name, 1, 1) == '_' ), fts_suffixes AS ( SELECT column1 AS suffix FROM (VALUES ('_data'), ('_idx'), ('_docsize'), ('_content'), ('_config')) ), fts5_names AS ( SELECT name FROM sqlite_master WHERE sql LIKE '%VIRTUAL TABLE%USING FTS%' ), fts5_shadow_tables AS ( SELECT printf('%s%s', fts5_names.name, fts_suffixes.suffix) AS name FROM fts5_names JOIN fts_suffixes ), fts3_suffixes AS ( SELECT column1 AS suffix FROM (VALUES ('_content'), ('_segdir'), ('_segments'), ('_stat'), ('_docsize')) ), fts3_names AS ( SELECT name FROM sqlite_master WHERE sql LIKE '%VIRTUAL TABLE%USING FTS3%' OR sql LIKE '%VIRTUAL TABLE%USING FTS4%' ), fts3_shadow_tables AS ( SELECT printf('%s%s', fts3_names.name, fts3_suffixes.suffix) AS name FROM fts3_names JOIN fts3_suffixes ), final AS ( SELECT name FROM base UNION ALL SELECT name FROM fts5_shadow_tables UNION ALL SELECT name FROM fts3_shadow_tables ) SELECT name FROM final ORDER BY 1 """ ) ] # Also hide any FTS tables that have a content= argument hidden_tables += [ x[0] for x in await self.execute( """ SELECT name FROM sqlite_master WHERE sql LIKE '%VIRTUAL TABLE%' AND sql LIKE '%USING FTS%' AND sql LIKE '%content=%' """ ) ] has_spatialite = await self.execute_fn(detect_spatialite) if has_spatialite: # Also hide Spatialite internal tables hidden_tables += [ "ElementaryGeometries", "SpatialIndex", "geometry_columns", "spatial_ref_sys", "spatialite_history", "sql_statements_log", "sqlite_sequence", "views_geometry_columns", "virts_geometry_columns", "data_licenses", "KNN", "KNN2", ] + [ r[0] for r in ( await self.execute( """ select name from sqlite_master where name like "idx_%" and type = "table" """ ) ).rows ] return hidden_tables async def view_names(self): results = await self.execute("select name from sqlite_master where type='view'") return [r[0] for r in results.rows] async def get_all_foreign_keys(self): return await self.execute_fn(get_all_foreign_keys) async def get_table_definition(self, table, type_="table"): table_definition_rows = list( await 
self.execute( "select sql from sqlite_master where name = :n and type=:t", {"n": table, "t": type_}, ) ) if not table_definition_rows: return None bits = [table_definition_rows[0][0] + ";"] # Add on any indexes index_rows = list( await self.execute( "select sql from sqlite_master where tbl_name = :n and type='index' and sql is not null", {"n": table}, ) ) for index_row in index_rows: bits.append(index_row[0] + ";") return "\n".join(bits) async def get_view_definition(self, view): return await self.get_table_definition(view, "view") def __repr__(self): tags = [] if self.is_mutable: tags.append("mutable") if self.is_memory: tags.append("memory") if self.hash: tags.append(f"hash={self.hash}") if self.size is not None: tags.append(f"size={self.size}") tags_str = "" if tags: tags_str = f" ({', '.join(tags)})" return f"<Database: {self.name}{tags_str}>"
Database
python
huggingface__transformers
src/transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py
{ "start": 4864, "end": 9427 }
class ____(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Qwen2_5OmniAudioEncoder`]. It is used to
    instantiate a Qwen2.5-Omni-Thinker audio encoder according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    audio encoder of the Qwen2-Audio architecture.

    e.g. [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        num_mel_bins (`int`, *optional*, defaults to 128):
            Number of mel features used per input feature. Should correspond to the value used in the
            `Qwen2_5OmniProcessor` class.
        encoder_layers (`int`, *optional*, defaults to 32):
            Number of encoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the Transformer encoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 5120):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
        d_model (`int`, *optional*, defaults to 1280):
            Dimensionality of the layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_function (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by dividing by sqrt(d_model).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        max_source_positions (`int`, *optional*, defaults to 1500):
            The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
        n_window (`int`, *optional*, defaults to 100):
            The chunk for conv and flash attn in AudioEncoder.
        output_dim (`int`, *optional*, defaults to 3584):
            The output dimension of AudioEncoder.
Example: ```python >>> from transformers import Qwen2_5OmniAudioEncoderConfig, Qwen2_5OmniAudioEncoder >>> # Initializing a Qwen2_5OmniAudioEncoderConfig >>> configuration = Qwen2_5OmniAudioEncoderConfig() >>> # Initializing a Qwen2_5OmniAudioEncoder (with random weights) >>> model = Qwen2_5OmniAudioEncoder(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "qwen2_5_omni_audio_encoder" def __init__( self, num_mel_bins=128, encoder_layers=32, encoder_attention_heads=20, encoder_ffn_dim=5120, d_model=1280, dropout=0, attention_dropout=0, activation_function="gelu", activation_dropout=0, scale_embedding=False, initializer_range=0.02, max_source_positions=1500, n_window=100, output_dim=3584, **kwargs, ): super().__init__(**kwargs) self.num_mel_bins = num_mel_bins self.d_model = d_model self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.encoder_ffn_dim = encoder_ffn_dim self.dropout = dropout self.attention_dropout = attention_dropout self.activation_function = activation_function self.activation_dropout = activation_dropout self.num_hidden_layers = encoder_layers self.initializer_range = initializer_range self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.max_source_positions = max_source_positions self.n_window = n_window self.output_dim = output_dim
Qwen2_5OmniAudioEncoderConfig
python
coleifer__peewee
tests/regressions.py
{ "start": 16504, "end": 17106 }
class ____(ModelTestCase): requires = [User] @skip_if(IS_SQLITE_OLD or IS_MYSQL) def test_select_value_conversion(self): u1 = User.create(username='u1') cte = User.select(User.id.cast('text')).cte('tmp', columns=('id',)) query = User.select(cte.c.id.alias('id')).with_cte(cte).from_(cte) u1_id, = [user.id for user in query] self.assertEqual(u1_id, u1.id) query2 = User.select(cte.c.id.coerce(False)).with_cte(cte).from_(cte) u1_id, = [user.id for user in query2] self.assertEqual(u1_id, str(u1.id))
TestSelectValueConversion
python
joke2k__faker
faker/providers/date_time/gu_IN/__init__.py
{ "start": 46, "end": 1763 }
class ____(DateTimeProvider): DAY_NAMES = { "0": "Ravivar", "1": "Somvar", "2": "Mangalvar", "3": "Budhvar", "4": "Guruvar", "5": "Shukravar", "6": "Shanivar", } DAY_NAMES_IN_GUJARATI = { "0": "રવિવાર", "1": "સોમવાર", "2": "મંગળવાર", "3": "બુધવાર", "4": "ગુરુવાર", "5": "શુક્રવાર", "6": "શનિવાર", } MONTH_NAMES = { "01": "Kartak", "02": "Magshar", "03": "Posh", "04": "Maha", "05": "Fagan", "06": "Chaitra", "07": "Vaishakh", "08": "Jeth", "09": "Ashadh", "10": "Shravan", "11": "Bhadarvo", "12": "Aaso", } MONTH_NAMES_IN_GUJARATI = { "01": "કારતક", "02": "માગશર", "03": "પોષ", "04": "મહા", "05": "ફાગણ", "06": "ચૈત્ર", "07": "વૈશાખ", "08": "જેઠ", "09": "અષાઢ", "10": "શ્રાવણ", "11": "ભાદરવો", "12": "આસો", } def day_of_week(self) -> str: day = self.date("%w") return self.DAY_NAMES[day] def month_name(self) -> str: month = self.month() return self.MONTH_NAMES[month] def day_of_week_in_guj(self) -> str: """Returns day of the week in `Gujarati`""" day = self.date("%w") return self.DAY_NAMES_IN_GUJARATI[day] def month_name_in_guj(self) -> str: """Returns month name in `Gujarati`""" month = self.month() return self.MONTH_NAMES_IN_GUJARATI[month] def month_in_guj(self) -> str: """Returns month name in `Gujarati`""" return self.month_name_in_guj()
Provider
python
qdrant__qdrant-client
qdrant_client/local/sparse_distances.py
{ "start": 409, "end": 1549 }
class ____: def __init__( self, positive: Optional[list[SparseVector]] = None, negative: Optional[list[SparseVector]] = None, strategy: Optional[types.RecommendStrategy] = None, ): assert strategy is not None, "Recommend strategy must be provided" self.strategy = strategy positive = positive if positive is not None else [] negative = negative if negative is not None else [] for i, vector in enumerate(positive): validate_sparse_vector(vector) positive[i] = sort_sparse_vector(vector) for i, vector in enumerate(negative): validate_sparse_vector(vector) negative[i] = sort_sparse_vector(vector) self.positive = positive self.negative = negative def transform_sparse( self, foo: Callable[["SparseVector"], "SparseVector"] ) -> "SparseRecoQuery": return SparseRecoQuery( positive=[foo(vector) for vector in self.positive], negative=[foo(vector) for vector in self.negative], strategy=self.strategy, )
SparseRecoQuery
python
doocs__leetcode
solution/2400-2499/2462.Total Cost to Hire K Workers/Solution.py
{ "start": 0, "end": 772 }
class ____: def totalCost(self, costs: List[int], k: int, candidates: int) -> int: n = len(costs) if candidates * 2 >= n: return sum(sorted(costs)[:k]) pq = [] for i, c in enumerate(costs[:candidates]): heappush(pq, (c, i)) for i in range(n - candidates, n): heappush(pq, (costs[i], i)) heapify(pq) l, r = candidates, n - candidates - 1 ans = 0 for _ in range(k): c, i = heappop(pq) ans += c if l > r: continue if i < l: heappush(pq, (costs[l], l)) l += 1 else: heappush(pq, (costs[r], r)) r -= 1 return ans
Solution
python
tensorflow__tensorflow
tensorflow/python/client/session.py
{ "start": 22214, "end": 23335 }
class ____(object): """Struct-like object describing a device's attributes. Each device has 3 key properties: - name: the fully-qualified TensorFlow path to the device. For example: /job:worker/replica:0/task:3/device:CPU:0 - device_type: the type of the device (e.g. CPU, GPU, TPU, etc.) - memory_limit_bytes: the maximum amount of memory available on the device (in bytes). """ def __init__(self, name, device_type, memory_limit_bytes, incarnation): self._name = device.canonical_name(name) self._device_type = device_type self._memory_limit_bytes = memory_limit_bytes self._incarnation = incarnation @property def name(self): return self._name @property def device_type(self): return self._device_type @property def memory_limit_bytes(self): return self._memory_limit_bytes @property def incarnation(self): return self._incarnation def __repr__(self): return '_DeviceAttributes(%s, %s, %d, %d)' % ( self.name, self.device_type, self.memory_limit_bytes, self.incarnation, )
_DeviceAttributes
python
allegroai__clearml
clearml/backend_api/services/v2_23/datasets.py
{ "start": 193764, "end": 195848 }
class ____(Response): """ Response of datasets.get_tags endpoint. :param tags: The list of unique tag values :type tags: Sequence[str] :param system_tags: The list of unique system tag values. Returned only if 'include_system' is set to 'true' in the request :type system_tags: Sequence[str] """ _service = "datasets" _action = "get_tags" _version = "2.23" _schema = { "definitions": {}, "properties": { "system_tags": { "description": ( "The list of unique system tag values. Returned only if 'include_system' is set to " "'true' in the request" ), "items": {"type": "string"}, "type": ["array", "null"], }, "tags": { "description": "The list of unique tag values", "items": {"type": "string"}, "type": ["array", "null"], }, }, "type": "object", } def __init__(self, tags=None, system_tags=None, **kwargs): super(GetTagsResponse, self).__init__(**kwargs) self.tags = tags self.system_tags = system_tags @schema_property("tags") def tags(self): return self._property_tags @tags.setter def tags(self, value): if value is None: self._property_tags = None return self.assert_isinstance(value, "tags", (list, tuple)) self.assert_isinstance(value, "tags", six.string_types, is_array=True) self._property_tags = value @schema_property("system_tags") def system_tags(self): return self._property_system_tags @system_tags.setter def system_tags(self, value): if value is None: self._property_system_tags = None return self.assert_isinstance(value, "system_tags", (list, tuple)) self.assert_isinstance(value, "system_tags", six.string_types, is_array=True) self._property_system_tags = value
GetTagsResponse
python
ray-project__ray
python/ray/data/_internal/planner/exchange/aggregate_task_spec.py
{ "start": 438, "end": 4129 }
class ____(ExchangeTaskSpec): """ The implementation for sort-based aggregate tasks. Aggregate is done in 2 steps: partial aggregate of individual blocks, and final aggregate of sorted blocks. Partial aggregate (`map`): each block is sorted locally, then partitioned into smaller blocks according to the boundaries. Each partitioned block is aggregated separately, then passed to a final aggregate task. Final aggregate (`reduce`): each task would receive a block from every worker that consists of items in a certain range. It then merges the sorted blocks and aggregates on-the-fly. """ def __init__( self, boundaries: List[KeyType], key: SortKey, aggs: List[AggregateFn], batch_format: str, ): super().__init__( map_args=[boundaries, key, aggs], reduce_args=[key, aggs, batch_format], ) @staticmethod def map( idx: int, block: Block, output_num_blocks: int, boundaries: List[KeyType], sort_key: SortKey, aggs: List[AggregateFn], ) -> List[Union[Block, "BlockMetadataWithSchema"]]: stats = BlockExecStats.builder() block = SortAggregateTaskSpec._prune_unused_columns(block, sort_key, aggs) if sort_key.get_columns(): partitions = BlockAccessor.for_block(block).sort_and_partition( boundaries, sort_key, ) else: partitions = [block] parts = [ BlockAccessor.for_block(p)._aggregate(sort_key, aggs) for p in partitions ] from ray.data.block import BlockMetadataWithSchema meta_with_schema = BlockMetadataWithSchema.from_block( block, stats=stats.build() ) return parts + [meta_with_schema] @staticmethod def reduce( key: SortKey, aggs: List[AggregateFn], batch_format: str, *mapper_outputs: List[Block], partial_reduce: bool = False, ) -> Tuple[Block, "BlockMetadataWithSchema"]: normalized_blocks = TableBlockAccessor.normalize_block_types( mapper_outputs, target_block_type=ExchangeTaskSpec._derive_target_block_type(batch_format), ) blocks, meta_with_schema = BlockAccessor.for_block( normalized_blocks[0] )._combine_aggregated_blocks( list(normalized_blocks), key, aggs, finalize=not partial_reduce ) return blocks, meta_with_schema @staticmethod def _prune_unused_columns( block: Block, sort_key: SortKey, aggs: Tuple[AggregateFn], ) -> Block: """Prune unused columns from block before aggregate.""" prune_columns = True columns = set() key = sort_key.get_columns() if isinstance(key, str): columns.add(key) elif isinstance(key, list): columns.update(key) elif callable(key): prune_columns = False for agg in aggs: if isinstance(agg, AggregateFnV2) and agg.get_target_column(): columns.add(agg.get_target_column()) elif not isinstance(agg, Count): # Don't prune columns if any aggregate key is not string. prune_columns = False block_accessor = BlockAccessor.for_block(block) if ( prune_columns and isinstance(block_accessor, TableBlockAccessor) and block_accessor.num_rows() > 0 ): return block_accessor.select(list(columns)) else: return block
SortAggregateTaskSpec
python
lazyprogrammer__machine_learning_examples
rl3/flappy2envs.py
{ "start": 1602, "end": 4306 }
class ____: def __init__(self, D, M, K, f=relu): self.D = D self.M = M self.K = K self.f = f def init(self): D, M, K = self.D, self.M, self.K self.W1 = np.random.randn(D, M) / np.sqrt(D) # self.W1 = np.zeros((D, M)) self.b1 = np.zeros(M) self.W2 = np.random.randn(M, K) / np.sqrt(M) # self.W2 = np.zeros((M, K)) self.b2 = np.zeros(K) def forward(self, X): Z = self.f(X.dot(self.W1) + self.b1) return softmax(Z.dot(self.W2) + self.b2) def sample_action(self, x): # assume input is a single state of size (D,) # first make it (N, D) to fit ML conventions X = np.atleast_2d(x) P = self.forward(X) p = P[0] # the first row # return np.random.choice(len(p), p=p) return np.argmax(p) def score(self, X, Y): P = np.argmax(self.forward(X), axis=1) return np.mean(Y == P) def get_params(self): # return a flat array of parameters return np.concatenate([self.W1.flatten(), self.b1, self.W2.flatten(), self.b2]) def get_params_dict(self): return { 'W1': self.W1, 'b1': self.b1, 'W2': self.W2, 'b2': self.b2, } def set_params(self, params): # params is a flat list # unflatten into individual weights D, M, K = self.D, self.M, self.K self.W1 = params[:D * M].reshape(D, M) self.b1 = params[D * M:D * M + M] self.W2 = params[D * M + M:D * M + M + M * K].reshape(M, K) self.b2 = params[-K:] env1, env2 = Env(), Env() def reward_function(params, env): model = ANN(D, M, K) model.set_params(params) # play one episode and return the total reward episode_reward = 0 episode_length = 0 # not sure if it will be used done = False obs = env.reset() obs_dim = len(obs) if HISTORY_LENGTH > 1: state = np.zeros(HISTORY_LENGTH*obs_dim) # current state state[obs_dim:] = obs else: state = obs while not done: # get the action action = model.sample_action(state) # perform the action obs, reward, done = env.step(action) # update total reward episode_reward += reward episode_length += 1 # update state if HISTORY_LENGTH > 1: state = np.roll(state, -obs_dim) state[-obs_dim:] = obs else: state = obs print("Reward:", episode_reward) if __name__ == '__main__': j = np.load('es_flappy_results.npz') best_params = np.concatenate([j['W1'].flatten(), j['b1'], j['W2'].flatten(), j['b2']]) # in case D isn't correct D, M = j['W1'].shape K = len(j['b2']) t1 = Thread(target=reward_function, args=(best_params, env1)) t2 = Thread(target=reward_function, args=(best_params, env2)) t1.start() t2.start() t1.join() t2.join()
ANN
python
doocs__leetcode
solution/1700-1799/1743.Restore the Array From Adjacent Pairs/Solution.py
{ "start": 0, "end": 540 }
class ____: def restoreArray(self, adjacentPairs: List[List[int]]) -> List[int]: g = defaultdict(list) for a, b in adjacentPairs: g[a].append(b) g[b].append(a) n = len(adjacentPairs) + 1 ans = [0] * n for i, v in g.items(): if len(v) == 1: ans[0] = i ans[1] = v[0] break for i in range(2, n): v = g[ans[i - 1]] ans[i] = v[0] if v[1] == ans[i - 2] else v[1] return ans
Solution
python
google__jax
tests/pallas/mosaic_gpu_test.py
{ "start": 158412, "end": 158532 }
class ____( PallasCallSm100ATest, lowering_semantics=plgpu.LoweringSemantics.Warpgroup ): ...
PallasCallSm100AWGTest
python
kamyu104__LeetCode-Solutions
Python/trapping-rain-water.py
{ "start": 1200, "end": 1702 }
class ____(object): def trap(self, height): """ :type height: List[int] :rtype: int """ right = [0]*len(height) mx = 0 for i in reversed(xrange(len(height))): right[i] = mx mx = max(mx, height[i]) result = left = 0 for i in xrange(len(height)): left = max(left, height[i]) result += max(min(left, right[i])-height[i], 0) return result # Time: O(n) # Space: O(n)
Solution3
python
fastapi__sqlmodel
docs_src/tutorial/connect/create_tables/tutorial001.py
{ "start": 83, "end": 237 }
class ____(SQLModel, table=True): id: Optional[int] = Field(default=None, primary_key=True) name: str = Field(index=True) headquarters: str
Team
python
openai__openai-python
src/openai/pagination.py
{ "start": 496, "end": 552 }
class ____(Protocol): id: Optional[str]
CursorPageItem
python
sympy__sympy
sympy/combinatorics/graycode.py
{ "start": 55, "end": 11205 }
class ____(Basic): """ A Gray code is essentially a Hamiltonian walk on a n-dimensional cube with edge length of one. The vertices of the cube are represented by vectors whose values are binary. The Hamilton walk visits each vertex exactly once. The Gray code for a 3d cube is ['000','100','110','010','011','111','101', '001']. A Gray code solves the problem of sequentially generating all possible subsets of n objects in such a way that each subset is obtained from the previous one by either deleting or adding a single object. In the above example, 1 indicates that the object is present, and 0 indicates that its absent. Gray codes have applications in statistics as well when we want to compute various statistics related to subsets in an efficient manner. Examples ======== >>> from sympy.combinatorics import GrayCode >>> a = GrayCode(3) >>> list(a.generate_gray()) ['000', '001', '011', '010', '110', '111', '101', '100'] >>> a = GrayCode(4) >>> list(a.generate_gray()) ['0000', '0001', '0011', '0010', '0110', '0111', '0101', '0100', \ '1100', '1101', '1111', '1110', '1010', '1011', '1001', '1000'] References ========== .. [1] Nijenhuis,A. and Wilf,H.S.(1978). Combinatorial Algorithms. Academic Press. .. [2] Knuth, D. (2011). The Art of Computer Programming, Vol 4 Addison Wesley """ _skip = False _current = 0 _rank = None def __new__(cls, n, *args, **kw_args): """ Default constructor. It takes a single argument ``n`` which gives the dimension of the Gray code. The starting Gray code string (``start``) or the starting ``rank`` may also be given; the default is to start at rank = 0 ('0...0'). Examples ======== >>> from sympy.combinatorics import GrayCode >>> a = GrayCode(3) >>> a GrayCode(3) >>> a.n 3 >>> a = GrayCode(3, start='100') >>> a.current '100' >>> a = GrayCode(4, rank=4) >>> a.current '0110' >>> a.rank 4 """ if n < 1 or int(n) != n: raise ValueError( 'Gray code dimension must be a positive integer, not %i' % n) n = Integer(n) args = (n,) + args obj = Basic.__new__(cls, *args) if 'start' in kw_args: obj._current = kw_args["start"] if len(obj._current) > n: raise ValueError('Gray code start has length %i but ' 'should not be greater than %i' % (len(obj._current), n)) elif 'rank' in kw_args: if int(kw_args["rank"]) != kw_args["rank"]: raise ValueError('Gray code rank must be a positive integer, ' 'not %i' % kw_args["rank"]) obj._rank = int(kw_args["rank"]) % obj.selections obj._current = obj.unrank(n, obj._rank) return obj def next(self, delta=1): """ Returns the Gray code a distance ``delta`` (default = 1) from the current value in canonical order. Examples ======== >>> from sympy.combinatorics import GrayCode >>> a = GrayCode(3, start='110') >>> a.next().current '111' >>> a.next(-1).current '010' """ return GrayCode(self.n, rank=(self.rank + delta) % self.selections) @property def selections(self): """ Returns the number of bit vectors in the Gray code. Examples ======== >>> from sympy.combinatorics import GrayCode >>> a = GrayCode(3) >>> a.selections 8 """ return 2**self.n @property def n(self): """ Returns the dimension of the Gray code. Examples ======== >>> from sympy.combinatorics import GrayCode >>> a = GrayCode(5) >>> a.n 5 """ return self.args[0] def generate_gray(self, **hints): """ Generates the sequence of bit vectors of a Gray Code. 
Examples ======== >>> from sympy.combinatorics import GrayCode >>> a = GrayCode(3) >>> list(a.generate_gray()) ['000', '001', '011', '010', '110', '111', '101', '100'] >>> list(a.generate_gray(start='011')) ['011', '010', '110', '111', '101', '100'] >>> list(a.generate_gray(rank=4)) ['110', '111', '101', '100'] See Also ======== skip References ========== .. [1] Knuth, D. (2011). The Art of Computer Programming, Vol 4, Addison Wesley """ bits = self.n start = None if "start" in hints: start = hints["start"] elif "rank" in hints: start = GrayCode.unrank(self.n, hints["rank"]) if start is not None: self._current = start current = self.current graycode_bin = gray_to_bin(current) if len(graycode_bin) > self.n: raise ValueError('Gray code start has length %i but should ' 'not be greater than %i' % (len(graycode_bin), bits)) self._current = int(current, 2) graycode_int = int(''.join(graycode_bin), 2) for i in range(graycode_int, 1 << bits): if self._skip: self._skip = False else: yield self.current bbtc = (i ^ (i + 1)) gbtc = (bbtc ^ (bbtc >> 1)) self._current = (self._current ^ gbtc) self._current = 0 def skip(self): """ Skips the bit generation. Examples ======== >>> from sympy.combinatorics import GrayCode >>> a = GrayCode(3) >>> for i in a.generate_gray(): ... if i == '010': ... a.skip() ... print(i) ... 000 001 011 010 111 101 100 See Also ======== generate_gray """ self._skip = True @property def rank(self): """ Ranks the Gray code. A ranking algorithm determines the position (or rank) of a combinatorial object among all the objects w.r.t. a given order. For example, the 4 bit binary reflected Gray code (BRGC) '0101' has a rank of 6 as it appears in the 6th position in the canonical ordering of the family of 4 bit Gray codes. Examples ======== >>> from sympy.combinatorics import GrayCode >>> a = GrayCode(3) >>> list(a.generate_gray()) ['000', '001', '011', '010', '110', '111', '101', '100'] >>> GrayCode(3, start='100').rank 7 >>> GrayCode(3, rank=7).current '100' See Also ======== unrank References ========== .. [1] https://web.archive.org/web/20200224064753/http://statweb.stanford.edu/~susan/courses/s208/node12.html """ if self._rank is None: self._rank = int(gray_to_bin(self.current), 2) return self._rank @property def current(self): """ Returns the currently referenced Gray code as a bit string. Examples ======== >>> from sympy.combinatorics import GrayCode >>> GrayCode(3, start='100').current '100' """ rv = self._current or '0' if not isinstance(rv, str): rv = f'{rv:b}' return rv.rjust(self.n, '0') @classmethod def unrank(self, n, rank): """ Unranks an n-bit sized Gray code of rank k. This method exists so that a derivative GrayCode class can define its own code of a given rank. The string here is generated in reverse order to allow for tail-call optimization. Examples ======== >>> from sympy.combinatorics import GrayCode >>> GrayCode(5, rank=3).current '00010' >>> GrayCode.unrank(5, 3) '00010' See Also ======== rank """ def _unrank(k, n): if n == 1: return str(k % 2) m = 2**(n - 1) if k < m: return '0' + _unrank(k, n - 1) return '1' + _unrank(m - (k % m) - 1, n - 1) return _unrank(rank, n) def random_bitstring(n): """ Generates a random bitlist of length n. Examples ======== >>> from sympy.combinatorics.graycode import random_bitstring >>> random_bitstring(3) # doctest: +SKIP 100 """ return ''.join([random.choice('01') for i in range(n)]) def gray_to_bin(bin_list): """ Convert from Gray coding to binary coding. We assume big endian encoding. 
Examples ======== >>> from sympy.combinatorics.graycode import gray_to_bin >>> gray_to_bin('100') '111' See Also ======== bin_to_gray """ b = [bin_list[0]] for i in range(1, len(bin_list)): b += str(int(b[i - 1] != bin_list[i])) return ''.join(b) def bin_to_gray(bin_list): """ Convert from binary coding to gray coding. We assume big endian encoding. Examples ======== >>> from sympy.combinatorics.graycode import bin_to_gray >>> bin_to_gray('111') '100' See Also ======== gray_to_bin """ b = [bin_list[0]] for i in range(1, len(bin_list)): b += str(int(bin_list[i]) ^ int(bin_list[i - 1])) return ''.join(b) def get_subset_from_bitstring(super_set, bitstring): """ Gets the subset defined by the bitstring. Examples ======== >>> from sympy.combinatorics.graycode import get_subset_from_bitstring >>> get_subset_from_bitstring(['a', 'b', 'c', 'd'], '0011') ['c', 'd'] >>> get_subset_from_bitstring(['c', 'a', 'c', 'c'], '1100') ['c', 'a'] See Also ======== graycode_subsets """ if len(super_set) != len(bitstring): raise ValueError("The sizes of the lists are not equal") return [super_set[i] for i, j in enumerate(bitstring) if bitstring[i] == '1'] def graycode_subsets(gray_code_set): """ Generates the subsets as enumerated by a Gray code. Examples ======== >>> from sympy.combinatorics.graycode import graycode_subsets >>> list(graycode_subsets(['a', 'b', 'c'])) [[], ['c'], ['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'], \ ['a', 'c'], ['a']] >>> list(graycode_subsets(['a', 'b', 'c', 'c'])) [[], ['c'], ['c', 'c'], ['c'], ['b', 'c'], ['b', 'c', 'c'], \ ['b', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c'], ['a', 'b', 'c', 'c'], \ ['a', 'b', 'c'], ['a', 'c'], ['a', 'c', 'c'], ['a', 'c'], ['a']] See Also ======== get_subset_from_bitstring """ for bitstring in list(GrayCode(len(gray_code_set)).generate_gray()): yield get_subset_from_bitstring(gray_code_set, bitstring)
GrayCode
python
matplotlib__matplotlib
lib/matplotlib/projections/polar.py
{ "start": 25947, "end": 27914 }
class ____(mtransforms.Bbox): """ Transform (theta, r) wedge Bbox into Axes bounding box. Parameters ---------- center : (float, float) Center of the wedge viewLim : `~matplotlib.transforms.Bbox` Bbox determining the boundaries of the wedge originLim : `~matplotlib.transforms.Bbox` Bbox determining the origin for the wedge, if different from *viewLim* """ def __init__(self, center, viewLim, originLim, **kwargs): super().__init__([[0, 0], [1, 1]], **kwargs) self._center = center self._viewLim = viewLim self._originLim = originLim self.set_children(viewLim, originLim) __str__ = mtransforms._make_str_method("_center", "_viewLim", "_originLim") def get_points(self): # docstring inherited if self._invalid: points = self._viewLim.get_points().copy() # Scale angular limits to work with Wedge. points[:, 0] *= 180 / np.pi if points[0, 0] > points[1, 0]: points[:, 0] = points[::-1, 0] # Scale radial limits based on origin radius. points[:, 1] -= self._originLim.y0 # Scale radial limits to match axes limits. rscale = 0.5 / points[1, 1] points[:, 1] *= rscale width = min(points[1, 1] - points[0, 1], 0.5) # Generate bounding box for wedge. wedge = mpatches.Wedge(self._center, points[1, 1], points[0, 0], points[1, 0], width=width) self.update_from_path(wedge.get_path()) # Ensure equal aspect ratio. w, h = self._points[1] - self._points[0] deltah = max(w - h, 0) / 2 deltaw = max(h - w, 0) / 2 self._points += np.array([[-deltaw, -deltah], [deltaw, deltah]]) self._invalid = 0 return self._points
_WedgeBbox
python
huggingface__transformers
src/transformers/models/patchtst/modeling_patchtst.py
{ "start": 25165, "end": 26892 }
class ____(nn.Module): def __init__(self, config: PatchTSTConfig): super().__init__() self.num_input_channels = config.num_input_channels self.share_embedding = config.share_embedding # Input encoding: projection of feature vectors onto a d-dim vector space if self.share_embedding: self.input_embedding = nn.Linear(config.patch_length, config.d_model) else: self.input_embedding = nn.ModuleList() for _ in range(config.num_input_channels): self.input_embedding.append(nn.Linear(config.patch_length, config.d_model)) def forward(self, patch_input: torch.Tensor): """ Parameters: patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): Patch input for embedding return: `torch.Tensor` of shape `(batch_size, num_channels, num_patches, d_model)` """ # Input encoding num_input_channels = patch_input.shape[1] if num_input_channels != self.num_input_channels: raise ValueError( f"The defined number of input channels ({self.num_input_channels}) in the config " f"has to be the same as the number of channels in the batch input ({num_input_channels})" ) if self.share_embedding: embeddings = self.input_embedding(patch_input) # x: [bs x num_channels x num_patches x d_model] else: embeddings = [self.input_embedding[i](patch_input[:, i, :, :]) for i in range(num_input_channels)] embeddings = torch.stack(embeddings, dim=1) return embeddings
PatchTSTEmbedding
python
RaRe-Technologies__gensim
gensim/similarities/levenshtein.py
{ "start": 576, "end": 4526 }
class ____(TermSimilarityIndex): r""" Retrieve the most similar terms from a static set of terms ("dictionary") given a query term, using Levenshtein similarity. "Levenshtein similarity" is a modification of the Levenshtein (edit) distance, defined in [charletetal17]_. This implementation uses the :class:`~gensim.similarities.fastss.FastSS` algorithm for fast kNN nearest-neighbor retrieval. Parameters ---------- dictionary : :class:`~gensim.corpora.dictionary.Dictionary` A dictionary that specifies the considered terms. alpha : float, optional Multiplicative factor `alpha` for the Levenshtein similarity. See [charletetal17]_. beta : float, optional The exponential factor `beta` for the Levenshtein similarity. See [charletetal17]_. max_distance : int, optional Do not consider terms with Levenshtein distance larger than this as "similar". This is done for performance reasons: keep this value below 3 for reasonable retrieval performance. Default is 1. See Also -------- :class:`~gensim.similarities.termsim.WordEmbeddingSimilarityIndex` Retrieve most similar terms for a given term using the cosine similarity over word embeddings. :class:`~gensim.similarities.termsim.SparseTermSimilarityMatrix` Build a term similarity matrix and compute the Soft Cosine Measure. References ---------- .. [charletetal17] Delphine Charlet and Geraldine Damnati, "SimBow at SemEval-2017 Task 3: Soft-Cosine Semantic Similarity between Questions for Community Question Answering", 2017, https://www.aclweb.org/anthology/S17-2051/. """ def __init__(self, dictionary, alpha=1.8, beta=5.0, max_distance=2): self.dictionary = dictionary self.alpha = alpha self.beta = beta self.max_distance = max_distance logger.info("creating FastSS index from %s", dictionary) self.index = FastSS(words=self.dictionary.values(), max_dist=max_distance) super(LevenshteinSimilarityIndex, self).__init__() def levsim(self, t1, t2, distance): """Calculate the Levenshtein similarity between two terms given their Levenshtein distance.""" max_lengths = max(len(t1), len(t2)) or 1 return self.alpha * (1.0 - distance * 1.0 / max_lengths)**self.beta def most_similar(self, t1, topn=10): """kNN fuzzy search: find the `topn` most similar terms from `self.dictionary` to `t1`.""" result = {} # map of {dictionary term => its levenshtein similarity to t1} if self.max_distance > 0: effective_topn = topn + 1 if t1 in self.dictionary.token2id else topn effective_topn = min(len(self.dictionary), effective_topn) # Implement a "distance backoff" algorithm: # Start with max_distance=1, for performance. And if that doesn't return enough results, # continue with max_distance=2 etc, all the way until self.max_distance which # is a hard cutoff. # At that point stop searching, even if we don't have topn results yet. # # We use the backoff algo to speed up queries for short terms. These return enough results already # with max_distance=1. # # See the discussion at https://github.com/RaRe-Technologies/gensim/pull/3146 for distance in range(1, self.max_distance + 1): for t2 in self.index.query(t1, distance).get(distance, []): if t1 == t2: continue similarity = self.levsim(t1, t2, distance) if similarity > 0: result[t2] = similarity if len(result) >= effective_topn: break return sorted(result.items(), key=lambda x: (-x[1], x[0]))[:topn]
LevenshteinSimilarityIndex
python
kubernetes-client__python
kubernetes/client/models/v1_resource_health.py
{ "start": 383, "end": 5453 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'health': 'str', 'resource_id': 'str' } attribute_map = { 'health': 'health', 'resource_id': 'resourceID' } def __init__(self, health=None, resource_id=None, local_vars_configuration=None): # noqa: E501 """V1ResourceHealth - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._health = None self._resource_id = None self.discriminator = None if health is not None: self.health = health self.resource_id = resource_id @property def health(self): """Gets the health of this V1ResourceHealth. # noqa: E501 Health of the resource. can be one of: - Healthy: operates as normal - Unhealthy: reported unhealthy. We consider this a temporary health issue since we do not have a mechanism today to distinguish temporary and permanent issues. - Unknown: The status cannot be determined. For example, Device Plugin got unregistered and hasn't been re-registered since. In future we may want to introduce the PermanentlyUnhealthy Status. # noqa: E501 :return: The health of this V1ResourceHealth. # noqa: E501 :rtype: str """ return self._health @health.setter def health(self, health): """Sets the health of this V1ResourceHealth. Health of the resource. can be one of: - Healthy: operates as normal - Unhealthy: reported unhealthy. We consider this a temporary health issue since we do not have a mechanism today to distinguish temporary and permanent issues. - Unknown: The status cannot be determined. For example, Device Plugin got unregistered and hasn't been re-registered since. In future we may want to introduce the PermanentlyUnhealthy Status. # noqa: E501 :param health: The health of this V1ResourceHealth. # noqa: E501 :type: str """ self._health = health @property def resource_id(self): """Gets the resource_id of this V1ResourceHealth. # noqa: E501 ResourceID is the unique identifier of the resource. See the ResourceID type for more information. # noqa: E501 :return: The resource_id of this V1ResourceHealth. # noqa: E501 :rtype: str """ return self._resource_id @resource_id.setter def resource_id(self, resource_id): """Sets the resource_id of this V1ResourceHealth. ResourceID is the unique identifier of the resource. See the ResourceID type for more information. # noqa: E501 :param resource_id: The resource_id of this V1ResourceHealth. 
# noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and resource_id is None: # noqa: E501 raise ValueError("Invalid value for `resource_id`, must not be `None`") # noqa: E501 self._resource_id = resource_id def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1ResourceHealth): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1ResourceHealth): return True return self.to_dict() != other.to_dict()
V1ResourceHealth
python
tensorflow__tensorflow
tensorflow/lite/python/lite.py
{ "start": 63612, "end": 70729 }
class ____(TFLiteConverterBaseV2): """Converts the given Keras model into TensorFlow Lite model.""" def __init__(self, keras_model, trackable_obj=None): """Constructor for TFLiteConverter. Args: keras_model: tf.Keras.Model. trackable_obj: tf.AutoTrackable object associated with `funcs`. A reference to this object needs to be maintained so that Variables do not get garbage collected since functions have a weak reference to Variables. This is only required when the tf.AutoTrackable object is not maintained by the user (e.g. `from_saved_model`). """ super(TFLiteKerasModelConverterV2, self).__init__() self._keras_model = keras_model self._trackable_obj = trackable_obj self.experimental_lower_to_saved_model = True @convert_phase( Component.PREPARE_TF_MODEL, SubComponent.CONVERT_KERAS_TO_SAVED_MODEL ) def _convert_keras_to_saved_model(self, output_dir): """Save Keras model to the SavedModel format. Args: output_dir: The output directory to save the SavedModel. Returns: graph_def: The frozen GraphDef. input_tensors: List of input tensors. output_tensors: List of output tensors. """ try: def _is_keras_3(): """Returns true if _keras_model is a Keras 3+ model.""" try: import keras # pylint: disable=g-import-not-at-top return keras.__version__.startswith("3") and isinstance( self._keras_model, keras.layers.Layer ) except ImportError: return False if _is_keras_3(): import keras # pylint: disable=g-import-not-at-top # Keras 3 model `export` by default saves model.__call__ with # training=True. Need to export the model call with training=False for # inference only and TFLite conversion. export_archive = keras.export.ExportArchive() export_archive.track(self._keras_model) if isinstance( self._keras_model, (keras.src.models.Functional, keras.src.models.Sequential), ): input_signature = nest.map_structure( lambda x: tensor_spec.TensorSpec( x.shape, dtype=x.dtype, name=x.name ), self._keras_model.inputs, ) if isinstance(input_signature, list) and len(input_signature) > 1: input_signature = [input_signature] else: save_spec = _get_save_spec(self._keras_model) if not save_spec: raise ValueError( "The model provided has never been called. " "It must be called at least once before export." ) input_signature = [save_spec] inference_fn = functools.partial( self._keras_model.__call__, training=False ) export_archive.add_endpoint("serve", inference_fn, input_signature) export_archive.write_out(output_dir) else: _save.save( self._keras_model, output_dir, options=_save_options.SaveOptions(save_debug_info=True), ) except Exception: # pylint: disable=broad-except # When storing the given keras model to a saved model is failed, let's # use original keras model conversion pipeline. return None, None, None self.saved_model_dir = output_dir self._saved_model_tags = set([_tag_constants.SERVING]) self._saved_model_exported_names = [ _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY ] self._parse_saved_model_args( always_enable_saved_model_import=self.experimental_lower_to_saved_model ) if self.saved_model_dir: graph_def, input_tensors, output_tensors = self._load_saved_model( self.saved_model_dir, self._saved_model_tags ) self._trackable_obj = _load(self.saved_model_dir, self._saved_model_tags) return graph_def, input_tensors, output_tensors return None, None, None @convert_phase(Component.PREPARE_TF_MODEL, SubComponent.FREEZE_KERAS_MODEL) def _freeze_keras_model(self): """Freeze Keras model to frozen graph. Returns: graph_def: The frozen GraphDef. input_tensors: List of input tensors. 
output_tensors: List of output tensors. frozen_func: The frozen ConcreteFunction. """ input_signature = None # If the model's call is not a `tf.function`, then we need to first get its # input signature from `model_input_signature` method. We can't directly # call `trace_model_call` because otherwise the batch dimension is set # to None. # Once we have better support for dynamic shapes, we can remove this. if not isinstance(self._keras_model.call, _def_function.Function): # Pass `keep_original_batch_size=True` will ensure that we get an input # signature including the batch dimension specified by the user. # TODO(b/169898786): Use the Keras public API when TFLite moves out of TF input_signature = _model_input_signature( self._keras_model, keep_original_batch_size=True ) # TODO(b/169898786): Use the Keras public API when TFLite moves out of TF func = _trace_model_call(self._keras_model, input_signature) concrete_func = func.get_concrete_function() self._funcs = [concrete_func] frozen_func, graph_def = ( _convert_to_constants.convert_variables_to_constants_v2_as_graph( self._funcs[0], lower_control_flow=False ) ) input_tensors = [ tensor for tensor in frozen_func.inputs if tensor.dtype != _dtypes.resource ] output_tensors = frozen_func.outputs return graph_def, input_tensors, output_tensors, frozen_func def _convert_as_saved_model(self): """Converts a Keras model as a saved model. Returns: The converted data in serialized format. """ temp_dir = tempfile.mkdtemp() try: graph_def, input_tensors, output_tensors = ( self._convert_keras_to_saved_model(temp_dir) ) if self.saved_model_dir: return super(TFLiteKerasModelConverterV2, self).convert( graph_def, input_tensors, output_tensors ) finally: shutil.rmtree(temp_dir, True) @_export_metrics def convert(self): """Converts a keras model based on instance variables. Returns: The converted data in serialized format. Raises: ValueError: Multiple concrete functions are specified. Input shape is not specified. Invalid quantization parameters. """ saved_model_convert_result = self._convert_as_saved_model() if saved_model_convert_result: return saved_model_convert_result graph_def, input_tensors, output_tensors, frozen_func = ( self._freeze_keras_model() ) graph_def = self._optimize_tf_model( graph_def, input_tensors, output_tensors, frozen_func ) return super(TFLiteKerasModelConverterV2, self).convert( graph_def, input_tensors, output_tensors )
TFLiteKerasModelConverterV2
python
ray-project__ray
python/ray/autoscaler/_private/load_metrics.py
{ "start": 2046, "end": 14657 }
class ____: """Container for cluster load metrics. Metrics here are updated from raylet heartbeats. The autoscaler queries these metrics to determine when to scale up, and which nodes can be removed. """ def __init__(self): self.last_heartbeat_time_by_ip = {} self.static_resources_by_ip = {} self.dynamic_resources_by_ip = {} self.node_id_by_ip = {} self.waiting_bundles = [] self.infeasible_bundles = [] self.pending_placement_groups = [] self.resource_requests = [] self.cluster_full_of_actors_detected = False self.ray_nodes_last_used_time_by_ip = {} def __bool__(self): """A load metrics instance is Falsey iff the autoscaler process has not received a resource message from the GCS. """ return bool(self.node_id_by_ip) def update( self, ip: str, node_id: bytes, static_resources: Dict[str, Dict], dynamic_resources: Dict[str, Dict], node_idle_duration_s: float, waiting_bundles: List[Dict[str, float]] = None, infeasible_bundles: List[Dict[str, float]] = None, pending_placement_groups: List[PlacementGroupTableData] = None, cluster_full_of_actors_detected: bool = False, ): self.static_resources_by_ip[ip] = static_resources self.node_id_by_ip[ip] = node_id self.cluster_full_of_actors_detected = cluster_full_of_actors_detected if not waiting_bundles: waiting_bundles = [] if not infeasible_bundles: infeasible_bundles = [] if not pending_placement_groups: pending_placement_groups = [] # We are not guaranteed to have a corresponding dynamic resource # for every static resource because dynamic resources are based on # the available resources in the heartbeat, which does not exist # if it is zero. Thus, we have to update dynamic resources here. dynamic_resources_update = dynamic_resources.copy() for resource_name, capacity in self.static_resources_by_ip[ip].items(): if resource_name not in dynamic_resources_update: dynamic_resources_update[resource_name] = 0.0 self.dynamic_resources_by_ip[ip] = dynamic_resources_update now = time.time() self.ray_nodes_last_used_time_by_ip[ip] = now - node_idle_duration_s self.last_heartbeat_time_by_ip[ip] = now self.waiting_bundles = waiting_bundles self.infeasible_bundles = infeasible_bundles self.pending_placement_groups = pending_placement_groups def mark_active(self, ip): assert ip is not None, "IP should be known at this time" logger.debug("Node {} is newly setup, treating as active".format(ip)) self.last_heartbeat_time_by_ip[ip] = time.time() def prune_active_ips(self, active_ips: List[str]): """The Raylet ips stored by LoadMetrics are obtained by polling the GCS in Monitor.update_load_metrics(). On the other hand, the autoscaler gets a list of node ips from its NodeProvider. This method removes from LoadMetrics the ips unknown to the autoscaler. Args: active_ips (List[str]): The node ips known to the autoscaler. 
""" active_ips = set(active_ips) def prune(mapping, should_log): unwanted_ips = set(mapping) - active_ips for unwanted_ip in unwanted_ips: if should_log: logger.info("LoadMetrics: " f"Removed ip: {unwanted_ip}.") del mapping[unwanted_ip] if unwanted_ips and should_log: logger.info( "LoadMetrics: " "Removed {} stale ip mappings: {} not in {}".format( len(unwanted_ips), unwanted_ips, active_ips ) ) assert not (unwanted_ips & set(mapping)) prune(self.ray_nodes_last_used_time_by_ip, should_log=True) prune(self.static_resources_by_ip, should_log=False) prune(self.node_id_by_ip, should_log=False) prune(self.dynamic_resources_by_ip, should_log=False) prune(self.last_heartbeat_time_by_ip, should_log=False) def get_node_resources(self): """Return a list of node resources (static resource sizes). Example: >>> from ray.autoscaler._private.load_metrics import LoadMetrics >>> metrics = LoadMetrics(...) # doctest: +SKIP >>> metrics.get_node_resources() # doctest: +SKIP [{"CPU": 1}, {"CPU": 4, "GPU": 8}] # for two different nodes """ return self.static_resources_by_ip.values() def get_static_node_resources_by_ip(self) -> Dict[NodeIP, ResourceDict]: """Return a dict of node resources for every node ip. Example: >>> from ray.autoscaler._private.load_metrics import LoadMetrics >>> metrics = LoadMetrics(...) # doctest: +SKIP >>> metrics.get_static_node_resources_by_ip() # doctest: +SKIP {127.0.0.1: {"CPU": 1}, 127.0.0.2: {"CPU": 4, "GPU": 8}} """ return self.static_resources_by_ip def get_resource_utilization(self): return self.dynamic_resources_by_ip def _get_resource_usage(self): resources_used = {} resources_total = {} for ip, max_resources in self.static_resources_by_ip.items(): avail_resources = self.dynamic_resources_by_ip[ip] for resource_id, amount in max_resources.items(): used = amount - avail_resources[resource_id] if resource_id not in resources_used: resources_used[resource_id] = 0.0 resources_total[resource_id] = 0.0 resources_used[resource_id] += used resources_total[resource_id] += amount used = max(0, used) return resources_used, resources_total def get_resource_demand_vector(self, clip=True): if clip: # Bound the total number of bundles to # 2xMAX_RESOURCE_DEMAND_VECTOR_SIZE. This guarantees the resource # demand scheduler bin packing algorithm takes a reasonable amount # of time to run. return ( self.waiting_bundles[:AUTOSCALER_MAX_RESOURCE_DEMAND_VECTOR_SIZE] + self.infeasible_bundles[:AUTOSCALER_MAX_RESOURCE_DEMAND_VECTOR_SIZE] ) else: return self.waiting_bundles + self.infeasible_bundles def get_resource_requests(self): return self.resource_requests def get_pending_placement_groups(self): return self.pending_placement_groups def resources_avail_summary(self) -> str: """Return a concise string of cluster size to report to event logs. For example, "3 CPUs, 4 GPUs". 
""" total_resources = ( reduce(add_resources, self.static_resources_by_ip.values()) if self.static_resources_by_ip else {} ) out = "{} CPUs".format(int(total_resources.get("CPU", 0))) if "GPU" in total_resources: out += ", {} GPUs".format(int(total_resources["GPU"])) if "TPU" in total_resources: out += ", {} TPUs".format(int(total_resources["TPU"])) return out def summary(self): available_resources = ( reduce(add_resources, self.dynamic_resources_by_ip.values()) if self.dynamic_resources_by_ip else {} ) total_resources = ( reduce(add_resources, self.static_resources_by_ip.values()) if self.static_resources_by_ip else {} ) usage_dict = {} for key in total_resources: if key in ["memory", "object_store_memory"]: total = total_resources[key] available = available_resources[key] usage_dict[key] = (total - available, total) else: total = total_resources[key] usage_dict[key] = (total - available_resources[key], total) summarized_demand_vector = freq_of_dicts( self.get_resource_demand_vector(clip=False) ) summarized_resource_requests = freq_of_dicts(self.get_resource_requests()) def placement_group_serializer(pg): bundles = tuple( frozenset(bundle.unit_resources.items()) for bundle in pg.bundles ) return (bundles, pg.strategy) def placement_group_deserializer(pg_tuple): # We marshal this as a dictionary so that we can easily json.dumps # it later. # TODO (Alex): Would there be a benefit to properly # marshalling this (into a protobuf)? bundles = list(map(dict, pg_tuple[0])) return { "bundles": freq_of_dicts(bundles), "strategy": PlacementStrategy.Name(pg_tuple[1]), } summarized_placement_groups = freq_of_dicts( self.get_pending_placement_groups(), serializer=placement_group_serializer, deserializer=placement_group_deserializer, ) nodes_summary = freq_of_dicts(self.static_resources_by_ip.values()) usage_by_node = None if AUTOSCALER_REPORT_PER_NODE_STATUS: usage_by_node = {} for ip, totals in self.static_resources_by_ip.items(): available = self.dynamic_resources_by_ip.get(ip, {}) usage_by_node[ip] = {} for resource, total in totals.items(): usage_by_node[ip][resource] = ( total - available.get(resource, 0), total, ) return LoadMetricsSummary( usage=usage_dict, resource_demand=summarized_demand_vector, pg_demand=summarized_placement_groups, request_demand=summarized_resource_requests, node_types=nodes_summary, usage_by_node=usage_by_node, ) def set_resource_requests(self, requested_resources): if requested_resources is not None: assert isinstance(requested_resources, list), requested_resources self.resource_requests = [ request for request in requested_resources if len(request) > 0 ] def info_string(self): return " - " + "\n - ".join( ["{}: {}".format(k, v) for k, v in sorted(self._info().items())] ) def _info(self): resources_used, resources_total = self._get_resource_usage() now = time.time() idle_times = [now - t for t in self.ray_nodes_last_used_time_by_ip.values()] heartbeat_times = [now - t for t in self.last_heartbeat_time_by_ip.values()] most_delayed_heartbeats = sorted( self.last_heartbeat_time_by_ip.items(), key=lambda pair: pair[1] )[:5] most_delayed_heartbeats = {ip: (now - t) for ip, t in most_delayed_heartbeats} def format_resource(key, value): if key in ["object_store_memory", "memory"]: return "{} GiB".format(round(value / (1024 * 1024 * 1024), 2)) else: return round(value, 2) return { "ResourceUsage": ", ".join( [ "{}/{} {}".format( format_resource(rid, resources_used[rid]), format_resource(rid, resources_total[rid]), rid, ) for rid in sorted(resources_used) if not 
rid.startswith("node:") ] ), "NodeIdleSeconds": "Min={} Mean={} Max={}".format( int(min(idle_times)) if idle_times else -1, int(float(sum(idle_times)) / len(idle_times)) if idle_times else -1, int(max(idle_times)) if idle_times else -1, ), "TimeSinceLastHeartbeat": "Min={} Mean={} Max={}".format( int(min(heartbeat_times)) if heartbeat_times else -1, int(float(sum(heartbeat_times)) / len(heartbeat_times)) if heartbeat_times else -1, int(max(heartbeat_times)) if heartbeat_times else -1, ), "MostDelayedHeartbeats": most_delayed_heartbeats, }
LoadMetrics
python
huggingface__transformers
src/transformers/models/vits/modeling_vits.py
{ "start": 17587, "end": 18544 }
class ____(nn.Module):
    def __init__(self, config: VitsConfig):
        super().__init__()
        self.out_channels = config.flow_size

        self.conv_pre = nn.Conv1d(config.spectrogram_bins, config.hidden_size, 1)
        self.wavenet = VitsWaveNet(config, num_layers=config.posterior_encoder_num_wavenet_layers)
        self.conv_proj = nn.Conv1d(config.hidden_size, self.out_channels * 2, 1)

    def forward(self, inputs, padding_mask, global_conditioning=None):
        inputs = self.conv_pre(inputs) * padding_mask
        inputs = self.wavenet(inputs, padding_mask, global_conditioning)
        stats = self.conv_proj(inputs) * padding_mask
        mean, log_stddev = torch.split(stats, self.out_channels, dim=1)
        sampled = (mean + torch.randn_like(mean) * torch.exp(log_stddev)) * padding_mask
        return sampled, mean, log_stddev


# Copied from transformers.models.speecht5.modeling_speecht5.HifiGanResidualBlock
VitsPosteriorEncoder
python
viewflow__viewflow
viewflow/fsm/base.py
{ "start": 7802, "end": 9142 }
class ____:
    """Base transition definition descriptor."""

    do_not_call_in_templates = True

    def __init__(self, state: StateValue, func: TransitionFunction):  # noqa D102
        self._state = state
        self._func = func
        self._transitions: Dict[StateValue, Transition] = {}

    def __get__(
        self, instance: object, owner: Optional[Type[object]] = None
    ) -> TransitionMethod | TransitionBoundMethod:
        if instance:
            return TransitionBoundMethod(self._state, self._func, self, instance)
        else:
            assert owner is not None  # make mypy happy
            return TransitionMethod(self._state, self._func, self, owner)

    def add_transition(self, transition: Transition) -> None:
        self._transitions[transition.source] = transition

    def get_transitions(self) -> Iterable[Transition]:
        """List of all transitions."""
        return self._transitions.values()

    def get_transition(self, source_state: StateValue) -> Optional[Transition]:
        """Get a transition of a source_state.

        Returns None if there is no outgoing transitions.
        """
        transition = self._transitions.get(source_state, None)
        if transition is None:
            transition = self._transitions.get(State.ANY, None)
        return transition
TransitionDescriptor
python
getsentry__sentry
tests/snuba/test_snuba.py
{ "start": 3915, "end": 7966 }
class ____(TestCase, SnubaTestCase):
    def test_simple(self) -> None:
        one_min_ago = before_now(minutes=1).isoformat()
        event_1 = self.store_event(
            data={"fingerprint": ["group-1"], "message": "hello", "timestamp": one_min_ago},
            project_id=self.project.id,
        )
        event_2 = self.store_event(
            data={"fingerprint": ["group-2"], "message": "hello", "timestamp": one_min_ago},
            project_id=self.project.id,
        )

        results = snuba.bulk_raw_query(
            [
                snuba.SnubaQueryParams(
                    start=timezone.now() - timedelta(days=1),
                    end=timezone.now(),
                    selected_columns=["event_id", "group_id", "timestamp"],
                    filter_keys={"project_id": [self.project.id], "group_id": [event_1.group.id]},
                    tenant_ids={"referrer": "testing.test", "organization_id": 1},
                ),
                snuba.SnubaQueryParams(
                    start=timezone.now() - timedelta(days=1),
                    end=timezone.now(),
                    selected_columns=["event_id", "group_id", "timestamp"],
                    filter_keys={"project_id": [self.project.id], "group_id": [event_2.group.id]},
                    tenant_ids={"referrer": "testing.test", "organization_id": 1},
                ),
            ],
        )
        assert [{(item["group_id"], item["event_id"]) for item in r["data"]} for r in results] == [
            {(event_1.group.id, event_1.event_id)},
            {(event_2.group.id, event_2.event_id)},
        ]

    @mock.patch("sentry.utils.snuba._bulk_snuba_query", side_effect=snuba._bulk_snuba_query)
    def test_cache(self, _bulk_snuba_query: mock.MagicMock) -> None:
        one_min_ago = before_now(minutes=1).isoformat()
        event_1 = self.store_event(
            data={"fingerprint": ["group-1"], "message": "hello", "timestamp": one_min_ago},
            project_id=self.project.id,
        )
        event_2 = self.store_event(
            data={"fingerprint": ["group-2"], "message": "hello", "timestamp": one_min_ago},
            project_id=self.project.id,
        )

        params = [
            snuba.SnubaQueryParams(
                start=timezone.now() - timedelta(days=1),
                end=timezone.now(),
                selected_columns=["event_id", "group_id", "timestamp"],
                filter_keys={"project_id": [self.project.id], "group_id": [event_1.group.id]},
                tenant_ids={"referrer": "testing.test", "organization_id": 1},
            ),
            snuba.SnubaQueryParams(
                start=timezone.now() - timedelta(days=1),
                end=timezone.now(),
                selected_columns=["event_id", "group_id", "timestamp"],
                filter_keys={"project_id": [self.project.id], "group_id": [event_2.group.id]},
                tenant_ids={"referrer": "testing.test", "organization_id": 1},
            ),
        ]

        results = snuba.bulk_raw_query(
            copy.deepcopy(params),
            use_cache=True,
        )
        assert [{(item["group_id"], item["event_id"]) for item in r["data"]} for r in results] == [
            {(event_1.group.id, event_1.event_id)},
            {(event_2.group.id, event_2.event_id)},
        ]
        assert _bulk_snuba_query.call_count == 1

        _bulk_snuba_query.reset_mock()

        # # Make sure this doesn't appear in the cached results
        self.store_event(
            data={"fingerprint": ["group-2"], "message": "hello there", "timestamp": one_min_ago},
            project_id=self.project.id,
        )

        results = snuba.bulk_raw_query(
            copy.deepcopy(params),
            use_cache=True,
        )
        assert [{(item["group_id"], item["event_id"]) for item in r["data"]} for r in results] == [
            {(event_1.group.id, event_1.event_id)},
            {(event_2.group.id, event_2.event_id)},
        ]
        assert _bulk_snuba_query.call_count == 0
BulkRawQueryTest
python
anthropics__anthropic-sdk-python
src/anthropic/types/beta/beta_citation_config.py
{ "start": 156, "end": 211 }
class ____(BaseModel):
    enabled: bool
BetaCitationConfig
python
django__django
tests/generic_relations_regress/models.py
{ "start": 3694, "end": 3735 }
class ____(HasLinks):
    pass
HasLinkThing
python
pola-rs__polars
py-polars/src/polars/io/scan_options/_options.py
{ "start": 577, "end": 1798 }
class ____:
    """
    Holds scan options that are generic over scan type.

    For internal use. Most of the options will parse into `UnifiedScanArgs`.
    """

    row_index: tuple[str, int] | None = None  # (i64, usize)
    pre_slice: tuple[int, int] | None = None
    cast_options: ScanCastOptions | None = None
    extra_columns: Literal["ignore", "raise"] = "raise"
    missing_columns: Literal["insert", "raise"] = "raise"
    include_file_paths: str | None = None

    # For path expansion
    glob: bool = True
    hidden_file_prefix: Sequence[str] | None = None

    # Hive
    # Note: `None` means auto.
    hive_partitioning: bool | None = None
    hive_schema: SchemaDict | None = None
    try_parse_hive_dates: bool = True

    rechunk: bool = False
    cache: bool = True

    # Cloud
    storage_options: list[tuple[str, str]] | None = None
    credential_provider: CredentialProviderBuilder | None = None
    retries: int = 2

    column_mapping: ColumnMapping | None = None
    default_values: DefaultFieldValues | None = None
    deletion_files: DeletionFiles | None = None
    table_statistics: DataFrame | None = None

    # (physical, deleted)
    row_count: tuple[int, int] | None = None
ScanOptions
python
getsentry__sentry
src/sentry/monitors/endpoints/organization_monitor_schedule_sample_data.py
{ "start": 784, "end": 937 }
class ____(ConfigValidator):
    num_ticks = serializers.IntegerField(min_value=1, max_value=MAX_TICKS)


@region_silo_endpoint
SampleScheduleConfigValidator
python
google__jax
jax/_src/xla_metadata_lib.py
{ "start": 709, "end": 1474 }
class ____:
  __slots__ = ['val', 'hash']

  val: dict[str, Any]

  def __init__(self, val):
    self.val = val
    self.hash = hash(tuple(sorted(self.val.items())))

  def __hash__(self):
    return self.hash

  def __eq__(self, other):
    return other is not None and self.val == other.val


def filter_nones(d: dict) -> dict:
  return {k: v for k, v in d.items() if v is not None}


def update_metadata(a, b: dict[str, Any]):
  if not b:
    return a
  if a is None or a is config_ext.unset:
    val = {}
  else:
    val = a.val.copy()
  val.update(b)
  return XlaMetadata(filter_nones(val))


def current_xla_metadata() -> dict[str, Any] | None:
  metadata = config.xla_metadata_context_manager.value
  return None if metadata is None else metadata.val
XlaMetadata