language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
encode__django-rest-framework
tests/test_fields.py
{ "start": 80488, "end": 81197 }
class ____(FieldValues): """ Values for nested `DictField` with CharField as child. """ valid_inputs = [ ({0: {'a': 1, 'b': '2'}, 1: {3: 3}}, {'0': {'a': '1', 'b': '2'}, '1': {'3': '3'}}), ] invalid_inputs = [ ({0: {'a': 1, 'b': None}, 1: {'c': None}}, {'0': {'b': ['This field may not be null.']}, '1': {'c': ['This field may not be null.']}}), ({0: 'not a dict'}, {'0': ['Expected a dictionary of items but got type "str".']}), ] outputs = [ ({0: {'a': 1, 'b': '2'}, 1: {3: 3}}, {'0': {'a': '1', 'b': '2'}, '1': {'3': '3'}}), ] field = serializers.DictField(child=serializers.DictField(child=serializers.CharField()))
TestNestedDictField
python
kamyu104__LeetCode-Solutions
Python/count-numbers-with-unique-digits-ii.py
{ "start": 2184, "end": 2670 }
class ____(object): def numberCount(self, a, b): """ :type a: int :type b: int :rtype: int """ def check(x): lookup = 0 while x: if lookup&(1<<(x%10)): return False lookup |= (1<<(x%10)) x //= 10 return True return sum(check(x) for x in xrange(a, b+1)) # Time: O(blogb) # Space: O(logb) # brute force, hash table
Solution3
python
automl__auto-sklearn
autosklearn/metalearning/metafeatures/metafeatures.py
{ "start": 23227, "end": 23544 }
class ____(MetaFeature): def _calculate(self, X, y, logger, feat_type): skews = helper_functions.get_value("Skewnesses") minimum = np.nanmin(skews) if len(skews) > 0 else 0 return minimum if np.isfinite(minimum) else 0 @metafeatures.define("SkewnessMax", dependency="Skewnesses")
SkewnessMin
python
pydantic__pydantic
pydantic/v1/networks.py
{ "start": 12331, "end": 12433 }
class ____(AnyUrl): allowed_schemes = {'file'} host_required = False __slots__ = ()
FileUrl
python
realpython__materials
queue/src/queues.py
{ "start": 1033, "end": 1100 }
class ____: priority: float count: int value: Any
Element
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/zenrows_web/base.py
{ "start": 318, "end": 15707 }
class ____(BasePydanticReader): """ ZenRows Web Reader. Read web pages using ZenRows Universal Scraper API with advanced features like: - JavaScript rendering for dynamic content - Anti-bot bypass - Premium residential proxies with geo-location - Custom headers and session management - Advanced data extraction with CSS selectors - Multiple output formats (HTML, Markdown, Text, PDF) - Screenshot capabilities Args: api_key (str): ZenRows API key. Get one at https://app.zenrows.com/register js_render (Optional[bool]): Enable JavaScript rendering with a headless browser. Default False. js_instructions (Optional[str]): Execute custom JavaScript on the page to interact with elements. premium_proxy (Optional[bool]): Use residential IPs to bypass anti-bot protection. Default False. proxy_country (Optional[str]): Set the country of the IP used for the request (requires Premium Proxies). session_id (Optional[int]): Maintain the same IP for multiple requests for up to 10 minutes. custom_headers (Optional[Dict[str, str]]): Include custom headers in your request to mimic browser behavior. wait_for (Optional[str]): Wait for a specific CSS Selector to appear in the DOM before returning content. wait (Optional[int]): Wait a fixed amount of milliseconds after page load. block_resources (Optional[str]): Block specific resources (images, fonts, etc.) from loading. response_type (Optional[Literal["markdown", "plaintext", "pdf"]]): Convert HTML to other formats. css_extractor (Optional[str]): Extract specific elements using CSS selectors (JSON format). autoparse (Optional[bool]): Automatically extract structured data from HTML. Default False. screenshot (Optional[str]): Capture an above-the-fold screenshot of the page. screenshot_fullpage (Optional[str]): Capture a full-page screenshot. screenshot_selector (Optional[str]): Capture a screenshot of a specific element using CSS Selector. original_status (Optional[bool]): Return the original HTTP status code from the target page. 
Default False. allowed_status_codes (Optional[str]): Returns content even if target page fails with specified status codes. json_response (Optional[bool]): Capture network requests in JSON format. Default False. screenshot_format (Optional[Literal["png", "jpeg"]]): Choose between png and jpeg formats for screenshots. screenshot_quality (Optional[int]): For JPEG format, set quality from 1 to 100. outputs (Optional[str]): Specify which data types to extract from the scraped HTML. """ is_remote: bool = True api_key: str = Field(description="ZenRows API key") js_render: Optional[bool] = Field( default=False, description="Enable JavaScript rendering with a headless browser. Essential for modern web apps, SPAs, and sites with dynamic content.", ) js_instructions: Optional[str] = Field( default=None, description="Execute custom JavaScript on the page to interact with elements, scroll, click buttons, or manipulate content.", ) premium_proxy: Optional[bool] = Field( default=False, description="Use residential IPs to bypass anti-bot protection. Essential for accessing protected sites.", ) proxy_country: Optional[str] = Field( default=None, description="Set the country of the IP used for the request (requires Premium Proxies). Use for accessing geo-restricted content.", ) session_id: Optional[int] = Field( default=None, description="Maintain the same IP for multiple requests for up to 10 minutes. Essential for multi-step processes.", ) custom_headers: Optional[Dict[str, str]] = Field( default=None, description="Include custom headers in your request to mimic browser behavior.", ) wait_for: Optional[str] = Field( default=None, description="Wait for a specific CSS Selector to appear in the DOM before returning content.", ) wait: Optional[int] = Field( default=None, description="Wait a fixed amount of milliseconds after page load." ) block_resources: Optional[str] = Field( default=None, description="Block specific resources (images, fonts, etc.) 
from loading to speed up scraping.", ) response_type: Optional[Literal["markdown", "plaintext", "pdf"]] = Field( default=None, description="Convert HTML to other formats. Options: markdown, plaintext, pdf.", ) css_extractor: Optional[str] = Field( default=None, description="Extract specific elements using CSS selectors (JSON format).", ) autoparse: Optional[bool] = Field( default=False, description="Automatically extract structured data from HTML." ) screenshot: Optional[str] = Field( default=None, description="Capture an above-the-fold screenshot of the page." ) screenshot_fullpage: Optional[str] = Field( default=None, description="Capture a full-page screenshot." ) screenshot_selector: Optional[str] = Field( default=None, description="Capture a screenshot of a specific element using CSS Selector.", ) original_status: Optional[bool] = Field( default=False, description="Return the original HTTP status code from the target page.", ) allowed_status_codes: Optional[str] = Field( default=None, description="Returns the content even if the target page fails with specified status codes.", ) json_response: Optional[bool] = Field( default=False, description="Capture network requests in JSON format, including XHR or Fetch data.", ) screenshot_format: Optional[Literal["png", "jpeg"]] = Field( default=None, description="Choose between png (default) and jpeg formats for screenshots.", ) screenshot_quality: Optional[int] = Field( default=None, description="For JPEG format, set quality from 1 to 100.", ) outputs: Optional[str] = Field( default=None, description="Specify which data types to extract from the scraped HTML.", ) _base_url: str = PrivateAttr(default="https://api.zenrows.com/v1/") @field_validator("css_extractor") @classmethod def validate_css_extractor(cls, v): """Validate that css_extractor is valid JSON if provided.""" if v is not None: try: json.loads(v) except json.JSONDecodeError: raise ValueError("css_extractor must be valid JSON") return v 
@field_validator("proxy_country") @classmethod def validate_proxy_country(cls, v): """Validate that proxy_country is a two-letter country code.""" if v is not None and len(v) != 2: raise ValueError("proxy_country must be a two-letter country code") return v def __init__(self, **kwargs): """Initialize ZenRows Web Reader.""" super().__init__(**kwargs) if not self.api_key: raise ValueError( "ZenRows API key is required. Get one at https://app.zenrows.com/register" ) @classmethod def class_name(cls) -> str: """Get the name identifier of the class.""" return "ZenRowsWebReader" def _prepare_request_params( self, url: str, extra_params: Optional[Dict] = None ) -> tuple[Dict[str, Any], Optional[Dict[str, str]]]: """Prepare request parameters for ZenRows API.""" params = {"url": url, "apikey": self.api_key} # Add all configured parameters if self.js_render: params["js_render"] = self.js_render if self.js_instructions: params["js_instructions"] = self.js_instructions if self.premium_proxy: params["premium_proxy"] = self.premium_proxy if self.proxy_country: params["proxy_country"] = self.proxy_country if self.session_id: params["session_id"] = self.session_id if self.wait_for: params["wait_for"] = self.wait_for if self.wait: params["wait"] = self.wait if self.block_resources: params["block_resources"] = self.block_resources if self.response_type: params["response_type"] = self.response_type if self.css_extractor: params["css_extractor"] = self.css_extractor if self.autoparse: params["autoparse"] = self.autoparse if self.screenshot: params["screenshot"] = self.screenshot if self.screenshot_fullpage: params["screenshot_fullpage"] = self.screenshot_fullpage if self.screenshot_selector: params["screenshot_selector"] = self.screenshot_selector if self.original_status: params["original_status"] = self.original_status if self.allowed_status_codes: params["allowed_status_codes"] = self.allowed_status_codes if self.json_response: params["json_response"] = self.json_response if 
self.screenshot_format: params["screenshot_format"] = self.screenshot_format if self.screenshot_quality: params["screenshot_quality"] = self.screenshot_quality if self.outputs: params["outputs"] = self.outputs # Add any extra parameters for this specific request if extra_params: params.update(extra_params) # Auto-enable js_render for parameters that require JavaScript rendering js_required_params = [ "screenshot", "screenshot_fullpage", "screenshot_selector", "js_instructions", "json_response", "wait", "wait_for", ] js_required = any(params.get(param) for param in js_required_params) if js_required: params["js_render"] = True # Special handling for screenshot variants screenshot_variants = ["screenshot_fullpage", "screenshot_selector"] if any(params.get(param) for param in screenshot_variants): params["screenshot"] = "true" # Auto-enable premium_proxy when proxy_country is specified if params.get("proxy_country"): params["premium_proxy"] = True # Handle custom headers request_headers = None if "custom_headers" in params and params["custom_headers"]: # Store the headers dictionary for the request request_headers = params["custom_headers"] # Set custom_headers to "true" to enable custom header support in the API params["custom_headers"] = "true" elif self.custom_headers: request_headers = self.custom_headers params["custom_headers"] = "true" else: # Remove custom_headers if not provided or empty params.pop("custom_headers", None) # Remove None values to avoid sending unnecessary parameters params = {k: v for k, v in params.items() if v is not None} return params, request_headers def _make_request( self, url: str, extra_params: Optional[Dict] = None ) -> requests.Response: """Make request to ZenRows API.""" params, request_headers = self._prepare_request_params(url, extra_params) response = requests.get( self._base_url, params=params, headers=request_headers, ) response.raise_for_status() return response def _extract_metadata( self, response: requests.Response, url: 
str ) -> Dict[str, Any]: """Extract metadata from ZenRows response.""" metadata = { "source_url": url, "scraped_at": time.time(), } # Extract ZenRows specific headers if "X-Request-Cost" in response.headers: metadata["request_cost"] = float(response.headers["X-Request-Cost"]) if "X-Request-Id" in response.headers: metadata["request_id"] = response.headers["X-Request-Id"] if "Zr-Final-Url" in response.headers: metadata["final_url"] = response.headers["Zr-Final-Url"] if "Concurrency-Remaining" in response.headers: metadata["concurrency_remaining"] = int( response.headers["Concurrency-Remaining"] ) if "Concurrency-Limit" in response.headers: metadata["concurrency_limit"] = int(response.headers["Concurrency-Limit"]) # Add response info metadata["status_code"] = response.status_code metadata["content_type"] = response.headers.get("Content-Type", "") metadata["content_length"] = len(response.content) # Add scraping configuration used metadata["zenrows_config"] = { "js_render": self.js_render, "premium_proxy": self.premium_proxy, "proxy_country": self.proxy_country, "session_id": self.session_id, "response_type": self.response_type, } return metadata def _process_response_content(self, response: requests.Response) -> str: """Process response content based on whether it's a screenshot or not.""" # Handle screenshot responses screenshot_params = ["screenshot", "screenshot_fullpage", "screenshot_selector"] if any(getattr(self, param, None) for param in screenshot_params): return response.content # For all other responses, return text return response.text def load_data( self, urls: Union[str, List[str]], extra_params: Optional[Dict] = None, **kwargs ) -> List[Document]: """ Load data from URLs using ZenRows API. 
Args: urls: Single URL string or list of URLs to scrape extra_params: Additional parameters for this specific request **kwargs: Additional keyword arguments (for compatibility) Returns: List of Document objects containing scraped content and metadata """ if isinstance(urls, str): urls = [urls] documents = [] for url in urls: try: response = self._make_request(url, extra_params) content = self._process_response_content(response) metadata = self._extract_metadata(response, url) # Create document document = Document( text=content, metadata=metadata, ) documents.append(document) except Exception as e: # Create error document for failed URLs error_metadata = { "source_url": url, "error": str(e), "scraped_at": time.time(), "status": "failed", } error_document = Document( text=f"Error scraping {url}: {e!s}", metadata=error_metadata, ) documents.append(error_document) return documents
ZenRowsWebReader
python
django__django
tests/admin_views/models.py
{ "start": 27786, "end": 27926 }
class ____(models.Model): book = models.ForeignKey(Book, models.CASCADE) author = models.ForeignKey(Author, models.CASCADE)
Authorship
python
apache__airflow
providers/pagerduty/tests/unit/pagerduty/hooks/test_pagerduty_events.py
{ "start": 2698, "end": 5090 }
class ____: def test_get_integration_key_from_password(self, events_connections): hook = PagerdutyEventsHook(pagerduty_events_conn_id=DEFAULT_CONN_ID) assert hook.integration_key == "events_token", "token initialised." def test_token_parameter_override(self, events_connections): hook = PagerdutyEventsHook(integration_key="override_key", pagerduty_events_conn_id=DEFAULT_CONN_ID) assert hook.integration_key == "override_key", "token initialised." @patch.object(pagerduty.EventsApiV2Client, "request") def test_create_change_event(self, mock_request, events_connections): """Test that create_change_event sends a valid change event and returns None""" hook = PagerdutyEventsHook(pagerduty_events_conn_id=DEFAULT_CONN_ID) mock_response_body = { "message": "Change event processed", "status": "success", } mock_response = httpx.Response( status_code=202, json=mock_response_body, request=httpx.Request("POST", "https://events.pagerduty.com/v2/change/enqueue"), ) mock_response.ok = True mock_request.return_value = mock_response resp = hook.create_change_event(summary="test", source="airflow") mock_request.assert_called_once() assert resp is None, "No response expected for change event" @patch.object(EventsApiV2Client, "request") def test_send_event_success(self, mock_request, events_connections): """Test that send_event returns dedup_key on success""" hook = PagerdutyEventsHook(pagerduty_events_conn_id=DEFAULT_CONN_ID) dedup_key = "samplekeyhere" mock_response_body = { "status": "success", "message": "Event processed", "dedup_key": dedup_key, } mock_response = httpx.Response( status_code=202, json=mock_response_body, request=httpx.Request("POST", "https://events.pagerduty.com/v2/enqueue"), ) mock_response.ok = True mock_request.return_value = mock_response resp = hook.send_event( summary="test", source="airflow_test", severity="error", dedup_key=dedup_key, ) mock_request.assert_called_once() assert resp == dedup_key
TestPagerdutyEventsHook
python
ZoranPandovski__al-go-rithms
data_structures/b_tree/Python/same_tree.py
{ "start": 153, "end": 1521 }
class ____(): def same_tree(self,root1,root2): if root1 == None and root2 == None: return True if (root1 == None or root2 == None): return False if root1.val == root2.val: if self.same_tree(root1.left,root2.left): if self.same_tree(root1.right,root2.right): return True return False ### Testcases ### # stree = SameTree() # root1 = None # root2 = None # obj = Tree() # # # # Create exact same tree # for i in range(10): # rand = random.randrange(1,20,2) # root1 = obj.insert(root1,rand) # root2 = obj.insert(root2,rand) # # # print stree.same_tree(root1,root2) # # # root1 = None # root2 = None # # # Create a tree with same structure but different data in it # for i in range(10): # rand1 = random.randrange(1,20,2) # rand2 = random.randrange(1,20,2) # root1 = obj.insert(root1,rand1) # root2 = obj.insert(root2,rand2) # # print stree.same_tree(root1,root2) # # # root1 = None # root2 = None # # # Create a tree with totally different tree structure # for i in range(4): # rand = random.randrange(1,20,4) # root1 = obj.insert(root1,rand) # for i in range(6): # rand = random.randrange(1,20,4) # root2 = obj.insert(root2, rand) # # print stree.same_tree(root1, root2)
SameTree
python
google__pytype
pytype/datatypes.py
{ "start": 5018, "end": 5164 }
class ____(Exception): def __init__(self, existing_name): super().__init__() self.existing_name = existing_name
AliasingDictConflictError
python
tensorflow__tensorflow
tensorflow/lite/tools/optimize/sparsity/format_converter_wrapper_pybind11_test.py
{ "start": 910, "end": 2708 }
class ____(absltest.TestCase): def test_bcsr_fp32(self): """Same as FormatConverterTest::BlockTestD0S1 but via pybind11.""" # pyformat: disable dense_matrix = [1.0, 0.0, 2.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, 0.0, 0.0, 0.0, 6.0] # pyformat: enable dense_shape = [4, 4] traversal_order = [0, 1, 2, 3] dim_types = [ format_converter.TfLiteDimensionType.TF_LITE_DIM_DENSE, format_converter.TfLiteDimensionType.TF_LITE_DIM_SPARSE_CSR ] block_size = [2, 2] block_map = [0, 1] converter = format_converter.FormatConverterFp32(dense_shape, traversal_order, dim_types, block_size, block_map) converter.DenseToSparse(np.asarray(dense_matrix, dtype=np.float32).data) dim_metadata = converter.GetDimMetadata() self.assertEqual([2], dim_metadata[0]) self.assertEmpty(dim_metadata[1]) # rows are dense. self.assertEqual([0, 2, 3], dim_metadata[2]) # array segments. self.assertEqual([0, 1, 1], dim_metadata[3]) # array indices. self.assertEqual([2], dim_metadata[4]) self.assertEmpty(dim_metadata[5]) # sub block rows are dense. self.assertEqual([2], dim_metadata[6]) self.assertEmpty(dim_metadata[7]) # sub block columns are dense. expected_data = [1.0, 0.0, 0.0, 4.0, 2.0, 3.0, 0.0, 0.0, 5.0, 0.0, 0.0, 6.0] sparse_data = converter.GetData() self.assertTrue(np.allclose(expected_data, sparse_data)) converter.SparseToDense(np.asarray(sparse_data, dtype=np.float32).data) self.assertTrue(np.allclose(dense_matrix, converter.GetData())) if __name__ == '__main__': absltest.main()
FormatConverterTest
python
numpy__numpy
numpy/f2py/tests/test_return_logical.py
{ "start": 61, "end": 1385 }
class ____(util.F2PyTest): def check_function(self, t): assert t(True) == 1 assert t(False) == 0 assert t(0) == 0 assert t(None) == 0 assert t(0.0) == 0 assert t(0j) == 0 assert t(1j) == 1 assert t(234) == 1 assert t(234.6) == 1 assert t(234.6 + 3j) == 1 assert t("234") == 1 assert t("aaa") == 1 assert t("") == 0 assert t([]) == 0 assert t(()) == 0 assert t({}) == 0 assert t(t) == 1 assert t(-234) == 1 assert t(10**100) == 1 assert t([234]) == 1 assert t((234, )) == 1 assert t(array(234)) == 1 assert t(array([234])) == 1 assert t(array([[234]])) == 1 assert t(array([127], "b")) == 1 assert t(array([234], "h")) == 1 assert t(array([234], "i")) == 1 assert t(array([234], "l")) == 1 assert t(array([234], "f")) == 1 assert t(array([234], "d")) == 1 assert t(array([234 + 3j], "F")) == 1 assert t(array([234], "D")) == 1 assert t(array(0)) == 0 assert t(array([0])) == 0 assert t(array([[0]])) == 0 assert t(array([0j])) == 0 assert t(array([1])) == 1 pytest.raises(ValueError, t, array([0, 0]))
TestReturnLogical
python
dagster-io__dagster
python_modules/dagster/dagster/components/testing/test_cases.py
{ "start": 3890, "end": 4751 }
class ____: """Pytest test class for testing translation of asset attributes. You can subclass this class and implement a test_translation function using the various fixtures in order to comprehensively test asset translation options for your component. """ @pytest.fixture(params=test_cases, ids=[case.name for case in test_cases]) def translation_test_case(self, request): return request.param @pytest.fixture def attributes(self, translation_test_case: TranslationTestCase): return translation_test_case.attributes @pytest.fixture def assertion(self, translation_test_case: TranslationTestCase): return translation_test_case.assertion @pytest.fixture def key_modifier(self, translation_test_case: TranslationTestCase): return translation_test_case.key_modifier
TestTranslation
python
scrapy__scrapy
tests/test_crawler.py
{ "start": 21804, "end": 21926 }
class ____(scrapy.Spider): name = "no_request" async def start(self): return yield
NoRequestsSpider
python
openai__openai-python
src/openai/_exceptions.py
{ "start": 3750, "end": 3891 }
class ____(APIStatusError): status_code: Literal[422] = 422 # pyright: ignore[reportIncompatibleVariableOverride]
UnprocessableEntityError
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/solverScoring4.py
{ "start": 456, "end": 1181 }
class ____(Generic[T]): @staticmethod def resolve(resolve_value: S) -> "Promise[S]": ... def __init__(self, executor_func: TA2[T]) -> None: ... def then(self, onfullfilled: TA1[T, R]) -> "Promise[R]": ... Promise.resolve(1).then(lambda result: reveal_type(result, expected_text="int")) Promise.resolve(1).then(lambda result: "abc").then( lambda result: reveal_type(result, expected_text="str") ) Promise.resolve(None).then( lambda result: Promise.resolve("abc" if 1 < 2 else 123) ).then(lambda result: reveal_type(result, expected_text="str | int")) Promise.resolve(None).then(lambda result: "abc" if 1 < 2 else 123).then( lambda result: reveal_type(result, expected_text="int | str") )
Promise
python
tensorflow__tensorflow
tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm_test.py
{ "start": 1252, "end": 5152 }
class ____(test.TestCase, parameterized.TestCase): def test_min_max_max(self): calib_opts = stablehlo_quant_config_pb2.CalibrationOptions( calibration_method=_CalibrationMethod.CALIBRATION_METHOD_MIN_MAX ) statistics = calib_stats_pb2.CalibrationStatistics() statistics.min_max_statistics.global_min = 1.0 statistics.min_max_statistics.global_max = 5.0 min_value, max_value = calibration_algorithm.get_min_max_value( statistics, calib_opts ) self.assertAllEqual((min_value, max_value), (1.0, 5.0)) def test_average_min_max(self): calib_opts = stablehlo_quant_config_pb2.CalibrationOptions( calibration_method=_CalibrationMethod.CALIBRATION_METHOD_AVERAGE_MIN_MAX ) statistics = calib_stats_pb2.CalibrationStatistics() statistics.average_min_max_statistics.min_sum = 5.0 statistics.average_min_max_statistics.max_sum = 50.0 statistics.average_min_max_statistics.num_samples = 5 min_value, max_value = calibration_algorithm.get_min_max_value( statistics, calib_opts ) self.assertAllEqual((min_value, max_value), (1.0, 10.0)) @parameterized.named_parameters( { "testcase_name": "with_histogram_percentile", "calibration_options": stablehlo_quant_config_pb2.CalibrationOptions( calibration_method=_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_PERCENTILE, calibration_parameters=stablehlo_quant_config_pb2.CalibrationOptions.CalibrationParameters( min_percentile=0.001, max_percentile=99.999 ), ), }, { "testcase_name": "with_histogram_mse_bruteforce", "calibration_options": stablehlo_quant_config_pb2.CalibrationOptions( calibration_method=_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE, calibration_parameters=stablehlo_quant_config_pb2.CalibrationOptions.CalibrationParameters(), ), }, { "testcase_name": "with_histogram_mse_max_frequency", "calibration_options": stablehlo_quant_config_pb2.CalibrationOptions( calibration_method=_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY, 
calibration_parameters=stablehlo_quant_config_pb2.CalibrationOptions.CalibrationParameters(), ), }, { "testcase_name": "with_histogram_mse_symmetric", "calibration_options": stablehlo_quant_config_pb2.CalibrationOptions( calibration_method=_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC, calibration_parameters=stablehlo_quant_config_pb2.CalibrationOptions.CalibrationParameters(), ), }, ) def test_histogram_calibration_methods(self, calibration_options): statistics = calib_stats_pb2.CalibrationStatistics() statistics.histogram_statistics.lower_bound = 0.0 statistics.histogram_statistics.bin_width = 1.0 hist_freq = np.zeros(501, dtype=np.int32) # Advanced calibration methods that use histograms detect outliers, so they # don't use the outliers as min/max values. hist_freq[0] = 1 hist_freq[-1] = 1 # The majority of the data exists around the center. hist_freq[250] = 1000 for i in range(1, 201): hist_freq[250 - i] = 1000 - i hist_freq[250 + i] = 1000 - i statistics.histogram_statistics.hist_freq.extend(hist_freq.tolist()) # Histogram calibration methods should remove outliers. min_value, max_value = calibration_algorithm.get_min_max_value( statistics, calibration_options ) # Since the min/max values may differ slightly for each calibration method, # also check the nearby values. self.assertAllInRange(min_value, 49, 51) self.assertAllInRange(max_value, 449, 451) if __name__ == "__main__": test.main()
CalibrationAlgorithmTest
python
PyCQA__pylint
tests/functional/n/not_callable.py
{ "start": 3503, "end": 3917 }
class ____: a = ADescriptor() AggregateCls().a() # Make sure not-callable isn't raised for descriptors # astroid can't process descriptors correctly so # pylint needs to ignore not-callable for them # right now # Test for https://github.com/pylint-dev/pylint/issues/1699 import multiprocessing multiprocessing.current_process() # Make sure not-callable isn't raised for uninferable properties
AggregateCls
python
sphinx-doc__sphinx
sphinx/transforms/post_transforms/code.py
{ "start": 1254, "end": 2639 }
class ____(nodes.NodeVisitor): def __init__(self, document: nodes.document, default_language: str) -> None: self.default_setting = HighlightSetting(default_language, False, sys.maxsize) self.settings: list[HighlightSetting] = [] super().__init__(document) def unknown_visit(self, node: Node) -> None: pass def unknown_departure(self, node: Node) -> None: pass def visit_document(self, node: Node) -> None: self.settings.append(self.default_setting) def depart_document(self, node: Node) -> None: self.settings.pop() def visit_start_of_file(self, node: Node) -> None: self.settings.append(self.default_setting) def depart_start_of_file(self, node: Node) -> None: self.settings.pop() def visit_highlightlang(self, node: addnodes.highlightlang) -> None: self.settings[-1] = HighlightSetting( node['lang'], node['force'], node['linenothreshold'] ) def visit_literal_block(self, node: nodes.literal_block) -> None: setting = self.settings[-1] if 'language' not in node: node['language'] = setting.language node['force'] = setting.force if 'linenos' not in node: lines = node.astext().count('\n') node['linenos'] = lines >= setting.lineno_threshold - 1
HighlightLanguageVisitor
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol53.py
{ "start": 4603, "end": 4829 }
class ____(Proto_ContraGeneric): # This should not generate a reportIncompatibleMethodOverride error # but does currently. def m[T: Impl_ContraGenericExplicit3](self: T, x: T) -> None: ...
Impl_ContraGenericExplicit3
python
django__django
tests/model_forms/models.py
{ "start": 3760, "end": 3982 }
class ____(models.Model): description = models.CharField(max_length=20) file = models.FileField(storage=temp_storage, upload_to="tests", max_length=15) def __str__(self): return self.description
TextFile
python
python-attrs__attrs
tests/dataclass_transform_example.py
{ "start": 74, "end": 172 }
class ____: a: str b: int reveal_type(Define.__init__) # noqa: F821 @attr.define()
Define
python
lazyprogrammer__machine_learning_examples
rl2/mountaincar/pg_tf_random.py
{ "start": 775, "end": 1388 }
class ____: def __init__(self, M1, M2, f=tf.nn.tanh, use_bias=True, zeros=False): if zeros: W = np.zeros((M1, M2)).astype(np.float32) self.W = tf.Variable(W) else: self.W = tf.Variable(tf.random_normal(shape=(M1, M2))) self.params = [self.W] self.use_bias = use_bias if use_bias: self.b = tf.Variable(np.zeros(M2).astype(np.float32)) self.params.append(self.b) self.f = f def forward(self, X): if self.use_bias: a = tf.matmul(X, self.W) + self.b else: a = tf.matmul(X, self.W) return self.f(a) # approximates pi(a | s)
HiddenLayer
python
getsentry__sentry
src/sentry/web/frontend/reactivate_account.py
{ "start": 370, "end": 950 }
class ____(BaseView): # auth check is managed by view code auth_required = False @method_decorator(never_cache) def handle(self, request: HttpRequest) -> HttpResponseBase: if not request.user.is_authenticated: return self.handle_auth_required(request) if request.POST.get("op") == "confirm": user_service.update_user(user_id=request.user.id, attrs=dict(is_active=True)) return self.redirect(auth.get_login_redirect(request)) return self.respond("sentry/reactivate-account.html", {})
ReactivateAccountView
python
sqlalchemy__sqlalchemy
test/dialect/mysql/test_compiler.py
{ "start": 57831, "end": 59023 }
class ____(fixtures.TestBase, RegexpCommon): __dialect__ = "mariadb" def test_regexp_match_flags_safestring(self): self.assert_compile( self.table.c.myid.regexp_match("pattern", flags="i'g"), "mytable.myid REGEXP CONCAT('(?', 'i''g', ')', %s)", checkpositional=("pattern",), ) def test_regexp_match_flags(self): self.assert_compile( self.table.c.myid.regexp_match("pattern", flags="ig"), "mytable.myid REGEXP CONCAT('(?', 'ig', ')', %s)", checkpositional=("pattern",), ) def test_not_regexp_match_flags(self): self.assert_compile( ~self.table.c.myid.regexp_match("pattern", flags="ig"), "mytable.myid NOT REGEXP CONCAT('(?', 'ig', ')', %s)", checkpositional=("pattern",), ) def test_regexp_replace_flags(self): self.assert_compile( self.table.c.myid.regexp_replace( "pattern", "replacement", flags="ig" ), "REGEXP_REPLACE(mytable.myid, CONCAT('(?', 'ig', ')', %s), %s)", checkpositional=("pattern", "replacement"), )
RegexpTestMariaDb
python
numpy__numpy
numpy/_core/tests/test_multiarray.py
{ "start": 379714, "end": 383973 }
class ____: # all these tests use the WRITEBACKIFCOPY mechanism def test_argmax_with_out(self): mat = np.eye(5) out = np.empty(5, dtype='i2') res = np.argmax(mat, 0, out=out) assert_equal(res, range(5)) def test_argmin_with_out(self): mat = -np.eye(5) out = np.empty(5, dtype='i2') res = np.argmin(mat, 0, out=out) assert_equal(res, range(5)) def test_insert_noncontiguous(self): a = np.arange(6).reshape(2, 3).T # force non-c-contiguous # uses arr_insert np.place(a, a > 2, [44, 55]) assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]])) # hit one of the failing paths assert_raises(ValueError, np.place, a, a > 20, []) def test_put_noncontiguous(self): a = np.arange(6).reshape(2, 3).T # force non-c-contiguous np.put(a, [0, 2], [44, 55]) assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]])) def test_putmask_noncontiguous(self): a = np.arange(6).reshape(2, 3).T # force non-c-contiguous # uses arr_putmask np.putmask(a, a > 2, a**2) assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]])) def test_take_mode_raise(self): a = np.arange(6, dtype='int') out = np.empty(2, dtype='int') np.take(a, [0, 2], out=out, mode='raise') assert_equal(out, np.array([0, 2])) def test_choose_mod_raise(self): a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]) out = np.empty((3, 3), dtype='int') choices = [-10, 10] np.choose(a, choices, out=out, mode='raise') assert_equal(out, np.array([[ 10, -10, 10], [-10, 10, -10], [ 10, -10, 10]])) def test_flatiter__array__(self): a = np.arange(9).reshape(3, 3) b = a.T.flat c = b.__array__() # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics del c def test_dot_out(self): # if HAVE_CBLAS, will use WRITEBACKIFCOPY a = np.arange(9, dtype=float).reshape(3, 3) b = np.dot(a, a, out=a) assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]])) def test_view_assign(self): from numpy._core._multiarray_tests import ( npy_create_writebackifcopy, npy_resolve, ) arr = np.arange(9).reshape(3, 3).T arr_wb = npy_create_writebackifcopy(arr) 
assert_(arr_wb.flags.writebackifcopy) assert_(arr_wb.base is arr) arr_wb[...] = -100 npy_resolve(arr_wb) # arr changes after resolve, even though we assigned to arr_wb assert_equal(arr, -100) # after resolve, the two arrays no longer reference each other assert_(arr_wb.ctypes.data != 0) assert_equal(arr_wb.base, None) # assigning to arr_wb does not get transferred to arr arr_wb[...] = 100 assert_equal(arr, -100) @pytest.mark.leaks_references( reason="increments self in dealloc; ignore since deprecated path.") def test_dealloc_warning(self): arr = np.arange(9).reshape(3, 3) v = arr.T with pytest.warns(RuntimeWarning): _multiarray_tests.npy_abuse_writebackifcopy(v) def test_view_discard_refcount(self): from numpy._core._multiarray_tests import ( npy_create_writebackifcopy, npy_discard, ) arr = np.arange(9).reshape(3, 3).T orig = arr.copy() if HAS_REFCOUNT: arr_cnt = sys.getrefcount(arr) arr_wb = npy_create_writebackifcopy(arr) assert_(arr_wb.flags.writebackifcopy) assert_(arr_wb.base is arr) arr_wb[...] = -100 npy_discard(arr_wb) # arr remains unchanged after discard assert_equal(arr, orig) # after discard, the two arrays no longer reference each other assert_(arr_wb.ctypes.data != 0) assert_equal(arr_wb.base, None) if HAS_REFCOUNT: assert_equal(arr_cnt, sys.getrefcount(arr)) # assigning to arr_wb does not get transferred to arr arr_wb[...] = 100 assert_equal(arr, orig)
TestWritebackIfCopy
python
pandas-dev__pandas
pandas/tests/indexes/numeric/test_indexing.py
{ "start": 20093, "end": 20643 }
class ____: @pytest.mark.parametrize("dtype", [np.float64, np.int64, np.uint64]) def test_contains_none(self, dtype): # GH#35788 should return False, not raise TypeError index = Index([0, 1, 2, 3, 4], dtype=dtype) assert None not in index def test_contains_float64_nans(self): index = Index([1.0, 2.0, np.nan], dtype=np.float64) assert np.nan in index def test_contains_float64_not_nans(self): index = Index([1.0, 2.0, np.nan], dtype=np.float64) assert 1.0 in index
TestContains
python
numpy__numpy
numpy/ma/tests/test_extras.py
{ "start": 21664, "end": 34228 }
class ____: def test_compress_nd(self): # Tests compress_nd x = np.array(list(range(3 * 4 * 5))).reshape(3, 4, 5) m = np.zeros((3, 4, 5)).astype(bool) m[1, 1, 1] = True x = array(x, mask=m) # axis=None a = compress_nd(x) assert_equal(a, [[[ 0, 2, 3, 4], [10, 12, 13, 14], [15, 17, 18, 19]], [[40, 42, 43, 44], [50, 52, 53, 54], [55, 57, 58, 59]]]) # axis=0 a = compress_nd(x, 0) assert_equal(a, [[[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19]], [[40, 41, 42, 43, 44], [45, 46, 47, 48, 49], [50, 51, 52, 53, 54], [55, 56, 57, 58, 59]]]) # axis=1 a = compress_nd(x, 1) assert_equal(a, [[[ 0, 1, 2, 3, 4], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19]], [[20, 21, 22, 23, 24], [30, 31, 32, 33, 34], [35, 36, 37, 38, 39]], [[40, 41, 42, 43, 44], [50, 51, 52, 53, 54], [55, 56, 57, 58, 59]]]) a2 = compress_nd(x, (1,)) a3 = compress_nd(x, -2) a4 = compress_nd(x, (-2,)) assert_equal(a, a2) assert_equal(a, a3) assert_equal(a, a4) # axis=2 a = compress_nd(x, 2) assert_equal(a, [[[ 0, 2, 3, 4], [ 5, 7, 8, 9], [10, 12, 13, 14], [15, 17, 18, 19]], [[20, 22, 23, 24], [25, 27, 28, 29], [30, 32, 33, 34], [35, 37, 38, 39]], [[40, 42, 43, 44], [45, 47, 48, 49], [50, 52, 53, 54], [55, 57, 58, 59]]]) a2 = compress_nd(x, (2,)) a3 = compress_nd(x, -1) a4 = compress_nd(x, (-1,)) assert_equal(a, a2) assert_equal(a, a3) assert_equal(a, a4) # axis=(0, 1) a = compress_nd(x, (0, 1)) assert_equal(a, [[[ 0, 1, 2, 3, 4], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19]], [[40, 41, 42, 43, 44], [50, 51, 52, 53, 54], [55, 56, 57, 58, 59]]]) a2 = compress_nd(x, (0, -2)) assert_equal(a, a2) # axis=(1, 2) a = compress_nd(x, (1, 2)) assert_equal(a, [[[ 0, 2, 3, 4], [10, 12, 13, 14], [15, 17, 18, 19]], [[20, 22, 23, 24], [30, 32, 33, 34], [35, 37, 38, 39]], [[40, 42, 43, 44], [50, 52, 53, 54], [55, 57, 58, 59]]]) a2 = compress_nd(x, (-2, 2)) a3 = compress_nd(x, (1, -1)) a4 = compress_nd(x, (-2, -1)) assert_equal(a, a2) assert_equal(a, a3) assert_equal(a, a4) # axis=(0, 2) a = 
compress_nd(x, (0, 2)) assert_equal(a, [[[ 0, 2, 3, 4], [ 5, 7, 8, 9], [10, 12, 13, 14], [15, 17, 18, 19]], [[40, 42, 43, 44], [45, 47, 48, 49], [50, 52, 53, 54], [55, 57, 58, 59]]]) a2 = compress_nd(x, (0, -1)) assert_equal(a, a2) def test_compress_rowcols(self): # Tests compress_rowcols x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) assert_equal(compress_rowcols(x), [[4, 5], [7, 8]]) assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]]) assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]]) x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) assert_equal(compress_rowcols(x), [[0, 2], [6, 8]]) assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]]) assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]]) x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) assert_equal(compress_rowcols(x), [[8]]) assert_equal(compress_rowcols(x, 0), [[6, 7, 8]]) assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]]) x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) assert_equal(compress_rowcols(x).size, 0) assert_equal(compress_rowcols(x, 0).size, 0) assert_equal(compress_rowcols(x, 1).size, 0) def test_mask_rowcols(self): # Tests mask_rowcols. 
x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) assert_equal(mask_rowcols(x).mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) assert_equal(mask_rowcols(x, 0).mask, [[1, 1, 1], [0, 0, 0], [0, 0, 0]]) assert_equal(mask_rowcols(x, 1).mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) assert_equal(mask_rowcols(x).mask, [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) assert_equal(mask_rowcols(x, 0).mask, [[0, 0, 0], [1, 1, 1], [0, 0, 0]]) assert_equal(mask_rowcols(x, 1).mask, [[0, 1, 0], [0, 1, 0], [0, 1, 0]]) x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) assert_equal(mask_rowcols(x).mask, [[1, 1, 1], [1, 1, 1], [1, 1, 0]]) assert_equal(mask_rowcols(x, 0).mask, [[1, 1, 1], [1, 1, 1], [0, 0, 0]]) assert_equal(mask_rowcols(x, 1,).mask, [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) assert_(mask_rowcols(x).all() is masked) assert_(mask_rowcols(x, 0).all() is masked) assert_(mask_rowcols(x, 1).all() is masked) assert_(mask_rowcols(x).mask.all()) assert_(mask_rowcols(x, 0).mask.all()) assert_(mask_rowcols(x, 1).mask.all()) @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize(["func", "rowcols_axis"], [(np.ma.mask_rows, 0), (np.ma.mask_cols, 1)]) def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis): # Test deprecation of the axis argument to `mask_rows` and `mask_cols` x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) with pytest.warns(DeprecationWarning): res = func(x, axis=axis) assert_equal(res, mask_rowcols(x, rowcols_axis)) def test_dot(self): # Tests dot product n = np.arange(1, 7) # m = [1, 0, 0, 0, 0, 0] a = masked_array(n, mask=m).reshape(2, 3) b = masked_array(n, mask=m).reshape(3, 2) c = dot(a, b, strict=True) assert_equal(c.mask, [[1, 1], [1, 0]]) c = dot(b, a, strict=True) assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) c = dot(a, b, strict=False) assert_equal(c, 
np.dot(a.filled(0), b.filled(0))) c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # m = [0, 0, 0, 0, 0, 1] a = masked_array(n, mask=m).reshape(2, 3) b = masked_array(n, mask=m).reshape(3, 2) c = dot(a, b, strict=True) assert_equal(c.mask, [[0, 1], [1, 1]]) c = dot(b, a, strict=True) assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) c = dot(a, b, strict=False) assert_equal(c, np.dot(a.filled(0), b.filled(0))) assert_equal(c, dot(a, b)) c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # m = [0, 0, 0, 0, 0, 0] a = masked_array(n, mask=m).reshape(2, 3) b = masked_array(n, mask=m).reshape(3, 2) c = dot(a, b) assert_equal(c.mask, nomask) c = dot(b, a) assert_equal(c.mask, nomask) # a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) c = dot(a, b, strict=True) assert_equal(c.mask, [[1, 1], [0, 0]]) c = dot(a, b, strict=False) assert_equal(c, np.dot(a.filled(0), b.filled(0))) c = dot(b, a, strict=True) assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) c = dot(a, b, strict=True) assert_equal(c.mask, [[0, 0], [1, 1]]) c = dot(a, b) assert_equal(c, np.dot(a.filled(0), b.filled(0))) c = dot(b, a, strict=True) assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]]) c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2) c = dot(a, b, strict=True) assert_equal(c.mask, [[1, 0], [1, 1]]) c = dot(a, b, strict=False) assert_equal(c, np.dot(a.filled(0), b.filled(0))) c = dot(b, a, strict=True) assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]]) c = dot(b, a, strict=False) assert_equal(c, np.dot(b.filled(0), 
a.filled(0))) # a = masked_array(np.arange(8).reshape(2, 2, 2), mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) b = masked_array(np.arange(8).reshape(2, 2, 2), mask=[[[0, 0], [0, 0]], [[0, 0], [0, 1]]]) c = dot(a, b, strict=True) assert_equal(c.mask, [[[[1, 1], [1, 1]], [[0, 0], [0, 1]]], [[[0, 0], [0, 1]], [[0, 0], [0, 1]]]]) c = dot(a, b, strict=False) assert_equal(c.mask, [[[[0, 0], [0, 1]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]) c = dot(b, a, strict=True) assert_equal(c.mask, [[[[1, 0], [0, 0]], [[1, 0], [0, 0]]], [[[1, 0], [0, 0]], [[1, 1], [1, 1]]]]) c = dot(b, a, strict=False) assert_equal(c.mask, [[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[1, 0], [0, 0]]]]) # a = masked_array(np.arange(8).reshape(2, 2, 2), mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) b = 5. c = dot(a, b, strict=True) assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) c = dot(a, b, strict=False) assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) c = dot(b, a, strict=True) assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) c = dot(b, a, strict=False) assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) # a = masked_array(np.arange(8).reshape(2, 2, 2), mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) b = masked_array(np.arange(2), mask=[0, 1]) c = dot(a, b, strict=True) assert_equal(c.mask, [[1, 1], [1, 1]]) c = dot(a, b, strict=False) assert_equal(c.mask, [[1, 0], [0, 0]]) def test_dot_returns_maskedarray(self): # See gh-6611 a = np.eye(3) b = array(a) assert_(type(dot(a, a)) is MaskedArray) assert_(type(dot(a, b)) is MaskedArray) assert_(type(dot(b, a)) is MaskedArray) assert_(type(dot(b, b)) is MaskedArray) def test_dot_out(self): a = array(np.eye(3)) out = array(np.zeros((3, 3))) res = dot(a, a, out=out) assert_(res is out) assert_equal(a, res)
TestCompressFunctions
python
sqlalchemy__sqlalchemy
test/orm/inheritance/test_relationship.py
{ "start": 82192, "end": 85752 }
class ____( AssertsCompiledSQL, fixtures.MappedTest ): __dialect__ = "default" run_create_tables = None run_deletes = None @classmethod def define_tables(cls, metadata): Table( "a", metadata, Column("id", Integer, primary_key=True), Column("name", String), ) Table( "b", metadata, Column("id", Integer, ForeignKey("a.id"), primary_key=True), ) Table( "c", metadata, Column("id", Integer, ForeignKey("a.id"), primary_key=True), Column("bid", Integer, ForeignKey("b.id")), ) Table( "d", metadata, Column("id", Integer, ForeignKey("a.id"), primary_key=True), Column("cid", Integer, ForeignKey("c.id")), ) @classmethod def setup_classes(cls): class A(cls.Comparable): pass class B(A): pass class C(A): pass class D(A): pass @classmethod def setup_mappers(cls): A, B, C, D = cls.classes.A, cls.classes.B, cls.classes.C, cls.classes.D a, b, c, d = cls.tables.a, cls.tables.b, cls.tables.c, cls.tables.d cls.mapper_registry.map_imperatively(A, a) cls.mapper_registry.map_imperatively(B, b, inherits=A) cls.mapper_registry.map_imperatively(C, c, inherits=A) cls.mapper_registry.map_imperatively(D, d, inherits=A) def _two_join_fixture(self): B, C, D = (self.classes.B, self.classes.C, self.classes.D) s = fixture_session() return ( s.query(B.name, C.name, D.name) .select_from(B) .join(C, C.bid == B.id) .join(D, D.cid == C.id) ) def test_two_joins_adaption(self): a, c, d = self.tables.a, self.tables.c, self.tables.d with ( _aliased_join_warning(r"C\(c\)"), _aliased_join_warning(r"D\(d\)"), ): q = self._two_join_fixture()._compile_state() btoc = q.from_clauses[0].left ac_adapted = btoc.right.element.left c_adapted = btoc.right.element.right is_(ac_adapted.element, a) is_(c_adapted.element, c) ctod = q.from_clauses[0].right ad_adapted = ctod.element.left d_adapted = ctod.element.right is_(ad_adapted.element, a) is_(d_adapted.element, d) bname, cname, dname = q._entities adapter = q._get_current_adapter() b_name_adapted = adapter(bname.column, False) c_name_adapted = adapter(cname.column, False) 
d_name_adapted = adapter(dname.column, False) assert bool(b_name_adapted == a.c.name) assert bool(c_name_adapted == ac_adapted.c.name) assert bool(d_name_adapted == ad_adapted.c.name) def test_two_joins_sql(self): q = self._two_join_fixture() with ( _aliased_join_warning(r"C\(c\)"), _aliased_join_warning(r"D\(d\)"), ): self.assert_compile( q, "SELECT a.name AS a_name, a_1.name AS a_1_name, " "a_2.name AS a_2_name " "FROM a JOIN b ON a.id = b.id JOIN " "(a AS a_1 JOIN c AS c_1 ON a_1.id = c_1.id) " "ON c_1.bid = b.id " "JOIN (a AS a_2 JOIN d AS d_1 ON a_2.id = d_1.id) " "ON d_1.cid = c_1.id", )
MultipleAdaptUsesEntityOverTableTest
python
PrefectHQ__prefect
tests/server/schemas/test_filters.py
{ "start": 177, "end": 1984 }
class ____: def test_applies_level_le_filter(self, db): log_filter = LogFilter(level={"le_": 10}) sql_filter = log_filter.as_sql_filter() assert sql_filter.compare(sa.and_(db.Log.level <= 10)) def test_applies_level_ge_filter(self, db): log_filter = LogFilter(level={"ge_": 10}) sql_filter = log_filter.as_sql_filter() assert sql_filter.compare(sa.and_(db.Log.level >= 10)) def test_applies_timestamp_filter_before(self, db): log_filter = LogFilter(timestamp={"before_": NOW}) sql_filter = log_filter.as_sql_filter() assert sql_filter.compare(sa.and_(db.Log.timestamp <= NOW)) def test_applies_timestamp_filter_after(self, db): log_filter = LogFilter(timestamp={"after_": NOW}) sql_filter = log_filter.as_sql_filter() assert sql_filter.compare(sa.and_(db.Log.timestamp >= NOW)) def test_applies_flow_run_id_filter(self, db): flow_run_id = uuid4() log_filter = LogFilter(flow_run_id={"any_": [flow_run_id]}) sql_filter = log_filter.as_sql_filter() assert sql_filter.compare(sa.and_(db.Log.flow_run_id.in_([flow_run_id]))) def test_applies_task_run_id_filter(self, db): task_run_id = uuid4() log_filter = LogFilter(task_run_id={"any_": [task_run_id]}) sql_filter = log_filter.as_sql_filter() assert sql_filter.compare(sa.and_(db.Log.task_run_id.in_([task_run_id]))) def test_applies_multiple_conditions(self, db): task_run_id = uuid4() log_filter = LogFilter(task_run_id={"any_": [task_run_id]}, level={"ge_": 20}) sql_filter = log_filter.as_sql_filter() assert sql_filter.compare( sa.and_(db.Log.task_run_id.in_([task_run_id]), db.Log.level >= 20) )
TestLogFilters
python
pytorch__pytorch
torch/fx/experimental/proxy_tensor.py
{ "start": 55063, "end": 55977 }
class ____(TorchFunctionMode): def __init__(self, tracer: _ProxyTracer) -> None: self.tracer = tracer def __torch_function__( self, func: OpOverload, types: tuple[torch._C._TensorMeta, ...], args: tuple[object, ...] = (), kwargs: Optional[dict[str, object]] = None, ) -> object: kwargs = kwargs or {} # pyrefly: ignore [bad-assignment] self.tracer.torch_fn_metadata = func self.tracer.torch_fn_counts[func] = self.tracer.torch_fn_counts.get(func, 0) + 1 return func(*args, **kwargs) _temp_remove_metadata_torch_function_mode = _make_temp_remove_mode_context_manager( TorchFunctionMetadataMode ) # This mode is **only** used for pre_dispatch tracing. # In particular, we need to make sure that autograd/autocast API's # that do not desugar into dispatcher operators stay in the graph.
TorchFunctionMetadataMode
python
networkx__networkx
networkx/algorithms/planarity.py
{ "start": 5997, "end": 6978 }
class ____: """Represents a different constraint between two intervals. The edges in the left interval must have a different orientation than the one in the right interval. """ def __init__(self, left=Interval(), right=Interval()): self.left = left self.right = right def swap(self): """Swap left and right intervals""" temp = self.left self.left = self.right self.right = temp def lowest(self, planarity_state): """Returns the lowest lowpoint of a conflict pair""" if self.left.empty(): return planarity_state.lowpt[self.right.low] if self.right.empty(): return planarity_state.lowpt[self.left.low] return min( planarity_state.lowpt[self.left.low], planarity_state.lowpt[self.right.low] ) def top_of_stack(l): """Returns the element on top of the stack.""" if not l: return None return l[-1]
ConflictPair
python
celery__celery
t/smoke/tests/test_canvas.py
{ "start": 945, "end": 2902 }
class ____: def test_sanity(self, celery_setup: CeleryTestSetup): queue = celery_setup.worker.worker_queue sig = chain( identity.si("chain_task1").set(queue=queue), identity.si("chain_task2").set(queue=queue), ) | identity.si("test_chain").set(queue=queue) res = sig.apply_async() assert res.get(timeout=RESULT_TIMEOUT) == "test_chain" def test_chain_gets_last_task_id_with_failing_tasks_in_chain(self, celery_setup: CeleryTestSetup): """https://github.com/celery/celery/issues/8786""" queue = celery_setup.worker.worker_queue sig = chain( identity.si("start").set(queue=queue), group( identity.si("a").set(queue=queue), fail.si().set(queue=queue), ), identity.si("break").set(queue=queue), identity.si("end").set(queue=queue), ) res = sig.apply_async() celery_setup.worker.assert_log_does_not_exist("ValueError: task_id must not be empty. Got None instead.") with pytest.raises(ExpectedException): res.get(timeout=RESULT_TIMEOUT) def test_upgrade_to_chord_inside_chains(self, celery_setup: CeleryTestSetup): redis_key = str(uuid.uuid4()) queue = celery_setup.worker.worker_queue group1 = group(redis_echo.si("a", redis_key), redis_echo.si("a", redis_key)) group2 = group(redis_echo.si("a", redis_key), redis_echo.si("a", redis_key)) chord1 = group1 | group2 chain1 = chain(chord1, (redis_echo.si("a", redis_key) | redis_echo.si("b", redis_key).set(queue=queue))) chain1.apply_async(queue=queue).get(timeout=RESULT_TIMEOUT) redis_connection = get_redis_connection() actual = redis_connection.lrange(redis_key, 0, -1) assert actual.count(b"a") == 5 assert actual.count(b"b") == 1 redis_connection.delete(redis_key)
test_chain
python
django__django
django/db/models/functions/mixins.py
{ "start": 1981, "end": 2382 }
class ____: def _resolve_output_field(self): source_fields = self.get_source_fields() if any(isinstance(s, DecimalField) for s in source_fields): return DecimalField() if any(isinstance(s, IntegerField) for s in source_fields): return FloatField() return super()._resolve_output_field() if source_fields else FloatField()
NumericOutputFieldMixin
python
cython__cython
Cython/Compiler/Nodes.py
{ "start": 37158, "end": 44948 }
class ____(Node): # Item in a function declaration argument list. # # base_type CBaseTypeNode # declarator CDeclaratorNode # not_none boolean Tagged with 'not None' # or_none boolean Tagged with 'or None' # accept_none boolean Resolved boolean for not_none/or_none # default ExprNode or None # default_value PyObjectConst constant for default value # annotation ExprNode or None Py3 function arg annotation # is_self_arg boolean Is the "self" arg of an extension type method # is_type_arg boolean Is the "class" arg of an extension type classmethod # kw_only boolean Is a keyword-only argument # is_dynamic boolean Non-literal arg stored inside CyFunction # pos_only boolean Is a positional-only argument # type_from_annotation boolean Was the type deduced from an annotation # # name_cstring property that converts the name to a cstring taking care of unicode # and quoting it # defaults_class_key None or string Name used to lookup this arg in the defaults class child_attrs = ["base_type", "declarator", "default", "annotation"] outer_attrs = ["default", "annotation"] is_self_arg = 0 is_type_arg = 0 is_generic = 1 is_special_method_optional = False kw_only = 0 pos_only = 0 not_none = 0 or_none = 0 type = None name_declarator = None default_value = None annotation = None is_dynamic = 0 defaults_class_key = None type_from_annotation = False def declared_name(self): return self.declarator.declared_name() @property def name_cstring(self): return self.name.as_c_string_literal() @property def hdr_cname(self): # done lazily - needs self.entry to be set to get the class-mangled # name, which means it has to be generated relatively late if self.needs_conversion: return punycodify_name(Naming.arg_prefix + self.entry.name) else: return punycodify_name(Naming.var_prefix + self.entry.name) def analyse(self, env, nonempty=0, is_self_arg=False): if is_self_arg: self.base_type.is_self_arg = self.is_self_arg = is_self_arg if self.type is not None: return self.name_declarator, self.type # The 
parser may misinterpret names as types. We fix that here. if isinstance(self.declarator, CNameDeclaratorNode) and self.declarator.name == '': if nonempty: if self.base_type.is_basic_c_type: # char, short, long called "int" type = self.base_type.analyse(env, could_be_name=True) arg_name = type.empty_declaration_code() else: arg_name = self.base_type.name self.declarator.name = EncodedString(arg_name) self.base_type.name = None self.base_type.is_basic_c_type = False could_be_name = True else: could_be_name = False self.base_type.is_arg = True base_type = self.base_type.analyse(env, could_be_name=could_be_name) base_arg_name = getattr(self.base_type, 'arg_name', None) if base_arg_name: self.declarator.name = base_arg_name # The parser is unable to resolve the ambiguity of [] as part of the # type (e.g. in buffers) or empty declarator (as with arrays). # This is only arises for empty multi-dimensional arrays. if (base_type.is_array and isinstance(self.base_type, TemplatedTypeNode) and isinstance(self.declarator, CArrayDeclaratorNode)): declarator = self.declarator while isinstance(declarator.base, CArrayDeclaratorNode): declarator = declarator.base declarator.base = self.base_type.array_declarator base_type = base_type.base_type # inject type declaration from annotations # this is called without 'env' by AdjustDefByDirectives transform before declaration analysis if (self.annotation and env and env.directives['annotation_typing'] # CSimpleBaseTypeNode has a name attribute; CAnalysedBaseTypeNode # (and maybe other options) doesn't and getattr(self.base_type, "name", None) is None): arg_type = self.inject_type_from_annotations(env) if arg_type is not None: base_type = arg_type return self.declarator.analyse(base_type, env, nonempty=nonempty) def inject_type_from_annotations(self, env): annotation = self.annotation if not annotation: return None modifiers, arg_type = annotation.analyse_type_annotation(env, assigned_value=self.default) if arg_type is not None: 
self.base_type = CAnalysedBaseTypeNode( annotation.pos, type=arg_type, is_arg=True) if arg_type: if "typing.Optional" in modifiers: # "x: Optional[...]" => explicitly allow 'None' arg_type = arg_type.resolve() if arg_type and not arg_type.can_be_optional(): # We probably already reported this as "cannot be applied to non-Python type". # error(annotation.pos, "Only Python type arguments can use typing.Optional[...]") pass else: self.or_none = True elif arg_type is py_object_type: # exclude ": object" from the None check - None is a generic object. self.or_none = True elif self.default and self.default.is_none and (arg_type.can_be_optional() or arg_type.equivalent_type): # "x: ... = None" => implicitly allow 'None' if not arg_type.can_be_optional(): arg_type = arg_type.equivalent_type if not self.or_none: warning(self.pos, "PEP-484 recommends 'typing.Optional[...]' for arguments that can be None.") self.or_none = True elif not self.or_none and arg_type.can_be_optional(): self.not_none = True if arg_type: self.type_from_annotation = True return arg_type def calculate_default_value_code(self, code): if self.default_value is None: if self.default: if self.default.is_literal: # will not output any code, just assign the result_code self.default.generate_evaluation_code(code) return self.type.cast_code(self.default.result()) self.default_value = code.get_argument_default_const(self.type) return self.default_value def annotate(self, code): if self.default: self.default.annotate(code) def generate_assignment_code(self, code, target=None, overloaded_assignment=False): default = self.default if default is None or default.is_literal: return if target is None: target = self.calculate_default_value_code(code) default.generate_evaluation_code(code) default.make_owned_reference(code) result = default.result() if overloaded_assignment else default.result_as(self.type) code.putln("%s = %s;" % (target, result)) code.put_giveref(default.result(), self.type) 
default.generate_post_assignment_code(code) default.free_temps(code)
CArgDeclNode
python
ray-project__ray
python/ray/tune/error.py
{ "start": 696, "end": 816 }
class ____(_SubCategoryTuneError): """Error that happens when starting a tune trial.""" pass
_TuneStartTrialError
python
astropy__astropy
astropy/extern/configobj/configobj.py
{ "start": 3302, "end": 4845 }
class ____(object): def build(self, o): if m is None: raise UnknownType(o.__class__.__name__) return m(o) def build_List(self, o): return list(map(self.build, o.getChildren())) def build_Const(self, o): return o.value def build_Dict(self, o): d = {} i = iter(map(self.build, o.getChildren())) for el in i: d[el] = next(i) return d def build_Tuple(self, o): return tuple(self.build_List(o)) def build_Name(self, o): if o.name == 'None': return None if o.name == 'True': return True if o.name == 'False': return False # An undefined Name raise UnknownType('Undefined Name') def build_Add(self, o): real, imag = list(map(self.build_Const, o.getChildren())) try: real = float(real) except TypeError: raise UnknownType('Add') if not isinstance(imag, complex) or imag.real != 0.0: raise UnknownType('Add') return real+imag def build_Getattr(self, o): parent = self.build(o.expr) return getattr(parent, o.attrname) def build_UnarySub(self, o): return -self.build_Const(o.getChildren()[0]) def build_UnaryAdd(self, o): return self.build_Const(o.getChildren()[0]) _builder = Builder() def unrepr(s): if not s: return s # this is supposed to be safe import ast return ast.literal_eval(s)
Builder
python
sanic-org__sanic
sanic/mixins/startup.py
{ "start": 2476, "end": 52748 }
class ____(metaclass=SanicMeta): _app_registry: ClassVar[dict[str, Sanic]] name: str asgi: bool config: Config listeners: dict[str, list[ListenerType[Any]]] state: ApplicationState websocket_enabled: bool multiplexer: WorkerMultiplexer test_mode: ClassVar[bool] start_method: ClassVar[StartMethod] = _default START_METHOD_SET: ClassVar[bool] = False def setup_loop(self) -> None: """Set up the event loop. An internal method that sets up the event loop to uvloop if possible, or a Windows selector loop if on Windows. Returns: None """ if not self.asgi: if self.config.USE_UVLOOP is True or ( isinstance(self.config.USE_UVLOOP, Default) and not OS_IS_WINDOWS ): try_use_uvloop() elif OS_IS_WINDOWS: try_windows_loop() @property def m(self) -> WorkerMultiplexer: """Interface for interacting with the worker processes This is a shortcut for `app.multiplexer`. It is available only in a worker process using the Sanic server. It allows you to interact with the worker processes, such as sending messages and commands. See [Access to the multiplexer](/en/guide/deployment/manager#access-to-the-multiplexer) for more information. Returns: WorkerMultiplexer: The worker multiplexer instance Examples: ```python app.m.restart() # restarts the worker app.m.terminate() # terminates the worker app.m.scale(4) # scales the number of workers to 4 ``` """ # noqa: E501 return self.multiplexer def make_coffee(self, *args, **kwargs): """ Try for yourself! 
`sanic server:app --coffee` ``` ▄████████▄ ██ ██▀▀▄ ███████████ █ ███████████▄▄▀ ▀███████▀ ``` """ self.state.coffee = True self.run(*args, **kwargs) def run( self, host: Optional[str] = None, port: Optional[int] = None, *, dev: bool = False, debug: bool = False, auto_reload: Optional[bool] = None, version: HTTPVersion = HTTP.VERSION_1, ssl: Union[None, SSLContext, dict, str, list, tuple] = None, sock: Optional[socket] = None, workers: int = 1, protocol: Optional[type[Protocol]] = None, backlog: int = 100, register_sys_signals: bool = True, access_log: Optional[bool] = None, unix: Optional[str] = None, loop: Optional[AbstractEventLoop] = None, reload_dir: Optional[Union[list[str], str]] = None, noisy_exceptions: Optional[bool] = None, motd: bool = True, fast: bool = False, verbosity: int = 0, motd_display: Optional[dict[str, str]] = None, auto_tls: bool = False, single_process: bool = False, ) -> None: """Run the HTTP Server and listen until keyboard interrupt or term signal. On termination, drain connections before closing. .. note:: When you need control over running the Sanic instance, this is the method to use. However, in most cases the preferred method is to use the CLI command: ```sh sanic server:app` ``` If you are using this method to run Sanic, make sure you do the following: 1. Use `if __name__ == "__main__"` to guard the code. 2. Do **NOT** define the app instance inside the `if` block. See [Dynamic Applications](/en/guide/deployment/app-loader) for more information about the second point. Args: host (Optional[str]): Address to host on. port (Optional[int]): Port to host on. dev (bool): Run the server in development mode. debug (bool): Enables debug output (slows server). auto_reload (Optional[bool]): Reload app whenever its source code is changed. Enabled by default in debug mode. version (HTTPVersion): HTTP Version. ssl (Union[None, SSLContext, dict, str, list, tuple]): SSLContext, or location of certificate and key for SSL encryption of worker(s). 
sock (Optional[socket]): Socket for the server to accept connections from. workers (int): Number of processes received before it is respected. protocol (Optional[Type[Protocol]]): Subclass of asyncio Protocol class. backlog (int): A number of unaccepted connections that the system will allow before refusing new connections. register_sys_signals (bool): Register SIG* events. access_log (Optional[bool]): Enables writing access logs (slows server). unix (Optional[str]): Unix socket to listen on instead of TCP port. loop (Optional[AbstractEventLoop]): AsyncIO event loop. reload_dir (Optional[Union[List[str], str]]): Directory to watch for code changes, if auto_reload is True. noisy_exceptions (Optional[bool]): Log exceptions that are normally considered to be quiet/silent. motd (bool): Display Message of the Day. fast (bool): Enable fast mode. verbosity (int): Verbosity level. motd_display (Optional[Dict[str, str]]): Customize Message of the Day display. auto_tls (bool): Enable automatic TLS certificate handling. single_process (bool): Enable single process mode. Returns: None Raises: RuntimeError: Raised when attempting to serve HTTP/3 as a secondary server. RuntimeError: Raised when attempting to use both `fast` and `workers`. RuntimeError: Raised when attempting to use `single_process` with `fast`, `workers`, or `auto_reload`. TypeError: Raised when attempting to use `loop` with `create_server`. ValueError: Raised when `PROXIES_COUNT` is negative. 
Examples: ```python from sanic import Sanic, Request, json app = Sanic("TestApp") @app.get("/") async def handler(request: Request): return json({"foo": "bar"}) if __name__ == "__main__": app.run(port=9999, dev=True) ``` """ # noqa: E501 self.prepare( host=host, port=port, dev=dev, debug=debug, auto_reload=auto_reload, version=version, ssl=ssl, sock=sock, workers=workers, protocol=protocol, backlog=backlog, register_sys_signals=register_sys_signals, access_log=access_log, unix=unix, loop=loop, reload_dir=reload_dir, noisy_exceptions=noisy_exceptions, motd=motd, fast=fast, verbosity=verbosity, motd_display=motd_display, auto_tls=auto_tls, single_process=single_process, ) if single_process: serve = self.__class__.serve_single else: serve = self.__class__.serve serve(primary=self) # type: ignore def prepare( self, host: Optional[str] = None, port: Optional[int] = None, *, dev: bool = False, debug: bool = False, auto_reload: Optional[bool] = None, version: HTTPVersion = HTTP.VERSION_1, ssl: Union[None, SSLContext, dict, str, list, tuple] = None, sock: Optional[socket] = None, workers: int = 1, protocol: Optional[type[Protocol]] = None, backlog: int = 100, register_sys_signals: bool = True, access_log: Optional[bool] = None, unix: Optional[str] = None, loop: Optional[AbstractEventLoop] = None, reload_dir: Optional[Union[list[str], str]] = None, noisy_exceptions: Optional[bool] = None, motd: bool = True, fast: bool = False, verbosity: int = 0, motd_display: Optional[dict[str, str]] = None, coffee: bool = False, auto_tls: bool = False, single_process: bool = False, ) -> None: """Prepares one or more Sanic applications to be served simultaneously. This low-level API is typically used when you need to run multiple Sanic applications at the same time. Once prepared, `Sanic.serve()` should be called in the `if __name__ == "__main__"` block. .. note:: "Preparing" and "serving" with this function is equivalent to using `app.run` for a single instance. 
This should only be used when running multiple applications at the same time. Args: host (Optional[str], optional): Hostname to listen on. Defaults to `None`. port (Optional[int], optional): Port to listen on. Defaults to `None`. dev (bool, optional): Development mode. Defaults to `False`. debug (bool, optional): Debug mode. Defaults to `False`. auto_reload (Optional[bool], optional): Auto reload feature. Defaults to `None`. version (HTTPVersion, optional): HTTP version to use. Defaults to `HTTP.VERSION_1`. ssl (Union[None, SSLContext, dict, str, list, tuple], optional): SSL configuration. Defaults to `None`. sock (Optional[socket], optional): Socket to bind to. Defaults to `None`. workers (int, optional): Number of worker processes. Defaults to `1`. protocol (Optional[Type[Protocol]], optional): Custom protocol class. Defaults to `None`. backlog (int, optional): Maximum number of pending connections. Defaults to `100`. register_sys_signals (bool, optional): Register system signals. Defaults to `True`. access_log (Optional[bool], optional): Access log. Defaults to `None`. unix (Optional[str], optional): Unix socket. Defaults to `None`. loop (Optional[AbstractEventLoop], optional): Event loop. Defaults to `None`. reload_dir (Optional[Union[List[str], str]], optional): Reload directory. Defaults to `None`. noisy_exceptions (Optional[bool], optional): Display exceptions. Defaults to `None`. motd (bool, optional): Display message of the day. Defaults to `True`. fast (bool, optional): Fast mode. Defaults to `False`. verbosity (int, optional): Verbosity level. Defaults to `0`. motd_display (Optional[Dict[str, str]], optional): Custom MOTD display. Defaults to `None`. coffee (bool, optional): Coffee mode. Defaults to `False`. auto_tls (bool, optional): Auto TLS. Defaults to `False`. single_process (bool, optional): Single process mode. Defaults to `False`. Raises: RuntimeError: Raised when attempting to serve HTTP/3 as a secondary server. 
RuntimeError: Raised when attempting to use both `fast` and `workers`. RuntimeError: Raised when attempting to use `single_process` with `fast`, `workers`, or `auto_reload`. TypeError: Raised when attempting to use `loop` with `create_server`. ValueError: Raised when `PROXIES_COUNT` is negative. Examples: ```python if __name__ == "__main__": app.prepare() app.serve() ``` """ # noqa: E501 if version == 3 and self.state.server_info: raise RuntimeError( "Serving HTTP/3 instances as a secondary server is " "not supported. There can only be a single HTTP/3 worker " "and it must be the first instance prepared." ) if dev: debug = True auto_reload = True if debug and access_log is None: access_log = True self.state.verbosity = verbosity if not self.state.auto_reload: self.state.auto_reload = bool(auto_reload) if fast and workers != 1: raise RuntimeError("You cannot use both fast=True and workers=X") if single_process and (fast or (workers > 1) or auto_reload): raise RuntimeError( "Single process cannot be run with multiple workers " "or auto-reload" ) if register_sys_signals is False and not single_process: raise RuntimeError( "Cannot run Sanic.serve with register_sys_signals=False. " "Use Sanic.serve_single." ) if motd_display: self.config.MOTD_DISPLAY.update(motd_display) if reload_dir: if isinstance(reload_dir, str): reload_dir = [reload_dir] for directory in reload_dir: direc = Path(directory) if not direc.is_dir(): logger.warning( f"Directory {directory} could not be located" ) self.state.reload_dirs.add(Path(directory)) if loop is not None: raise TypeError( "loop is not a valid argument. 
To use an existing loop, " "change to create_server().\nSee more: " "https://sanic.readthedocs.io/en/latest/sanic/deploying.html" "#asynchronous-support" ) if sock is None: host, port = self.get_address(host, port, version, auto_tls) if protocol is None: protocol = ( WebSocketProtocol if self.websocket_enabled else HttpProtocol ) # Set explicitly passed configuration values for attribute, value in { "ACCESS_LOG": access_log, "AUTO_RELOAD": auto_reload, "MOTD": motd, "NOISY_EXCEPTIONS": noisy_exceptions, }.items(): if value is not None: setattr(self.config, attribute, value) if fast: self.state.fast = True try: workers = len(os.sched_getaffinity(0)) except AttributeError: # no cov workers = os.cpu_count() or 1 if coffee: self.state.coffee = True server_settings = self._helper( host=host, port=port, debug=debug, version=version, ssl=ssl, sock=sock, unix=unix, workers=workers, protocol=protocol, backlog=backlog, register_sys_signals=register_sys_signals, auto_tls=auto_tls, ) self.state.server_info.append( ApplicationServerInfo(settings=server_settings) ) # if self.config.USE_UVLOOP is True or ( # self.config.USE_UVLOOP is _default and not OS_IS_WINDOWS # ): # try_use_uvloop() async def create_server( self, host: Optional[str] = None, port: Optional[int] = None, *, debug: bool = False, ssl: Union[None, SSLContext, dict, str, list, tuple] = None, sock: Optional[socket] = None, protocol: Optional[type[Protocol]] = None, backlog: int = 100, access_log: Optional[bool] = None, unix: Optional[str] = None, return_asyncio_server: bool = True, asyncio_server_kwargs: Optional[dict[str, Any]] = None, noisy_exceptions: Optional[bool] = None, ) -> Optional[AsyncioServer]: """ Low level API for creating a Sanic Server instance. This method will create a Sanic Server instance, but will not start it. This is useful for integrating Sanic into other systems. But, you should take caution when using it as it is a low level API and does not perform any of the lifecycle events. .. 
note:: This does not support multiprocessing and is not the preferred way to run a Sanic application. Proceed with caution. You will need to start the server yourself as shown in the example below. You are responsible for the lifecycle of the server, including app startup using `await app.startup()`. No events will be triggered for you, so you will need to trigger them yourself if wanted. Args: host (Optional[str]): Address to host on. port (Optional[int]): Port to host on. debug (bool): Enables debug output (slows server). ssl (Union[None, SSLContext, dict, str, list, tuple]): SSLContext, or location of certificate and key for SSL encryption of worker(s). sock (Optional[socket]): Socket for the server to accept connections from. protocol (Optional[Type[Protocol]]): Subclass of `asyncio.Protocol` class. backlog (int): Number of unaccepted connections that the system will allow before refusing new connections. access_log (Optional[bool]): Enables writing access logs (slows server). return_asyncio_server (bool): _DEPRECATED_ asyncio_server_kwargs (Optional[Dict[str, Any]]): Key-value arguments for asyncio/uvloop `create_server` method. noisy_exceptions (Optional[bool]): Log exceptions that are normally considered to be quiet/silent. Returns: Optional[AsyncioServer]: AsyncioServer if `return_asyncio_server` is `True` else `None`. 
Examples: ```python import asyncio import uvloop from sanic import Sanic, response app = Sanic("Example") @app.route("/") async def test(request): return response.json({"answer": "42"}) async def main(): server = await app.create_server() await server.startup() await server.serve_forever() if __name__ == "__main__": asyncio.set_event_loop(uvloop.new_event_loop()) asyncio.run(main()) ``` """ if sock is None: host, port = host, port = self.get_address(host, port) if protocol is None: protocol = ( WebSocketProtocol if self.websocket_enabled else HttpProtocol ) # Set explicitly passed configuration values for attribute, value in { "ACCESS_LOG": access_log, "NOISY_EXCEPTIONS": noisy_exceptions, }.items(): if value is not None: setattr(self.config, attribute, value) if not return_asyncio_server: return_asyncio_server = True deprecation( "The `return_asyncio_server` argument is deprecated and " "ignored. It will be removed in v24.3.", 24.3, ) server_settings = self._helper( host=host, port=port, debug=debug, ssl=ssl, sock=sock, unix=unix, loop=get_event_loop(), protocol=protocol, backlog=backlog, run_async=return_asyncio_server, ) if not isinstance(self.config.USE_UVLOOP, Default): error_logger.warning( "You are trying to change the uvloop configuration, but " "this is only effective when using the run(...) method. " "When using the create_server(...) method Sanic will use " "the already existing loop." ) main_start = server_settings.pop("main_start", None) main_stop = server_settings.pop("main_stop", None) if main_start or main_stop: logger.warning( "Listener events for the main process are not available " "with create_server()" ) return await serve( asyncio_server_kwargs=asyncio_server_kwargs, **server_settings ) def stop(self, terminate: bool = True, unregister: bool = False) -> None: """This kills the Sanic server, cleaning up after itself. Args: terminate (bool): Force kill all requests immediately without allowing them to finish processing. 
unregister (bool): Unregister the app from the global registry. Returns: None """ if terminate and hasattr(self, "multiplexer"): self.multiplexer.terminate() if self.state.stage is not ServerStage.STOPPED: self.shutdown_tasks(timeout=0) # type: ignore for task in all_tasks(): with suppress(AttributeError): if task.get_name() == "RunServer": task.cancel() get_event_loop().stop() if unregister: self.__class__.unregister_app(self) # type: ignore def _helper( self, host: Optional[str] = None, port: Optional[int] = None, debug: bool = False, version: HTTPVersion = HTTP.VERSION_1, ssl: Union[None, SSLContext, dict, str, list, tuple] = None, sock: Optional[socket] = None, unix: Optional[str] = None, workers: int = 1, loop: Optional[AbstractEventLoop] = None, protocol: type[Protocol] = HttpProtocol, backlog: int = 100, register_sys_signals: bool = True, run_async: bool = False, auto_tls: bool = False, ) -> dict[str, Any]: """Helper function used by `run` and `create_server`.""" if self.config.PROXIES_COUNT and self.config.PROXIES_COUNT < 0: raise ValueError( "PROXIES_COUNT cannot be negative. 
" "https://sanic.readthedocs.io/en/latest/sanic/config.html" "#proxy-configuration" ) if not self.state.is_debug: self.state.mode = Mode.DEBUG if debug else Mode.PRODUCTION setup_logging(self.state.is_debug, self.config.NO_COLOR) if isinstance(version, int): version = HTTP(version) ssl = process_to_context(ssl) if version is HTTP.VERSION_3 or auto_tls: if TYPE_CHECKING: self = cast(Sanic, self) ssl = get_ssl_context(self, ssl) self.state.host = host or "" self.state.port = port or 0 self.state.workers = workers self.state.ssl = ssl self.state.unix = unix self.state.sock = sock server_settings = { "protocol": protocol, "host": host, "port": port, "version": version, "sock": sock, "unix": unix, "ssl": ssl, "app": self, "signal": ServerSignal(), "loop": loop, "register_sys_signals": register_sys_signals, "backlog": backlog, } self.motd(server_settings=server_settings) if ( is_atty() and not self.state.is_debug and not os.environ.get("SANIC_IGNORE_PRODUCTION_WARNING") ): error_logger.warning( f"{Colors.YELLOW}Sanic is running in PRODUCTION mode. " "Consider using '--debug' or '--dev' while actively " f"developing your application.{Colors.END}" ) # Register start/stop events for event_name, settings_name, reverse in ( ("main_process_start", "main_start", False), ("main_process_stop", "main_stop", True), ): listeners = self.listeners[event_name].copy() if reverse: listeners.reverse() # Prepend sanic to the arguments when listeners are triggered listeners = [partial(listener, self) for listener in listeners] server_settings[settings_name] = listeners # type: ignore if run_async: server_settings["run_async"] = True return server_settings def motd( self, server_settings: Optional[dict[str, Any]] = None, ) -> None: """Outputs the message of the day (MOTD). It generally can only be called once per process, and is usually called by the `run` method in the main process. Args: server_settings (Optional[Dict[str, Any]], optional): Settings for the server. Defaults to `None`. 
Returns: None """ if ( os.environ.get("SANIC_WORKER_NAME") or os.environ.get("SANIC_MOTD_OUTPUT") or os.environ.get("SANIC_WORKER_PROCESS") or os.environ.get("SANIC_SERVER_RUNNING") ): return serve_location = self.get_server_location(server_settings) if self.config.MOTD: logo = get_logo(coffee=self.state.coffee) display, extra = self.get_motd_data(server_settings) MOTD.output(logo, serve_location, display, extra) def get_motd_data( self, server_settings: Optional[dict[str, Any]] = None ) -> tuple[dict[str, Any], dict[str, Any]]: """Retrieves the message of the day (MOTD) data. Args: server_settings (Optional[Dict[str, Any]], optional): Settings for the server. Defaults to `None`. Returns: Tuple[Dict[str, Any], Dict[str, Any]]: A tuple containing two dictionaries with the relevant MOTD data. """ mode = [f"{self.state.mode},"] if self.state.fast: mode.append("goin' fast") if self.state.asgi: mode.append("ASGI") else: if self.state.workers == 1: mode.append("single worker") else: mode.append(f"w/ {self.state.workers} workers") if server_settings: server = ", ".join( ( self.state.server, server_settings["version"].display(), # type: ignore ) ) else: server = "ASGI" if self.asgi else "unknown" # type: ignore display = { "app": self.name, "mode": " ".join(mode), "server": server, "python": platform.python_version(), "platform": platform.platform(), } extra = {} if self.config.AUTO_RELOAD: reload_display = "enabled" if self.state.reload_dirs: reload_display += ", ".join( [ "", *( str(path.absolute()) for path in self.state.reload_dirs ), ] ) display["auto-reload"] = reload_display packages = [] for package_name in SANIC_PACKAGES: module_name = package_name.replace("-", "_") try: module = import_module(module_name) packages.append(f"{package_name}=={module.__version__}") # type: ignore except ImportError: # no cov ... 
if packages: display["packages"] = ", ".join(packages) if self.config.MOTD_DISPLAY: extra.update(self.config.MOTD_DISPLAY) return display, extra @property def serve_location(self) -> str: """Retrieve the server location. Returns: str: The server location. """ try: server_settings = self.state.server_info[0].settings return self.get_server_location(server_settings) except IndexError: location = "ASGI" if self.asgi else "unknown" # type: ignore return f"http://<{location}>" @staticmethod def get_server_location( server_settings: Optional[dict[str, Any]] = None, ) -> str: """Using the server settings, retrieve the server location. Args: server_settings (Optional[Dict[str, Any]], optional): Settings for the server. Defaults to `None`. Returns: str: The server location. """ serve_location = "" proto = "http" if not server_settings: return serve_location host = server_settings["host"] port = server_settings["port"] if server_settings.get("ssl") is not None: proto = "https" if server_settings.get("unix"): serve_location = f"{server_settings['unix']} {proto}://..." elif server_settings.get("sock"): host, port, *_ = server_settings["sock"].getsockname() if not serve_location and host and port: # colon(:) is legal for a host only in an ipv6 address display_host = f"[{host}]" if ":" in host else host serve_location = f"{proto}://{display_host}:{port}" return serve_location @staticmethod def get_address( host: Optional[str], port: Optional[int], version: HTTPVersion = HTTP.VERSION_1, auto_tls: bool = False, ) -> tuple[str, int]: """Retrieve the host address and port, with default values based on the given parameters. Args: host (Optional[str]): Host IP or FQDN for the service to use. Defaults to `"127.0.0.1"`. port (Optional[int]): Port number. Defaults to `8443` if version is 3 or `auto_tls=True`, else `8000` version (HTTPVersion, optional): HTTP Version. Defaults to `HTTP.VERSION_1` (HTTP/1.1). auto_tls (bool, optional): Automatic TLS flag. Defaults to `False`. 
Returns: Tuple[str, int]: Tuple containing the host and port """ # noqa: E501 host = host or "127.0.0.1" port = port or (8443 if (version == 3 or auto_tls) else 8000) return host, port @classmethod def should_auto_reload(cls) -> bool: """Check if any applications have auto-reload enabled. Returns: bool: `True` if any applications have auto-reload enabled, else `False`. """ return any(app.state.auto_reload for app in cls._app_registry.values()) @classmethod def _get_startup_method(cls) -> str: return ( cls.start_method if not isinstance(cls.start_method, Default) else "spawn" ) @classmethod def _set_startup_method(cls) -> None: if cls.START_METHOD_SET and not cls.test_mode: return method = cls._get_startup_method() try: set_start_method(method, force=cls.test_mode) except RuntimeError: ctx = get_context() actual = ctx.get_start_method() if actual != method: raise RuntimeError( f"Start method '{method}' was requested, but '{actual}' " "was already set.\nFor more information, see: " "https://sanic.dev/en/guide/running/manager.html#overcoming-a-coderuntimeerrorcode" ) from None else: raise cls.START_METHOD_SET = True @classmethod def _get_context(cls) -> BaseContext: method = cls._get_startup_method() logger.debug("Creating multiprocessing context using '%s'", method) actual = get_start_method() if method != actual: raise RuntimeError( f"Start method '{method}' was requested, but '{actual}' " "was already set.\nFor more information, see: " "https://sanic.dev/en/guide/running/manager.html#overcoming-a-coderuntimeerrorcode" ) from None return get_context() @classmethod def serve( cls, primary: Optional[Sanic] = None, *, app_loader: Optional[AppLoader] = None, factory: Optional[Callable[[], Sanic]] = None, ) -> None: """Serve one or more Sanic applications. This is the main entry point for running Sanic applications. It should be called in the `if __name__ == "__main__"` block. Args: primary (Optional[Sanic], optional): The primary Sanic application to serve. 
Defaults to `None`. app_loader (Optional[AppLoader], optional): An AppLoader instance to use for loading applications. Defaults to `None`. factory (Optional[Callable[[], Sanic]], optional): A factory function to use for loading applications. Defaults to `None`. Raises: RuntimeError: Raised when no applications are found. RuntimeError: Raised when no server information is found for the primary application. RuntimeError: Raised when attempting to use `loop` with `create_server`. RuntimeError: Raised when attempting to use `single_process` with `fast`, `workers`, or `auto_reload`. RuntimeError: Raised when attempting to serve HTTP/3 as a secondary server. RuntimeError: Raised when attempting to use both `fast` and `workers`. TypeError: Raised when attempting to use `loop` with `create_server`. ValueError: Raised when `PROXIES_COUNT` is negative. Examples: ```python if __name__ == "__main__": app.prepare() Sanic.serve() ``` """ cls._set_startup_method() os.environ["SANIC_MOTD_OUTPUT"] = "true" apps = list(cls._app_registry.values()) if factory: primary = factory() else: if not primary: if app_loader: primary = app_loader.load() if not primary: try: primary = apps[0] except IndexError: raise RuntimeError( "Did not find any applications." ) from None # This exists primarily for unit testing if not primary.state.server_info: # no cov for app in apps: app.state.server_info.clear() return try: primary_server_info = primary.state.server_info[0] except IndexError: raise RuntimeError( f"No server information found for {primary.name}. Perhaps you " "need to run app.prepare(...)?" 
) from None socks = [] sync_manager = Manager() worker_state: Mapping[str, Any] = {"state": "NONE"} setup_ext(primary) exit_code = 0 try: primary_server_info.settings.pop("main_start", None) primary_server_info.settings.pop("main_stop", None) main_start = primary.listeners.get("main_process_start") main_stop = primary.listeners.get("main_process_stop") app = primary_server_info.settings.pop("app") app.setup_loop() loop = new_event_loop() trigger_events(main_start, loop, primary) socks = [ sock for sock in [ configure_socket(server_info.settings) for app in apps for server_info in app.state.server_info ] if sock ] primary_server_info.settings["run_multiple"] = True monitor_sub, monitor_pub = Pipe(True) worker_state = sync_manager.dict() kwargs: dict[str, Any] = { **primary_server_info.settings, "monitor_publisher": monitor_pub, "worker_state": worker_state, } if not app_loader: if factory: app_loader = AppLoader(factory=factory) else: app_loader = AppLoader( factory=partial(cls.get_app, app.name) # type: ignore ) kwargs["app_name"] = app.name kwargs["app_loader"] = app_loader kwargs["server_info"] = {} kwargs["passthru"] = { "auto_reload": app.auto_reload, "state": { "verbosity": app.state.verbosity, "mode": app.state.mode, }, "config": { "ACCESS_LOG": app.config.ACCESS_LOG, "NOISY_EXCEPTIONS": app.config.NOISY_EXCEPTIONS, }, "shared_ctx": app.shared_ctx.__dict__, } for app in apps: kwargs["server_info"][app.name] = [] for server_info in app.state.server_info: server_info.settings = { k: v for k, v in server_info.settings.items() if k not in ("main_start", "main_stop", "app", "ssl") } kwargs["server_info"][app.name].append(server_info) ssl = kwargs.get("ssl") if isinstance(ssl, SanicSSLContext): kwargs["ssl"] = ssl.sanic manager = WorkerManager( primary.state.workers, worker_serve, kwargs, cls._get_context(), (monitor_pub, monitor_sub), worker_state, ) if cls.should_auto_reload(): reload_dirs: set[Path] = primary.state.reload_dirs.union( *(app.state.reload_dirs for 
app in apps) ) reloader = Reloader(monitor_pub, 0, reload_dirs, app_loader) manager.manage("Reloader", reloader, {}, transient=False) inspector = None if primary.config.INSPECTOR: display, extra = primary.get_motd_data() packages = [ pkg.strip() for pkg in display["packages"].split(",") ] module = import_module("sanic") sanic_version = f"sanic=={module.__version__}" # type: ignore app_info = { **display, "packages": [sanic_version, *packages], "extra": extra, } inspector = primary.inspector_class( monitor_pub, app_info, worker_state, primary.config.INSPECTOR_HOST, primary.config.INSPECTOR_PORT, primary.config.INSPECTOR_API_KEY, primary.config.INSPECTOR_TLS_KEY, primary.config.INSPECTOR_TLS_CERT, ) manager.manage("Inspector", inspector, {}, transient=False) primary._inspector = inspector primary._manager = manager ready = primary.listeners["main_process_ready"] trigger_events(ready, loop, primary) manager.run() except ServerKilled: exit_code = 1 except BaseException: kwargs = primary_server_info.settings error_logger.exception( "Experienced exception while trying to serve" ) raise finally: logger.info("Server Stopped") for app in apps: app.state.server_info.clear() app.router.reset() app.signal_router.reset() for sock in socks: try: sock.shutdown(SHUT_RDWR) except OSError: ... sock.close() socks = [] trigger_events(main_stop, loop, primary) loop.close() cls._cleanup_env_vars() cls._cleanup_apps() limit = 100 while cls._get_process_states(worker_state): sleep(0.1) limit -= 1 if limit <= 0: error_logger.warning( "Worker shutdown timed out. " "Some processes may still be running." 
) break sync_manager.shutdown() unix = kwargs.get("unix") if unix: remove_unix_socket(unix) logger.debug(get_goodbye()) if exit_code: os._exit(exit_code) @staticmethod def _get_process_states(worker_state) -> list[str]: return [ state for s in worker_state.values() if ( (state := s.get("state")) and state not in ("TERMINATED", "FAILED", "COMPLETED", "NONE") ) ] @classmethod def serve_single(cls, primary: Optional[Sanic] = None) -> None: """Serve a single process of a Sanic application. Similar to `serve`, but only serves a single process. When used, certain features are disabled, such as `fast`, `workers`, `multiplexer`, `auto_reload`, and the Inspector. It is almost never needed to use this method directly. Instead, you should use the CLI: ```sh sanic app.sanic:app --single-process ``` Or, if you need to do it programmatically, you should use the `single_process` argument of `run`: ```python app.run(single_process=True) ``` Args: primary (Optional[Sanic], optional): The primary Sanic application to serve. Defaults to `None`. Raises: RuntimeError: Raised when no applications are found. RuntimeError: Raised when no server information is found for the primary application. RuntimeError: Raised when attempting to serve HTTP/3 as a secondary server. RuntimeError: Raised when attempting to use both `fast` and `workers`. ValueError: Raised when `PROXIES_COUNT` is negative. 
""" os.environ["SANIC_MOTD_OUTPUT"] = "true" apps = list(cls._app_registry.values()) if not primary: try: primary = apps[0] except IndexError: raise RuntimeError("Did not find any applications.") # This exists primarily for unit testing if not primary.state.server_info: # no cov for app in apps: app.state.server_info.clear() return primary_server_info = primary.state.server_info[0] primary.before_server_start(partial(primary._start_servers, apps=apps)) kwargs = { k: v for k, v in primary_server_info.settings.items() if k not in ( "main_start", "main_stop", "app", ) } kwargs["app_name"] = primary.name kwargs["app_loader"] = None sock = configure_socket(kwargs) kwargs["server_info"] = {} kwargs["server_info"][primary.name] = [] for server_info in primary.state.server_info: server_info.settings = { k: v for k, v in server_info.settings.items() if k not in ("main_start", "main_stop", "app") } kwargs["server_info"][primary.name].append(server_info) try: worker_serve(monitor_publisher=None, **kwargs) except BaseException: error_logger.exception( "Experienced exception while trying to serve" ) raise finally: logger.info("Server Stopped") for app in apps: app.state.server_info.clear() app.router.reset() app.signal_router.reset() if sock: sock.close() cls._cleanup_env_vars() cls._cleanup_apps() async def _start_servers( self, primary: Sanic, _, apps: list[Sanic], ) -> None: for app in apps: if ( app.name is not primary.name and app.state.workers != primary.state.workers and app.state.server_info ): message = ( f"The primary application {repr(primary)} is running " f"with {primary.state.workers} worker(s). All " "application instances will run with the same number. " f"You requested {repr(app)} to run with " f"{app.state.workers} worker(s), which will be ignored " "in favor of the primary application." 
) if is_atty(): message = "".join( [ Colors.YELLOW, message, Colors.END, ] ) error_logger.warning(message, exc_info=True) for server_info in app.state.server_info: if server_info.stage is not ServerStage.SERVING: app.state.primary = False handlers = [ *server_info.settings.pop("main_start", []), *server_info.settings.pop("main_stop", []), ] if handlers: # no cov error_logger.warning( f"Sanic found {len(handlers)} listener(s) on " "secondary applications attached to the main " "process. These will be ignored since main " "process listeners can only be attached to your " "primary application: " f"{repr(primary)}" ) if not server_info.settings["loop"]: server_info.settings["loop"] = get_running_loop() serve_args: dict[str, Any] = { **server_info.settings, "run_async": True, "reuse_port": bool(primary.state.workers - 1), } if "app" not in serve_args: serve_args["app"] = app try: server_info.server = await serve(**serve_args) except OSError as e: # no cov first_message = ( "An OSError was detected on startup. 
" "The encountered error was: " ) second_message = str(e) if is_atty(): message_parts = [ Colors.YELLOW, first_message, Colors.RED, second_message, Colors.END, ] else: message_parts = [first_message, second_message] message = "".join(message_parts) error_logger.warning(message, exc_info=True) continue primary.add_task( self._run_server(app, server_info), name="RunServer" ) async def _run_server( self, app: StartupMixin, server_info: ApplicationServerInfo, ) -> None: # no cov try: # We should never get to this point without a server # This is primarily to keep mypy happy if not server_info.server: # no cov raise RuntimeError("Could not locate AsyncioServer") if app.state.stage is ServerStage.STOPPED: server_info.stage = ServerStage.SERVING await server_info.server.startup() await server_info.server.before_start() await server_info.server.after_start() await server_info.server.serve_forever() except CancelledError: # We should never get to this point without a server # This is primarily to keep mypy happy if not server_info.server: # no cov raise RuntimeError("Could not locate AsyncioServer") await server_info.server.before_stop() await server_info.server.close() await server_info.server.after_stop() finally: server_info.stage = ServerStage.STOPPED server_info.server = None @staticmethod def _cleanup_env_vars(): variables = ( "SANIC_RELOADER_PROCESS", "SANIC_IGNORE_PRODUCTION_WARNING", "SANIC_WORKER_NAME", "SANIC_MOTD_OUTPUT", "SANIC_WORKER_PROCESS", "SANIC_SERVER_RUNNING", ) for var in variables: try: del os.environ[var] except KeyError: ... @classmethod def _cleanup_apps(cls): for app in cls._app_registry.values(): app.state.server_info.clear() app.router.reset() app.signal_router.reset()
StartupMixin
python
ansible__ansible
test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol2/plugins/doc_fragments/version_added.py
{ "start": 156, "end": 251 }
class ____(object): DOCUMENTATION = r""" options: {} version_added: 1.0.0 """
ModuleDocFragment
python
scipy__scipy
scipy/linalg/tests/test_batch.py
{ "start": 843, "end": 28552 }
class ____: # Test batch support for most linalg functions def batch_test(self, fun, arrays, *, core_dim=2, n_out=1, kwargs=None, dtype=None, broadcast=True, check_kwargs=True): # Check that all outputs of batched call `fun(A, **kwargs)` are the same # as if we loop over the separate vectors/matrices in `A`. Also check # that `fun` accepts `A` by position or keyword and that results are # identical. This is important because the name of the array argument # is manually specified to the decorator, and it's easy to mess up. # However, this makes it hard to test positional arguments passed # after the array, so we test that separately for a few functions to # make sure the decorator is working as it should. kwargs = {} if kwargs is None else kwargs parameters = list(inspect.signature(fun).parameters.keys()) arrays = (arrays,) if not isinstance(arrays, tuple) else arrays # Identical results when passing argument by keyword or position res2 = fun(*arrays, **kwargs) if check_kwargs: res1 = fun(**dict(zip(parameters, arrays)), **kwargs) for out1, out2 in zip(res1, res2): # even a single array is iterable... np.testing.assert_equal(out1, out2) # Check results vs looping over res = (res2,) if n_out == 1 else res2 # This is not the general behavior (only batch dimensions get # broadcasted by the decorator) but it's easier for testing. 
if broadcast: arrays = np.broadcast_arrays(*arrays) batch_shape = arrays[0].shape[:-core_dim] for i in range(batch_shape[0]): for j in range(batch_shape[1]): arrays_ij = (array[i, j] for array in arrays) ref = fun(*arrays_ij, **kwargs) ref = ((np.asarray(ref),) if n_out == 1 else tuple(np.asarray(refk) for refk in ref)) for k in range(n_out): assert_allclose(res[k][i, j], ref[k]) assert np.shape(res[k][i, j]) == ref[k].shape for k in range(len(ref)): out_dtype = ref[k].dtype if dtype is None else dtype assert res[k].dtype == out_dtype return res2 # return original, non-tuplized result @pytest.mark.parametrize('dtype', floating) def test_expm_cond(self, dtype): rng = np.random.default_rng(8342310302941288912051) A = rng.random((5, 3, 4, 4)).astype(dtype) self.batch_test(linalg.expm_cond, A) @pytest.mark.parametrize('dtype', floating) def test_issymmetric(self, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_nearly_hermitian((5, 3, 4, 4), dtype, 3e-4, rng) res = self.batch_test(linalg.issymmetric, A, kwargs=dict(atol=1e-3)) assert not np.all(res) # ensure test is not trivial: not all True or False; assert np.any(res) # also confirms that `atol` is passed to issymmetric @pytest.mark.parametrize('dtype', floating) def test_ishermitian(self, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_nearly_hermitian((5, 3, 4, 4), dtype, 3e-4, rng) res = self.batch_test(linalg.ishermitian, A, kwargs=dict(atol=1e-3)) assert not np.all(res) # ensure test is not trivial: not all True or False; assert np.any(res) # also confirms that `atol` is passed to ishermitian @pytest.mark.parametrize('dtype', floating) def test_diagsvd(self, dtype): rng = np.random.default_rng(8342310302941288912051) A = rng.random((5, 3, 4)).astype(dtype) res1 = self.batch_test(linalg.diagsvd, A, kwargs=dict(M=6, N=4), core_dim=1) # test that `M, N` can be passed by position res2 = linalg.diagsvd(A, 6, 4) np.testing.assert_equal(res1, res2) @pytest.mark.parametrize('fun', 
[linalg.inv, linalg.sqrtm, linalg.signm, linalg.sinm, linalg.cosm, linalg.tanhm, linalg.sinhm, linalg.coshm, linalg.tanhm, linalg.pinv, linalg.pinvh, linalg.orth]) @pytest.mark.parametrize('dtype', floating) def test_matmat(self, fun, dtype): # matrix in, matrix out rng = np.random.default_rng(8342310302941288912051) A = get_random((5, 3, 4, 4), dtype=dtype, rng=rng) # sqrtm can return complex output for real input resulting in i/o type # mismatch. Nudge the eigenvalues to positive side to avoid this. if fun == linalg.sqrtm: A = A + 3*np.eye(4, dtype=dtype) self.batch_test(fun, A) @pytest.mark.parametrize('dtype', floating) def test_null_space(self, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((5, 3, 4, 6), dtype=dtype, rng=rng) self.batch_test(linalg.null_space, A) @pytest.mark.parametrize('dtype', floating) def test_funm(self, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((2, 4, 3, 3), dtype=dtype, rng=rng) self.batch_test(linalg.funm, A, kwargs=dict(func=np.sin)) @pytest.mark.parametrize('dtype', floating) def test_fractional_matrix_power(self, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((2, 4, 3, 3), dtype=dtype, rng=rng) res1 = self.batch_test(linalg.fractional_matrix_power, A, kwargs={'t':1.5}) # test that `t` can be passed by position res2 = linalg.fractional_matrix_power(A, 1.5) np.testing.assert_equal(res1, res2) @pytest.mark.parametrize('dtype', floating) def test_logm(self, dtype): # One test failed absolute tolerance with default random seed rng = np.random.default_rng(89940026998903887141749720079406074936) A = get_random((5, 3, 4, 4), dtype=dtype, rng=rng) A = A + 3*np.eye(4) # avoid complex output for real input res1 = self.batch_test(linalg.logm, A) # test that `disp` can be passed by position res2 = linalg.logm(A) for res1i, res2i in zip(res1, res2): np.testing.assert_equal(res1i, res2i) @pytest.mark.parametrize('dtype', floating) def test_pinv(self, dtype): 
rng = np.random.default_rng(8342310302941288912051) A = get_random((5, 3, 4, 4), dtype=dtype, rng=rng) self.batch_test(linalg.pinv, A, n_out=2, kwargs=dict(return_rank=True)) @pytest.mark.parametrize('dtype', floating) def test_matrix_balance(self, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((5, 3, 4, 4), dtype=dtype, rng=rng) self.batch_test(linalg.matrix_balance, A, n_out=2) self.batch_test(linalg.matrix_balance, A, n_out=2, kwargs={'separate':True}) @pytest.mark.parametrize('dtype', floating) def test_bandwidth(self, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((4, 4), dtype=dtype, rng=rng) A = np.asarray([np.triu(A, k) for k in range(-3, 3)]).reshape((2, 3, 4, 4)) self.batch_test(linalg.bandwidth, A, n_out=2) @pytest.mark.parametrize('fun_n_out', [(linalg.cholesky, 1), (linalg.ldl, 3), (linalg.cho_factor, 2)]) @pytest.mark.parametrize('dtype', floating) def test_ldl_cholesky(self, fun_n_out, dtype): rng = np.random.default_rng(8342310302941288912051) fun, n_out = fun_n_out A = get_nearly_hermitian((5, 3, 4, 4), dtype, 0, rng) # exactly Hermitian A = A + 4*np.eye(4, dtype=dtype) # ensure positive definite for Cholesky self.batch_test(fun, A, n_out=n_out) @pytest.mark.parametrize('compute_uv', [False, True]) @pytest.mark.parametrize('dtype', floating) def test_svd(self, compute_uv, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((5, 3, 2, 4), dtype=dtype, rng=rng) n_out = 3 if compute_uv else 1 self.batch_test(linalg.svd, A, n_out=n_out, kwargs=dict(compute_uv=compute_uv)) @pytest.mark.parametrize('fun', [linalg.polar, linalg.qr, linalg.rq]) @pytest.mark.parametrize('dtype', floating) def test_polar_qr_rq(self, fun, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((5, 3, 2, 4), dtype=dtype, rng=rng) self.batch_test(fun, A, n_out=2) @pytest.mark.parametrize('cdim', [(5,), (5, 4), (2, 3, 5, 4)]) @pytest.mark.parametrize('dtype', floating) def 
test_qr_multiply(self, cdim, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((2, 3, 5, 5), dtype=dtype, rng=rng) c = get_random(cdim, dtype=dtype, rng=rng) res = linalg.qr_multiply(A, c, mode='left') q, r = linalg.qr(A) ref = q @ c atol = 1e-6 if dtype in {np.float32, np.complex64} else 1e-12 assert_allclose(res[0], ref, atol=atol) assert_allclose(res[1], r, atol=atol) @pytest.mark.parametrize('uvdim', [[(5,), (3,)], [(4, 5, 2), (4, 3, 2)]]) @pytest.mark.parametrize('dtype', floating) def test_qr_update(self, uvdim, dtype): rng = np.random.default_rng(8342310302941288912051) udim, vdim = uvdim A = get_random((4, 5, 3), dtype=dtype, rng=rng) u = get_random(udim, dtype=dtype, rng=rng) v = get_random(vdim, dtype=dtype, rng=rng) q, r = linalg.qr(A) res = linalg.qr_update(q, r, u, v) for i in range(4): qi, ri = q[i], r[i] ui, vi = (u, v) if u.ndim == 1 else (u[i], v[i]) ref_i = linalg.qr_update(qi, ri, ui, vi) assert_allclose(res[0][i], ref_i[0]) assert_allclose(res[1][i], ref_i[1]) @pytest.mark.parametrize('udim', [(5,), (4, 3, 5)]) @pytest.mark.parametrize('kdim', [(), (4,)]) @pytest.mark.parametrize('dtype', floating) def test_qr_insert(self, udim, kdim, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((4, 5, 5), dtype=dtype, rng=rng) u = get_random(udim, dtype=dtype, rng=rng) k = rng.integers(0, 5, size=kdim) q, r = linalg.qr(A) res = linalg.qr_insert(q, r, u, k) for i in range(4): qi, ri = q[i], r[i] ki = k if k.ndim == 0 else k[i] ui = u if u.ndim == 1 else u[i] ref_i = linalg.qr_insert(qi, ri, ui, ki) assert_allclose(res[0][i], ref_i[0]) assert_allclose(res[1][i], ref_i[1]) @pytest.mark.parametrize('kdim', [(), (4,)]) @pytest.mark.parametrize('dtype', floating) def test_qr_delete(self, kdim, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((4, 5, 5), dtype=dtype, rng=rng) k = rng.integers(0, 4, size=kdim) q, r = linalg.qr(A) res = linalg.qr_delete(q, r, k) for i in range(4): qi, ri = 
q[i], r[i] ki = k if k.ndim == 0 else k[i] ref_i = linalg.qr_delete(qi, ri, ki) assert_allclose(res[0][i], ref_i[0]) assert_allclose(res[1][i], ref_i[1]) @pytest.mark.parametrize('fun', [linalg.schur, linalg.lu_factor]) @pytest.mark.parametrize('dtype', floating) def test_schur_lu(self, fun, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((5, 3, 4, 4), dtype=dtype, rng=rng) self.batch_test(fun, A, n_out=2) @pytest.mark.parametrize('calc_q', [False, True]) @pytest.mark.parametrize('dtype', floating) def test_hessenberg(self, calc_q, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((5, 3, 4, 4), dtype=dtype, rng=rng) n_out = 2 if calc_q else 1 self.batch_test(linalg.hessenberg, A, n_out=n_out, kwargs=dict(calc_q=calc_q)) @pytest.mark.parametrize('eigvals_only', [False, True]) @pytest.mark.parametrize('dtype', floating) def test_eig_banded(self, eigvals_only, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((5, 3, 4, 4), dtype=dtype, rng=rng) n_out = 1 if eigvals_only else 2 self.batch_test(linalg.eig_banded, A, n_out=n_out, kwargs=dict(eigvals_only=eigvals_only)) @pytest.mark.parametrize('dtype', floating) def test_eigvals_banded(self, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((5, 3, 4, 4), dtype=dtype, rng=rng) self.batch_test(linalg.eigvals_banded, A) @pytest.mark.parametrize('two_in', [False, True]) @pytest.mark.parametrize('fun_n_nout', [(linalg.eigh, 1), (linalg.eigh, 2), (linalg.eigvalsh, 1), (linalg.eigvals, 1)]) @pytest.mark.parametrize('dtype', floating) def test_eigh(self, two_in, fun_n_nout, dtype): rng = np.random.default_rng(8342310302941288912051) fun, n_out = fun_n_nout A = get_nearly_hermitian((1, 3, 4, 4), dtype, 0, rng) # exactly Hermitian B = get_nearly_hermitian((2, 1, 4, 4), dtype, 0, rng) # exactly Hermitian B = B + 4*np.eye(4).astype(dtype) # needs to be positive definite args = (A, B) if two_in else (A,) kwargs = 
dict(eigvals_only=True) if (n_out == 1 and fun==linalg.eigh) else {} self.batch_test(fun, args, n_out=n_out, kwargs=kwargs) @pytest.mark.parametrize('compute_expm', [False, True]) @pytest.mark.parametrize('dtype', floating) def test_expm_frechet(self, compute_expm, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((1, 3, 4, 4), dtype=dtype, rng=rng) E = get_random((2, 1, 4, 4), dtype=dtype, rng=rng) n_out = 2 if compute_expm else 1 self.batch_test(linalg.expm_frechet, (A, E), n_out=n_out, kwargs=dict(compute_expm=compute_expm)) @pytest.mark.parametrize('dtype', floating) def test_subspace_angles(self, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((1, 3, 4, 3), dtype=dtype, rng=rng) B = get_random((2, 1, 4, 3), dtype=dtype, rng=rng) self.batch_test(linalg.subspace_angles, (A, B)) # just to show that A and B don't need to be broadcastable M, N, K = 4, 5, 3 A = get_random((1, 3, M, N), dtype=dtype, rng=rng) B = get_random((2, 1, M, K), dtype=dtype, rng=rng) assert linalg.subspace_angles(A, B).shape == (2, 3, min(N, K)) @pytest.mark.parametrize('fun', [linalg.svdvals]) @pytest.mark.parametrize('dtype', floating) def test_svdvals(self, fun, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((2, 3, 4, 5), dtype=dtype, rng=rng) self.batch_test(fun, A) @pytest.mark.parametrize('fun_n_out', [(linalg.orthogonal_procrustes, 2), (linalg.khatri_rao, 1), (linalg.solve_continuous_lyapunov, 1), (linalg.solve_discrete_lyapunov, 1), (linalg.qz, 4), (linalg.ordqz, 6)]) @pytest.mark.parametrize('dtype', floating) def test_two_generic_matrix_inputs(self, fun_n_out, dtype): rng = np.random.default_rng(8342310302941288912051) fun, n_out = fun_n_out A = get_random((2, 3, 4, 4), dtype=dtype, rng=rng) B = get_random((2, 3, 4, 4), dtype=dtype, rng=rng) self.batch_test(fun, (A, B), n_out=n_out) @pytest.mark.parametrize('dtype', floating) def test_cossin(self, dtype): rng = 
np.random.default_rng(8342310302941288912051) p, q = 3, 4 X = get_random((2, 3, 10, 10), dtype=dtype, rng=rng) x11, x12, x21, x22 = (X[..., :p, :q], X[..., :p, q:], X[..., p:, :q], X[..., p:, q:]) res = linalg.cossin(X, p, q) ref = linalg.cossin((x11, x12, x21, x22)) for res_i, ref_i in zip(res, ref): np.testing.assert_equal(res_i, ref_i) for j in range(2): for k in range(3): ref_jk = linalg.cossin(X[j, k], p, q) for res_i, ref_ijk in zip(res, ref_jk): np.testing.assert_equal(res_i[j, k], ref_ijk) @pytest.mark.parametrize('dtype', floating) def test_sylvester(self, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((2, 3, 5, 5), dtype=dtype, rng=rng) B = get_random((2, 3, 5, 5), dtype=dtype, rng=rng) C = get_random((2, 3, 5, 5), dtype=dtype, rng=rng) self.batch_test(linalg.solve_sylvester, (A, B, C)) @pytest.mark.parametrize('fun', [linalg.solve_continuous_are, linalg.solve_discrete_are]) @pytest.mark.parametrize('dtype', floating) def test_are(self, fun, dtype): rng = np.random.default_rng(8342310302941288912051) a = get_random((2, 3, 5, 5), dtype=dtype, rng=rng) b = get_random((2, 3, 5, 5), dtype=dtype, rng=rng) q = get_nearly_hermitian((2, 3, 5, 5), dtype=dtype, atol=0, rng=rng) r = get_nearly_hermitian((2, 3, 5, 5), dtype=dtype, atol=0, rng=rng) a = a + 5*np.eye(5) # making these positive definite seems to help b = b + 5*np.eye(5) q = q + 5*np.eye(5) r = r + 5*np.eye(5) e = np.eye(5) s = np.zeros((5, 5)) self.batch_test(fun, (a, b, q, r)) self.batch_test(fun, (a, b, q, r, e)) self.batch_test(fun, (a, b, q, r, e, s)) res = fun(a, b, q, r) ref = fun(a, b, q, r, s=s) np.testing.assert_allclose(res, ref) @pytest.mark.parametrize('dtype', floating) def test_rsf2cs(self, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((2, 3, 4, 4), dtype=dtype, rng=rng) T, Z = linalg.schur(A) self.batch_test(linalg.rsf2csf, (T, Z), n_out=2) @pytest.mark.parametrize('dtype', floating) def test_cholesky_banded(self, dtype): rng = 
np.random.default_rng(8342310302941288912051) ab = get_random((5, 4, 3, 6), dtype=dtype, rng=rng) ab[..., -1, :] = 10 # make diagonal dominant self.batch_test(linalg.cholesky_banded, ab) @pytest.mark.parametrize('dtype', floating) def test_block_diag(self, dtype): rng = np.random.default_rng(8342310302941288912051) a = get_random((1, 3, 1, 3), dtype=dtype, rng=rng) b = get_random((2, 1, 3, 6), dtype=dtype, rng=rng) c = get_random((1, 1, 3, 2), dtype=dtype, rng=rng) # batch_test doesn't have the logic to broadcast just the batch shapes, # so do it manually. a2 = np.broadcast_to(a, (2, 3, 1, 3)) b2 = np.broadcast_to(b, (2, 3, 3, 6)) c2 = np.broadcast_to(c, (2, 3, 3, 2)) ref = self.batch_test(linalg.block_diag, (a2, b2, c2), check_kwargs=False, broadcast=False) # Check that `block_diag` broadcasts the batch shapes as expected. res = linalg.block_diag(a, b, c) assert_allclose(res, ref) @pytest.mark.parametrize('fun_n_out', [(linalg.eigh_tridiagonal, 2), (linalg.eigvalsh_tridiagonal, 1)]) @pytest.mark.parametrize('dtype', real_floating) # "Only real arrays currently supported" def test_eigh_tridiagonal(self, fun_n_out, dtype): rng = np.random.default_rng(8342310302941288912051) fun, n_out = fun_n_out d = get_random((3, 4, 5), dtype=dtype, rng=rng) e = get_random((3, 4, 4), dtype=dtype, rng=rng) self.batch_test(fun, (d, e), core_dim=1, n_out=n_out, broadcast=False) @pytest.mark.parametrize('bdim', [(5,), (5, 4), (2, 3, 5, 4)]) @pytest.mark.parametrize('dtype', floating) def test_solve(self, bdim, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((2, 3, 5, 5), dtype=dtype, rng=rng) b = get_random(bdim, dtype=dtype, rng=rng) x = linalg.solve(A, b) if len(bdim) == 1: x = x[..., np.newaxis] b = b[..., np.newaxis] assert_allclose(A @ x - b, 0, atol=2e-6) assert_allclose(x, np.linalg.solve(A, b), atol=3e-6) @pytest.mark.parametrize('bdim', [(5,), (5, 4), (2, 3, 5, 4)]) @pytest.mark.parametrize('dtype', floating) def test_lu_solve(self, bdim, dtype): 
rng = np.random.default_rng(8342310302941288912051) A = get_random((2, 3, 5, 5), dtype=dtype, rng=rng) b = get_random(bdim, dtype=dtype, rng=rng) lu_and_piv = linalg.lu_factor(A) x = linalg.lu_solve(lu_and_piv, b) if len(bdim) == 1: x = x[..., np.newaxis] b = b[..., np.newaxis] assert_allclose(A @ x - b, 0, atol=2e-6) assert_allclose(x, np.linalg.solve(A, b), atol=3e-6) @pytest.mark.parametrize('l_and_u', [(1, 1), ([2, 1, 0], [0, 1 , 2])]) @pytest.mark.parametrize('bdim', [(5,), (5, 4), (2, 3, 5, 4)]) @pytest.mark.parametrize('dtype', floating) def test_solve_banded(self, l_and_u, bdim, dtype): rng = np.random.default_rng(8342310302941288912051) l, u = l_and_u ab = get_random((2, 3, 3, 5), dtype=dtype, rng=rng) b = get_random(bdim, dtype=dtype, rng=rng) x = linalg.solve_banded((l, u), ab, b) for i in range(2): for j in range(3): bij = b if len(bdim) <= 2 else b[i, j] lj = l if np.ndim(l) == 0 else l[j] uj = u if np.ndim(u) == 0 else u[j] xij = linalg.solve_banded((lj, uj), ab[i, j], bij) assert_allclose(x[i, j], xij) @pytest.mark.parametrize('separate_r', [False, True]) @pytest.mark.parametrize('bdim', [(5,), (5, 4), (2, 3, 5, 4)]) @pytest.mark.parametrize('dtype', floating) def test_solve_toeplitz(self, separate_r, bdim, dtype): rng = np.random.default_rng(8342310302941288912051) c = get_random((2, 3, 5), dtype=dtype, rng=rng) r = get_random((2, 3, 5), dtype=dtype, rng=rng) c_or_cr = (c, r) if separate_r else c b = get_random(bdim, dtype=dtype, rng=rng) x = linalg.solve_toeplitz(c_or_cr, b) for i in range(2): for j in range(3): bij = b if len(bdim) <= 2 else b[i, j] c_or_cr_ij = (c[i, j], r[i, j]) if separate_r else c[i, j] xij = linalg.solve_toeplitz(c_or_cr_ij, bij) assert_allclose(x[i, j], xij) @pytest.mark.parametrize('separate_r', [False, True]) @pytest.mark.parametrize('xdim', [(5,), (5, 4), (2, 3, 5, 4)]) @pytest.mark.parametrize('dtype', floating) def test_matmul_toeplitz(self, separate_r, xdim, dtype): rng = np.random.default_rng(8342310302941288912051) c 
= get_random((2, 3, 5), dtype=dtype, rng=rng) r = get_random((2, 3, 5), dtype=dtype, rng=rng) c_or_cr = (c, r) if separate_r else c x = get_random(xdim, dtype=dtype, rng=rng) res = linalg.matmul_toeplitz(c_or_cr, x) if separate_r: ref = linalg.toeplitz(c, r) @ x else: ref = linalg.toeplitz(c) @ x atol = 1e-6 if dtype in {np.float32, np.complex64} else 1e-12 assert_allclose(res, ref, atol=atol) @pytest.mark.parametrize('bdim', [(5,), (5, 4), (2, 3, 5, 4)]) @pytest.mark.parametrize('dtype', floating) def test_cho_solve(self, bdim, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_nearly_hermitian((2, 3, 5, 5), dtype=dtype, atol=0, rng=rng) A = A + 5*np.eye(5) c_and_lower = linalg.cho_factor(A) b = get_random(bdim, dtype=dtype, rng=rng) x = linalg.cho_solve(c_and_lower, b) if len(bdim) == 1: x = x[..., np.newaxis] b = b[..., np.newaxis] assert_allclose(A @ x - b, 0, atol=1e-6) assert_allclose(x, np.linalg.solve(A, b), atol=2e-6) @pytest.mark.parametrize('lower', [False, True]) @pytest.mark.parametrize('bdim', [(5,), (5, 4), (2, 3, 5, 4)]) @pytest.mark.parametrize('dtype', floating) def test_cho_solve_banded(self, lower, bdim, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((2, 3, 3, 5), dtype=dtype, rng=rng) row_diag = 0 if lower else -1 A[:, :, row_diag] = 10 cb = linalg.cholesky_banded(A, lower=lower) b = get_random(bdim, dtype=dtype, rng=rng) x = linalg.cho_solve_banded((cb, lower), b) for i in range(2): for j in range(3): bij = b if len(bdim) <= 2 else b[i, j] xij = linalg.cho_solve_banded((cb[i, j], lower), bij) assert_allclose(x[i, j], xij) @pytest.mark.parametrize('bdim', [(5,), (5, 4), (2, 3, 5, 4)]) @pytest.mark.parametrize('dtype', floating) def test_solveh_banded(self, bdim, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((2, 3, 3, 5), dtype=dtype, rng=rng) A[:, :, -1] = 10 b = get_random(bdim, dtype=dtype, rng=rng) x = linalg.solveh_banded(A, b) for i in range(2): for j in range(3): 
bij = b if len(bdim) <= 2 else b[i, j] xij = linalg.solveh_banded(A[i, j], bij) assert_allclose(x[i, j], xij) @pytest.mark.parametrize('bdim', [(5,), (5, 4), (2, 3, 5, 4)]) @pytest.mark.parametrize('dtype', floating) def test_solve_triangular(self, bdim, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((2, 3, 5, 5), dtype=dtype, rng=rng) A = np.tril(A) b = get_random(bdim, dtype=dtype, rng=rng) x = linalg.solve_triangular(A, b, lower=True) if len(bdim) == 1: x = x[..., np.newaxis] b = b[..., np.newaxis] atol = 1e-10 if dtype in (np.complex128, np.float64) else 2e-4 assert_allclose(A @ x - b, 0, atol=atol) assert_allclose(x, np.linalg.solve(A, b), atol=5*atol) @pytest.mark.parametrize('bdim', [(4,), (4, 3), (2, 3, 4, 3)]) @pytest.mark.parametrize('dtype', floating) def test_lstsq(self, bdim, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((2, 3, 4, 5), dtype=dtype, rng=rng) b = get_random(bdim, dtype=dtype, rng=rng) res = linalg.lstsq(A, b) x = res[0] if len(bdim) == 1: x = x[..., np.newaxis] b = b[..., np.newaxis] assert_allclose(A @ x - b, 0, atol=2e-6) assert len(res) == 4 @pytest.mark.parametrize('dtype', floating) def test_clarkson_woodruff_transform(self, dtype): rng = np.random.default_rng(8342310302941288912051) A = get_random((5, 3, 4, 6), dtype=dtype, rng=rng) self.batch_test(linalg.clarkson_woodruff_transform, A, kwargs=dict(sketch_size=3, rng=311224)) def test_clarkson_woodruff_transform_sparse(self): rng = np.random.default_rng(8342310302941288912051) A = get_random((5, 3, 4, 6), dtype=np.float64, rng=rng) A = sparse.coo_array(A) message = "Batch support for sparse arrays is not available." with pytest.raises(NotImplementedError, match=message): linalg.clarkson_woodruff_transform(A, sketch_size=3, rng=rng)
TestBatch
python
pennersr__django-allauth
allauth/socialaccount/providers/discogs/views.py
{ "start": 186, "end": 357 }
class ____(OAuth):
    """Thin Discogs OAuth client that retrieves the authenticated identity."""

    url = "https://api.discogs.com/oauth/identity"

    def get_user_info(self):
        """Fetch the identity endpoint and return its decoded JSON payload."""
        response = self.query(self.url)
        return response.json()
DiscogsAPI
python
huggingface__transformers
src/transformers/models/ibert/modeling_ibert.py
{ "start": 40738, "end": 43148 }
class ____(IBertPreTrainedModel):
    """I-BERT encoder with a token-classification head: dropout followed by a
    linear layer applied to every hidden state (e.g. for NER / POS tagging)."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        # Pooled output is not needed for per-token predictions.
        self.ibert = IBertModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Maps each token's hidden state to `num_labels` logits.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[TokenClassifierOutput, tuple[torch.FloatTensor]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        # Fall back to the model-wide default when the caller does not choose.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ibert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # First element of the backbone output is the last hidden state.
        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # Flatten batch and sequence dims for token-level cross-entropy.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            # Legacy tuple output; outputs[2:] carries the optional
            # hidden_states/attentions — presumably outputs[1] is a slot this
            # head does not use (TODO confirm against IBertModel's return).
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
IBertForTokenClassification
python
google__jax
jax/experimental/array_serialization/serialization.py
{ "start": 5745, "end": 9809 }
class ____: def __init__(self, timeout_secs=300): self._timeout_secs = timeout_secs self._timeout_in_ms = self._timeout_secs * 1000 self._commit_futures = None self._thread = None self._exception = None if jax.process_count() > 1 and distributed.global_state.client is None: raise ValueError(_DISTRIBUTED_SYSTEM_MSG) self._client = distributed.global_state.client self._count = None def __del__(self): if self._thread is not None and self._thread.is_alive(): logger.warning('Please add `.wait_until_finished()` in the main thread ' 'before your program finishes because there is a ' 'possibility of losing errors raised if the ' 'this class is deleted before writing is completed.') def _thread_func(self): try: current_process = jax.process_index() process_count = jax.process_count() logger.info('Starting commit to storage layer by process: %s', current_process) thread_start_time = time.time() for future in self._commit_futures: future.result() logger.info('Finished committing to storage layer by process: %s', current_process) key_for_barrier = None if process_count > 1: assert self._client is not None # All processes will wait at the barrier. When all processes are at the # barrier, the barrier will be satisfied. If not, then it will timeout. 
key_for_barrier = _get_key(self._count) logger.info('Key used for barrier is %s for process %s', key_for_barrier, current_process) self._client.wait_at_barrier(key_for_barrier, self._timeout_in_ms) logger.info('Finished waiting at barrier for process %s', current_process) if current_process == 0: if self._on_commit_callback is not None: self._on_commit_callback() logger.info('on_commit_callback successfully ran!') if process_count > 1: assert self._client is not None self._client.key_value_set(key_for_barrier, _CHECKPOINT_SUCCESS) logger.info('Process 0 successfully set key %s in the kv store', key_for_barrier) jax.monitoring.record_event_duration_secs( '/jax/checkpoint/write/async/thread_duration_sec', time.time() - thread_start_time) except Exception as e: # pylint: disable=broad-except self._exception = e def _start_async_commit(self, on_commit_callback): self._count = next(_module_unique_count) self._on_commit_callback = on_commit_callback self._thread = threading.Thread(target=self._thread_func) self._thread.start() def check_for_errors(self): if self._exception is not None: # Clears self._exception so it is only raised once. exception = self._exception self._exception = None if (isinstance(exception, _jax.JaxRuntimeError) and 'DEADLINE_EXCEEDED: Barrier timed out' in str(exception)): raise BarrierTimeoutError( '\n'.join([str(exception), _BARRIER_TIMED_OUT_MSG])) raise exception # pylint: disable=raising-bad-type def wait_until_finished(self): if self._thread is not None: self._thread.join() self._thread = None logger.info('Thread joined successfully') self.check_for_errors() logger.info('Error check finished successfully') if jax.process_count() > 1 and self._count is not None: assert self._client is not None # Block until process 0 writes success value to the key value store. # If it fails to write it, then `blocking_key_value_get` will time out. 
get_key = _get_key(self._count) self._client.blocking_key_value_get(get_key, self._timeout_in_ms) logger.info('blocking_key_value_get on key %s was successfully ' 'completed.', get_key) def _add_futures(self, futures: Sequence[ts.Future]): self._commit_futures = futures
AsyncManager
python
sanic-org__sanic
sanic/http/http3.py
{ "start": 8548, "end": 8674 }
class ____(Receiver): # noqa """WebTransport receiver implementation.""" async def run(self): ...
WebTransportReceiver
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/postgresql/psycopg.py
{ "start": 21848, "end": 23365 }
class ____(AsyncAdapt_dbapi_connection): _connection: AsyncConnection __slots__ = () _cursor_cls = AsyncAdapt_psycopg_cursor _ss_cursor_cls = AsyncAdapt_psycopg_ss_cursor def add_notice_handler(self, handler): self._connection.add_notice_handler(handler) @property def info(self): return self._connection.info @property def adapters(self): return self._connection.adapters @property def closed(self): return self._connection.closed @property def broken(self): return self._connection.broken @property def read_only(self): return self._connection.read_only @property def deferrable(self): return self._connection.deferrable @property def autocommit(self): return self._connection.autocommit @autocommit.setter def autocommit(self, value): self.set_autocommit(value) def set_autocommit(self, value): await_(self._connection.set_autocommit(value)) def set_isolation_level(self, value): await_(self._connection.set_isolation_level(value)) def set_read_only(self, value): await_(self._connection.set_read_only(value)) def set_deferrable(self, value): await_(self._connection.set_deferrable(value)) def cursor(self, name=None, /): if name: return AsyncAdapt_psycopg_ss_cursor(self, name) else: return AsyncAdapt_psycopg_cursor(self)
AsyncAdapt_psycopg_connection
python
Pylons__pyramid
tests/test_viewderivers.py
{ "start": 63103, "end": 68209 }
class ____(unittest.TestCase): def setUp(self): self.config = testing.setUp() def tearDown(self): self.config = None testing.tearDown() def test_add_single_deriver(self): response = DummyResponse() response.deriv = False view = lambda *arg: response def deriv(view, info): self.assertFalse(response.deriv) response.deriv = True return view result = self.config._derive_view(view) self.assertFalse(response.deriv) self.config.add_view_deriver(deriv, 'test_deriv') result = self.config._derive_view(view) # noqa: F841 self.assertTrue(response.deriv) def test_override_deriver(self): flags = {} class AView: def __init__(self): self.response = DummyResponse() def deriv1(view, info): flags['deriv1'] = True return view def deriv2(view, info): flags['deriv2'] = True return view view1 = AView() self.config.add_view_deriver(deriv1, 'test_deriv') result = self.config._derive_view(view1) self.assertTrue(flags.get('deriv1')) self.assertFalse(flags.get('deriv2')) flags.clear() view2 = AView() self.config.add_view_deriver(deriv2, 'test_deriv') result = self.config._derive_view(view2) # noqa: F841 self.assertFalse(flags.get('deriv1')) self.assertTrue(flags.get('deriv2')) def test_override_mapped_view(self): from pyramid.viewderivers import VIEW response = DummyResponse() view = lambda *arg: response flags = {} def deriv1(view, info): flags['deriv1'] = True return view result = self.config._derive_view(view) self.assertFalse(flags.get('deriv1')) flags.clear() self.config.add_view_deriver( deriv1, name='mapped_view', under='rendered_view', over=VIEW ) result = self.config._derive_view(view) # noqa: F841 self.assertTrue(flags.get('deriv1')) def test_add_multi_derivers_ordered(self): from pyramid.viewderivers import INGRESS response = DummyResponse() view = lambda *arg: response response.deriv = [] def deriv1(view, info): response.deriv.append('deriv1') return view def deriv2(view, info): response.deriv.append('deriv2') return view def deriv3(view, info): response.deriv.append('deriv3') 
return view self.config.add_view_deriver(deriv1, 'deriv1') self.config.add_view_deriver(deriv2, 'deriv2', INGRESS, 'deriv1') self.config.add_view_deriver(deriv3, 'deriv3', 'deriv2', 'deriv1') result = self.config._derive_view(view) # noqa: F841 self.assertEqual(response.deriv, ['deriv1', 'deriv3', 'deriv2']) def test_add_deriver_without_name(self): from pyramid.interfaces import IViewDerivers def deriv1(view, info): # pragma: no cover pass self.config.add_view_deriver(deriv1) derivers = self.config.registry.getUtility(IViewDerivers) self.assertTrue('deriv1' in derivers.names) def test_add_deriver_reserves_ingress(self): from pyramid.exceptions import ConfigurationError from pyramid.viewderivers import INGRESS def deriv1(view, info): # pragma: no cover pass self.assertRaises( ConfigurationError, self.config.add_view_deriver, deriv1, INGRESS ) def test_add_deriver_enforces_ingress_is_first(self): from pyramid.exceptions import ConfigurationError from pyramid.viewderivers import INGRESS def deriv1(view, info): # pragma: no cover pass try: self.config.add_view_deriver(deriv1, over=INGRESS) except ConfigurationError as ex: self.assertTrue('cannot be over INGRESS' in ex.args[0]) else: # pragma: no cover raise AssertionError def test_add_deriver_enforces_view_is_last(self): from pyramid.exceptions import ConfigurationError from pyramid.viewderivers import VIEW def deriv1(view, info): # pragma: no cover pass try: self.config.add_view_deriver(deriv1, under=VIEW) except ConfigurationError as ex: self.assertTrue('cannot be under VIEW' in ex.args[0]) else: # pragma: no cover raise AssertionError def test_add_deriver_enforces_mapped_view_is_last(self): from pyramid.exceptions import ConfigurationError def deriv1(view, info): # pragma: no cover pass try: self.config.add_view_deriver(deriv1, 'deriv1', under='mapped_view') except ConfigurationError as ex: self.assertTrue('cannot be under "mapped_view"' in ex.args[0]) else: # pragma: no cover raise AssertionError
TestAddDeriver
python
fastai__fastai
fastai/torch_core.py
{ "start": 19471, "end": 20265 }
class ____(TensorImageBase): _show_args = ArrayMask._show_args def show(self, ctx=None, **kwargs): codes = getattr(self, 'codes', None) if codes is not None: kwargs = merge({'vmin': 0, 'vmax': len(codes)}, kwargs) return super().show(ctx=ctx, **kwargs) # %% ../nbs/00_torch_core.ipynb 110 for o in Tensor.__getitem__, Tensor.__ne__,Tensor.__eq__,Tensor.add,Tensor.sub,Tensor.mul,Tensor.div,Tensor.__rsub__,Tensor.__radd__,Tensor.matmul,Tensor.bmm: TensorBase.register_func(o, TensorMask, TensorImageBase) TensorBase.register_func(o, TensorImageBase, TensorMask) TensorMask.register_func(torch.einsum, str, TensorImageBase, TensorMask) TensorMask.register_func(torch.einsum, str, TensorMask, TensorImageBase) # %% ../nbs/00_torch_core.ipynb 117
TensorMask
python
getsentry__sentry
tests/sentry/api/endpoints/test_system_health.py
{ "start": 82, "end": 428 }
class ____(APITestCase): def test_simple(self) -> None: self.login_as(user=self.user, superuser=True) url = reverse("sentry-api-0-system-health") response = self.client.get(url) assert response.status_code == 200 assert "problems" in response.data assert "healthy" in response.data
SystemHealthTest
python
sqlalchemy__sqlalchemy
test/orm/inheritance/test_assorted_poly.py
{ "start": 5342, "end": 10293 }
class ____(fixtures.MappedTest): """test self-referential relationships on polymorphic mappers""" @classmethod def define_tables(cls, metadata): global people, managers, data people = Table( "people", metadata, Column( "person_id", Integer, primary_key=True, test_needs_autoincrement=True, ), Column("name", String(50)), Column("type", String(30)), ) managers = Table( "managers", metadata, Column( "person_id", Integer, ForeignKey("people.person_id"), primary_key=True, ), Column("manager_id", Integer, ForeignKey("people.person_id")), Column("status", String(30)), ) data = Table( "data", metadata, Column( "person_id", Integer, ForeignKey("managers.person_id"), primary_key=True, ), Column("data", String(30)), ) @classmethod def setup_classes(cls): class Person(cls.Comparable): pass class Manager(Person): pass @testing.combinations( ("join1",), ("join2",), ("join3",), argnames="jointype" ) @testing.combinations( ("usedata", True), ("nodata", False), id_="ia", argnames="usedata" ) def test_relationshiponsubclass(self, jointype, usedata): Person, Manager = self.classes("Person", "Manager") if jointype == "join1": poly_union = polymorphic_union( { "person": people.select() .where(people.c.type == "person") .subquery(), "manager": join( people, managers, people.c.person_id == managers.c.person_id, ), }, None, ) polymorphic_on = poly_union.c.type elif jointype == "join2": poly_union = polymorphic_union( { "person": people.select() .where(people.c.type == "person") .subquery(), "manager": managers.join( people, people.c.person_id == managers.c.person_id ), }, None, ) polymorphic_on = poly_union.c.type elif jointype == "join3": poly_union = None polymorphic_on = people.c.type if usedata: class Data: def __init__(self, data): self.data = data self.mapper_registry.map_imperatively(Data, data) self.mapper_registry.map_imperatively( Person, people, with_polymorphic=("*", poly_union), polymorphic_identity="person", polymorphic_on=polymorphic_on, ) if usedata: 
self.mapper_registry.map_imperatively( Manager, managers, inherits=Person, inherit_condition=people.c.person_id == managers.c.person_id, polymorphic_identity="manager", properties={ "colleague": relationship( Person, primaryjoin=managers.c.manager_id == people.c.person_id, lazy="select", uselist=False, ), "data": relationship(Data, uselist=False), }, ) else: self.mapper_registry.map_imperatively( Manager, managers, inherits=Person, inherit_condition=people.c.person_id == managers.c.person_id, polymorphic_identity="manager", properties={ "colleague": relationship( Person, primaryjoin=managers.c.manager_id == people.c.person_id, lazy="select", uselist=False, ) }, ) sess = fixture_session() p = Person(name="person1") m = Manager(name="manager1") m.colleague = p if usedata: m.data = Data("ms data") sess.add(m) sess.flush() sess.expunge_all() p = sess.get(Person, p.person_id) m = sess.get(Manager, m.person_id) assert m.colleague is p if usedata: assert m.data.data == "ms data"
RelationshipTest2
python
PrefectHQ__prefect
tests/server/orchestration/api/test_block_types.py
{ "start": 15856, "end": 16614 }
class ____: async def test_read_block_documents_for_block_type( self, client, block_type_x, block_document ): response = await client.get( f"/block_types/slug/{block_type_x.slug}/block_documents" ) assert response.status_code == status.HTTP_200_OK read_block_documents = parse_obj_as(List[BlockDocument], response.json()) assert [block_doc.id for block_doc in read_block_documents] == [ block_document.id ] async def test_read_block_documents_for_nonexistent_block_type(self, client): response = await client.get("/block_types/slug/nonsense/block_documents") assert response.status_code == status.HTTP_404_NOT_FOUND
TestReadBlockDocumentsForBlockType
python
more-itertools__more-itertools
tests/test_more.py
{ "start": 132547, "end": 134498 }
class ____(TestCase): def test_default_pred(self): iterable = [0, 1, 1, 0, 1, 0, 0] for it in (iterable[:], iter(iterable)): actual = list(mi.rlocate(it)) expected = [4, 2, 1] self.assertEqual(actual, expected) def test_no_matches(self): iterable = [0, 0, 0] for it in (iterable[:], iter(iterable)): actual = list(mi.rlocate(it)) expected = [] self.assertEqual(actual, expected) def test_custom_pred(self): iterable = ['0', 1, 1, '0', 1, '0', '0'] pred = lambda x: x == '0' for it in (iterable[:], iter(iterable)): actual = list(mi.rlocate(it, pred)) expected = [6, 5, 3, 0] self.assertEqual(actual, expected) def test_efficient_reversal(self): iterable = range(9**9) # Is efficiently reversible target = 9**9 - 2 pred = lambda x: x == target # Find-able from the right actual = next(mi.rlocate(iterable, pred)) self.assertEqual(actual, target) def test_window_size(self): iterable = ['0', 1, 1, '0', 1, '0', '0'] pred = lambda *args: args == ('0', 1) for it in (iterable, iter(iterable)): actual = list(mi.rlocate(it, pred, window_size=2)) expected = [3, 0] self.assertEqual(actual, expected) def test_window_size_large(self): iterable = [1, 2, 3, 4] pred = lambda a, b, c, d, e: True for it in (iterable, iter(iterable)): actual = list(mi.rlocate(iterable, pred, window_size=5)) expected = [0] self.assertEqual(actual, expected) def test_window_size_zero(self): iterable = [1, 2, 3, 4] pred = lambda: True for it in (iterable, iter(iterable)): with self.assertRaises(ValueError): list(mi.locate(iterable, pred, window_size=0))
RlocateTests
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/ruff/RUF052_0.py
{ "start": 3159, "end": 3815 }
class ____: connected: list[Node] def recurse(self, *, _seen: set[Node] | None = None): if _seen is None: _seen = set() elif self in _seen: return _seen.add(self) for other in self.connected: other.recurse(_seen=_seen) def foo(): _dummy_var = 42 def bar(): dummy_var = 43 print(_dummy_var) def foo(): # Unfixable because both possible candidates for the new name are shadowed # in the scope of one of the references to the variable _dummy_var = 42 def bar(): dummy_var = 43 dummy_var_ = 44 print(_dummy_var)
Node
python
getsentry__sentry
src/sentry/migrations/0955_org_option_json_field.py
{ "start": 244, "end": 1763 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = True dependencies = [ ("sentry", "0954_user_option_json_field"), ] operations = [ migrations.SeparateDatabaseAndState( database_operations=[mod.to_jsonb("sentry_organizationoptions", "value")], state_operations=[ migrations.AlterField( model_name="organizationoption", name="value", field=models.JSONField(null=True), ), ], ) ]
Migration
python
sympy__sympy
sympy/core/logic.py
{ "start": 9826, "end": 10476 }
class ____(Logic): def __new__(cls, arg): if isinstance(arg, str): return Logic.__new__(cls, arg) elif isinstance(arg, bool): return not arg elif isinstance(arg, Not): return arg.args[0] elif isinstance(arg, Logic): # XXX this is a hack to expand right from the beginning arg = arg._eval_propagate_not() return arg else: raise ValueError('Not: unknown argument %r' % (arg,)) @property def arg(self): return self.args[0] Logic.op_2class['&'] = And Logic.op_2class['|'] = Or Logic.op_2class['!'] = Not
Not
python
getsentry__sentry
tests/sentry/uptime/subscriptions/test_tasks.py
{ "start": 18302, "end": 22678 }
class ____(UptimeTestCase): def test(self) -> None: self.run_test( mode=UptimeMonitorMode.AUTO_DETECTED_ACTIVE, detector_state=DetectorPriorityLevel.HIGH, update_date=timezone.now() - timedelta(days=8), expected_status=ObjectStatus.DISABLED, expected_priority_level=DetectorPriorityLevel.OK, ) def test_manual(self) -> None: self.run_test( mode=UptimeMonitorMode.MANUAL, detector_state=DetectorPriorityLevel.HIGH, update_date=timezone.now() - timedelta(days=8), expected_status=ObjectStatus.ACTIVE, expected_priority_level=DetectorPriorityLevel.HIGH, ) def test_auto_young(self) -> None: self.run_test( mode=UptimeMonitorMode.AUTO_DETECTED_ACTIVE, detector_state=DetectorPriorityLevel.HIGH, update_date=timezone.now() - timedelta(days=4), expected_status=ObjectStatus.ACTIVE, expected_priority_level=DetectorPriorityLevel.HIGH, ) def test_auto_not_failed(self) -> None: self.run_test( mode=UptimeMonitorMode.AUTO_DETECTED_ACTIVE, detector_state=DetectorPriorityLevel.OK, update_date=timezone.now() - timedelta(days=8), expected_status=ObjectStatus.ACTIVE, expected_priority_level=DetectorPriorityLevel.OK, ) def test_already_disabled(self) -> None: """Test that already disabled detectors are not processed""" detector = self.create_uptime_detector( mode=UptimeMonitorMode.AUTO_DETECTED_ACTIVE, detector_state=DetectorPriorityLevel.HIGH, enabled=False, ) # Update the detector state date to simulate an old failure detector_state = detector.detectorstate_set.first() assert detector_state is not None detector_state.update(date_updated=timezone.now() - timedelta(days=8)) with ( self.tasks(), mock.patch( "sentry.uptime.subscriptions.subscriptions.disable_uptime_detector" ) as mock_disable, ): broken_monitor_checker() # Should not be called since detector is already disabled mock_disable.assert_not_called() def test_handle_disable_detector_exceptions(self) -> None: detector = self.create_uptime_detector( mode=UptimeMonitorMode.AUTO_DETECTED_ACTIVE, detector_state=DetectorPriorityLevel.HIGH, ) 
# Update the detector state date to simulate an old failure detector_state = detector.detectorstate_set.first() assert detector_state is not None detector_state.update(date_updated=timezone.now() - timedelta(days=8)) with ( self.tasks(), mock.patch( "sentry.uptime.subscriptions.subscriptions.disable_uptime_detector", side_effect=Exception("Test exception"), ), mock.patch("sentry.uptime.subscriptions.tasks.logger") as logger, ): # Does not raise broken_monitor_checker() logger.exception.assert_called_once() def run_test( self, mode: UptimeMonitorMode, detector_state: DetectorPriorityLevel, update_date: datetime, expected_status: int, expected_priority_level: DetectorPriorityLevel, ): detector = self.create_uptime_detector( mode=mode, detector_state=detector_state, ) # Update detector state date to match the test scenario state = detector.detectorstate_set.first() assert state is not None state.update(date_updated=update_date) with self.tasks(): broken_monitor_checker() detector.refresh_from_db() if expected_status == ObjectStatus.ACTIVE: assert detector.enabled else: assert not detector.enabled state = detector.detectorstate_set.first() assert state is not None assert state.priority_level == expected_priority_level if expected_priority_level == DetectorPriorityLevel.HIGH: assert state.is_triggered else: assert not state.is_triggered
BrokenMonitorCheckerTest
python
great-expectations__great_expectations
great_expectations/core/run_identifier.py
{ "start": 3905, "end": 4382 }
class ____(Schema): run_name = fields.Str() run_time = fields.AwareDateTime(format="iso", default_timezone=datetime.timezone.utc) @pre_dump def prepare_dump(self, data, **kwargs): data = deepcopy(data) data.set_run_time_tz(tz=None) # sets to system local tz return data @post_load def make_run_identifier(self, data, **kwargs): return RunIdentifier(**data) runIdentifierSchema = RunIdentifierSchema()
RunIdentifierSchema
python
kamyu104__LeetCode-Solutions
Python/number-of-same-end-substrings.py
{ "start": 70, "end": 629 }
class ____(object): def sameEndSubstringCount(self, s, queries): """ :type s: str :type queries: List[List[int]] :rtype: List[int] """ prefix = [[0]*26] for i in xrange(len(s)): prefix.append(prefix[-1][:]) prefix[-1][ord(s[i])-ord('a')] += 1 result = [0]*len(queries) for i, (l, r) in enumerate(queries): for j in xrange(26): cnt = prefix[r+1][j]-prefix[l][j] result[i] += (1+cnt)*cnt//2 return result
Solution
python
joke2k__faker
faker/providers/bank/en_IN/__init__.py
{ "start": 42, "end": 1194 }
class ____(BankProvider): """Implement bank provider for ``en_IN`` locale. Source: https://en.wikipedia.org/wiki/List_of_banks_in_India """ banks = ( "Bank of Baroda", "Bank of India", "Bank of Maharashtra", "Canara Bank", "Central Bank of India", "Indian Bank", "Indian Overseas Bank", "Punjab National Bank", "Punjab and Sind Bank", "Union Bank of India", "UCO Bank", "State Bank of India", "Axis Bank", "Bandhan Bank", "CSB Bank", "City Union Bank", "DCB Bank", "Dhanlaxmi Bank", "Federal Bank", "HDFC Bank", "ICICI Bank", "IDBI Bank", "IDFC First Bank", "IndusInd Bank", "Jammu & Kashmir Bank", "Karnataka Bank", "Karur Vysya Bank", "Kotak Mahindra Bank", "Nainital Bank", "RBL Bank", "South Indian Bank", "Tamilnad Mercantile Bank", "Yes Bank", ) def bank(self) -> str: """Generate a bank name.""" return self.random_element(self.banks)
Provider
python
vyperlang__vyper
vyper/exceptions.py
{ "start": 8904, "end": 9029 }
class ____(VyperException): """Attempt to perform an action between multiple objects of incompatible types."""
TypeMismatch
python
ray-project__ray
python/ray/tests/test_minimal_install.py
{ "start": 924, "end": 3154 }
class ____: model_fields = {} def __init__(self, *args, **kwargs): pass def __init_subclass__(self, *args, **kwargs): pass def _make_mock_pydantic_modules(pydantic_version: str) -> Dict: """Make a mock for the `pydantic` module. This module requires special handling to: - Make `BaseModel` a class object so type hints work. - Set the `__version__` attribute appropriately. - Also mock `pydantic.v1` for `pydantic >= 2.0`. - Also mock `pydantic.dataclasses`. Returns a dict of mocked modules. """ mock_modules = { "pydantic": mock.MagicMock(), "pydantic.dataclasses": mock.MagicMock(), } mock_modules["pydantic"].BaseModel = MockBaseModel if packaging.version.parse(pydantic_version) >= packaging.version.parse("1.9.0"): mock_modules["pydantic"].__version__ = pydantic_version if packaging.version.parse(pydantic_version) >= packaging.version.parse("2.0.0"): mock_modules["pydantic.v1"] = mock_modules["pydantic"] return mock_modules @pytest.mark.parametrize("pydantic_version", ["1.8.0", "1.9.0", "2.0.0"]) @pytest.mark.skipif( os.environ.get("RAY_MINIMAL", "0") != "1", reason="Skip unless running in a minimal install.", ) def test_module_import_with_various_non_minimal_deps(pydantic_version: str): optional_modules = [ "opencensus", "prometheus_client", "aiohttp", "aiohttp_cors", "pydantic", "grpc", ] for i in range(len(optional_modules)): for install_modules in itertools.combinations(optional_modules, i): print(install_modules) mock_modules = {} for mod in install_modules: if mod == "pydantic": mock_modules.update(**_make_mock_pydantic_modules(pydantic_version)) else: mock_modules[mod] = mock.MagicMock() with mock.patch.dict("sys.modules", mock_modules): from ray.dashboard.utils import DashboardHeadModule, get_all_modules get_all_modules(DashboardHeadModule) if __name__ == "__main__": sys.exit(pytest.main(["-sv", __file__]))
MockBaseModel
python
PyCQA__pylint
tests/functional/s/super/super_checks.py
{ "start": 2114, "end": 2283 }
class ____: not_a_method = 42 def function(self, param): return param + self.not_a_method def __getattr__(self, attr): return attr
BaseClass
python
ansible__ansible
test/lib/ansible_test/_internal/become.py
{ "start": 164, "end": 683 }
class ____(metaclass=abc.ABCMeta): """Base class for become implementations.""" @classmethod def name(cls) -> str: """The name of this plugin.""" return cls.__name__.lower() @property @abc.abstractmethod def method(self) -> str: """The name of the Ansible become plugin that is equivalent to this.""" @abc.abstractmethod def prepare_command(self, command: list[str]) -> list[str]: """Return the given command, if any, with privilege escalation."""
Become
python
GoogleCloudPlatform__python-docs-samples
appengine/standard_python3/bundled-services/mail/wsgi/main.py
{ "start": 3915, "end": 4372 }
class ____: def __call__(self, environ, start_response): path = environ.get("PATH_INFO", "") for regex, callable in routes.items(): match = re.search(regex, path) if match is not None: return callable(environ, start_response) start_response("404 Not Found", [("Content-Type", "text/plain")]) return ["Not found".encode("utf-8")] app = wrap_wsgi_app(WSGIApplication())
WSGIApplication
python
paramiko__paramiko
paramiko/agent.py
{ "start": 4003, "end": 6000 }
class ____(threading.Thread): """ Class in charge of communication between two channels. """ def __init__(self, agent): threading.Thread.__init__(self, target=self.run) self._agent = agent self._exit = False def run(self): try: (r, addr) = self.get_connection() # Found that r should be either # a socket from the socket library or None self.__inr = r # The address should be an IP address as a string? or None self.__addr = addr self._agent.connect() if not isinstance(self._agent, int) and ( self._agent._conn is None or not hasattr(self._agent._conn, "fileno") ): raise AuthenticationException("Unable to connect to SSH agent") self._communicate() except: # XXX Not sure what to do here ... raise or pass ? raise def _communicate(self): import fcntl oldflags = fcntl.fcntl(self.__inr, fcntl.F_GETFL) fcntl.fcntl(self.__inr, fcntl.F_SETFL, oldflags | os.O_NONBLOCK) while not self._exit: events = select([self._agent._conn, self.__inr], [], [], 0.5) for fd in events[0]: if self._agent._conn == fd: data = self._agent._conn.recv(512) if len(data) != 0: self.__inr.send(data) else: self._close() break elif self.__inr == fd: data = self.__inr.recv(512) if len(data) != 0: self._agent._conn.send(data) else: self._close() break time.sleep(io_sleep) def _close(self): self._exit = True self.__inr.close() self._agent._conn.close()
AgentProxyThread
python
tensorflow__tensorflow
tensorflow/python/compiler/xla/xla.py
{ "start": 20716, "end": 22975 }
class ____(object): """A placeholder to capture an object.""" def __init__(self): self._object = None def capture(self, o): if self._object: raise RuntimeError( 'InternalError: _CapturedObject can capture only once. Please file ' 'bug.') self._object = o def get(self): return self._object def check_function_argument_count(func, input_arity, infeed_queue): """Validate the number of input arguments to an XLA function. Args: func: the Python function that will be called to generate the body of an XLA computation graph. input_arity: the number of explicit arguments supplied by the caller. infeed_queue: if not None, the infeed queue that will supply additional arguments to the function. Returns: None if function can be called with the supplied number of arguments, or an error string if it cannot. """ def format_error(complaint, quantity): return '%s %d argument%s' % (complaint, quantity, '' if quantity == 1 else 's') num_args_supplied = input_arity if infeed_queue is not None: num_args_supplied += infeed_queue.number_of_tuple_elements arg_spec = tf_inspect.getargspec(func) num_func_args = len(arg_spec.args) if arg_spec.defaults is None: num_func_defaults = 0 else: num_func_defaults = len(arg_spec.defaults) min_func_args = num_func_args - num_func_defaults if num_args_supplied < min_func_args: # The required number of arguments is not enough to call the function. if num_func_defaults == 0 and arg_spec.varargs is None: return format_error('exactly', num_func_args) else: return format_error('at least', min_func_args) if arg_spec.varargs is None and num_args_supplied > num_func_args: # The required number of arguments is too many to call the function. if num_func_defaults == 0: return format_error('exactly', num_func_args) else: return format_error('at most', num_func_args) # Reaching here means either # 1) There are varargs, func can accept any number of arguments greater than # the minimum. 
# 2) Number of supplied arguments falls in range of acceptable argument count # of func. return None
_CapturedObject
python
kubernetes-client__python
kubernetes/client/models/v1_eviction.py
{ "start": 383, "end": 6690 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'delete_options': 'V1DeleteOptions', 'kind': 'str', 'metadata': 'V1ObjectMeta' } attribute_map = { 'api_version': 'apiVersion', 'delete_options': 'deleteOptions', 'kind': 'kind', 'metadata': 'metadata' } def __init__(self, api_version=None, delete_options=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501 """V1Eviction - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._delete_options = None self._kind = None self._metadata = None self.discriminator = None if api_version is not None: self.api_version = api_version if delete_options is not None: self.delete_options = delete_options if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata @property def api_version(self): """Gets the api_version of this V1Eviction. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this V1Eviction. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V1Eviction. APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V1Eviction. # noqa: E501 :type: str """ self._api_version = api_version @property def delete_options(self): """Gets the delete_options of this V1Eviction. # noqa: E501 :return: The delete_options of this V1Eviction. # noqa: E501 :rtype: V1DeleteOptions """ return self._delete_options @delete_options.setter def delete_options(self, delete_options): """Sets the delete_options of this V1Eviction. :param delete_options: The delete_options of this V1Eviction. # noqa: E501 :type: V1DeleteOptions """ self._delete_options = delete_options @property def kind(self): """Gets the kind of this V1Eviction. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V1Eviction. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V1Eviction. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1Eviction. # noqa: E501 :type: str """ self._kind = kind @property def metadata(self): """Gets the metadata of this V1Eviction. # noqa: E501 :return: The metadata of this V1Eviction. 
# noqa: E501 :rtype: V1ObjectMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """Sets the metadata of this V1Eviction. :param metadata: The metadata of this V1Eviction. # noqa: E501 :type: V1ObjectMeta """ self._metadata = metadata def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1Eviction): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1Eviction): return True return self.to_dict() != other.to_dict()
V1Eviction
python
pennersr__django-allauth
allauth/headless/account/inputs.py
{ "start": 6196, "end": 7054 }
class ____(inputs.Input): current_password = inputs.CharField(required=False) new_password = inputs.CharField() def __init__(self, *args, **kwargs): self.user = kwargs.pop("user") super().__init__(*args, **kwargs) self.fields["current_password"].required = self.user.has_usable_password() def clean_current_password(self): current_password = self.cleaned_data["current_password"] if current_password: if not self.user.check_password(current_password): raise get_account_adapter().validation_error("enter_current_password") return current_password def clean_new_password(self): new_password = self.cleaned_data["new_password"] adapter = get_account_adapter() return adapter.clean_password(new_password, user=self.user)
ChangePasswordInput
python
ray-project__ray
python/ray/llm/_internal/serve/config_generator/utils/models.py
{ "start": 629, "end": 1014 }
class ____(ServeModel): type: Literal["TextCompletion"] = TEXT_COMPLETION_MODEL_TYPE reference_model_id: Optional[str] = Field( None, description="This field only exists for custom user entered models whose serving defaults we don't have.", ) tensor_parallelism: int lora_config: Optional[TextCompletionLoraModelConfig] = None
TextCompletionModelConfig
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 260645, "end": 260990 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("cursor", "node") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") node = sgqlc.types.Field("CreatedCommitContribution", graphql_name="node")
CreatedCommitContributionEdge
python
ansible__ansible
test/integration/targets/support-callback_plugins/callback_plugins/callback_debug.py
{ "start": 227, "end": 731 }
class ____(CallbackBase): CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'stdout' CALLBACK_NAME = 'callback_debug' def __init__(self, *args, **kwargs): super(CallbackModule, self).__init__(*args, **kwargs) self._display.display('__init__') for name in (cb for cb in dir(self) if cb.startswith('v2_')): setattr(self, name, functools.partial(self.handle_v2, name)) def handle_v2(self, name, *args, **kwargs): self._display.display(name)
CallbackModule
python
huggingface__transformers
tests/models/persimmon/test_modeling_persimmon.py
{ "start": 1415, "end": 2583 }
class ____(CausalLMModelTest, unittest.TestCase): model_tester_class = PersimmonModelTester pipeline_model_mapping = ( { "feature-extraction": PersimmonModel, "text-classification": PersimmonForSequenceClassification, "token-classification": PersimmonForTokenClassification, # TODO (ydshieh): check why these two fail. Fix them or skip them in a better way. # "text-generation": PersimmonForCausalLM, # "zero-shot": PersimmonForSequenceClassification, } if is_torch_available() else {} ) @unittest.skip("Persimmon applies key/query norm which doesn't work with packing") def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self): pass @unittest.skip("Persimmon applies key/query norm which doesn't work with packing") def test_eager_padding_matches_padding_free_with_position_ids(self): pass @unittest.skip("Persimmon applies key/query norm which doesn't work with packing") def test_sdpa_padding_matches_padding_free_with_position_ids(self): pass @require_torch
PersimmonModelTest
python
cython__cython
tests/run/test_patma.py
{ "start": 779, "end": 1296 }
class ____(unittest.TestCase): def test_refleaks(self): # Hunting for leaks using -R doesn't catch leaks in the compiler itself, # just the code under test. This test ensures that if there are leaks in # the pattern compiler, those runs will fail: with open(__file__) as file: compile(file.read(), __file__, "exec") """ # TestTracing also mainly removed - doesn't seem like a core test # except for one test that seems misplaced in CPython (which is below)
TestCompiler
python
pytorch__pytorch
torch/optim/lr_scheduler.py
{ "start": 17612, "end": 22331 }
class ____(LRScheduler): """Multiply the learning rate of each parameter group by the factor given in the specified function. When last_epoch=-1, set initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. lr_lambda (function or list): A function which computes a multiplicative factor given an integer parameter epoch, or a list of such functions, one for each group in optimizer.param_groups. last_epoch (int): The index of last epoch. Default: -1. Example: >>> # xdoctest: +SKIP >>> lmbda = lambda epoch: 0.95 >>> scheduler = MultiplicativeLR(optimizer, lr_lambda=lmbda) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() .. image:: ../scripts/lr_scheduler_images/MultiplicativeLR.png """ def __init__( self, optimizer: Optimizer, lr_lambda: Union[Callable[[int], float], list[Callable[[int], float]]], last_epoch: int = -1, ) -> None: # noqa: D107 self.optimizer = optimizer self.lr_lambdas: list[Callable[[int], float]] if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple): self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups) else: if len(lr_lambda) != len(optimizer.param_groups): raise ValueError( f"Expected {len(optimizer.param_groups)} lr_lambdas, but got {len(lr_lambda)}" ) self.lr_lambdas = list(lr_lambda) for lr_lambda in self.lr_lambdas: if not callable(lr_lambda): raise TypeError( f"lr_lambda should be a function, but got {type(lr_lambda).__name__}" ) super().__init__(optimizer, last_epoch) @override def state_dict(self) -> dict[str, Any]: """Return the state of the scheduler as a :class:`dict`. It contains an entry for every variable in ``self.__dict__`` which is not the optimizer. The learning rate lambda functions will only be saved if they are callable objects and not if they are functions or lambdas. 
""" state_dict = { key: value for key, value in self.__dict__.items() if key not in ("optimizer", "lr_lambdas") } state_dict["lr_lambdas"] = [None] * len(self.lr_lambdas) for idx, fn in enumerate(self.lr_lambdas): if not isinstance(fn, types.FunctionType): # pyrefly: ignore [unsupported-operation] state_dict["lr_lambdas"][idx] = fn.__dict__.copy() return state_dict @override def load_state_dict(self, state_dict: dict[str, Any]) -> None: """Load the scheduler's state. Args: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:`state_dict`. """ lr_lambdas = state_dict.pop("lr_lambdas") self.__dict__.update(state_dict) # Restore state_dict keys in order to prevent side effects # https://github.com/pytorch/pytorch/issues/32756 state_dict["lr_lambdas"] = lr_lambdas for idx, fn in enumerate(lr_lambdas): if fn is not None: self.lr_lambdas[idx].__dict__.update(fn) @override def get_lr(self) -> list[float | Tensor]: r"""Compute the next learning rate for each of the optimizer's :attr:`~torch.optim.Optimizer.param_groups`. Scales the current ``group["lr"]``\s in each of the optimizer's :attr:`~torch.optim.Optimizer.param_groups` by the outputs of the :attr:`lr_lambdas` at :attr:`last_epoch`. Returns: list[float | Tensor]: A :class:`list` of learning rates for each of the optimizer's :attr:`~torch.optim.Optimizer.param_groups` with the same types as their current ``group["lr"]``\s. .. note:: If you're trying to inspect the most recent learning rate, use :meth:`get_last_lr()` instead. .. note:: The returned :class:`~torch.Tensor`\s are copies, and never alias the optimizer's ``group["lr"]``\s. """ _warn_get_lr_called_within_step(self) if not self._is_initial: return [ group["lr"] * lmbda(self.last_epoch) for lmbda, group in zip( self.lr_lambdas, self.optimizer.param_groups, strict=True ) ] else: return _param_groups_val_list(self.optimizer, "lr")
MultiplicativeLR
python
python-attrs__attrs
tests/dataclass_transform_example.py
{ "start": 357, "end": 472 }
class ____: a: str d = Frozen("a") d.a = "new" reveal_type(d.a) # noqa: F821 @attr.define(frozen=True)
Frozen
python
pytorch__pytorch
torch/testing/_internal/common_utils.py
{ "start": 3751, "end": 13231 }
class ____: # Set of env vars to set for the repro command that is output on test failure. # Specifically, this includes env vars that are set to non-default values and # are not implied. Maps from env var name -> value (int) repro_env_vars: dict = {} # Defines a flag usable throughout the test suite, determining its value by querying # the specified environment variable. # # Args: # name (str): The name of the flag. A global variable with this name will be set # for convenient access throughout the test suite. # env_var (str): The name of the primary environment variable from which to # determine the value of this flag. If this is None or the environment variable # is unset, the default value will be used unless otherwise implied (see # implied_by_fn). Default: None # default (bool): The default value to use for the flag if unset by the environment # variable and unimplied. Default: False # include_in_repro (bool): Indicates whether this flag should be included in the # repro command that is output on test failure (i.e. whether it is possibly # relevant to reproducing the test failure). Default: True # enabled_fn (Callable): Callable returning whether the flag should be enabled # given the environment variable value and the default value. Default: Lambda # requiring "0" to disable if on by default OR "1" to enable if off by default. # implied_by_fn (Callable): Thunk returning a bool to imply this flag as enabled # by something outside of its primary environment variable setting. For example, # this can be useful if the value of another environment variable implies the flag # as enabled. Default: Lambda returning False to indicate no implications. 
@staticmethod def def_flag( name, env_var=None, default=False, include_in_repro=True, enabled_fn=lambda env_var_val, default: ( (env_var_val != "0") if default else (env_var_val == "1")), implied_by_fn=lambda: False, ): enabled = default env_var_val = None if env_var is not None: env_var_val = os.getenv(env_var) enabled = enabled_fn(env_var_val, default) implied = implied_by_fn() enabled = enabled or implied if include_in_repro and (env_var is not None) and (enabled != default) and not implied: TestEnvironment.repro_env_vars[env_var] = env_var_val # export flag globally for convenience assert name not in globals(), f"duplicate definition of flag '{name}'" globals()[name] = enabled return enabled # Defines a setting usable throughout the test suite, determining its value by querying # the specified environment variable. This differs from a flag in that it's not restricted # to a boolean value. # # Args: # name (str): The name of the setting. A global variable with this name will be set # for convenient access throughout the test suite. # env_var (str): The name of the primary environment variable from which to # determine the value of this setting. If this is None or the environment variable # is unset, the default value will be used. Default: None # default (Any): The default value to use for the setting if unset by the environment # variable. Default: None # include_in_repro (bool): Indicates whether this setting should be included in the # repro command that is output on test failure (i.e. whether it is possibly # relevant to reproducing the test failure). Default: True # parse_fn (Callable): Callable parsing the env var string. Default value just uses # the string itself. 
@staticmethod def def_setting( name, env_var=None, default=None, include_in_repro=True, parse_fn=lambda maybe_val_str: maybe_val_str, ): value = default if env_var is None else os.getenv(env_var) value = parse_fn(value) if include_in_repro and (value != default): TestEnvironment.repro_env_vars[env_var] = value # export setting globally for convenience assert name not in globals(), f"duplicate definition of setting '{name}'" globals()[name] = value return value # Returns a string prefix usable to set environment variables for any test # settings that should be explicitly set to match this instantiation of the # test suite. # Example: "PYTORCH_TEST_WITH_ASAN=1 PYTORCH_TEST_WITH_ROCM=1" @staticmethod def repro_env_var_prefix() -> str: return " ".join([f"{env_var}={value}" for env_var, value in TestEnvironment.repro_env_vars.items()]) log = logging.getLogger(__name__) torch.backends.disable_global_flags() FILE_SCHEMA = "file://" if sys.platform == 'win32': FILE_SCHEMA = "file:///" # NB: This flag differs semantically from others in that setting the env var to any # non-empty value will cause it to be true: # CI=1, CI="true", CI=0, etc. all set the flag to be true. # CI= and an unset CI set the flag to be false. # GitHub sets the value to CI="true" to enable it. 
IS_CI: bool = TestEnvironment.def_flag( "IS_CI", env_var="CI", include_in_repro=False, enabled_fn=lambda env_var_value, _: bool(env_var_value), ) IS_SANDCASTLE: bool = TestEnvironment.def_flag( "IS_SANDCASTLE", env_var="SANDCASTLE", implied_by_fn=lambda: os.getenv("TW_JOB_USER") == "sandcastle", include_in_repro=False, ) IN_RE_WORKER: bool = os.environ.get("INSIDE_RE_WORKER") is not None _is_fbcode_default = ( hasattr(torch._utils_internal, "IS_FBSOURCE") and torch._utils_internal.IS_FBSOURCE ) IS_FBCODE: bool = TestEnvironment.def_flag( "IS_FBCODE", env_var="PYTORCH_TEST_FBCODE", default=_is_fbcode_default, include_in_repro=False, ) IS_REMOTE_GPU: bool = TestEnvironment.def_flag( "IS_REMOTE_GPU", env_var="PYTORCH_TEST_REMOTE_GPU", include_in_repro=False, ) DISABLE_RUNNING_SCRIPT_CHK: bool = TestEnvironment.def_flag( "DISABLE_RUNNING_SCRIPT_CHK", env_var="PYTORCH_DISABLE_RUNNING_SCRIPT_CHK", include_in_repro=False, ) # NB: enabled by default unless in an fbcode context. PRINT_REPRO_ON_FAILURE: bool = TestEnvironment.def_flag( "PRINT_REPRO_ON_FAILURE", env_var="PYTORCH_PRINT_REPRO_ON_FAILURE", default=(not IS_FBCODE), include_in_repro=False, ) # possibly restrict OpInfo tests to a single sample input OPINFO_SAMPLE_INPUT_INDEX: Optional[int] = TestEnvironment.def_setting( "OPINFO_SAMPLE_INPUT_INDEX", env_var="PYTORCH_OPINFO_SAMPLE_INPUT_INDEX", default=None, # Don't include the env var value in the repro command because the info will # be queried from the tracked sample input instead include_in_repro=False, parse_fn=lambda val: None if val is None else int(val), ) DEFAULT_DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json' DEFAULT_SLOW_TESTS_FILE = 'slow_tests.json' disabled_tests_dict = {} slow_tests_dict = {} def maybe_load_json(filename): if os.path.isfile(filename): with open(filename) as fp: return json.load(fp) log.warning("Attempted to load json file '%s' but it does not exist.", filename) return {} # set them here in case the tests are running in a 
subprocess that doesn't call run_tests if os.getenv("SLOW_TESTS_FILE", ""): slow_tests_dict = maybe_load_json(os.getenv("SLOW_TESTS_FILE", "")) if os.getenv("DISABLED_TESTS_FILE", ""): disabled_tests_dict = maybe_load_json(os.getenv("DISABLED_TESTS_FILE", "")) NATIVE_DEVICES = ('cpu', 'cuda', 'xpu', 'meta', 'mps', 'mtia', torch._C._get_privateuse1_backend_name()) # used for managing devices testing for torch profiler UTs # for now cpu, cuda and xpu are added for testing torch profiler UTs DEVICE_LIST_SUPPORT_PROFILING_TEST = ('cpu', 'cuda', 'xpu') ALLOW_XPU_PROFILING_TEST = True check_names = ['orin', 'concord', 'galen', 'xavier', 'nano', 'jetson', 'tegra', 'thor'] IS_JETSON = any(name in platform.platform() for name in check_names) def gcIfJetson(fn): # Irregular Jetson host/device memory setup requires cleanup to avoid tests being killed @functools.wraps(fn) def wrapper(*args, **kwargs): if IS_JETSON: gc.collect() torch.cuda.empty_cache() fn(*args, **kwargs) return wrapper # Tries to extract the current test function by crawling the stack. # If unsuccessful, return None. def extract_test_fn() -> Optional[Callable]: try: stack = inspect.stack() for frame_info in stack: frame = frame_info.frame if "self" not in frame.f_locals: continue self_val = frame.f_locals["self"] if isinstance(self_val, unittest.TestCase): test_id = self_val.id() *_, cls_name, test_name = test_id.rsplit('.', 2) if cls_name == type(self_val).__name__ and test_name.startswith("test"): test_fn = getattr(self_val, test_name).__func__ return test_fn except Exception: pass return None # Contains tracked input data useful for debugging purposes @dataclass
TestEnvironment
python
huggingface__transformers
src/transformers/models/clvp/modeling_clvp.py
{ "start": 17755, "end": 18396 }
class ____(nn.Module): """ This MLP is used in CLVP speech or text encoder models. """ def __init__(self, config): super().__init__() self.config = config self.fc1 = ClvpGatedLinearUnit(config) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout_layer = nn.Dropout(config.dropout) def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: hidden_states = self.fc1(hidden_states) hidden_states = self.dropout_layer(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states
ClvpEncoderMLP
python
scipy__scipy
scipy/io/matlab/_mio4.py
{ "start": 19563, "end": 20993 }
class ____: ''' Class for writing matlab 4 format files ''' def __init__(self, file_stream, oned_as=None): self.file_stream = file_stream if oned_as is None: oned_as = 'row' self.oned_as = oned_as self._matrix_writer = None def put_variables(self, mdict, write_header=None): ''' Write variables in `mdict` to stream Parameters ---------- mdict : mapping mapping with method ``items`` return name, contents pairs where ``name`` which will appeak in the matlab workspace in file load, and ``contents`` is something writeable to a matlab file, such as a NumPy array. write_header : {None, True, False} If True, then write the matlab file header before writing the variables. If None (the default) then write the file header if we are at position 0 in the stream. By setting False here, and setting the stream position to the end of the file, you can append variables to a matlab file ''' # there is no header for a matlab 4 mat file, so we ignore the # ``write_header`` input argument. It's there for compatibility # with the matlab 5 version of this method self._matrix_writer = VarWriter4(self) for name, var in mdict.items(): self._matrix_writer.write(var, name)
MatFile4Writer
python
airbytehq__airbyte
airbyte-integrations/connectors/source-jira/integration_tests/fixtures/data_generator/streams.py
{ "start": 11769, "end": 12800 }
class ____(ProjectComponents, GeneratorMixin): """ https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-project-components/#api-rest-api-3-component-post """ def path(self, **kwargs) -> str: return "component" def generate(self): projects_stream = Projects(authenticator=self._session.auth, domain=self._domain) for project in projects_stream.read_records(sync_mode=SyncMode.full_refresh): for index in range(random.randrange(6)): payload = json.dumps( { "isAssigneeTypeValid": False, "name": f"Component {index}", "description": "This is a Jira component", "project": project.get("key"), "assigneeType": "PROJECT_LEAD", "leadAccountId": "5fc9e78d2730d800760becc4", } ) self.generate_record(payload)
ProjectComponentsGenerator
python
scipy__scipy
scipy/stats/tests/test_continuous.py
{ "start": 46273, "end": 61674 }
class ____: @pytest.mark.parametrize('i, distdata', enumerate(distcont + distdiscrete)) def test_rv_generic(self, i, distdata): distname = distdata[0] slow = {'argus', 'exponpow', 'exponweib', 'genexpon', 'gompertz', 'halfgennorm', 'johnsonsb', 'kappa4', 'ksone', 'kstwo', 'kstwobign', 'norminvgauss', 'powerlognorm', 'powernorm', 'recipinvgauss', 'studentized_range', 'vonmises_line', # continuous 'betanbinom', 'logser', 'skellam', 'zipf'} # discrete if not int(os.environ.get('SCIPY_XSLOW', '0')) and distname in slow: pytest.skip('Skipping as XSLOW') if distname in { # skip these distributions 'levy_stable', # private methods seem to require >= 1d args 'vonmises', # circular distribution; shouldn't work 'poisson_binom', # vector shape parameter 'hypergeom', # distribution functions need interpolation 'nchypergeom_fisher', # distribution functions need interpolation 'nchypergeom_wallenius', # distribution functions need interpolation }: return # skip single test, mostly due to slight disagreement custom_tolerances = {'ksone': 1e-5, 'kstwo': 1e-5} # discontinuous PDF skip_entropy = {'kstwobign', 'pearson3'} # tolerance issue skip_skewness = {'exponpow', 'ksone', 'nchypergeom_wallenius'} # tolerance skip_kurtosis = {'chi', 'exponpow', 'invgamma', # tolerance 'johnsonsb', 'ksone', 'kstwo', # tolerance 'nchypergeom_wallenius'} # tolerance skip_logccdf = {'arcsine', 'skewcauchy', 'trapezoid', 'triang'} # tolerance skip_raw = {2: {'alpha', 'foldcauchy', 'halfcauchy', 'levy', 'levy_l'}, 3: {'pareto'}, # stats.pareto is just wrong 4: {'invgamma'}} # tolerance issue skip_standardized = {'exponpow', 'ksone'} # tolerances dist = getattr(stats, distname) params = dict(zip(dist.shapes.split(', '), distdata[1])) if dist.shapes else {} rng = np.random.default_rng(7548723590230982) CustomDistribution = stats.make_distribution(dist) X = CustomDistribution(**params) Y = dist(**params) x = X.sample(shape=10, rng=rng) p = X.cdf(x) rtol = custom_tolerances.get(distname, 1e-7) atol = 1e-12 
with np.errstate(divide='ignore', invalid='ignore'): m, v, s, k = Y.stats('mvsk') assert_allclose(X.support(), Y.support()) if distname not in skip_entropy: assert_allclose(X.entropy(), Y.entropy(), rtol=rtol) if isinstance(Y, stats.rv_discrete): # some continuous distributions have trouble with `logentropy` because # it uses complex numbers assert_allclose(np.exp(X.logentropy()), Y.entropy(), rtol=rtol) assert_allclose(X.median(), Y.median(), rtol=rtol) assert_allclose(X.mean(), m, rtol=rtol, atol=atol) assert_allclose(X.variance(), v, rtol=rtol, atol=atol) if distname not in skip_skewness: assert_allclose(X.skewness(), s, rtol=rtol, atol=atol) if distname not in skip_kurtosis: assert_allclose(X.kurtosis(convention='excess'), k, rtol=rtol, atol=atol) if isinstance(dist, stats.rv_continuous): assert_allclose(X.logpdf(x), Y.logpdf(x), rtol=rtol) assert_allclose(X.pdf(x), Y.pdf(x), rtol=rtol) else: assert_allclose(X.logpmf(x), Y.logpmf(x), rtol=rtol) assert_allclose(X.pmf(x), Y.pmf(x), rtol=rtol) assert_allclose(X.logcdf(x), Y.logcdf(x), rtol=rtol) assert_allclose(X.cdf(x), Y.cdf(x), rtol=rtol) if distname not in skip_logccdf: assert_allclose(X.logccdf(x), Y.logsf(x), rtol=rtol) assert_allclose(X.ccdf(x), Y.sf(x), rtol=rtol) # old infrastructure convention for ppf(p=0) and isf(p=1) is different than # new infrastructure. Adjust reference values accordingly. 
a, _ = Y.support() ref_ppf = Y.ppf(p) ref_ppf[p == 0] = a ref_isf = Y.isf(p) ref_isf[p == 1] = a assert_allclose(X.icdf(p), ref_ppf, rtol=rtol) assert_allclose(X.iccdf(p), ref_isf, rtol=rtol) for order in range(5): if distname not in skip_raw.get(order, {}): assert_allclose(X.moment(order, kind='raw'), Y.moment(order), rtol=rtol, atol=atol) for order in range(3, 4): if distname not in skip_standardized: assert_allclose(X.moment(order, kind='standardized'), Y.stats('mvsk'[order-1]), rtol=rtol, atol=atol) if isinstance(dist, stats.rv_continuous): # For discrete distributions, these won't agree at the far left end # of the support, and the new infrastructure is slow there (for now). seed = 845298245687345 assert_allclose(X.sample(shape=10, rng=seed), Y.rvs(size=10, random_state=np.random.default_rng(seed)), rtol=rtol) def test_custom(self): rng = np.random.default_rng(7548723590230982) class MyLogUniform: @property def __make_distribution_version__(self): return "1.16.0" @property def parameters(self): return {'a': {'endpoints': (0, np.inf), 'inclusive': (False, False)}, 'b': {'endpoints': ('a', np.inf), 'inclusive': (False, False)}} @property def support(self): return {'endpoints': ('a', 'b')} def pdf(self, x, a, b): return 1 / (x * (np.log(b) - np.log(a))) def sample(self, shape, *, a, b, rng=None): p = rng.uniform(size=shape) return np.exp(np.log(a) + p * (np.log(b) - np.log(a))) def moment(self, order, kind='raw', *, a, b): if order == 1 and kind == 'raw': # quadrature is perfectly accurate here; add 1e-10 error so we # can tell the difference between the two return (b - a) / np.log(b/a) + 1e-10 LogUniform = stats.make_distribution(MyLogUniform()) X = LogUniform(a=1., b=np.e) Y = stats.exp(Uniform(a=0., b=1.)) # pre-2.0 support is not needed for much longer, so let's just test with 2.0+ if np.__version__ >= "2.0": assert str(X) == f"MyLogUniform(a=1.0, b={np.e})" assert repr(X) == f"MyLogUniform(a=np.float64(1.0), b=np.float64({np.e}))" x = X.sample(shape=10, 
rng=rng) p = X.cdf(x) assert_allclose(X.support(), Y.support()) assert_allclose(X.entropy(), Y.entropy()) assert_allclose(X.median(), Y.median()) assert_allclose(X.logpdf(x), Y.logpdf(x)) assert_allclose(X.pdf(x), Y.pdf(x)) assert_allclose(X.logcdf(x), Y.logcdf(x)) assert_allclose(X.cdf(x), Y.cdf(x)) assert_allclose(X.logccdf(x), Y.logccdf(x)) assert_allclose(X.ccdf(x), Y.ccdf(x)) assert_allclose(X.icdf(p), Y.icdf(p)) assert_allclose(X.iccdf(p), Y.iccdf(p)) for kind in ['raw', 'central', 'standardized']: for order in range(5): assert_allclose(X.moment(order, kind=kind), Y.moment(order, kind=kind)) # Confirm that the `sample` and `moment` methods are overriden as expected sample_formula = X.sample(shape=10, rng=0, method='formula') sample_inverse = X.sample(shape=10, rng=0, method='inverse_transform') assert_allclose(sample_formula, sample_inverse) assert not np.all(sample_formula == sample_inverse) assert_allclose(X.mean(method='formula'), X.mean(method='quadrature')) assert not X.mean(method='formula') == X.mean(method='quadrature') # pdf and cdf formulas below can warn on boundary of support in some cases. # See https://github.com/scipy/scipy/pull/22560#discussion_r1962763840. 
@pytest.mark.slow @pytest.mark.filterwarnings("ignore::RuntimeWarning") @pytest.mark.parametrize("c", [-1, 0, 1, np.asarray([-2.1, -1., 0., 1., 2.1])]) def test_custom_variable_support(self, c): rng = np.random.default_rng(7548723590230982) class MyGenExtreme: @property def __make_distribution_version__(self): return "1.16.0" @property def parameters(self): return { 'c': {'endpoints': (-np.inf, np.inf), 'inclusive': (False, False)}, 'mu': {'endpoints': (-np.inf, np.inf), 'inclusive': (False, False)}, 'sigma': {'endpoints': (0, np.inf), 'inclusive': (False, False)} } @property def support(self): def left(*, c, mu, sigma): c, mu, sigma = np.broadcast_arrays(c, mu, sigma) result = np.empty_like(c) result[c >= 0] = -np.inf result[c < 0] = mu[c < 0] + sigma[c < 0] / c[c < 0] return result[()] def right(*, c, mu, sigma): c, mu, sigma = np.broadcast_arrays(c, mu, sigma) result = np.empty_like(c) result[c <= 0] = np.inf result[c > 0] = mu[c > 0] + sigma[c > 0] / c[c > 0] return result[()] return {"endpoints": (left, right), "inclusive": (False, False)} def pdf(self, x, *, c, mu, sigma): x, c, mu, sigma = np.broadcast_arrays(x, c, mu, sigma) t = np.empty_like(x) mask = (c == 0) t[mask] = np.exp(-(x[mask] - mu[mask])/sigma[mask]) t[~mask] = ( 1 - c[~mask]*(x[~mask] - mu[~mask])/sigma[~mask] )**(1/c[~mask]) result = 1/sigma * t**(1 - c)*np.exp(-t) return result[()] def cdf(self, x, *, c, mu, sigma): x, c, mu, sigma = np.broadcast_arrays(x, c, mu, sigma) t = np.empty_like(x) mask = (c == 0) t[mask] = np.exp(-(x[mask] - mu[mask])/sigma[mask]) t[~mask] = ( 1 - c[~mask]*(x[~mask] - mu[~mask])/sigma[~mask] )**(1/c[~mask]) return np.exp(-t)[()] GenExtreme1 = stats.make_distribution(MyGenExtreme()) GenExtreme2 = stats.make_distribution(stats.genextreme) X1 = GenExtreme1(c=c, mu=0, sigma=1) X2 = GenExtreme2(c=c) x = X1.sample(shape=10, rng=rng) p = X1.cdf(x) assert_allclose(X1.support(), X2.support()) assert_allclose(X1.entropy(), X2.entropy(), rtol=5e-6) assert_allclose(X1.median(), 
X2.median()) assert_allclose(X1.logpdf(x), X2.logpdf(x)) assert_allclose(X1.pdf(x), X2.pdf(x)) assert_allclose(X1.logcdf(x), X2.logcdf(x)) assert_allclose(X1.cdf(x), X2.cdf(x)) assert_allclose(X1.logccdf(x), X2.logccdf(x)) assert_allclose(X1.ccdf(x), X2.ccdf(x)) assert_allclose(X1.icdf(p), X2.icdf(p)) assert_allclose(X1.iccdf(p), X2.iccdf(p)) @pytest.mark.slow @pytest.mark.parametrize("a", [0.5, np.asarray([0.5, 1.0, 2.0, 4.0, 8.0])]) @pytest.mark.parametrize("b", [0.5, np.asarray([0.5, 1.0, 2.0, 4.0, 8.0])]) def test_custom_multiple_parameterizations(self, a, b): rng = np.random.default_rng(7548723590230982) class MyBeta: @property def __make_distribution_version__(self): return "1.16.0" @property def parameters(self): return ( {"a": (0, np.inf), "b": (0, np.inf)}, {"mu": (0, 1), "nu": (0, np.inf)}, ) def process_parameters(self, a=None, b=None, mu=None, nu=None): if a is not None and b is not None and mu is None and nu is None: nu = a + b mu = a / nu else: a = mu * nu b = nu - a return {"a": a, "b": b, "mu": mu, "nu": nu} @property def support(self): return {'endpoints': (0, 1)} def pdf(self, x, a, b, mu, nu): return special._ufuncs._beta_pdf(x, a, b) def cdf(self, x, a, b, mu, nu): return special.betainc(a, b, x) Beta = stats.make_distribution(stats.beta) MyBeta = stats.make_distribution(MyBeta()) mu = a / (a + b) nu = a + b X = MyBeta(a=a, b=b) Y = MyBeta(mu=mu, nu=nu) Z = Beta(a=a, b=b) x = Z.sample(shape=10, rng=rng) p = Z.cdf(x) assert_allclose(X.support(), Z.support()) assert_allclose(X.median(), Z.median()) assert_allclose(X.pdf(x), Z.pdf(x)) assert_allclose(X.cdf(x), Z.cdf(x)) assert_allclose(X.ccdf(x), Z.ccdf(x)) assert_allclose(X.icdf(p), Z.icdf(p)) assert_allclose(X.iccdf(p), Z.iccdf(p)) assert_allclose(Y.support(), Z.support()) assert_allclose(Y.median(), Z.median()) assert_allclose(Y.pdf(x), Z.pdf(x)) assert_allclose(Y.cdf(x), Z.cdf(x)) assert_allclose(Y.ccdf(x), Z.ccdf(x)) assert_allclose(Y.icdf(p), Z.icdf(p)) assert_allclose(Y.iccdf(p), Z.iccdf(p)) 
def test_input_validation(self): message = '`levy_stable` is not supported.' with pytest.raises(NotImplementedError, match=message): stats.make_distribution(stats.levy_stable) message = '`vonmises` is not supported.' with pytest.raises(NotImplementedError, match=message): stats.make_distribution(stats.vonmises) message = "The argument must be an instance of..." with pytest.raises(ValueError, match=message): stats.make_distribution(object()) def test_repr_str_docs(self): from scipy.stats._distribution_infrastructure import _distribution_names for dist in _distribution_names.keys(): assert hasattr(stats, dist) dist = stats.make_distribution(stats.gamma) assert str(dist(a=2)) == "Gamma(a=2.0)" if np.__version__ >= "2": assert repr(dist(a=2)) == "Gamma(a=np.float64(2.0))" assert 'Gamma' in dist.__doc__ dist = stats.make_distribution(stats.halfgennorm) assert str(dist(beta=2)) == "HalfGeneralizedNormal(beta=2.0)" if np.__version__ >= "2": assert repr(dist(beta=2)) == "HalfGeneralizedNormal(beta=np.float64(2.0))" assert 'HalfGeneralizedNormal' in dist.__doc__
TestMakeDistribution
python
ansible__ansible
test/units/_internal/templating/fixtures/valid_collection/ansible_collections/valid/also_valid/plugins/lookup/runtime_error.py
{ "start": 84, "end": 212 }
class ____(LookupBase): def run(self, terms, variables=None, **kwargs) -> list: raise NotImplementedError()
LookupModule
python
run-llama__llama_index
llama-index-integrations/llms/llama-index-llms-cloudflare-ai-gateway/llama_index/llms/cloudflare_ai_gateway/base.py
{ "start": 1176, "end": 1307 }
class ____(CloudflareAIGatewayError): """Raised when AI Gateway does not exist.""" pass
CloudflareAIGatewayDoesNotExistError
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol19.py
{ "start": 558, "end": 625 }
class ____(NamedTuple): x: int @dataclass(frozen=True)
ConcreteC1
python
scikit-image__scikit-image
tests/skimage/color/test_colorconv.py
{ "start": 1361, "end": 37344 }
class ____: img_rgb = data.colorwheel() img_grayscale = data.camera() img_rgba = np.array([[[0, 0.5, 1, 0], [0, 0.5, 1, 1], [0, 0.5, 1, 0.5]]]).astype( float ) img_stains = img_as_float(img_rgb) * 0.3 colbars = np.array( [[1, 1, 0, 0, 1, 1, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0], [1, 0, 1, 0, 1, 0, 1, 0]] ).astype(float) colbars_array = np.swapaxes(colbars.reshape(3, 4, 2), 0, 2) colbars_point75 = colbars * 0.75 colbars_point75_array = np.swapaxes(colbars_point75.reshape(3, 4, 2), 0, 2) xyz_array = np.array( [ [[0.4124, 0.21260, 0.01930]], # red [[0, 0, 0]], # black [[0.9505, 1.0, 1.089]], # white [[0.1805, 0.0722, 0.9505]], # blue [[0.07719, 0.15438, 0.02573]], # green ] ) lab_array = np.array( [ [[53.233, 80.109, 67.220]], # red [[0.0, 0.0, 0.0]], # black [[100.0, 0.005, -0.010]], # white [[32.303, 79.197, -107.864]], # blue [[46.229, -51.7, 49.898]], # green ] ) luv_array = np.array( [ [[53.233, 175.053, 37.751]], # red [[0.0, 0.0, 0.0]], # black [[100.0, 0.001, -0.017]], # white [[32.303, -9.400, -130.358]], # blue [[46.228, -43.774, 56.589]], # green ] ) # RGBA to RGB @pytest.mark.parametrize("channel_axis", [0, 1, 2, -1, -2, -3]) def test_rgba2rgb_conversion(self, channel_axis): rgba = self.img_rgba rgba = np.moveaxis(rgba, source=-1, destination=channel_axis) rgb = rgba2rgb(rgba, channel_axis=channel_axis) rgb = np.moveaxis(rgb, source=channel_axis, destination=-1) expected = np.array([[[1, 1, 1], [0, 0.5, 1], [0.5, 0.75, 1]]]).astype(float) assert_equal(rgb.shape, expected.shape) assert_almost_equal(rgb, expected) def test_rgba2rgb_error_grayscale(self): with pytest.raises(ValueError): rgba2rgb(self.img_grayscale) @pytest.mark.parametrize("channel_axis", [None, 1.5]) def test_rgba2rgb_error_channel_axis_invalid(self, channel_axis): with pytest.raises(TypeError): rgba2rgb(self.img_rgba, channel_axis=channel_axis) @pytest.mark.parametrize("channel_axis", [-4, 3]) def test_rgba2rgb_error_channel_axis_out_of_range(self, channel_axis): with pytest.raises(AxisError): 
rgba2rgb(self.img_rgba, channel_axis=channel_axis) def test_rgba2rgb_error_rgb(self): with pytest.raises(ValueError): rgba2rgb(self.img_rgb) def test_rgba2rgb_dtype(self): rgba = self.img_rgba.astype('float64') rgba32 = img_as_float32(rgba) assert rgba2rgb(rgba).dtype == rgba.dtype assert rgba2rgb(rgba32).dtype == rgba32.dtype # RGB to HSV @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_rgb2hsv_conversion(self, channel_axis): rgb = img_as_float(self.img_rgb)[::16, ::16] _rgb = np.moveaxis(rgb, source=-1, destination=channel_axis) hsv = rgb2hsv(_rgb, channel_axis=channel_axis) hsv = np.moveaxis(hsv, source=channel_axis, destination=-1) hsv = hsv.reshape(-1, 3) # ground truth from colorsys gt = np.array( [colorsys.rgb_to_hsv(pt[0], pt[1], pt[2]) for pt in rgb.reshape(-1, 3)] ) assert_almost_equal(hsv, gt) def test_rgb2hsv_error_grayscale(self): with pytest.raises(ValueError): rgb2hsv(self.img_grayscale) def test_rgb2hsv_dtype(self): rgb = img_as_float(self.img_rgb) rgb32 = img_as_float32(self.img_rgb) assert rgb2hsv(rgb).dtype == rgb.dtype assert rgb2hsv(rgb32).dtype == rgb32.dtype # HSV to RGB @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_hsv2rgb_conversion(self, channel_axis): rgb = self.img_rgb.astype("float32")[::16, ::16] # create HSV image with colorsys hsv = np.array( [colorsys.rgb_to_hsv(pt[0], pt[1], pt[2]) for pt in rgb.reshape(-1, 3)] ).reshape(rgb.shape) hsv = np.moveaxis(hsv, source=-1, destination=channel_axis) _rgb = hsv2rgb(hsv, channel_axis=channel_axis) _rgb = np.moveaxis(_rgb, source=channel_axis, destination=-1) # convert back to RGB and compare with original. 
# relative precision for RGB -> HSV roundtrip is about 1e-6 assert_almost_equal(rgb, _rgb, decimal=4) def test_hsv2rgb_error_grayscale(self): with pytest.raises(ValueError): hsv2rgb(self.img_grayscale) def test_hsv2rgb_dtype(self): rgb = self.img_rgb.astype("float32")[::16, ::16] # create HSV image with colorsys hsv = np.array( [colorsys.rgb_to_hsv(pt[0], pt[1], pt[2]) for pt in rgb.reshape(-1, 3)], dtype='float64', ).reshape(rgb.shape) hsv32 = hsv.astype('float32') assert hsv2rgb(hsv).dtype == hsv.dtype assert hsv2rgb(hsv32).dtype == hsv32.dtype # RGB to XYZ @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_rgb2xyz_conversion(self, channel_axis): gt = np.array( [ [ [0.950456, 1.0, 1.088754], [0.538003, 0.787329, 1.06942], [0.592876, 0.28484, 0.969561], [0.180423, 0.072169, 0.950227], ], [ [0.770033, 0.927831, 0.138527], [0.35758, 0.71516, 0.119193], [0.412453, 0.212671, 0.019334], [0.0, 0.0, 0.0], ], ] ) img = np.moveaxis(self.colbars_array, source=-1, destination=channel_axis) out = rgb2xyz(img, channel_axis=channel_axis) out = np.moveaxis(out, source=channel_axis, destination=-1) assert_almost_equal(out, gt) # stop repeating the "raises" checks for all other functions that are # implemented with color._convert() def test_rgb2xyz_error_grayscale(self): with pytest.raises(ValueError): rgb2xyz(self.img_grayscale) def test_rgb2xyz_dtype(self): img = self.colbars_array img32 = img.astype('float32') assert rgb2xyz(img).dtype == img.dtype assert rgb2xyz(img32).dtype == img32.dtype # XYZ to RGB def test_xyz2rgb_conversion(self): assert_almost_equal(xyz2rgb(rgb2xyz(self.colbars_array)), self.colbars_array) def test_xyz2rgb_dtype(self): img = rgb2xyz(self.colbars_array) img32 = img.astype('float32') assert xyz2rgb(img).dtype == img.dtype assert xyz2rgb(img32).dtype == img32.dtype # RGB<->XYZ roundtrip on another image @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_xyz_rgb_roundtrip(self, channel_axis): img_rgb = 
img_as_float(self.img_rgb) img_rgb = np.moveaxis(img_rgb, source=-1, destination=channel_axis) round_trip = xyz2rgb( rgb2xyz(img_rgb, channel_axis=channel_axis), channel_axis=channel_axis ) assert_array_almost_equal(round_trip, img_rgb) # HED<->RGB roundtrip with ubyte image def test_hed_rgb_roundtrip(self): img_in = img_as_ubyte(self.img_stains) img_out = rgb2hed(hed2rgb(img_in)) assert_equal(img_as_ubyte(img_out), img_in) # HED<->RGB roundtrip with float image @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_hed_rgb_float_roundtrip(self, channel_axis): img_in = self.img_stains img_in = np.moveaxis(img_in, source=-1, destination=channel_axis) img_out = rgb2hed( hed2rgb(img_in, channel_axis=channel_axis), channel_axis=channel_axis ) assert_array_almost_equal(img_out, img_in) # BRO<->RGB roundtrip with ubyte image def test_bro_rgb_roundtrip(self): from skimage.color.colorconv import bro_from_rgb, rgb_from_bro img_in = img_as_ubyte(self.img_stains) img_out = combine_stains(img_in, rgb_from_bro) img_out = separate_stains(img_out, bro_from_rgb) assert_equal(img_as_ubyte(img_out), img_in) # BRO<->RGB roundtrip with float image @pytest.mark.parametrize("channel_axis", [0, 1, -1]) def test_bro_rgb_roundtrip_float(self, channel_axis): from skimage.color.colorconv import bro_from_rgb, rgb_from_bro img_in = self.img_stains img_in = np.moveaxis(img_in, source=-1, destination=channel_axis) img_out = combine_stains(img_in, rgb_from_bro, channel_axis=channel_axis) img_out = separate_stains(img_out, bro_from_rgb, channel_axis=channel_axis) assert_array_almost_equal(img_out, img_in) # RGB to RGB CIE @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_rgb2rgbcie_conversion(self, channel_axis): gt = np.array( [ [ [0.1488856, 0.18288098, 0.19277574], [0.01163224, 0.16649536, 0.18948516], [0.12259182, 0.03308008, 0.17298223], [-0.01466154, 0.01669446, 0.16969164], ], [ [0.16354714, 0.16618652, 0.0230841], [0.02629378, 0.1498009, 0.01979351], [0.13725336, 
0.01638562, 0.00329059], [0.0, 0.0, 0.0], ], ] ) img = np.moveaxis(self.colbars_array, source=-1, destination=channel_axis) out = rgb2rgbcie(img, channel_axis=channel_axis) out = np.moveaxis(out, source=channel_axis, destination=-1) assert_almost_equal(out, gt) def test_rgb2rgbcie_dtype(self): img = self.colbars_array.astype('float64') img32 = img.astype('float32') assert rgb2rgbcie(img).dtype == img.dtype assert rgb2rgbcie(img32).dtype == img32.dtype # RGB CIE to RGB @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_rgbcie2rgb_conversion(self, channel_axis): rgb = np.moveaxis(self.colbars_array, source=-1, destination=channel_axis) round_trip = rgbcie2rgb( rgb2rgbcie(rgb, channel_axis=channel_axis), channel_axis=channel_axis ) # only roundtrip test, we checked rgb2rgbcie above already assert_almost_equal(round_trip, rgb) def test_rgbcie2rgb_dtype(self): img = rgb2rgbcie(self.colbars_array).astype('float64') img32 = img.astype('float32') assert rgbcie2rgb(img).dtype == img.dtype assert rgbcie2rgb(img32).dtype == img32.dtype @pytest.mark.parametrize("channel_axis", [0, -1]) def test_convert_colorspace(self, channel_axis): colspaces = ['HSV', 'RGB CIE', 'XYZ', 'YCbCr', 'YPbPr', 'YDbDr'] colfuncs_from = [hsv2rgb, rgbcie2rgb, xyz2rgb, ycbcr2rgb, ypbpr2rgb, ydbdr2rgb] colfuncs_to = [rgb2hsv, rgb2rgbcie, rgb2xyz, rgb2ycbcr, rgb2ypbpr, rgb2ydbdr] colbars_array = np.moveaxis( self.colbars_array, source=-1, destination=channel_axis ) kw = dict(channel_axis=channel_axis) assert_almost_equal( convert_colorspace(colbars_array, 'RGB', 'RGB', **kw), colbars_array ) for i, space in enumerate(colspaces): gt = colfuncs_from[i](colbars_array, **kw) assert_almost_equal( convert_colorspace(colbars_array, space, 'RGB', **kw), gt ) gt = colfuncs_to[i](colbars_array, **kw) assert_almost_equal( convert_colorspace(colbars_array, 'RGB', space, **kw), gt ) with pytest.raises(ValueError): convert_colorspace(self.colbars_array, 'nokey', 'XYZ') with pytest.raises(ValueError): 
convert_colorspace(self.colbars_array, 'RGB', 'nokey') @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_rgb2gray(self, channel_axis): x = np.array([1, 1, 1]).reshape((1, 1, 3)).astype(float) x = np.moveaxis(x, source=-1, destination=channel_axis) g = rgb2gray(x, channel_axis=channel_axis) assert_array_almost_equal(g, 1) assert_equal(g.shape, (1, 1)) def test_rgb2gray_contiguous(self): x = np.random.rand(10, 10, 3) assert rgb2gray(x).flags["C_CONTIGUOUS"] assert rgb2gray(x[:5, :5]).flags["C_CONTIGUOUS"] def test_rgb2gray_alpha(self): x = np.empty((10, 10, 4)) with pytest.raises(ValueError): rgb2gray(x) def test_rgb2gray_on_gray(self): with pytest.raises(ValueError): rgb2gray(np.empty((5, 5))) def test_rgb2gray_dtype(self): img = np.random.rand(10, 10, 3).astype('float64') img32 = img.astype('float32') assert rgb2gray(img).dtype == img.dtype assert rgb2gray(img32).dtype == img32.dtype # test matrices for xyz2lab and lab2xyz generated using # http://www.easyrgb.com/index.php?X=CALC # Note: easyrgb website displays xyz*100 def test_xyz2lab(self, test_root_dir): assert_array_almost_equal(xyz2lab(self.xyz_array), self.lab_array, decimal=3) # Test the conversion with the rest of the illuminants. 
for I in ["A", "B", "C", "d50", "d55", "d65"]: I = I.lower() for obs in ["2", "10", "R"]: obs = obs.lower() fname = f'color/data/lab_array_{I}_{obs}.npy' lab_array_I_obs = np.load(test_root_dir / fname) assert_array_almost_equal( lab_array_I_obs, xyz2lab(self.xyz_array, I, obs), decimal=2 ) for I in ["d75", "e"]: fname = f'color/data/lab_array_{I}_2.npy' lab_array_I_obs = np.load(test_root_dir / fname) assert_array_almost_equal( lab_array_I_obs, xyz2lab(self.xyz_array, I, "2"), decimal=2 ) @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_xyz2lab_channel_axis(self, channel_axis): # test conversion with channels along a specified axis xyz = np.moveaxis(self.xyz_array, source=-1, destination=channel_axis) lab = xyz2lab(xyz, channel_axis=channel_axis) lab = np.moveaxis(lab, source=channel_axis, destination=-1) assert_array_almost_equal(lab, self.lab_array, decimal=3) def test_xyz2lab_dtype(self): img = self.xyz_array.astype('float64') img32 = img.astype('float32') assert xyz2lab(img).dtype == img.dtype assert xyz2lab(img32).dtype == img32.dtype def test_lab2xyz(self, test_root_dir): assert_array_almost_equal(lab2xyz(self.lab_array), self.xyz_array, decimal=3) # Test the conversion with the rest of the illuminants. for I in ["A", "B", "C", "d50", "d55", "d65"]: I = I.lower() for obs in ["2", "10", "R"]: obs = obs.lower() fname = f'color/data/lab_array_{I}_{obs}.npy' lab_array_I_obs = np.load(test_root_dir / fname) assert_array_almost_equal( lab2xyz(lab_array_I_obs, I, obs), self.xyz_array, decimal=3 ) for I in ["d75", "e"]: fname = f'color/data/lab_array_{I}_2.npy' lab_array_I_obs = np.load(test_root_dir / fname) assert_array_almost_equal( lab2xyz(lab_array_I_obs, I, "2"), self.xyz_array, decimal=3 ) # And we include a call to test the exception handling in the code. 
with pytest.raises(ValueError): lab2xyz(lab_array_I_obs, "NaI", "2") # Not an illuminant with pytest.raises(ValueError): lab2xyz(lab_array_I_obs, "d50", "42") # Not a degree @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_lab2xyz_channel_axis(self, channel_axis): # test conversion with channels along a specified axis lab = np.moveaxis(self.lab_array, source=-1, destination=channel_axis) xyz = lab2xyz(lab, channel_axis=channel_axis) xyz = np.moveaxis(xyz, source=channel_axis, destination=-1) assert_array_almost_equal(xyz, self.xyz_array, decimal=3) def test_lab2xyz_dtype(self): img = self.lab_array.astype('float64') img32 = img.astype('float32') assert lab2xyz(img).dtype == img.dtype assert lab2xyz(img32).dtype == img32.dtype def test_rgb2lab_brucelindbloom(self): """ Test the RGB->Lab conversion by comparing to the calculator on the authoritative Bruce Lindbloom [website](http://brucelindbloom.com/index.html?ColorCalculator.html). """ # Obtained with D65 white point, sRGB model and gamma gt_for_colbars = np.array( [ [100, 0, 0], [97.1393, -21.5537, 94.4780], [91.1132, -48.0875, -14.1312], [87.7347, -86.1827, 83.1793], [60.3242, 98.2343, -60.8249], [53.2408, 80.0925, 67.2032], [32.2970, 79.1875, -107.8602], [0, 0, 0], ] ).T gt_array = np.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2) assert_array_almost_equal(rgb2lab(self.colbars_array), gt_array, decimal=2) @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_lab_rgb_roundtrip(self, channel_axis): img_rgb = img_as_float(self.img_rgb) img_rgb = np.moveaxis(img_rgb, source=-1, destination=channel_axis) assert_array_almost_equal( lab2rgb( rgb2lab(img_rgb, channel_axis=channel_axis), channel_axis=channel_axis ), img_rgb, ) def test_rgb2lab_dtype(self): img = self.colbars_array.astype('float64') img32 = img.astype('float32') assert rgb2lab(img).dtype == img.dtype assert rgb2lab(img32).dtype == img32.dtype def test_lab2rgb_dtype(self): img = self.lab_array.astype('float64') img32 = 
img.astype('float32') assert lab2rgb(img).dtype == img.dtype assert lab2rgb(img32).dtype == img32.dtype # test matrices for xyz2luv and luv2xyz generated using # http://www.easyrgb.com/index.php?X=CALC # Note: easyrgb website displays xyz*100 def test_xyz2luv(self, test_root_dir): assert_array_almost_equal(xyz2luv(self.xyz_array), self.luv_array, decimal=3) # Test the conversion with the rest of the illuminants. for I in ["A", "B", "C", "d50", "d55", "d65"]: I = I.lower() for obs in ["2", "10", "R"]: obs = obs.lower() fname = f'color/data/luv_array_{I}_{obs}.npy' luv_array_I_obs = np.load(test_root_dir / fname) assert_array_almost_equal( luv_array_I_obs, xyz2luv(self.xyz_array, I, obs), decimal=2 ) for I in ["d75", "e"]: fname = f'color/data/luv_array_{I}_2.npy' luv_array_I_obs = np.load(test_root_dir / fname) assert_array_almost_equal( luv_array_I_obs, xyz2luv(self.xyz_array, I, "2"), decimal=2 ) @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_xyz2luv_channel_axis(self, channel_axis): # test conversion with channels along a specified axis xyz = np.moveaxis(self.xyz_array, source=-1, destination=channel_axis) luv = xyz2luv(xyz, channel_axis=channel_axis) luv = np.moveaxis(luv, source=channel_axis, destination=-1) assert_array_almost_equal(luv, self.luv_array, decimal=3) def test_xyz2luv_dtype(self): img = self.xyz_array.astype('float64') img32 = img.astype('float32') assert xyz2luv(img).dtype == img.dtype assert xyz2luv(img32).dtype == img32.dtype def test_luv2xyz(self, test_root_dir): assert_array_almost_equal(luv2xyz(self.luv_array), self.xyz_array, decimal=3) # Test the conversion with the rest of the illuminants. 
for I in ["A", "B", "C", "d50", "d55", "d65"]: I = I.lower() for obs in ["2", "10", "R"]: obs = obs.lower() fname = f'color/data/luv_array_{I}_{obs}.npy' luv_array_I_obs = np.load(test_root_dir / fname) assert_array_almost_equal( luv2xyz(luv_array_I_obs, I, obs), self.xyz_array, decimal=3 ) for I in ["d75", "e"]: fname = f'color/data/luv_array_{I}_2.npy' luv_array_I_obs = np.load(test_root_dir / fname) assert_array_almost_equal( luv2xyz(luv_array_I_obs, I, "2"), self.xyz_array, decimal=3 ) @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_luv2xyz_channel_axis(self, channel_axis): # test conversion with channels along a specified axis luv = np.moveaxis(self.luv_array, source=-1, destination=channel_axis) xyz = luv2xyz(luv, channel_axis=channel_axis) xyz = np.moveaxis(xyz, source=channel_axis, destination=-1) assert_array_almost_equal(xyz, self.xyz_array, decimal=3) def test_luv2xyz_dtype(self): img = self.luv_array.astype('float64') img32 = img.astype('float32') assert luv2xyz(img).dtype == img.dtype assert luv2xyz(img32).dtype == img32.dtype def test_rgb2luv_brucelindbloom(self): """ Test the RGB->Lab conversion by comparing to the calculator on the authoritative Bruce Lindbloom [website](http://brucelindbloom.com/index.html?ColorCalculator.html). 
""" # Obtained with D65 white point, sRGB model and gamma gt_for_colbars = np.array( [ [100, 0, 0], [97.1393, 7.7056, 106.7866], [91.1132, -70.4773, -15.2042], [87.7347, -83.0776, 107.3985], [60.3242, 84.0714, -108.6834], [53.2408, 175.0151, 37.7564], [32.2970, -9.4054, -130.3423], [0, 0, 0], ] ).T gt_array = np.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2) assert_array_almost_equal(rgb2luv(self.colbars_array), gt_array, decimal=2) def test_rgb2luv_dtype(self): img = self.colbars_array.astype('float64') img32 = img.astype('float32') assert rgb2luv(img).dtype == img.dtype assert rgb2luv(img32).dtype == img32.dtype def test_luv2rgb_dtype(self): img = self.luv_array.astype('float64') img32 = img.astype('float32') assert luv2rgb(img).dtype == img.dtype assert luv2rgb(img32).dtype == img32.dtype @pytest.mark.parametrize("channel_axis", [0, 1, -1 - 2]) def test_luv_rgb_roundtrip(self, channel_axis): img_rgb = img_as_float(self.img_rgb) img_rgb = np.moveaxis(img_rgb, source=-1, destination=channel_axis) assert_array_almost_equal( luv2rgb( rgb2luv(img_rgb, channel_axis=channel_axis), channel_axis=channel_axis ), img_rgb, ) def test_lab_rgb_outlier(self): lab_array = np.ones((3, 1, 3)) lab_array[0] = [50, -12, 85] lab_array[1] = [50, 12, -85] lab_array[2] = [90, -4, -47] rgb_array = np.array( [ [[0.501, 0.481, 0]], [[0, 0.482, 1.0]], [[0.578, 0.914, 1.0]], ] ) assert_almost_equal(lab2rgb(lab_array), rgb_array, decimal=3) def test_lab_full_gamut(self): a, b = np.meshgrid(np.arange(-100, 100), np.arange(-100, 100)) L = np.ones(a.shape) lab = np.dstack((L, a, b)) regex = ( "Conversion from CIE-LAB to XYZ color space resulted in " "\\d+ negative Z values that have been clipped to zero" ) for value in [0, 10, 20]: lab[:, :, 0] = value with pytest.warns(UserWarning, match=regex) as record: lab2xyz(lab) assert_stacklevel(record) @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_lab_lch_roundtrip(self, channel_axis): rgb = img_as_float(self.img_rgb) rgb = 
np.moveaxis(rgb, source=-1, destination=channel_axis) lab = rgb2lab(rgb, channel_axis=channel_axis) lab2 = lch2lab( lab2lch(lab, channel_axis=channel_axis), channel_axis=channel_axis, ) assert_array_almost_equal(lab2, lab) def test_rgb_lch_roundtrip(self): rgb = img_as_float(self.img_rgb) lab = rgb2lab(rgb) lch = lab2lch(lab) lab2 = lch2lab(lch) rgb2 = lab2rgb(lab2) assert_array_almost_equal(rgb, rgb2) def test_lab_lch_0d(self): lab0 = self._get_lab0() lch0 = lab2lch(lab0) lch2 = lab2lch(lab0[None, None, :]) assert_array_almost_equal(lch0, lch2[0, 0, :]) def test_lab_lch_1d(self): lab0 = self._get_lab0() lch0 = lab2lch(lab0) lch1 = lab2lch(lab0[None, :]) assert_array_almost_equal(lch0, lch1[0, :]) def test_lab_lch_3d(self): lab0 = self._get_lab0() lch0 = lab2lch(lab0) lch3 = lab2lch(lab0[None, None, None, :]) assert_array_almost_equal(lch0, lch3[0, 0, 0, :]) def _get_lab0(self): rgb = img_as_float(self.img_rgb[:1, :1, :]) return rgb2lab(rgb)[0, 0, :] def test_yuv(self): rgb = np.array([[[1.0, 1.0, 1.0]]]) assert_array_almost_equal(rgb2yuv(rgb), np.array([[[1, 0, 0]]])) assert_array_almost_equal(rgb2yiq(rgb), np.array([[[1, 0, 0]]])) assert_array_almost_equal(rgb2ypbpr(rgb), np.array([[[1, 0, 0]]])) assert_array_almost_equal(rgb2ycbcr(rgb), np.array([[[235, 128, 128]]])) assert_array_almost_equal(rgb2ydbdr(rgb), np.array([[[1, 0, 0]]])) rgb = np.array([[[0.0, 1.0, 0.0]]]) assert_array_almost_equal( rgb2yuv(rgb), np.array([[[0.587, -0.28886916, -0.51496512]]]) ) assert_array_almost_equal( rgb2yiq(rgb), np.array([[[0.587, -0.27455667, -0.52273617]]]) ) assert_array_almost_equal( rgb2ypbpr(rgb), np.array([[[0.587, -0.331264, -0.418688]]]) ) assert_array_almost_equal( rgb2ycbcr(rgb), np.array([[[144.553, 53.797, 34.214]]]) ) assert_array_almost_equal(rgb2ydbdr(rgb), np.array([[[0.587, -0.883, 1.116]]])) @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_yuv_roundtrip(self, channel_axis): img_rgb = img_as_float(self.img_rgb)[::16, ::16] img_rgb = 
np.moveaxis(img_rgb, source=-1, destination=channel_axis) assert_array_almost_equal( yuv2rgb( rgb2yuv(img_rgb, channel_axis=channel_axis), channel_axis=channel_axis ), img_rgb, ) assert_array_almost_equal( yiq2rgb( rgb2yiq(img_rgb, channel_axis=channel_axis), channel_axis=channel_axis ), img_rgb, ) assert_array_almost_equal( ypbpr2rgb( rgb2ypbpr(img_rgb, channel_axis=channel_axis), channel_axis=channel_axis ), img_rgb, ) assert_array_almost_equal( ycbcr2rgb( rgb2ycbcr(img_rgb, channel_axis=channel_axis), channel_axis=channel_axis ), img_rgb, ) assert_array_almost_equal( ydbdr2rgb( rgb2ydbdr(img_rgb, channel_axis=channel_axis), channel_axis=channel_axis ), img_rgb, ) def test_rgb2yuv_dtype(self): img = self.colbars_array.astype('float64') img32 = img.astype('float32') assert rgb2yuv(img).dtype == img.dtype assert rgb2yuv(img32).dtype == img32.dtype def test_yuv2rgb_dtype(self): img = rgb2yuv(self.colbars_array).astype('float64') img32 = img.astype('float32') assert yuv2rgb(img).dtype == img.dtype assert yuv2rgb(img32).dtype == img32.dtype def test_rgb2yiq_conversion(self): rgb = img_as_float(self.img_rgb)[::16, ::16] yiq = rgb2yiq(rgb).reshape(-1, 3) gt = np.array( [colorsys.rgb_to_yiq(pt[0], pt[1], pt[2]) for pt in rgb.reshape(-1, 3)] ) assert_almost_equal(yiq, gt, decimal=2) @pytest.mark.parametrize("func", [lab2rgb, lab2xyz]) def test_warning_stacklevel(self, func): regex = ( "Conversion from CIE-LAB.* XYZ.*color space resulted in " "1 negative Z values that have been clipped to zero" ) with pytest.warns(UserWarning, match=regex) as messages: func(lab=[[[0, 0, 300.0]]]) assert_stacklevel(messages) assert len(messages) == 1 assert messages[0].filename == __file__, "warning points at wrong file" def test_gray2rgb(): x = np.array([0, 0.5, 1]) w = gray2rgb(x) expected_output = np.array( [ [0, 0, 0], [ 0.5, 0.5, 0.5, ], [1, 1, 1], ] ) assert_equal(w, expected_output) x = x.reshape((3, 1)) y = gray2rgb(x) assert_equal(y.shape, (3, 1, 3)) assert_equal(y.dtype, x.dtype) 
assert_equal(y[..., 0], x) assert_equal(y[0, 0, :], [0, 0, 0]) x = np.array([[0, 128, 255]], dtype=np.uint8) z = gray2rgb(x) assert_equal(z.shape, (1, 3, 3)) assert_equal(z[..., 0], x) assert_equal(z[0, 1, :], [128, 128, 128]) def test_gray2rgb_rgb(): x = np.random.rand(5, 5, 4) y = gray2rgb(x) assert y.shape == (x.shape + (3,)) for i in range(3): assert_equal(x, y[..., i]) @pytest.mark.parametrize("shape", [(5, 5), (5, 5, 4), (5, 4, 5, 4)]) @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_gray2rgba(shape, channel_axis): # nD case img = np.random.random(shape) rgba = gray2rgba(img, channel_axis=channel_axis) assert rgba.ndim == img.ndim + 1 # Shape check new_axis_loc = channel_axis % rgba.ndim assert_equal(rgba.shape, shape[:new_axis_loc] + (4,) + shape[new_axis_loc:]) # dtype check assert rgba.dtype == img.dtype # RGB channels check for channel in range(3): assert_equal(rgba[slice_at_axis(channel, axis=new_axis_loc)], img) # Alpha channel check assert_equal(rgba[slice_at_axis(3, axis=new_axis_loc)], 1.0) @pytest.mark.parametrize("shape", [(5, 5), (5, 5, 4), (5, 4, 5, 4)]) @pytest.mark.parametrize("channel_axis", [0, 1, -1, -2]) def test_gray2rgb_channel_axis(shape, channel_axis): # nD case img = np.random.random(shape) rgb = gray2rgb(img, channel_axis=channel_axis) assert rgb.ndim == img.ndim + 1 # Shape check new_axis_loc = channel_axis % rgb.ndim assert_equal(rgb.shape, shape[:new_axis_loc] + (3,) + shape[new_axis_loc:]) # dtype check assert rgb.dtype == img.dtype def test_gray2rgba_dtype(): img_f64 = np.random.random((5, 5)) img_f32 = img_f64.astype('float32') img_u8 = img_as_ubyte(img_f64) img_int = img_u8.astype(int) for img in [img_f64, img_f32, img_u8, img_int]: assert gray2rgba(img).dtype == img.dtype def test_gray2rgba_alpha(): img = np.random.random((5, 5)) img_u8 = img_as_ubyte(img) # Default alpha = None rgba = gray2rgba(img, alpha) assert_equal(rgba[..., :3], gray2rgb(img)) assert_equal(rgba[..., 3], 1.0) # Scalar alpha = 0.5 rgba = 
gray2rgba(img, alpha) assert_equal(rgba[..., :3], gray2rgb(img)) assert_equal(rgba[..., 3], alpha) # Array alpha = np.random.random((5, 5)) rgba = gray2rgba(img, alpha) assert_equal(rgba[..., :3], gray2rgb(img)) assert_equal(rgba[..., 3], alpha) # Warning about alpha cast alpha = 0.5 with expected_warnings(["alpha cannot be safely cast to image dtype"]): rgba = gray2rgba(img_u8, alpha) assert_equal(rgba[..., :3], gray2rgb(img_u8)) # Invalid shape alpha = np.random.random((5, 5, 1)) expected_err_msg = "alpha.shape must match image.shape" with pytest.raises(ValueError) as err: rgba = gray2rgba(img, alpha) assert expected_err_msg == str(err.value) @pytest.mark.parametrize( "alpha,dtype", [ (-1, np.uint8), (300, np.int8), (0.5, int), (0.5, np.uint8), (np.finfo(np.float64).max, np.float32), ], ) def test_gray2rgba_alpha_fail_cast(alpha, dtype): image = np.ones((5, 5), dtype=dtype) with pytest.warns(UserWarning, match="alpha cannot be safely cast"): gray2rgba(image, alpha=alpha) @pytest.mark.parametrize("func", [rgb2gray, gray2rgb, gray2rgba]) @pytest.mark.parametrize( "shape", ([(3,), (2, 3), (4, 5, 3), (5, 4, 5, 3), (4, 5, 4, 5, 3)]) ) def test_nD_gray_conversion(func, shape): img = np.random.rand(*shape) out = func(img) common_ndim = min(out.ndim, len(shape)) assert out.shape[:common_ndim] == shape[:common_ndim] @pytest.mark.parametrize( "func", [ rgb2hsv, hsv2rgb, rgb2xyz, xyz2rgb, rgb2hed, hed2rgb, rgb2rgbcie, rgbcie2rgb, xyz2lab, lab2xyz, lab2rgb, rgb2lab, xyz2luv, luv2xyz, luv2rgb, rgb2luv, lab2lch, lch2lab, rgb2yuv, yuv2rgb, rgb2yiq, yiq2rgb, rgb2ypbpr, ypbpr2rgb, rgb2ycbcr, ycbcr2rgb, rgb2ydbdr, ydbdr2rgb, ], ) @pytest.mark.parametrize( "shape", ([(3,), (2, 3), (4, 5, 3), (5, 4, 5, 3), (4, 5, 4, 5, 3)]) ) def test_nD_color_conversion(func, shape): img = np.random.rand(*shape) out = func(img) assert out.shape == img.shape @pytest.mark.parametrize( "shape", ([(4,), (2, 4), (4, 5, 4), (5, 4, 5, 4), (4, 5, 4, 5, 4)]) ) def test_rgba2rgb_nD(shape): img = 
np.random.rand(*shape) out = rgba2rgb(img) expected_shape = shape[:-1] + (3,) assert out.shape == expected_shape @pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64]) def test_rgba2rgb_dtypes(dtype): rgba = np.array([[[0, 0.5, 1, 0], [0, 0.5, 1, 1], [0, 0.5, 1, 0.5]]]).astype( dtype=dtype ) rgb = rgba2rgb(rgba) float_dtype = _supported_float_type(rgba.dtype) assert rgb.dtype == float_dtype expected = np.array([[[1, 1, 1], [0, 0.5, 1], [0.5, 0.75, 1]]]).astype(float) assert rgb.shape == expected.shape assert_almost_equal(rgb, expected) @pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64]) def test_lab_lch_roundtrip_dtypes(dtype): rgb = img_as_float(data.colorwheel()).astype(dtype=dtype, copy=False) lab = rgb2lab(rgb) float_dtype = _supported_float_type(dtype) assert lab.dtype == float_dtype lab2 = lch2lab(lab2lch(lab)) decimal = 4 if float_dtype == np.float32 else 7 assert_array_almost_equal(lab2, lab, decimal=decimal) @pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64]) def test_rgb2hsv_dtypes(dtype): rgb = img_as_float(data.colorwheel())[::16, ::16] rgb = rgb.astype(dtype=dtype, copy=False) hsv = rgb2hsv(rgb).reshape(-1, 3) float_dtype = _supported_float_type(dtype) assert hsv.dtype == float_dtype # ground truth from colorsys gt = np.array( [colorsys.rgb_to_hsv(pt[0], pt[1], pt[2]) for pt in rgb.reshape(-1, 3)] ) decimal = 3 if float_dtype == np.float32 else 7 assert_array_almost_equal(hsv, gt, decimal=decimal)
TestColorconv
python
getsentry__sentry
src/sentry/core/endpoints/scim/teams.py
{ "start": 4750, "end": 9835 }
class ____(SCIMEndpoint): publish_status = { "GET": ApiPublishStatus.PUBLIC, "POST": ApiPublishStatus.PUBLIC, } permission_classes = (OrganizationSCIMTeamPermission,) @extend_schema( operation_id="List an Organization's Paginated Teams", parameters=[GlobalParams.ORG_ID_OR_SLUG, SCIMQueryParamSerializer], request=None, responses={ 200: inline_sentry_response_serializer( "SCIMListResponseEnvelopeSCIMTeamIndexResponse", SCIMListTeamsResponse ), 401: RESPONSE_UNAUTHORIZED, 403: RESPONSE_FORBIDDEN, 404: RESPONSE_NOT_FOUND, }, examples=SCIMExamples.LIST_ORG_PAGINATED_TEAMS, ) def get(self, request: Request, organization: Organization, **kwds: Any) -> Response: """ Returns a paginated list of teams bound to a organization with a SCIM Groups GET Request. Note that the members field will only contain up to 10,000 members. """ query_params = self.get_query_parameters(request) queryset = Team.objects.filter( organization=organization, status=TeamStatus.ACTIVE ).order_by("slug") if query_params["filter"]: queryset = queryset.filter(slug__iexact=slugify(query_params["filter"])) def data_fn(offset, limit): return list(queryset[offset : offset + limit]) def on_results(results): results = serialize( results, None, TeamSCIMSerializer(expand=_team_expand(query_params["excluded_attributes"])), ) return self.list_api_format(results, queryset.count(), query_params["start_index"]) return self.paginate( request=request, on_results=on_results, paginator=GenericOffsetPaginator(data_fn=data_fn), default_per_page=query_params["count"], cursor_cls=SCIMCursor, ) @extend_schema( operation_id="Provision a New Team", parameters=[GlobalParams.ORG_ID_OR_SLUG], request=inline_serializer( name="SCIMTeamRequestBody", fields={ "displayName": serializers.CharField( help_text="The slug of the team that is shown in the UI.", required=True, ), }, ), responses={ 201: TeamSCIMSerializer, 401: RESPONSE_UNAUTHORIZED, 403: RESPONSE_FORBIDDEN, 404: RESPONSE_NOT_FOUND, }, examples=SCIMExamples.PROVISION_NEW_TEAM, 
) def post(self, request: Request, organization: Organization, **kwds: Any) -> Response: """ Create a new team bound to an organization via a SCIM Groups POST Request. The slug will have a normalization of uppercases/spaces to lowercases and dashes. Note that teams are always created with an empty member set. """ # shim displayName from SCIM api in order to work with # our regular team index POST request.data.update( { "name": request.data["displayName"], "slug": slugify(request.data["displayName"]), "idp_provisioned": True, } ) metrics.incr("sentry.scim.team.provision") serializer = TeamPostSerializer(data=request.data) if serializer.is_valid(): result = serializer.validated_data try: with transaction.atomic(router.db_for_write(Team)): team = Team.objects.create( name=result.get("name") or result["slug"], slug=result["slug"], idp_provisioned=result.get("idp_provisioned", False), organization_id=organization.id, ) team_created.send_robust( organization_id=organization.id, user_id=request.user.id, team_id=team.id, sender=None, ) except (IntegrityError, MaxSnowflakeRetryError): return Response( { "non_field_errors": [CONFLICTING_SLUG_ERROR], "detail": CONFLICTING_SLUG_ERROR, }, status=409, ) self.create_audit_entry( request=request, organization=organization, target_object=team.id, event=audit_log.get_event_id("TEAM_ADD"), data=team.get_audit_log_data(), ) return Response( serialize(team, request.user, TeamSCIMSerializer(expand=["members"])), status=201, ) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) @extend_schema(tags=["SCIM"]) @region_silo_endpoint
OrganizationSCIMTeamIndex
python
run-llama__llama_index
llama-index-core/llama_index/core/instrumentation/events/retrieval.py
{ "start": 154, "end": 451 }
class ____(BaseEvent): """ RetrievalStartEvent. Args: str_or_query_bundle (QueryType): Query bundle. """ str_or_query_bundle: QueryType @classmethod def class_name(cls) -> str: """Class name.""" return "RetrievalStartEvent"
RetrievalStartEvent
python
getsentry__sentry
src/sentry/identity/oauth2.py
{ "start": 9149, "end": 15032 }
class ____: access_token_url: str | None = None client_id: str | None = None client_secret: str | None = None def __init__(self, access_token_url=None, client_id=None, client_secret=None, *args, **kwargs): super().__init__(*args, **kwargs) if access_token_url is not None: self.access_token_url = access_token_url if client_id is not None: self.client_id = client_id if client_secret is not None: self.client_secret = client_secret def get_token_params(self, code: str, redirect_uri: str) -> dict[str, str | None]: return { "grant_type": "authorization_code", "code": code, "redirect_uri": redirect_uri, "client_id": self.client_id, "client_secret": self.client_secret, } def get_access_token(self, pipeline: IdentityPipeline, code: str) -> Response: data = self.get_token_params(code=code, redirect_uri=absolute_uri(_redirect_url(pipeline))) verify_ssl = pipeline.config.get("verify_ssl", True) return safe_urlopen(self.access_token_url, data=data, verify_ssl=verify_ssl) def exchange_token( self, request: HttpRequest, pipeline: IdentityPipeline, code: str ) -> dict[str, str]: with record_event( IntegrationPipelineViewType.TOKEN_EXCHANGE, pipeline.provider.key ).capture() as lifecycle: try: req: Response = self.get_access_token(pipeline, code) req.raise_for_status() except HTTPError as e: error_resp = e.response exc = ApiError.from_response(error_resp, url=self.access_token_url) sentry_sdk.capture_exception(exc) lifecycle.record_failure(exc) return { "error": f"Could not retrieve access token. 
Received {exc.code}: {exc.text}", } except SSLError: lifecycle.record_failure( "ssl_error", { "verify_ssl": pipeline.config.get("verify_ssl", True), "url": self.access_token_url, }, ) url = self.access_token_url return { "error": "Could not verify SSL certificate", "error_description": f"Ensure that {url} has a valid SSL certificate", } except ConnectionError: url = self.access_token_url lifecycle.record_failure("connection_error", {"url": url}) return { "error": "Could not connect to host or service", "error_description": f"Ensure that {url} is open to connections", } try: body = safe_urlread(req) content_type = req.headers.get("Content-Type", "").lower() if content_type.startswith("application/x-www-form-urlencoded"): return dict(parse_qsl(body)) return orjson.loads(body) except orjson.JSONDecodeError: lifecycle.record_failure( "json_error", { "content_type": content_type, "url": self.access_token_url, "status_code": req.status_code, }, ) return { "error": "Could not decode a JSON Response", "error_description": "We were not able to parse a JSON response, please try again.", } def dispatch(self, request: HttpRequest, pipeline: IdentityPipeline) -> HttpResponseBase: with record_event( IntegrationPipelineViewType.OAUTH_CALLBACK, pipeline.provider.key ).capture() as lifecycle: error = request.GET.get("error") state = request.GET.get("state") code = request.GET.get("code") if error: lifecycle.record_failure( IntegrationPipelineErrorReason.TOKEN_EXCHANGE_MISMATCHED_STATE, extra={"error": error}, ) return pipeline.error(f"{ERR_INVALID_STATE}\nError: {error}") if state != pipeline.fetch_state("state"): extra = { "error": "invalid_state", "state": state, "pipeline_state": pipeline.fetch_state("state"), "code": code, } lifecycle.record_failure( IntegrationPipelineErrorReason.TOKEN_EXCHANGE_MISMATCHED_STATE, extra=extra ) return pipeline.error(ERR_INVALID_STATE) if code is None: lifecycle.record_halt(IntegrationPipelineHaltReason.NO_CODE_PROVIDED) return pipeline.error("no 
code was provided") # separate lifecycle event inside exchange_token data = self.exchange_token(request, pipeline, code) # these errors are based off of the results of exchange_token, lifecycle errors are captured inside if "error_description" in data: error = data.get("error") return pipeline.error(data["error_description"]) if "error" in data: logger.info("identity.token-exchange-error", extra={"error": data["error"]}) return pipeline.error(f"{ERR_TOKEN_RETRIEVAL}\nError: {data['error']}") # we can either expect the API to be implicit and say "im looking for # blah within state data" or we need to pass implementation + call a # hook here pipeline.bind_state("data", data) return pipeline.next_step()
OAuth2CallbackView
python
pytorch__pytorch
torchgen/gen_vmap_plumbing.py
{ "start": 8861, "end": 9391 }
class ____: @method_with_native_function def __call__(self, f: NativeFunction) -> str | None: result = gen_vmap_plumbing(f) return result def gen_all_vmap_plumbing(native_functions: Sequence[NativeFunction]) -> str: body = "\n".join(list(mapMaybe(ComputeBatchRulePlumbing(), native_functions))) return f""" #pragma once #include <ATen/Operators.h> #include <ATen/functorch/PlumbingHelper.h> namespace at {{ namespace functorch {{ {body} }}}} // namespace at::functorch """
ComputeBatchRulePlumbing
python
nedbat__coveragepy
tests/test_api.py
{ "start": 24344, "end": 28502 }
class ____(CoverageTest): """Tests of the .switch_context() method.""" def make_test_files(self) -> None: """Create a simple file representing a method with two tests.""" self.make_file( "testsuite.py", """\ def timestwo(x): return x*2 def test_multiply_zero(): assert timestwo(0) == 0 def test_multiply_six(): assert timestwo(6) == 12 """, ) def test_switch_context_testrunner(self) -> None: # This test simulates a coverage-aware test runner, # measuring labeled coverage via public API self.make_test_files() # Test runner starts cov = coverage.Coverage() with cov.collect(): # Imports the test suite suite = import_local_file("testsuite") # Measures test case 1 cov.switch_context("multiply_zero") suite.test_multiply_zero() # Measures test case 2 cov.switch_context("multiply_six") suite.test_multiply_six() # Runner finishes cov.save() # Labeled data is collected data = cov.get_data() assert ["", "multiply_six", "multiply_zero"] == sorted(data.measured_contexts()) filenames = self.get_measured_filenames(data) suite_filename = filenames["testsuite.py"] data.set_query_context("multiply_six") assert [2, 8] == sorted_lines(data, suite_filename) data.set_query_context("multiply_zero") assert [2, 5] == sorted_lines(data, suite_filename) def test_switch_context_with_static(self) -> None: # This test simulates a coverage-aware test runner, # measuring labeled coverage via public API, # with static label prefix. 
self.make_test_files() # Test runner starts cov = coverage.Coverage(context="mysuite") with cov.collect(): # Imports the test suite suite = import_local_file("testsuite") # Measures test case 1 cov.switch_context("multiply_zero") suite.test_multiply_zero() # Measures test case 2 cov.switch_context("multiply_six") suite.test_multiply_six() # Runner finishes cov.save() # Labeled data is collected data = cov.get_data() expected = ["mysuite", "mysuite|multiply_six", "mysuite|multiply_zero"] assert expected == sorted(data.measured_contexts()) filenames = self.get_measured_filenames(data) suite_filename = filenames["testsuite.py"] data.set_query_context("mysuite|multiply_six") assert [2, 8] == sorted_lines(data, suite_filename) data.set_query_context("mysuite|multiply_zero") assert [2, 5] == sorted_lines(data, suite_filename) def test_dynamic_context_conflict(self) -> None: cov = coverage.Coverage(source=["."]) cov.set_option("run:dynamic_context", "test_function") with cov.collect(): with pytest.warns(Warning) as warns: # Switch twice, but only get one warning. cov.switch_context("test1") cov.switch_context("test2") assert_coverage_warnings(warns, "Conflicting dynamic contexts (dynamic-conflict)") def test_unknown_dynamic_context(self) -> None: cov = coverage.Coverage() cov.set_option("run:dynamic_context", "no-idea") with pytest.raises(Exception, match="Don't understand dynamic_context setting: 'no-idea'"): cov.start() def test_switch_context_unstarted(self) -> None: # Coverage must be started to switch context msg = "Cannot switch context, coverage is not started" cov = coverage.Coverage() with pytest.raises(CoverageException, match=msg): cov.switch_context("test1") with cov.collect(): cov.switch_context("test2") with pytest.raises(CoverageException, match=msg): cov.switch_context("test3")
SwitchContextTest
python
PrefectHQ__prefect
tests/utilities/test_timeout.py
{ "start": 106, "end": 868 }
class ____(TimeoutError): ... def test_timeout_raises_custom_error_type_sync(): with pytest.raises(CustomTimeoutError): with timeout(seconds=0.1, timeout_exc_type=CustomTimeoutError): time.sleep(1) async def test_timeout_raises_custom_error_type_async(): with pytest.raises(CustomTimeoutError): with timeout_async(seconds=0.1, timeout_exc_type=CustomTimeoutError): await asyncio.sleep(1) @pytest.mark.parametrize("timeout_context", [timeout, timeout_async]) def test_timeout_raises_if_non_timeout_exception_type_passed(timeout_context): with pytest.raises(ValueError, match="must be a subclass of `TimeoutError`"): with timeout_context(timeout_exc_type=Exception): pass
CustomTimeoutError
python
pytorch__pytorch
torch/distributed/checkpoint/metadata.py
{ "start": 3271, "end": 3406 }
class ____: properties: TensorProperties size: torch.Size chunks: list[ChunkStorageMetadata] @dataclass
TensorStorageMetadata
python
ApeWorX__ape
tests/functional/test_explorer.py
{ "start": 210, "end": 772 }
class ____(ExplorerAPI): def get_transaction_url(self, transaction_hash: str) -> str: return "" def get_address_url(self, address: "AddressType") -> str: return "" def get_contract_type(self, address: "AddressType") -> Optional["ContractType"]: return None def publish_contract(self, address: "AddressType"): return @pytest.fixture def explorer(networks): return MyExplorer(name="mine", network=networks.ethereum.local) def test_supports_chain(explorer): assert not explorer.supports_chain(1)
MyExplorer
python
huggingface__transformers
src/transformers/models/mllama/processing_mllama.py
{ "start": 6647, "end": 16870 }
class ____(ProcessorMixin): r""" Constructs a Mllama processor which wraps [`MllamaImageProcessor`] and [`PretrainedTokenizerFast`] into a single processor that inherits both the image processor and tokenizer functionalities. See the [`~MllamaProcessor.__call__`] and [`~OwlViTProcessor.decode`] for more information. The preferred way of passing kwargs is as a dictionary per modality, see usage example below. ```python from transformers import MllamaProcessor from PIL import Image processor = MllamaProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision") processor( images=your_pil_image, text=["<|image|>If I had to write a haiku for this one"], images_kwargs = {"size": {"height": 448, "width": 448}}, text_kwargs = {"padding": "right"}, common_kwargs = {"return_tensors": "pt"}, ) ``` Args: image_processor ([`MllamaImageProcessor`]): The image processor is a required input. tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`]): The tokenizer is a required input. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. """ def __init__(self, image_processor, tokenizer, chat_template=None): if not hasattr(tokenizer, "image_token"): self.image_token = "<|image|>" self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token) else: self.image_token = tokenizer.image_token self.image_token_id = tokenizer.image_token_id self.python_token = "<|python_tag|>" self.python_token_id = tokenizer.convert_tokens_to_ids(self.python_token) self.bos_token = tokenizer.bos_token super().__init__(image_processor, tokenizer, chat_template=chat_template) def __call__( self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None, **kwargs: Unpack[MllamaProcessorKwargs], ) -> BatchFeature: """ Main method to prepare text(s) and image(s) to be fed as input to the model. 
This method forwards the `text` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` arguments to MllamaImageProcessor's [`~MllamaImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. 
TODO: add aspect_ratio_ids and aspect_ratio_mask and cross_attention_mask """ if text is None and images is None: raise ValueError("You must specify either text or images.") output_kwargs = self._merge_kwargs( MllamaProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None) data = {} if text is not None: if isinstance(text, str): text = [text] elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)): raise ValueError("Invalid input text. Please provide a string, or a list of strings") n_images_in_text = [t.count(self.image_token) for t in text] text = [build_string_from_input(text_item, self.bos_token, self.image_token) for text_item in text] encoding = self.tokenizer(text, **output_kwargs["text_kwargs"]) self._check_special_mm_tokens(text, encoding, modalities=["image"]) n_images_in_ids = [token_ids.count(self.image_token_id) for token_ids in encoding["input_ids"]] data.update(encoding) n_images_in_images = [0] if images is not None: images = self.image_processor.fetch_images(images) images = make_nested_list_of_images(images) n_images_in_images = [len(sample) for sample in images] if text is not None: if any(batch_img == 0 for batch_img in n_images_in_text) and not all( batch_img == 0 for batch_img in n_images_in_text ): raise ValueError( "If a batch of text is provided, there should be either no images or at least one image per sample" ) if sum(n_images_in_text) > 0 and ( n_images_in_images != n_images_in_text or n_images_in_ids != n_images_in_images ): if images is None: raise ValueError("No image were provided, but there are image tokens in the prompt") else: add_message = "" if sum(n_images_in_images) == sum(n_images_in_text) and n_images_in_images != n_images_in_text: add_message = "Make sure to pass your images as a nested list, where each sub-list holds images per batch" elif n_images_in_ids != n_images_in_images: add_message = "If you 
activated truncation with `max_length`, increase the `max_length` so image tokens aren't cropped." raise ValueError( f"The number of image tokens in each text ({n_images_in_text}) should be the same as the " f"number of provided images per batch ({n_images_in_images}). {add_message}" ) if images is not None: image_features = self.image_processor(images, **output_kwargs["images_kwargs"]) num_tiles = image_features.pop("num_tiles") data.update(image_features) # Create cross attention mask if images is not None and text is not None: cross_attention_token_mask = [ get_cross_attention_token_mask(token_ids, self.image_token_id) for token_ids in encoding["input_ids"] ] cross_attention_mask = convert_sparse_cross_attention_mask_to_dense( cross_attention_token_mask, num_tiles=num_tiles, max_num_tiles=self.image_processor.max_image_tiles, length=max(len(input_ids) for input_ids in encoding["input_ids"]), ) data["cross_attention_mask"] = cross_attention_mask return BatchFeature(data=data, tensor_type=return_tensors) def post_process_image_text_to_text( self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs ): """ Post-process the output of the model to decode the text. Args: generated_outputs (`torch.Tensor` or `np.ndarray`): The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)` or `(sequence_length,)`. skip_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method. **kwargs: Additional arguments to be passed to the tokenizer's `batch_decode method`. Returns: `list[str]`: The decoded text. 
""" return self.tokenizer.batch_decode( generated_outputs, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, ) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names # Remove `num_tiles`, it is popped and used only when processing. Make a copy of list when removing # otherwise `self.image_processor.model_input_names` is also modified image_processor_input_names = [name for name in image_processor_input_names if name != "num_tiles"] return list(tokenizer_input_names + image_processor_input_names + ["cross_attention_mask"]) __all__ = ["MllamaProcessor"]
MllamaProcessor
python
ansible__ansible
test/integration/targets/template/role_filter/filter_plugins/myplugin.py
{ "start": 60, "end": 201 }
class ____(object): def filters(self): return {'parse_ip': self.parse_ip} def parse_ip(self, ip): return ip
FilterModule
python
run-llama__llama_index
llama-index-core/llama_index/core/voice_agents/websocket.py
{ "start": 164, "end": 1671 }
class ____(ABC): """ Abstract base class for a voice agent websocket. Attributes: uri (str): URL of the websocket. ws (Optional[ClientConnection]): Private attribute, initialized as None, represents the websocket client. """ def __init__( self, uri: str, ): self.uri = uri self.ws: Optional[ClientConnection] = None def connect(self) -> None: """ Connect to the websocket. Args: None Returns: out (None): This function does not return anything. """ async def aconnect(self) -> None: """ Asynchronously connect to the websocket. The implementation should be: ``` self.ws = await websockets.connect(uri=self.uri) ``` Args: None Returns: out (None): This function does not return anything. """ @abstractmethod async def send(self, data: Any) -> None: """ Send data to the websocket. Args: data (Any): Data to send to the websocket. Returns: out (None): This function does not return anything. """ ... @abstractmethod async def close(self) -> None: """ Close the connection with the websocket. Args: None Returns: out (None): This function does not return anything. """ ...
BaseVoiceAgentWebsocket
python
ray-project__ray
python/ray/dag/tests/experimental/test_torch_tensor_transport.py
{ "start": 9055, "end": 11872 }
class ____: """Tests driver to worker tensor transport with default device.""" def create_and_execute_dag(self, actor, device, tensor_input, is_dict=False): """Create a DAG with tensor transport and execute it.""" with InputNode() as inp: method = actor.echo_dict_device if is_dict else actor.echo_device dag = method.bind(inp.with_tensor_transport(device=device)) compiled_dag = dag.experimental_compile() return compiled_dag.execute(tensor_input) def test_src_cpu_tensor_dst_cpu_node(self, ray_start_regular): actor = Actor.remote() ref = run_driver_to_worker_dag(actor, "default", torch.tensor([1])) assert ray.get(ref) == "cpu" @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU") def test_src_gpu_tensor_dst_cpu_node(self, ray_start_regular): actor = Actor.remote() ref = run_driver_to_worker_dag( actor, "default", torch.tensor([1], device="cuda") ) with pytest.raises( RayTaskError, match="RuntimeError: No CUDA GPUs are available" ): ray.get(ref) @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU") def test_src_cpu_tensor_dst_gpu_node(self, ray_start_regular): actor = Actor.options(num_gpus=1).remote() ref = run_driver_to_worker_dag(actor, "default", torch.tensor([1])) assert ray.get(ref) == "cpu" @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU") def test_src_gpu_tensor_dst_gpu_node(self, ray_start_regular): actor = Actor.options(num_gpus=1).remote() ref = run_driver_to_worker_dag( actor, "default", torch.tensor([1], device="cuda") ) assert ray.get(ref) == "cuda:0" @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU") def test_src_mix_tensors_dst_cpu_node(self, ray_start_regular): actor = Actor.remote() tensor_dict = { "cpu_tensor": torch.tensor([1]), "gpu_tensor": torch.tensor([1], device="cuda"), } ref = run_driver_to_worker_dag(actor, "default", tensor_dict, is_dict=True) with pytest.raises( RayTaskError, match="RuntimeError: No CUDA GPUs are available" ): ray.get(ref) @pytest.mark.skipif(not USE_GPU, reason="Test requires GPU") def 
test_src_mix_tensors_dst_gpu_node(self, ray_start_regular): actor = Actor.options(num_gpus=1).remote() tensor_dict = { "cpu_tensor": torch.tensor([1]), "gpu_tensor": torch.tensor([1], device="cuda"), } ref = run_driver_to_worker_dag(actor, "default", tensor_dict, is_dict=True) assert ray.get(ref) == {"cpu_tensor": "cpu", "gpu_tensor": "cuda:0"}
TestDriverToWorkerDeviceDefault
python
huggingface__transformers
src/transformers/models/superpoint/image_processing_superpoint.py
{ "start": 3760, "end": 16471 }
class ____(BaseImageProcessor): r""" Constructs a SuperPoint image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"height": 480, "width": 640}`): Resolution of the output image after `resize` is applied. Only has an effect if `do_resize` is set to `True`. Can be overridden by `size` in the `preprocess` method. resample (`Resampling`, *optional*, defaults to `2`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_grayscale (`bool`, *optional*, defaults to `False`): Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method. 
""" model_input_names = ["pixel_values"] valid_kwargs = SuperPointImageProcessorKwargs def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: float = 1 / 255, do_grayscale: bool = False, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 480, "width": 640} size = get_size_dict(size, default_to_square=False) self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_grayscale = do_grayscale def resize( self, image: np.ndarray, size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Resize an image. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dictionary of the form `{"height": int, "width": int}`, specifying the size of the output image. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the output image. If not provided, it will be inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" size = get_size_dict(size, default_to_square=False) return resize( image, size=(size["height"], size["width"]), data_format=data_format, input_data_format=input_data_format, **kwargs, ) def preprocess( self, images, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_grayscale: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> BatchFeature: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_grayscale (`bool`, *optional*, defaults to `self.do_grayscale`): Whether to convert the image to grayscale. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. 
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_grayscale = do_grayscale if do_grayscale is not None else self.do_grayscale size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor") if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") # All transformations expect numpy arrays. 
images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_grayscale: images = [convert_to_grayscale(image, input_data_format=input_data_format) for image in images] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) def post_process_keypoint_detection( self, outputs: "SuperPointKeypointDescriptionOutput", target_sizes: Union[TensorType, list[tuple]] ) -> list[dict[str, "torch.Tensor"]]: """ Converts the raw output of [`SuperPointForKeypointDetection`] into lists of keypoints, scores and descriptors with coordinates absolute to the original image sizes. Args: outputs ([`SuperPointKeypointDescriptionOutput`]): Raw outputs of the model containing keypoints in a relative (x, y) format, with scores and descriptors. target_sizes (`torch.Tensor` or `list[tuple[int, int]]`): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. This must be the original image size (before any processing). 
Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the keypoints in absolute format according to target_sizes, scores and descriptors for an image in the batch as predicted by the model. """ if len(outputs.mask) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the mask") if isinstance(target_sizes, list): image_sizes = torch.tensor(target_sizes, device=outputs.mask.device) else: if target_sizes.shape[1] != 2: raise ValueError( "Each element of target_sizes must contain the size (h, w) of each image of the batch" ) image_sizes = target_sizes # Flip the image sizes to (width, height) and convert keypoints to absolute coordinates image_sizes = torch.flip(image_sizes, [1]) masked_keypoints = outputs.keypoints * image_sizes[:, None] # Convert masked_keypoints to int masked_keypoints = masked_keypoints.to(torch.int32) results = [] for image_mask, keypoints, scores, descriptors in zip( outputs.mask, masked_keypoints, outputs.scores, outputs.descriptors ): indices = torch.nonzero(image_mask).squeeze(1) keypoints = keypoints[indices] scores = scores[indices] descriptors = descriptors[indices] results.append({"keypoints": keypoints, "scores": scores, "descriptors": descriptors}) return results __all__ = ["SuperPointImageProcessor"]
SuperPointImageProcessor
python
vyperlang__vyper
vyper/builtins/functions.py
{ "start": 57453, "end": 59116 }
class ____(_CreateBase): # create an EIP1167 "minimal proxy" to the target contract _id = "create_minimal_proxy_to" _inputs = [("target", AddressT())] def _add_gas_estimate(self, args, should_use_create2): a, b, c = eip1167_bytecode() bytecode_len = 20 + len(b) + len(c) return _create_addl_gas_estimate(bytecode_len, should_use_create2) def _build_create_IR(self, expr, args, context, value, salt, revert_on_failure): target_address = args[0] buf = context.new_internal_variable(BytesT(96)) loader_evm, forwarder_pre_evm, forwarder_post_evm = eip1167_bytecode() # Adjust to 32-byte boundaries preamble_length = len(loader_evm) + len(forwarder_pre_evm) forwarder_preamble = bytes_to_int( loader_evm + forwarder_pre_evm + b"\x00" * (32 - preamble_length) ) forwarder_post = bytes_to_int(forwarder_post_evm + b"\x00" * (32 - len(forwarder_post_evm))) # left-align the target if target_address.is_literal: # note: should move to optimizer once we have # codesize optimization pipeline aligned_target = args[0].value << 96 else: aligned_target = shl(96, target_address) buf_len = preamble_length + 20 + len(forwarder_post_evm) return [ "seq", ["mstore", buf, forwarder_preamble], ["mstore", add_ofst(buf, preamble_length), aligned_target], ["mstore", add_ofst(buf, preamble_length + 20), forwarder_post], _create_ir(value, buf, buf_len, salt, revert_on_failure), ]
CreateMinimalProxyTo
python
pytorch__pytorch
torch/_dynamo/variables/functions.py
{ "start": 90075, "end": 94843 }
class ____(VariableTracker): _nonvar_fields = { "fn", "wrapped_fn", "traceable_fn", *VariableTracker._nonvar_fields, } @classmethod @functools.cache def _get_polyfill_handlers(cls) -> dict[Callable[..., Any], types.FunctionType]: return {} @classmethod def create_with_source( cls, value: Any, source: Source ) -> "PolyfilledFunctionVariable": install_guard(source.make_guard(GuardBuilder.CLOSURE_MATCH)) return cls(value, source=source) def __init__(self, fn: _F, **kwargs: Any) -> None: super().__init__(**kwargs) # pyrefly: ignore[invalid-type-var] self.fn: _F = fn handler = self._get_polyfill_handlers().get(fn, fn) traceable_fn = None assert callable(handler), f"Polyfill handler {handler} is not callable for {fn}" for candidate_attr in ( "__torch_dynamo_polyfill__", # registered polyfill "__python_implementation__", # self handler from third-party libraries ): candidate = getattr(handler, candidate_attr, None) if candidate: assert callable(candidate) traceable_fn = candidate break else: raise RuntimeError( f"Polyfill handler {handler} does not have a traceable function" ) # pyrefly: ignore[invalid-type-var] self.wrapped_fn = handler # pyrefly: ignore[invalid-type-var] self.traceable_fn: _F = traceable_fn @property def polyfill_fn(self) -> Callable[..., Any]: return self.traceable_fn def can_constant_fold_through(self) -> bool: return getattr( self.wrapped_fn, "__torch_dynamo_can_constant_fold_through__", False ) def get_function(self) -> Any: return self.as_python_constant() def call_function( self, tx: "InstructionTranslator", args: Sequence[VariableTracker], kwargs: dict[str, VariableTracker], ) -> VariableTracker: if self.can_constant_fold_through() and check_unspec_or_constant_args( args, kwargs ): result = ( self.fn( # use the original function which is faster than the polyfill *[x.as_python_constant() for x in args], **{k: v.as_python_constant() for k, v in kwargs.items()}, ) ) return VariableTracker.build(tx, result) # Special case for sum on tuple/list of 
ints if ( self.fn is builtins.sum and len(args) == 1 and not kwargs and isinstance(args[0], (variables.ListVariable, variables.TupleVariable)) and all( (isinstance(x, variables.ConstantVariable) and isinstance(x.value, int)) or (isinstance(x, variables.SymNodeVariable) and x.python_type() is int) for x in args[0].items ) ): return variables.SymNodeVariable.create( tx, tx.output.create_proxy( "call_function", torch.sym_sum, (tuple(a.as_proxy() for a in args[0].items),), {}, ), sym_num=torch.sym_sum( [ ( x.value if isinstance(x, variables.ConstantVariable) else x.sym_num # type: ignore[attr-defined] ) for x in args[0].items ] ), ) traceable_function_variable = VariableTracker.build(tx, self.traceable_fn) return traceable_function_variable.call_function(tx, args, kwargs) def call_method( self, tx: "InstructionTranslator", name: str, args: list[VariableTracker], kwargs: dict[str, VariableTracker], ) -> VariableTracker: if name == "__call__": return self.call_function(tx, args, kwargs) method = getattr(self.fn, name, None) if not (method or is_function(method)): raise_type_error_exc(tx, f"Cannot find callable {name} in {self.fn}") options = {} if self.source: options["source"] = AttrSource(self.source, name) # pyrefly: ignore[bad-specialization] polyfilled_method_variable = PolyfilledFunctionVariable(method, **options) return polyfilled_method_variable.call_function(tx, args, kwargs) def as_python_constant(self) -> Any: return self.fn
PolyfilledFunctionVariable
python
falconry__falcon
tests/test_http_method_routing.py
{ "start": 2347, "end": 2697 }
class ____: pass def capture(func): @wraps(func) def with_capture(*args, **kwargs): self = args[0] self.called = True self.req, self.resp = args[1:] func(*args, **kwargs) return with_capture def selfless_decorator(func): def faulty(req, resp, foo, bar): pass return faulty
Stonewall
python
huggingface__transformers
src/transformers/models/xlm/modeling_xlm.py
{ "start": 17511, "end": 22539 }
class ____(nn.Module): r""" Compute a single vector summary of a sequence hidden states. Args: config ([`XLMConfig`]): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are: - `"last"` -- Take the last token hidden state (like XLNet) - `"first"` -- Take the first token hidden state (like Bert) - `"mean"` -- Take the mean of all tokens hidden states - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - `"attn"` -- Not implemented now, use multi-head attention - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes (otherwise to `config.hidden_size`). - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, another string or `None` will add no activation. - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. """ def __init__(self, config: XLMConfig): super().__init__() self.summary_type = getattr(config, "summary_type", "last") if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. 
https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.summary = nn.Identity() if hasattr(config, "summary_use_proj") and config.summary_use_proj: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = nn.Linear(config.hidden_size, num_classes) activation_string = getattr(config, "summary_activation", None) self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity() self.first_dropout = nn.Identity() if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0: self.first_dropout = nn.Dropout(config.summary_first_dropout) self.last_dropout = nn.Identity() if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(config.summary_last_dropout) def forward( self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None ) -> torch.FloatTensor: """ Compute a single vector summary of a sequence hidden states. Args: hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`): The hidden states of the last layer. cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*): Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token. Returns: `torch.FloatTensor`: The summary of the sequence hidden states. 
""" if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = hidden_states.mean(dim=1) elif self.summary_type == "cls_index": if cls_index is None: cls_index = torch.full_like( hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long, ) else: cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) elif self.summary_type == "attn": raise NotImplementedError output = self.first_dropout(output) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output) return output
XLMSequenceSummary