language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/matchSequence1.py
{ "start": 10209, "end": 14282 }
class ____(Protocol): def __lt__(self, __other: Any) -> bool: ... def __le__(self, __other: Any) -> bool: ... SupportsLessThanT = TypeVar("SupportsLessThanT", bound=SupportsLessThan) def sort(seq: List[SupportsLessThanT]) -> List[SupportsLessThanT]: match seq: case [] | [_]: reveal_type(seq, expected_text="List[SupportsLessThanT@sort]") return seq case [x, y] if x <= y: reveal_type(seq, expected_text="List[SupportsLessThanT@sort]") return seq case [x, y]: reveal_type(seq, expected_text="List[SupportsLessThanT@sort]") return [y, x] case [x, y, z] if x <= y <= z: reveal_type(seq, expected_text="List[SupportsLessThanT@sort]") return seq case [x, y, z] if x > y > z: reveal_type(seq, expected_text="List[SupportsLessThanT@sort]") return [z, y, x] case [p, *rest]: a = sort([x for x in rest if x <= p]) b = sort([x for x in rest if p < x]) reveal_type(seq, expected_text="List[SupportsLessThanT@sort]") return a + [p] + b return seq def test_exceptions(seq: Union[str, bytes, bytearray]): match seq: case [x, y]: reveal_type(x, expected_text="Never") reveal_type(y, expected_text="Never") return seq def test_object1(seq: object): match seq: case (a1, a2) as a3: reveal_type(a1, expected_text="Unknown") reveal_type(a2, expected_text="Unknown") reveal_type(a3, expected_text="Sequence[Unknown]") reveal_type(seq, expected_text="Sequence[Unknown]") case (*b1, b2) as b3: reveal_type(b1, expected_text="list[Unknown]") reveal_type(b2, expected_text="Unknown") reveal_type(b3, expected_text="Sequence[Unknown]") reveal_type(seq, expected_text="Sequence[Unknown]") case (c1, *c2) as c3: reveal_type(c1, expected_text="Unknown") reveal_type(c2, expected_text="list[Unknown]") reveal_type(c3, expected_text="Sequence[Unknown]") reveal_type(seq, expected_text="Sequence[Unknown]") case (d1, *d2, d3) as d4: reveal_type(d1, expected_text="Unknown") reveal_type(d2, expected_text="list[Unknown]") reveal_type(d3, expected_text="Unknown") reveal_type(d4, expected_text="Sequence[Unknown]") reveal_type(seq, 
expected_text="Sequence[Unknown]") case (3, *e1) as e2: reveal_type(e1, expected_text="list[Unknown]") reveal_type(e2, expected_text="Sequence[Unknown]") reveal_type(seq, expected_text="Sequence[Unknown]") case ("hi", *f1) as f2: reveal_type(f1, expected_text="list[Unknown]") reveal_type(f2, expected_text="Sequence[Unknown]") reveal_type(seq, expected_text="Sequence[Unknown]") case (*g1, "hi") as g2: reveal_type(g1, expected_text="list[Unknown]") reveal_type(g2, expected_text="Sequence[Unknown]") reveal_type(seq, expected_text="Sequence[Unknown]") case [1, "hi", True] as h1: reveal_type(h1, expected_text="Sequence[Unknown]") reveal_type(seq, expected_text="Sequence[Unknown]") case [1, i1] as i2: reveal_type(i1, expected_text="Unknown") reveal_type(i2, expected_text="Sequence[Unknown]") reveal_type(seq, expected_text="Sequence[Unknown]") def test_object2(value_to_match: object): match value_to_match: case [*a1]: reveal_type(a1, expected_text="list[Unknown]") case b1: reveal_type(b1, expected_text="object") def test_sequence(value_to_match: Sequence[Any]): match value_to_match: case [*a1]: reveal_type(a1, expected_text="list[Any]") case b1: reveal_type(b1, expected_text="Never") _T = TypeVar("_T")
SupportsLessThan
python
readthedocs__readthedocs.org
readthedocs/forms.py
{ "start": 228, "end": 940 }
class ____(SignupForm): """Custom signup form that includes a checkbox to subscribe to a newsletter.""" receive_newsletter = forms.BooleanField( required=False, label=("Subscribe to our newsletter to get product updates."), ) field_order = [ "email", "username", "password1", "password2", "receive_newsletter", ] def save(self, request): user = super().save(request) receive_newsletter = self.cleaned_data.get("receive_newsletter") profile, _ = UserProfile.objects.get_or_create(user=user) profile.mailing_list = receive_newsletter profile.save() return user
SignupFormWithNewsletter
python
airbytehq__airbyte
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py
{ "start": 33325, "end": 33793 }
class ____(BaseModel): type: Literal["RecordFilter"] condition: Optional[str] = Field( "", description="The predicate to filter a record. Records will be removed if evaluated to False.", examples=[ "{{ record['created_at'] >= stream_interval['start_time'] }}", "{{ record.status in ['active', 'expired'] }}", ], ) parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
RecordFilter
python
kamyu104__LeetCode-Solutions
Python/top-k-frequent-elements.py
{ "start": 2195, "end": 2435 }
class ____(object): def topKFrequent(self, nums, k): """ :type nums: List[int] :type k: int :rtype: List[int] """ return [key for key, _ in collections.Counter(nums).most_common(k)]
Solution3
python
doocs__leetcode
solution/0400-0499/0466.Count The Repetitions/Solution.py
{ "start": 0, "end": 518 }
class ____: def getMaxRepetitions(self, s1: str, n1: int, s2: str, n2: int) -> int: n = len(s2) d = {} for i in range(n): cnt = 0 j = i for c in s1: if c == s2[j]: j += 1 if j == n: cnt += 1 j = 0 d[i] = (cnt, j) ans = 0 j = 0 for _ in range(n1): cnt, j = d[j] ans += cnt return ans // n2
Solution
python
sqlalchemy__sqlalchemy
test/ext/test_mutable.py
{ "start": 37745, "end": 38444 }
class ____: @classmethod def define_tables(cls, metadata): Table( "foo", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("x", Integer), Column("y", Integer), Column("unrelated_data", String(50)), ) def setup_test(self): from sqlalchemy.ext import mutable mutable._setup_composite_listener() def teardown_test(self): # clear out mapper events Mapper.dispatch._clear() ClassManager.dispatch._clear() @classmethod def _type_fixture(cls): return Point
_CompositeTestBase
python
getsentry__sentry
src/sentry/integrations/jira_server/client.py
{ "start": 1134, "end": 8903 }
class ____(ApiClient): COMMENTS_URL = "/rest/api/2/issue/%s/comment" COMMENT_URL = "/rest/api/2/issue/%s/comment/%s" STATUS_URL = "/rest/api/2/status" CREATE_URL = "/rest/api/2/issue" ISSUE_URL = "/rest/api/2/issue/%s" ISSUE_FIELDS_URL = "/rest/api/2/issue/createmeta/%s/issuetypes/%s" ISSUE_TYPES_URL = "/rest/api/2/issue/createmeta/%s/issuetypes" PRIORITIES_URL = "/rest/api/2/priority" PROJECT_URL = "/rest/api/2/project" SEARCH_URL = "/rest/api/2/search/" VERSIONS_URL = "/rest/api/2/project/%s/versions" USERS_URL = "/rest/api/2/user/assignable/search" USER_URL = "/rest/api/2/user" SERVER_INFO_URL = "/rest/api/2/serverInfo" ASSIGN_URL = "/rest/api/2/issue/%s/assignee" TRANSITION_URL = "/rest/api/2/issue/%s/transitions" AUTOCOMPLETE_URL = "/rest/api/2/jql/autocompletedata/suggestions" PROPERTIES_URL = "/rest/api/3/issue/%s/properties/%s" integration_name = IntegrationProviderSlug.JIRA_SERVER.value # This timeout is completely arbitrary. Jira doesn't give us any # caching headers to work with. Ideally we want a duration that # lets the user make their second jira issue with cached data. 
cache_time = 240 def __init__( self, integration: RpcIntegration | Integration, identity: RpcIdentity, logging_context: Any | None = None, ): self.base_url = integration.metadata["base_url"] self.identity = identity super().__init__( integration_id=integration.id, verify_ssl=integration.metadata["verify_ssl"], logging_context=logging_context, ) def get_cache_prefix(self) -> str: return "sentry-jira-server:" def finalize_request(self, prepared_request: PreparedRequest) -> PreparedRequest: return self.authorize_request(prepared_request=prepared_request) def authorize_request(self, prepared_request: PreparedRequest): """Jira Server authorizes with RSA-signed OAuth1 scheme""" if not self.identity: return prepared_request auth_scheme = OAuth1( client_key=self.identity.data["consumer_key"], rsa_key=self.identity.data["private_key"], resource_owner_key=self.identity.data["access_token"], resource_owner_secret=self.identity.data["access_token_secret"], signature_method=SIGNATURE_RSA, signature_type="auth_header", decoding=None, ) prepared_request.prepare_auth(auth=auth_scheme) return prepared_request def user_id_get_param(self) -> str: return "username" def user_id_field(self) -> str: return "name" def user_query_param(self) -> str: return "username" def get_issue(self, issue_id): return self.get(self.ISSUE_URL % (issue_id,)) def search_issues(self, query): q = query.replace('"', '\\"') # check if it looks like an issue id if ISSUE_KEY_RE.match(query): jql = f'id="{q}"' else: jql = f'text ~ "{q}"' return self.get(self.SEARCH_URL, params={"jql": jql}) def create_comment(self, issue_key, comment): return self.post(self.COMMENTS_URL % issue_key, data={"body": comment}) def update_comment(self, issue_key, comment_id, comment): return self.put(self.COMMENT_URL % (issue_key, comment_id), data={"body": comment}) def get_projects_list(self, cached: bool = True): if not cached: return self.get(self.PROJECT_URL) return self.get_cached(self.PROJECT_URL) def get_issue_types(self, 
project_id): # Get a list of issue types for the given project return self.get_cached(self.ISSUE_TYPES_URL % (project_id)) def get_issue_fields(self, project_id, issue_type_id): # Get a list of fields for the issue type and project return self.get_cached(self.ISSUE_FIELDS_URL % (project_id, issue_type_id)) def get_project_key_for_id(self, project_id) -> str: if not project_id: return "" projects = self.get_projects_list() for project in projects: if project["id"] == project_id: return project["key"] return "" def get_versions(self, project): return self.get_cached(self.VERSIONS_URL % project) def get_priorities(self): """ XXX(schew2381): There is an existing bug where we fetch and show all project priorities instead of scoping them to the selected project. This is fine when manually creating a Jira Server issue b/c we surface that the selected priority is not available. However for the alert rule action, you can save the action with an invalid priority for the chosen project. We surface this issue externally in our docs: https://docs.sentry.io/product/integrations/issue-tracking/jira/#issue-alert-not-creating-jira-issues We are limited by the Jira Server API b/c fetching priorities requires global/project admin permissions. There is currently no workaround for this! Please DO NOT attempt to use the following APIs: https://docs.atlassian.com/software/jira/docs/api/REST/9.11.0/#api/2/priorityschemes-getPrioritySchemes https://docs.atlassian.com/software/jira/docs/api/REST/9.11.0/#api/2/project/{projectKeyOrId}/priorityscheme-getAssignedPriorityScheme """ return self.get_cached(self.PRIORITIES_URL) def search_users_for_project(self, project, username): # Jira Server wants a project key, while cloud is indifferent. 
project_key = self.get_project_key_for_id(project) return self.get_cached( self.USERS_URL, params={"project": project_key, self.user_query_param(): username} ) def search_users_for_issue(self, issue_key, email): return self.get_cached( self.USERS_URL, params={"issueKey": issue_key, self.user_query_param(): email} ) def get_user(self, user_id): user_id_get_param = self.user_id_get_param() return self.get_cached(self.USER_URL, params={user_id_get_param: user_id}) def create_issue(self, raw_form_data): data = {"fields": raw_form_data} return self.post(self.CREATE_URL, data=data) def get_server_info(self): return self.get(self.SERVER_INFO_URL) def get_valid_statuses(self): return self.get_cached(self.STATUS_URL) def get_transitions(self, issue_key): return self.get_cached(self.TRANSITION_URL % issue_key)["transitions"] def transition_issue(self, issue_key, transition_id): return self.post( self.TRANSITION_URL % issue_key, data={"transition": {"id": transition_id}} ) def assign_issue(self, key, name_or_account_id): user_id_field = self.user_id_field() return self.put(self.ASSIGN_URL % key, data={user_id_field: name_or_account_id}) def set_issue_property(self, issue_key, badge_num): module_key = "sentry-issues-glance" properties_key = f"com.atlassian.jira.issue:{JIRA_KEY}:{module_key}:status" data = {"type": "badge", "value": {"label": badge_num}} return self.put(self.PROPERTIES_URL % (issue_key, properties_key), data=data) def get_field_autocomplete(self, name, value): if name.startswith(CUSTOMFIELD_PREFIX): # Transform `customfield_0123` into `cf[0123]` cf_id = name[len(CUSTOMFIELD_PREFIX) :] jql_name = f"cf[{cf_id}]" else: jql_name = name return self.get_cached( self.AUTOCOMPLETE_URL, params={"fieldName": jql_name, "fieldValue": value} )
JiraServerClient
python
wandb__wandb
wandb/sdk/artifacts/_generated/artifact_version_files.py
{ "start": 522, "end": 654 }
class ____(GQLResult): artifact: Optional[ArtifactVersionFilesProjectArtifactTypeArtifact]
ArtifactVersionFilesProjectArtifactType
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-mongodb/llama_index/readers/mongodb/base.py
{ "start": 248, "end": 7400 }
class ____(BaseReader): """ Simple mongo reader. Concatenates each Mongo doc into Document used by LlamaIndex. Args: host (str): Mongo host. port (int): Mongo port. """ def __init__( self, host: Optional[str] = None, port: Optional[int] = None, uri: Optional[str] = None, ) -> None: """Initialize with parameters.""" try: from pymongo import MongoClient, AsyncMongoClient from pymongo.driver_info import DriverInfo except ImportError as err: raise ImportError( "`pymongo` package not found, please run `pip install pymongo`" ) from err if uri: client_args = (uri,) elif host and port: client_args = (host, port) else: raise ValueError("Either `host` and `port` or `uri` must be provided.") self.client = MongoClient(*client_args) self.async_client = AsyncMongoClient(*client_args) # append_metadata was added in PyMongo 4.14.0, but is a valid database name on earlier versions if callable(self.client.append_metadata): self.client.append_metadata( DriverInfo(name="llama-index", version=version("llama-index")) ) if callable(self.async_client.append_metadata): self.async_client.append_metadata( DriverInfo(name="llama-index", version=version("llama-index")) ) def lazy_load_data( self, db_name: str, collection_name: str, field_names: List[str] = ["text"], separator: str = "", query_dict: Optional[Dict] = None, max_docs: int = 0, metadata_names: Optional[List[str]] = None, field_extractors: Optional[Dict[str, Callable[..., str]]] = None, ) -> Iterable[Document]: """ Lazy load data from MongoDB. Args: db_name (str): name of the database. collection_name (str): name of the collection. field_names(List[str]): names of the fields to be concatenated. Defaults to ["text"] separator (str): separator to be used between fields. Defaults to "" query_dict (Optional[Dict]): query to filter documents. Read more at [docs](https://docs.mongodb.com/manual/reference/method/db.collection.find/) Defaults to empty dict max_docs (int): maximum number of documents to load. 
Defaults to 0 (no limit) metadata_names (Optional[List[str]]): names of the fields to be added to the metadata attribute of the Document. Defaults to None field_extractors (Optional[Dict[str, Callable[..., str]]]): a dictionary of functions to use when extracting a field from a document. Defaults to None Yields: Document: a document object with the concatenated text and metadata. Raises: ValueError: if a field is not found in a document. """ db = self.client[db_name] cursor = db[collection_name].find( filter=query_dict or {}, limit=max_docs, projection=dict.fromkeys(field_names + (metadata_names or []), 1), ) field_extractors = field_extractors or {} for item in cursor: try: texts = [ field_extractors.get(name, str)(item[name]) for name in field_names ] except KeyError as err: raise ValueError( f"{err.args[0]} field not found in Mongo document." ) from err text = separator.join(texts) if metadata_names is None: yield Document(text=text, id_=str(item["_id"])) else: try: metadata = {name: item.get(name) for name in metadata_names} metadata["collection"] = collection_name except KeyError as err: raise ValueError( f"{err.args[0]} field not found in Mongo document." ) from err yield Document(text=text, id_=str(item["_id"]), metadata=metadata) async def alazy_load_data( self, db_name: str, collection_name: str, field_names: List[str] = ["text"], separator: str = "", query_dict: Optional[Dict] = None, max_docs: int = 0, metadata_names: Optional[List[str]] = None, field_extractors: Optional[Dict[str, Callable[..., str]]] = None, ): """ Asynchronously lazy load data from a MongoDB collection. Args: db_name (str): The name of the database to connect to. collection_name (str): The name of the collection to query. field_names (List[str]): The fields to concatenate into the document's text. Defaults to ["text"]. separator (str): The separator to use between concatenated fields. Defaults to "". query_dict (Optional[Dict]): A dictionary to filter documents. Defaults to None. 
max_docs (int): The maximum number of documents to load. Defaults to 0 (no limit). metadata_names (Optional[List[str]]): The fields to include in the document's metadata. Defaults to None. field_extractors (Optional[Dict[str, Callable[..., str]]]): A dictionary of field-specific extractor functions. Defaults to None. Yields: Document: An asynchronous generator of Document objects with concatenated text and optional metadata. Raises: ValueError: If the async_client is not initialized or if a specified field is not found in a document. """ db = self.async_client[db_name] cursor = db[collection_name].find( filter=query_dict or {}, limit=max_docs, projection=dict.fromkeys(field_names + (metadata_names or []), 1), ) field_extractors = field_extractors or {} async for item in cursor: try: texts = [ field_extractors.get(name, str)(item[name]) for name in field_names ] except KeyError as err: raise ValueError( f"{err.args[0]} field not found in Mongo document." ) from err text = separator.join(texts) if metadata_names is None: yield Document(text=text, id_=str(item["_id"])) else: try: metadata = {name: item.get(name) for name in metadata_names} metadata["collection"] = collection_name except KeyError as err: raise ValueError( f"{err.args[0]} field not found in Mongo document." ) from err yield Document(text=text, id_=str(item["_id"]), metadata=metadata)
SimpleMongoReader
python
doocs__leetcode
solution/1900-1999/1954.Minimum Garden Perimeter to Collect Enough Apples/Solution.py
{ "start": 0, "end": 188 }
class ____: def minimumPerimeter(self, neededApples: int) -> int: x = 1 while 2 * x * (x + 1) * (2 * x + 1) < neededApples: x += 1 return x * 8
Solution
python
spack__spack
lib/spack/spack/cmd/commands.py
{ "start": 2979, "end": 4216 }
class ____(ArgparseRstWriter): """RST writer tailored for spack documentation.""" def __init__( self, prog: str, out: IO = sys.stdout, aliases: bool = False, documented_commands: Set[str] = set(), rst_levels: Sequence[str] = ["-", "-", "^", "~", ":", "`"], ): """Initialize a new SpackArgparseRstWriter instance. Args: prog: Program name. out: File object to write to. aliases: Whether or not to include subparsers for aliases. documented_commands: Set of commands with additional documentation. rst_levels: List of characters for rst section headings. """ super().__init__(prog, out, aliases, rst_levels) self.documented = documented_commands def usage(self, usage: str) -> str: """Example usage of a command. Args: usage: Command usage. Returns: Usage of a command. """ string = super().usage(usage) cmd = self.parser.prog.replace(" ", "-") if cmd in self.documented: string = f"{string}\n:ref:`More documentation <cmd-{cmd}>`\n" return string
SpackArgparseRstWriter
python
realpython__materials
python-magic-methods/bitwise_number.py
{ "start": 0, "end": 615 }
class ____: def __init__(self, value): self.value = value def __and__(self, other): return type(self)(self.value & other.value) def __or__(self, other): return type(self)(self.value | other.value) def __xor__(self, other): return type(self)(self.value ^ other.value) def __invert__(self): return type(self)(~self.value) def __lshift__(self, places): return type(self)(self.value << places) def __rshift__(self, places): return type(self)(self.value >> places) def __repr__(self): return bin(self.value)
BitwiseNumber
python
cherrypy__cherrypy
cherrypy/test/benchmark.py
{ "start": 3864, "end": 3918 }
class ____: """A null HTTP response."""
NullResponse
python
walkccc__LeetCode
solutions/1005. Maximize Sum Of Array After K Negations/1005.py
{ "start": 0, "end": 263 }
class ____: def largestSumAfterKNegations(self, nums: list[int], k: int) -> int: nums.sort() for i, num in enumerate(nums): if num > 0 or k == 0: break nums[i] = -num k -= 1 return sum(nums) - (k % 2) * min(nums) * 2
Solution
python
scipy__scipy
scipy/stats/tests/test_distributions.py
{ "start": 301714, "end": 307719 }
class ____: def test_logpdf(self): # gh-6217 y = stats.weibull_min.logpdf(0, 1) assert_equal(y, 0) def test_with_maxima_distrib(self): # Tests for weibull_min and weibull_max. # The expected values were computed using the symbolic algebra # program 'maxima' with the package 'distrib', which has # 'pdf_weibull' and 'cdf_weibull'. The mapping between the # scipy and maxima functions is as follows: # ----------------------------------------------------------------- # scipy maxima # --------------------------------- ------------------------------ # weibull_min.pdf(x, a, scale=b) pdf_weibull(x, a, b) # weibull_min.logpdf(x, a, scale=b) log(pdf_weibull(x, a, b)) # weibull_min.cdf(x, a, scale=b) cdf_weibull(x, a, b) # weibull_min.logcdf(x, a, scale=b) log(cdf_weibull(x, a, b)) # weibull_min.sf(x, a, scale=b) 1 - cdf_weibull(x, a, b) # weibull_min.logsf(x, a, scale=b) log(1 - cdf_weibull(x, a, b)) # # weibull_max.pdf(x, a, scale=b) pdf_weibull(-x, a, b) # weibull_max.logpdf(x, a, scale=b) log(pdf_weibull(-x, a, b)) # weibull_max.cdf(x, a, scale=b) 1 - cdf_weibull(-x, a, b) # weibull_max.logcdf(x, a, scale=b) log(1 - cdf_weibull(-x, a, b)) # weibull_max.sf(x, a, scale=b) cdf_weibull(-x, a, b) # weibull_max.logsf(x, a, scale=b) log(cdf_weibull(-x, a, b)) # ----------------------------------------------------------------- x = 1.5 a = 2.0 b = 3.0 # weibull_min p = stats.weibull_min.pdf(x, a, scale=b) assert_allclose(p, np.exp(-0.25)/3) lp = stats.weibull_min.logpdf(x, a, scale=b) assert_allclose(lp, -0.25 - np.log(3)) c = stats.weibull_min.cdf(x, a, scale=b) assert_allclose(c, -special.expm1(-0.25)) lc = stats.weibull_min.logcdf(x, a, scale=b) assert_allclose(lc, np.log(-special.expm1(-0.25))) s = stats.weibull_min.sf(x, a, scale=b) assert_allclose(s, np.exp(-0.25)) ls = stats.weibull_min.logsf(x, a, scale=b) assert_allclose(ls, -0.25) # Also test using a large value x, for which computing the survival # function using the CDF would result in 0. 
s = stats.weibull_min.sf(30, 2, scale=3) assert_allclose(s, np.exp(-100)) ls = stats.weibull_min.logsf(30, 2, scale=3) assert_allclose(ls, -100) # weibull_max x = -1.5 p = stats.weibull_max.pdf(x, a, scale=b) assert_allclose(p, np.exp(-0.25)/3) lp = stats.weibull_max.logpdf(x, a, scale=b) assert_allclose(lp, -0.25 - np.log(3)) c = stats.weibull_max.cdf(x, a, scale=b) assert_allclose(c, np.exp(-0.25)) lc = stats.weibull_max.logcdf(x, a, scale=b) assert_allclose(lc, -0.25) s = stats.weibull_max.sf(x, a, scale=b) assert_allclose(s, -special.expm1(-0.25)) ls = stats.weibull_max.logsf(x, a, scale=b) assert_allclose(ls, np.log(-special.expm1(-0.25))) # Also test using a value of x close to 0, for which computing the # survival function using the CDF would result in 0. s = stats.weibull_max.sf(-1e-9, 2, scale=3) assert_allclose(s, -special.expm1(-1/9000000000000000000)) ls = stats.weibull_max.logsf(-1e-9, 2, scale=3) assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000))) @pytest.mark.parametrize('scale', [1.0, 0.1]) def test_delta_cdf(self, scale): # Expected value computed with mpmath: # # def weibull_min_sf(x, k, scale): # x = mpmath.mpf(x) # k = mpmath.mpf(k) # scale =mpmath.mpf(scale) # return mpmath.exp(-(x/scale)**k) # # >>> import mpmath # >>> mpmath.mp.dps = 60 # >>> sf1 = weibull_min_sf(7.5, 3, 1) # >>> sf2 = weibull_min_sf(8.0, 3, 1) # >>> float(sf1 - sf2) # 6.053624060118734e-184 # delta = stats.weibull_min._delta_cdf(scale*7.5, scale*8, 3, scale=scale) assert_allclose(delta, 6.053624060118734e-184) def test_fit_min(self): rng = np.random.default_rng(5985959307161735394) c, loc, scale = 2, 3.5, 0.5 # arbitrary, valid parameters dist = stats.weibull_min(c, loc, scale) rvs = dist.rvs(size=100, random_state=rng) # test that MLE still honors guesses and fixed parameters c2, loc2, scale2 = stats.weibull_min.fit(rvs, 1.5, floc=3) c3, loc3, scale3 = stats.weibull_min.fit(rvs, 1.6, floc=3) assert loc2 == loc3 == 3 # fixed parameter is respected assert c2 
!= c3 # different guess -> (slightly) different outcome # quality of fit is tested elsewhere # test that MoM honors fixed parameters, accepts (but ignores) guesses c4, loc4, scale4 = stats.weibull_min.fit(rvs, 3, fscale=3, method='mm') assert scale4 == 3 # because scale was fixed, only the mean and skewness will be matched dist4 = stats.weibull_min(c4, loc4, scale4) res = dist4.stats(moments='ms') ref = np.mean(rvs), stats.skew(rvs) assert_allclose(res, ref) # reference values were computed via mpmath # from mpmath import mp # def weibull_sf_mpmath(x, c): # x = mp.mpf(x) # c = mp.mpf(c) # return float(mp.exp(-x**c)) @pytest.mark.parametrize('x, c, ref', [(50, 1, 1.9287498479639178e-22), (1000, 0.8, 8.131269637872743e-110)]) def test_sf_isf(self, x, c, ref): assert_allclose(stats.weibull_min.sf(x, c), ref, rtol=5e-14) assert_allclose(stats.weibull_min.isf(ref, c), x, rtol=5e-14)
TestWeibull
python
vyperlang__vyper
vyper/builtins/functions.py
{ "start": 2459, "end": 2604 }
class ____(BuiltinFunctionT): # Base class for nodes which should always be folded _modifiability = Modifiability.CONSTANT
FoldedFunctionT
python
dagster-io__dagster
python_modules/dagster/dagster/_core/execution/plan/step.py
{ "start": 12167, "end": 16486 }
class ____( # pyright: ignore[reportIncompatibleVariableOverride] NamedTuple( "_UnresolvedCollectExecutionStep", [ ("handle", StepHandle), ("job_name", str), ("step_input_dict", Mapping[str, Union[StepInput, UnresolvedCollectStepInput]]), ("step_output_dict", Mapping[str, StepOutput]), ("tags", Mapping[str, str]), ("pool", Optional[str]), ], ), IExecutionStep, ): """A placeholder step that will become 1 ExecutionStep that collects over a dynamic output or downstream from one once it resolves.""" def __new__( cls, handle: StepHandle, job_name: str, step_inputs: Sequence[Union[StepInput, UnresolvedCollectStepInput]], step_outputs: Sequence[StepOutput], tags: Optional[Mapping[str, str]], pool: Optional[str], ): return super().__new__( cls, handle=check.inst_param(handle, "handle", StepHandle), job_name=check.str_param(job_name, "job_name"), step_input_dict={ si.name: si for si in check.sequence_param( step_inputs, "step_inputs", of_type=(StepInput, UnresolvedCollectStepInput) ) }, step_output_dict={ so.name: so for so in check.sequence_param(step_outputs, "step_outputs", of_type=StepOutput) }, tags=check.opt_mapping_param(tags, "tags", key_type=str), pool=check.opt_str_param(pool, "pool"), ) @property def node_handle(self) -> "NodeHandle": return self.handle.node_handle @property def key(self) -> str: return self.handle.to_key() @property def kind(self) -> StepKind: return StepKind.UNRESOLVED_COLLECT @property def step_inputs(self) -> Sequence[Union[StepInput, UnresolvedCollectStepInput]]: return list(self.step_input_dict.values()) @property def step_outputs(self) -> Sequence[StepOutput]: return list(self.step_output_dict.values()) def step_input_named(self, name: str) -> Union[StepInput, UnresolvedCollectStepInput]: check.str_param(name, "name") return self.step_input_dict[name] def step_output_named(self, name: str) -> StepOutput: check.str_param(name, "name") return self.step_output_dict[name] def get_all_dependency_keys(self) -> set[str]: deps = set() for inp in 
self.step_inputs: if isinstance(inp, StepInput): deps.update( [handle.step_key for handle in inp.get_step_output_handle_dependencies()] ) elif isinstance(inp, UnresolvedCollectStepInput): deps.update( [ handle.step_key for handle in inp.get_step_output_handle_deps_with_placeholders() ] ) else: check.failed(f"Unexpected step input type {inp}") return deps @property def resolved_by_step_keys(self) -> frozenset[str]: keys = set() for inp in self.step_inputs: if isinstance(inp, UnresolvedCollectStepInput): keys.add(inp.resolved_by_step_key) return frozenset(keys) def resolve( self, mappings: Mapping[str, Mapping[str, Optional[Sequence[str]]]] ) -> ExecutionStep: check.invariant( all(key in mappings for key in self.resolved_by_step_keys), "resolving with mappings that do not contain all required step keys", ) resolved_inputs = [] for inp in self.step_inputs: if isinstance(inp, StepInput): resolved_inputs.append(inp) else: resolved_inputs.append( inp.resolve(mappings[inp.resolved_by_step_key][inp.resolved_by_output_name]) ) return ExecutionStep( handle=self.handle, job_name=self.job_name, step_inputs=resolved_inputs, step_outputs=self.step_outputs, tags=self.tags, pool=self.pool, )
UnresolvedCollectExecutionStep
python
openai__openai-python
src/openai/types/beta/threads/runs/file_search_tool_call_delta.py
{ "start": 230, "end": 655 }
class ____(BaseModel): file_search: object """For now, this is always going to be an empty object.""" index: int """The index of the tool call in the tool calls array.""" type: Literal["file_search"] """The type of tool call. This is always going to be `file_search` for this type of tool call. """ id: Optional[str] = None """The ID of the tool call object."""
FileSearchToolCallDelta
python
apache__airflow
airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_dag_runs.py
{ "start": 7371, "end": 10886 }
class ____: def setup_method(self): clear_db_runs() def teardown_method(self): clear_db_runs() def test_get_count_basic(self, client, session, dag_maker): with dag_maker("test_dag"): pass dag_maker.create_dagrun() session.commit() response = client.get("/execution/dag-runs/count", params={"dag_id": "test_dag"}) assert response.status_code == 200 assert response.json() == 1 def test_get_count_with_states(self, client, session, dag_maker): """Test counting DAG runs in specific states.""" with dag_maker("test_get_count_with_states"): pass # Create DAG runs with different states dag_maker.create_dagrun( state=State.SUCCESS, logical_date=timezone.datetime(2025, 1, 1), run_id="test_run_id1" ) dag_maker.create_dagrun( state=State.FAILED, logical_date=timezone.datetime(2025, 1, 2), run_id="test_run_id2" ) dag_maker.create_dagrun( state=State.RUNNING, logical_date=timezone.datetime(2025, 1, 3), run_id="test_run_id3" ) session.commit() response = client.get( "/execution/dag-runs/count", params={"dag_id": "test_get_count_with_states", "states": [State.SUCCESS, State.FAILED]}, ) assert response.status_code == 200 assert response.json() == 2 def test_get_count_with_logical_dates(self, client, session, dag_maker): with dag_maker("test_get_count_with_logical_dates"): pass date1 = timezone.datetime(2025, 1, 1) date2 = timezone.datetime(2025, 1, 2) dag_maker.create_dagrun(run_id="test_run_id1", logical_date=date1) dag_maker.create_dagrun(run_id="test_run_id2", logical_date=date2) session.commit() response = client.get( "/execution/dag-runs/count", params={ "dag_id": "test_get_count_with_logical_dates", "logical_dates": [date1.isoformat(), date2.isoformat()], }, ) assert response.status_code == 200 assert response.json() == 2 def test_get_count_with_run_ids(self, client, session, dag_maker): with dag_maker("test_get_count_with_run_ids"): pass dag_maker.create_dagrun(run_id="run1", logical_date=timezone.datetime(2025, 1, 1)) dag_maker.create_dagrun(run_id="run2", 
logical_date=timezone.datetime(2025, 1, 2)) session.commit() response = client.get( "/execution/dag-runs/count", params={"dag_id": "test_get_count_with_run_ids", "run_ids": ["run1", "run2"]}, ) assert response.status_code == 200 assert response.json() == 2 def test_get_count_with_mixed_states(self, client, session, dag_maker): with dag_maker("test_get_count_with_mixed"): pass dag_maker.create_dagrun( state=State.SUCCESS, run_id="runid1", logical_date=timezone.datetime(2025, 1, 1) ) dag_maker.create_dagrun( state=State.QUEUED, run_id="runid2", logical_date=timezone.datetime(2025, 1, 2) ) session.commit() response = client.get( "/execution/dag-runs/count", params={"dag_id": "test_get_count_with_mixed", "states": [State.SUCCESS, State.QUEUED]}, ) assert response.status_code == 200 assert response.json() == 2
TestGetDagRunCount
python
sqlalchemy__sqlalchemy
test/dialect/postgresql/test_types.py
{ "start": 187160, "end": 187239 }
class ____(_Int8RangeTests, _RangeTypeRoundTrip): pass
Int8RangeRoundTripTest
python
tensorflow__tensorflow
tensorflow/python/framework/type_spec_test.py
{ "start": 3119, "end": 3237 }
class ____(TwoTensorsSpec): pass @type_spec_registry.register("tf.TwoTensorsSpecVariableSerialize")
TwoTensorsSpecTwin
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_looker.py
{ "start": 1444, "end": 2000 }
class ____: @classmethod def setUpClass(cls): cls.dagbag = DagBag(dag_folder="/dev/null", include_examples=False) cls.dag = DAG(TEST_DAG_ID, default_args={"owner": "airflow", "start_date": DEFAULT_DATE}) def setup_method(self): self.mock_ti = MagicMock() self.mock_context = {"ti": self.mock_ti} def tearDown(self): self.mock_ti = MagicMock() self.mock_context = {"ti": self.mock_ti} @classmethod def tearDownClass(cls): clear_db_runs() clear_db_xcom()
LookerTestBase
python
dagster-io__dagster
python_modules/dagster/dagster_tests/execution_tests/pipes_tests/in_process_client.py
{ "start": 2152, "end": 2767 }
class ____(dg.PipesMessageReader): def __init__( self, message_writer: InProcessPipesMessageWriter, pipes_context: PipesContext, ) -> None: self.message_writer = message_writer self.pipes_context = pipes_context @contextmanager def read_messages(self, handler: PipesMessageHandler) -> Iterator[PipesParams]: yield {} for pipes_message in self.message_writer.write_channel.messages: handler.handle_message(pipes_message) def no_messages_debug_text(self) -> str: return "In-process message reader."
InProcessMessageReader
python
ray-project__ray
python/ray/llm/_internal/serve/core/configs/openai_api_models.py
{ "start": 2673, "end": 3144 }
class ____(vLLMEmbeddingCompletionRequest): model_config = ConfigDict(arbitrary_types_allowed=True) request_id: str = Field( default_factory=lambda: f"{random_uuid()}", description=( "The request_id related to this request. If the caller does " "not set it, a random_uuid will be generated. This id is used " "through out the inference process and return in response." ), )
EmbeddingCompletionRequest
python
scipy__scipy
scipy/cluster/hierarchy.py
{ "start": 4544, "end": 36707 }
class ____(UserWarning): pass def _warning(s): warnings.warn(f'scipy.cluster: {s}', ClusterWarning, stacklevel=3) def int_floor(arr, xp): # array_api_strict is strict about not allowing `int()` on a float array. # That's typically not needed, here it is - so explicitly convert return int(xp.asarray(arr, dtype=xp.int64)) lazy_cython = xp_capabilities( cpu_only=True, reason="Cython code", warnings=[("dask.array", "merges chunks")]) @lazy_cython def single(y): """ Perform single/min/nearest linkage on the condensed distance matrix ``y``. Parameters ---------- y : ndarray The upper triangular of the distance matrix. The result of ``pdist`` is returned in this form. Returns ------- Z : ndarray The linkage matrix. See Also -------- linkage : for advanced creation of hierarchical clusterings. scipy.spatial.distance.pdist : pairwise distance metrics Examples -------- >>> from scipy.cluster.hierarchy import single, fcluster >>> from scipy.spatial.distance import pdist First, we need a toy dataset to play with:: x x x x x x x x x x x x >>> X = [[0, 0], [0, 1], [1, 0], ... [0, 4], [0, 3], [1, 4], ... [4, 0], [3, 0], [4, 1], ... [4, 4], [3, 4], [4, 3]] Then, we get a condensed distance matrix from this dataset: >>> y = pdist(X) Finally, we can perform the clustering: >>> Z = single(y) >>> Z array([[ 0., 1., 1., 2.], [ 2., 12., 1., 3.], [ 3., 4., 1., 2.], [ 5., 14., 1., 3.], [ 6., 7., 1., 2.], [ 8., 16., 1., 3.], [ 9., 10., 1., 2.], [11., 18., 1., 3.], [13., 15., 2., 6.], [17., 20., 2., 9.], [19., 21., 2., 12.]]) The linkage matrix ``Z`` represents a dendrogram - see `scipy.cluster.hierarchy.linkage` for a detailed explanation of its contents. 
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster each initial point would belong given a distance threshold: >>> fcluster(Z, 0.9, criterion='distance') array([ 7, 8, 9, 10, 11, 12, 4, 5, 6, 1, 2, 3], dtype=int32) >>> fcluster(Z, 1, criterion='distance') array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32) >>> fcluster(Z, 2, criterion='distance') array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram. """ return linkage(y, method='single', metric='euclidean') @lazy_cython def complete(y): """ Perform complete/max/farthest point linkage on a condensed distance matrix. Parameters ---------- y : ndarray The upper triangular of the distance matrix. The result of ``pdist`` is returned in this form. Returns ------- Z : ndarray A linkage matrix containing the hierarchical clustering. See the `linkage` function documentation for more information on its structure. See Also -------- linkage : for advanced creation of hierarchical clusterings. scipy.spatial.distance.pdist : pairwise distance metrics Examples -------- >>> from scipy.cluster.hierarchy import complete, fcluster >>> from scipy.spatial.distance import pdist First, we need a toy dataset to play with:: x x x x x x x x x x x x >>> X = [[0, 0], [0, 1], [1, 0], ... [0, 4], [0, 3], [1, 4], ... [4, 0], [3, 0], [4, 1], ... [4, 4], [3, 4], [4, 3]] Then, we get a condensed distance matrix from this dataset: >>> y = pdist(X) Finally, we can perform the clustering: >>> Z = complete(y) >>> Z array([[ 0. , 1. , 1. , 2. ], [ 3. , 4. , 1. , 2. ], [ 6. , 7. , 1. , 2. ], [ 9. , 10. , 1. , 2. ], [ 2. , 12. , 1.41421356, 3. ], [ 5. , 13. , 1.41421356, 3. ], [ 8. , 14. , 1.41421356, 3. ], [11. , 15. , 1.41421356, 3. ], [16. , 17. , 4.12310563, 6. ], [18. , 19. , 4.12310563, 6. ], [20. , 21. , 5.65685425, 12. 
]]) The linkage matrix ``Z`` represents a dendrogram - see `scipy.cluster.hierarchy.linkage` for a detailed explanation of its contents. We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster each initial point would belong given a distance threshold: >>> fcluster(Z, 0.9, criterion='distance') array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) >>> fcluster(Z, 1.5, criterion='distance') array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) >>> fcluster(Z, 4.5, criterion='distance') array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32) >>> fcluster(Z, 6, criterion='distance') array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram. """ return linkage(y, method='complete', metric='euclidean') @lazy_cython def average(y): """ Perform average/UPGMA linkage on a condensed distance matrix. Parameters ---------- y : ndarray The upper triangular of the distance matrix. The result of ``pdist`` is returned in this form. Returns ------- Z : ndarray A linkage matrix containing the hierarchical clustering. See `linkage` for more information on its structure. See Also -------- linkage : for advanced creation of hierarchical clusterings. scipy.spatial.distance.pdist : pairwise distance metrics Examples -------- >>> from scipy.cluster.hierarchy import average, fcluster >>> from scipy.spatial.distance import pdist First, we need a toy dataset to play with:: x x x x x x x x x x x x >>> X = [[0, 0], [0, 1], [1, 0], ... [0, 4], [0, 3], [1, 4], ... [4, 0], [3, 0], [4, 1], ... [4, 4], [3, 4], [4, 3]] Then, we get a condensed distance matrix from this dataset: >>> y = pdist(X) Finally, we can perform the clustering: >>> Z = average(y) >>> Z array([[ 0. , 1. , 1. , 2. ], [ 3. , 4. , 1. , 2. ], [ 6. , 7. , 1. , 2. ], [ 9. , 10. , 1. , 2. ], [ 2. , 12. , 1.20710678, 3. ], [ 5. , 13. , 1.20710678, 3. ], [ 8. , 14. , 1.20710678, 3. ], [11. , 15. , 1.20710678, 3. ], [16. 
, 17. , 3.39675184, 6. ], [18. , 19. , 3.39675184, 6. ], [20. , 21. , 4.09206523, 12. ]]) The linkage matrix ``Z`` represents a dendrogram - see `scipy.cluster.hierarchy.linkage` for a detailed explanation of its contents. We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster each initial point would belong given a distance threshold: >>> fcluster(Z, 0.9, criterion='distance') array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) >>> fcluster(Z, 1.5, criterion='distance') array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) >>> fcluster(Z, 4, criterion='distance') array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32) >>> fcluster(Z, 6, criterion='distance') array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram. """ return linkage(y, method='average', metric='euclidean') @lazy_cython def weighted(y): """ Perform weighted/WPGMA linkage on the condensed distance matrix. See `linkage` for more information on the return structure and algorithm. Parameters ---------- y : ndarray The upper triangular of the distance matrix. The result of ``pdist`` is returned in this form. Returns ------- Z : ndarray A linkage matrix containing the hierarchical clustering. See `linkage` for more information on its structure. See Also -------- linkage : for advanced creation of hierarchical clusterings. scipy.spatial.distance.pdist : pairwise distance metrics Examples -------- >>> from scipy.cluster.hierarchy import weighted, fcluster >>> from scipy.spatial.distance import pdist First, we need a toy dataset to play with:: x x x x x x x x x x x x >>> X = [[0, 0], [0, 1], [1, 0], ... [0, 4], [0, 3], [1, 4], ... [4, 0], [3, 0], [4, 1], ... [4, 4], [3, 4], [4, 3]] Then, we get a condensed distance matrix from this dataset: >>> y = pdist(X) Finally, we can perform the clustering: >>> Z = weighted(y) >>> Z array([[ 0. , 1. , 1. , 2. ], [ 6. , 7. , 1. , 2. ], [ 3. , 4. , 1. 
, 2. ], [ 9. , 11. , 1. , 2. ], [ 2. , 12. , 1.20710678, 3. ], [ 8. , 13. , 1.20710678, 3. ], [ 5. , 14. , 1.20710678, 3. ], [10. , 15. , 1.20710678, 3. ], [18. , 19. , 3.05595762, 6. ], [16. , 17. , 3.32379407, 6. ], [20. , 21. , 4.06357713, 12. ]]) The linkage matrix ``Z`` represents a dendrogram - see `scipy.cluster.hierarchy.linkage` for a detailed explanation of its contents. We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster each initial point would belong given a distance threshold: >>> fcluster(Z, 0.9, criterion='distance') array([ 7, 8, 9, 1, 2, 3, 10, 11, 12, 4, 6, 5], dtype=int32) >>> fcluster(Z, 1.5, criterion='distance') array([3, 3, 3, 1, 1, 1, 4, 4, 4, 2, 2, 2], dtype=int32) >>> fcluster(Z, 4, criterion='distance') array([2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1], dtype=int32) >>> fcluster(Z, 6, criterion='distance') array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram. """ return linkage(y, method='weighted', metric='euclidean') @lazy_cython def centroid(y): """ Perform centroid/UPGMC linkage. See `linkage` for more information on the input matrix, return structure, and algorithm. The following are common calling conventions: 1. ``Z = centroid(y)`` Performs centroid/UPGMC linkage on the condensed distance matrix ``y``. 2. ``Z = centroid(X)`` Performs centroid/UPGMC linkage on the observation matrix ``X`` using Euclidean distance as the distance metric. Parameters ---------- y : ndarray A condensed distance matrix. A condensed distance matrix is a flat array containing the upper triangular of the distance matrix. This is the form that ``pdist`` returns. Alternatively, a collection of m observation vectors in n dimensions may be passed as an m by n array. Returns ------- Z : ndarray A linkage matrix containing the hierarchical clustering. See the `linkage` function documentation for more information on its structure. 
See Also -------- linkage : for advanced creation of hierarchical clusterings. scipy.spatial.distance.pdist : pairwise distance metrics Examples -------- >>> from scipy.cluster.hierarchy import centroid, fcluster >>> from scipy.spatial.distance import pdist First, we need a toy dataset to play with:: x x x x x x x x x x x x >>> X = [[0, 0], [0, 1], [1, 0], ... [0, 4], [0, 3], [1, 4], ... [4, 0], [3, 0], [4, 1], ... [4, 4], [3, 4], [4, 3]] Then, we get a condensed distance matrix from this dataset: >>> y = pdist(X) Finally, we can perform the clustering: >>> Z = centroid(y) >>> Z array([[ 0. , 1. , 1. , 2. ], [ 3. , 4. , 1. , 2. ], [ 9. , 10. , 1. , 2. ], [ 6. , 7. , 1. , 2. ], [ 2. , 12. , 1.11803399, 3. ], [ 5. , 13. , 1.11803399, 3. ], [ 8. , 15. , 1.11803399, 3. ], [11. , 14. , 1.11803399, 3. ], [18. , 19. , 3.33333333, 6. ], [16. , 17. , 3.33333333, 6. ], [20. , 21. , 3.33333333, 12. ]]) # may vary The linkage matrix ``Z`` represents a dendrogram - see `scipy.cluster.hierarchy.linkage` for a detailed explanation of its contents. We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster each initial point would belong given a distance threshold: >>> fcluster(Z, 0.9, criterion='distance') array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32) # may vary >>> fcluster(Z, 1.1, criterion='distance') array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32) # may vary >>> fcluster(Z, 2, criterion='distance') array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32) # may vary >>> fcluster(Z, 4, criterion='distance') array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram. """ return linkage(y, method='centroid', metric='euclidean') @lazy_cython def median(y): """ Perform median/WPGMC linkage. See `linkage` for more information on the return structure and algorithm. The following are common calling conventions: 1. 
``Z = median(y)`` Performs median/WPGMC linkage on the condensed distance matrix ``y``. See ``linkage`` for more information on the return structure and algorithm. 2. ``Z = median(X)`` Performs median/WPGMC linkage on the observation matrix ``X`` using Euclidean distance as the distance metric. See `linkage` for more information on the return structure and algorithm. Parameters ---------- y : ndarray A condensed distance matrix. A condensed distance matrix is a flat array containing the upper triangular of the distance matrix. This is the form that ``pdist`` returns. Alternatively, a collection of m observation vectors in n dimensions may be passed as an m by n array. Returns ------- Z : ndarray The hierarchical clustering encoded as a linkage matrix. See Also -------- linkage : for advanced creation of hierarchical clusterings. scipy.spatial.distance.pdist : pairwise distance metrics Examples -------- >>> from scipy.cluster.hierarchy import median, fcluster >>> from scipy.spatial.distance import pdist First, we need a toy dataset to play with:: x x x x x x x x x x x x >>> X = [[0, 0], [0, 1], [1, 0], ... [0, 4], [0, 3], [1, 4], ... [4, 0], [3, 0], [4, 1], ... [4, 4], [3, 4], [4, 3]] Then, we get a condensed distance matrix from this dataset: >>> y = pdist(X) Finally, we can perform the clustering: >>> Z = median(y) >>> Z array([[ 0. , 1. , 1. , 2. ], [ 3. , 4. , 1. , 2. ], [ 9. , 10. , 1. , 2. ], [ 6. , 7. , 1. , 2. ], [ 2. , 12. , 1.11803399, 3. ], [ 5. , 13. , 1.11803399, 3. ], [ 8. , 15. , 1.11803399, 3. ], [11. , 14. , 1.11803399, 3. ], [18. , 19. , 3. , 6. ], [16. , 17. , 3.5 , 6. ], [20. , 21. , 3.25 , 12. ]]) The linkage matrix ``Z`` represents a dendrogram - see `scipy.cluster.hierarchy.linkage` for a detailed explanation of its contents. 
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster each initial point would belong given a distance threshold: >>> fcluster(Z, 0.9, criterion='distance') array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32) >>> fcluster(Z, 1.1, criterion='distance') array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32) >>> fcluster(Z, 2, criterion='distance') array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32) >>> fcluster(Z, 4, criterion='distance') array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram. """ return linkage(y, method='median', metric='euclidean') @lazy_cython def ward(y): """ Perform Ward's linkage on a condensed distance matrix. See `linkage` for more information on the return structure and algorithm. The following are common calling conventions: 1. ``Z = ward(y)`` Performs Ward's linkage on the condensed distance matrix ``y``. 2. ``Z = ward(X)`` Performs Ward's linkage on the observation matrix ``X`` using Euclidean distance as the distance metric. Parameters ---------- y : ndarray A condensed distance matrix. A condensed distance matrix is a flat array containing the upper triangular of the distance matrix. This is the form that ``pdist`` returns. Alternatively, a collection of m observation vectors in n dimensions may be passed as an m by n array. Returns ------- Z : ndarray The hierarchical clustering encoded as a linkage matrix. See `linkage` for more information on the return structure and algorithm. See Also -------- linkage : for advanced creation of hierarchical clusterings. scipy.spatial.distance.pdist : pairwise distance metrics Examples -------- >>> from scipy.cluster.hierarchy import ward, fcluster >>> from scipy.spatial.distance import pdist First, we need a toy dataset to play with:: x x x x x x x x x x x x >>> X = [[0, 0], [0, 1], [1, 0], ... [0, 4], [0, 3], [1, 4], ... [4, 0], [3, 0], [4, 1], ... 
[4, 4], [3, 4], [4, 3]] Then, we get a condensed distance matrix from this dataset: >>> y = pdist(X) Finally, we can perform the clustering: >>> Z = ward(y) >>> Z array([[ 0. , 1. , 1. , 2. ], [ 3. , 4. , 1. , 2. ], [ 6. , 7. , 1. , 2. ], [ 9. , 10. , 1. , 2. ], [ 2. , 12. , 1.29099445, 3. ], [ 5. , 13. , 1.29099445, 3. ], [ 8. , 14. , 1.29099445, 3. ], [11. , 15. , 1.29099445, 3. ], [16. , 17. , 5.77350269, 6. ], [18. , 19. , 5.77350269, 6. ], [20. , 21. , 8.16496581, 12. ]]) The linkage matrix ``Z`` represents a dendrogram - see `scipy.cluster.hierarchy.linkage` for a detailed explanation of its contents. We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster each initial point would belong given a distance threshold: >>> fcluster(Z, 0.9, criterion='distance') array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) >>> fcluster(Z, 1.1, criterion='distance') array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32) >>> fcluster(Z, 3, criterion='distance') array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) >>> fcluster(Z, 9, criterion='distance') array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram. """ return linkage(y, method='ward', metric='euclidean') @lazy_cython def linkage(y, method='single', metric='euclidean', optimal_ordering=False): """ Perform hierarchical/agglomerative clustering. The input y may be either a 1-D condensed distance matrix or a 2-D array of observation vectors. If y is a 1-D condensed distance matrix, then y must be a :math:`\\binom{n}{2}` sized vector, where n is the number of original observations paired in the distance matrix. The behavior of this function is very similar to the MATLAB linkage function. A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the :math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and ``Z[i, 1]`` are combined to form cluster :math:`n + i`. 
A cluster with an index less than :math:`n` corresponds to one of the :math:`n` original observations. The distance between clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The fourth value ``Z[i, 3]`` represents the number of original observations in the newly formed cluster. The following linkage methods are used to compute the distance :math:`d(s, t)` between two clusters :math:`s` and :math:`t`. The algorithm begins with a forest of clusters that have yet to be used in the hierarchy being formed. When two clusters :math:`s` and :math:`t` from this forest are combined into a single cluster :math:`u`, :math:`s` and :math:`t` are removed from the forest, and :math:`u` is added to the forest. When only one cluster remains in the forest, the algorithm stops, and this cluster becomes the root. A distance matrix is maintained at each iteration. The ``d[i,j]`` entry corresponds to the distance between cluster :math:`i` and :math:`j` in the original forest. At each iteration, the algorithm must update the distance matrix to reflect the distance of the newly formed cluster u with the remaining clusters in the forest. Suppose there are :math:`|u|` original observations :math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and :math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in cluster :math:`v`. Recall, :math:`s` and :math:`t` are combined to form cluster :math:`u`. Let :math:`v` be any remaining cluster in the forest that is not :math:`u`. The following are methods for calculating the distance between the newly formed cluster :math:`u` and each :math:`v`. * method='single' assigns .. math:: d(u,v) = \\min(dist(u[i],v[j])) for all points :math:`i` in cluster :math:`u` and :math:`j` in cluster :math:`v`. This is also known as the Nearest Point Algorithm. * method='complete' assigns .. math:: d(u, v) = \\max(dist(u[i],v[j])) for all points :math:`i` in cluster u and :math:`j` in cluster :math:`v`. 
This is also known by the Farthest Point Algorithm or Voor Hees Algorithm. * method='average' assigns .. math:: d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])} {(|u|*|v|)} for all points :math:`i` and :math:`j` where :math:`|u|` and :math:`|v|` are the cardinalities of clusters :math:`u` and :math:`v`, respectively. This is also called the UPGMA algorithm. * method='weighted' assigns .. math:: d(u,v) = (dist(s,v) + dist(t,v))/2 where cluster u was formed with cluster s and t and v is a remaining cluster in the forest (also called WPGMA). * method='centroid' assigns .. math:: dist(s,t) = ||c_s-c_t||_2 where :math:`c_s` and :math:`c_t` are the centroids of clusters :math:`s` and :math:`t`, respectively. When two clusters :math:`s` and :math:`t` are combined into a new cluster :math:`u`, the new centroid is computed over all the original objects in clusters :math:`s` and :math:`t`. The distance then becomes the Euclidean distance between the centroid of :math:`u` and the centroid of a remaining cluster :math:`v` in the forest. This is also known as the UPGMC algorithm. * method='median' assigns :math:`d(s,t)` like the ``centroid`` method. When two clusters :math:`s` and :math:`t` are combined into a new cluster :math:`u`, the average of centroids s and t give the new centroid :math:`u`. This is also known as the WPGMC algorithm. * method='ward' uses the Ward variance minimization algorithm. The new entry :math:`d(u,v)` is computed as follows, .. math:: d(u,v) = \\sqrt{\\frac{|v|+|s|} {T}d(v,s)^2 + \\frac{|v|+|t|} {T}d(v,t)^2 - \\frac{|v|} {T}d(s,t)^2} where :math:`u` is the newly joined cluster consisting of clusters :math:`s` and :math:`t`, :math:`v` is an unused cluster in the forest, :math:`T=|v|+|s|+|t|`, and :math:`|*|` is the cardinality of its argument. This is also known as the incremental algorithm. Warning: When the minimum distance pair in the forest is chosen, there may be two or more pairs with the same minimum distance. 
This implementation may choose a different minimum than the MATLAB version. Parameters ---------- y : ndarray A condensed distance matrix. A condensed distance matrix is a flat array containing the upper triangular of the distance matrix. This is the form that ``pdist`` returns. Alternatively, a collection of :math:`m` observation vectors in :math:`n` dimensions may be passed as an :math:`m` by :math:`n` array. All elements of the condensed distance matrix must be finite, i.e., no NaNs or infs. method : str, optional The linkage algorithm to use. See the ``Linkage Methods`` section below for full descriptions. metric : str or function, optional The distance metric to use in the case that y is a collection of observation vectors; ignored otherwise. See the ``pdist`` function for a list of valid distance metrics. A custom distance function can also be used. optimal_ordering : bool, optional If True, the linkage matrix will be reordered so that the distance between successive leaves is minimal. This results in a more intuitive tree structure when the data are visualized. defaults to False, because this algorithm can be slow, particularly on large datasets [2]_. See also the `optimal_leaf_ordering` function. .. versionadded:: 1.0.0 Returns ------- Z : ndarray The hierarchical clustering encoded as a linkage matrix. Notes ----- 1. For method 'single', an optimized algorithm based on minimum spanning tree is implemented. It has time complexity :math:`O(n^2)`. For methods 'complete', 'average', 'weighted' and 'ward', an algorithm called nearest-neighbors chain is implemented. It also has time complexity :math:`O(n^2)`. For other methods, a naive algorithm is implemented with :math:`O(n^3)` time complexity. All algorithms use :math:`O(n^2)` memory. Refer to [1]_ for details about the algorithms. 2. Methods 'centroid', 'median', and 'ward' are correctly defined only if Euclidean pairwise metric is used. 
If `y` is passed as precomputed pairwise distances, then it is the user's responsibility to assure that these distances are in fact Euclidean, otherwise the produced result will be incorrect. See Also -------- scipy.spatial.distance.pdist : pairwise distance metrics References ---------- .. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering algorithms", :arXiv:`1109.2378v1`. .. [2] Ziv Bar-Joseph, David K. Gifford, Tommi S. Jaakkola, "Fast optimal leaf ordering for hierarchical clustering", 2001. Bioinformatics :doi:`10.1093/bioinformatics/17.suppl_1.S22` Examples -------- >>> from scipy.cluster.hierarchy import dendrogram, linkage >>> from matplotlib import pyplot as plt >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]] >>> Z = linkage(X, 'ward') >>> fig = plt.figure(figsize=(25, 10)) >>> dn = dendrogram(Z) >>> Z = linkage(X, 'single') >>> fig = plt.figure(figsize=(25, 10)) >>> dn = dendrogram(Z) >>> plt.show() """ xp = array_namespace(y) y = _asarray(y, order='C', dtype=xp.float64, xp=xp) lazy = is_lazy_array(y) if method not in _LINKAGE_METHODS: raise ValueError(f"Invalid method: {method}") if method in _EUCLIDEAN_METHODS and metric != 'euclidean' and y.ndim == 2: msg = f"`method={method}` requires the distance metric to be Euclidean" raise ValueError(msg) if y.ndim == 1: distance.is_valid_y(y, throw=True, name='y') elif y.ndim == 2: if (not lazy and y.shape[0] == y.shape[1] and xp.all(xpx.isclose(xp.linalg.diagonal(y), 0)) and xp.all(y >= 0) and xp.all(xpx.isclose(y, y.T))): warnings.warn('The symmetric non-negative hollow observation ' 'matrix looks suspiciously like an uncondensed ' 'distance matrix', ClusterWarning, stacklevel=2) y = distance.pdist(y, metric) else: raise ValueError("`y` must be 1 or 2 dimensional.") if not lazy and not xp.all(xp.isfinite(y)): raise ValueError("The condensed distance matrix must contain only " "finite values.") n = distance.num_obs_y(y) method_code = _LINKAGE_METHODS[method] def cy_linkage(y, validate): if 
validate and not np.all(np.isfinite(y)): raise ValueError("The condensed distance matrix must contain only " "finite values.") if method == 'single': return _hierarchy.mst_single_linkage(y, n) elif method in ('complete', 'average', 'weighted', 'ward'): return _hierarchy.nn_chain(y, n, method_code) else: return _hierarchy.fast_linkage(y, n, method_code) result = xpx.lazy_apply(cy_linkage, y, validate=lazy, shape=(n - 1, 4), dtype=xp.float64, as_numpy=True, xp=xp) if optimal_ordering: return optimal_leaf_ordering(result, y) else: return result
ClusterWarning
python
getsentry__sentry
src/sentry/integrations/vercel/integration.py
{ "start": 4008, "end": 7550 }
class ____: """ Builder for creating Vercel environment variable maps. env_var_map = ( VercelEnvVarMapBuilder() .with_organization(organization) .with_project(project) .with_project_key(project_key) .with_auth_token(auth_token) .with_framework(framework) .build() ) """ def __init__(self) -> None: self._organization: RpcOrganization | None = None self._project: RpcProject | None = None self._project_key: RpcProjectKey | None = None self._auth_token: str | None = None self._framework: str | None = None def with_organization(self, organization: RpcOrganization) -> Self: self._organization = organization return self def with_project(self, project: RpcProject) -> Self: self._project = project return self def with_project_key(self, project_key: RpcProjectKey) -> Self: self._project_key = project_key return self def with_auth_token(self, auth_token: str | None) -> Self: self._auth_token = auth_token return self def with_framework(self, framework: str | None) -> Self: self._framework = framework return self def build(self) -> dict[str, VercelEnvVarDefinition]: if self._organization is None: raise ValueError("organization is required") if self._project is None: raise ValueError("project is required") if self._project_key is None: raise ValueError("project_key is required") is_next_js = self._framework == "nextjs" dsn_env_name = "NEXT_PUBLIC_SENTRY_DSN" if is_next_js else "SENTRY_DSN" return { "SENTRY_ORG": { "type": "encrypted", "value": self._organization.slug, "target": ["production", "preview"], }, "SENTRY_PROJECT": { "type": "encrypted", "value": self._project.slug, "target": ["production", "preview"], }, dsn_env_name: { "type": "encrypted", "value": self._project_key.dsn_public, "target": [ "production", "preview", "development", # The DSN is the only value that makes sense to have available locally via Vercel CLI's `vercel dev` command ], }, "SENTRY_AUTH_TOKEN": { "type": "encrypted", "value": self._auth_token, "target": ["production", "preview"], }, 
"VERCEL_GIT_COMMIT_SHA": { "type": "system", "value": "VERCEL_GIT_COMMIT_SHA", "target": ["production", "preview"], }, "SENTRY_VERCEL_LOG_DRAIN_URL": { "type": "encrypted", "value": f"{self._project_key.integration_endpoint}vercel/logs/", "target": ["production", "preview"], }, "SENTRY_OTLP_TRACES_URL": { "type": "encrypted", "value": f"{self._project_key.integration_endpoint}otlp/v1/traces", "target": ["production", "preview"], }, "SENTRY_PUBLIC_KEY": { "type": "encrypted", "value": self._project_key.public_key, "target": ["production", "preview"], }, }
VercelEnvVarMapBuilder
python
sphinx-doc__sphinx
sphinx/ext/autodoc/_legacy_class_based/_documenters.py
{ "start": 41564, "end": 42451 }
class ____(Documenter): """Specialized Documenter subclass for objects on module level (functions, classes, data/constants). """ def resolve_name( self, modname: str | None, parents: Any, path: str, base: str ) -> tuple[str | None, list[str]]: if modname is not None: return modname, [*parents, base] if path: modname = path.rstrip('.') return modname, [*parents, base] # if documenting a toplevel object without explicit module, # it can be contained in another auto directive ... modname = self._current_document.autodoc_module # ... or in the scope of a module directive if not modname: modname = self.env.ref_context.get('py:module') # ... else, it stays None, which means invalid return modname, [*parents, base]
ModuleLevelDocumenter
python
django__django
tests/prefetch_related/models.py
{ "start": 5771, "end": 6072 }
class ____(models.Model): class CustomUUIDField(models.UUIDField): def get_prep_value(self, value): return str(value) id = CustomUUIDField(primary_key=True, default=uuid.uuid4) name = models.CharField(max_length=30) # Models for lookup ordering tests
ArticleCustomUUID
python
getsentry__sentry
src/sentry/apidocs/examples/tags_examples.py
{ "start": 1533, "end": 1929 }
class ____: GROUP_TAGKEY_DETAILS = OpenApiExample( "Return a specific tag's details", value=SIMPLE_TAG_DETAILS, response_only=True, status_codes=["200"], ) GROUP_TAGKEY_VALUES = OpenApiExample( "Return all tag values for a specific tag", value=SIMPLE_TAG_VALUES, response_only=True, status_codes=["200"], )
TagsExamples
python
pypa__pipenv
pipenv/patched/pip/_internal/commands/debug.py
{ "start": 5407, "end": 7067 }
class ____(Command): """ Display debug information. """ usage = """ %prog <options>""" ignore_require_venv = True def add_options(self) -> None: cmdoptions.add_target_python_options(self.cmd_opts) self.parser.insert_option_group(0, self.cmd_opts) self.parser.config.load() def run(self, options: Values, args: List[str]) -> int: logger.warning( "This command is only meant for debugging. " "Do not use this with automation for parsing and getting these " "details, since the output and options of this command may " "change without notice." ) show_value("pip version", get_pip_version()) show_value("sys.version", sys.version) show_value("sys.executable", sys.executable) show_value("sys.getdefaultencoding", sys.getdefaultencoding()) show_value("sys.getfilesystemencoding", sys.getfilesystemencoding()) show_value( "locale.getpreferredencoding", locale.getpreferredencoding(), ) show_value("sys.platform", sys.platform) show_sys_implementation() show_value("'cert' config value", ca_bundle_info(self.parser.config)) show_value("REQUESTS_CA_BUNDLE", os.environ.get("REQUESTS_CA_BUNDLE")) show_value("CURL_CA_BUNDLE", os.environ.get("CURL_CA_BUNDLE")) show_value("pipenv.patched.pip._vendor.certifi.where()", where()) show_value("pipenv.patched.pip._vendor.DEBUNDLED", pipenv.patched.pip._vendor.DEBUNDLED) show_vendor_versions() show_tags(options) return SUCCESS
DebugCommand
python
Lightning-AI__lightning
examples/pytorch/domain_templates/generative_adversarial_net.py
{ "start": 2851, "end": 7390 }
class ____(LightningModule): """ >>> GAN(img_shape=(1, 8, 8)) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE GAN( (generator): Generator( (model): Sequential(...) ) (discriminator): Discriminator( (model): Sequential(...) ) ) """ def __init__( self, img_shape: tuple = (1, 28, 28), lr: float = 0.0002, b1: float = 0.5, b2: float = 0.999, latent_dim: int = 100, ): super().__init__() self.save_hyperparameters() self.automatic_optimization = False # networks self.generator = Generator(latent_dim=self.hparams.latent_dim, img_shape=img_shape) self.discriminator = Discriminator(img_shape=img_shape) self.validation_z = torch.randn(8, self.hparams.latent_dim) self.example_input_array = torch.zeros(2, self.hparams.latent_dim) def forward(self, z): return self.generator(z) @staticmethod def adversarial_loss(y_hat, y): return F.binary_cross_entropy_with_logits(y_hat, y) def training_step(self, batch): imgs, _ = batch opt_g, opt_d = self.optimizers() # sample noise z = torch.randn(imgs.shape[0], self.hparams.latent_dim) z = z.type_as(imgs) # Train generator # ground truth result (ie: all fake) # put on GPU because we created this tensor inside training_loop valid = torch.ones(imgs.size(0), 1) valid = valid.type_as(imgs) self.toggle_optimizer(opt_g) # adversarial loss is binary cross-entropy g_loss = self.adversarial_loss(self.discriminator(self(z)), valid) opt_g.zero_grad() self.manual_backward(g_loss) opt_g.step() self.untoggle_optimizer(opt_g) # Train discriminator # Measure discriminator's ability to classify real from generated samples # how well can it label as real? valid = torch.ones(imgs.size(0), 1) valid = valid.type_as(imgs) self.toggle_optimizer(opt_d) real_loss = self.adversarial_loss(self.discriminator(imgs), valid) # how well can it label as fake? 
fake = torch.zeros(imgs.size(0), 1) fake = fake.type_as(imgs) fake_loss = self.adversarial_loss(self.discriminator(self(z).detach()), fake) # discriminator loss is the average of these d_loss = (real_loss + fake_loss) / 2 opt_d.zero_grad() self.manual_backward(d_loss) opt_d.step() self.untoggle_optimizer(opt_d) self.log_dict({"d_loss": d_loss, "g_loss": g_loss}) def configure_optimizers(self): lr = self.hparams.lr b1 = self.hparams.b1 b2 = self.hparams.b2 opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(b1, b2)) opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2)) return opt_g, opt_d def on_train_epoch_end(self): z = self.validation_z.type_as(self.generator.model[0].weight) # log sampled images sample_imgs = self(z) grid = torchvision.utils.make_grid(sample_imgs) for logger in self.loggers: logger.experiment.add_image("generated_images", grid, self.current_epoch) def main(args: Namespace) -> None: # ------------------------ # 1 INIT LIGHTNING MODEL # ------------------------ model = GAN(lr=args.lr, b1=args.b1, b2=args.b2, latent_dim=args.latent_dim) # ------------------------ # 2 INIT TRAINER # ------------------------ # If use distributed training PyTorch recommends to use DistributedDataParallel. 
# See: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel dm = MNISTDataModule() trainer = Trainer(accelerator="gpu", devices=1) # ------------------------ # 3 START TRAINING # ------------------------ trainer.fit(model, dm) if __name__ == "__main__": cli_lightning_logo() parser = ArgumentParser() # Hyperparameters parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate") parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient") parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of second order momentum of gradient") parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space") args = parser.parse_args() main(args)
GAN
python
run-llama__llama_index
llama-index-core/llama_index/core/evaluation/faithfulness.py
{ "start": 3738, "end": 7460 }
class ____(BaseEvaluator): """ Faithfulness evaluator. Evaluates whether a response is faithful to the contexts (i.e. whether the response is supported by the contexts or hallucinated.) This evaluator only considers the response string and the list of context strings. Args: raise_error(bool): Whether to raise an error when the response is invalid. Defaults to False. eval_template(Optional[Union[str, BasePromptTemplate]]): The template to use for evaluation. refine_template(Optional[Union[str, BasePromptTemplate]]): The template to use for refining the evaluation. """ def __init__( self, llm: Optional[LLM] = None, raise_error: bool = False, eval_template: Optional[Union[str, BasePromptTemplate]] = None, refine_template: Optional[Union[str, BasePromptTemplate]] = None, ) -> None: """Init params.""" self._llm = llm or Settings.llm self._raise_error = raise_error self._eval_template: BasePromptTemplate if isinstance(eval_template, str): self._eval_template = PromptTemplate(eval_template) if isinstance(eval_template, BasePromptTemplate): self._eval_template = eval_template else: model_name = self._llm.metadata.model_name self._eval_template = TEMPLATES_CATALOG.get( model_name, DEFAULT_EVAL_TEMPLATE ) self._refine_template: BasePromptTemplate if isinstance(refine_template, str): self._refine_template = PromptTemplate(refine_template) else: self._refine_template = refine_template or DEFAULT_REFINE_TEMPLATE def _get_prompts(self) -> PromptDictType: """Get prompts.""" return { "eval_template": self._eval_template, "refine_template": self._refine_template, } def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" if "eval_template" in prompts: self._eval_template = prompts["eval_template"] if "refine_template" in prompts: self._refine_template = prompts["refine_template"] async def aevaluate( self, query: str | None = None, response: str | None = None, contexts: Sequence[str] | None = None, sleep_time_in_seconds: int = 0, **kwargs: Any, ) -> 
EvaluationResult: """Evaluate whether the response is faithful to the contexts.""" del kwargs # Unused await asyncio.sleep(sleep_time_in_seconds) if contexts is None or response is None: raise ValueError("contexts and response must be provided") docs = [Document(text=context) for context in contexts] index = SummaryIndex.from_documents(docs) query_engine = index.as_query_engine( llm=self._llm, text_qa_template=self._eval_template, refine_template=self._refine_template, ) response_obj = await query_engine.aquery(response) raw_response_txt = str(response_obj) if "yes" in raw_response_txt.lower(): passing = True else: passing = False if self._raise_error: raise ValueError("The response is invalid") return EvaluationResult( query=query, response=response, contexts=contexts, passing=passing, score=1.0 if passing else 0.0, feedback=raw_response_txt, ) # legacy: backward compatibility ResponseEvaluator = FaithfulnessEvaluator
FaithfulnessEvaluator
python
ray-project__ray
python/ray/data/aggregate.py
{ "start": 22422, "end": 26962 }
class ____(AggregateFnV2[List[Union[int, float]], float]): """Defines standard deviation aggregation. Uses Welford's online algorithm for numerical stability. This method computes the standard deviation in a single pass. Results may differ slightly from libraries like NumPy or Pandas that use a two-pass algorithm but are generally more accurate. See: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm Example: .. testcode:: import ray from ray.data.aggregate import Std ds = ray.data.range(100) # Schema: {'id': int64} ds = ds.add_column("group_key", lambda x: x % 3) # Schema: {'id': int64, 'group_key': int64} # Calculating the standard deviation per group: result = ds.groupby("group_key").aggregate(Std(on="id")).take_all() # result: [{'group_key': 0, 'std(id)': ...}, # {'group_key': 1, 'std(id)': ...}, # {'group_key': 2, 'std(id)': ...}] Args: on: The name of the column to calculate standard deviation on. ddof: Delta Degrees of Freedom. The divisor used in calculations is `N - ddof`, where `N` is the number of elements. Default is 1. ignore_nulls: Whether to ignore null values. Default is True. alias_name: Optional name for the resulting column. """ def __init__( self, on: Optional[str] = None, ddof: int = 1, ignore_nulls: bool = True, alias_name: Optional[str] = None, ): super().__init__( alias_name if alias_name else f"std({str(on)})", on=on, ignore_nulls=ignore_nulls, # Accumulator: [M2, mean, count] # M2: sum of squares of differences from the current mean # mean: current mean # count: current count of non-null elements # We need to copy the list as it might be modified in-place by some aggregations. zero_factory=lambda: list([0, 0, 0]), # noqa: C410 ) self._ddof = ddof def aggregate_block(self, block: Block) -> List[Union[int, float]]: block_acc = BlockAccessor.for_block(block) count = block_acc.count(self._target_col_name, ignore_nulls=self._ignore_nulls) if count == 0 or count is None: # Empty or all null. 
return None sum_ = block_acc.sum(self._target_col_name, self._ignore_nulls) if is_null(sum_): # If sum is null (e.g., ignore_nulls=False and a null was encountered), # return as is to prevent type conversions. return sum_ mean = sum_ / count M2 = block_acc.sum_of_squared_diffs_from_mean( self._target_col_name, self._ignore_nulls, mean ) return [M2, mean, count] def combine( self, current_accumulator: List[float], new: List[float] ) -> List[float]: # Merges two accumulators [M2, mean, count] using a parallel algorithm. # See: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm M2_a, mean_a, count_a = current_accumulator M2_b, mean_b, count_b = new delta = mean_b - mean_a count = count_a + count_b # NOTE: We use this mean calculation since it's more numerically # stable than mean_a + delta * count_b / count, which actually # deviates from Pandas in the ~15th decimal place and causes our # exact comparison tests to fail. mean = (mean_a * count_a + mean_b * count_b) / count # Update the sum of squared differences. M2 = M2_a + M2_b + (delta**2) * count_a * count_b / count return [M2, mean, count] def finalize(self, accumulator: List[float]) -> Optional[float]: # Compute the final standard deviation from the accumulated # sum of squared differences from current mean and the count. # Final accumulator: [M2, mean, count] M2, mean, count = accumulator # Denominator for variance calculation is count - ddof if count - self._ddof <= 0: # If count - ddof is not positive, variance/std is undefined (or zero). # Return NaN, consistent with pandas/numpy. return np.nan # Standard deviation is the square root of variance (M2 / (count - ddof)) return math.sqrt(M2 / (count - self._ddof)) @PublicAPI
Std
python
sqlalchemy__sqlalchemy
test/dialect/postgresql/test_types.py
{ "start": 148519, "end": 151250 }
class ____(fixtures.TestBase): __backend__ = True __only_on__ = "postgresql" def test_concatenation(self, connection): coltype = BIT(varying=True) q = select( literal(BitString("1111"), coltype).concat(BitString("0000")) ) r = connection.execute(q).first() eq_(r[0], BitString("11110000")) def test_invert_operator(self, connection): coltype = BIT(4) q = select(literal(BitString("0010"), coltype).bitwise_not()) r = connection.execute(q).first() eq_(r[0], BitString("1101")) def test_and_operator(self, connection): coltype = BIT(6) q1 = select( literal(BitString("001010"), coltype) & literal(BitString("010111"), coltype) ) r1 = connection.execute(q1).first() eq_(r1[0], BitString("000010")) q2 = select( literal(BitString("010101"), coltype) & BitString("001011") ) r2 = connection.execute(q2).first() eq_(r2[0], BitString("000001")) def test_or_operator(self, connection): coltype = BIT(6) q1 = select( literal(BitString("001010"), coltype) | literal(BitString("010111"), coltype) ) r1 = connection.execute(q1).first() eq_(r1[0], BitString("011111")) q2 = select( literal(BitString("010101"), coltype) | BitString("001011") ) r2 = connection.execute(q2).first() eq_(r2[0], BitString("011111")) def test_xor_operator(self, connection): coltype = BIT(6) q1 = select( literal(BitString("001010"), coltype).bitwise_xor( literal(BitString("010111"), coltype) ) ) r1 = connection.execute(q1).first() eq_(r1[0], BitString("011101")) q2 = select( literal(BitString("010101"), coltype).bitwise_xor( BitString("001011") ) ) r2 = connection.execute(q2).first() eq_(r2[0], BitString("011110")) def test_lshift_operator(self, connection): coltype = BIT(6) q = select( literal(BitString("001010"), coltype), literal(BitString("001010"), coltype) << 1, ) r = connection.execute(q).first() eq_(tuple(r), (BitString("001010"), BitString("010100"))) def test_rshift_operator(self, connection): coltype = BIT(6) q = select( literal(BitString("001010"), coltype), literal(BitString("001010"), coltype) >> 1, ) r = 
connection.execute(q).first() eq_(tuple(r), (BitString("001010"), BitString("000101")))
BitTests
python
kamyu104__LeetCode-Solutions
Python/flip-equivalent-binary-trees.py
{ "start": 66, "end": 227 }
class ____(object): def __init__(self, x): self.val = x self.left = None self.right = None import collections # bfs solution
TreeNode
python
run-llama__llama_index
llama-index-integrations/output_parsers/llama-index-output-parsers-langchain/llama_index/output_parsers/langchain/base.py
{ "start": 278, "end": 1884 }
class ____(BaseOutputParser): """Langchain output parser.""" def __init__( self, output_parser: "LCOutputParser", format_key: Optional[str] = None ) -> None: """Init params.""" self._output_parser = output_parser self._format_key = format_key def parse(self, output: str) -> Any: """Parse, validate, and correct errors programmatically.""" # TODO: this object may be stringified by our upstream llmpredictor, # figure out better # ways to "convert" the object to a proper string format. return self._output_parser.parse(output) def format(self, query: str) -> str: """Format a query with structured output formatting instructions.""" format_instructions = self._output_parser.get_format_instructions() # TODO: this is a temporary hack. if there's curly brackets in the format # instructions (and query is a string template), we need to # escape the curly brackets in the format instructions to preserve the # overall template. query_tmpl_vars = { v for _, v, _, _ in Formatter().parse(query) if v is not None } if len(query_tmpl_vars) > 0: format_instructions = format_instructions.replace("{", "{{") format_instructions = format_instructions.replace("}", "}}") if self._format_key is not None: fmt_query = query.format(**{self._format_key: format_instructions}) else: fmt_query = query + "\n\n" + format_instructions return fmt_query
LangchainOutputParser
python
patrick-kidger__equinox
equinox/_ad.py
{ "start": 31163, "end": 42185 }
class ____: """As `jax.custom_vjp`, but with a nicer interface. Usage is: ```python @equinox.filter_custom_vjp def fn(vjp_arg, *args, **kwargs): # `vjp_arg` is some PyTree of arbitrary Python objects. # `args`, `kwargs` contain arbitrary Python objects. ... return out # some PyTree of arbitrary Python objects. @fn.def_fwd def fn_fwd(perturbed, vjp_arg, *args, **kwargs): # `perturbed` is a pytree with the same structure as `vjp_arg`. Every leaf is # either `True` or `False`, indicating whether that leaf is being # differentiated. (All leaves that are not floating-point arrays will # necessarily have `False`. Some floating-point arrays might happen not to be # differentiated either.) ... # Should return `out` as before. `residuals` can be any collection of JAX # arrays you want to keep around for the backward pass. return out, residuals @fn.def_bwd def fn_bwd(residuals, grad_obj, perturbed, vjp_arg, *args, **kwargs): # `grad_obj` will have `None` as the gradient for any leaves of `out` that were # not differentiated. ... # `grad_vjp_arg` should be a pytree with the same structure as `vjp_arg`. # It can have `None` leaves to indicate that that argument has zero gradient. # (E.g. if the leaf was not a JAX array.) return grad_vjp_arg ``` The key differences to `jax.custom_vjp` are that: - Only the gradient of the first argument, `vjp_arg`, should be computed on the backward pass. Everything else will automatically have zero gradient. - You do not need to distinguish differentiable from nondifferentiable manually. Instead you should return gradients for all perturbed arrays in the first argument. (And just put `None` on every other leaf of the PyTree.) - As a convenience, all of the inputs from the forward pass are additionally made available to you on the backward pass. - As a convenience, you can declare forward and backward passes using `def_fwd` and `def_bwd`, rather than a single `defvjp` as in core JAX. !!! 
tip If you need gradients with respect to multiple arguments, then just pack them together as a tuple via the first argument `vjp_arg`. (See also [`equinox.filter_grad`][] for a similar trick.) """ def __init__(self, fn): self.fn = fn self.fn_fwd: Callable | None = None self.fn_bwd: Callable | None = None self.fn_wrapped = None def def_fwd(self, fn_fwd): self.fn_fwd = fn_fwd if self.fn_bwd is not None: self._defvjp() def def_bwd(self, fn_bwd): self.fn_bwd = fn_bwd if self.fn_fwd is not None: self._defvjp() def defvjp(self, fn_fwd, fn_bwd): warnings.warn( "As of Equinox 0.10.7, `equinox.filter_custom_vjp.defvjp` is deprecated in " "favour of `.def_fwd` and `.def_bwd`. This new API supports symbolic " "zeros, which allow for more efficient autodifferentiation rules. In " "particular:\n" "- the fwd and bwd functions take an extra `perturbed` argument, which " " indicates which primals actually need a gradient. You can use this " " to skip computing the gradient for any unperturbed value. (You can " " also safely just ignore this if you wish.)\n" "- `None` was previously passed to indicate a symbolic zero gradient for " " all objects that weren't inexact arrays, but all inexact arrays " " always had an array-valued gradient. 
Now, `None` may also be passed " " to indicate that an inexact array has a symbolic zero gradient.", stacklevel=2, ) def _fn_fwd(perturbed, vjp_arg, *args, **kwargs): del perturbed out, residuals = fn_fwd(vjp_arg, *args, **kwargs) return out, (out, residuals) def _fn_bwd( residuals, grad_diff_array_out, perturbed, vjp_arg, *args, **kwargs ): del perturbed out, residuals = residuals grad_diff_array_out = jtu.tree_map( _materialise_symbolic_zero, out, grad_diff_array_out ) return fn_bwd(residuals, grad_diff_array_out, vjp_arg, *args, **kwargs) self.def_fwd(_fn_fwd) self.def_bwd(_fn_bwd) def _defvjp(self): def fn_wrapped( nonarray_vjp_arg, nonarray_args_kwargs, diff_array_vjp_arg, nondiff_array_vjp_arg, array_args_kwargs, ): vjp_arg = combine( nonarray_vjp_arg, diff_array_vjp_arg, nondiff_array_vjp_arg ) args, kwargs = combine(nonarray_args_kwargs, array_args_kwargs) out = self.fn(vjp_arg, *args, **kwargs) array_out, nonarray_out = partition(out, is_array) diff_array_out, nondiff_array_out = partition(array_out, is_inexact_array) return diff_array_out, nondiff_array_out, Static(nonarray_out) def fn_fwd_wrapped( nonarray_vjp_arg, nonarray_args_kwargs, diff_array_vjp_arg, nondiff_array_vjp_arg, array_args_kwargs, ): assert self.fn_fwd is not None nonarray_perturbed = jtu.tree_map(lambda _: False, nonarray_vjp_arg) nondiff_array_perturbed = jtu.tree_map( lambda _: False, nondiff_array_vjp_arg ) diff_array_perturbed = jtu.tree_map(_get_perturbed, diff_array_vjp_arg) perturbed = combine( nonarray_perturbed, nondiff_array_perturbed, diff_array_perturbed ) diff_array_vjp_arg = jtu.tree_map(_get_value, diff_array_vjp_arg) nondiff_array_vjp_arg = jtu.tree_map( _get_value_assert_unperturbed, nondiff_array_vjp_arg ) array_args_kwargs = jtu.tree_map( _get_value_assert_unperturbed, array_args_kwargs ) vjp_arg = combine( nonarray_vjp_arg, diff_array_vjp_arg, nondiff_array_vjp_arg ) args, kwargs = combine(nonarray_args_kwargs, array_args_kwargs) out, residuals = 
self.fn_fwd(perturbed, vjp_arg, *args, **kwargs) array_out, nonarray_out = partition(out, is_array) array_residuals, nonarray_residuals = partition(residuals, is_array) diff_array_out, nondiff_array_out = partition(array_out, is_inexact_array) out = diff_array_out, nondiff_array_out, Static(nonarray_out) return out, ( array_residuals, Static(nonarray_residuals), diff_array_vjp_arg, nondiff_array_vjp_arg, array_args_kwargs, perturbed, ) def fn_bwd_wrapped(nonarray_vjp_arg, nonarray_args_kwargs, residuals, grad_out): assert self.fn_bwd is not None ( array_residuals, nonarray_residuals, diff_array_vjp_arg, nondiff_array_vjp_arg, array_args_kwargs, perturbed, ) = residuals residuals = combine(array_residuals, nonarray_residuals.value) vjp_arg = combine( nonarray_vjp_arg, diff_array_vjp_arg, nondiff_array_vjp_arg ) args, kwargs = combine(nonarray_args_kwargs, array_args_kwargs) grad_diff_array_out, _, _ = grad_out grad_diff_array_out = jtu.tree_map(_zero_to_none, grad_diff_array_out) out = self.fn_bwd( residuals, grad_diff_array_out, perturbed, vjp_arg, *args, **kwargs ) if jtu.tree_structure(out, is_leaf=_is_none) != jtu.tree_structure( diff_array_vjp_arg, is_leaf=_is_none ): raise RuntimeError( "custom_vjp gradients must have the same structure as " "`equinox.filter(vjp_arg, equinox.is_inexact_array)`, where " "`vjp_arg` is the first argument used in the forward pass." 
) out = jtu.tree_map(_none_to_zero, out, diff_array_vjp_arg, is_leaf=_is_none) # None is the gradient through nondiff_array_vjp_arg and array_args_kwargs return out, None, None fn_wrapped = jax.custom_vjp(fn_wrapped, nondiff_argnums=(0, 1)) fn_wrapped.defvjp(fn_fwd_wrapped, fn_bwd_wrapped, symbolic_zeros=True) self.fn_wrapped = fn_wrapped def __call__(self, vjp_arg, /, *args, **kwargs): if self.fn_wrapped is None: raise RuntimeError( f"`def_fwd` or `def_bwd` not yet called for {self.fn.__name__}" ) array_vjp_arg, nonarray_vjp_arg = partition(vjp_arg, is_array) diff_array_vjp_arg, nondiff_array_vjp_arg = partition( array_vjp_arg, is_inexact_array ) array_args_kwargs, nonarray_args_kwargs = partition((args, kwargs), is_array) array_args_kwargs = nondifferentiable( array_args_kwargs, name="`*args` and `**kwargs` to `filter_custom_vjp`" ) out = self.fn_wrapped( nonarray_vjp_arg, nonarray_args_kwargs, diff_array_vjp_arg, nondiff_array_vjp_arg, array_args_kwargs, ) diff_array_out, nondiff_array_out, nonarray_out = out return combine(diff_array_out, nondiff_array_out, nonarray_out.value) def filter_checkpoint( fun: Callable[_P, _T] = sentinel, *, prevent_cse: bool = True, policy: Callable[..., bool] | None = None, ) -> Callable[_P, _T]: """Filtered version of `jax.checkpoint`. Gradient checkpointing is a technique for reducing memory usage during backpropagation, especially when used with reverse mode automatic differentiation (e.g., `jax.grad` or `equinox.filter_grad`). **Arguments:** - `fun`: The function to be checkpointed. Will be called as `fun(*args, **kwargs)`. Can return an arbitrary PyTree. - `prevent_cse`: If `True` (the default), then JAX will not perform common subexpression elimination. Please see the documentation for `jax.checkpoint ` for more details. - `policy`: Callable for controlling which intermediate values should be rematerialized. It should be one of the attributes of `jax.checkpoint_policies`. 
""" if fun is sentinel: return ft.partial( # pyright: ignore filter_checkpoint, prevent_cse=prevent_cse, policy=policy, ) return module_update_wrapper( _CheckpointWrapper(fun, prevent_cse=prevent_cse, policy=policy) )
filter_custom_vjp
python
streamlit__streamlit
lib/tests/streamlit/commands/navigation_test.py
{ "start": 1145, "end": 18531 }
class ____(DeltaGeneratorTestCase): """Test st.navigation""" def test_no_pages(self): """Test that an error is thrown with no pages""" with pytest.raises(StreamlitAPIException): st.navigation([]) def test_single_page(self): """Test that a single page is returned""" single_page = st.Page("page1.py") page = st.navigation([single_page]) assert page == single_page def test_single_page_with_path(self): """Test that a single page is returned with a Path object""" single_page = st.Page(Path("page1.py")) page = st.navigation([single_page]) assert page == single_page def test_first_page_is_default(self): """Test that the first page is returned if there are multiple pages and no default""" single_page = st.Page("page1.py") page = st.navigation([single_page, st.Page("page2.py"), st.Page("page3.py")]) assert page == single_page assert page._default def test_default_page_returned_if_specified(self): """Test that the first page is returned if there are multiple pages and no default""" default_page = st.Page("page3.py", default=True) page = st.navigation([st.Page("page1.py"), st.Page("page2.py"), default_page]) assert page == default_page assert page._default def test_multiple_defaults_raises_APIException(self): """Test that an error is thrown if multiple defaults are specified""" with pytest.raises(StreamlitAPIException): st.navigation( [st.Page("page1.py", default=True), st.Page("page2.py", default=True)] ) def test_same_url_paths_raises_APIException(self): """Test that an error is thrown if same url_paths are specified""" with pytest.raises(StreamlitAPIException): st.navigation( [ st.Page("page1.py", url_path="foo"), st.Page("page2.py", url_path="foo"), ] ) def test_same_inferred_url_paths_raises_APIException(self): """Test that an error is thrown if the same inferred url_paths are specified""" with pytest.raises(StreamlitAPIException): st.navigation( [ st.Page("page1.py", url_path="foo"), st.Page("foo.py"), ] ) def test_page_found_by_hash(self): found_page = 
st.Page("page2.py") self.script_run_ctx.pages_manager.set_script_intent(found_page._script_hash, "") page = st.navigation([st.Page("page1.py"), found_page, st.Page("page3.py")]) assert page == found_page def test_page_found_by_name(self): found_page = st.Page("page2.py") self.script_run_ctx.pages_manager.set_script_intent("", "page2") page = st.navigation([st.Page("page1.py"), found_page, st.Page("page3.py")]) assert page == found_page assert self.script_run_ctx.page_script_hash == found_page._script_hash def test_page_not_found_by_name(self): default_page = st.Page("page1.py") self.script_run_ctx.pages_manager.set_script_intent("", "bad_page") page = st.navigation([default_page, st.Page("page2.py"), st.Page("page3.py")]) c = self.get_message_from_queue(-2) assert c.HasField("page_not_found") assert page == default_page assert self.script_run_ctx.page_script_hash == default_page._script_hash def test_page_not_found_by_hash_returns_default(self): default_page = st.Page("page1.py") self.script_run_ctx.pages_manager.set_script_intent("bad_hash", "") page = st.navigation([default_page, st.Page("page2.py"), st.Page("page3.py")]) assert page == default_page assert self.script_run_ctx.page_script_hash == default_page._script_hash def test_navigation_message(self): st.navigation( { "Section 1": [st.Page("page1.py")], "Section 2": [st.Page("page2.py"), st.Page("page3.py")], } ) c = self.get_message_from_queue().navigation assert len(c.app_pages) == 3 assert c.app_pages[0].section_header == "Section 1" assert c.app_pages[1].section_header == "Section 2" assert c.app_pages[2].section_header == "Section 2" assert c.app_pages[0].is_default assert not c.app_pages[1].is_default assert not c.app_pages[2].is_default assert c.position == NavigationProto.Position.SIDEBAR assert not c.expanded assert c.sections == ["Section 1", "Section 2"] def test_navigation_message_with_position(self): st.navigation( [st.Page("page1.py"), st.Page("page2.py"), st.Page("page3.py")], 
position="hidden", ) c = self.get_message_from_queue().navigation assert len(c.app_pages) == 3 assert c.app_pages[0].section_header == "" assert c.app_pages[1].section_header == "" assert c.app_pages[2].section_header == "" assert c.app_pages[0].is_default assert not c.app_pages[1].is_default assert not c.app_pages[2].is_default assert c.position == NavigationProto.Position.HIDDEN assert not c.expanded assert c.sections == [""] @patch_config_options({"client.showSidebarNavigation": False}) def test_navigation_message_with_sidebar_nav_config(self): st.navigation( [st.Page("page1.py"), st.Page("page2.py"), st.Page("page3.py")], ) c = self.get_message_from_queue().navigation assert len(c.app_pages) == 3 assert c.app_pages[0].section_header == "" assert c.app_pages[1].section_header == "" assert c.app_pages[2].section_header == "" assert c.app_pages[0].is_default assert not c.app_pages[1].is_default assert not c.app_pages[2].is_default assert c.position == NavigationProto.Position.HIDDEN assert not c.expanded assert c.sections == [""] def test_navigation_message_with_expanded(self): st.navigation( [st.Page("page1.py"), st.Page("page2.py"), st.Page("page3.py")], expanded=True, ) c = self.get_message_from_queue().navigation assert len(c.app_pages) == 3 assert c.app_pages[0].section_header == "" assert c.app_pages[1].section_header == "" assert c.app_pages[2].section_header == "" assert c.app_pages[0].is_default assert not c.app_pages[1].is_default assert not c.app_pages[2].is_default assert c.position == NavigationProto.Position.SIDEBAR assert c.expanded assert c.sections == [""] def test_convert_to_streamlit_page_with_string(self): """Test converting string path to StreamlitPage""" page = convert_to_streamlit_page("page1.py") assert isinstance(page, StreamlitPage) assert isinstance(page._page, Path) assert str(page._page) == str(Path("page1.py").absolute()) def test_convert_to_streamlit_page_with_function(self): """Test converting function to StreamlitPage""" def 
test_page(): pass page = convert_to_streamlit_page(test_page) assert isinstance(page, StreamlitPage) assert page._page == test_page def test_convert_to_streamlit_page_with_streamlit_page(self): """Test passing StreamlitPage directly""" original_page = st.Page("page1.py") page = convert_to_streamlit_page(original_page) assert page == original_page def test_convert_to_streamlit_page_invalid_type(self): """Test that invalid types raise exception""" with pytest.raises(StreamlitAPIException) as exc_info: convert_to_streamlit_page(123) assert "Invalid page type" in str(exc_info.value) def test_navigation_with_string_list(self): """Test navigation with list of strings""" pages = ["page1.py", "page2.py", "page3.py"] page = st.navigation(pages) assert isinstance(page, StreamlitPage) c = self.get_message_from_queue().navigation assert len(c.app_pages) == 3 assert c.app_pages[0].is_default assert not c.app_pages[1].is_default assert not c.app_pages[2].is_default def test_navigation_with_function_list(self): """Test navigation with list of functions""" def page1(): pass def page2(): pass pages = [page1, page2] page = st.navigation(pages) assert isinstance(page, StreamlitPage) c = self.get_message_from_queue().navigation assert len(c.app_pages) == 2 assert c.app_pages[0].is_default assert not c.app_pages[1].is_default def test_navigation_with_mixed_list(self): """Test navigation with mixed list of strings, functions and StreamlitPages""" def page2(): pass pages = ["page1.py", page2, st.Page("page3.py")] page = st.navigation(pages) assert isinstance(page, StreamlitPage) c = self.get_message_from_queue().navigation assert len(c.app_pages) == 3 assert c.app_pages[0].is_default assert not c.app_pages[1].is_default assert not c.app_pages[2].is_default def test_navigation_with_sections_and_mixed_types(self): """Test navigation with sections containing mixed types""" def page2(): pass pages = {"Section 1": ["page1.py", page2], "Section 2": [st.Page("page3.py")]} st.navigation(pages) c 
= self.get_message_from_queue().navigation assert len(c.app_pages) == 3 assert c.app_pages[0].section_header == "Section 1" assert c.app_pages[1].section_header == "Section 1" assert c.app_pages[2].section_header == "Section 2" def test_navigation_duplicate_paths_with_mixed_types(self): """Test that duplicate paths raise exception with mixed types""" def foo(): pass with pytest.raises(StreamlitAPIException): st.navigation( [ "foo.py", foo, # This should create same URL path as foo.py ] ) def test_convert_to_streamlit_page_with_pathlib_path(self): """Test converting pathlib.Path to StreamlitPage""" page = convert_to_streamlit_page(Path("page1.py")) assert isinstance(page, StreamlitPage) assert isinstance(page._page, Path) assert str(page._page) == str(Path("page1.py").absolute()) def test_navigation_with_pathlib_path_list(self): """Test navigation with list of pathlib.Path""" pages = [Path("page1.py"), Path("page2.py"), Path("page3.py")] page = st.navigation(pages) assert isinstance(page, StreamlitPage) c = self.get_message_from_queue().navigation assert len(c.app_pages) == 3 assert c.app_pages[0].is_default assert not c.app_pages[1].is_default assert not c.app_pages[2].is_default def test_navigation_with_mixed_list_including_pathlib_path(self): """Test navigation with mixed list including pathlib.Path""" def page2(): pass pages = [Path("page1.py"), page2, st.Page("page3.py")] page = st.navigation(pages) assert isinstance(page, StreamlitPage) c = self.get_message_from_queue().navigation assert len(c.app_pages) == 3 assert c.app_pages[0].is_default assert not c.app_pages[1].is_default assert not c.app_pages[2].is_default def test_navigation_with_sections_and_mixed_types_including_pathlib_path(self): """Test navigation with sections containing mixed types, including pathlib.Path""" def page2(): pass pages = { "Section 1": [Path("page1.py"), page2], "Section 2": [st.Page("page3.py")], } st.navigation(pages) c = self.get_message_from_queue().navigation assert 
len(c.app_pages) == 3 assert c.app_pages[0].section_header == "Section 1" assert c.app_pages[1].section_header == "Section 1" assert c.app_pages[2].section_header == "Section 2" def test_navigation_sends_prefixed_emoji_icons(self): """Test navigation with pages with emoji icons prefix them correctly""" page = st.navigation( [ st.Page("page1.py", icon="🚀"), st.Page("page2.py", icon=":material/settings:"), ] ) assert isinstance(page, StreamlitPage) c = self.get_message_from_queue().navigation assert len(c.app_pages) == 2 assert c.app_pages[0].icon == "emoji:🚀" assert c.app_pages[1].icon == ":material/settings:" def test_navigation_duplicate_paths_with_mixed_types_including_pathlib_path( self, ): """Test that duplicate paths raise exception with mixed types, including pathlib.Path""" def foo(): pass with pytest.raises(StreamlitAPIException): st.navigation( [ Path("foo.py"), foo, # This should create same URL path as foo.py ] ) def test_navigation_with_path_and_string_same_name(self): with pytest.raises(StreamlitAPIException): st.navigation( [ Path("foo.py"), "foo.py", ] ) def test_navigation_with_top_position(self): """Test that position="top" produces NavigationProto.Position.TOP""" st.navigation( [st.Page("page1.py"), st.Page("page2.py"), st.Page("page3.py")], position="top", ) c = self.get_message_from_queue().navigation assert len(c.app_pages) == 3 assert c.position == NavigationProto.Position.TOP assert c.app_pages[0].is_default assert not c.app_pages[1].is_default assert not c.app_pages[2].is_default def test_navigation_with_invalid_position(self): """Test that invalid position value raises appropriate error""" with pytest.raises(StreamlitAPIException) as exc_info: st.navigation( [st.Page("page1.py"), st.Page("page2.py")], position="foo", # Invalid position ) assert "Invalid position" in str(exc_info.value) or "position must be" in str( exc_info.value ) def test_navigation_top_position_no_fallback_with_config(self): """Test that position="top" remains TOP even 
when client.showSidebarNavigation=False""" with patch_config_options({"client.showSidebarNavigation": False}): st.navigation( [st.Page("page1.py"), st.Page("page2.py"), st.Page("page3.py")], position="top", ) c = self.get_message_from_queue().navigation assert ( c.position == NavigationProto.Position.TOP ) # Should remain TOP, not fallback to HIDDEN def test_navigation_with_sidebar_position_explicit(self): """Test that position="sidebar" produces NavigationProto.Position.SIDEBAR""" st.navigation( [st.Page("page1.py"), st.Page("page2.py")], position="sidebar", ) c = self.get_message_from_queue().navigation assert c.position == NavigationProto.Position.SIDEBAR def test_navigation_with_hidden_position_explicit(self): """Test that position="hidden" produces NavigationProto.Position.HIDDEN""" st.navigation( [st.Page("page1.py"), st.Page("page2.py")], position="hidden", ) c = self.get_message_from_queue().navigation assert c.position == NavigationProto.Position.HIDDEN def test_navigation_top_position_with_sections(self): """Test top navigation with sections""" st.navigation( { "Section 1": [st.Page("page1.py"), st.Page("page2.py")], "Section 2": [st.Page("page3.py"), st.Page("page4.py")], }, position="top", ) c = self.get_message_from_queue().navigation assert len(c.app_pages) == 4 assert c.position == NavigationProto.Position.TOP assert c.app_pages[0].section_header == "Section 1" assert c.app_pages[1].section_header == "Section 1" assert c.app_pages[2].section_header == "Section 2" assert c.app_pages[3].section_header == "Section 2" assert c.sections == ["Section 1", "Section 2"] def test_navigation_position_parameter_type(self): """Test that position parameter only accepts valid literal values""" # Test with valid positions - should not raise for pos in ["sidebar", "hidden", "top"]: st.navigation([st.Page("page1.py")], position=pos) self.get_message_from_queue() # Clear queue # Test with invalid type with pytest.raises(StreamlitAPIException): 
st.navigation([st.Page("page1.py")], position=123) # type: ignore
NavigationTest
python
astropy__astropy
astropy/samp/standard_profile.py
{ "start": 4801, "end": 5707 }
class ____(socketserver.ThreadingMixIn, SimpleXMLRPCServer): """ Asynchronous multithreaded XMLRPC server. """ def __init__( self, addr, log=None, requestHandler=SAMPSimpleXMLRPCRequestHandler, logRequests=True, allow_none=True, encoding=None, ): self.log = log SimpleXMLRPCServer.__init__( self, addr, requestHandler, logRequests, allow_none, encoding ) def handle_error(self, request, client_address): if self.log is None: socketserver.BaseServer.handle_error(self, request, client_address) else: warnings.warn( ( "Exception happened during processing of request from " f"{client_address}: {sys.exc_info()[1]}" ), SAMPWarning, )
ThreadingXMLRPCServer
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/self8.py
{ "start": 291, "end": 551 }
class ____(enum.IntEnum): def __new__(cls, value: int, doc: str) -> Self: member = int.__new__(cls, value) reveal_type(member, expected_text="Self@Enum1") member._value_ = value member.__doc__ = doc return member
Enum1
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_meid.py
{ "start": 1842, "end": 4553 }
class ____(ColumnMapExpectation): """Expect column values to be valid MEID (Mobile Equipment Identifier).""" # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "all_valid": [ "AF 01 23 45 0A BC DE C", "29360 87365 0070 3710 0", "AF0123450ABCDE", "AF 01 23 45 0A BC DE C", "29360 87365 0070 3710 0", ], "some_other": [ "AF 01 23 45 0A BC DE C", "29360 87365 0070 3710 0", "AF0123450ABCDE", "AF 01 23 45 0A BC DE C", "29360 87365 0070 371X 0", ], }, "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "all_valid"}, "out": { "success": True, }, }, { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "some_other", "mostly": 1}, "out": { "success": False, }, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.to_be_valid_meid" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly",) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "maturity": "experimental", "tags": [ "hackathon-22", "experimental", "typed-entities", ], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@szecsip", # Don't forget to add your github handle here! ], "requirements": ["python-stdnum"], } if __name__ == "__main__": ExpectColumnValuesToBeValidMeid().print_diagnostic_checklist()
ExpectColumnValuesToBeValidMeid
python
modin-project__modin
modin/core/io/column_stores/parquet_dispatcher.py
{ "start": 10012, "end": 36970 }
class ____(ColumnStoreDispatcher): """Class handles utils for reading `.parquet` files.""" index_regex = re.compile(r"__index_level_\d+__") @classmethod def get_dataset(cls, path, engine, storage_options): """ Retrieve Parquet engine specific Dataset implementation. Parameters ---------- path : str, path object or file-like object The filepath of the parquet file in local filesystem or hdfs. engine : str Parquet library to use (only 'PyArrow' is supported for now). storage_options : dict Parameters for specific storage engine. Returns ------- Dataset Either a PyArrowDataset or FastParquetDataset object. """ if engine == "auto": # We follow in concordance with pandas engine_classes = [PyArrowDataset, FastParquetDataset] error_msgs = "" for engine_class in engine_classes: try: return engine_class(path, storage_options) except ImportError as err: error_msgs += "\n - " + str(err) raise ImportError( "Unable to find a usable engine; " + "tried using: 'pyarrow', 'fastparquet'.\n" + "A suitable version of " + "pyarrow or fastparquet is required for parquet " + "support.\n" + "Trying to import the above resulted in these errors:" + f"{error_msgs}" ) elif engine == "pyarrow": return PyArrowDataset(path, storage_options) elif engine == "fastparquet": return FastParquetDataset(path, storage_options) else: raise ValueError("engine must be one of 'pyarrow', 'fastparquet'") @classmethod def _determine_partitioning( cls, dataset: ColumnStoreDataset ) -> "list[list[ParquetFileToRead]]": """ Determine which partition will read certain files/row groups of the dataset. Parameters ---------- dataset : ColumnStoreDataset Returns ------- list[list[ParquetFileToRead]] Each element in the returned list describes a list of files that a partition has to read. 
""" from modin.core.storage_formats.pandas.parsers import ParquetFileToRead parquet_files = dataset.files row_groups_per_file = dataset.row_groups_per_file num_row_groups = sum(row_groups_per_file) if num_row_groups == 0: return [] num_splits = min(NPartitions.get(), num_row_groups) part_size = num_row_groups // num_splits # If 'num_splits' does not divide 'num_row_groups' then we can't cover all of # the row groups using the original 'part_size'. According to the 'reminder' # there has to be that number of partitions that should read 'part_size + 1' # number of row groups. reminder = num_row_groups % num_splits part_sizes = [part_size] * (num_splits - reminder) + [part_size + 1] * reminder partition_files = [] file_idx = 0 row_group_idx = 0 row_groups_left_in_current_file = row_groups_per_file[file_idx] # this is used for sanity check at the end, verifying that we indeed added all of the row groups total_row_groups_added = 0 for size in part_sizes: row_groups_taken = 0 part_files = [] while row_groups_taken != size: if row_groups_left_in_current_file < 1: file_idx += 1 row_group_idx = 0 row_groups_left_in_current_file = row_groups_per_file[file_idx] to_take = min(size - row_groups_taken, row_groups_left_in_current_file) part_files.append( ParquetFileToRead( parquet_files[file_idx], row_group_start=row_group_idx, row_group_end=row_group_idx + to_take, ) ) row_groups_left_in_current_file -= to_take row_groups_taken += to_take row_group_idx += to_take total_row_groups_added += row_groups_taken partition_files.append(part_files) sanity_check = ( len(partition_files) == num_splits and total_row_groups_added == num_row_groups ) ErrorMessage.catch_bugs_and_request_email( failure_condition=not sanity_check, extra_log="row groups added does not match total num of row groups across parquet files", ) return partition_files @classmethod def call_deploy( cls, partition_files: "list[list[ParquetFileToRead]]", col_partitions: "list[list[str]]", storage_options: dict, engine: 
str, **kwargs, ): """ Deploy remote tasks to the workers with passed parameters. Parameters ---------- partition_files : list[list[ParquetFileToRead]] List of arrays with files that should be read by each partition. col_partitions : list[list[str]] List of arrays with columns names that should be read by each partition. storage_options : dict Parameters for specific storage engine. engine : {"auto", "pyarrow", "fastparquet"} Parquet library to use for reading. **kwargs : dict Parameters of deploying read_* function. Returns ------- List Array with references to the task deploy result for each partition. """ # If we don't have any columns to read, we should just return an empty # set of references. if len(col_partitions) == 0: return [] all_partitions = [] for files_to_read in partition_files: all_partitions.append( [ cls.deploy( func=cls.parse, f_kwargs={ "files_for_parser": files_to_read, "columns": cols, "engine": engine, "storage_options": storage_options, **kwargs, }, num_returns=3, ) for cols in col_partitions ] ) return all_partitions @classmethod def build_partition(cls, partition_ids, column_widths): """ Build array with partitions of `cls.frame_partition_cls` class. Parameters ---------- partition_ids : list Array with references to the partitions data. column_widths : list Number of columns in each partition. Returns ------- np.ndarray array with shape equals to the shape of `partition_ids` and filed with partition objects. Notes ----- The second level of partitions_ids contains a list of object references for each read call: partition_ids[i][j] -> [ObjectRef(df), ObjectRef(df.index), ObjectRef(len(df))]. """ return np.array( [ [ cls.frame_partition_cls( part_id[0], length=part_id[2], width=col_width, ) for part_id, col_width in zip(part_ids, column_widths) ] for part_ids in partition_ids ] ) @classmethod def build_index(cls, dataset, partition_ids, index_columns, filters): """ Compute index and its split sizes of resulting Modin DataFrame. 
Parameters ---------- dataset : Dataset Dataset object of Parquet file/files. partition_ids : list Array with references to the partitions data. index_columns : list List of index columns specified by pandas metadata. filters : list List of filters to be used in reading the Parquet file/files. Returns ------- index : pandas.Index Index of resulting Modin DataFrame. needs_index_sync : bool Whether the partition indices need to be synced with frame index because there's no index column, or at least one index column is a RangeIndex. Notes ----- See `build_partition` for more detail on the contents of partitions_ids. """ range_index = True range_index_metadata = None column_names_to_read = [] for column in index_columns: # https://pandas.pydata.org/docs/development/developer.html#storing-pandas-dataframe-objects-in-apache-parquet-format # describes the format of the index column metadata. # It is a list, where each entry is either a string or a dictionary. # A string means that a column stored in the dataset is (part of) the index. # A dictionary is metadata about a RangeIndex, which is metadata-only and not stored # in the dataset as a column. # There cannot be both for a single dataframe, because a MultiIndex can only contain # "actual data" columns and not RangeIndex objects. # See similar code in pyarrow: https://github.com/apache/arrow/blob/44811ba18477560711d512939535c8389dd7787b/python/pyarrow/pandas_compat.py#L912-L926 # and in fastparquet, here is where RangeIndex is handled: https://github.com/dask/fastparquet/blob/df1219300a96bc1baf9ebad85f4f5676a130c9e8/fastparquet/api.py#L809-L815 if isinstance(column, str): column_names_to_read.append(column) range_index = False elif column["kind"] == "range": range_index_metadata = column # When the index has meaningful values, stored in a column, we will replicate those # exactly in the Modin dataframe's index. This index may have repeated values, be unsorted, # etc. This is all fine. 
# A range index is the special case: we want the Modin dataframe to have a single range, # not a range that keeps restarting. i.e. if the partitions have index 0-9, 0-19, 0-29, # we want our Modin dataframe to have 0-59. # When there are no filters, it is relatively cheap to construct the index by # actually reading in the necessary data, here in the main process. # When there are filters, we let the workers materialize the indices before combining to # get a single range. # For the second check, let us consider the case where we have an empty dataframe, # that has a valid index. if (range_index and filters is None) or ( len(partition_ids) == 0 and len(column_names_to_read) != 0 ): complete_index = dataset.to_pandas_dataframe( columns=column_names_to_read ).index # Empty DataFrame case elif len(partition_ids) == 0: return [], False else: index_ids = [part_id[0][1] for part_id in partition_ids if len(part_id) > 0] index_objs = cls.materialize(index_ids) if range_index: # There are filters, so we had to materialize in order to # determine how many items there actually are total_filtered_length = sum( len(index_part) for index_part in index_objs ) metadata_length_mismatch = False if range_index_metadata is not None: metadata_implied_length = ( range_index_metadata["stop"] - range_index_metadata["start"] ) / range_index_metadata["step"] metadata_length_mismatch = ( total_filtered_length != metadata_implied_length ) # pyarrow ignores the RangeIndex metadata if it is not consistent with data length. # https://github.com/apache/arrow/blob/44811ba18477560711d512939535c8389dd7787b/python/pyarrow/pandas_compat.py#L924-L926 # fastparquet keeps the start and step from the metadata and just adjusts to the length. 
# https://github.com/dask/fastparquet/blob/df1219300a96bc1baf9ebad85f4f5676a130c9e8/fastparquet/api.py#L815 if range_index_metadata is None or ( isinstance(dataset, PyArrowDataset) and metadata_length_mismatch ): complete_index = pandas.RangeIndex(total_filtered_length) else: complete_index = pandas.RangeIndex( start=range_index_metadata["start"], step=range_index_metadata["step"], stop=( range_index_metadata["start"] + (total_filtered_length * range_index_metadata["step"]) ), name=range_index_metadata["name"], ) else: complete_index = index_objs[0].append(index_objs[1:]) return complete_index, range_index or (len(index_columns) == 0) @classmethod def _normalize_partitioning(cls, remote_parts, row_lengths, column_widths): """ Normalize partitioning according to the default partitioning scheme in Modin. The result of 'read_parquet()' is often under partitioned over rows and over partitioned over columns, so this method expands the number of row splits and shrink the number of column splits. Parameters ---------- remote_parts : np.ndarray row_lengths : list of ints or None Row lengths, if 'None', won't repartition across rows. 
column_widths : list of ints Returns ------- remote_parts : np.ndarray row_lengths : list of ints or None column_widths : list of ints """ if len(remote_parts) == 0: return remote_parts, row_lengths, column_widths from modin.core.storage_formats.pandas.utils import get_length_list # The code in this function is actually a duplication of what 'BaseQueryCompiler.repartition()' does, # however this implementation works much faster for some reason actual_row_nparts = remote_parts.shape[0] if row_lengths is not None: desired_row_nparts = max( 1, min(sum(row_lengths) // MinRowPartitionSize.get(), NPartitions.get()) ) else: desired_row_nparts = actual_row_nparts # only repartition along rows if the actual number of row splits 1.5 times SMALLER than desired if 1.5 * actual_row_nparts < desired_row_nparts: # assuming that the sizes of parquet's row groups are more or less equal, # so trying to use the same number of splits for each partition splits_per_partition = desired_row_nparts // actual_row_nparts remainder = desired_row_nparts % actual_row_nparts new_parts = [] new_row_lengths = [] for row_idx, (part_len, row_parts) in enumerate( zip(row_lengths, remote_parts) ): num_splits = splits_per_partition # 'remainder' indicates how many partitions have to be split into 'num_splits + 1' splits # to have exactly 'desired_row_nparts' in the end if row_idx < remainder: num_splits += 1 if num_splits == 1: new_parts.append(row_parts) new_row_lengths.append(part_len) continue offset = len(new_parts) # adding empty row parts according to the number of splits new_parts.extend([[] for _ in range(num_splits)]) for part in row_parts: split = cls.frame_cls._partition_mgr_cls._column_partitions_class( [part] ).apply( lambda df: df, num_splits=num_splits, maintain_partitioning=False, ) for i in range(num_splits): new_parts[offset + i].append(split[i]) new_row_lengths.extend( get_length_list(part_len, num_splits, MinRowPartitionSize.get()) ) remote_parts = np.array(new_parts) row_lengths = 
new_row_lengths desired_col_nparts = max( 1, min(sum(column_widths) // MinColumnPartitionSize.get(), NPartitions.get()), ) # only repartition along cols if the actual number of col splits 1.5 times BIGGER than desired if 1.5 * desired_col_nparts < remote_parts.shape[1]: remote_parts = np.array( [ ( cls.frame_cls._partition_mgr_cls._row_partition_class( row_parts ).apply( lambda df: df, num_splits=desired_col_nparts, maintain_partitioning=False, ) ) for row_parts in remote_parts ] ) column_widths = get_length_list( sum(column_widths), desired_col_nparts, MinColumnPartitionSize.get() ) return remote_parts, row_lengths, column_widths @classmethod def build_query_compiler(cls, dataset, columns, index_columns, **kwargs): """ Build query compiler from deployed tasks outputs. Parameters ---------- dataset : Dataset Dataset object of Parquet file/files. columns : list List of columns that should be read from file. index_columns : list List of index columns specified by pandas metadata. **kwargs : dict Parameters of deploying read_* function. Returns ------- new_query_compiler : BaseQueryCompiler Query compiler with imported data for further processing. 
""" storage_options = kwargs.pop("storage_options", {}) or {} filters = kwargs.get("filters", None) partition_files = cls._determine_partitioning(dataset) col_partitions, column_widths = cls.build_columns( columns, num_row_parts=len(partition_files), ) partition_ids = cls.call_deploy( partition_files, col_partitions, storage_options, dataset.engine, **kwargs ) index, sync_index = cls.build_index( dataset, partition_ids, index_columns, filters ) remote_parts = cls.build_partition(partition_ids, column_widths) if len(partition_ids) > 0: row_lengths = [part.length() for part in remote_parts.T[0]] else: row_lengths = None remote_parts, row_lengths, column_widths = cls._normalize_partitioning( remote_parts, row_lengths, column_widths ) if ( dataset.pandas_metadata and "column_indexes" in dataset.pandas_metadata and len(dataset.pandas_metadata["column_indexes"]) == 1 and dataset.pandas_metadata["column_indexes"][0]["numpy_type"] == "int64" ): columns = pandas.Index(columns).astype("int64").to_list() frame = cls.frame_cls( remote_parts, index, columns, row_lengths=row_lengths, column_widths=column_widths, dtypes=None, ) if sync_index: frame.synchronize_labels(axis=0) return cls.query_compiler_cls(frame) @classmethod def _read(cls, path, engine, columns, use_nullable_dtypes, dtype_backend, **kwargs): """ Load a parquet object from the file path, returning a query compiler. Parameters ---------- path : str, path object or file-like object The filepath of the parquet file in local filesystem or hdfs. engine : {"auto", "pyarrow", "fastparquet"} Parquet library to use. columns : list If not None, only these columns will be read from the file. use_nullable_dtypes : Union[bool, lib.NoDefault] dtype_backend : {"numpy_nullable", "pyarrow", lib.no_default} **kwargs : dict Keyword arguments. Returns ------- BaseQueryCompiler A new Query Compiler. Notes ----- ParquetFile API is used. 
Please refer to the documentation here https://arrow.apache.org/docs/python/parquet.html """ if ( (set(kwargs) - {"storage_options", "filters", "filesystem"}) or use_nullable_dtypes != lib.no_default or kwargs.get("filesystem") is not None ): return cls.single_worker_read( path, engine=engine, columns=columns, use_nullable_dtypes=use_nullable_dtypes, dtype_backend=dtype_backend, reason="Parquet options that are not currently supported", **kwargs, ) path = stringify_path(path) if isinstance(path, list): # TODO(https://github.com/modin-project/modin/issues/5723): read all # files in parallel. compilers: list[cls.query_compiler_cls] = [ cls._read( p, engine, columns, use_nullable_dtypes, dtype_backend, **kwargs ) for p in path ] return compilers[0].concat(axis=0, other=compilers[1:], ignore_index=True) if isinstance(path, str): if os.path.isdir(path): path_generator = os.walk(path) else: storage_options = kwargs.get("storage_options") if storage_options is not None: fs, fs_path = url_to_fs(path, **storage_options) else: fs, fs_path = url_to_fs(path) path_generator = fs.walk(fs_path) partitioned_columns = set() # We do a tree walk of the path directory because partitioned # parquet directories have a unique column at each directory level. 
# Thus, we can use os.walk(), which does a dfs search, to walk # through the different columns that the data is partitioned on for _, dir_names, files in path_generator: if dir_names: partitioned_columns.add(dir_names[0].split("=")[0]) if files: # Metadata files, git files, .DSStore # TODO: fix conditional for column partitioning, see issue #4637 if len(files[0]) > 0 and files[0][0] == ".": continue break partitioned_columns = list(partitioned_columns) if len(partitioned_columns): return cls.single_worker_read( path, engine=engine, columns=columns, use_nullable_dtypes=use_nullable_dtypes, dtype_backend=dtype_backend, reason="Mixed partitioning columns in Parquet", **kwargs, ) dataset = cls.get_dataset(path, engine, kwargs.get("storage_options") or {}) index_columns = ( dataset.pandas_metadata.get("index_columns", []) if dataset.pandas_metadata else [] ) # If we have columns as None, then we default to reading in all the columns column_names = columns if columns else dataset.columns columns = [ c for c in column_names if c not in index_columns and not cls.index_regex.match(c) ] return cls.build_query_compiler( dataset, columns, index_columns, dtype_backend=dtype_backend, **kwargs ) @classmethod def write(cls, qc, **kwargs): """ Write a ``DataFrame`` to the binary parquet format. Parameters ---------- qc : BaseQueryCompiler The query compiler of the Modin dataframe that we want to run `to_parquet` on. **kwargs : dict Parameters for `pandas.to_parquet(**kwargs)`. """ kwargs["path"] = stringify_path(kwargs["path"]) output_path = kwargs["path"] if not isinstance(output_path, str): return cls.base_io.to_parquet(qc, **kwargs) client_kwargs = (kwargs.get("storage_options") or {}).get("client_kwargs", {}) fs, url = fsspec.core.url_to_fs(output_path, client_kwargs=client_kwargs) fs.mkdirs(url, exist_ok=True) def func(df, **kw): # pragma: no cover """ Dump a chunk of rows as parquet, then save them to target maintaining order. 
Parameters ---------- df : pandas.DataFrame A chunk of rows to write to a parquet file. **kw : dict Arguments to pass to ``pandas.to_parquet(**kwargs)`` plus an extra argument `partition_idx` serving as chunk index to maintain rows order. """ compression = kwargs["compression"] partition_idx = kw["partition_idx"] kwargs["path"] = ( f"{output_path}/part-{partition_idx:04d}.{compression}.parquet" ) df.to_parquet(**kwargs) return pandas.DataFrame() # Ensure that the metadata is synchronized qc._modin_frame._propagate_index_objs(axis=None) result = qc._modin_frame._partition_mgr_cls.map_axis_partitions( axis=1, partitions=qc._modin_frame._partitions, map_func=func, keep_partitioning=True, lengths=None, enumerate_partitions=True, ) # pending completion cls.materialize([part.list_of_blocks[0] for row in result for part in row])
ParquetDispatcher
python
pandas-dev__pandas
pandas/tests/scalar/timestamp/test_constructors.py
{ "start": 17057, "end": 41029 }
class ____: def test_disallow_dt64_with_weird_unit(self): # GH#25611 dt64 = np.datetime64(1, "500m") msg = "np.datetime64 objects with units containing a multiplier" with pytest.raises(ValueError, match=msg): Timestamp(dt64) def test_weekday_but_no_day_raises(self): # GH#52659 msg = "Parsing datetimes with weekday but no day information is not supported" with pytest.raises(ValueError, match=msg): Timestamp("2023 Sept Thu") def test_construct_from_string_invalid_raises(self): # dateutil (weirdly) parses "200622-12-31" as # datetime(2022, 6, 20, 12, 0, tzinfo=tzoffset(None, -111600) # which besides being mis-parsed, is a tzoffset that will cause # str(ts) to raise ValueError. Ensure we raise in the constructor # instead. # see test_to_datetime_malformed_raise for analogous to_datetime test with pytest.raises(ValueError, match="gives an invalid tzoffset"): Timestamp("200622-12-31") def test_constructor_from_iso8601_str_with_offset_reso(self): # GH#49737 ts = Timestamp("2016-01-01 04:05:06-01:00") assert ts.unit == "us" ts = Timestamp("2016-01-01 04:05:06.000-01:00") assert ts.unit == "us" ts = Timestamp("2016-01-01 04:05:06.000000-01:00") assert ts.unit == "us" ts = Timestamp("2016-01-01 04:05:06.000000001-01:00") assert ts.unit == "ns" def test_constructor_from_date_second_reso(self): # GH#49034 constructing from a pydate object gets lowest supported # reso, i.e. seconds obj = date(2012, 9, 1) ts = Timestamp(obj) assert ts.unit == "s" def test_constructor_datetime64_with_tz(self): # GH#42288, GH#24559 dt = np.datetime64("1970-01-01 05:00:00") tzstr = "UTC+05:00" # pre-2.0 this interpreted dt as a UTC time. 
in 2.0 this is treated # as a wall-time, consistent with DatetimeIndex behavior ts = Timestamp(dt, tz=tzstr) alt = Timestamp(dt).tz_localize(tzstr) assert ts == alt assert ts.hour == 5 def test_constructor(self): base_str = "2014-07-01 09:00" base_dt = datetime(2014, 7, 1, 9) base_expected = 1_404_205_200_000_000_000 # confirm base representation is correct assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected tests = [ (base_str, base_dt, base_expected), ( "2014-07-01 10:00", datetime(2014, 7, 1, 10), base_expected + 3600 * 1_000_000_000, ), ( "2014-07-01 09:00:00.000008000", datetime(2014, 7, 1, 9, 0, 0, 8), base_expected + 8000, ), ( "2014-07-01 09:00:00.000000005", Timestamp("2014-07-01 09:00:00.000000005"), base_expected + 5, ), ] timezones = [ (None, 0), ("UTC", 0), (timezone.utc, 0), ("Asia/Tokyo", 9), ("US/Eastern", -4), ("dateutil/US/Pacific", -7), (timezone(timedelta(hours=-3)), -3), (dateutil.tz.tzoffset(None, 18000), 5), ] for date_str, date_obj, expected in tests: for result in [Timestamp(date_str), Timestamp(date_obj)]: result = result.as_unit("ns") # test originally written before non-nano # only with timestring assert result.as_unit("ns")._value == expected # re-creation shouldn't affect to internal value result = Timestamp(result) assert result.as_unit("ns")._value == expected # with timezone for tz, offset in timezones: for result in [Timestamp(date_str, tz=tz), Timestamp(date_obj, tz=tz)]: result = result.as_unit( "ns" ) # test originally written before non-nano expected_tz = expected - offset * 3600 * 1_000_000_000 assert result.as_unit("ns")._value == expected_tz # should preserve tz result = Timestamp(result) assert result.as_unit("ns")._value == expected_tz # should convert to UTC if tz is not None: result = Timestamp(result).tz_convert("UTC") else: result = Timestamp(result, tz="UTC") expected_utc = expected - offset * 3600 * 1_000_000_000 assert result.as_unit("ns")._value == expected_utc def 
test_constructor_with_stringoffset(self): # GH 7833 base_str = "2014-07-01 11:00:00+02:00" base_dt = datetime(2014, 7, 1, 9) base_expected = 1_404_205_200_000_000_000 # confirm base representation is correct assert calendar.timegm(base_dt.timetuple()) * 1_000_000_000 == base_expected tests = [ (base_str, base_expected), ("2014-07-01 12:00:00+02:00", base_expected + 3600 * 1_000_000_000), ("2014-07-01 11:00:00.000008000+02:00", base_expected + 8000), ("2014-07-01 11:00:00.000000005+02:00", base_expected + 5), ] timezones = [ ("UTC", 0), (timezone.utc, 0), ("Asia/Tokyo", 9), ("US/Eastern", -4), ("dateutil/US/Pacific", -7), (timezone(timedelta(hours=-3)), -3), (dateutil.tz.tzoffset(None, 18000), 5), ] for date_str, expected in tests: for result in [Timestamp(date_str)]: # only with timestring assert result.as_unit("ns")._value == expected # re-creation shouldn't affect to internal value result = Timestamp(result) assert result.as_unit("ns")._value == expected # with timezone for tz, offset in timezones: result = Timestamp(date_str, tz=tz) expected_tz = expected assert result.as_unit("ns")._value == expected_tz # should preserve tz result = Timestamp(result) assert result.as_unit("ns")._value == expected_tz # should convert to UTC result = Timestamp(result).tz_convert("UTC") expected_utc = expected assert result.as_unit("ns")._value == expected_utc # This should be 2013-11-01 05:00 in UTC # converted to Chicago tz result = Timestamp("2013-11-01 00:00:00-0500", tz="America/Chicago") assert result._value == Timestamp("2013-11-01 05:00")._value expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" assert repr(result) == expected assert result == eval(repr(result)) # This should be 2013-11-01 05:00 in UTC # converted to Tokyo tz (+09:00) result = Timestamp("2013-11-01 00:00:00-0500", tz="Asia/Tokyo") assert result._value == Timestamp("2013-11-01 05:00")._value expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')" assert repr(result) == 
expected assert result == eval(repr(result)) # GH11708 # This should be 2015-11-18 10:00 in UTC # converted to Asia/Katmandu result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu") assert result._value == Timestamp("2015-11-18 10:00")._value expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')" assert repr(result) == expected assert result == eval(repr(result)) # This should be 2015-11-18 10:00 in UTC # converted to Asia/Kolkata result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata") assert result._value == Timestamp("2015-11-18 10:00")._value expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')" assert repr(result) == expected assert result == eval(repr(result)) def test_constructor_invalid(self): msg = "Cannot convert input" with pytest.raises(TypeError, match=msg): Timestamp(slice(2)) msg = "Cannot convert Period" with pytest.raises(ValueError, match=msg): Timestamp(Period("1000-01-01")) def test_constructor_invalid_tz(self): # GH#17690 msg = ( "Argument 'tzinfo' has incorrect type " r"\(expected datetime.tzinfo, got str\)" ) with pytest.raises(TypeError, match=msg): Timestamp("2017-10-22", tzinfo="US/Eastern") msg = "at most one of" with pytest.raises(ValueError, match=msg): Timestamp("2017-10-22", tzinfo=timezone.utc, tz="UTC") msg = "Cannot pass a date attribute keyword argument when passing a date string" with pytest.raises(ValueError, match=msg): # GH#5168 # case where user tries to pass tz as an arg, not kwarg, gets # interpreted as `year` Timestamp("2012-01-01", "US/Pacific") def test_constructor_tz_or_tzinfo(self): # GH#17943, GH#17690, GH#5168 stamps = [ Timestamp(year=2017, month=10, day=22, tz="UTC"), Timestamp(year=2017, month=10, day=22, tzinfo=timezone.utc), Timestamp(year=2017, month=10, day=22, tz=timezone.utc), Timestamp(datetime(2017, 10, 22), tzinfo=timezone.utc), Timestamp(datetime(2017, 10, 22), tz="UTC"), Timestamp(datetime(2017, 10, 22), tz=timezone.utc), ] assert all(ts == stamps[0] 
for ts in stamps) @pytest.mark.parametrize( "result", [ Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1), Timestamp( year=2000, month=1, day=2, hour=3, minute=4, second=5, microsecond=6, nanosecond=1, ), Timestamp( year=2000, month=1, day=2, hour=3, minute=4, second=5, microsecond=6, nanosecond=1, tz="UTC", ), Timestamp(2000, 1, 2, 3, 4, 5, 6, None, nanosecond=1), Timestamp(2000, 1, 2, 3, 4, 5, 6, tz=timezone.utc, nanosecond=1), ], ) def test_constructor_nanosecond(self, result): # GH 18898 # As of 2.0 (GH 49416), nanosecond should not be accepted positionally expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz) expected = expected + Timedelta(nanoseconds=1) assert result == expected @pytest.mark.parametrize("z", ["Z0", "Z00"]) def test_constructor_invalid_Z0_isostring(self, z): # GH 8910 msg = f"Unknown datetime string format, unable to parse: 2014-11-02 01:00{z}" with pytest.raises(ValueError, match=msg): Timestamp(f"2014-11-02 01:00{z}") def test_out_of_bounds_integer_value(self): # GH#26651 check that we raise OutOfBoundsDatetime, not OverflowError msg = str(Timestamp.max._value * 2) with pytest.raises(OutOfBoundsDatetime, match=msg): Timestamp(Timestamp.max._value * 2) msg = str(Timestamp.min._value * 2) with pytest.raises(OutOfBoundsDatetime, match=msg): Timestamp(Timestamp.min._value * 2) def test_out_of_bounds_value(self): one_us = np.timedelta64(1).astype("timedelta64[us]") # By definition we can't go out of bounds in [ns], so we # convert the datetime64s to [us] so we can go out of bounds min_ts_us = np.datetime64(Timestamp.min).astype("M8[us]") + one_us max_ts_us = np.datetime64(Timestamp.max).astype("M8[us]") # No error for the min/max datetimes Timestamp(min_ts_us) Timestamp(max_ts_us) # We used to raise on these before supporting non-nano us_val = NpyDatetimeUnit.NPY_FR_us.value assert Timestamp(min_ts_us - one_us)._creso == us_val assert Timestamp(max_ts_us + one_us)._creso == us_val # 
https://github.com/numpy/numpy/issues/22346 for why # we can't use the same construction as above with minute resolution # too_low, too_high are the _just_ outside the range of M8[s] too_low = np.datetime64("-292277022657-01-27T08:29", "m") too_high = np.datetime64("292277026596-12-04T15:31", "m") msg = "Out of bounds" # One us less than the minimum is an error with pytest.raises(ValueError, match=msg): Timestamp(too_low) # One us more than the maximum is an error with pytest.raises(ValueError, match=msg): Timestamp(too_high) def test_out_of_bounds_string(self): msg = "Cannot cast .* to unit='ns' without overflow" with pytest.raises(ValueError, match=msg): Timestamp("1676-01-01").as_unit("ns") with pytest.raises(ValueError, match=msg): Timestamp("2263-01-01").as_unit("ns") ts = Timestamp("2263-01-01") assert ts.unit == "us" ts = Timestamp("1676-01-01") assert ts.unit == "us" def test_barely_out_of_bounds(self): # GH#19529 # GH#19382 close enough to bounds that dropping nanos would result # in an in-bounds datetime msg = "Out of bounds nanosecond timestamp: 2262-04-11 23:47:16" with pytest.raises(OutOfBoundsDatetime, match=msg): Timestamp("2262-04-11 23:47:16.854775808") def test_bounds_with_different_units(self): out_of_bounds_dates = ("1677-09-21", "2262-04-12") time_units = ("D", "h", "m", "s", "ms", "us") for date_string in out_of_bounds_dates: for unit in time_units: dt64 = np.datetime64(date_string, unit) ts = Timestamp(dt64) if unit in ["s", "ms", "us"]: # We can preserve the input unit assert ts._value == dt64.view("i8") else: # we chose the closest unit that we _do_ support assert ts._creso == NpyDatetimeUnit.NPY_FR_s.value # With more extreme cases, we can't even fit inside second resolution info = np.iinfo(np.int64) msg = "Out of bounds second timestamp:" for value in [info.min + 1, info.max]: for unit in ["D", "h", "m"]: dt64 = np.datetime64(value, unit) with pytest.raises(OutOfBoundsDatetime, match=msg): Timestamp(dt64) in_bounds_dates = ("1677-09-23", 
"2262-04-11") for date_string in in_bounds_dates: for unit in time_units: dt64 = np.datetime64(date_string, unit) Timestamp(dt64) @pytest.mark.parametrize("arg", ["001-01-01", "0001-01-01"]) def test_out_of_bounds_string_consistency(self, arg): # GH 15829 msg = "Cannot cast 0001-01-01 00:00:00 to unit='ns' without overflow" with pytest.raises(OutOfBoundsDatetime, match=msg): Timestamp(arg).as_unit("ns") ts = Timestamp(arg) assert ts.unit == "us" assert ts.year == ts.month == ts.day == 1 def test_min_valid(self): # Ensure that Timestamp.min is a valid Timestamp Timestamp(Timestamp.min) def test_max_valid(self): # Ensure that Timestamp.max is a valid Timestamp Timestamp(Timestamp.max) @pytest.mark.parametrize("offset", ["+0300", "+0200"]) def test_construct_timestamp_near_dst(self, offset): # GH 20854 expected = Timestamp(f"2016-10-30 03:00:00{offset}", tz="Europe/Helsinki") result = Timestamp(expected).tz_convert("Europe/Helsinki") assert result == expected @pytest.mark.parametrize( "arg", ["2013/01/01 00:00:00+09:00", "2013-01-01 00:00:00+09:00"] ) def test_construct_with_different_string_format(self, arg): # GH 12064 result = Timestamp(arg) expected = Timestamp(datetime(2013, 1, 1), tz=timezone(timedelta(hours=9))) assert result == expected @pytest.mark.parametrize("box", [datetime, Timestamp]) def test_raise_tz_and_tzinfo_in_datetime_input(self, box): # GH 23579 kwargs = {"year": 2018, "month": 1, "day": 1, "tzinfo": timezone.utc} msg = "Cannot pass a datetime or Timestamp" with pytest.raises(ValueError, match=msg): Timestamp(box(**kwargs), tz="US/Pacific") msg = "Cannot pass a datetime or Timestamp" with pytest.raises(ValueError, match=msg): Timestamp(box(**kwargs), tzinfo=zoneinfo.ZoneInfo("US/Pacific")) def test_dont_convert_dateutil_utc_to_default_utc(self): result = Timestamp(datetime(2018, 1, 1), tz=tzutc()) expected = Timestamp(datetime(2018, 1, 1)).tz_localize(tzutc()) assert result == expected def test_constructor_subclassed_datetime(self): # GH 25851 # 
ensure that subclassed datetime works for # Timestamp creation class SubDatetime(datetime): pass data = SubDatetime(2000, 1, 1) result = Timestamp(data) expected = Timestamp(2000, 1, 1) assert result == expected def test_timestamp_constructor_tz_utc(self): utc_stamp = Timestamp("3/11/2012 05:00", tz="utc") assert utc_stamp.tzinfo is timezone.utc assert utc_stamp.hour == 5 utc_stamp = Timestamp("3/11/2012 05:00").tz_localize("utc") assert utc_stamp.hour == 5 def test_timestamp_to_datetime_tzoffset(self): tzinfo = tzoffset(None, 7200) expected = Timestamp("3/11/2012 04:00", tz=tzinfo) result = Timestamp(expected.to_pydatetime()) assert expected == result def test_timestamp_constructor_near_dst_boundary(self): # GH#11481 & GH#15777 # Naive string timestamps were being localized incorrectly # with tz_convert_from_utc_single instead of tz_localize_to_utc for tz in ["Europe/Brussels", "Europe/Prague"]: result = Timestamp("2015-10-25 01:00", tz=tz) expected = Timestamp("2015-10-25 01:00").tz_localize(tz) assert result == expected msg = "Cannot infer dst time from 2015-10-25 02:00:00" with pytest.raises(ValueError, match=msg): Timestamp("2015-10-25 02:00", tz=tz) result = Timestamp("2017-03-26 01:00", tz="Europe/Paris") expected = Timestamp("2017-03-26 01:00").tz_localize("Europe/Paris") assert result == expected msg = "2017-03-26 02:00" with pytest.raises(ValueError, match=msg): Timestamp("2017-03-26 02:00", tz="Europe/Paris") # GH#11708 naive = Timestamp("2015-11-18 10:00:00") result = naive.tz_localize("UTC").tz_convert("Asia/Kolkata") expected = Timestamp("2015-11-18 15:30:00+0530", tz="Asia/Kolkata") assert result == expected # GH#15823 result = Timestamp("2017-03-26 00:00", tz="Europe/Paris") expected = Timestamp("2017-03-26 00:00:00+0100", tz="Europe/Paris") assert result == expected result = Timestamp("2017-03-26 01:00", tz="Europe/Paris") expected = Timestamp("2017-03-26 01:00:00+0100", tz="Europe/Paris") assert result == expected msg = "2017-03-26 02:00" with 
pytest.raises(ValueError, match=msg): Timestamp("2017-03-26 02:00", tz="Europe/Paris") result = Timestamp("2017-03-26 02:00:00+0100", tz="Europe/Paris") naive = Timestamp(result.as_unit("ns")._value) expected = naive.tz_localize("UTC").tz_convert("Europe/Paris") assert result == expected result = Timestamp("2017-03-26 03:00", tz="Europe/Paris") expected = Timestamp("2017-03-26 03:00:00+0200", tz="Europe/Paris") assert result == expected @pytest.mark.parametrize( "tz", [ "pytz/US/Eastern", gettz("US/Eastern"), "US/Eastern", "dateutil/US/Eastern", ], ) def test_timestamp_constructed_by_date_and_tz(self, tz): # GH#2993, Timestamp cannot be constructed by datetime.date # and tz correctly if isinstance(tz, str) and tz.startswith("pytz/"): pytz = pytest.importorskip("pytz") tz = pytz.timezone(tz.removeprefix("pytz/")) result = Timestamp(date(2012, 3, 11), tz=tz) expected = Timestamp("3/11/2012", tz=tz) assert result.hour == expected.hour assert result == expected def test_explicit_tz_none(self): # GH#48688 msg = "Passed data is timezone-aware, incompatible with 'tz=None'" with pytest.raises(ValueError, match=msg): Timestamp(datetime(2022, 1, 1, tzinfo=timezone.utc), tz=None) with pytest.raises(ValueError, match=msg): Timestamp("2022-01-01 00:00:00", tzinfo=timezone.utc, tz=None) with pytest.raises(ValueError, match=msg): Timestamp("2022-01-01 00:00:00-0400", tz=None) def test_constructor_ambiguous_dst(): # GH 24329 # Make sure that calling Timestamp constructor # on Timestamp created from ambiguous time # doesn't change Timestamp.value ts = Timestamp(1382835600000000000, tz="dateutil/Europe/London") expected = ts._value result = Timestamp(ts)._value assert result == expected @pytest.mark.parametrize("epoch", [1552211999999999872, 1552211999999999999]) def test_constructor_before_dst_switch(epoch): # GH 31043 # Make sure that calling Timestamp constructor # on time just before DST switch doesn't lead to # nonexistent time or value change ts = Timestamp(epoch, 
tz="dateutil/America/Los_Angeles") result = ts.tz.dst(ts) expected = timedelta(seconds=0) assert Timestamp(ts)._value == epoch assert result == expected def test_timestamp_constructor_identity(): # Test for #30543 expected = Timestamp("2017-01-01T12") result = Timestamp(expected) assert result is expected @pytest.mark.parametrize("nano", [-1, 1000]) def test_timestamp_nano_range(nano): # GH 48255 with pytest.raises(ValueError, match="nanosecond must be in 0..999"): Timestamp(year=2022, month=1, day=1, nanosecond=nano) def test_non_nano_value(): # https://github.com/pandas-dev/pandas/issues/49076 msg = "The 'unit' keyword is only used when" with tm.assert_produces_warning(UserWarning, match=msg): result = Timestamp("1800-01-01", unit="s").value # `.value` shows nanoseconds, even though unit is 's' assert result == -5364662400000000000 # out-of-nanoseconds-bounds `.value` raises informative message msg = ( r"Cannot convert Timestamp to nanoseconds without overflow. " r"Use `.asm8.view\('i8'\)` to cast represent Timestamp in its " r"own unit \(here, us\).$" ) ts = Timestamp("0300-01-01") assert ts.unit == "us" with pytest.raises(OverflowError, match=msg): ts.value # check that the suggested workaround actually works result = ts.asm8.view("i8") assert result == -52_700_112_000 * 10**6 @pytest.mark.parametrize("na_value", [None, np.nan, np.datetime64("NaT"), NaT, NA]) def test_timestamp_constructor_na_value(na_value): # GH45481 result = Timestamp(na_value) expected = NaT assert result is expected
TestTimestampConstructors
python
pyparsing__pyparsing
tests/test_unit.py
{ "start": 391811, "end": 392207 }
class ____(TestCase): def runTest(self): Test02_WithoutPackrat.suite_context = Test02_WithoutPackrat.save_suite_context Test02_WithoutPackrat.suite_context.restore() ParserElement.enable_packrat(cache_size_limit=16) # SAVE A NEW SUITE CONTEXT Test02_WithoutPackrat.suite_context = ppt.reset_pyparsing_context().save()
Test05_EnableBoundedPackratParsing
python
python-pillow__Pillow
Tests/test_core_resources.py
{ "start": 5111, "end": 5995 }
class ____: def teardown_method(self) -> None: # Restore default values Image.core.set_alignment(1) Image.core.set_block_size(1024 * 1024) Image.core.set_blocks_max(0) Image.core.clear_cache() def test_units(self) -> None: Image._apply_env_variables({"PILLOW_BLOCKS_MAX": "2K"}) assert Image.core.get_blocks_max() == 2 * 1024 Image._apply_env_variables({"PILLOW_BLOCK_SIZE": "2m"}) assert Image.core.get_block_size() == 2 * 1024 * 1024 @pytest.mark.parametrize( "var", ( {"PILLOW_ALIGNMENT": "15"}, {"PILLOW_BLOCK_SIZE": "1024"}, {"PILLOW_BLOCKS_MAX": "wat"}, ), ) def test_warnings(self, var: dict[str, str]) -> None: with pytest.warns(UserWarning, match=list(var)[0]): Image._apply_env_variables(var)
TestEnvVars
python
pola-rs__polars
py-polars/tests/unit/io/test_scan_row_deletion.py
{ "start": 629, "end": 13023 }
class ____: # noqa: D101 def __init__(self, *, tmp_path: Path) -> None: self.tmp_path = tmp_path self.i = 0 def __call__(self, positions: pl.Series) -> str: path = self.tmp_path / f"{self.i}" ( positions.alias("pos") .to_frame() .select(pl.lit("").alias("file_path"), "pos") .write_parquet(path) ) self.i += 1 return str(path) # 5 files x 5 rows each. Contains `physical_index` [0, 1, .., 24]. def _create_data_files(tmp_path: Path) -> None: tmp_path.mkdir(exist_ok=True, parents=True) df = pl.select(physical_index=pl.int_range(25, dtype=pl.UInt32)) parts = [] for i in [0, 5, 10, 15, 20]: O = "0" if i < 10 else "" # noqa: E741 path = tmp_path / f"offset={O}{i}/data.parquet" path.parent.mkdir(exist_ok=True, parents=True) part_df = df.slice(i, 5) part_df.write_parquet(path) parts.append(part_df.with_columns(offset=pl.lit(i, dtype=pl.Int64))) assert_frame_equal(pl.scan_parquet(tmp_path).collect(), pl.concat(parts)) @pytest.mark.parametrize("row_index_offset", [0, 27, 38, 73]) def test_scan_row_deletions( data_files_path: Path, write_position_deletes: WritePositionDeletes, row_index_offset: int, ) -> None: deletion_files = ( "iceberg-position-delete", { 0: [ write_position_deletes(pl.Series([1, 2])), ], 1: [ write_position_deletes(pl.Series([0, 1, 2])), ], 4: [ write_position_deletes(pl.Series([2, 3])), ], }, ) def apply_row_index_offset(values: list[int]) -> list[int]: return [x + row_index_offset for x in values] q = pl.scan_parquet( data_files_path, _deletion_files=deletion_files, # type: ignore[arg-type] hive_partitioning=False, ).with_row_index(offset=row_index_offset) assert q.select(pl.len()).collect().item() == 18 assert_frame_equal( q.collect(), pl.DataFrame( { "index": apply_row_index_offset([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 ]), "physical_index": [ 0, 3, 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 24 ], }, schema={"index": pl.get_index_type(), "physical_index": pl.UInt32}, ) ) # fmt: skip # head() assert_frame_equal( 
q.head(3).collect(), pl.DataFrame( { "index": apply_row_index_offset([0, 1, 2]), "physical_index": [0, 3, 4], }, schema={"index": pl.get_index_type(), "physical_index": pl.UInt32}, ), ) assert_frame_equal( q.head(10).collect(), pl.DataFrame( { "index": apply_row_index_offset([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), "physical_index": [0, 3, 4, 8, 9, 10, 11, 12, 13, 14], }, schema={"index": pl.get_index_type(), "physical_index": pl.UInt32}, ), ) # tail() assert_frame_equal( q.tail(3).collect(), pl.DataFrame( { "index": apply_row_index_offset([15, 16, 17]), "physical_index": [20, 21, 24], }, schema={"index": pl.get_index_type(), "physical_index": pl.UInt32}, ), ) assert_frame_equal( q.tail(10).collect(), pl.DataFrame( { "index": apply_row_index_offset( [8, 9, 10, 11, 12, 13, 14, 15, 16, 17] ), "physical_index": [ 13, 14, 15, 16, 17, 18, 19, 20, 21, 24 ], }, schema={"index": pl.get_index_type(), "physical_index": pl.UInt32}, ), ) # fmt: skip # slice(positive_offset) assert_frame_equal( q.slice(2, 10).collect(), pl.DataFrame( { "index": apply_row_index_offset([2, 3, 4, 5, 6, 7, 8, 9, 10, 11]), "physical_index": [4, 8, 9, 10, 11, 12, 13, 14, 15, 16], }, schema={"index": pl.get_index_type(), "physical_index": pl.UInt32}, ), ) assert_frame_equal( q.slice(5, 10).collect(), pl.DataFrame( { "index": apply_row_index_offset([5, 6, 7, 8, 9, 10, 11, 12, 13, 14]), "physical_index": [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], }, schema={"index": pl.get_index_type(), "physical_index": pl.UInt32}, ), ) assert_frame_equal( q.slice(10, 10).collect(), pl.DataFrame( { "index": apply_row_index_offset([10, 11, 12, 13, 14, 15, 16, 17]), "physical_index": [15, 16, 17, 18, 19, 20, 21, 24], }, schema={"index": pl.get_index_type(), "physical_index": pl.UInt32}, ), ) # slice(negative_offset) assert_frame_equal( q.slice(-3, 2).collect(), pl.DataFrame( { "index": apply_row_index_offset([15, 16]), "physical_index": [20, 21], }, schema={"index": pl.get_index_type(), "physical_index": pl.UInt32}, ), ) 
assert_frame_equal( q.slice(-23, 10).collect(), pl.DataFrame( { "index": apply_row_index_offset([0, 1, 2, 3, 4]), "physical_index": [0, 3, 4, 8, 9], }, schema={"index": pl.get_index_type(), "physical_index": pl.UInt32}, ), ) # filter: skip_files q = pl.scan_parquet( data_files_path, _deletion_files=deletion_files, # type: ignore[arg-type] ).with_row_index(offset=row_index_offset) assert_frame_equal( q.filter(pl.col("offset").is_in([10, 20])).collect(), pl.DataFrame( { "index": apply_row_index_offset([5, 6, 7, 8, 9, 15, 16, 17]), "physical_index": [10, 11, 12, 13, 14, 20, 21, 24], "offset": [10, 10, 10, 10, 10, 20, 20, 20], }, schema={ "index": pl.get_index_type(), "physical_index": pl.UInt32, "offset": pl.Int64, }, ), ) @pytest.mark.slow @pytest.mark.write_disk @pytest.mark.parametrize("ideal_morsel_size", [999, 50, 33]) @pytest.mark.parametrize("force_empty_capabilities", [True, False]) def test_scan_row_deletion_single_large( tmp_path: Path, write_position_deletes: WritePositionDeletes, ideal_morsel_size: int, force_empty_capabilities: bool, ) -> None: path = tmp_path / "data.parquet" pl.DataFrame({"physical_index": range(100)}).write_parquet(path) positions = pl.Series([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 64, 65, 66, 67, 68, 69, 70, 71, 79, 80, 81, 82, 83, 84, 90, 91, 92, 93, 97, 98 ]) # fmt: skip deletion_positions_path = write_position_deletes(positions) script_args: list[str] = [ str(ideal_morsel_size), "1" if force_empty_capabilities else "0", str(path), deletion_positions_path, ] # Use a process to ensure ideal morsel size is set correctly. 
out = subprocess.check_output( [ sys.executable, "-c", """\ import os import sys ( _, ideal_morsel_size, force_empty_capabilities, data_file_path, deletion_positions_path, ) = sys.argv os.environ["POLARS_VERBOSE"] = "0" os.environ["POLARS_MAX_THREADS"] = "1" os.environ["POLARS_IDEAL_MORSEL_SIZE"] = ideal_morsel_size os.environ["POLARS_FORCE_EMPTY_READER_CAPABILITIES"] = force_empty_capabilities import polars as pl from polars.testing import assert_frame_equal full_expected_physical = [ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 55, 56, 57, 58, 59, 60, 61, 62, 63, 72, 73, 74, 75, 76, 77, 78, 85, 86, 87, 88, 89, 94, 95, 96, 99 ] # fmt: skip deletion_files = ( "iceberg-position-delete", {0: [deletion_positions_path]}, ) q = pl.scan_parquet(data_file_path, _deletion_files=deletion_files).with_row_index() assert_frame_equal( q.collect(), pl.DataFrame({"physical_index": full_expected_physical}).with_row_index(), ) assert_frame_equal( q.tail(999).collect(), pl.DataFrame({"physical_index": full_expected_physical}).with_row_index(), ) # Note: The negative slice is important here. Otherwise row_index does not get # lowered into the post-apply pipeline. 
for negative_offset in range(1, 49): assert_frame_equal( q.tail(negative_offset).collect(), pl.DataFrame( {"physical_index": full_expected_physical[-negative_offset:]} ).with_row_index(offset=49 - negative_offset), ) assert_frame_equal( q.slice(20).collect(), pl.DataFrame({"physical_index": full_expected_physical[20:]}).with_row_index( offset=20 ), ) print("OK", end="") """, *script_args, ], stderr=subprocess.STDOUT, ) assert out == b"OK" @pytest.mark.write_disk def test_scan_row_deletion_skips_file_with_all_rows_deleted( tmp_path: Path, write_position_deletes: WritePositionDeletes, ) -> None: # Create our own copy because we mutate one of the data files data_files_path = tmp_path / "data-files" _create_data_files(data_files_path) # Corrupt a parquet file def remove_data(path: Path) -> None: v = path.read_bytes() metadata_and_footer_len = 8 + int.from_bytes(v[-8:][:4], "little") path.write_bytes(b"\x00" * (len(v) - metadata_and_footer_len)) path.write_bytes(v[-metadata_and_footer_len:]) remove_data(data_files_path / "offset=05/data.parquet") q = pl.scan_parquet( data_files_path / "offset=05/data.parquet", hive_partitioning=False ) # Baseline: The metadata is readable but the row groups are not assert q.collect_schema() == {"physical_index": pl.UInt32} assert q.select(pl.len()).collect().item() == 5 with pytest.raises(pl.exceptions.ComputeError, match="Invalid thrift"): q.collect() q = pl.scan_parquet(data_files_path, hive_partitioning=False) with pytest.raises(pl.exceptions.ComputeError, match="Invalid thrift"): q.collect() q = pl.scan_parquet( data_files_path, _deletion_files=( "iceberg-position-delete", { 1: [ write_position_deletes(pl.Series([0, 1, 2])), write_position_deletes(pl.Series([3, 4])), ] }, ), hive_partitioning=False, ) expect = pl.DataFrame( { "index": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, ], "physical_index": [ 0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, ], }, schema={"index": 
pl.get_index_type(), "physical_index": pl.UInt32}, ) # fmt: skip assert_frame_equal(q.collect(), expect.drop("index")) assert_frame_equal(q.with_row_index().collect(), expect) expect = pl.DataFrame( { "index": [ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, ], "physical_index": [ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, ], }, schema={"index": pl.get_index_type(), "physical_index": pl.UInt32}, ) # fmt: skip assert_frame_equal(q.slice(10).collect(), expect.drop("index")) assert_frame_equal(q.with_row_index().slice(10).collect(), expect)
WritePositionDeletes
python
pytorch__pytorch
torchgen/api/types/signatures.py
{ "start": 6972, "end": 9065 }
class ____: # The schema this signature is derived from func: FunctionSchema # Allows you to prepend an arbitrary prefix to the signature name. # This is useful for parts of the codegen that generate wrappers around kernels, # and need to avoid naming collisions. prefix: str = "" symint: bool = True def arguments(self) -> list[Binding]: return dispatcher.arguments(self.func, symint=self.symint) def name(self) -> str: return self.prefix + dispatcher.name(self.func) def decl(self, name: str | None = None) -> str: args_str = ", ".join(a.decl() for a in self.arguments()) if name is None: name = self.name() return f"{self.returns_type().cpp_type()} {name}({args_str})" def defn( self, name: str | None = None, *, is_redispatching_fn: bool = False ) -> str: args = [a.defn() for a in self.arguments()] if is_redispatching_fn: args = ["c10::DispatchKeySet dispatchKeySet"] + args args_str = ", ".join(args) if name is None: name = self.name() return f"{self.returns_type().cpp_type()} {name}({args_str})" def exprs(self) -> list[Expr]: return [Expr(a.name, a.nctype) for a in self.arguments()] def returns_type(self) -> CType: return dispatcher.returns_type(self.func.returns, symint=self.symint) def ptr_type(self) -> str: dispatcher_args_types_str = ", ".join(a.type for a in self.arguments()) return f"{self.returns_type().cpp_type()} (*)({dispatcher_args_types_str})" # Return the C++ function type, e.g., something like int(bool) def type(self) -> str: dispatcher_args_types_str = ", ".join(a.type for a in self.arguments()) return f"{self.returns_type().cpp_type()} ({dispatcher_args_types_str})" @staticmethod def from_schema( func: FunctionSchema, *, prefix: str = "", symint: bool = True ) -> DispatcherSignature: return DispatcherSignature(func, prefix, symint) @dataclass(frozen=True)
DispatcherSignature
python
anthropics__anthropic-sdk-python
src/anthropic/types/beta/beta_text_editor_code_execution_view_result_block_param.py
{ "start": 275, "end": 603 }
class ____(TypedDict, total=False): content: Required[str] file_type: Required[Literal["text", "image", "pdf"]] type: Required[Literal["text_editor_code_execution_view_result"]] num_lines: Optional[int] start_line: Optional[int] total_lines: Optional[int]
BetaTextEditorCodeExecutionViewResultBlockParam
python
huggingface__transformers
tests/models/x_clip/test_modeling_x_clip.py
{ "start": 11536, "end": 14756 }
class ____: def __init__( self, parent, batch_size=8, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return XCLIPTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = XCLIPTextModel(config=config) model.to(torch_device) model.eval() with 
torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch
XCLIPTextModelTester
python
django__django
tests/constraints/models.py
{ "start": 86, "end": 982 }
class ____(models.Model): price = models.IntegerField(null=True) discounted_price = models.IntegerField(null=True) unit = models.CharField(max_length=15, null=True) class Meta: required_db_features = { "supports_table_check_constraints", } constraints = [ models.CheckConstraint( condition=models.Q(price__gt=models.F("discounted_price")), name="price_gt_discounted_price", ), models.CheckConstraint( condition=models.Q(price__gt=0), name="%(app_label)s_%(class)s_price_gt_0", ), models.CheckConstraint( condition=models.Q( models.Q(unit__isnull=True) | models.Q(unit__in=["μg/mL", "ng/mL"]) ), name="unicode_unit_list", ), ]
Product
python
Pylons__pyramid
docs/tutorials/wiki/src/tests/tests/test_views.py
{ "start": 2219, "end": 3012 }
class ____: def _callFUT(self, context, request): from tutorial.views.default import edit_page return edit_page(context, request) def test_it_notsubmitted(self): context = testing.DummyResource() request = testing.DummyRequest() info = self._callFUT(context, request) assert info['page'] == context assert info['save_url'] == request.resource_url(context, 'edit_page') def test_it_submitted(self): context = testing.DummyResource() request = testing.DummyRequest({ 'form.submitted': True, 'body': 'Hello yo!', }) response = self._callFUT(context, request) assert response.location == 'http://example.com/' assert context.data == 'Hello yo!'
Test_edit_page
python
numba__llvmlite
llvmlite/binding/ffi.py
{ "start": 3622, "end": 4029 }
class ____: def __init__(self, context): self._context = context def __enter__(self): return self._context.__enter__() def __exit__(self, exc_type, exc_value, traceback): try: return self._context.__exit__(exc_type, exc_value, traceback) except PermissionError: pass # Resource dylibs can't be deleted on Windows.
_suppress_cleanup_errors
python
sympy__sympy
sympy/polys/multivariate_resultants.py
{ "start": 871, "end": 8257 }
class ____(): """ A class for retrieving the Dixon's resultant of a multivariate system. Examples ======== >>> from sympy import symbols >>> from sympy.polys.multivariate_resultants import DixonResultant >>> x, y = symbols('x, y') >>> p = x + y >>> q = x ** 2 + y ** 3 >>> h = x ** 2 + y >>> dixon = DixonResultant(variables=[x, y], polynomials=[p, q, h]) >>> poly = dixon.get_dixon_polynomial() >>> matrix = dixon.get_dixon_matrix(polynomial=poly) >>> matrix Matrix([ [ 0, 0, -1, 0, -1], [ 0, -1, 0, -1, 0], [-1, 0, 1, 0, 0], [ 0, -1, 0, 0, 1], [-1, 0, 0, 1, 0]]) >>> matrix.det() 0 See Also ======== Notebook in examples: sympy/example/notebooks. References ========== .. [1] [Kapur1994]_ .. [2] [Palancz08]_ """ def __init__(self, polynomials, variables): """ A class that takes two lists, a list of polynomials and list of variables. Returns the Dixon matrix of the multivariate system. Parameters ---------- polynomials : list of polynomials A list of m n-degree polynomials variables: list A list of all n variables """ self.polynomials = polynomials self.variables = variables self.n = len(self.variables) self.m = len(self.polynomials) a = IndexedBase("alpha") # A list of n alpha variables (the replacing variables) self.dummy_variables = [a[i] for i in range(self.n)] # A list of the d_max of each variable. self._max_degrees = [max(degree_list(poly)[i] for poly in self.polynomials) for i in range(self.n)] @property def max_degrees(self): sympy_deprecation_warning( """ The max_degrees property of DixonResultant is deprecated. """, deprecated_since_version="1.5", active_deprecations_target="deprecated-dixonresultant-properties", ) return self._max_degrees def get_dixon_polynomial(self): r""" Returns ======= dixon_polynomial: polynomial Dixon's polynomial is calculated as: delta = Delta(A) / ((x_1 - a_1) ... (x_n - a_n)) where, A = |p_1(x_1,... x_n), ..., p_n(x_1,... x_n)| |p_1(a_1,... x_n), ..., p_n(a_1,... x_n)| |... , ..., ...| |p_1(a_1,... a_n), ..., p_n(a_1,... 
a_n)| """ if self.m != (self.n + 1): raise ValueError('Method invalid for given combination.') # First row rows = [self.polynomials] temp = list(self.variables) for idx in range(self.n): temp[idx] = self.dummy_variables[idx] substitution = dict(zip(self.variables, temp)) rows.append([f.subs(substitution) for f in self.polynomials]) A = Matrix(rows) terms = zip(self.variables, self.dummy_variables) product_of_differences = Mul(*[a - b for a, b in terms]) dixon_polynomial = (A.det() / product_of_differences).factor() return poly_from_expr(dixon_polynomial, self.dummy_variables)[0] def get_upper_degree(self): sympy_deprecation_warning( """ The get_upper_degree() method of DixonResultant is deprecated. Use get_max_degrees() instead. """, deprecated_since_version="1.5", active_deprecations_target="deprecated-dixonresultant-properties" ) list_of_products = [self.variables[i] ** self._max_degrees[i] for i in range(self.n)] product = prod(list_of_products) product = Poly(product).monoms() return monomial_deg(*product) def get_max_degrees(self, polynomial): r""" Returns a list of the maximum degree of each variable appearing in the coefficients of the Dixon polynomial. The coefficients are viewed as polys in $x_1, x_2, \dots, x_n$. """ deg_lists = [degree_list(Poly(poly, self.variables)) for poly in polynomial.coeffs()] max_degrees = [max(degs) for degs in zip(*deg_lists)] return max_degrees def get_dixon_matrix(self, polynomial): r""" Construct the Dixon matrix from the coefficients of polynomial \alpha. Each coefficient is viewed as a polynomial of x_1, ..., x_n. """ max_degrees = self.get_max_degrees(polynomial) # list of column headers of the Dixon matrix. 
monomials = itermonomials(self.variables, max_degrees) monomials = sorted(monomials, reverse=True, key=monomial_key('lex', self.variables)) dixon_matrix = Matrix([[Poly(c, *self.variables).coeff_monomial(m) for m in monomials] for c in polynomial.coeffs()]) # remove columns if needed if dixon_matrix.shape[0] != dixon_matrix.shape[1]: keep = [column for column in range(dixon_matrix.shape[-1]) if any(element != 0 for element in dixon_matrix[:, column])] dixon_matrix = dixon_matrix[:, keep] return dixon_matrix def KSY_precondition(self, matrix): """ Test for the validity of the Kapur-Saxena-Yang precondition. The precondition requires that the column corresponding to the monomial 1 = x_1 ^ 0 * x_2 ^ 0 * ... * x_n ^ 0 is not a linear combination of the remaining ones. In SymPy notation this is the last column. For the precondition to hold the last non-zero row of the rref matrix should be of the form [0, 0, ..., 1]. """ if matrix.is_zero_matrix: return False m, n = matrix.shape # simplify the matrix and keep only its non-zero rows matrix = simplify(matrix.rref()[0]) rows = [i for i in range(m) if any(matrix[i, j] != 0 for j in range(n))] matrix = matrix[rows,:] condition = Matrix([[0]*(n-1) + [1]]) if matrix[-1,:] == condition: return True else: return False def delete_zero_rows_and_columns(self, matrix): """Remove the zero rows and columns of the matrix.""" rows = [ i for i in range(matrix.rows) if not matrix.row(i).is_zero_matrix] cols = [ j for j in range(matrix.cols) if not matrix.col(j).is_zero_matrix] return matrix[rows, cols] def product_leading_entries(self, matrix): """Calculate the product of the leading entries of the matrix.""" res = 1 for row in range(matrix.rows): for el in matrix.row(row): if el != 0: res = res * el break return res def get_KSY_Dixon_resultant(self, matrix): """Calculate the Kapur-Saxena-Yang approach to the Dixon Resultant.""" matrix = self.delete_zero_rows_and_columns(matrix) _, U, _ = matrix.LUdecomposition() matrix = 
self.delete_zero_rows_and_columns(simplify(U)) return self.product_leading_entries(matrix)
DixonResultant
python
pytorch__pytorch
test/test_scaled_matmul_cuda.py
{ "start": 21942, "end": 100422 }
class ____(TestCase): def _test_tautological_mm(self, device: str = "cuda", x_dtype: torch.dtype = e4m3_type, y_dtype: torch.dtype = e4m3_type, out_dtype: Optional[torch.dtype] = None, size: int = 16) -> None: if device != "cpu" and torch.cuda.is_available() and not PLATFORM_SUPPORTS_FP8: raise unittest.SkipTest(f8_msg) x_fp8 = torch.rand(size, size, device=device).to(x_dtype) y_fp8 = torch.eye(size, device=device, dtype=y_dtype).t() out_fp32 = torch.mm(x_fp8.to(torch.float), y_fp8.to(torch.float)) scale_a = torch.tensor(1.0, device=device) scale_b = torch.tensor(1.0, device=device) out_fp8 = scaled_mm_wrap(x_fp8, y_fp8, scale_a, scale_b, out_dtype=out_dtype) if out_dtype is not None: self.assertEqual(out_dtype, out_fp8.dtype) self.assertEqual(out_fp32, out_fp8.to(torch.float)) def test_float8_basics(self, device) -> None: if device != "cpu" and torch.cuda.is_available() and not PLATFORM_SUPPORTS_FP8: raise unittest.SkipTest(f8_msg) self._test_tautological_mm(device, e4m3_type, e4m3_type, size=16) # According to https://docs.nvidia.com/cuda/cublas/#id99 8F_E5M2 MM is unsupported # supported on ROCm but fails on CUDA ctx = self.assertRaises(ValueError) if torch.version.hip is None and device != "cpu" else contextlib.nullcontext() with ctx: self._test_tautological_mm(device, e5m2_type, e5m2_type) self._test_tautological_mm(device, e4m3_type, e5m2_type, size=32) self._test_tautological_mm(device, e5m2_type, e4m3_type, size=48) self._test_tautological_mm(device, size=64, out_dtype=torch.float16) self._test_tautological_mm(device, size=96, out_dtype=torch.float32) self._test_tautological_mm(device, size=80, out_dtype=torch.bfloat16) with self.assertRaises(AssertionError if torch.version.hip or device == "cpu" else RuntimeError): self._test_tautological_mm(device, out_dtype=e5m2_type) def test_float8_scale(self, device) -> None: if device != "cpu" and torch.cuda.is_available() and not PLATFORM_SUPPORTS_FP8: raise unittest.SkipTest(f8_msg) size = (16, 16) x = 
torch.full(size, .5, device=device, dtype=e4m3_type) # hipblaslt does not yet support mixed e4m3_type input y_type = e4m3_type if torch.version.hip else e5m2_type y = torch.full(size, .5, device=device, dtype=y_type).t() scale_one = torch.tensor(1.0, device=device) scale_a = torch.tensor(1.5, device=device) scale_b = torch.tensor(0.66, device=device) out_fp8 = scaled_mm_wrap(x, y, scale_a=scale_one, scale_b=scale_one) self.assertEqual(out_fp8.to(torch.float), torch.full(size, 4., device=device)) out_fp8_s = scaled_mm_wrap(x, y, scale_a=scale_a, scale_b=scale_b) self.assertEqual(out_fp8, out_fp8_s) @unittest.skipIf(not PLATFORM_SUPPORTS_MXFP8_GROUPED_GEMM, mxfp8_grouped_mm_skip_msg) @parametrize("G", [1, 4, 16]) @parametrize("M", [2048, 2049]) @parametrize("N", [8192]) @parametrize("K", [16640]) @parametrize("format", ["mxfp8"] + (["nvfp4", "mxfp4"] if torch.version.cuda else [])) def test_mxfp8_nvfp4_scaled_grouped_mm_2d_2d(self, G, M, N, K, format): torch.manual_seed(42) if format == "mxfp4" and SM120OrLater: raise unittest.SkipTest("MXFP4 on CUDA only supported on B200/B300") total_K = K # Alias for clarity, communicating this consists of several groups along this dim input_group_end_offsets = generate_jagged_offs( G, total_K, multiple_of=32, device="cuda" ) X = torch.randn((M, total_K), dtype=torch.bfloat16, device="cuda") * 0.1 W = torch.randn((N, total_K), dtype=torch.bfloat16, device="cuda") * 0.01 xh, xq, x_blocked_scales, x_global_scales = _2d_grouped_tensor_to_blocked_scaled( X, M, G, input_group_end_offsets, format=format ) wh, wq, w_blocked_scales, w_global_scales = _2d_grouped_tensor_to_blocked_scaled( W, N, G, input_group_end_offsets, format=format ) if format in ["mxfp4", "mxfp8"]: kwargs = _build_scaled_grouped_mm_kwargs( x_blocked_scales, w_blocked_scales, input_group_end_offsets, format, ) elif format == "nvfp4": kwargs = _build_scaled_grouped_mm_kwargs( [x_blocked_scales, x_global_scales], [w_blocked_scales, w_global_scales], 
input_group_end_offsets, format, ) else: raise ValueError(f'format must be mxfp8|nvfp4|mxfp4, got "{format}"') if format == 'nvfp4': assert x_global_scales.numel() == w_global_scales.numel() assert x_global_scales.numel() == G # Compute mxfp8 grouped mm output y_lp = scaled_grouped_mm_wrap( xq, wq.transpose(-2, -1), **kwargs, ) # bf16 reference output y_bf16 = grouped_mm( # Note: Reference result should be on reconstructed, not original values. # as-in float(fp4(t)) not t itself. xh, wh.t(), offs=input_group_end_offsets, out_dtype=torch.bfloat16 ) # Assert no NaNs assert not y_lp.isnan().any(), "low-precision output contains NaN" # Assert outputs are close torch.testing.assert_close(y_lp, y_bf16, atol=8.0e-2, rtol=8.0e-2) @unittest.skipIf(not PLATFORM_SUPPORTS_MXFP8_GROUPED_GEMM, mxfp8_grouped_mm_skip_msg) @parametrize("G", [1, 4, 16]) @parametrize("M", [16640]) @parametrize("N", [8192]) @parametrize("K", [4096]) @parametrize("format", ["mxfp8"] + (["nvfp4", "mxfp4"] if torch.version.cuda else [])) def test_mxfp8_scaled_grouped_mm_2d_3d(self, G, M, N, K, format): torch.manual_seed(42) if format == "mxfp4" and SM120OrLater: raise unittest.SkipTest("MXFP4 on CUDA only supported on B200/B300") # Simulate 2d-3d grouped gemm `out = input @ weight.t()` # 2D inputs with groups along M, 3D weights. block_size = 32 total_M = M # Alias for clarity that M dim contains groups. X = torch.randn((total_M, K), dtype=torch.bfloat16, device="cuda") * 0.1 W = torch.randn((G, N, K), dtype=torch.bfloat16, device="cuda") * 0.01 input_group_end_offsets = generate_jagged_offs( G, total_M, multiple_of=32, device="cuda" ) # For each constituent 2d subtensor in the 3d weights, quantize and convert scale to blocked format separately, # as they each used for independent gemm in the grouped gemm. 
def _3d_to_blocked_scaled(W, G, format): wh_list = [] wq_list = [] w_scale_list = [] w_global_scale_list = [] for i in range(G): if format == "mxfp8": wh, wq, w_scale = _convert_to_mxfp8_with_hp_ref(W[i]) elif format == "nvfp4": w_scale, wq = to_mxfp(W[i], format="mxfp8") wh, wq, w_scale, w_global_scale = _convert_to_nvfp4_with_hp_ref(W[i]) w_global_scale_list.append(w_global_scale) elif format == "mxfp4": wh, wq, w_scale = _convert_to_mxfp4_with_hp_ref(W[i]) else: raise ValueError(f'format must be mxfp8|nvfp4|mxfp4, got "{format}"') # Swizzle scaled if torch.version.cuda: w_scale = to_blocked(w_scale) wh_list.append(wh) wq_list.append(wq) w_scale_list.append(w_scale) wh = torch.stack(wh_list, dim=0).contiguous() wq = torch.stack(wq_list, dim=0).contiguous() w_scale = torch.stack(w_scale_list, dim=0).contiguous() # Global scales only exist for nvfp4 if len(w_global_scale_list) > 0: w_global_scales = torch.stack(w_global_scale_list) else: w_global_scales = None return wh, wq, w_scale, w_global_scales wh, wq, w_blocked_scales, w_global_scales = _3d_to_blocked_scaled(W, G, format) # For each group along `total_M` in the 2D tensor, quantize and convert scale to blocked format separately, # as they each used for independent gemm in the grouped gemm. 
def _2d_to_blocked_scaled(X, K, G, offs, format): xh_list = [] xq_list = [] x_scale_list = [] x_global_scale_list = [] for i in range(G): prev_group_end = 0 if i == 0 else input_group_end_offsets[i - 1] curr_group_end = input_group_end_offsets[i] group_size = curr_group_end - prev_group_end if group_size > 0: x_slice = X[prev_group_end:curr_group_end, :] if format == "mxfp8": xh, xq, x_scale = _convert_to_mxfp8_with_hp_ref(x_slice) elif format == "nvfp4": xh, xq, x_scale, x_global_scale = _convert_to_nvfp4_with_hp_ref(x_slice) x_global_scale_list.append(x_global_scale) elif format == "mxfp4": xh, xq, x_scale = _convert_to_mxfp4_with_hp_ref(x_slice) else: raise ValueError(f'format must be mxfp8|nvfp4|mxfp4, got "{format}"') if torch.version.cuda: x_scale = to_blocked(x_scale) xh_list.append(xh) xq_list.append(xq) x_scale_list.append(x_scale) xh = torch.cat(xh_list, dim=0).contiguous() xq = torch.cat(xq_list, dim=0).contiguous() x_scale = torch.cat(x_scale_list, dim=0).contiguous() x_scale = x_scale.reshape(-1, K // block_size) xq = xq.view(-1, xq.shape[-1]) xh = xh.view(-1, xh.shape[-1]) x_global_scales = None if len(x_global_scale_list) > 0: x_global_scales = torch.stack(x_global_scale_list) return xh, xq, x_scale, x_global_scales xh, xq, x_blocked_scales, x_global_scales = _2d_to_blocked_scaled(X, K, G, input_group_end_offsets, format) if format in ["mxfp8", "mxfp4"]: kwargs = _build_scaled_grouped_mm_kwargs( x_blocked_scales, w_blocked_scales, input_group_end_offsets, format, ) elif format == "nvfp4": kwargs = _build_scaled_grouped_mm_kwargs( [x_blocked_scales, x_global_scales], [w_blocked_scales, w_global_scales], input_group_end_offsets, format, ) else: raise ValueError(f'format must be mxfp8|nvfp4, got "{format}"') if format == 'nvfp4': assert x_global_scales.numel() == w_global_scales.numel() assert x_global_scales.numel() == G # Compute low-precision grouped gemm. 
y_lp = scaled_grouped_mm_wrap( xq, wq.transpose(-2, -1), **kwargs ) # Compute reference bf16 grouped gemm. # Note: Reference result should be on reconstructed, not original values. # as-in float(fp4(t)) not t itself. y_bf16 = grouped_mm( xh, wh.transpose(-2, -1), offs=input_group_end_offsets, out_dtype=torch.bfloat16, ) # Assert outputs are close. torch.testing.assert_close(y_lp, y_bf16, atol=8.0e-2, rtol=8.0e-2) @unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg) @parametrize("base_dtype", [torch.float16, torch.bfloat16, torch.float32]) def test_scaled_mm_vs_emulated(self, base_dtype): torch.manual_seed(42) input_dtype = e4m3_type output_dtype = base_dtype compare_type = torch.float32 x = torch.randn(16, 16, device="cuda", dtype=base_dtype) y = torch.randn(32, 16, device="cuda", dtype=base_dtype).t() x_scale = tensor_to_scale(x, input_dtype).float() y_scale = tensor_to_scale(y, input_dtype).float() x_fp8 = to_fp8_saturated(x * x_scale, input_dtype) y_fp8 = to_fp8_saturated(y * y_scale, input_dtype) # Calculate actual F8 mm out_scaled_mm = scaled_mm_wrap( x_fp8, y_fp8, scale_a=x_scale.reciprocal(), scale_b=y_scale.reciprocal(), out_dtype=output_dtype ) # Calculate emulated F8 mm out_emulated = mm_float8_emulated( x_fp8, x_scale, y_fp8, y_scale, output_dtype ) if output_dtype != base_dtype: out_scaled_mm = out_scaled_mm.to(compare_type) out_scaled_mm = out_scaled_mm / tensor_to_scale(out_scaled_mm, input_dtype) out_emulated = out_emulated.to(compare_type) out_emulated = out_emulated / tensor_to_scale(out_emulated, input_dtype) if base_dtype in {torch.bfloat16, torch.float16}: atol, rtol = 7e-2, 7e-2 else: atol, rtol = 3e-3, 3e-3 torch.testing.assert_close(out_scaled_mm, out_emulated, atol=atol, rtol=rtol) @unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg) @parametrize("base_dtype", [torch.float16, torch.bfloat16, torch.float32]) def test_scaled_mm_change_stride(self, base_dtype): torch.manual_seed(42) input_dtype = e4m3_type output_dtype = base_dtype compare_type = 
torch.float32 x = torch.empty_strided((16, 16), (16, 1), device="cuda", dtype=base_dtype) y = torch.empty_strided((16, 32), (1, 64), device="cuda", dtype=base_dtype) x.normal_() y.normal_() x_scale = tensor_to_scale(x, input_dtype).float() y_scale = tensor_to_scale(y, input_dtype).float() x_fp8 = to_fp8_saturated(x * x_scale, input_dtype) y_fp8 = to_fp8_saturated(y * y_scale, input_dtype) # Calculate actual F8 mm out_scaled_mm = scaled_mm_wrap( x_fp8, y_fp8, scale_a=x_scale.reciprocal(), scale_b=y_scale.reciprocal(), out_dtype=output_dtype ) # Calculate emulated F8 mm out_emulated = mm_float8_emulated( x_fp8, x_scale, y_fp8, y_scale, output_dtype ) if output_dtype != base_dtype: out_scaled_mm = out_scaled_mm.to(compare_type) out_scaled_mm = out_scaled_mm / tensor_to_scale(out_scaled_mm, input_dtype) out_emulated = out_emulated.to(compare_type) out_emulated = out_emulated / tensor_to_scale(out_emulated, input_dtype) if base_dtype in {torch.bfloat16, torch.float16}: atol, rtol = 7e-2, 7e-2 else: atol, rtol = 3e-3, 3e-3 torch.testing.assert_close(out_scaled_mm, out_emulated, atol=atol, rtol=rtol) @onlyCUDA def test_float8_bias(self, device) -> None: if device != "cpu" and torch.cuda.is_available() and not PLATFORM_SUPPORTS_FP8: raise unittest.SkipTest(f8_msg) (k, l, m) = (16, 48, 32) x = torch.ones((k, l), device=device).to(e4m3_type) y = torch.full((m, l), .25, device=device, dtype=e4m3_type).t() bias = torch.full((m,), 4.0, device=device, dtype=torch.bfloat16) scale_a = torch.tensor(1.0, device=device) scale_b = torch.tensor(1.0, device=device) out_fp8 = scaled_mm_wrap(x, y, scale_a=scale_a, scale_b=scale_b) outb_fp8 = scaled_mm_wrap(x, y, scale_a=scale_a, scale_b=scale_b, bias=bias) # this fails on ROCm currently because hipblaslt doesn't have amax op out_fp32 = out_fp8.to(torch.float32) outb_fp32 = outb_fp8.to(torch.float32) difference = torch.abs(out_fp32 - outb_fp32) self.assertEqual(difference, torch.tensor(4.0, device=device).expand_as(out_fp32)) @onlyCUDA 
    @unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
    @parametrize("bias", [True, False])
    def test_non_divisible_leading_dim(self, device, bias: bool) -> None:
        # Smoke test: M = 17 is not a multiple of 16; the call just has to
        # succeed, no numerical check is made.
        x = torch.rand((17, 16), device=device).to(e4m3_type)
        y = torch.rand((16, 16), device=device).to(e4m3_type).t()
        scale_a = torch.tensor(1.0, device=device)
        scale_b = torch.tensor(1.0, device=device)
        input_bias = None
        if bias:
            input_bias = torch.rand((16,), device=device).to(torch.bfloat16)
        _ = scaled_mm_wrap(x, y, scale_a, scale_b, bias=input_bias)

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
    def test_float8_bias_relu_edgecase(self, device) -> None:
        # Zero input times anything is zero, so the output must equal the
        # (negative) bias value everywhere.
        (k, l, m) = (16, 48, 32)
        x = torch.full((k, l), 0.0, device=device).to(e4m3_type)
        y = torch.full((m, l), 1.0, device=device, dtype=e4m3_type).t()
        bias = torch.full((m,), -3.0, device=device, dtype=torch.bfloat16)
        scale_a = torch.tensor(1.0, device=device)
        scale_b = torch.tensor(1.0, device=device)
        outb_fp8 = scaled_mm_wrap(x, y, scale_a, scale_b, bias=bias)
        outb_fp32 = outb_fp8.to(torch.float32)
        self.assertEqual(outb_fp32, torch.tensor(-3.0, device=device).expand_as(outb_fp32))

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
    def test_float32_output_errors_with_bias(self, device) -> None:
        # bias + float32 out_dtype is an unsupported combination and must raise.
        (k, l, m) = (16, 48, 32)
        x = torch.rand((k, l), device=device).to(e4m3_type)
        y = torch.full((m, l), .25, device=device, dtype=e4m3_type).t()
        scale_a = torch.tensor(1.0, device=device)
        scale_b = torch.tensor(1.0, device=device)
        bias = torch.full((m,), 4.0, device=device, dtype=torch.bfloat16)
        self.assertRaisesRegex(
            ValueError,
            "Bias is not supported when out_dtype is set to Float32",
            lambda: scaled_mm_wrap(x, y, scale_a, scale_b, bias=bias, out_dtype=torch.float32),
        )

    @onlyCUDA
    @unittest.skipIf(PLATFORM_SUPPORTS_FP8 or not torch.cuda.is_available(), f8_msg)
    def test_error_message_fp8_pre_sm89(self, device) -> None:
        # Note: this test only runs on CUDA platforms that do NOT support fp8
        # (skip condition is inverted relative to the tests above).
        (k, l, m) = (16, 48, 32)
        x = torch.rand((k, l), device=device).to(e4m3_type)
        y = torch.rand((m, l), device=device).to(e4m3_type).t()
        scale_a = torch.tensor(1.0, device=device)
        scale_b = torch.tensor(1.0, device=device)
        self.assertRaisesRegex(
            RuntimeError,
            r"torch\.\_scaled\_mm is only supported on CUDA devices with compute capability \>\= 9\.0 or 8\.9, or ROCm MI300\+",
            lambda: scaled_mm_wrap(x, y, scale_a, scale_b, out_dtype=torch.float32),
        )

    @unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
    @unittest.skipIf(SM100OrLater, "fast_accum is SM90-only")
    def test_float8_scale_fast_accum(self, device) -> None:
        # 0.5 * 0.5 * 16 * 1.5 * 0.66 ~= 4.0 with the chosen scales.
        size = (16, 16)
        x = torch.full(size, .5, device=device, dtype=e4m3_type)
        # hipblaslt does not yet support mixed e4m3_type input
        y_type = e4m3_type if torch.version.hip else e5m2_type
        y = torch.full(size, .5, device=device, dtype=y_type).t()
        scale_a = torch.tensor(1.5, device=device)
        scale_b = torch.tensor(0.66, device=device)
        out_fp8 = scaled_mm_wrap(x, y, scale_a, scale_b, out_dtype=e4m3_type, use_fast_accum=True)
        self.assertEqual(out_fp8.to(torch.float), torch.full(size, 4., device=device))
        # Passing the scales by keyword must give the same result as by position.
        out_fp8_s = scaled_mm_wrap(x, y, scale_a=scale_a, scale_b=scale_b, out_dtype=e4m3_type, use_fast_accum=True)
        self.assertEqual(out_fp8, out_fp8_s)

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FP8 or IS_WINDOWS, f8_msg)
    @unittest.skipIf(not SM89OrLater, "rowwise implementation is currently sm89-sm100 specific")
    @parametrize("use_fast_accum", [True, False])
    def test_float8_rowwise_scaling_sanity(self, device, use_fast_accum: bool) -> None:
        # With constant fill and unit row-wise scales, every output element is
        # exactly K * fill_value^2.
        M, K, N = (1024, 512, 2048)
        fill_value = 0.5
        x = torch.full((M, K), fill_value, device=device)
        y = torch.full((N, K), fill_value, device=device)

        x_scales = torch.ones((x.shape[0], 1), device=device, dtype=torch.float32)
        y_scales = torch.ones((1, y.shape[0]), device=device, dtype=torch.float32)

        x_fp8 = x.to(e4m3_type)
        y_fp8 = y.to(e4m3_type).t()

        out_fp8 = scaled_mm_wrap(
            x_fp8,
            y_fp8,
            scale_a=x_scales,
            scale_b=y_scales,
            out_dtype=torch.bfloat16,
            use_fast_accum=use_fast_accum,
        )
        self.assertEqual(
            out_fp8.to(torch.float32),
            torch.full((M, N), K * (fill_value**2),
                       device=device)
        )

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FP8 or IS_WINDOWS, f8_msg)
    def test_float8_error_messages(self, device) -> None:
        """Exercise the validation errors raised for malformed scale arguments."""
        M, K, N = (1024, 512, 2048)
        fill_value = 0.5
        x = torch.full((M, K), fill_value, device=device)
        y = torch.full((N, K), fill_value, device=device)

        x_fp8 = x.to(e4m3_type)
        y_fp8 = y.to(e4m3_type).t()

        # Tensor-wise recipe requires a single-element scale.
        with self.assertRaisesRegex(
            ValueError,
            re.escape("scale_b must have 1 Float element")
        ):
            scaled_mm_wrap(
                x_fp8,
                y_fp8,
                scale_a=torch.ones((1, 1), device="cuda"),
                scale_b=torch.ones((1, 2), device="cuda"),
                scale_recipe_a=ScalingType.TensorWise,
                scale_recipe_b=ScalingType.TensorWise,
                out_dtype=torch.bfloat16,
            )

        # Row-wise recipe: scale_b must have exactly N elements.
        with self.assertRaisesRegex(
            ValueError,
            re.escape(f"scale_b must have {N} Float elements, got {N + 1}"),
        ):
            scaled_mm_wrap(
                x_fp8,
                y_fp8,
                scale_a=torch.ones((M, 1), device="cuda"),
                scale_b=torch.ones((1, N + 1), device="cuda"),
                scale_recipe_a=ScalingType.RowWise,
                scale_recipe_b=ScalingType.RowWise,
                out_dtype=torch.bfloat16,
            )

        # A 1-D scale_a is rejected for the row-wise recipe.
        with self.assertRaisesRegex(
            IndexError, re.escape("Dimension out of range")
        ):
            scaled_mm_wrap(
                x_fp8,
                y_fp8,
                scale_a=torch.ones((M), device="cuda"),
                scale_b=torch.ones((N, 1), device="cuda"),
                scale_recipe_a=ScalingType.RowWise,
                scale_recipe_b=ScalingType.RowWise,
                out_dtype=torch.bfloat16,
            )

        # Non-unit stride along the last dim of a scale is rejected.
        with self.assertRaisesRegex(
            ValueError,
            re.escape("expected scale_b.stride(1) to be 1, but got 2"),
        ):
            scaled_mm_wrap(
                x_fp8,
                y_fp8,
                scale_a=torch.ones((M, 1), device="cuda"),
                scale_b=torch.ones((1, N * 2), device="cuda")[:, ::2],
                scale_recipe_a=ScalingType.RowWise,
                scale_recipe_b=ScalingType.RowWise,
                out_dtype=torch.bfloat16,
            )

        def e5m2():
            out = scaled_mm_wrap(
                x_fp8,
                y_fp8.to(e5m2_type),
                scale_a=torch.ones((M, 1), device="cuda"),
                scale_b=torch.ones((1, N), device="cuda"),
                out_dtype=torch.bfloat16,
            )
            return out

        # e5m2 as the second operand is accepted only on sm90 with CUDA >= 12.9;
        # everywhere else it must raise a dtype error.
        if torch.cuda.get_device_capability() == (9, 0) and torch.version.cuda and torch.version.cuda >= "12.9":
            out = e5m2()
            self.assertEqual(out, torch.ones_like(out) * 128.)
        else:
            if torch.version.hip:
                # Note re.compile is used, not re.escape. This is to accommodate fn vs fnuz type message.
                with self.assertRaisesRegex(
                    ValueError,
                    r"expected mat_b\.dtype\(\) to be at::kFloat8_e4m3fn(uz)?, but got c10::Float8_e5m2(fnuz)?"
                ):
                    e5m2()
            else:
                with self.assertRaisesRegex(
                    RuntimeError,
                    r"Expected b\.dtype\(\) == at::kFloat8_e4m3fn to be true, but got false\.",
                ):
                    e5m2()

    @unittest.skipIf(not PLATFORM_SUPPORTS_FP8 or IS_WINDOWS, f8_msg)
    @unittest.skipIf(not SM89OrLater, "rowwise implementation is currently sm89-sm100 specific")
    @parametrize("base_dtype", [torch.bfloat16, torch.float16, torch.float32])
    @parametrize("shapes", [
        (128, 512, 256),
    ])
    @with_tf32_off
    def test_scaled_mm_vs_emulated_row_wise(self, base_dtype, shapes):
        M, K, N = shapes
        # Fp32 out_dtype is only supported by cuBLAS, which however only started
        # shipping row-wise kernels in CUDA 12.9, and only for sm90+.
        if base_dtype is torch.float32:
            if torch.version.hip:
                raise unittest.SkipTest("hipblaslt rowwise _scaled_mm only supports BFloat16")
            if _get_torch_cuda_version() < (12, 9):
                raise unittest.SkipTest("Need CUDA 12.9+ for row-wise fp8 w/ cuBLAS")
            if torch.cuda.get_device_capability() < (9, 0):
                raise unittest.SkipTest("Need sm90+ for row-wise fp8 w/ cuBLAS")
        if base_dtype is torch.float16:
            if torch.version.hip:
                raise unittest.SkipTest("hipblaslt rowwise _scaled_mm only supports BFloat16")
            if torch.cuda.get_device_capability() < (9, 0):
                raise unittest.SkipTest("Need sm90+ for row-wise fp8 w/ cuBLAS")

        torch.manual_seed(42)
        input_dtype = e4m3_type
        output_dtype = base_dtype

        x = torch.randn(M, K, device="cuda", dtype=base_dtype)
        y = torch.randn(N, K, device="cuda", dtype=base_dtype).t()

        bias = None
        if base_dtype in {torch.bfloat16, torch.float16}:
            bias = torch.randn((N,), device="cuda", dtype=base_dtype)

        # Row-wise scales: one scale per row of x (reduce over dim=1), one per
        # column of y (reduce over dim=0).
        x_scales = tensor_to_scale(x, input_dtype, dim=1).float()
        y_scales = tensor_to_scale(y, input_dtype, dim=0).float()

        x_fp8 = to_fp8_saturated(x * x_scales, e4m3_type)
        y_fp8 = 
to_fp8_saturated(y * y_scales, e4m3_type) def test(): # Calculate actual F8 mm out_scaled_mm = scaled_mm_wrap( x_fp8, y_fp8, scale_a=x_scales.reciprocal(), scale_b=y_scales.reciprocal(), out_dtype=output_dtype, bias=bias ) # Calculate emulated F8 mm out_emulated = mm_float8_emulated( x_fp8, x_scales, y_fp8, y_scales, output_dtype, bias ) if base_dtype in {torch.bfloat16, torch.float16}: atol, rtol = 7e-2, 7e-2 else: atol, rtol = 2e-3, 2e-3 self.assertEqual(out_scaled_mm, out_emulated, atol=atol, rtol=rtol) cosine_sim = torch.nn.functional.cosine_similarity( out_emulated.flatten().float(), out_scaled_mm.flatten().float(), dim=0 ) self.assertGreaterEqual(float(cosine_sim), 0.999) # only cuBLAS supports rowwise with fp32 output and cuBLAS only supports # rowwise on SM 9.0 if torch.cuda.get_device_capability() != (9, 0) and output_dtype == torch.float: with self.assertRaisesRegex( ValueError, "Only bf16 and fp16 high precision output types are supported for row-wise scaling." ): test() else: test() @unittest.skipIf(not PLATFORM_SUPPORTS_FP8 or IS_WINDOWS, f8_msg) @unittest.skipIf(not IS_SM90, "cuBLAS blockwise scaling requires sm90+") @unittest.skipIf( _get_torch_cuda_version() < (12, 9), "cuBLAS blockwise scaling added in CUDA 12.9", ) @parametrize("output_dtype", [torch.bfloat16, torch.float32]) @parametrize("lhs_block,rhs_block", [(1, 1), (128, 1), (1, 128)]) @parametrize("M,N,K", [ # Nice size (256, 768, 512), # Requires padding for 128x128 scale (384, 128, 1280), # M=N=K for eyes test (512, 512, 512), ]) @parametrize("test_case", [ "x_eye_b_eye", "x_ones_y_ones_calc_scales", "x_ones_y_ones_set_scales", "x_ones_y_ones_modify_scales", "data_random_scales_one", "data_random_calc_scales", ]) def test_scaled_mm_block_wise_numerics(self, output_dtype, lhs_block, rhs_block, M, N, K, test_case): """ subsume test_scaled_mm_vs_emulated_block_wise for random inputs, random scales, do some other functional tests as well. 
# Inputs (as generated are): # A: [M, K] # B: [N, K] # then scales are, for the 3 combinations: # 1x128 x 1x128: # As: [M, K // 128], stride: [1, M] -> scale.t().contiguous().t() # Bs: [N, K // 128], stride: [1, N] -> scale.t().contiguous().t() # 1x128 x 128x128 # L4 = round_up(K // 128, 4) # As: [M, K // 128], stride: [1, M] -> scale.t().contiguous().t() # Bs: [L4, N // 128], stride: [1, L4] -> scale.t() # 128x128 x 1x128 # L4 = round_up(K // 128, 4) # As: [L4, M // 128], stride: [1, L4] # Bs: [N, K // 128], stride: [1, N] """ torch.manual_seed(42) def _adjust_lhs_scale(x_fp8, x_scales, lhs_block): M, K = x_fp8.shape x_scales_original = x_scales.clone() # 1x128 blocks need scales to be outer-dim-major if lhs_block == 1: x_scales = x_scales.t().contiguous().t() lhs_recipe = ScalingType.BlockWise1x128 assert (x_scales.shape[0] == M and x_scales.shape[1] == K // 128), f"{x_scales.shape=}" assert (x_scales.stride(0) == 1 and x_scales.stride(1) in [1, M]), f"{x_scales.stride=}" x_hp = hp_from_1x128(x_fp8, x_scales_original) else: lhs_recipe = ScalingType.BlockWise128x128 x_scales, pad_amount = _pad_128x128_scales(x_scales) # scales in [M // 128, L4] -> [L4, M // 128] x_scales = x_scales.t() x_hp = hp_from_128x128(x_fp8, x_scales_original) return x_hp, lhs_recipe, x_scales, x_scales_original def _adjust_rhs_scale(y_fp8, y_scales, rhs_block): N, K = y_fp8.shape y_scales_original = y_scales.clone() if rhs_block == 1: y_scales = y_scales.t().contiguous().t() rhs_recipe = ScalingType.BlockWise1x128 assert (y_scales.shape[0] == N and y_scales.shape[1] == K // 128), f"{y_scales.shape=}" assert (y_scales.stride(0) == 1 and y_scales.stride(1) in [1, N]), f"{y_scales.stride=}" y_hp = hp_from_1x128(y_fp8, y_scales_original) else: rhs_recipe = ScalingType.BlockWise128x128 y_scales, pad_amount = _pad_128x128_scales(y_scales) # Scale in [N // 128, L4] -> [L4, N // 128] y_scales = y_scales.t() y_hp = hp_from_128x128(y_fp8, y_scales_original) return y_hp, rhs_recipe, y_scales, 
y_scales_original def _build_lhs(x, lhs_block): M, K = x.shape x_fp8, x_scales = tensor_to_scale_block(x, e4m3_type, lhs_block, 128) x_scales_original = x_scales x_hp, x_recipe, x_scales, x_scales_original = _adjust_lhs_scale(x_fp8, x_scales, lhs_block) return x_hp, x_recipe, x_fp8, x_scales, x_scales_original def _build_rhs(y, rhs_block): N, K = y.shape y_fp8, y_scales = tensor_to_scale_block(y, e4m3_type, rhs_block, 128) y_hp, y_recipe, y_scales, y_scales_original = _adjust_rhs_scale(y_fp8, y_scales, rhs_block) return y_hp, y_recipe, y_fp8, y_scales, y_scales_original def _run_test(x_hp, x_recipe, x_fp8, x_scales, x_scales_original, y_hp, y_recipe, y_fp8, y_scales, y_scales_original): # Calculate actual F8 mm out_scaled_mm = scaled_mm_wrap( x_fp8, y_fp8.t(), scale_a=x_scales.reciprocal(), scale_recipe_a=x_recipe, # Note: No more .t() on scale_b, not necessary. scale_b=y_scales.reciprocal(), scale_recipe_b=y_recipe, out_dtype=output_dtype, ) # Calculate emulated F8 mm out_emulated = mm_float8_emulated_block( x_fp8, x_scales_original, y_fp8.t(), y_scales_original.t(), output_dtype ) cosine_sim = torch.nn.functional.cosine_similarity( out_emulated.flatten().float(), (x @ y.t()).flatten().float(), dim=0 ) self.assertGreaterEqual(float(cosine_sim), 0.999) cosine_sim = torch.nn.functional.cosine_similarity( out_scaled_mm.flatten().float(), out_emulated.flatten().float(), dim=0 ) self.assertGreaterEqual(float(cosine_sim), 0.999) if output_dtype in {torch.bfloat16, torch.float16}: atol, rtol = 6e-1, 7e-2 else: atol, rtol = 7e-1, 2e-3 self.assertEqual(out_scaled_mm, out_emulated.to(output_dtype), atol=atol, rtol=rtol) # One last check against the full-precision reference, to ensure we # didn't mess up the scaling itself and made the test trivial. 
cosine_sim = torch.nn.functional.cosine_similarity( out_scaled_mm.flatten().float(), (x @ y.t()).flatten().float(), dim=0 ) self.assertGreaterEqual(float(cosine_sim), 0.999) def _build_constant_scale(t, block, val): M, K = t.shape if block == 1: scale_shape = M, K // 128 else: scale_shape = M // 128, K // 128 scale = torch.full(scale_shape, val, device='cuda') return scale def hp_to_scaled(t, scale, block): if block == 1: return hp_to_1x128(t, scale) else: return hp_to_128x128(t, scale) e4m3_type = torch.float8_e4m3fn if test_case == "x_eye_b_eye": if M != K or M != N: return unittest.skip("a_eye_b_eye only defined for M = N = K") x = torch.eye(M, device='cuda') y = torch.eye(M, device='cuda') x_hp, x_recipe, x_fp8, x_scales, x_scales_original = _build_lhs(x, lhs_block) y_hp, y_recipe, y_fp8, y_scales, y_scales_original = _build_lhs(y, rhs_block) elif test_case == "x_ones_y_ones_calc_scales": x = torch.full((M, K), 1.0, device='cuda') y = torch.full((N, K), 1.0, device='cuda') x_hp, x_recipe, x_fp8, x_scales, x_scales_original = _build_lhs(x, lhs_block) y_hp, y_recipe, y_fp8, y_scales, y_scales_original = _build_lhs(y, rhs_block) elif test_case in ["x_ones_y_ones_set_scales", "x_ones_y_ones_modify_scales"]: x = torch.full((M, K), 1.0, device='cuda') y = torch.full((N, K), 1.0, device='cuda') x_scales = _build_constant_scale(x, lhs_block, 1.) y_scales = _build_constant_scale(y, rhs_block, 1.) if "modify" in test_case: x_scales[0, 0] = 4. y_scales[-1, -1] = 4. 
x_fp8 = hp_to_scaled(x, x_scales, lhs_block) y_fp8 = hp_to_scaled(y, y_scales, rhs_block) x_hp, x_recipe, x_scales, x_scales_original = _adjust_lhs_scale(x_fp8, x_scales, lhs_block) y_hp, y_recipe, y_scales, y_scales_original = _adjust_rhs_scale(y_fp8, y_scales, rhs_block) elif test_case == "data_random_scales_one": x = torch.randint(0, 255, (M, K), device='cuda', dtype=torch.uint8).to(torch.bfloat16) y = torch.randint(0, 255, (N, K), device='cuda', dtype=torch.uint8).to(torch.bfloat16) x_scales = _build_constant_scale(x, lhs_block, 1.) y_scales = _build_constant_scale(y, rhs_block, 1.) x_fp8 = hp_to_scaled(x, x_scales, lhs_block) y_fp8 = hp_to_scaled(y, y_scales, rhs_block) x_hp, x_recipe, x_scales, x_scales_original = _adjust_lhs_scale(x_fp8, x_scales, lhs_block) y_hp, y_recipe, y_scales, y_scales_original = _adjust_rhs_scale(y_fp8, y_scales, rhs_block) elif test_case == "data_random_calc_scales": # Note: Old test_scaled_mm_vs_emulated_block_wise test case x = torch.randn(M, K, device="cuda", dtype=output_dtype) y = torch.randn(N, K, device="cuda", dtype=output_dtype) * 1e-3 x_hp, x_recipe, x_fp8, x_scales, x_scales_original = _build_lhs(x, lhs_block) y_hp, y_recipe, y_fp8, y_scales, y_scales_original = _build_lhs(y, rhs_block) else: raise ValueError("Unknown test-case passed") _run_test(x_hp, x_recipe, x_fp8, x_scales, x_scales_original, y_hp, y_recipe, y_fp8, y_scales, y_scales_original) @unittest.skipIf(not PLATFORM_SUPPORTS_FP8 or IS_WINDOWS, f8_msg) @unittest.skipIf(not IS_SM90, "cuBLAS blockwise scaling requires sm90+") @unittest.skipIf( _get_torch_cuda_version() < (12, 9), "cuBLAS blockwise scaling added in CUDA 12.9", ) @parametrize("output_dtype", [torch.bfloat16, torch.float32]) @parametrize("lhs_block,rhs_block", [(1, 1), (128, 1), (1, 128)]) @parametrize("M,N,K", [(256, 128, 256), (256, 256, 128)]) def test_scaled_mm_vs_emulated_block_wise_verify_small_shapes( self, output_dtype, lhs_block, rhs_block, M, N, K ): torch.manual_seed(42) x = 
torch.randn(M, K, device="cuda", dtype=output_dtype).pow(3) y = torch.randn(N, K, device="cuda", dtype=output_dtype).pow(3) x_fp8, x_scales = tensor_to_scale_block(x, e4m3_type, lhs_block, 128) y_fp8, y_scales = tensor_to_scale_block(y, e4m3_type, rhs_block, 128) x_scales_original = x_scales y_scales_original = y_scales # 1x128 blocks need scales to be outer-dim-major if lhs_block == 1: x_scales = x_scales.t().contiguous().t() lhs_recipe = ScalingType.BlockWise1x128 assert (x_scales.shape[0] == M and x_scales.shape[1] == K // 128), f"{x_scales.shape=}" assert (x_scales.stride(0) == 1 and x_scales.stride(1) in [1, M]), f"{x_scales.stride=}" else: lhs_recipe = ScalingType.BlockWise128x128 x_scales, pad_amount = _pad_128x128_scales(x_scales) # scales in [M // 128, L4] -> [L4, M // 128] x_scales = x_scales.t() if rhs_block == 1: y_scales = y_scales.t().contiguous().t() rhs_recipe = ScalingType.BlockWise1x128 assert (y_scales.shape[0] == N and y_scales.shape[1] == K // 128), f"{y_scales.shape=}" assert (y_scales.stride(0) == 1 and y_scales.stride(1) in [1, N]), f"{y_scales.stride=}" else: rhs_recipe = ScalingType.BlockWise128x128 y_scales, pad_amount = _pad_128x128_scales(y_scales) # Scale in [N // 128, L4] -> [L4, N // 128] y_scales = y_scales.t() # Verify that actual F8 mm doesn't error scaled_mm_wrap( x_fp8, y_fp8.t(), scale_a=x_scales, scale_recipe_a=lhs_recipe, # Note: No more .t() on scale_b, not necessary. 
scale_b=y_scales, scale_recipe_b=rhs_recipe, out_dtype=output_dtype, ) # Verify that emulated F8 mm doesn't error mm_float8_emulated_block( x_fp8, x_scales_original, y_fp8.t(), y_scales_original.t(), output_dtype ) @skipIfRocm @onlyCUDA @unittest.skipIf(not PLATFORM_SUPPORTS_FP8 or IS_WINDOWS, f8_msg) @unittest.skipIf(IS_SM90, "cuBLAS blockwise scaling works on sm90") @unittest.skipIf( _get_torch_cuda_version() < (12, 9), "cuBLAS blockwise scaling added in CUDA 12.9", ) @parametrize("output_dtype", [torch.bfloat16, ]) @parametrize("lhs_block,rhs_block", [(1, 1), (128, 1), (1, 128)]) @parametrize("M,N,K", [(256, 256, 256), (256, 256, 512)]) def test_scaled_mm_deepseek_error_messages( self, output_dtype, lhs_block, rhs_block, M, N, K ): torch.manual_seed(42) x = torch.randn(M, K, device="cuda", dtype=output_dtype).pow(3) y = torch.randn(N, K, device="cuda", dtype=output_dtype).pow(3) x_fp8, x_scales = tensor_to_scale_block(x, e4m3_type, lhs_block, 128) y_fp8, y_scales = tensor_to_scale_block(y, e4m3_type, rhs_block, 128) # 1x128 blocks need scales to be outer-dim-major if lhs_block == 1: x_scales = x_scales.t().contiguous().t() lhs_recipe = ScalingType.BlockWise1x128 else: lhs_recipe = ScalingType.BlockWise128x128 if rhs_block == 1: y_scales = y_scales.t().contiguous().t() rhs_recipe = ScalingType.BlockWise1x128 else: rhs_recipe = ScalingType.BlockWise128x128 # Verify that actual F8 mm doesn't error with self.assertRaisesRegex( NotImplementedError, ".*DeepSeek.*scaling.*only supported in CUDA for SM90.*" ): scaled_mm_wrap( x_fp8, y_fp8.t(), scale_a=x_scales, scale_recipe_a=lhs_recipe, scale_b=y_scales.t(), scale_recipe_b=rhs_recipe, out_dtype=output_dtype, ) @unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg) @parametrize("which_dim_zero", [0, 1, 2]) @parametrize("use_torch_compile", [False, True]) def test_zero_dim_tensorwise(self, which_dim_zero, use_torch_compile) -> None: device = "cuda" x_dtype, y_dtype = e4m3_type, e4m3_type out_dtype = torch.bfloat16 M, K, N = 
32, 32, 32 if which_dim_zero == 0: M = 0 elif which_dim_zero == 1: K = 0 elif which_dim_zero == 2: N = 0 x_fp8 = torch.zeros(M, K, device=device).to(x_dtype) y_fp8 = torch.zeros(N, K, device=device, dtype=y_dtype).t() out_fp32 = torch.mm(x_fp8.to(torch.float), y_fp8.to(torch.float)) scale_a = torch.tensor(float('-inf'), device=device) scale_b = torch.tensor(float('-inf'), device=device) f = scaled_mm_wrap if use_torch_compile: f = torch.compile(scaled_mm_wrap) out_fp8 = f(x_fp8, y_fp8, scale_a, scale_b, out_dtype=out_dtype) self.assertEqual(out_dtype, out_fp8.dtype) self.assertEqual(out_fp32, out_fp8.to(torch.float)) @unittest.skipIf(IS_WINDOWS, "Windows doesn't support row-wise scaling") @unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg) @unittest.skipIf(not SM90OrLater, "sm89 kernel isn't opted into carveout yet") def test_honor_sm_carveout(self) -> None: torch.manual_seed(42) x = torch.randn(8192, 2048, device="cuda", dtype=torch.float32) y = torch.randn(8192, 2048, device="cuda", dtype=torch.float32).t() x_scales = tensor_to_scale(x, e4m3_type, dim=1).reciprocal() y_scales = tensor_to_scale(y, e4m3_type, dim=0).reciprocal() x_fp8 = to_fp8_saturated(x / x_scales, e4m3_type) y_fp8 = to_fp8_saturated(y / y_scales, e4m3_type) cu_count = torch.cuda.get_device_properties().multi_processor_count carveout = 66 if torch.version.cuda else cu_count // 8 with tempfile.NamedTemporaryFile() as f: with torch.profiler.profile(activities=[torch.profiler.ProfilerActivity.CUDA]) as prof: self.assertIsNone(torch._C._get_sm_carveout_experimental()) scaled_mm_wrap(x_fp8, y_fp8, scale_a=x_scales, scale_b=y_scales, out_dtype=torch.bfloat16) torch._C._set_sm_carveout_experimental(0) self.assertEqual(torch._C._get_sm_carveout_experimental(), 0) scaled_mm_wrap(x_fp8, y_fp8, scale_a=x_scales, scale_b=y_scales, out_dtype=torch.bfloat16) torch._C._set_sm_carveout_experimental(66) self.assertEqual(torch._C._get_sm_carveout_experimental(), 66) scaled_mm_wrap(x_fp8, y_fp8, scale_a=x_scales, 
scale_b=y_scales, out_dtype=torch.bfloat16) torch._C._set_sm_carveout_experimental(None) self.assertIsNone(torch._C._get_sm_carveout_experimental()) scaled_mm_wrap(x_fp8, y_fp8, scale_a=x_scales, scale_b=y_scales, out_dtype=torch.bfloat16) prof.export_chrome_trace(f.name) if torch.version.hip: with open(f.name) as file: events = [evt for evt in json.load(file)["traceEvents"] if evt.get("cat", "") == "kernel"] # events were returned out of order; need to be sorted on "ts" timestamp events = sorted(events, key=lambda x: x['ts']) # ROCm carveout is invisible except for kernels running slower on fewer CUs no_carveout, carveout_0, carveout, no_carveout_again = [float(evt.get("dur", "0.0")) for evt in events] if True or not (no_carveout < carveout and carveout_0 < carveout and no_carveout_again < carveout): # noqa: SIM222 # something went wrong, print more info to help debug flaky test print("ROCm debug info for test_honor_sm_carveout") print("cu_count", cu_count) print("no_carveout", no_carveout) print("carveout_0", carveout_0) print("carveout", carveout) print("no_carveout_again", no_carveout_again) self.assertTrue(no_carveout < carveout) self.assertTrue(carveout_0 < carveout) self.assertTrue(no_carveout_again < carveout) # ROCm carveout will create new streams when enabled, and go back to the original stream when disabled no_carveout, carveout_0, carveout, no_carveout_again = [int(evt.get("tid", "0")) for evt in events] self.assertTrue(no_carveout == no_carveout_again) self.assertTrue(no_carveout == carveout_0) self.assertTrue(no_carveout != carveout) self.assertTrue(carveout_0 != carveout) else: with open(f.name) as file: no_carveout, carveout_0, carveout_66, no_carveout_again = [ math.prod(evt.get("args", {}).get("grid", [])) for evt in json.load(file)["traceEvents"] if evt.get("cat", "") == "kernel" ] self.assertEqual(no_carveout, no_carveout_again) capability = torch.cuda.get_device_capability() if capability in {(10, 0), (10, 3), (12, 0), (12, 1)}: # expected 
failure # CUTLASS only supports SM carveout via green contexts on SM100 self.assertEqual(no_carveout, carveout_66) self.assertEqual(carveout_66, carveout_0) else: # correct behavior self.assertNotEqual(no_carveout, carveout_66) self.assertNotEqual(carveout_66, carveout_0) def test_pack_uint4(self): """ Verify that given a tensor with high precision values [val0, val1], the x2 packed representation is val1:val0 (from MSB to LSB), and not val0:val1. Note that the packing function is private to this file, but it's still good to test that we are packing in the expected way. """ hp_data = torch.tensor([0b00000010, 0b00001011], dtype=torch.uint8) lp_data_actual = pack_uint4(hp_data) lp_data_expected = torch.tensor([0b10110010], dtype=torch.uint8) torch.testing.assert_close(lp_data_actual, lp_data_expected, atol=0, rtol=0) @skipIfRocm @onlyCUDA @unittest.skipIf(not PLATFORM_SUPPORTS_MX_GEMM, mx_skip_msg) @parametrize("mkn", [ # Nice shapes (128, 128, 128), (256, 256, 256), (128, 256, 512), (256, 512, 128), (512, 128, 256), # Very unbalanced (1023, 64, 48), (31, 1024, 64), (45, 96, 1024), # Mixed large and small (2, 1024, 128), (127, 96, 1024), (1025, 128, 96) ], name_fn=lambda mkn: f"{mkn[0]}_{mkn[1]}_{mkn[2]}") def test_blockwise_nvfp4_with_global_scale(self, mkn) -> None: device = 'cuda' M, K, N = mkn BLOCK_SIZE = 16 # Note: SQNR target from `test_blockwise_mxfp8_nvfp4_mxfp4_numerics` test approx_match_sqnr_target = 15.8 A_ref = torch.randn((M, K), device=device, dtype=torch.bfloat16) * 1000 B_ref = torch.randn((N, K), device=device, dtype=torch.bfloat16) * 1000 A, A_scale, A_global_scale = data_to_nvfp4_with_global_scale(A_ref, BLOCK_SIZE) B, B_scale, B_global_scale = data_to_nvfp4_with_global_scale(B_ref, BLOCK_SIZE) if torch.version.cuda: A_scale = to_blocked(A_scale) B_scale = to_blocked(B_scale) swizzle = [SwizzleType.SWIZZLE_32_4_4, SwizzleType.NO_SWIZZLE] else: swizzle = [SwizzleType.NO_SWIZZLE, SwizzleType.NO_SWIZZLE] C_ref = A_ref @ B_ref.t() C = scaled_mm( A, 
B.t(), scale_a=[A_scale, A_global_scale], scale_recipe_a=[ScalingType.BlockWise1x16, ScalingType.TensorWise], scale_b=[B_scale, B_global_scale], scale_recipe_b=[ScalingType.BlockWise1x16, ScalingType.TensorWise], swizzle_a=swizzle, swizzle_b=swizzle, output_dtype=torch.bfloat16, ) sqnr = compute_error(C_ref, C) assert sqnr.item() > approx_match_sqnr_target @unittest.skipIf(not PLATFORM_SUPPORTS_MX_GEMM, mx_skip_msg) @parametrize("test_case_name", [ "a_eye_b_eye", "a_ones_b_ones", "a_ones_modified_b_ones", "a_ones_b_ones_modified", "a_scale_modified_b_ones", "a_ones_b_scale_modified", "data_random_scales_one", "data_random_scales_from_data", ]) @parametrize("fast_accum", [False, True]) @parametrize("mkn", [ # Nice shapes (128, 128, 128), (256, 256, 256), (128, 256, 512), (256, 512, 128), (512, 128, 256), # Non block multiples (65, 96, 112), (197, 224, 272), # K not multiple of 32 (skipped for fp4) (197, 240, 272), # Very unbalanced (1023, 64, 48), (31, 1024, 64), (45, 96, 1024), # Mixed large and small (2, 1024, 128), (127, 96, 1024), (1025, 128, 96) ], name_fn=lambda mkn: f"{mkn[0]}_{mkn[1]}_{mkn[2]}") @parametrize("recipe", ["mxfp8", "mxfp4", "nvfp4"]) def test_blockwise_mxfp8_nvfp4_mxfp4_numerics(self, test_case_name, fast_accum, mkn, recipe) -> None: if torch.version.hip and recipe == "nvfp4": raise unittest.SkipTest("nvfp4 not supported on ROCm, skipping") if (recipe == "nvfp4" or recipe == "mxfp4") and fast_accum: raise unittest.SkipTest("fast_accum not supported in nvfp4/mxfp4 cublas gemm, skipping") if recipe == "mxfp4" and SM120OrLater: raise unittest.SkipTest("MXFP4 on CUDA only supported on B200/B300") device = "cuda" M, K, N = mkn if recipe == "nvfp4" and K % 32 != 0: raise unittest.SkipTest("K must be divisible by 32 for nvfp4 cublas gemm, skipping") if torch.version.hip: if not (M % 16 == 0 and K % 128 == 0 and N % 16 == 0): raise unittest.SkipTest("M and N must be multiples of 16 and K must be multiple of 128 on ROCm, skipping") fp4_scaling_dtype = 
torch.float8_e8m0fnu if recipe == "mxfp4" else torch.float8_e4m3fn BLOCK_SIZE = 16 if recipe == "nvfp4" else 32 if K % BLOCK_SIZE != 0: raise unittest.SkipTest(f"K ({K}) must be divisible by BLOCK_SIZE ({BLOCK_SIZE}), skipping") require_exact_match = True approx_match_sqnr_target = 22.0 if test_case_name == "a_eye_b_eye": if not ((M == K) and (M == N)): raise unittest.SkipTest("this test is only defined for M == K == N, skipping") A_ref = torch.eye(M, device=device, dtype=torch.bfloat16) B_ref = torch.eye(M, device=device, dtype=torch.bfloat16) if recipe == "mxfp8": A = A_ref.to(torch.float8_e4m3fn) B = B_ref.to(torch.float8_e4m3fn) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) else: # nvfp4 # mxfp4 A = _bfloat16_to_float4_e2m1fn_x2(A_ref) B = _bfloat16_to_float4_e2m1fn_x2(B_ref) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) elif test_case_name == "a_ones_b_ones": A_ref = torch.ones(M, K, device=device, dtype=torch.bfloat16) B_ref = torch.ones(N, K, device=device, dtype=torch.bfloat16) if recipe == "mxfp8": A = A_ref.to(torch.float8_e4m3fn) B = B_ref.to(torch.float8_e4m3fn) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) else: # nvfp4 # mxfp4 A = _bfloat16_to_float4_e2m1fn_x2(A_ref) B = _bfloat16_to_float4_e2m1fn_x2(B_ref) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) elif test_case_name == "a_ones_modified_b_ones": A_ref = torch.ones(M, K, device=device, dtype=torch.bfloat16) B_ref 
= torch.ones(N, K, device=device, dtype=torch.bfloat16) A_ref[1][0:BLOCK_SIZE] = 2 if recipe == "mxfp8": A = A_ref.to(torch.float8_e4m3fn) B = B_ref.to(torch.float8_e4m3fn) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) else: # nvfp4 # mxfp4 A = _bfloat16_to_float4_e2m1fn_x2(A_ref) B = _bfloat16_to_float4_e2m1fn_x2(B_ref) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) elif test_case_name == "a_ones_b_ones_modified": A_ref = torch.ones(M, K, device=device, dtype=torch.bfloat16) B_ref = torch.ones(N, K, device=device, dtype=torch.bfloat16) B_ref[1][0:BLOCK_SIZE] = 2 if recipe == "mxfp8": A = A_ref.to(torch.float8_e4m3fn) B = B_ref.to(torch.float8_e4m3fn) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) else: # nvfp4 # mxfp4 A = _bfloat16_to_float4_e2m1fn_x2(A_ref) B = _bfloat16_to_float4_e2m1fn_x2(B_ref) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) elif test_case_name == "a_scale_modified_b_ones": A_ref = torch.ones(M, K, device=device, dtype=torch.bfloat16) B_ref = torch.ones(N, K, device=device, dtype=torch.bfloat16) if recipe == "mxfp8": A = A_ref.to(torch.float8_e4m3fn) B = B_ref.to(torch.float8_e4m3fn) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) A_ref[1][0:BLOCK_SIZE] = 4 A[1][0:BLOCK_SIZE] = 2 A_scale[1][0] = 2 else: # nvfp4 # 
mxfp4 A = _bfloat16_to_float4_e2m1fn_x2(A_ref) B = _bfloat16_to_float4_e2m1fn_x2(B_ref) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) A_ref[1][0:BLOCK_SIZE] = 4 A.view(torch.uint8)[1][0:(BLOCK_SIZE // 2)] = 0b01000100 A_scale[1][0] = 2 elif test_case_name == "a_ones_b_scale_modified": A_ref = torch.ones(M, K, device=device, dtype=torch.bfloat16) B_ref = torch.ones(N, K, device=device, dtype=torch.bfloat16) if recipe == "mxfp8": A = A_ref.to(torch.float8_e4m3fn) B = B_ref.to(torch.float8_e4m3fn) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) B_ref[1][0:BLOCK_SIZE] = 4 B[1][0:BLOCK_SIZE] = 2 B_scale[1][0] = 2 else: # nvfp4 # mxfp4 A = _bfloat16_to_float4_e2m1fn_x2(A_ref) B = _bfloat16_to_float4_e2m1fn_x2(B_ref) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) B_ref[1][0:BLOCK_SIZE] = 4 B.view(torch.uint8)[1][0:(BLOCK_SIZE // 2)] = 0b01000100 B_scale[1][0] = 2 elif test_case_name == "data_random_scales_one": require_exact_match = False if recipe == "mxfp8": # scales all-ones, element data random while being exactly representable in float8_e4m3fn # generate integers in [0, 255] and interpret as float8_e4m3fn A_ref = torch.randint(0, 255, (M, K), device=device, dtype=torch.uint8).view(torch.float8_e4m3fn).to(torch.bfloat16) B_ref = torch.randint(0, 255, (N, K), device=device, dtype=torch.uint8).view(torch.float8_e4m3fn).to(torch.bfloat16) # modification: don't allow NaN values A_ref[torch.isnan(A_ref)] = 0 B_ref[torch.isnan(B_ref)] = 0 A = A_ref.to(torch.float8_e4m3fn) B = B_ref.to(torch.float8_e4m3fn) A_scale = torch.full((M, ceil_div(K, 
BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) else: # nvfp4 # mxfp4 # scales all-ones, element data random while being exactly representable in float4_e2m1fn_x2 # generate integers in [0, 16] and cast to bfloat16 A_ref = _floatx_unpacked_to_f32( torch.randint(0, 16, (M, K), device=device, dtype=torch.uint8), FP4_EBITS, FP4_MBITS ).bfloat16() B_ref = _floatx_unpacked_to_f32( torch.randint(0, 16, (N, K), device=device, dtype=torch.uint8), FP4_EBITS, FP4_MBITS ).bfloat16() A = _bfloat16_to_float4_e2m1fn_x2(A_ref) B = _bfloat16_to_float4_e2m1fn_x2(B_ref) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) elif test_case_name == "data_random_scales_from_data": if not K % BLOCK_SIZE == 0: raise unittest.SkipTest(f"this test is only defined for K a multiple of {BLOCK_SIZE}, skipping") require_exact_match = False # random data, scales from data A_ref = torch.randn((M, K), device=device, dtype=torch.bfloat16) * 1000 B_ref = torch.randn((N, K), device=device, dtype=torch.bfloat16) * 1000 if recipe == "mxfp8": # Calculate scales based on the inputs A_scale = data_to_mx_scale(A_ref, BLOCK_SIZE, recipe) B_scale = data_to_mx_scale(B_ref, BLOCK_SIZE, recipe) max_val = F8E4M3_MAX_VAL min_val = -1 * max_val A = (A_ref.reshape(-1, BLOCK_SIZE) / A_scale.reshape(M * ceil_div(K, BLOCK_SIZE), 1).float()).reshape(M, K) A = A.clamp(min=min_val, max=max_val).to(torch.float8_e4m3fn) B = (B_ref.reshape(-1, BLOCK_SIZE) / B_scale.reshape(N * ceil_div(K, BLOCK_SIZE), 1).float()).reshape(N, K) B = B.clamp(min=min_val, max=max_val).to(torch.float8_e4m3fn) else: # nvfp4 # mxfp4 if recipe == "mxfp4": A_scale = data_to_mx_scale(A_ref, BLOCK_SIZE, recipe) B_scale = data_to_mx_scale(B_ref, BLOCK_SIZE, recipe) else: A_scale = data_to_nvfp4_scale(A_ref, 
BLOCK_SIZE) B_scale = data_to_nvfp4_scale(B_ref, BLOCK_SIZE) max_val = FP4_MAX_VAL min_val = -1 * max_val A = (A_ref.reshape(-1, BLOCK_SIZE) / A_scale.reshape(M * ceil_div(K, BLOCK_SIZE), 1).bfloat16()).reshape(M, K) A = A.clamp(min=min_val, max=max_val) A = _bfloat16_to_float4_e2m1fn_x2(A) B = (B_ref.reshape(-1, BLOCK_SIZE) / B_scale.reshape(N * ceil_div(K, BLOCK_SIZE), 1).bfloat16()).reshape(N, K) B = B.clamp(min=min_val, max=max_val) B = _bfloat16_to_float4_e2m1fn_x2(B) approx_match_sqnr_target = 15 if recipe == "mxfp4" else 15.8 C_ref = A_ref @ B_ref.t() # convert to swizzled format if not torch.version.hip: A_scale = to_blocked(A_scale) B_scale = to_blocked(B_scale) C = scaled_mm_wrap( A, B.t(), A_scale, B_scale, out_dtype=torch.bfloat16, use_fast_accum=fast_accum, ) if require_exact_match: torch.testing.assert_close(C, C_ref, atol=0, rtol=0) else: sqnr = compute_error(C_ref, C) assert sqnr.item() > approx_match_sqnr_target @unittest.skipIf(not PLATFORM_SUPPORTS_MX_GEMM or IS_WINDOWS, mx_skip_msg) @parametrize("recipe", ["mxfp8", "mxfp4" if torch.version.hip else "nvfp4"]) def test_blockwise_mxfp8_nvfp4_error_messages(self, device, recipe) -> None: if recipe == "mxfp4" and SM120OrLater: raise unittest.SkipTest("MXFP4 on CUDA only supported on B200/B300") M, K, N = (1024, 512, 2048) BLOCK_SIZE_K = 16 if recipe == "nvfp4" else 32 BLOCK_SIZE_MN = 128 fill_value = 0.5 scale_dtype = torch.float8_e4m3fn if recipe == "nvfp4" else torch.float8_e8m0fnu x = torch.full((M, K), fill_value, device=device) y = torch.full((N, K), fill_value, device=device) if recipe == "mxfp8": x_lowp = x.to(e4m3_type) y_lowp = y.to(e4m3_type).t() else: # nvfp4 #mxfp4 x_lowp = _bfloat16_to_float4_e2m1fn_x2(x.bfloat16()) y_lowp = _bfloat16_to_float4_e2m1fn_x2(y.bfloat16()).t() num_k_blocks = ceil_div(K, BLOCK_SIZE_K) padded_num_k_blocks = ceil_div(num_k_blocks, 4) * 4 expected_a_size = BLOCK_SIZE_MN * ceil_div(M, BLOCK_SIZE_MN) * padded_num_k_blocks expected_b_size = BLOCK_SIZE_MN * 
ceil_div(N, BLOCK_SIZE_MN) * padded_num_k_blocks block = ( ScalingType.BlockWise1x16 if recipe == "nvfp4" else ScalingType.BlockWise1x32 ) if torch.version.hip: swizzle = SwizzleType.NO_SWIZZLE else: swizzle = SwizzleType.SWIZZLE_32_4_4 # Test wrong scale tensor size for scale_a with correct dtype with self.assertRaisesRegex( ValueError, f".*For Block[W,w]ise.*scaling.*scale_a should have {expected_a_size} " f"elements.*" , ): incorrect_size_a = torch.ones(expected_a_size - 1, device=device, dtype=scale_dtype) correct_size_b = torch.ones(expected_b_size, device=device, dtype=scale_dtype) scaled_mm_wrap( x_lowp, y_lowp, scale_a=incorrect_size_a, scale_recipe_a=block, scale_b=correct_size_b, scale_recipe_b=block, swizzle_a=swizzle, swizzle_b=swizzle, out_dtype=torch.bfloat16, ) # Test wrong scale tensor size for scale_b with correct dtype with self.assertRaisesRegex( ValueError, f"For Block[W,w]ise.*scaling.*scale_b should have {expected_b_size} " f"elements.*" , ): correct_size_a = torch.ones(expected_a_size, device=device, dtype=scale_dtype) incorrect_size_b = torch.ones(expected_b_size + 1, device=device, dtype=scale_dtype) scaled_mm_wrap( x_lowp, y_lowp, scale_a=correct_size_a, scale_recipe_a=block, scale_b=incorrect_size_b, scale_recipe_b=block, swizzle_a=swizzle, swizzle_b=swizzle, out_dtype=torch.bfloat16, ) # Test non-contiguous scale tensors with correct dtype with self.assertRaisesRegex( ValueError, "For Block[W,w]ise.*scaling.*both scales should be contiguous" , ): non_contiguous_a = torch.ones(expected_a_size * 2, device=device, dtype=scale_dtype)[::2] contiguous_b = torch.ones(expected_b_size, device=device, dtype=scale_dtype) scaled_mm_wrap( x_lowp, y_lowp, scale_a=non_contiguous_a, scale_b=contiguous_b, out_dtype=torch.bfloat16, ) def scaled_grouped_mm_helper(self, alist, blist, ascalelist, bscalelist, outlist, use_fast_accum): for a, b, ascale, bscale, out in zip(alist, blist, ascalelist, bscalelist, outlist): out_ref = scaled_mm_wrap(a, b.t(), 
ascale.view(-1, 1), bscale.view(1, -1), out_dtype=torch.bfloat16, use_fast_accum=use_fast_accum) self.assertEqual(out, out_ref, atol=5e-2, rtol=5e-4) # Testing only _scaled_grouped_mm() with multiple shapes, as # _scaled_mm() already has more combinations of parameters than # _scaled_grouped_mm(), for supporting more than one inputs layout # combinations. @unittest.skipIf(not PLATFORM_SUPPORTS_FP8_GROUPED_GEMM, f8_grouped_msg) @parametrize("fast_accum", [False, True]) # AMD does not support non-contiguous inputs yet @parametrize("strided", [False] + ([True] if torch.version.cuda else [])) # AMD does not support NVFP4 @parametrize("wrap_v2", [True, False]) def test_scaled_grouped_gemm_2d_2d(self, fast_accum, strided, wrap_v2): device = "cuda" fp8_dtype = e4m3_type m, n, k, n_groups = 16, 32, 64, 4 a = torch.randn(m, k * n_groups + k * int(strided), device=device).to(fp8_dtype)[:, :k * n_groups] b = torch.randn(n, k * n_groups + k * int(strided), device=device).to(fp8_dtype)[:, :k * n_groups] scale_a = torch.rand(m * n_groups, device=device, dtype=torch.float32) scale_b = torch.rand(n * n_groups, device=device, dtype=torch.float32) offs = torch.arange(k, n_groups * k + 1, k, device=device, dtype=torch.int32) f = scaled_grouped_mm_wrap out = f(a, b.t(), scale_a, scale_b, scale_recipe_a=ScalingType.RowWise, scale_recipe_b=ScalingType.RowWise, offs=offs, out_dtype=torch.bfloat16, use_fast_accum=fast_accum, wrap_v2=wrap_v2) offs_cpu = offs.cpu() alist, blist, ascalelist, bscalelist = [], [], [], [] start = 0 for i in range(n_groups): alist.append(a[:, start:offs_cpu[i]]) blist.append(b[:, start:offs_cpu[i]]) ascalelist.append(scale_a[i * m : (i + 1) * m]) bscalelist.append(scale_b[i * n : (i + 1) * n]) start = offs_cpu[i] self.scaled_grouped_mm_helper(alist, blist, ascalelist, bscalelist, out, fast_accum) @unittest.skipIf(not PLATFORM_SUPPORTS_FP8_GROUPED_GEMM, f8_grouped_msg) @parametrize("fast_accum", [False, True]) # AMD does not support non-contiguous inputs yet 
@parametrize("strided", [False] + ([True] if torch.version.cuda else [])) @parametrize("wrap_v2", [True, False]) def test_scaled_grouped_gemm_2d_3d(self, fast_accum, strided, wrap_v2): device = "cuda" fp8_dtype = e4m3_type m, n, k, n_groups = 16, 32, 64, 4 s_int = int(strided) a = torch.randn(m * n_groups, k * (1 + s_int), device=device).to(fp8_dtype)[:, :k] b = torch.randn(n_groups * (1 + s_int), n, k * (1 + s_int), device=device).to(fp8_dtype)[::(1 + s_int), :, :k] self.assertTrue(a.is_contiguous() is not strided) self.assertTrue(b.is_contiguous() is not strided) for check_zero_size in (True, False): if check_zero_size and n_groups <= 1: continue offs = torch.arange(m, n_groups * m + 1, m, device="cuda", dtype=torch.int32) if check_zero_size: offs[0] = offs[1] scale_a = torch.rand(n_groups * m, device="cuda", dtype=torch.float32) scale_b = torch.rand(n_groups * n, device="cuda", dtype=torch.float32).view(n_groups, n) f = scaled_grouped_mm_wrap out = f(a, b.transpose(-2, -1), scale_a, scale_b, scale_recipe_a=ScalingType.RowWise, scale_recipe_b=ScalingType.RowWise, offs=offs, out_dtype=torch.bfloat16, use_fast_accum=fast_accum, wrap_v2=wrap_v2) offs_cpu = offs.cpu() alist, ascalelist, outlist = [], [], [] start = 0 for i in range(n_groups): alist.append(a[start:offs_cpu[i]]) ascalelist.append(scale_a[start:offs_cpu[i]]) outlist.append(out[start:offs_cpu[i]]) start = offs_cpu[i] self.scaled_grouped_mm_helper(alist, b, ascalelist, scale_b, outlist, fast_accum) @unittest.skipIf(not PLATFORM_SUPPORTS_FP8_GROUPED_GEMM, f8_grouped_msg) @parametrize("fast_accum", [False, True]) # AMD does not support non-contiguous inputs yet @parametrize("strided", [False] + ([True] if torch.version.cuda else [])) def test_scaled_grouped_gemm_3d_3d(self, fast_accum, strided): device = "cuda" fp8_dtype = e4m3_type m, n, k, n_groups = 16, 32, 64, 4 s_int = int(strided) a = torch.randn(n_groups * (1 + s_int), m, k * (1 + s_int), device=device).to(fp8_dtype)[::(1 + s_int), :, :k] b = 
torch.randn(n_groups * (1 + s_int), n, k * (1 + s_int), device=device).to(fp8_dtype)[::(1 + s_int), :, :k] self.assertTrue(a.is_contiguous() is not strided) self.assertTrue(b.is_contiguous() is not strided) scale_a = torch.rand(n_groups * m, device="cuda", dtype=torch.float32).view(n_groups, m) scale_b = torch.rand(n_groups * n, device="cuda", dtype=torch.float32).view(n_groups, n) f = torch._scaled_grouped_mm out = f(a, b.transpose(-2, -1), scale_a, scale_b, out_dtype=torch.bfloat16, use_fast_accum=fast_accum) self.scaled_grouped_mm_helper(a, b, scale_a, scale_b, out, fast_accum) @unittest.skipIf(not PLATFORM_SUPPORTS_FP8_GROUPED_GEMM, f8_grouped_msg) @parametrize("fast_accum", [False, True]) # AMD does not support non-contiguous inputs yet @parametrize("strided", [False] + ([True] if torch.version.cuda else [])) def test_scaled_grouped_gemm_3d_2d(self, fast_accum, strided): device = "cuda" fp8_dtype = e4m3_type m, n, k, n_groups = 16, 32, 64, 4 s_int = int(strided) a = torch.randn(n_groups * (1 + s_int), m, k * (1 + s_int), device=device).to(fp8_dtype)[::(1 + s_int), :, :k] b = torch.randn(n * n_groups, k * (1 + s_int), device=device).to(fp8_dtype)[:, :k] self.assertTrue(a.is_contiguous() is not strided) self.assertTrue(b.is_contiguous() is not strided) scale_a = torch.rand(n_groups * m, device="cuda", dtype=torch.float32).view(n_groups, m) scale_b = torch.rand(n_groups * n, device="cuda", dtype=torch.float32) for check_zero_size in (True, False): if check_zero_size and n_groups <= 1: continue offs = torch.arange(n, n_groups * n + 1, n, device="cuda", dtype=torch.int32) if check_zero_size: offs[0] = offs[1] f = torch._scaled_grouped_mm out = f(a, b.transpose(-2, -1), scale_a, scale_b, offs=offs, out_dtype=torch.bfloat16, use_fast_accum=fast_accum) offs_cpu = offs.cpu() blist, bscalelist, outlist = [], [], [] start = 0 for i in range(n_groups): blist.append(b[start:offs_cpu[i]]) bscalelist.append(scale_b[start:offs_cpu[i]]) outlist.append(out[:, 
start:offs_cpu[i]]) start = offs_cpu[i] self.scaled_grouped_mm_helper(a, blist, scale_a, bscalelist, outlist, fast_accum) @unittest.skipIf(not PLATFORM_SUPPORTS_MX_GEMM, mx_skip_msg) def test_blockwise_mxfp8_compile(self) -> None: device = "cuda" M, K, N = 128, 128, 128 BLOCK_SIZE = 32 A_ref = torch.eye(M, device=device, dtype=torch.bfloat16) B_ref = torch.eye(M, device=device, dtype=torch.bfloat16) A = A_ref.to(torch.float8_e4m3fn) B = B_ref.to(torch.float8_e4m3fn) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu) C_ref = A_ref @ B_ref.t() compiled_scaled_mm = torch.compile(scaled_mm_wrap, backend="inductor") C = compiled_scaled_mm( A, B.t(), A_scale, B_scale, out_dtype=torch.bfloat16, use_fast_accum=False, ) torch.testing.assert_close(C, C_ref, atol=0, rtol=0) @unittest.skipIf(not PLATFORM_SUPPORTS_MX_GEMM, mx_skip_msg) def test_blockwise_nvfp4_compile(self) -> None: device = "cuda" M, K, N = 128, 128, 128 BLOCK_SIZE = 32 if torch.version.hip else 16 fp4_scaling_dtype = torch.float8_e8m0fnu if torch.version.hip else torch.float8_e4m3fn A_ref = torch.eye(M, device=device, dtype=torch.bfloat16) B_ref = torch.eye(M, device=device, dtype=torch.bfloat16) A = _bfloat16_to_float4_e2m1fn_x2(A_ref) B = _bfloat16_to_float4_e2m1fn_x2(B_ref) A_scale = torch.full((M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) B_scale = torch.full((N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=fp4_scaling_dtype) C_ref = A_ref @ B_ref.t() compiled_scaled_mm = torch.compile(scaled_mm_wrap, backend="inductor") # C = scaled_mm_wrap( C = compiled_scaled_mm( A, B.t(), A_scale, B_scale, out_dtype=torch.bfloat16, use_fast_accum=False, ) torch.testing.assert_close(C, C_ref, atol=0, rtol=0) instantiate_device_type_tests(TestFP8Matmul, globals(), except_for="cpu") if __name__ == '__main__': 
TestCase._default_dtype_check_enabled = True run_tests()
TestFP8Matmul
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/cloud_sql.py
{ "start": 22175, "end": 25877 }
class ____(CloudSQLBaseOperator): """ Clone an instance to a target instance. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:CloudSQLCloneInstanceOperator` :param instance: Database instance ID to be cloned. This does not include the project ID. :param destination_instance_name: Database instance ID to be created. This does not include the project ID. :param clone_context: additional clone_context parameters as described in https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1/instances/clone :param project_id: Project ID of the project that contains the instance. If set to None or missing, the default project_id from the Google Cloud connection is used. :param gcp_conn_id: The connection ID used to connect to Google Cloud. :param api_version: API version used (e.g. v1beta4). :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). 
""" # [START gcp_sql_clone_template_fields] template_fields: Sequence[str] = ( "project_id", "instance", "destination_instance_name", "gcp_conn_id", "api_version", ) # [END gcp_sql_clone_template_fields] def __init__( self, *, instance: str, destination_instance_name: str, clone_context: dict | None = None, project_id: str = PROVIDE_PROJECT_ID, gcp_conn_id: str = "google_cloud_default", api_version: str = "v1beta4", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: self.destination_instance_name = destination_instance_name self.clone_context = clone_context or {} super().__init__( project_id=project_id, instance=instance, gcp_conn_id=gcp_conn_id, api_version=api_version, impersonation_chain=impersonation_chain, **kwargs, ) def _validate_inputs(self) -> None: super()._validate_inputs() if not self.destination_instance_name: raise AirflowException("The required parameter 'destination_instance_name' is empty or None") def execute(self, context: Context): hook = CloudSQLHook( gcp_conn_id=self.gcp_conn_id, api_version=self.api_version, impersonation_chain=self.impersonation_chain, ) if not self._check_if_instance_exists(self.instance, hook): raise AirflowException( f"Cloud SQL instance with ID {self.instance} does not exist. " "Please specify another instance to patch." ) body = { "cloneContext": { "kind": "sql#cloneContext", "destinationInstanceName": self.destination_instance_name, **self.clone_context, } } return hook.clone_instance( project_id=self.project_id, body=body, instance=self.instance, )
CloudSQLCloneInstanceOperator
python
pydantic__pydantic
tests/mypy/outputs/mypy-plugin_ini/plugin_strict_fields.py
{ "start": 965, "end": 1340 }
class ____(ModelStrictMode): b: int = Field(strict=False) c: int = Field(strict=True) # expected error: a, c ModelOverride2(a='1', b='2', c='3') # MYPY: error: Argument "a" to "ModelOverride2" has incompatible type "str"; expected "int" [arg-type] # MYPY: error: Argument "c" to "ModelOverride2" has incompatible type "str"; expected "int" [arg-type]
ModelOverride2
python
huggingface__transformers
src/transformers/models/swiftformer/modeling_swiftformer.py
{ "start": 4453, "end": 5633 }
class ____(nn.Module): """ `SwiftFormerConvEncoder` with 3*3 and 1*1 convolutions. Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, dim: int): super().__init__() hidden_dim = int(config.mlp_ratio * dim) self.depth_wise_conv = nn.Conv2d(dim, dim, kernel_size=3, padding=1, groups=dim) self.norm = nn.BatchNorm2d(dim, eps=config.batch_norm_eps) self.point_wise_conv1 = nn.Conv2d(dim, hidden_dim, kernel_size=1) self.act = nn.GELU() self.point_wise_conv2 = nn.Conv2d(hidden_dim, dim, kernel_size=1) self.drop_path = nn.Dropout(p=config.drop_conv_encoder_rate) self.layer_scale = nn.Parameter(torch.ones(dim).unsqueeze(-1).unsqueeze(-1), requires_grad=True) def forward(self, x): input = x x = self.depth_wise_conv(x) x = self.norm(x) x = self.point_wise_conv1(x) x = self.act(x) x = self.point_wise_conv2(x) x = input + self.drop_path(self.layer_scale * x) return x
SwiftFormerConvEncoder
python
great-expectations__great_expectations
tests/actions/test_core_actions.py
{ "start": 9894, "end": 13737 }
class ____: @pytest.mark.unit def test_equality(self): """I know, this one seems silly. But this was a bug.""" a = EmailAction( name="my_action", smtp_address="test", smtp_port="587", receiver_emails="test@gmail.com", ) b = EmailAction( name="my_action", smtp_address="test", smtp_port="587", receiver_emails="test@gmail.com", ) assert a == b @pytest.mark.unit @pytest.mark.parametrize( "emails, expected_email_list", [ pytest.param("test1@gmail.com", ["test1@gmail.com"], id="single_email"), pytest.param( "test1@gmail.com, test2@hotmail.com", ["test1@gmail.com", "test2@hotmail.com"], id="multiple_emails", ), pytest.param( "test1@gmail.com,test2@hotmail.com", ["test1@gmail.com", "test2@hotmail.com"], id="multiple_emails_no_space", ), ], ) def test_run( self, checkpoint_result: CheckpointResult, emails: str, expected_email_list: list[str], ): action = EmailAction( name="my_action", smtp_address="test", smtp_port="587", receiver_emails=emails, ) with mock.patch.object(smtplib, "SMTP") as mock_server: out = action.run(checkpoint_result=checkpoint_result) mock_send_email = mock_server().sendmail # Should contain success/failure in title assert ( f"Subject: {checkpoint_result.checkpoint_config.name}: True" in mock_send_email.call_args.args[-1] ) mock_send_email.assert_called_once_with( None, expected_email_list, mock.ANY, ) assert out == {"email_result": "success"} @pytest.mark.unit def test_run_smptp_address_substitution(self, checkpoint_result: CheckpointResult): config_provider = project_manager.get_config_provider() assert isinstance(config_provider, mock.Mock) # noqa: TID251 # just using for the instance compare SMPT_ADDRESS_KEY = "${smtp_address}" SMPT_PORT_KEY = "${smtp_port}" SENDER_LOGIN_KEY = "${sender_login}" SENDER_ALIAS_KEY = "${sender_alias_login}" SENDER_PASSWORD_KEY = "${sender_password_login}" RECEIVER_EMAILS_KEY = "${receiver_emails}" action = EmailAction( name="my_action", smtp_address=SMPT_ADDRESS_KEY, smtp_port=SMPT_PORT_KEY, 
sender_login=SENDER_LOGIN_KEY, sender_alias=SENDER_ALIAS_KEY, sender_password=SENDER_PASSWORD_KEY, receiver_emails=RECEIVER_EMAILS_KEY, ) config_from_uncommitted_config = { SMPT_ADDRESS_KEY: "something.com", SMPT_PORT_KEY: "123", SENDER_LOGIN_KEY: "sender@greatexpectations.io", SENDER_ALIAS_KEY: "alias@greatexpectations.io", SENDER_PASSWORD_KEY: "sender_password_login", RECEIVER_EMAILS_KEY: "foo@greatexpectations.io, bar@great_expectations.io", } config_provider.substitute_config.side_effect = lambda key: config_from_uncommitted_config[ key ] with mock.patch.object(smtplib, "SMTP") as mock_server: action.run(checkpoint_result=checkpoint_result) mock_server().sendmail.assert_called_once_with( config_from_uncommitted_config[SENDER_ALIAS_KEY], [ email.strip() for email in config_from_uncommitted_config[RECEIVER_EMAILS_KEY].split(",") ], mock.ANY, )
TestEmailAction
python
Textualize__textual
docs/examples/guide/reactivity/validate01.py
{ "start": 169, "end": 999 }
class ____(App): CSS_PATH = "validate01.tcss" count = reactive(0) def validate_count(self, count: int) -> int: """Validate value.""" if count < 0: count = 0 elif count > 10: count = 10 return count def compose(self) -> ComposeResult: yield Horizontal( Button("+1", id="plus", variant="success"), Button("-1", id="minus", variant="error"), id="buttons", ) yield RichLog(highlight=True) def on_button_pressed(self, event: Button.Pressed) -> None: if event.button.id == "plus": self.count += 1 else: self.count -= 1 self.query_one(RichLog).write(f"count = {self.count}") if __name__ == "__main__": app = ValidateApp() app.run()
ValidateApp
python
marshmallow-code__marshmallow
examples/flask_example.py
{ "start": 570, "end": 671 }
class ____(DeclarativeBase): pass db = SQLAlchemy(app, model_class=Base) ##### MODELS #####
Base
python
numpy__numpy
numpy/_core/tests/test_umath.py
{ "start": 93075, "end": 93865 }
class ____: def test_simple(self): assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2)) assert_almost_equal(ncu.hypot(0, 0), 0) def test_reduce(self): assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0) assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0) assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0) assert_equal(ncu.hypot.reduce([]), 0.0) def assert_hypot_isnan(x, y): with np.errstate(invalid='ignore'): assert_(np.isnan(ncu.hypot(x, y)), f"hypot({x}, {y}) is {ncu.hypot(x, y)}, not nan") def assert_hypot_isinf(x, y): with np.errstate(invalid='ignore'): assert_(np.isinf(ncu.hypot(x, y)), f"hypot({x}, {y}) is {ncu.hypot(x, y)}, not inf")
TestHypot
python
tensorflow__tensorflow
tensorflow/compiler/tests/categorical_op_test.py
{ "start": 1294, "end": 7439 }
class ____(xla_test.XLATestCase): """Test cases for random-number generating operators.""" def output_dtypes(self): return set(self.int_types).intersection([np.int32, np.int64]) def _chi2(self, expected, actual): """Returns Chi2 GOF statistic.""" actual = np.asarray(actual) expected = np.asarray(expected) diff = actual - expected chi2 = np.sum(diff * diff / expected) return chi2 def _do_sampling(self, logits, num_samples): """Categorical samples from given input. Args: logits: Numpy ndarray of shape [batch_size, num_classes]. num_samples: Int; number of samples to draw. Returns: Frequencies from sampled classes; shape [batch_size, num_classes]. """ with self.session(), self.test_scope(): random_seed.set_random_seed(1618) op = random_ops.multinomial(logits, num_samples, output_dtype=dtypes.int32) d = self.evaluate(op) batch_size, num_classes = logits.shape freqs_mat = [] for i in range(batch_size): cnts = dict(collections.Counter(d[i, :])) # Requires drawn class labels be in range. self.assertLess(max(cnts.keys()), num_classes) self.assertGreaterEqual(min(cnts.keys()), 0) freqs = [(cnts[k] * 1. / num_samples if k in cnts else 0) for k in range(num_classes)] freqs_mat.append(freqs) return freqs_mat def _testRngIsNotConstant(self, rng, dtype, output_dtype): # Tests that 'rng' does not always return the same value. with self.session(): with self.test_scope(): x = rng(dtype, output_dtype) # The random-number generator, if working correctly, should produce the # same output multiple times with low probability. y = self.evaluate(x) z = self.evaluate(x) w = self.evaluate(x) # We use exact equality here. If the random-number generator is producing # deterministic output, all three outputs will be bitwise identical. 
self.assertTrue((not np.array_equal(y, z)) or (not np.array_equal(z, w)) or (not np.array_equal(y, w))) def testCategoricalIsNotConstant(self): def rng(dtype, output_dtype): return random_ops.multinomial(np.array([[1., 1., 1.]], dtype=dtype), 10, output_dtype=output_dtype) dtype = np.float32 for output_dtype in self.output_dtypes(): self._testRngIsNotConstant(rng, dtype, output_dtype) @test.disable_with_predicate( pred=test.is_built_with_rocm, skip_message="Test fails on ROCm.") def testCategoricalIsInRange(self): for dtype in self.float_types: for output_dtype in self.output_dtypes(): with self.session(): with self.test_scope(): x = random_ops.multinomial( array_ops.ones(shape=[1, 20], dtype=dtype), 1000, output_dtype=output_dtype) y = self.evaluate(x) self.assertTrue((y >= 0).sum() == 1000) self.assertTrue((y < 20).sum() == 1000) def testSamplingCorrectness(self): np.random.seed(1618) # Make it reproducible. num_samples = 40000 rand_probs = np.random.dirichlet([1., 1., 2., 3.]) rand_probs2 = np.random.dirichlet([1., 4., 5.], size=3) # batched for probs in [[.5, .5], [.85, .05, .1], rand_probs, rand_probs2]: probs = np.asarray(probs) if len(probs.shape) == 1: probs = probs.reshape(1, probs.size) # singleton batch logits = np.log(probs).astype(np.float32) freqs = self._do_sampling(logits, num_samples) # the test here is similar to # python/kernel_tests/random/multinomial_op_test.py # Note that df >= 1 in all these cases. Choosing a cutoff of 1e-3 # corresponds to an alpha value of 2.5% for df = 1, and smaller for larger # df. 
chi2 = self._chi2(probs, freqs) self.assertLess(chi2, 1e-3) def testStatelessMultinomialIsInRange(self): for dtype in self.float_types.intersection( [dtypes.float32, dtypes.bfloat16]): for output_dtype in self.output_dtypes(): with self.session() as sess: with self.test_scope(): seed_t = array_ops.placeholder(dtypes.int32, shape=[2]) x = stateless_random_ops.stateless_multinomial( array_ops.ones(shape=[1, 20], dtype=dtype), 1000, seed_t, output_dtype=output_dtype) y = sess.run(x, {seed_t: [0x12345678, 0xabcdef12]}) self.assertTrue((y >= 0).sum() == 1000) self.assertTrue((y < 20).sum() == 1000) def testDeterminismMultinomial(self): # Stateless values should be equal iff the seeds are equal (roughly) num_samples = 10 with self.session(), self.test_scope(): seed_t = array_ops.placeholder(dtypes.int32, shape=[2]) seeds = [(x, y) for x in range(5) for y in range(5)] * 3 for logits in ([[0.1, 0.25, 0.5, 0.15]], [[0.5, 0.5], [0.8, 0.2], [0.25, 0.75]]): pure = stateless_random_ops.stateless_multinomial( logits, num_samples, seed=seed_t) values = [(seed, pure.eval(feed_dict={seed_t: seed})) for seed in seeds] for s0, v0 in values: for s1, v1 in values: self.assertEqual(s0 == s1, np.all(v0 == v1)) def testEmpty(self): with self.session(): with self.test_scope(): x = random_ops.multinomial( array_ops.zeros([42, 40]), 0, output_dtype=dtypes.int32) y = self.evaluate(x) self.assertEqual(y.shape, (42, 0)) def testEmptyStateless(self): with self.session() as sess: with self.test_scope(): seed_t = array_ops.placeholder(dtypes.int32, shape=[2]) x = stateless_random_ops.stateless_multinomial( array_ops.zeros([42, 40]), 0, seed=seed_t, output_dtype=dtypes.int32) y = sess.run(x, {seed_t: [0x12345678, 0xabcdef1]}) self.assertEqual(y.shape, (42, 0)) if __name__ == '__main__': googletest.main()
CategoricalTest
python
huggingface__transformers
src/transformers/models/gpt2/modeling_gpt2.py
{ "start": 16622, "end": 21669 }
class ____(nn.Module): r""" Compute a single vector summary of a sequence hidden states. Args: config ([`GPT2Config`]): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are: - `"last"` -- Take the last token hidden state (like XLNet) - `"first"` -- Take the first token hidden state (like Bert) - `"mean"` -- Take the mean of all tokens hidden states - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - `"attn"` -- Not implemented now, use multi-head attention - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes (otherwise to `config.hidden_size`). - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, another string or `None` will add no activation. - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. """ def __init__(self, config: GPT2Config): super().__init__() self.summary_type = getattr(config, "summary_type", "last") if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. 
https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.summary = nn.Identity() if hasattr(config, "summary_use_proj") and config.summary_use_proj: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = nn.Linear(config.hidden_size, num_classes) activation_string = getattr(config, "summary_activation", None) self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity() self.first_dropout = nn.Identity() if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0: self.first_dropout = nn.Dropout(config.summary_first_dropout) self.last_dropout = nn.Identity() if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(config.summary_last_dropout) def forward( self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None ) -> torch.FloatTensor: """ Compute a single vector summary of a sequence hidden states. Args: hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`): The hidden states of the last layer. cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*): Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token. Returns: `torch.FloatTensor`: The summary of the sequence hidden states. 
""" if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = hidden_states.mean(dim=1) elif self.summary_type == "cls_index": if cls_index is None: cls_index = torch.full_like( hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long, ) else: cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) elif self.summary_type == "attn": raise NotImplementedError output = self.first_dropout(output) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output) return output @auto_docstring
GPT2SequenceSummary
python
wandb__wandb
wandb/automations/events.py
{ "start": 2342, "end": 2914 }
class ____(GQLBase): # from: RunMetricFilter event_type: Annotated[ Literal[EventType.RUN_METRIC_THRESHOLD], Field(exclude=True, repr=False), ] = EventType.RUN_METRIC_THRESHOLD threshold_filter: MetricThresholdFilter @model_validator(mode="before") @classmethod def _nest_inner_filter(cls, v: Any) -> Any: # Yeah, we've got a lot of nesting due to backend schema constraints. if pydantic_isinstance(v, MetricThresholdFilter): return cls(threshold_filter=v) return v
_WrappedMetricThresholdFilter
python
mlflow__mlflow
mlflow/models/evaluation/base.py
{ "start": 24563, "end": 28428 }
class ____: """ Represents the model evaluation outputs of a `mlflow.evaluate()` API call, containing both scalar metrics and output artifacts such as performance plots. """ def __init__(self, metrics, artifacts, run_id=None): self._metrics = metrics self._artifacts = artifacts self._run_id = ( run_id if run_id is not None else (mlflow.active_run().info.run_id if mlflow.active_run() is not None else None) ) @classmethod def load(cls, path): """Load the evaluation results from the specified local filesystem path""" with open(os.path.join(path, "metrics.json")) as fp: metrics = json.load(fp) with open(os.path.join(path, "artifacts_metadata.json")) as fp: artifacts_metadata = json.load(fp) artifacts = {} artifacts_dir = os.path.join(path, "artifacts") for artifact_name, meta in artifacts_metadata.items(): uri = meta["uri"] ArtifactCls = _get_class_from_string(meta["class_name"]) artifact = ArtifactCls(uri=uri) filename = pathlib.Path(urllib.parse.urlparse(uri).path).name artifact._load(os.path.join(artifacts_dir, filename)) artifacts[artifact_name] = artifact return EvaluationResult(metrics=metrics, artifacts=artifacts) def save(self, path): """Write the evaluation results to the specified local filesystem path""" os.makedirs(path, exist_ok=True) with open(os.path.join(path, "metrics.json"), "w") as fp: json.dump(self.metrics, fp, cls=NumpyEncoder) artifacts_metadata = { artifact_name: { "uri": artifact.uri, "class_name": _get_fully_qualified_class_name(artifact), } for artifact_name, artifact in self.artifacts.items() } with open(os.path.join(path, "artifacts_metadata.json"), "w") as fp: json.dump(artifacts_metadata, fp) artifacts_dir = os.path.join(path, "artifacts") os.makedirs(artifacts_dir, exist_ok=True) for artifact in self.artifacts.values(): filename = pathlib.Path(urllib.parse.urlparse(artifact.uri).path).name artifact._save(os.path.join(artifacts_dir, filename)) @property def metrics(self) -> dict[str, Any]: """ A dictionary mapping scalar metric names to 
scalar metric values """ return self._metrics @property def artifacts(self) -> dict[str, "mlflow.models.EvaluationArtifact"]: """ A dictionary mapping standardized artifact names (e.g. "roc_data") to artifact content and location information """ return self._artifacts @property def run_id(self) -> str: """ The ID of the MLflow Run to which the evaluation results were logged. """ return self._run_id @property def tables(self) -> dict[str, "pd.DataFrame"]: """ A dictionary mapping standardized artifact names (e.g. "eval_results_table") to corresponding table content as pandas DataFrame. """ eval_tables = {} if self._run_id is None: _logger.warning("Cannot load eval_results_table because run_id is not specified.") return eval_tables for table_name, table_path in self._artifacts.items(): path = urllib.parse.urlparse(table_path.uri).path table_fileName = os.path.basename(path) try: eval_tables[table_name] = mlflow.load_table(table_fileName, run_ids=[self._run_id]) except Exception: pass # Swallow the exception since we assume its not a table. return eval_tables @developer_stable
EvaluationResult
python
wandb__wandb
wandb/sdk/wandb_settings.py
{ "start": 1586, "end": 73924 }
class ____(BaseModel, validate_assignment=True): """Settings for the W&B SDK. This class manages configuration settings for the W&B SDK, ensuring type safety and validation of all settings. Settings are accessible as attributes and can be initialized programmatically, through environment variables (`WANDB_ prefix`), and with configuration files. The settings are organized into three categories: 1. Public settings: Core configuration options that users can safely modify to customize W&B's behavior for their specific needs. 2. Internal settings: Settings prefixed with 'x_' that handle low-level SDK behavior. These settings are primarily for internal use and debugging. While they can be modified, they are not considered part of the public API and may change without notice in future versions. 3. Computed settings: Read-only settings that are automatically derived from other settings or the environment. """ # Pydantic Model configuration. model_config = ConfigDict( extra="forbid", # throw an error if extra fields are provided validate_default=True, # validate default values use_attribute_docstrings=True, # for field descriptions revalidate_instances="always", ) # Public settings. allow_offline_artifacts: bool = True """Flag to allow table artifacts to be synced in offline mode. To revert to the old behavior, set this to False. 
""" allow_val_change: bool = False """Flag to allow modification of `Config` values after they've been set.""" anonymous: deprecation.DoNotSet = Field( default=deprecation.UNSET, exclude=True, ) """Deprecated and will be removed.""" api_key: Optional[str] = None """The W&B API key.""" azure_account_url_to_access_key: Optional[Dict[str, str]] = None """Mapping of Azure account URLs to their corresponding access keys for Azure integration.""" base_url: str = "https://api.wandb.ai" """The URL of the W&B backend for data synchronization.""" code_dir: Optional[str] = None """Directory containing the code to be tracked by W&B.""" config_paths: Optional[Sequence[str]] = None """Paths to files to load configuration from into the `Config` object.""" console: Literal["auto", "off", "wrap", "redirect", "wrap_raw", "wrap_emu"] = Field( default="auto", validate_default=True, ) """The type of console capture to be applied. Possible values are: - "auto" - Automatically selects the console capture method based on the system environment and settings. - "off" - Disables console capture. - "redirect" - Redirects low-level file descriptors for capturing output. - "wrap" - Overrides the write methods of sys.stdout/sys.stderr. Will be mapped to either "wrap_raw" or "wrap_emu" based on the state of the system. - "wrap_raw" - Same as "wrap" but captures raw output directly instead of through an emulator. Derived from the `wrap` setting and should not be set manually. - "wrap_emu" - Same as "wrap" but captures output through an emulator. Derived from the `wrap` setting and should not be set manually. """ console_multipart: bool = False """Enable multipart console logging. When True, the SDK writes console output to timestamped files under the `logs/` directory instead of a single `output.log`. Each part is uploaded as soon as it is closed, giving users live access to logs while the run is active. 
Rollover cadence is controlled by `console_chunk_max_bytes` and/or `console_chunk_max_seconds`. If both limits are `0`, all logs are uploaded once at run finish. Note: Uploaded chunks are immutable; terminal control sequences that modify previous lines (e.g., progress bars using carriage returns) only affect the current chunk. """ console_chunk_max_bytes: int = 0 """Size-based rollover threshold for multipart console logs, in bytes. Starts a new console log file when the current part reaches this size. Has an effect only when `console_multipart` is `True`. Can be combined with `console_chunk_max_seconds`; whichever limit is hit first triggers the rollover. A value of `0` disables the size-based limit. """ console_chunk_max_seconds: int = 0 """Time-based rollover threshold for multipart console logs, in seconds. Starts a new console log file after this many seconds have elapsed since the current part began. Requires `console_multipart` to be `True`. May be used with `console_chunk_max_bytes`; the first limit reached closes the part. A value of `0` disables the time-based limit. """ credentials_file: str = Field( default_factory=lambda: str(credentials.DEFAULT_WANDB_CREDENTIALS_FILE) ) """Path to file for writing temporary access tokens.""" disable_code: bool = False """Whether to disable capturing the code.""" disable_git: bool = False """Whether to disable capturing the git state.""" disable_job_creation: bool = True """Whether to disable the creation of a job artifact for W&B Launch.""" docker: Optional[str] = None """The Docker image used to execute the script.""" email: Optional[str] = None """The email address of the user.""" entity: Optional[str] = None """The W&B entity, such as a user or a team.""" organization: Optional[str] = None """The W&B organization.""" force: bool = False """Whether to pass the `force` flag to `wandb.login()`.""" fork_from: Optional[RunMoment] = None """Specifies a point in a previous execution of a run to fork from. 
The point is defined by the run ID, a metric, and its value. Currently, only the metric '_step' is supported. """ git_commit: Optional[str] = None """The git commit hash to associate with the run.""" git_remote: str = "origin" """The git remote to associate with the run.""" git_remote_url: Optional[str] = None """The URL of the git remote repository.""" git_root: Optional[str] = None """Root directory of the git repository.""" heartbeat_seconds: int = 30 """Interval in seconds between heartbeat signals sent to the W&B servers. <!-- lazydoc-ignore-class-attributes --> """ host: Optional[str] = None """Hostname of the machine running the script.""" http_proxy: Optional[str] = None """Custom proxy servers for http requests to W&B.""" https_proxy: Optional[str] = None """Custom proxy servers for https requests to W&B.""" identity_token_file: Optional[str] = None """Path to file containing an identity token (JWT) for authentication.""" ignore_globs: Sequence[str] = () """Unix glob patterns relative to `files_dir` specifying files to exclude from upload.""" init_timeout: float = 90.0 """Time in seconds to wait for the `wandb.init` call to complete before timing out.""" insecure_disable_ssl: bool = False """Whether to insecurely disable SSL verification.""" job_name: Optional[str] = None """Name of the Launch job running the script.""" job_source: Optional[Literal["repo", "artifact", "image"]] = None """Source type for Launch.""" label_disable: bool = False """Whether to disable automatic labeling features.""" launch: bool = False """Flag to indicate if the run is being launched through W&B Launch. 
<!-- lazydoc-ignore-class-attributes --> """ launch_config_path: Optional[str] = None """Path to the launch configuration file.""" login_timeout: Optional[float] = None """Time in seconds to wait for login operations before timing out.""" mode: Literal["online", "offline", "shared", "disabled", "dryrun", "run"] = Field( default="online", validate_default=True, ) """The operating mode for W&B logging and synchronization.""" notebook_name: Optional[str] = None """Name of the notebook if running in a Jupyter-like environment.""" program: Optional[str] = None """Path to the script that created the run, if available.""" program_abspath: Optional[str] = None """The absolute path from the root repository directory to the script that created the run. Root repository directory is defined as the directory containing the .git directory, if it exists. Otherwise, it's the current working directory. """ program_relpath: Optional[str] = None """The relative path to the script that created the run.""" project: Optional[str] = None """The W&B project ID.""" quiet: bool = False """Flag to suppress non-essential output.""" reinit: Union[ Literal[ "default", "return_previous", "finish_previous", "create_new", ], bool, ] = "default" """What to do when `wandb.init()` is called while a run is active. Options: - "default": Use "finish_previous" in notebooks and "return_previous" otherwise. - "return_previous": Return the most recently created run that is not yet finished. This does not update `wandb.run`; see the "create_new" option. - "finish_previous": Finish all active runs, then return a new run. - "create_new": Create a new run without modifying other active runs. Does not update `wandb.run` and top-level functions like `wandb.log`. Because of this, some older integrations that rely on the global run will not work. Can also be a boolean, but this is deprecated. False is the same as "return_previous", and True is the same as "finish_previous". 
""" relogin: bool = False """Flag to force a new login attempt.""" resume: Optional[Literal["allow", "must", "never", "auto"]] = None """Specifies the resume behavior for the run. Options: - "must": Resumes from an existing run with the same ID. If no such run exists, it will result in failure. - "allow": Attempts to resume from an existing run with the same ID. If none is found, a new run will be created. - "never": Always starts a new run. If a run with the same ID already exists, it will result in failure. - "auto": Automatically resumes from the most recent failed run on the same machine. """ resume_from: Optional[RunMoment] = None """Specifies a point in a previous execution of a run to resume from. The point is defined by the run ID, a metric, and its value. Currently, only the metric '_step' is supported. """ resumed: bool = False """Indication from the server about the state of the run. This is different from resume, a user provided flag. <!-- lazydoc-ignore-class-attributes --> """ root_dir: str = Field(default_factory=lambda: os.path.abspath(os.getcwd())) """The root directory to use as the base for all run-related paths. In particular, this is used to derive the wandb directory and the run directory. """ run_group: Optional[str] = None """Group identifier for related runs. Used for grouping runs in the UI. 
""" run_id: Optional[str] = None """The ID of the run.""" run_job_type: Optional[str] = None """Type of job being run (e.g., training, evaluation).""" run_name: Optional[str] = None """Human-readable name for the run.""" run_notes: Optional[str] = None """Additional notes or description for the run.""" run_tags: Optional[Tuple[str, ...]] = None """Tags to associate with the run for organization and filtering.""" sagemaker_disable: bool = False """Flag to disable SageMaker-specific functionality.""" save_code: Optional[bool] = None """Whether to save the code associated with the run.""" settings_system: Optional[str] = None """Path to the system-wide settings file.""" max_end_of_run_history_metrics: int = 10 """Maximum number of history sparklines to display at the end of a run.""" max_end_of_run_summary_metrics: int = 10 """Maximum number of summary metrics to display at the end of a run.""" show_colors: Optional[bool] = None """Whether to use colored output in the console. <!-- lazydoc-ignore-class-attributes --> """ show_emoji: Optional[bool] = None """Whether to show emoji in the console output. <!-- lazydoc-ignore-class-attributes --> """ show_errors: bool = True """Whether to display error messages.""" show_info: bool = True """Whether to display informational messages.""" show_warnings: bool = True """Whether to display warning messages.""" silent: bool = False """Flag to suppress all output.""" start_method: Optional[str] = None """Method to use for starting subprocesses. This is deprecated and will be removed in a future release. <!-- lazydoc-ignore-class-attributes --> """ strict: Optional[bool] = None """Whether to enable strict mode for validation and error checking.""" summary_timeout: int = 60 """Time in seconds to wait for summary operations before timing out.""" summary_warnings: int = 5 """Maximum number of summary warnings to display. 
<!-- lazydoc-ignore-class-attributes --> """ sweep_id: Optional[str] = None """Identifier of the sweep this run belongs to.""" sweep_param_path: Optional[str] = None """Path to the sweep parameters configuration.""" symlink: bool = Field( default_factory=lambda: False if platform.system() == "Windows" else True ) """Whether to use symlinks (True by default except on Windows).""" sync_tensorboard: Optional[bool] = None """Whether to synchronize TensorBoard logs with W&B.""" table_raise_on_max_row_limit_exceeded: bool = False """Whether to raise an exception when table row limits are exceeded.""" username: Optional[str] = None """Username.""" # Internal settings. # # These are typically not meant to be set by the user and should not be considered # a part of the public API as they may change or be removed in future versions. x_cli_only_mode: bool = False """Flag to indicate that the SDK is running in CLI-only mode. <!-- lazydoc-ignore-class-attributes --> """ x_disable_meta: bool = False """Flag to disable the collection of system metadata.""" x_disable_stats: bool = False """Flag to disable the collection of system metrics.""" x_disable_viewer: bool = False """Flag to disable the early viewer query. <!-- lazydoc-ignore-class-attributes --> """ x_disable_machine_info: bool = False """Flag to disable automatic machine info collection. <!-- lazydoc-ignore-class-attributes --> """ x_executable: Optional[str] = None """Path to the Python executable. <!-- lazydoc-ignore-class-attributes --> """ x_extra_http_headers: Optional[Dict[str, str]] = None """Additional headers to add to all outgoing HTTP requests.""" x_file_stream_max_bytes: Optional[int] = None """An approximate maximum request size for the filestream API. Its purpose is to prevent HTTP requests from failing due to containing too much data. This number is approximate: requests will be slightly larger. 
<!-- lazydoc-ignore-class-attributes --> """ x_file_stream_max_line_bytes: Optional[int] = None """Maximum line length for filestream JSONL files. <!-- lazydoc-ignore-class-attributes --> """ x_file_stream_transmit_interval: Optional[float] = None """Interval in seconds between filestream transmissions. <!-- lazydoc-ignore-class-attributes --> """ # Filestream retry client configuration. x_file_stream_retry_max: Optional[int] = None """Max number of retries for filestream operations. <!-- lazydoc-ignore-class-attributes --> """ x_file_stream_retry_wait_min_seconds: Optional[float] = None """Minimum wait time between retries for filestream operations. <!-- lazydoc-ignore-class-attributes --> """ x_file_stream_retry_wait_max_seconds: Optional[float] = None """Maximum wait time between retries for filestream operations. <!-- lazydoc-ignore-class-attributes --> """ x_file_stream_timeout_seconds: Optional[float] = None """Timeout in seconds for individual filestream HTTP requests. <!-- lazydoc-ignore-class-attributes --> """ # file transfer retry client configuration x_file_transfer_retry_max: Optional[int] = None """Max number of retries for file transfer operations. <!-- lazydoc-ignore-class-attributes --> """ x_file_transfer_retry_wait_min_seconds: Optional[float] = None """Minimum wait time between retries for file transfer operations. <!-- lazydoc-ignore-class-attributes --> """ x_file_transfer_retry_wait_max_seconds: Optional[float] = None """Maximum wait time between retries for file transfer operations. <!-- lazydoc-ignore-class-attributes --> """ x_file_transfer_timeout_seconds: Optional[float] = None """Timeout in seconds for individual file transfer HTTP requests. <!-- lazydoc-ignore-class-attributes --> """ x_files_dir: Optional[str] = None """Override setting for the computed files_dir. DEPRECATED, DO NOT USE. This private setting is not respected by wandb-core but will continue to work for some legacy Python code. 
<!-- lazydoc-ignore-class-attributes --> """ x_flow_control_custom: Optional[bool] = None """Flag indicating custom flow control for filestream. TODO: Not implemented in wandb-core. <!-- lazydoc-ignore-class-attributes --> """ x_flow_control_disabled: Optional[bool] = None """Flag indicating flow control is disabled for filestream. TODO: Not implemented in wandb-core. <!-- lazydoc-ignore-class-attributes --> """ # graphql retry client configuration x_graphql_retry_max: Optional[int] = None """Max number of retries for GraphQL operations. <!-- lazydoc-ignore-class-attributes --> """ x_graphql_retry_wait_min_seconds: Optional[float] = None """Minimum wait time between retries for GraphQL operations. <!-- lazydoc-ignore-class-attributes --> """ x_graphql_retry_wait_max_seconds: Optional[float] = None """Maximum wait time between retries for GraphQL operations. <!-- lazydoc-ignore-class-attributes --> """ x_graphql_timeout_seconds: Optional[float] = None """Timeout in seconds for individual GraphQL requests. <!-- lazydoc-ignore-class-attributes --> """ x_internal_check_process: float = 8.0 """Interval for internal process health checks in seconds. <!-- lazydoc-ignore-class-attributes --> """ x_jupyter_name: Optional[str] = None """Name of the Jupyter notebook. <!-- lazydoc-ignore-class-attributes --> """ x_jupyter_path: Optional[str] = None """Path to the Jupyter notebook. <!-- lazydoc-ignore-class-attributes --> """ x_jupyter_root: Optional[str] = None """Root directory of the Jupyter notebook. <!-- lazydoc-ignore-class-attributes --> """ x_label: Optional[str] = None """Label to assign to system metrics and console logs collected for the run. This is used to group data by on the frontend and can be used to distinguish data from different processes in a distributed training job. """ x_live_policy_rate_limit: Optional[int] = None """Rate limit for live policy updates in seconds. 
<!-- lazydoc-ignore-class-attributes --> """ x_live_policy_wait_time: Optional[int] = None """Wait time between live policy updates in seconds. <!-- lazydoc-ignore-class-attributes --> """ x_log_level: int = logging.INFO """Logging level for internal operations. <!-- lazydoc-ignore-class-attributes --> """ x_network_buffer: Optional[int] = None """Size of the network buffer used in flow control. TODO: Not implemented in wandb-core. <!-- lazydoc-ignore-class-attributes --> """ x_primary: bool = Field( default=True, validation_alias=AliasChoices("x_primary", "x_primary_node") ) """Determines whether to save internal wandb files and metadata. In a distributed setting, this is useful for avoiding file overwrites from secondary processes when only system metrics and logs are needed, as the primary process handles the main logging. """ x_proxies: Optional[Dict[str, str]] = None """Custom proxy servers for requests to W&B. This is deprecated and will be removed in a future release. Please use `http_proxy` and `https_proxy` instead. <!-- lazydoc-ignore-class-attributes --> """ x_runqueue_item_id: Optional[str] = None """ID of the Launch run queue item being processed. <!-- lazydoc-ignore-class-attributes --> """ x_save_requirements: bool = True """Flag to save the requirements file.""" x_server_side_derived_summary: bool = False """Flag to delegate automatic computation of summary from history to the server. This does not disable user-provided summary updates. """ x_server_side_expand_glob_metrics: bool = True """Flag to delegate glob matching of metrics in define_metric to the server. If the server does not support this, the client will perform the glob matching. <!-- lazydoc-ignore-class-attributes --> """ x_service_transport: Optional[str] = None """Transport method for communication with the wandb service. 
<!-- lazydoc-ignore-class-attributes --> """ x_service_wait: float = 30.0 """Time in seconds to wait for the wandb-core internal service to start.""" x_skip_transaction_log: bool = False """Whether to skip saving the run events to the transaction log. This is only relevant for online runs. Can be used to reduce the amount of data written to disk. Should be used with caution, as it removes the gurantees about recoverability. """ x_start_time: Optional[float] = None """The start time of the run in seconds since the Unix epoch. <!-- lazydoc-ignore-class-attributes --> """ x_stats_pid: int = os.getpid() """PID of the process that started the wandb-core process to collect system stats for. <!-- lazydoc-ignore-class-attributes --> """ x_stats_sampling_interval: float = Field(default=15.0) """Sampling interval for the system monitor in seconds.""" x_stats_neuron_monitor_config_path: Optional[str] = None """Path to the default config file for the neuron-monitor tool. This is used to monitor AWS Trainium devices. <!-- lazydoc-ignore-class-attributes --> """ x_stats_dcgm_exporter: Optional[str] = None """Endpoint to extract Nvidia DCGM metrics from. Options: - Extract DCGM-related metrics from a query to the Prometheus `/api/v1/query` endpoint. It is a common practice to aggregate metrics reported by the instances of the DCGM Exporter running on different nodes in a cluster using Prometheus. - TODO: Parse metrics directly from the `/metrics` endpoint of the DCGM Exporter. Examples: - `http://localhost:9400/api/v1/query?query=DCGM_FI_DEV_GPU_TEMP{node="l1337", cluster="globular"}`. - TODO: `http://192.168.0.1:9400/metrics`. <!-- lazydoc-ignore-class-attributes --> """ x_stats_open_metrics_endpoints: Optional[Dict[str, str]] = None """OpenMetrics `/metrics` endpoints to monitor for system metrics.""" x_stats_open_metrics_filters: Union[ Dict[str, Dict[str, str]], Sequence[str], None ] = None """Filter to apply to metrics collected from OpenMetrics `/metrics` endpoints. 
Supports two formats: - `{"metric regex pattern, including endpoint name as prefix": {"label": "label value regex pattern"}}` - `("metric regex pattern 1", "metric regex pattern 2", ...)` """ x_stats_open_metrics_http_headers: Optional[Dict[str, str]] = None """HTTP headers to add to OpenMetrics requests.""" x_stats_disk_paths: Optional[Sequence[str]] = ("/",) """System paths to monitor for disk usage.""" x_stats_cpu_count: Optional[int] = None """System CPU count. If set, overrides the auto-detected value in the run metadata. """ x_stats_cpu_logical_count: Optional[int] = None """Logical CPU count. If set, overrides the auto-detected value in the run metadata. """ x_stats_gpu_count: Optional[int] = None """GPU device count. If set, overrides the auto-detected value in the run metadata. """ x_stats_gpu_type: Optional[str] = None """GPU device type. If set, overrides the auto-detected value in the run metadata. """ x_stats_gpu_device_ids: Optional[Sequence[int]] = None """GPU device indices to monitor. If not set, the system monitor captures metrics for all GPUs. Assumes 0-based indexing matching CUDA/ROCm device enumeration. """ x_stats_buffer_size: int = 0 """Number of system metric samples to buffer in memory in the wandb-core process. Can be accessed via run._system_metrics. <!-- lazydoc-ignore-class-attributes --> """ x_stats_coreweave_metadata_base_url: str = "http://169.254.169.254" """The scheme and hostname for contacting the CoreWeave metadata server. Only accessible from within a CoreWeave cluster. <!-- lazydoc-ignore-class-attributes --> """ x_stats_coreweave_metadata_endpoint: str = "/api/v2/cloud-init/meta-data" """The relative path on the CoreWeave metadata server to which to make requests. This must not include the schema and hostname prefix. Only accessible from within a CoreWeave cluster. 
<!-- lazydoc-ignore-class-attributes --> """ x_stats_track_process_tree: bool = False """Monitor the entire process tree for resource usage, starting from `x_stats_pid`. When `True`, the system monitor aggregates the RSS, CPU%, and thread count from the process with PID `x_stats_pid` and all of its descendants. This can have a performance overhead and is disabled by default. """ x_sync: bool = False """Flag to indicate whether we are syncing a run from the transaction log. <!-- lazydoc-ignore-class-attributes --> """ x_sync_dir_suffix: str = "" """Suffix to add to the run's directory name (sync_dir). This is set in wandb.init() to avoid naming conflicts. If set, it is joined to the default name with a dash. """ x_update_finish_state: bool = True """Flag to indicate whether this process can update the run's final state on the server. Set to False in distributed training when only the main process should determine the final state. """ # Model validator to catch legacy settings. @model_validator(mode="before") @classmethod def catch_private_settings(cls, values): """Check if a private field is provided and assign to the corresponding public one. This is a compatibility layer to handle previous versions of the settings. <!-- lazydoc-ignore-classmethod: internal --> """ new_values = {} for key in values: # Internal settings are prefixed with "x_" instead of "_" # as Pydantic does not allow "_" in field names. if key.startswith("_"): new_values["x" + key] = values[key] else: new_values[key] = values[key] return new_values if IS_PYDANTIC_V2: @model_validator(mode="after") def validate_mutual_exclusion_of_branching_args(self) -> Self: """Check if `fork_from`, `resume`, and `resume_from` are mutually exclusive. <!-- lazydoc-ignore-classmethod: internal --> """ if ( sum( o is not None for o in [self.fork_from, self.resume, self.resume_from] ) > 1 ): raise ValueError( "`fork_from`, `resume`, or `resume_from` are mutually exclusive. " "Please specify only one of them." 
) return self @model_validator(mode="after") def validate_skip_transaction_log(self): """Validate x_skip_transaction_log. <!-- lazydoc-ignore: internal --> """ if self._offline and self.x_skip_transaction_log: raise ValueError("Cannot skip transaction log in offline mode") return self else: @root_validator(pre=False) # type: ignore [call-overload] @classmethod def validate_mutual_exclusion_of_branching_args(cls, values): if ( sum( values.get(o) is not None for o in ["fork_from", "resume", "resume_from"] ) > 1 ): raise ValueError( "`fork_from`, `resume`, or `resume_from` are mutually exclusive. " "Please specify only one of them." ) return values @root_validator(pre=False) # type: ignore [call-overload] @classmethod def validate_skip_transaction_log(cls, values): if values.get("_offline") and values.get("x_skip_transaction_log"): raise ValueError("Cannot skip transaction log in offline mode") return values # Field validators. @field_validator("anonymous", mode="after") @classmethod def validate_anonymous(cls, value: object) -> object: if value is not deprecation.UNSET: wandb.termwarn( "The anonymous setting has no effect and will be removed" + " in a future version.", repeat=False, ) return value @field_validator("api_key", mode="after") @classmethod def validate_api_key(cls, value): """Validate the API key. <!-- lazydoc-ignore-classmethod: internal --> """ if value is not None and (len(value) > len(value.strip())): raise UsageError("API key cannot start or end with whitespace") return value @field_validator("base_url", mode="after") @classmethod def validate_base_url(cls, value): """Validate the base URL. <!-- lazydoc-ignore-classmethod: internal --> """ urls.validate_url(value) # wandb.ai-specific checks if re.match(r".*wandb\.ai[^\.]*$", value) and "api." not in value: # user might guess app.wandb.ai or wandb.ai is the default cloud server raise ValueError( f"{value} is not a valid server address, did you mean https://api.wandb.ai?" 
) elif re.match(r".*wandb\.ai[^\.]*$", value) and not value.startswith("https"): raise ValueError("http is not secure, please use https://api.wandb.ai") return value.rstrip("/") @field_validator("code_dir", mode="before") @classmethod def validate_code_dir(cls, value): """Validate the code directory. <!-- lazydoc-ignore-classmethod: internal --> """ # TODO: add native support for pathlib.Path if isinstance(value, pathlib.Path): return str(value) return value @field_validator("console", mode="after") @classmethod def validate_console(cls, value, values): """Validate the console capture method. <!-- lazydoc-ignore-classmethod: internal --> """ if value != "auto": return value return "wrap" @field_validator("console_chunk_max_bytes", mode="after") @classmethod def validate_console_chunk_max_bytes(cls, value): """Validate the console_chunk_max_bytes value. <!-- lazydoc-ignore-classmethod: internal --> """ if value < 0: raise ValueError("console_chunk_max_bytes must be non-negative") return value @field_validator("console_chunk_max_seconds", mode="after") @classmethod def validate_console_chunk_max_seconds(cls, value): """Validate the console_chunk_max_seconds value. <!-- lazydoc-ignore-classmethod: internal --> """ if value < 0: raise ValueError("console_chunk_max_seconds must be non-negative") return value @field_validator("x_executable", mode="before") @classmethod def validate_x_executable(cls, value): """Validate the Python executable path. <!-- lazydoc-ignore-classmethod: internal --> """ # TODO: add native support for pathlib.Path if isinstance(value, pathlib.Path): return str(value) return value @field_validator("x_extra_http_headers", mode="before") @classmethod def validate_x_extra_http_headers(cls, value): if isinstance(value, str): return json.loads(value) return value @field_validator("x_file_stream_max_line_bytes", mode="after") @classmethod def validate_file_stream_max_line_bytes(cls, value): """Validate the maximum line length for filestream JSONL files. 
<!-- lazydoc-ignore-classmethod: internal --> """ if value is not None and value < 1: raise ValueError("File stream max line bytes must be greater than 0") return value @field_validator("x_files_dir", mode="before") @classmethod def validate_x_files_dir(cls, value): """Validate the files directory. <!-- lazydoc-ignore-classmethod: internal --> """ # TODO: add native support for pathlib.Path if isinstance(value, pathlib.Path): return str(value) return value @field_validator("fork_from", mode="before") @classmethod def validate_fork_from(cls, value, values) -> Optional[RunMoment]: """Validate the fork_from field. <!-- lazydoc-ignore-classmethod: internal --> """ run_moment = cls._runmoment_preprocessor(value) if hasattr(values, "data"): # pydantic v2 values = values.data else: # pydantic v1 values = values if ( run_moment and values.get("run_id") is not None and values.get("run_id") == run_moment.run ): raise ValueError( "Provided `run_id` is the same as the run to `fork_from`. " "Please provide a different `run_id` or remove the `run_id` argument. " "If you want to rewind the current run, please use `resume_from` instead." ) return run_moment @field_validator("http_proxy", mode="after") @classmethod def validate_http_proxy(cls, value): """Validate the HTTP proxy. <!-- lazydoc-ignore-classmethod: internal --> """ if value is None: return None urls.validate_url(value) return value.rstrip("/") @field_validator("https_proxy", mode="after") @classmethod def validate_https_proxy(cls, value): """Validate the HTTPS proxy. <!-- lazydoc-ignore-classmethod: internal --> """ if value is None: return None urls.validate_url(value) return value.rstrip("/") @field_validator("ignore_globs", mode="after") @classmethod def validate_ignore_globs(cls, value): """Validate the ignore globs. 
<!-- lazydoc-ignore-classmethod: internal --> """ return tuple(value) if not isinstance(value, tuple) else value @field_validator("program", mode="before") @classmethod def validate_program(cls, value): """Validate the program path. <!-- lazydoc-ignore-classmethod: internal --> """ # TODO: add native support for pathlib.Path if isinstance(value, pathlib.Path): return str(value) return value @field_validator("program_abspath", mode="before") @classmethod def validate_program_abspath(cls, value): """Validate the absolute program path. <!-- lazydoc-ignore-classmethod: internal --> """ # TODO: add native support for pathlib.Path if isinstance(value, pathlib.Path): return str(value) return value @field_validator("program_relpath", mode="before") @classmethod def validate_program_relpath(cls, value): """Validate the relative program path. <!-- lazydoc-ignore-classmethod: internal --> """ # TODO: add native support for pathlib.Path if isinstance(value, pathlib.Path): return str(value) return value @field_validator("project", mode="after") @classmethod def validate_project(cls, value, values): """Validate the project name. <!-- lazydoc-ignore-classmethod: internal --> """ if value is None: return None invalid_chars_list = list("/\\#?%:") if len(value) > 128: raise UsageError(f"Invalid project name {value!r}: exceeded 128 characters") invalid_chars = {char for char in invalid_chars_list if char in value} if invalid_chars: raise UsageError( f"Invalid project name {value!r}: " f"cannot contain characters {','.join(invalid_chars_list)!r}, " f"found {','.join(invalid_chars)!r}" ) return value @field_validator("resume", mode="before") @classmethod def validate_resume(cls, value): """Validate the resume behavior. 
<!-- lazydoc-ignore-classmethod: internal --> """ if value is False: return None if value is True: return "auto" return value @field_validator("resume_from", mode="before") @classmethod def validate_resume_from(cls, value, values) -> Optional[RunMoment]: """Validate the resume_from field. <!-- lazydoc-ignore-classmethod: internal --> """ run_moment = cls._runmoment_preprocessor(value) if hasattr(values, "data"): # pydantic v2 values = values.data else: # pydantic v1 values = values if ( run_moment and values.get("run_id") is not None and values.get("run_id") != run_moment.run ): raise ValueError( "Both `run_id` and `resume_from` have been specified with different ids." ) return run_moment @field_validator("root_dir", mode="before") @classmethod def validate_root_dir(cls, value): """Validate the root directory. <!-- lazydoc-ignore-classmethod: internal --> """ # TODO: add native support for pathlib.Path if isinstance(value, pathlib.Path): return str(value) return value @field_validator("run_id", mode="after") @classmethod def validate_run_id(cls, value, values): """Validate the run ID. <!-- lazydoc-ignore-classmethod: internal --> """ if value is None: return None if len(value) == 0: raise UsageError("Run ID cannot be empty") if len(value) > len(value.strip()): raise UsageError("Run ID cannot start or end with whitespace") if not bool(value.strip()): raise UsageError("Run ID cannot contain only whitespace") # check if the run id contains any reserved characters reserved_chars = ":;,#?/'" if any(char in reserved_chars for char in value): raise UsageError(f"Run ID cannot contain the characters: {reserved_chars}") return value @field_validator("settings_system", mode="after") @classmethod def validate_settings_system(cls, value): """Validate the system settings file path. 
<!-- lazydoc-ignore-classmethod: internal --> """ if value is None: return None elif isinstance(value, pathlib.Path): return str(_path_convert(value)) else: return _path_convert(value) @field_validator("x_service_wait", mode="after") @classmethod def validate_service_wait(cls, value): """Validate the service wait time. <!-- lazydoc-ignore-classmethod: internal --> """ if value < 0: raise UsageError("Service wait time cannot be negative") return value @field_validator("start_method", mode="after") @classmethod def validate_start_method(cls, value): """Validate the start method for subprocesses. <!-- lazydoc-ignore-classmethod: internal --> """ if value is None: return value wandb.termwarn( "`start_method` is deprecated and will be removed in a future version " "of wandb. This setting is currently non-functional and safely ignored.", repeat=False, ) return value @field_validator("x_stats_coreweave_metadata_base_url", mode="after") @classmethod def validate_x_stats_coreweave_metadata_base_url(cls, value): urls.validate_url(value) return value.rstrip("/") @field_validator("x_stats_gpu_device_ids", mode="before") @classmethod def validate_x_stats_gpu_device_ids(cls, value): """Validate the GPU device IDs. <!-- lazydoc-ignore-classmethod: internal --> """ if isinstance(value, str): return json.loads(value) return value @field_validator("x_stats_neuron_monitor_config_path", mode="before") @classmethod def validate_x_stats_neuron_monitor_config_path(cls, value): """Validate the path to the neuron-monitor config file. <!-- lazydoc-ignore-classmethod: internal --> """ # TODO: add native support for pathlib.Path if isinstance(value, pathlib.Path): return str(value) return value @field_validator("x_stats_open_metrics_endpoints", mode="before") @classmethod def validate_stats_open_metrics_endpoints(cls, value): """Validate the OpenMetrics endpoints. 
<!-- lazydoc-ignore-classmethod: internal --> """ if isinstance(value, str): return json.loads(value) return value @field_validator("x_stats_open_metrics_filters", mode="before") @classmethod def validate_stats_open_metrics_filters(cls, value): """Validate the OpenMetrics filters. <!-- lazydoc-ignore-classmethod: internal --> """ if isinstance(value, str): return json.loads(value) return value @field_validator("x_stats_open_metrics_http_headers", mode="before") @classmethod def validate_stats_open_metrics_http_headers(cls, value): """Validate the OpenMetrics HTTP headers. <!-- lazydoc-ignore-classmethod: internal --> """ if isinstance(value, str): return json.loads(value) return value @field_validator("x_stats_sampling_interval", mode="after") @classmethod def validate_stats_sampling_interval(cls, value): """Validate the stats sampling interval. <!-- lazydoc-ignore-classmethod: internal --> """ if value < 0.1: raise UsageError("Stats sampling interval cannot be less than 0.1 seconds") return value @field_validator("sweep_id", mode="after") @classmethod def validate_sweep_id(cls, value): """Validate the sweep ID. <!-- lazydoc-ignore-classmethod: internal --> """ if value is None: return None if len(value) == 0: raise UsageError("Sweep ID cannot be empty") if len(value) > len(value.strip()): raise UsageError("Sweep ID cannot start or end with whitespace") if not bool(value.strip()): raise UsageError("Sweep ID cannot contain only whitespace") return value @field_validator("run_tags", mode="before") @classmethod def validate_run_tags(cls, value): """Validate run tags. 
Validates that each tag: - Is between 1 and 64 characters in length (inclusive) - Converts single string values to tuple format - Preserves None values <!-- lazydoc-ignore-classmethod: internal --> Args: value: A string, list, tuple, or None representing tags Returns: tuple: A tuple of validated tags, or None Raises: ValueError: If any tag is empty or exceeds 64 characters """ if value is None: return None # Convert to tuple if needed if isinstance(value, str): tags = (value,) else: tags = tuple(value) # Validate each tag and accumulate errors errors = [] for i, tag in enumerate(tags): tag_str = str(tag) if len(tag_str) == 0: errors.append( f"Tag at index {i} is empty. Tags must be between 1 and 64 characters" ) elif len(tag_str) > 64: # Truncate long tags for display display_tag = ( f"{tag_str[:20]}...{tag_str[-20:]}" if len(tag_str) > 43 else tag_str ) errors.append( f"Tag '{display_tag}' is {len(tag_str)} characters. Tags must be between 1 and 64 characters" ) # Raise combined error if any validation issues were found if errors: raise ValueError("; ".join(errors)) return tags @field_validator("sweep_param_path", mode="before") @classmethod def validate_sweep_param_path(cls, value): """Validate the sweep parameter path. <!-- lazydoc-ignore-classmethod: internal --> """ # TODO: add native support for pathlib.Path if isinstance(value, pathlib.Path): return str(value) return value # Computed fields. 
@computed_field # type: ignore[prop-decorator] @property def _args(self) -> List[str]: if not self._jupyter: return sys.argv[1:] return [] @computed_field # type: ignore[prop-decorator] @property def _aws_lambda(self) -> bool: """Check if we are running in a lambda environment.""" from sentry_sdk.integrations.aws_lambda import ( # type: ignore[import-not-found] get_lambda_bootstrap, ) lambda_bootstrap = get_lambda_bootstrap() if not lambda_bootstrap or not hasattr( lambda_bootstrap, "handle_event_request" ): return False return True @computed_field # type: ignore[prop-decorator] @property def _code_path_local(self) -> Optional[str]: """The relative path from the current working directory to the code path. For example, if the code path is /home/user/project/example.py, and the current working directory is /home/user/project, then the code path local is example.py. If couldn't find the relative path, this will be an empty string. """ return self._get_program_relpath(self.program) if self.program else None @computed_field # type: ignore[prop-decorator] @property def _colab(self) -> bool: return "google.colab" in sys.modules @computed_field # type: ignore[prop-decorator] @property def _ipython(self) -> bool: return ipython.in_ipython() @computed_field # type: ignore[prop-decorator] @property def _jupyter(self) -> bool: return ipython.in_jupyter() @computed_field # type: ignore[prop-decorator] @property def _kaggle(self) -> bool: return util._is_likely_kaggle() @computed_field # type: ignore[prop-decorator] @property def _noop(self) -> bool: return self.mode == "disabled" @computed_field # type: ignore[prop-decorator] @property def _notebook(self) -> bool: return self._ipython or self._jupyter or self._colab or self._kaggle @computed_field # type: ignore[prop-decorator] @property def _offline(self) -> bool: return self.mode in ("offline", "dryrun") @computed_field # type: ignore[prop-decorator] @property def _os(self) -> str: """The operating system of the machine 
running the script.""" return platform.platform(aliased=True) @computed_field # type: ignore[prop-decorator] @property def _platform(self) -> str: return f"{platform.system()}-{platform.machine()}".lower() @computed_field # type: ignore[prop-decorator] @property def _python(self) -> str: return f"{platform.python_implementation()} {platform.python_version()}" @computed_field # type: ignore[prop-decorator] @property def _shared(self) -> bool: """Whether we are in shared mode. In "shared" mode, multiple processes can write to the same run, for example from different machines. """ return self.mode == "shared" @computed_field # type: ignore[prop-decorator] @property def _start_datetime(self) -> str: if self.x_start_time is None: return "" datetime_now = datetime.fromtimestamp(self.x_start_time) return datetime_now.strftime("%Y%m%d_%H%M%S") @computed_field # type: ignore[prop-decorator] @property def _tmp_code_dir(self) -> str: return _path_convert(self.sync_dir, "tmp", "code") @computed_field # type: ignore[prop-decorator] @property def _windows(self) -> bool: return platform.system() == "Windows" @computed_field # type: ignore[prop-decorator] @property def colab_url(self) -> Optional[str]: """The URL to the Colab notebook, if running in Colab.""" if not self._colab: return None if self.x_jupyter_path and self.x_jupyter_path.startswith("fileId="): unescaped = unquote(self.x_jupyter_path) return "https://colab.research.google.com/notebook#" + unescaped return None @computed_field # type: ignore[prop-decorator] @property def deployment(self) -> Literal["local", "cloud"]: return "local" if self.is_local else "cloud" @computed_field # type: ignore[prop-decorator] @property def files_dir(self) -> str: """Absolute path to the local directory where the run's files are stored.""" # Must match the logic in settings.go in the service process. 
return self.x_files_dir or _path_convert(self.sync_dir, "files") @computed_field # type: ignore[prop-decorator] @property def is_local(self) -> bool: return str(self.base_url) != "https://api.wandb.ai" @computed_field # type: ignore[prop-decorator] @property def log_dir(self) -> str: """The directory for storing log files.""" return _path_convert(self.sync_dir, "logs") @computed_field # type: ignore[prop-decorator] @property def log_internal(self) -> str: """The path to the file to use for internal logs.""" return _path_convert(self.log_dir, "debug-internal.log") @computed_field # type: ignore[prop-decorator] @property def log_symlink_internal(self) -> str: """The path to the symlink to the internal log file of the most recent run.""" return _path_convert(self.wandb_dir, "debug-internal.log") @computed_field # type: ignore[prop-decorator] @property def log_symlink_user(self) -> str: """The path to the symlink to the user-process log file of the most recent run.""" return _path_convert(self.wandb_dir, "debug.log") @computed_field # type: ignore[prop-decorator] @property def log_user(self) -> str: """The path to the file to use for user-process logs.""" return _path_convert(self.log_dir, "debug.log") @computed_field # type: ignore[prop-decorator] @property def project_url(self) -> str: """The W&B URL where the project can be viewed.""" project_url = self._project_url_base() if not project_url: return "" return project_url @computed_field # type: ignore[prop-decorator] @property def resume_fname(self) -> str: """The path to the resume file.""" return _path_convert(self.wandb_dir, "wandb-resume.json") @computed_field # type: ignore[prop-decorator] @property def run_mode(self) -> Literal["run", "offline-run"]: """The mode of the run. 
Can be either "run" or "offline-run".""" return "run" if not self._offline else "offline-run" @computed_field # type: ignore[prop-decorator] @property def run_url(self) -> str: """The W&B URL where the run can be viewed.""" project_url = self._project_url_base() if not all([project_url, self.run_id]): return "" # Exclude specific safe characters from URL encoding to prevent 404 errors safe_chars = "=+&$@" return f"{project_url}/runs/{quote(self.run_id or '', safe=safe_chars)}" @computed_field # type: ignore[prop-decorator] @property def settings_workspace(self) -> str: """The path to the workspace settings file.""" return _path_convert(self.wandb_dir, "settings") @computed_field # type: ignore[prop-decorator] @property def sweep_url(self) -> str: """The W&B URL where the sweep can be viewed.""" project_url = self._project_url_base() if not all([project_url, self.sweep_id]): return "" return f"{project_url}/sweeps/{quote(self.sweep_id or '')}" @computed_field # type: ignore[prop-decorator] @property def sync_dir(self) -> str: """The directory for storing the run's files.""" name = f"{self.run_mode}-{self.timespec}-{self.run_id}" if self.x_sync_dir_suffix: name += f"-{self.x_sync_dir_suffix}" return _path_convert(self.wandb_dir, name) @computed_field # type: ignore[prop-decorator] @property def sync_file(self) -> str: """Path to the append-only binary transaction log file.""" return _path_convert(self.sync_dir, f"run-{self.run_id}.wandb") @computed_field # type: ignore[prop-decorator] @property def sync_symlink_latest(self) -> str: """Path to the symlink to the most recent run's transaction log file.""" return _path_convert(self.wandb_dir, "latest-run") @computed_field # type: ignore[prop-decorator] @property def timespec(self) -> str: """The time specification for the run.""" return self._start_datetime @computed_field # type: ignore[prop-decorator] @property def wandb_dir(self) -> str: """Full path to the wandb directory.""" stage_dir = ( ".wandb" + os.sep if 
os.path.exists(os.path.join(self.root_dir, ".wandb")) else "wandb" + os.sep ) path = os.path.join(self.root_dir, stage_dir) return os.path.expanduser(path) # Methods to collect and update settings from different sources. # # The Settings class does not track the source of the settings, # so it is up to the developer to ensure that the settings are applied # in the correct order. Most of the updates are done in # wandb/sdk/wandb_setup.py::_WandbSetup._settings_setup. def update_from_system_config_file(self): """Update settings from the system config file. <!-- lazydoc-ignore: internal --> """ if not self.settings_system or not os.path.exists(self.settings_system): return self._load_config_file(self.settings_system) def update_from_workspace_config_file(self): """Update settings from the workspace config file. <!-- lazydoc-ignore: internal --> """ if not self.settings_workspace or not os.path.exists(self.settings_workspace): return self._load_config_file(self.settings_workspace) def update_from_env_vars(self, environ: Dict[str, Any]): """Update settings from environment variables. 
<!-- lazydoc-ignore: internal --> """ env_prefix: str = "WANDB_" private_env_prefix: str = env_prefix + "_" special_env_var_names = { "WANDB_SERVICE_TRANSPORT": "x_service_transport", "WANDB_DIR": "root_dir", "WANDB_NAME": "run_name", "WANDB_NOTES": "run_notes", "WANDB_TAGS": "run_tags", "WANDB_JOB_TYPE": "run_job_type", "WANDB_HTTP_TIMEOUT": "x_graphql_timeout_seconds", "WANDB_FILE_PUSHER_TIMEOUT": "x_file_transfer_timeout_seconds", "WANDB_USER_EMAIL": "email", } env = dict() for setting, value in environ.items(): if not setting.startswith(env_prefix): continue if setting in special_env_var_names: key = special_env_var_names[setting] elif setting.startswith(private_env_prefix): key = "x_" + setting[len(private_env_prefix) :].lower() else: # otherwise, strip the prefix and convert to lowercase key = setting[len(env_prefix) :].lower() if key in self.__dict__: if key in ("ignore_globs", "run_tags"): value = value.split(",") env[key] = value for key, value in env.items(): if value is not None: setattr(self, key, value) def update_from_system_environment(self): """Update settings from the system environment. <!-- lazydoc-ignore: internal --> """ # For code saving, only allow env var override if value from server is true, or # if no preference was specified. 
if (self.save_code is True or self.save_code is None) and ( os.getenv(env.SAVE_CODE) is not None or os.getenv(env.DISABLE_CODE) is not None ): self.save_code = env.should_save_code() if os.getenv(env.DISABLE_GIT) is not None: self.disable_git = env.disable_git() # Attempt to get notebook information if not already set by the user if self._jupyter and (self.notebook_name is None or self.notebook_name == ""): meta = wandb.jupyter.notebook_metadata(self.silent) # type: ignore self.x_jupyter_path = meta.get("path") self.x_jupyter_name = meta.get("name") self.x_jupyter_root = meta.get("root") elif ( self._jupyter and self.notebook_name is not None and os.path.exists(self.notebook_name) ): self.x_jupyter_path = self.notebook_name self.x_jupyter_name = self.notebook_name self.x_jupyter_root = os.getcwd() elif self._jupyter: wandb.termwarn( "WANDB_NOTEBOOK_NAME should be a path to a notebook file, " f"couldn't find {self.notebook_name}.", ) # host is populated by update_from_env_vars if the corresponding env # vars exist -- but if they don't, we'll fill them in here. if self.host is None: self.host = socket.gethostname() # type: ignore _executable = ( self.x_executable or os.environ.get(env._EXECUTABLE) or sys.executable or shutil.which("python3") or "python3" ) self.x_executable = _executable if self.docker is None: self.docker = env.get_docker(util.image_id_from_k8s()) # proceed if not in CLI mode if self.x_cli_only_mode: return program = self.program or self._get_program() if program is not None: self._setup_code_paths(program) else: program = "<python with no main file>" self.program = program def update_from_dict(self, settings: Dict[str, Any]) -> None: """Update settings from a dictionary. <!-- lazydoc-ignore: internal --> """ for key, value in dict(settings).items(): if value is not None: setattr(self, key, value) def update_from_settings(self, settings: Settings) -> None: """Update settings from another instance of `Settings`. 
<!-- lazydoc-ignore: internal --> """ d = {field: getattr(settings, field) for field in settings.model_fields_set} if d: self.update_from_dict(d) # Helper methods. def to_proto(self) -> wandb_settings_pb2.Settings: """Generate a protobuf representation of the settings. <!-- lazydoc-ignore: internal --> """ settings_proto = wandb_settings_pb2.Settings() for k, v in self.model_dump(exclude_none=True).items(): if k in CLIENT_ONLY_SETTINGS: continue # Special case for x_stats_open_metrics_filters. if k == "x_stats_open_metrics_filters": if isinstance(v, (list, set, tuple)): setting = getattr(settings_proto, k) setting.sequence.value.extend(v) elif isinstance(v, dict): setting = getattr(settings_proto, k) for key, value in v.items(): for kk, vv in value.items(): setting.mapping.value[key].value[kk] = vv else: raise TypeError(f"Unsupported type {type(v)} for setting {k}") continue # Special case for RunMoment fields. if k in ("fork_from", "resume_from"): run_moment = ( v if isinstance(v, RunMoment) else RunMoment( run=v.get("run"), value=v.get("value"), metric=v.get("metric"), ) ) getattr(settings_proto, k).CopyFrom( wandb_settings_pb2.RunMoment( run=run_moment.run, value=run_moment.value, metric=run_moment.metric, ) ) continue if isinstance(v, bool): getattr(settings_proto, k).CopyFrom(BoolValue(value=v)) elif isinstance(v, int): getattr(settings_proto, k).CopyFrom(Int32Value(value=v)) elif isinstance(v, float): getattr(settings_proto, k).CopyFrom(DoubleValue(value=v)) elif isinstance(v, str): getattr(settings_proto, k).CopyFrom(StringValue(value=v)) elif isinstance(v, (list, set, tuple)): # we only support sequences of strings for now sequence = getattr(settings_proto, k) sequence.value.extend(v) elif isinstance(v, dict): mapping = getattr(settings_proto, k) for key, value in v.items(): # we only support dicts with string values for now mapping.value[key] = value elif v is None: # None means that the setting value was not set. 
pass else: raise TypeError(f"Unsupported type {type(v)} for setting {k}") return settings_proto def _get_program(self) -> Optional[str]: """Get the program that started the current process.""" if self._jupyter: # If in a notebook, try to get the program from the notebook metadata. if self.notebook_name: return self.notebook_name if not self.x_jupyter_path: return self.program if self.x_jupyter_path.startswith("fileId="): return self.x_jupyter_name return self.x_jupyter_path # If not in a notebook, try to get the program from the environment # or the __main__ module for scripts run as `python -m ...`. program = os.getenv(env.PROGRAM) if program is not None: return program try: import __main__ except ImportError: return None try: if __main__.__spec__ is None: python_args = __main__.__file__ else: python_args = f"-m {__main__.__spec__.name}" except AttributeError: return None return python_args @staticmethod def _get_program_relpath(program: str, root: Optional[str] = None) -> Optional[str]: """Get the relative path to the program from the root directory.""" if not program: return None root = root or os.getcwd() if not root: return None # For windows, if the root and program are on different drives, # os.path.relpath will raise a ValueError. 
if not filesystem.are_paths_on_same_drive( pathlib.Path(root), pathlib.Path(program) ): return None full_path_to_program = os.path.join( root, os.path.relpath(os.getcwd(), root), program ) if os.path.exists(full_path_to_program): relative_path = os.path.relpath(full_path_to_program, start=root) if "../" in relative_path: return None return relative_path return None def _load_config_file( self, file_name: str, section: str = "default", ) -> None: """Load settings from a section in a config file.""" parser = configparser.ConfigParser() parser.add_section(section) parser.read(file_name) key: str value: object for key, value in parser[section].items(): if key == "ignore_globs": value = value.split(",") elif key == "anonymous": wandb.termwarn( f"Deprecated setting 'anonymous' in {file_name} has no" + " effect and will be removed in a future version of wandb." + " Please delete it manually or by running `wandb login`" + " to avoid errors.", repeat=False, ) value = deprecation.UNSET setattr(self, key, value) def _project_url_base(self) -> str: """Construct the base URL for the project.""" if not all([self.entity, self.project]): return "" app_url = util.app_url(self.base_url) return f"{app_url}/{quote(self.entity or '')}/{quote(self.project or '')}" @staticmethod def _runmoment_preprocessor( val: Union[RunMoment, str, None], ) -> Optional[RunMoment]: """Preprocess the setting for forking or resuming a run.""" if isinstance(val, RunMoment) or val is None: return val elif isinstance(val, str): return RunMoment.from_uri(val) if not IS_PYDANTIC_V2: def model_copy(self, *args, **kwargs): return self.copy(*args, **kwargs) def model_dump(self, **kwargs): """Compatibility method for Pydantic v1 to mimic v2's model_dump. In v1, this is equivalent to dict() but also includes computed properties. 
Args: **kwargs: Options passed to the dict method - exclude_none: Whether to exclude fields with None values Returns: A dictionary of the model's fields and computed properties """ # Handle exclude_none separately since it's named differently in v1 exclude_none = kwargs.pop("exclude_none", False) # Start with regular fields from dict() result = self.dict(**kwargs) # Get all computed properties for name in dir(self.__class__): attr = getattr(self.__class__, name, None) if isinstance(attr, property): try: # Only include properties that don't raise errors value = getattr(self, name) result[name] = value except (AttributeError, NotImplementedError, TypeError, ValueError): # Skip properties that can't be accessed or raise errors pass elif isinstance(attr, RunMoment): value = getattr(self, name) result[name] = value # Special Pydantic attributes that should always be excluded exclude_fields = { "model_config", "model_fields", "model_fields_set", "__fields__", "__model_fields_set", "__pydantic_self__", "__pydantic_initialised__", } # Remove special Pydantic attributes for field in exclude_fields: if field in result: del result[field] if exclude_none: # Remove None values from the result return {k: v for k, v in result.items() if v is not None} return result @property def model_fields_set(self) -> set: """Return a set of fields that have been explicitly set. This is a compatibility property for Pydantic v1 to mimic v2's model_fields_set. 
""" return getattr(self, "__fields_set__", set()) def _setup_code_paths(self, program: str): """Sets the program_abspath and program_relpath settings.""" if self._jupyter and self.x_jupyter_root: self._infer_code_paths_for_jupyter(program) else: self._infer_code_path_for_program(program) def _infer_code_path_for_program(self, program: str): """Finds the program's absolute and relative paths.""" from .lib.gitlib import GitRepo try: root = ( GitRepo().root or os.getcwd() if not self.disable_git else os.getcwd() ) except Exception: # if the git command fails, fall back to the current working directory root = os.getcwd() self.program_relpath = self.program_relpath or self._get_program_relpath( program, root ) program_abspath = os.path.abspath( os.path.join(root, os.path.relpath(os.getcwd(), root), program) ) if os.path.exists(program_abspath): self.program_abspath = program_abspath def _infer_code_paths_for_jupyter(self, program: str): """Find the notebook's absolute and relative paths. Since the notebook's execution environment is not the same as the current working directory. We utilize the metadata provided by the jupyter server. """ if not self.x_jupyter_root or not program: return None self.program_abspath = os.path.abspath( os.path.join(self.x_jupyter_root, program) ) self.program_relpath = program
Settings
python
kamyu104__LeetCode-Solutions
Python/clone-binary-tree-with-random-pointer.py
{ "start": 1647, "end": 2738 }
class ____(object):
    def copyRandomBinaryTree(self, root):
        """
        :type root: Node
        :rtype: NodeCopy
        """
        # Interleaving trick (analogous to the classic linked-list
        # random-pointer clone): splice each copy in between a node and
        # its original left child, wire up random/right through the
        # interleaved structure, then split the two trees apart again.
        def dfs(node, callback):
            # Generic pre-order walk; `callback` returns
            # (node to recurse into on the left, copy of `node`).
            if not node:
                return None
            left_node, copy = callback(node)
            dfs(left_node, callback)
            dfs(node.right, callback)
            return copy

        def merge(node):
            # Phase 1: insert the copy as node's new left child:
            # node -> copy -> original left subtree.
            copy = NodeCopy(node.val)
            node.left, copy.left = copy, node.left
            return copy.left, copy

        def clone(node):
            # Phase 2: while interleaved, the copy of any original X is
            # X.left, so copy.random = node.random.left and
            # copy.right = node.right.left.
            copy = node.left
            node.left.random = node.random.left if node.random else None
            node.left.right = node.right.left if node.right else None
            return copy.left, copy

        def split(node):
            # Phase 3: undo the interleaving — node.left goes back to the
            # original left child, and copy.left becomes that child's own
            # copy (which sits at child.left while still interleaved).
            copy = node.left
            node.left, copy.left = copy.left, copy.left.left if copy.left else None
            return node.left, copy

        dfs(root, merge)
        dfs(root, clone)
        return dfs(root, split)


# Time:  O(n)
# Space: O(n)
import collections
Solution_Recu
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_permissions.py
{ "start": 1515, "end": 1663 }
class ____:
    # Test double: an async GraphQL mutation resolver gated by
    # @check_permission with a made-up permission name — presumably used
    # to exercise the permission-denied path (confirm against the tests
    # that instantiate it).
    @check_permission("fake_permission")
    async def mutate(self, graphene_info: ResolveInfo, **_kwargs):
        # Body intentionally empty: only the decorator's behavior matters.
        pass
FakeMutationAsync
python
sympy__sympy
sympy/assumptions/predicates/sets.py
{ "start": 1000, "end": 1677 }
class ____(Predicate):
    """
    Rational number predicate.

    Explanation
    ===========

    ``Q.rational(x)`` is true iff ``x`` belongs to the set of
    rational numbers.

    Examples
    ========

    >>> from sympy import ask, Q, pi, S
    >>> ask(Q.rational(0))
    True
    >>> ask(Q.rational(S(1)/2))
    True
    >>> ask(Q.rational(pi))
    False

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Rational_number

    """
    # Key under which the predicate is exposed, i.e. ``Q.rational``.
    name = 'rational'
    # multipledispatch Dispatcher; per-type handlers are registered
    # elsewhere via ``handler.register``.
    handler = Dispatcher(
        "RationalHandler",
        doc=("Handler for Q.rational.\n\n"
             "Test that an expression belongs to the field of rational "
             "numbers.")
    )
RationalPredicate
python
cherrypy__cherrypy
cherrypy/_cplogging.py
{ "start": 16712, "end": 17031 }
class ____(object): """A postponed timestamp string retrieval class.""" def __str__(self): """Return datetime in RFC3339 UTC Format.""" iso_formatted_now = datetime.datetime.now( datetime.timezone.utc, ).isoformat('T') return f'{iso_formatted_now!s}Z'
LazyRfc3339UtcTime
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/strategies/_internal/core.py
{ "start": 78485, "end": 78955 }
class ____(SearchStrategy):
    # Strategy produced by the @composite decorator: wraps the user's
    # definition function together with the (args, kwargs) the decorated
    # strategy was called with.
    def __init__(self, definition, args, kwargs):
        super().__init__()
        self.definition = definition
        self.args = args
        self.kwargs = kwargs

    def do_draw(self, data):
        # The user's function receives ``data.draw`` as its first
        # argument so it can draw from sub-strategies interactively.
        return self.definition(data.draw, *self.args, **self.kwargs)

    def calc_label(self) -> int:
        # Mix the class-level label with one derived from the wrapped
        # callable so distinct composite definitions get distinct labels.
        return combine_labels(
            self.class_label,
            calc_label_from_callable(self.definition),
        )
CompositeStrategy
python
faif__python-patterns
patterns/structural/proxy.py
{ "start": 1144, "end": 1382 }
class ____(Subject):
    """Concrete Subject that performs the actual work.

    Stands in for a heavyweight collaborator — an external service such
    as a payment gateway is a good real-world example — that a Proxy may
    wrap.
    """

    def do_the_job(self, user: str) -> None:
        message = f"I am doing the job for {user}"
        print(message)
RealSubject
python
getsentry__sentry
tests/sentry/api/bases/test_organization.py
{ "start": 24278, "end": 27857 }
class ____(BaseOrganizationEndpointTest):
    """Tests for ``endpoint.get_filter_params`` (project / environment /
    date-range resolution from request parameters)."""

    def setUp(self) -> None:
        # One team, two projects on it, two environments on project_1.
        self.team_1 = self.create_team(organization=self.org)
        self.project_1 = self.create_project(organization=self.org, teams=[self.team_1])
        self.project_2 = self.create_project(organization=self.org, teams=[self.team_1])
        self.env_1 = self.create_environment(project=self.project_1)
        self.env_2 = self.create_environment(project=self.project_1)

    def run_test(
        self,
        expected_projects,
        expected_envs=None,
        expected_teams=None,
        expected_start=None,
        expected_end=None,
        env_names=None,
        user=None,
        date_filter_optional=False,
        project_ids=None,
        start=None,
        end=None,
        stats_period=None,
        active_superuser=False,
    ):
        # Build the request querystring from the optional inputs, call
        # get_filter_params, and assert the resolved projects, dates and
        # environments match expectations.
        request_args = {}
        if env_names:
            request_args["environment"] = env_names
        if project_ids:
            request_args["project"] = project_ids
        if start and end:
            request_args["start"] = start
            request_args["end"] = end
        if stats_period:
            request_args["statsPeriod"] = stats_period

        result = self.endpoint.get_filter_params(
            self.build_request(user=user, active_superuser=active_superuser, **request_args),
            self.org,
            date_filter_optional=date_filter_optional,
        )
        assert {p.id for p in expected_projects} == set(result["project_id"])
        assert expected_start == result["start"]
        assert expected_end == result["end"]
        if expected_envs:
            assert {e.name for e in expected_envs} == set(result["environment"])
        else:
            # No environment filter requested -> key must be absent.
            assert "environment" not in result

    @freeze_time("2018-12-11 03:21:34")
    def test_no_params(self) -> None:
        # Without project access, resolution raises NoProjects.
        with pytest.raises(NoProjects):
            self.run_test([])
        # Superuser: all projects, default date window of MAX_STATS_PERIOD.
        self.run_test(
            expected_projects=[self.project_1, self.project_2],
            expected_start=timezone.now() - MAX_STATS_PERIOD,
            expected_end=timezone.now(),
            user=self.user,
            active_superuser=True,
        )
        # date_filter_optional=True suppresses the default date window.
        self.run_test(
            expected_projects=[self.project_1, self.project_2],
            expected_start=None,
            expected_end=None,
            user=self.user,
            date_filter_optional=True,
            active_superuser=True,
        )

    def test_params(self) -> None:
        start = timezone.now() - timedelta(days=3)
        end = timezone.now()
        self.create_team_membership(user=self.user, team=self.team_1)
        # Explicit start/end passed as naive ISO strings resolve back to
        # the aware datetimes.
        self.run_test(
            expected_projects=[self.project_1, self.project_2],
            project_ids=[self.project_1.id, self.project_2.id],
            expected_envs=[self.env_1, self.env_2],
            env_names=[self.env_1.name, self.env_2.name],
            expected_start=start,
            expected_end=end,
            start=start.replace(tzinfo=None).isoformat(),
            end=end.replace(tzinfo=None).isoformat(),
        )

        # statsPeriod="2d" resolves to [now - 2 days, now].
        with freeze_time("2018-12-11 03:21:34"):
            self.run_test(
                expected_projects=[self.project_1, self.project_2],
                project_ids=[self.project_1.id, self.project_2.id],
                expected_envs=[self.env_1, self.env_2],
                env_names=[self.env_1.name, self.env_2.name],
                expected_start=timezone.now() - timedelta(days=2),
                expected_end=timezone.now(),
                stats_period="2d",
            )
GetFilterParamsTest
python
scrapy__scrapy
tests/test_robotstxt_interface.py
{ "start": 6130, "end": 6374 }
class ____(BaseRobotParserTest):
    """Run the shared robots.txt parser test suite against the Rerp
    (robotexclusionrulesparser) backend."""

    def setup_method(self):
        super()._setUp(RerpRobotParser)

    def test_length_based_precedence(self):
        # Override the inherited test: this backend lacks the feature.
        pytest.skip("Rerp does not support length based directives precedence.")
TestRerpRobotParser
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/testing/suite/test_types.py
{ "start": 22086, "end": 22512 }
class ____(_DateFixture, fixtures.TablesTest):
    """Round-trip tests for TIMESTAMP columns with microsecond precision
    (test data carries a non-zero microseconds component)."""

    # Only run on backends advertising microsecond-precision timestamps.
    __requires__ = ("timestamp_microseconds",)
    __backend__ = True
    datatype = TIMESTAMP
    data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396)

    @testing.requires.timestamp_microseconds_implicit_bound
    def test_select_direct(self, connection):
        # A bare literal (no column context) must still round-trip.
        result = connection.scalar(select(literal(self.data)))
        eq_(result, self.data)
TimestampMicrosecondsTest
python
kamyu104__LeetCode-Solutions
Python/find-mirror-score-of-a-string.py
{ "start": 71, "end": 472 }
class Solution(object):
    def calculateScore(self, s):
        """
        :type s: str
        :rtype: int

        The "mirror" of a letter x is the letter at alphabet index
        25 - x ('a' <-> 'z', 'b' <-> 'y', ...).  Scan left to right
        keeping, per letter, a stack of unmatched indices; when the
        current letter's mirror has an unmatched occurrence, pair the
        current index with the closest (most recent) one and add the
        distance to the score.  Time O(n), space O(n).
        """
        result = 0
        # range (not Py2-only xrange) keeps this runnable on Python 3
        # as well; for a fixed bound of 26 the difference is irrelevant.
        lookup = [[] for _ in range(26)]
        for i, x in enumerate(s):
            x = ord(x) - ord('a')
            if lookup[25 - x]:
                # Closest unmatched mirror occurrence: pop its index.
                result += i - lookup[25 - x].pop()
            else:
                lookup[x].append(i)
        return result
Solution
python
pyca__cryptography
tests/x509/verification/test_verification.py
{ "start": 3914, "end": 4192 }
class ____:
    """Constructor validation for the X.509 verification ``Store``."""

    def test_store_rejects_empty_list(self):
        # A store with no trust anchors is rejected outright.
        with pytest.raises(ValueError):
            Store([])

    def test_store_rejects_non_certificates(self):
        # Elements must be certificate objects, not arbitrary values.
        with pytest.raises(TypeError):
            Store(["not a cert"])  # type: ignore[list-item]
TestStore
python
django__django
tests/admin_widgets/tests.py
{ "start": 14761, "end": 15603 }
class ____(SimpleTestCase):
    """Rendering tests for ``widgets.AdminTimeWidget``."""

    def test_attrs(self):
        # Default attrs: vTimeField class, size=8.
        w = widgets.AdminTimeWidget()
        self.assertHTMLEqual(
            w.render("test", datetime(2007, 12, 1, 9, 30)),
            '<p class="time">'
            '<input aria-describedby="id_test_timezone_warning_helptext" '
            'value="09:30:00" type="text" class="vTimeField" name="test" '
            'size="8"></p>',
        )
        # pass attrs to widget
        w = widgets.AdminTimeWidget(attrs={"size": 20, "class": "myTimeField"})
        self.assertHTMLEqual(
            w.render("test", datetime(2007, 12, 1, 9, 30)),
            '<p class="time">'
            '<input aria-describedby="id_test_timezone_warning_helptext" '
            'value="09:30:00" type="text" class="myTimeField" name="test" '
            'size="20"></p>',
        )
AdminTimeWidgetTest
python
pandas-dev__pandas
pandas/tests/arithmetic/test_object.py
{ "start": 2421, "end": 12031 }
class ____:
    """Arithmetic tests for object-dtype Series/Index/arrays."""

    def test_add_period_to_array_of_offset(self):
        # GH#50162
        per = pd.Period("2012-1-1", freq="D")
        pi = pd.period_range("2012-1-1", periods=10, freq="D")
        idx = per - pi

        expected = pd.Index([x + per for x in idx], dtype=object)
        result = idx + per
        tm.assert_index_equal(result, expected)

        result = per + idx
        tm.assert_index_equal(result, expected)

    # TODO: parametrize
    def test_pow_ops_object(self):
        # GH#22922
        # pow is weird with masking & 1, so testing here
        a = Series([1, np.nan, 1, np.nan], dtype=object)
        b = Series([1, np.nan, np.nan, 1], dtype=object)
        result = a**b
        expected = Series(a.values**b.values, dtype=object)
        tm.assert_series_equal(result, expected)

        result = b**a
        expected = Series(b.values**a.values, dtype=object)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("op", [operator.add, ops.radd])
    @pytest.mark.parametrize("other", ["category", "Int64"])
    def test_add_extension_scalar(self, other, box_with_array, op):
        # GH#22378
        # Check that scalars satisfying is_extension_array_dtype(obj)
        # do not incorrectly try to dispatch to an ExtensionArray operation
        arr = Series(["a", "b", "c"])
        expected = Series([op(x, other) for x in arr])

        arr = tm.box_expected(arr, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = op(arr, other)
        tm.assert_equal(result, expected)

    def test_objarr_add_str(self, box_with_array):
        ser = Series(["x", np.nan, "x"])
        expected = Series(["xa", np.nan, "xa"])

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = ser + "a"
        tm.assert_equal(result, expected)

    def test_objarr_radd_str(self, box_with_array):
        ser = Series(["x", np.nan, "x"])
        expected = Series(["ax", np.nan, "ax"])

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = "a" + ser
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize(
        "data",
        [
            [1, 2, 3],
            [1.1, 2.2, 3.3],
            [Timestamp("2011-01-01"), Timestamp("2011-01-02"), pd.NaT],
            ["x", "y", 1],
        ],
    )
    @pytest.mark.parametrize("dtype", [None, object])
    def test_objarr_radd_str_invalid(self, dtype, data, box_with_array):
        ser = Series(data, dtype=dtype)

        ser = tm.box_expected(ser, box_with_array)
        # Acceptable error messages vary by backend/box.
        msg = "|".join(
            [
                "can only concatenate str",
                "did not contain a loop with signature matching types",
                "unsupported operand type",
                "must be str",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            "foo_" + ser

    @pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
    def test_objarr_add_invalid(self, op, box_with_array):
        # invalid ops
        box = box_with_array

        obj_ser = Series(list("abc"), dtype=object, name="objects")

        obj_ser = tm.box_expected(obj_ser, box)
        msg = "|".join(
            [
                "can only concatenate str",
                "unsupported operand type",
                "must be str",
                "has no kernel",
            ]
        )
        with pytest.raises(Exception, match=msg):
            op(obj_ser, 1)
        with pytest.raises(Exception, match=msg):
            op(obj_ser, np.array(1, dtype=np.int64))

    # TODO: Moved from tests.series.test_operators; needs cleanup
    def test_operators_na_handling(self):
        # NaN entries propagate through string concatenation.
        ser = Series(["foo", "bar", "baz", np.nan])
        result = "prefix_" + ser
        expected = Series(["prefix_foo", "prefix_bar", "prefix_baz", np.nan])
        tm.assert_series_equal(result, expected)

        result = ser + "_suffix"
        expected = Series(["foo_suffix", "bar_suffix", "baz_suffix", np.nan])
        tm.assert_series_equal(result, expected)

    # TODO: parametrize over box
    @pytest.mark.parametrize("dtype", [None, object])
    def test_series_with_dtype_radd_timedelta(self, dtype):
        # note this test is _not_ aimed at timedelta64-dtyped Series
        # as of 2.0 we retain object dtype when ser.dtype == object
        ser = Series(
            [pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days")],
            dtype=dtype,
        )
        expected = Series(
            [pd.Timedelta("4 days"), pd.Timedelta("5 days"), pd.Timedelta("6 days")],
            dtype=dtype,
        )

        result = pd.Timedelta("3 days") + ser
        tm.assert_series_equal(result, expected)

        result = ser + pd.Timedelta("3 days")
        tm.assert_series_equal(result, expected)

    # TODO: cleanup & parametrize over box
    def test_mixed_timezone_series_ops_object(self):
        # GH#13043
        ser = Series(
            [
                Timestamp("2015-01-01", tz="US/Eastern"),
                Timestamp("2015-01-01", tz="Asia/Tokyo"),
            ],
            name="xxx",
        )
        assert ser.dtype == object

        exp = Series(
            [
                Timestamp("2015-01-02", tz="US/Eastern"),
                Timestamp("2015-01-02", tz="Asia/Tokyo"),
            ],
            name="xxx",
        )
        tm.assert_series_equal(ser + pd.Timedelta("1 days"), exp)
        tm.assert_series_equal(pd.Timedelta("1 days") + ser, exp)

        # object series & object series
        ser2 = Series(
            [
                Timestamp("2015-01-03", tz="US/Eastern"),
                Timestamp("2015-01-05", tz="Asia/Tokyo"),
            ],
            name="xxx",
        )
        assert ser2.dtype == object
        exp = Series(
            [pd.Timedelta("2 days"), pd.Timedelta("4 days")], name="xxx", dtype=object
        )
        tm.assert_series_equal(ser2 - ser, exp)
        tm.assert_series_equal(ser - ser2, -exp)

        ser = Series(
            [pd.Timedelta("01:00:00"), pd.Timedelta("02:00:00")],
            name="xxx",
            dtype=object,
        )
        assert ser.dtype == object

        exp = Series(
            [pd.Timedelta("01:30:00"), pd.Timedelta("02:30:00")],
            name="xxx",
            dtype=object,
        )
        tm.assert_series_equal(ser + pd.Timedelta("00:30:00"), exp)
        tm.assert_series_equal(pd.Timedelta("00:30:00") + ser, exp)

    # TODO: cleanup & parametrize over box
    def test_iadd_preserves_name(self):
        # GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name
        ser = Series([1, 2, 3])
        ser.index.name = "foo"

        ser.index += 1
        assert ser.index.name == "foo"

        ser.index -= 1
        assert ser.index.name == "foo"

    def test_add_string(self):
        # from bug report
        index = pd.Index(["a", "b", "c"])
        index2 = index + "foo"

        assert "a" not in index2
        assert "afoo" in index2

    def test_iadd_string(self):
        index = pd.Index(["a", "b", "c"])
        # doesn't fail test unless there is a check before `+=`
        assert "a" in index

        index += "_x"
        assert "a_x" in index

    def test_add(self):
        index = pd.Index([str(i) for i in range(10)])
        expected = pd.Index(index.values * 2)
        tm.assert_index_equal(index + index, expected)
        tm.assert_index_equal(index + index.tolist(), expected)
        tm.assert_index_equal(index.tolist() + index, expected)

        # test add and radd
        index = pd.Index(list("abc"))
        expected = pd.Index(["a1", "b1", "c1"])
        tm.assert_index_equal(index + "1", expected)
        expected = pd.Index(["1a", "1b", "1c"])
        tm.assert_index_equal("1" + index, expected)

    def test_sub_fail(self):
        # Subtraction is undefined for string-valued object Index.
        index = pd.Index([str(i) for i in range(10)])

        msg = "unsupported operand type|Cannot broadcast|sub' not supported"
        with pytest.raises(TypeError, match=msg):
            index - "a"
        with pytest.raises(TypeError, match=msg):
            index - index
        with pytest.raises(TypeError, match=msg):
            index - index.tolist()
        with pytest.raises(TypeError, match=msg):
            index.tolist() - index

    def test_sub_object(self):
        # GH#19369
        index = pd.Index([Decimal(1), Decimal(2)])
        expected = pd.Index([Decimal(0), Decimal(1)])

        result = index - Decimal(1)
        tm.assert_index_equal(result, expected)

        result = index - pd.Index([Decimal(1), Decimal(1)])
        tm.assert_index_equal(result, expected)

        msg = "unsupported operand type"
        with pytest.raises(TypeError, match=msg):
            index - "foo"

        with pytest.raises(TypeError, match=msg):
            index - np.array([2, "foo"], dtype=object)

    def test_rsub_object(self, fixed_now_ts):
        # GH#19369
        index = pd.Index([Decimal(1), Decimal(2)])
        expected = pd.Index([Decimal(1), Decimal(0)])

        result = Decimal(2) - index
        tm.assert_index_equal(result, expected)

        result = np.array([Decimal(2), Decimal(2)]) - index
        tm.assert_index_equal(result, expected)

        msg = "unsupported operand type"
        with pytest.raises(TypeError, match=msg):
            "foo" - index

        with pytest.raises(TypeError, match=msg):
            np.array([True, fixed_now_ts]) - index
TestArithmetic
python
keras-team__keras
keras/src/metrics/probabilistic_metrics.py
{ "start": 1692, "end": 2759 }
class ____(reduction_metrics.MeanMetricWrapper):
    """Computes the Poisson metric between `y_true` and `y_pred`.

    Formula:

    ```python
    metric = y_pred - y_true * log(y_pred)
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    >>> m = keras.metrics.Poisson()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.49999997

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.99999994

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[keras.metrics.Poisson()])
    ```
    """

    def __init__(self, name="poisson", dtype=None):
        # Delegate the computation to the functional `poisson`;
        # MeanMetricWrapper handles accumulation and averaging.
        super().__init__(fn=poisson, name=name, dtype=dtype)

    def get_config(self):
        # `fn` is fixed, so only name/dtype need serializing.
        return {"name": self.name, "dtype": self.dtype}


@keras_export("keras.metrics.BinaryCrossentropy")
Poisson
python
sqlalchemy__sqlalchemy
test/dialect/postgresql/test_query.py
{ "start": 32754, "end": 42674 }
class ____(fixtures.TablesTest, AssertsCompiledSQL):
    """PostgreSQL full-text-search (``@@`` / ``match()``) round-trip and
    compilation tests."""

    __only_on__ = "postgresql >= 8.3"
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "cattable",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("description", String(50)),
        )
        Table(
            "matchtable",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("title", String(200)),
            Column("category_id", Integer, ForeignKey("cattable.id")),
        )

    @classmethod
    def insert_data(cls, connection):
        cattable, matchtable = cls.tables("cattable", "matchtable")
        connection.execute(
            cattable.insert(),
            [
                {"id": 1, "description": "Python"},
                {"id": 2, "description": "Ruby"},
            ],
        )
        connection.execute(
            matchtable.insert(),
            [
                {
                    "id": 1,
                    "title": "Agile Web Development with Rails",
                    "category_id": 2,
                },
                {"id": 2, "title": "Dive Into Python", "category_id": 1},
                {
                    "id": 3,
                    "title": "Programming Matz's Ruby",
                    "category_id": 2,
                },
                {
                    "id": 4,
                    "title": "The Definitive Guide to Django",
                    "category_id": 1,
                },
                {"id": 5, "title": "Python in a Nutshell", "category_id": 1},
            ],
        )

    def _strs_render_bind_casts(self, connection):
        # Whether this dialect renders ``::VARCHAR`` casts on string binds.
        return (
            connection.dialect._bind_typing_render_casts
            and String().dialect_impl(connection.dialect).render_bind_cast
        )

    @testing.requires.pyformat_paramstyle
    def test_expression_pyformat(self, connection):
        matchtable = self.tables.matchtable
        if self._strs_render_bind_casts(connection):
            self.assert_compile(
                matchtable.c.title.match("somstr"),
                "matchtable.title @@ plainto_tsquery(%(title_1)s::VARCHAR)",
            )
        else:
            self.assert_compile(
                matchtable.c.title.match("somstr"),
                "matchtable.title @@ plainto_tsquery(%(title_1)s)",
            )

    @testing.only_if("+asyncpg")
    def test_expression_positional(self, connection):
        matchtable = self.tables.matchtable
        if self._strs_render_bind_casts(connection):
            self.assert_compile(
                matchtable.c.title.match("somstr"),
                "matchtable.title @@ plainto_tsquery($1::VARCHAR)",
            )
        else:
            self.assert_compile(
                matchtable.c.title.match("somstr"),
                "matchtable.title @@ plainto_tsquery($1)",
            )

    @testing.combinations(
        (func.to_tsvector,),
        (func.to_tsquery,),
        (func.plainto_tsquery,),
        (func.phraseto_tsquery,),
        (func.websearch_to_tsquery, testing.skip_if("postgresql < 11")),
        argnames="to_ts_func",
    )
    @testing.variation("use_regconfig", [True, False, "literal"])
    def test_to_regconfig_fns(self, connection, to_ts_func, use_regconfig):
        """test #8977"""
        matchtable = self.tables.matchtable

        fn_name = to_ts_func().name

        if use_regconfig.literal:
            regconfig = literal("english", REGCONFIG)
        elif use_regconfig:
            regconfig = "english"
        else:
            regconfig = None

        if regconfig is None:
            if fn_name == "to_tsvector":
                fn = to_ts_func(matchtable.c.title).match("python")
            else:
                fn = func.to_tsvector(matchtable.c.title).op("@@")(
                    to_ts_func("python")
                )
        else:
            if fn_name == "to_tsvector":
                fn = to_ts_func(regconfig, matchtable.c.title).match("python")
            else:
                fn = func.to_tsvector(matchtable.c.title).op("@@")(
                    to_ts_func(regconfig, "python")
                )

        stmt = matchtable.select().where(fn).order_by(matchtable.c.id)
        results = connection.execute(stmt).fetchall()
        eq_([2, 5], [r.id for r in results])

    @testing.variation("use_regconfig", [True, False, "literal"])
    @testing.variation("include_options", [True, False])
    def test_ts_headline(self, connection, use_regconfig, include_options):
        """test #8977"""
        if use_regconfig.literal:
            regconfig = literal("english", REGCONFIG)
        elif use_regconfig:
            regconfig = "english"
        else:
            regconfig = None

        text = (
            "The most common type of search is to find all documents "
            "containing given query terms and return them in order of "
            "their similarity to the query."
        )
        tsquery = func.to_tsquery("english", "query & similarity")

        if regconfig is None:
            if include_options:
                fn = func.ts_headline(
                    text,
                    tsquery,
                    "MaxFragments=10, MaxWords=7, MinWords=3, "
                    "StartSel=<<, StopSel=>>",
                )
            else:
                fn = func.ts_headline(
                    text,
                    tsquery,
                )
        else:
            if include_options:
                fn = func.ts_headline(
                    regconfig,
                    text,
                    tsquery,
                    "MaxFragments=10, MaxWords=7, MinWords=3, "
                    "StartSel=<<, StopSel=>>",
                )
            else:
                fn = func.ts_headline(
                    regconfig,
                    text,
                    tsquery,
                )

        stmt = select(fn)

        if include_options:
            eq_(
                connection.scalar(stmt),
                "documents containing given <<query>> terms and return ... "
                "their <<similarity>> to the <<query>>",
            )
        else:
            eq_(
                connection.scalar(stmt),
                "containing given <b>query</b> terms and return them in "
                "order of their <b>similarity</b> to the <b>query</b>.",
            )

    def test_simple_match(self, connection):
        matchtable = self.tables.matchtable
        results = connection.execute(
            matchtable.select()
            .where(matchtable.c.title.match("python"))
            .order_by(matchtable.c.id)
        ).fetchall()
        eq_([2, 5], [r.id for r in results])

    def test_not_match(self, connection):
        matchtable = self.tables.matchtable
        results = connection.execute(
            matchtable.select()
            .where(~matchtable.c.title.match("python"))
            .order_by(matchtable.c.id)
        ).fetchall()
        eq_([1, 3, 4], [r.id for r in results])

    def test_simple_match_with_apostrophe(self, connection):
        matchtable = self.tables.matchtable
        results = connection.execute(
            matchtable.select().where(matchtable.c.title.match("Matz's"))
        ).fetchall()
        eq_([3], [r.id for r in results])

    def test_simple_derivative_match(self, connection):
        # Stemming: "nutshells" matches "Nutshell".
        matchtable = self.tables.matchtable
        results = connection.execute(
            matchtable.select().where(matchtable.c.title.match("nutshells"))
        ).fetchall()
        eq_([5], [r.id for r in results])

    def test_or_match(self, connection):
        matchtable = self.tables.matchtable
        results1 = connection.execute(
            matchtable.select()
            .where(
                or_(
                    matchtable.c.title.match("nutshells"),
                    matchtable.c.title.match("rubies"),
                )
            )
            .order_by(matchtable.c.id)
        ).fetchall()
        eq_([3, 5], [r.id for r in results1])

    def test_or_tsquery(self, connection):
        matchtable = self.tables.matchtable
        results2 = connection.execute(
            matchtable.select()
            .where(
                matchtable.c.title.bool_op("@@")(
                    func.to_tsquery("nutshells | rubies")
                )
            )
            .order_by(matchtable.c.id)
        ).fetchall()
        eq_([3, 5], [r.id for r in results2])

    def test_and_match(self, connection):
        matchtable = self.tables.matchtable
        results1 = connection.execute(
            matchtable.select().where(
                and_(
                    matchtable.c.title.match("python"),
                    matchtable.c.title.match("nutshells"),
                )
            )
        ).fetchall()
        eq_([5], [r.id for r in results1])

    def test_and_tsquery(self, connection):
        matchtable = self.tables.matchtable
        results2 = connection.execute(
            matchtable.select().where(
                matchtable.c.title.op("@@")(
                    func.to_tsquery("python & nutshells")
                )
            )
        ).fetchall()
        eq_([5], [r.id for r in results2])

    def test_match_across_joins(self, connection):
        cattable, matchtable = self.tables("cattable", "matchtable")
        results = connection.execute(
            matchtable.select()
            .where(
                and_(
                    cattable.c.id == matchtable.c.category_id,
                    or_(
                        cattable.c.description.match("Ruby"),
                        matchtable.c.title.match("nutshells"),
                    ),
                )
            )
            .order_by(matchtable.c.id)
        ).fetchall()
        eq_([1, 3, 5], [r.id for r in results])
MatchTest
python
django__django
django/db/models/functions/datetime.py
{ "start": 4759, "end": 4946 }
class ____(Extract):
    """
    Return Sunday=1 through Saturday=7.

    To replicate this in Python: (mydatetime.isoweekday() % 7) + 1
    """

    # Registered as the ``__week_day`` lookup on date/datetime fields.
    lookup_name = "week_day"
ExtractWeekDay
python
google__jax
tests/pallas/tpu_splash_attention_kernel_test.py
{ "start": 10252, "end": 26638 }
class ____(PallasBaseTest): @parameterized.product( is_mqa=(False, True), is_segmented=(False, True), is_dynamic_mask=(False, True), ) @hp.given(hps.data()) def test_splash_attention(self, is_mqa, is_segmented, is_dynamic_mask, data): seed = data.draw(seed_strategy()) key = random.key(seed) k1, k2, k3 = random.split(key, 3) ( q_seq_len, kv_seq_len, num_q_heads, num_kv_heads, head_dim_qk, head_dim_v, dtype, ) = data.draw(mha_strategy()) # Avoid segment ids for rectangular matrices, as its hard to enforce # valid masks (non-0 rows). hp.assume(q_seq_len == kv_seq_len or not is_segmented) q = random.uniform(k1, (num_q_heads, q_seq_len, head_dim_qk), dtype=dtype) if is_mqa: k = random.uniform(k2, (kv_seq_len, head_dim_qk), dtype=dtype) v = random.uniform(k3, (kv_seq_len, head_dim_v), dtype=dtype) else: k = random.uniform( k2, (num_kv_heads, kv_seq_len, head_dim_qk), dtype=dtype ) v = random.uniform( k3, (num_kv_heads, kv_seq_len, head_dim_v), dtype=dtype ) segment_ids = None if is_segmented: assert q_seq_len == kv_seq_len segment_ids = data.draw(segment_ids_strategy(q_seq_len)) attn_logits_soft_cap = data.draw(attn_logits_soft_cap_strategy()) masks = data.draw(mha_mask_strategy(q_seq_len, kv_seq_len, num_q_heads)) mask = mask_lib.MultiHeadMask(tuple(m.get_mask() for m in masks)) if is_dynamic_mask: mask = jnp.array(mask[:, :, :]) block_sizes = data.draw(block_sizes_strategy(q_seq_len, kv_seq_len)) if is_mqa: attn_ref = splash.make_masked_mqa_reference(mask) attn = splash.make_splash_mqa_single_device( mask, block_sizes=block_sizes, attn_logits_soft_cap=attn_logits_soft_cap, interpret=self.INTERPRET, ) else: attn_ref = splash.make_masked_mha_reference(mask) attn = splash.make_splash_mha_single_device( mask, block_sizes=block_sizes, attn_logits_soft_cap=attn_logits_soft_cap, interpret=self.INTERPRET, ) o = attn(q, k, v, segment_ids) o_ref = attn_ref( q.astype(np.float32), k.astype(np.float32), v.astype(np.float32), segment_ids, attn_logits_soft_cap=attn_logits_soft_cap, ) 
self._assert_allclose(o, o_ref, atol=3e-3, rtol=3e-3) @parameterized.product( is_mqa=(False, True), is_segmented=(False, True), is_dynamic_mask=(False, True), ) @hp.given(hps.data()) def test_splash_attention_fwd( self, is_mqa, is_segmented, is_dynamic_mask, data ): seed = data.draw(seed_strategy()) key = random.key(seed) k1, k2, k3 = random.split(key, 3) ( q_seq_len, kv_seq_len, num_q_heads, num_kv_heads, head_dim_qk, head_dim_v, dtype, ) = data.draw(mha_strategy()) # Avoid segment ids for rectangular matrices, as its hard to enforce # valid masks (non-0 rows). hp.assume(q_seq_len == kv_seq_len or not is_segmented) q = random.uniform(k1, (num_q_heads, q_seq_len, head_dim_qk), dtype=dtype) if is_mqa: k = random.uniform(k2, (kv_seq_len, head_dim_qk), dtype=dtype) v = random.uniform(k3, (kv_seq_len, head_dim_v), dtype=dtype) else: k = random.uniform( k2, (num_kv_heads, kv_seq_len, head_dim_qk), dtype=dtype ) v = random.uniform( k3, (num_kv_heads, kv_seq_len, head_dim_v), dtype=dtype ) segment_ids = None if is_segmented: assert q_seq_len == kv_seq_len segment_ids = data.draw(segment_ids_strategy(q_seq_len)) attn_logits_soft_cap = data.draw(attn_logits_soft_cap_strategy()) masks = data.draw(mha_mask_strategy(q_seq_len, kv_seq_len, num_q_heads)) mask = mask_lib.MultiHeadMask(tuple(m.get_mask() for m in masks)) if is_dynamic_mask: mask = jnp.array(mask[:, :, :]) block_sizes = data.draw(block_sizes_strategy(q_seq_len, kv_seq_len)) if is_mqa: attn_ref = splash.make_masked_mqa_reference(mask) attn = splash.make_splash_mqa_single_device( mask, block_sizes=block_sizes, save_residuals=True, attn_logits_soft_cap=attn_logits_soft_cap, interpret=self.INTERPRET, ) else: attn_ref = splash.make_masked_mha_reference(mask) attn = splash.make_splash_mha_single_device( mask, block_sizes=block_sizes, save_residuals=True, attn_logits_soft_cap=attn_logits_soft_cap, interpret=self.INTERPRET, ) attn_ref = partial( attn_ref, save_residuals=True, attn_logits_soft_cap=attn_logits_soft_cap, ) o, 
(logsumexp,) = attn(q, k, v, segment_ids) o_ref, (logsumexp_ref,) = attn_ref( q.astype(jnp.float32), k.astype(jnp.float32), v.astype(jnp.float32), segment_ids, ) self._assert_allclose(o, o_ref, atol=3e-3, rtol=3e-3) self._assert_allclose(logsumexp, logsumexp_ref, atol=1e-3, rtol=1e-3) @parameterized.product( is_segmented=(False, True), ) @hp.given(hps.data()) def test_splash_attention_custom_bwd(self, is_segmented, data): seed = data.draw(seed_strategy(), label="seed") key = random.key(1 + seed) k1, k2, k3, k4 = random.split(key, 4) q_seq_len, kv_seq_len, head_dim_qk, head_dim_v, dtype = data.draw( attention_strategy() ) # Avoid segment ids for rectangular matrices, as it's hard to enforce # valid masks (non-0 rows). hp.assume(q_seq_len == kv_seq_len or not is_segmented) q = random.uniform(k1, (q_seq_len, head_dim_qk), dtype=dtype) k = random.uniform(k2, (kv_seq_len, head_dim_qk), dtype=dtype) v = random.uniform(k3, (kv_seq_len, head_dim_v), dtype=dtype) segment_ids = None if is_segmented: assert q_seq_len == kv_seq_len segment_ids = data.draw(segment_ids_strategy(q_seq_len)) masks = data.draw(mha_mask_strategy(q_seq_len, kv_seq_len, 1)) mask = jnp.array(masks[0].get_mask()[:, :]) attn_logits_soft_cap = data.draw(attn_logits_soft_cap_strategy(), label="logit_cap") attn_ref = partial(splash.attention_reference, mask, attn_logits_soft_cap=attn_logits_soft_cap) attn_custom = partial(splash.attention_reference_custom, mask, attn_logits_soft_cap=attn_logits_soft_cap) attn_custom_vanilla = partial(splash.attention_reference_custom, mask, custom_type="vanilla", attn_logits_soft_cap=attn_logits_soft_cap) o_ref, attn_vjp_ref = jax.vjp(attn_ref, q, k, v, segment_ids) q32, k32, v32 = jax.tree.map(lambda x: x.astype(jnp.float32), (q, k, v)) o_custom = attn_custom(q32, k32, v32, segment_ids) _, attn_vjp = jax.vjp(attn_custom, q32, k32, v32, segment_ids) _, attn_vanilla_vjp = jax.vjp(attn_custom_vanilla, q32, k32, v32, segment_ids) do = random.uniform(k4, o_custom.shape, 
dtype=o_custom.dtype) / 10. # These should be identical self._assert_allclose(o_custom, o_ref, atol=1e-5, rtol=1e-5) dq, dk, dv, _ = attn_vjp(do) dq_vanilla, dk_vanilla, dv_vanilla, _ = attn_vanilla_vjp(do) dq_ref, dk_ref, dv_ref, _ = attn_vjp_ref(do) # These will be different because of reassociation if dtype == jnp.bfloat16: atols = {"dv": 4e-3, "dq": 0.05, "dk": 0.05} atols_v = {"dv": 8e-3, "dq": 2e-2, "dk": 2e-2} rtols = {"dv": 4e-3, "dq": 0.05, "dk": 0.05} rtols_v = {"dv": 8e-3, "dq": 2e-2, "dk": 2e-2} if jtu.is_device_tpu(version=5): atols["dk"] = 0.065 elif dtype == jnp.float32: atols = {"dv": 3e-3, "dq": 0.05, "dk": 0.05} atols_v = {"dv": 4e-4, "dq": 2e-3, "dk": 3e-3} rtols = {"dv": 3e-3, "dq": 0.05, "dk": 0.05} rtols_v = {"dv": 8e-3, "dq": 5e-4, "dk": 5e-4} if jtu.is_device_tpu(version=4): atols["dk"] = 0.09 else: raise NotImplementedError self._assert_allclose( dv_vanilla, dv_ref, atol=atols_v["dv"], rtol=rtols_v["dv"] ) self._assert_allclose(dv, dv_ref, atol=atols["dv"], rtol=rtols["dv"]) self._assert_allclose( dq_vanilla, dq_ref, atol=atols_v["dq"], rtol=rtols_v["dq"] ) self._assert_allclose(dq, dq_ref, atol=atols["dq"], rtol=rtols["dq"]) self._assert_allclose( dk_vanilla, dk_ref, atol=atols_v["dk"], rtol=rtols_v["dk"] ) self._assert_allclose(dk, dk_ref, atol=atols["dk"], rtol=rtols["dk"]) @parameterized.product( is_mqa=(False, True), is_segmented=(False, True), downcast_smem_data=(False, True), use_fused_bwd_kernel=(False, True), use_dynamic_mask=(False, True), use_sinks=(False, True), ) @hp.given(hps.data()) def test_splash_attention_bwd( self, is_mqa, is_segmented, downcast_smem_data, use_fused_bwd_kernel, use_dynamic_mask, use_sinks, data, ): seed = data.draw(seed_strategy()) key = random.key(seed) k1, k2, k3, k4, k_sinks = random.split(key, 5) ( q_seq_len, kv_seq_len, num_q_heads, num_kv_heads, head_dim_qk, head_dim_v, dtype, ) = data.draw(mha_strategy()) # Avoid segment ids for rectangular matrices, as it's hard to enforce # valid masks (non-0 
rows). hp.assume(q_seq_len == kv_seq_len or not is_segmented) q = random.uniform(k1, (num_q_heads, q_seq_len, head_dim_qk), dtype=dtype) if is_mqa: k = random.uniform(k2, (kv_seq_len, head_dim_qk), dtype=dtype) v = random.uniform(k3, (kv_seq_len, head_dim_v), dtype=dtype) else: k = random.uniform( k2, (num_kv_heads, kv_seq_len, head_dim_qk), dtype=dtype ) v = random.uniform( k3, (num_kv_heads, kv_seq_len, head_dim_v), dtype=dtype ) if use_sinks: sinks = 1.0 * random.uniform(k_sinks, (num_q_heads,), dtype=dtype) else: sinks = None segment_ids = None if is_segmented: assert q_seq_len == kv_seq_len segment_ids = data.draw(segment_ids_strategy(q_seq_len)) attn_logits_soft_cap = data.draw(attn_logits_soft_cap_strategy()) masks = data.draw(mha_mask_strategy(q_seq_len, kv_seq_len, num_q_heads)) mask = mask_lib.MultiHeadMask(tuple(m.get_mask() for m in masks)) if use_dynamic_mask: mask = jnp.array(mask[:, :, :]) block_sizes = data.draw( block_sizes_strategy(q_seq_len, kv_seq_len, include_bwd_blocks=True, use_fused_bwd_kernel=use_fused_bwd_kernel) ) if is_mqa: attn_ref = splash.make_masked_mqa_reference(mask, backward_impl="custom") attn = splash.make_splash_mqa_single_device( mask, block_sizes=block_sizes, downcast_smem_data=downcast_smem_data, attn_logits_soft_cap=attn_logits_soft_cap, interpret=self.INTERPRET, ) else: attn_ref = splash.make_masked_mha_reference(mask, backward_impl="custom") attn = splash.make_splash_mha_single_device( mask, block_sizes=block_sizes, downcast_smem_data=downcast_smem_data, attn_logits_soft_cap=attn_logits_soft_cap, interpret=self.INTERPRET, ) if use_sinks: o, attn_vjp = jax.vjp(attn, q, k, v, segment_ids, sinks) else: o, attn_vjp = jax.vjp(attn, q, k, v, segment_ids) q32, k32, v32 = jax.tree.map( lambda x: x.astype(jnp.float32), (q, k, v) ) o_ref, (logsumexp,) = attn_ref( q32, k32, v32, segment_ids, sinks=sinks, save_residuals=True, attn_logits_soft_cap=attn_logits_soft_cap, ) self._assert_allclose(o, o_ref, atol=3e-3, rtol=3e-3) do = 
random.uniform(k4, o.shape, dtype=o.dtype) if use_sinks: dq, dk, dv, _, dsinks = attn_vjp(do) else: dq, dk, dv, _ = attn_vjp(do) dsinks = None def bwd( mask, q, k, v, segment_ids, sinks, o, logsumexp, do ) -> tuple[jax.Array, jax.Array, jax.Array]: _, dq, dk, dv, _, dsinks = splash._attention_reference_custom_bwd( splash.DEFAULT_MASK_VALUE, False, "flash", attn_logits_soft_cap, (mask, q, k, v, segment_ids, sinks, o, logsumexp), do, ) return dq, dk, dv, dsinks is_grouped = not is_mqa and num_kv_heads < num_q_heads assert num_q_heads % num_kv_heads == 0 head_multiplier = num_q_heads // num_kv_heads if is_mqa: bwd = jax.vmap(bwd, in_axes=(0, 0, None, None, None, 0, 0, 0, 0)) else: bwd = jax.vmap(bwd, in_axes=(0, 0, 0, 0, None, 0, 0, 0, 0)) # Interleave the KV heads to match the corresponding Q heads. if is_grouped: k32 = jnp.repeat(k32, head_multiplier, axis=0) v32 = jnp.repeat(v32, head_multiplier, axis=0) dq_ref, dk_ref, dv_ref, dsinks_ref = bwd( mask[:, :, :], q32, k32, v32, segment_ids, sinks, o.astype(jnp.float32), logsumexp, do.astype(jnp.float32), ) if is_mqa: dk_ref, dv_ref = dk_ref.sum(axis=0), dv_ref.sum(axis=0) elif is_grouped: # Perform the sum reduction across the head_multiplier dimension only. # So that the output still has KV heads. 
dk_ref = dk_ref.reshape(num_kv_heads, head_multiplier, *dk_ref.shape[1:]) dv_ref = dv_ref.reshape(num_kv_heads, head_multiplier, *dv_ref.shape[1:]) dk_ref, dv_ref = dk_ref.sum(axis=1), dv_ref.sum(axis=1) self._assert_allclose(dv, dv_ref, atol=2e-2, rtol=3e-2) self._assert_allclose(dq, dq_ref, atol=2e-2, rtol=3e-2) self._assert_allclose(dk, dk_ref, atol=2e-2, rtol=3e-2) if use_sinks: self._assert_allclose(dsinks, dsinks_ref, atol=5e-3, rtol=3e-2) def test_grid_shrinking(self): """Make sure that grid shrinking does not change the attention output.""" class IdentityMask(mask_lib._ComputableMask): """Identity mask that is guaranteed to trigger grid shrinking.""" def __init__( self, shape: tuple[int, int], shard_count: int = 1, ): def identity_mask_function(q_ids, kv_ids): return q_ids == kv_ids super().__init__( shape=shape, mask_function=identity_mask_function, shard_count=shard_count, ) def __eq__(self, other: object): if not isinstance(other, type(self)): return NotImplemented return self.shape == other.shape and np.array_equal( self.q_sequence, other.q_sequence ) def __hash__(self): return hash(( type(self), self.shape, self.q_sequence.tobytes() if self.q_sequence is not None else None, )) # Use a sequence length greater than the default block size to trigger # the grid shrinking logic. 
seq_len = 256 head_dim = 128 key = random.key(42) k1, k2, k3 = random.split(key, 3) q = random.uniform(k1, (1, seq_len, head_dim), dtype=jnp.float32) k = random.uniform(k2, (seq_len, head_dim), dtype=jnp.float32) v = random.uniform(k3, (seq_len, head_dim), dtype=jnp.float32) identity_mask = mask_lib.MultiHeadMask([IdentityMask((seq_len, seq_len))]) process_mask_path = "jax.experimental.pallas.ops.tpu.splash_attention.splash_attention_mask_info.process_mask" process_mask_shrink = lambda *args, **kwargs: process_mask( *args, **kwargs, shrink_grid=True ) process_mask_no_shrink = lambda *args, **kwargs: process_mask( *args, **kwargs, shrink_grid=False ) with unittest.mock.patch(process_mask_path, process_mask_shrink): shrink_out = splash.make_splash_mqa_single_device(identity_mask)(q, k, v) with unittest.mock.patch(process_mask_path, process_mask_no_shrink): no_shrink_out = splash.make_splash_mqa_single_device(identity_mask)( q, k, v ) np.testing.assert_array_equal(shrink_out, no_shrink_out) if __name__ == "__main__": absltest.main(testLoader=jtu.JaxTestLoader())
SplashAttentionTest
python
pytorch__pytorch
torch/onnx/_internal/exporter/_building.py
{ "start": 20862, "end": 29167 }
class ____(evaluator.Evaluator): """An onnxscript Evaluator that captures the graph into ONNX IR.""" def __init__( self, opset: onnxscript.values.Opset, constant_farm: dict[Any, ir.Value] ) -> None: self.nodes: list[ir.Node] = [] self.opset = opset self.functions: dict[ ir.OperatorIdentifier, onnxscript.OnnxFunction | ir.Function ] = {} self.constant_farm = constant_farm def _call_op( self, op_signature: _schemas.OpSignature, named_inputs: dict[str, AllowedArgType], named_attrs: dict[str, ValidAttributeType], num_outputs: int, ) -> Sequence[_tensors.SymbolicTensor]: """Record nodes for the given opschema and arguments. Args: op_signature: The OpSchema containing the node signature. named_inputs: The mapping of parameter names to their arguments. named_attrs: The mapping of attribute names to their values. """ type_binding = _resolve_parameter_dtypes(op_signature, named_inputs) try: converted_named_inputs = _process_python_constants( op_signature, named_inputs, type_binding, self.constant_farm, self.opset ) converted_named_inputs = _process_python_sequences( op_signature, converted_named_inputs, # type: ignore[arg-type] type_binding, self.constant_farm, self.opset, ) except Exception as e: raise _errors.GraphConstructionError( f"Error processing Python constants for operator '{op_signature.domain}::{op_signature.name}'. " f"named_inputs={named_inputs}, named_attrs={named_attrs}, opset={self.opset}, op_signature={op_signature}." ) from e try: self.nodes.append( node := _construct_node( op_signature, converted_named_inputs, named_attrs, self.opset, num_outputs, ) ) except Exception as e: raise _errors.GraphConstructionError( f"Error constructing node for operator '{op_signature.domain}::{op_signature.name}'. " f"named_inputs={named_inputs}, converted_named_inputs={converted_named_inputs}, " f"named_attrs={named_attrs}, opset={self.opset}, op_signature={op_signature}." 
) from e return node.outputs # type: ignore[return-value] def eval( self, schema: onnx.defs.OpSchema, args: Sequence[AllowedArgType], # type: ignore[override] kwargs: Mapping[str, AllowedArgType], ) -> _tensors.SymbolicTensor | Sequence[_tensors.SymbolicTensor]: try: op_signature = _schemas.OpSignature.from_opschema(schema) named_inputs, named_attrs = _construct_named_inputs_and_attrs( op_signature, args, kwargs ) # TODO(justinchuby): Handle cast if schema.name == "CastLike": assert len(named_inputs) == 2 # Skip CastLike if the input and output types are the same src_input = named_inputs["input"] target_type = named_inputs["target_type"] if ( isinstance(src_input, ir.Value) and isinstance(target_type, ir.Value) and src_input.dtype is not None and target_type.dtype is not None ): # dtypes are available if src_input.dtype == target_type.dtype: # Same type. No cast needed return src_input # type: ignore[return-value] else: # Create a Cast node return self.opset.Cast(src_input, to=target_type.dtype) # type: ignore[union-attr,return-value] num_outputs = _determine_output_number(op_signature, named_attrs) outputs = self._call_op( op_signature, named_inputs, named_attrs, num_outputs ) if len(outputs) == 1: return outputs[0] return outputs except Exception as e: raise _errors.GraphConstructionError( f"Error calling operator '{schema.name}' with args {args} and kwargs {kwargs}." 
) from e def eval_function( # type: ignore[override] self, function: onnxscript.OnnxFunction, args: Sequence[AllowedArgType], kwargs: Mapping[str, AllowedArgType], ) -> _tensors.SymbolicTensor | Sequence[_tensors.SymbolicTensor] | bool | int: try: # NOTE: signature should be written to function in the registration process if hasattr(function, "_pt_onnx_signature"): op_signature = function._pt_onnx_signature # type: ignore[attr-defined] else: op_signature = _schemas.OpSignature.from_function( function, function.function_ir.domain, function.name, opset_version=function.opset.version, ) function._pt_onnx_signature = op_signature # type: ignore[attr-defined] named_inputs, named_attrs = _construct_named_inputs_and_attrs( op_signature, args, kwargs ) # TODO(after torchlib migration): Remove traceable function handling # NOTE: We need to call traceable functions after the _construct_named_inputs_and_attrs # call because it will filter out the unexpected kwargs for us. if function.traceable: # Trace the function call instead of adding the function as a node # Turn the ir.Attr objects into Python constants first named_attrs = { name: attr.value if isinstance(attr, ir.Attr) else attr for name, attr in named_attrs.items() } # Use the type binding to resolve the dtypes of the inputs, and # convert Python constants to Constant nodes type_binding = _resolve_parameter_dtypes(op_signature, named_inputs) try: # _process_python_sequences is not here because we want to preserve python list # properties for the function call converted_named_inputs = _process_python_constants( op_signature, named_inputs, type_binding, self.constant_farm, self.opset, ) except Exception as e: raise _errors.GraphConstructionError( f"Error processing Python constants for operator '{op_signature.domain}::{op_signature.name}'. " f"named_inputs={named_inputs}, named_attrs={named_attrs}, opset={self.opset}, op_signature={op_signature}." 
) from e return function.function(**converted_named_inputs, **named_attrs) outputs = self._call_op( op_signature, named_inputs, named_attrs, len(op_signature.outputs), ) self.functions[(function.function_ir.domain, function.name, "")] = function if len(outputs) == 1: return outputs[0] return outputs except Exception as e: try: source_file = inspect.getsourcefile(function.function) _, lineno = inspect.getsourcelines(function.function) except Exception: source_file = lineno = None raise _errors.GraphConstructionError( f"Error calling function '{function.name}' with args {args} and kwargs {kwargs}." + f" The function is defined at '{source_file}:{lineno}'." if source_file else "" ) from e
OpRecorder
python
encode__django-rest-framework
rest_framework/serializers.py
{ "start": 12808, "end": 21593 }
class ____(BaseSerializer, metaclass=SerializerMetaclass): default_error_messages = { 'invalid': _('Invalid data. Expected a dictionary, but got {datatype}.') } def set_value(self, dictionary, keys, value): """ Similar to Python's built in `dictionary[key] = value`, but takes a list of nested keys instead of a single key. set_value({'a': 1}, [], {'b': 2}) -> {'a': 1, 'b': 2} set_value({'a': 1}, ['x'], 2) -> {'a': 1, 'x': 2} set_value({'a': 1}, ['x', 'y'], 2) -> {'a': 1, 'x': {'y': 2}} """ if not keys: dictionary.update(value) return for key in keys[:-1]: if key not in dictionary: dictionary[key] = {} dictionary = dictionary[key] dictionary[keys[-1]] = value @cached_property def fields(self): """ A dictionary of {field_name: field_instance}. """ # `fields` is evaluated lazily. We do this to ensure that we don't # have issues importing modules that use ModelSerializers as fields, # even if Django's app-loading stage has not yet run. fields = BindingDict(self) for key, value in self.get_fields().items(): fields[key] = value return fields @property def _writable_fields(self): for field in self.fields.values(): if not field.read_only: yield field @property def _readable_fields(self): for field in self.fields.values(): if not field.write_only: yield field def get_fields(self): """ Returns a dictionary of {field_name: field_instance}. """ # Every new serializer is created with a clone of the field instances. # This allows users to dynamically modify the fields on a serializer # instance without affecting every other serializer instance. return copy.deepcopy(self._declared_fields) def get_validators(self): """ Returns a list of validator callables. """ # Used by the lazily-evaluated `validators` property. 
meta = getattr(self, 'Meta', None) validators = getattr(meta, 'validators', None) return list(validators) if validators else [] def get_initial(self): if hasattr(self, 'initial_data'): # initial_data may not be a valid type if not isinstance(self.initial_data, Mapping): return {} return { field_name: field.get_value(self.initial_data) for field_name, field in self.fields.items() if (field.get_value(self.initial_data) is not empty) and not field.read_only } return { field.field_name: field.get_initial() for field in self.fields.values() if not field.read_only } def get_value(self, dictionary): # We override the default field access in order to support # nested HTML forms. if html.is_html_input(dictionary): return html.parse_html_dict(dictionary, prefix=self.field_name) or empty return dictionary.get(self.field_name, empty) def run_validation(self, data=empty): """ We override the default `run_validation`, because the validation performed by validators and the `.validate()` method should be coerced into an error dictionary with a 'non_fields_error' key. """ (is_empty_value, data) = self.validate_empty_values(data) if is_empty_value: return data value = self.to_internal_value(data) try: self.run_validators(value) value = self.validate(value) assert value is not None, '.validate() should return the validated data' except (ValidationError, DjangoValidationError) as exc: raise ValidationError(detail=as_serializer_error(exc)) return value def _read_only_defaults(self): fields = [ field for field in self.fields.values() if (field.read_only) and (field.default != empty) and (field.source != '*') and ('.' not in field.source) ] defaults = {} for field in fields: try: default = field.get_default() except SkipField: continue defaults[field.source] = default return defaults def run_validators(self, value): """ Add read_only fields with defaults to value before running validators. 
""" if isinstance(value, dict): to_validate = self._read_only_defaults() to_validate.update(value) else: to_validate = value super().run_validators(to_validate) def to_internal_value(self, data): """ Dict of native values <- Dict of primitive datatypes. """ if not isinstance(data, Mapping): message = self.error_messages['invalid'].format( datatype=type(data).__name__ ) raise ValidationError({ api_settings.NON_FIELD_ERRORS_KEY: [message] }, code='invalid') ret = {} errors = {} fields = self._writable_fields for field in fields: validate_method = getattr(self, 'validate_' + field.field_name, None) primitive_value = field.get_value(data) try: validated_value = field.run_validation(primitive_value) if validate_method is not None: validated_value = validate_method(validated_value) except ValidationError as exc: errors[field.field_name] = exc.detail except DjangoValidationError as exc: errors[field.field_name] = get_error_detail(exc) except SkipField: pass else: self.set_value(ret, field.source_attrs, validated_value) if errors: raise ValidationError(errors) return ret def to_representation(self, instance): """ Object instance -> Dict of primitive datatypes. """ ret = {} fields = self._readable_fields for field in fields: try: attribute = field.get_attribute(instance) except SkipField: continue # We skip `to_representation` for `None` values so that fields do # not have to explicitly deal with that case. # # For related fields with `use_pk_only_optimization` we need to # resolve the pk value. 
check_for_none = attribute.pk if isinstance(attribute, PKOnlyObject) else attribute if check_for_none is None: ret[field.field_name] = None else: ret[field.field_name] = field.to_representation(attribute) return ret def validate(self, attrs): return attrs def __repr__(self): return representation.serializer_repr(self, indent=1) # The following are used for accessing `BoundField` instances on the # serializer, for the purposes of presenting a form-like API onto the # field values and field errors. def __iter__(self): for field in self.fields.values(): yield self[field.field_name] def __getitem__(self, key): field = self.fields[key] value = self.data.get(key) error = self.errors.get(key) if hasattr(self, '_errors') else None if isinstance(field, Serializer): return NestedBoundField(field, value, error) if isinstance(field, JSONField): return JSONBoundField(field, value, error) return BoundField(field, value, error) # Include a backlink to the serializer class on return objects. # Allows renderers such as HTMLFormRenderer to get the full field info. @property def data(self): ret = super().data return ReturnDict(ret, serializer=self) @property def errors(self): ret = super().errors if isinstance(ret, list) and len(ret) == 1 and getattr(ret[0], 'code', None) == 'null': # Edge case. Provide a more descriptive error than # "this field may not be null", when no data is passed. detail = ErrorDetail('No data provided', code='null') ret = {api_settings.NON_FIELD_ERRORS_KEY: [detail]} return ReturnDict(ret, serializer=self) # There's some replication of `ListField` here, # but that's probably better than obfuscating the call hierarchy.
Serializer
python
tensorflow__tensorflow
tensorflow/python/distribute/values.py
{ "start": 48918, "end": 49623 }
class ____(saveable_object.SaveableObject): """Class for defining how to restore a SyncOnReadVariable.""" def __init__(self, sync_on_read_variable, name): self._sync_on_read_variable = sync_on_read_variable tensor, spec = values_util.get_on_read_saveable( sync_on_read_variable, sync_on_read_variable._primary, name) super(_SyncOnReadSaveable, self).__init__(tensor, spec, name) def restore(self, restored_tensors, restored_shapes): """Restore the same value into all variables.""" tensor, = restored_tensors return values_util.get_on_read_restore_ops( self._sync_on_read_variable, tensor, self._sync_on_read_variable.aggregation)
_SyncOnReadSaveable
python
sanic-org__sanic
sanic/asgi.py
{ "start": 595, "end": 4069 }
class ____: def __init__( self, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend ) -> None: self.sanic_app = sanic_app self.scope = scope self.receive = receive self.send = send if "server.init.before" in self.sanic_app.signal_router.name_index: logger.debug( 'You have set a listener for "before_server_start" ' "in ASGI mode. " "It will be executed as early as possible, but not before " "the ASGI server is started.", extra={"verbosity": 1}, ) if "server.shutdown.after" in self.sanic_app.signal_router.name_index: logger.debug( 'You have set a listener for "after_server_stop" ' "in ASGI mode. " "It will be executed as late as possible, but not after " "the ASGI server is stopped.", extra={"verbosity": 1}, ) async def startup(self) -> None: """ Gather the listeners to fire on server start. Because we are using a third-party server and not Sanic server, we do not have access to fire anything BEFORE the server starts. Therefore, we fire before_server_start and after_server_start in sequence since the ASGI lifespan protocol only supports a single startup event. """ await self.sanic_app._startup() await self.sanic_app._server_event("init", "before") await self.sanic_app._server_event("init", "after") if not isinstance(self.sanic_app.config.USE_UVLOOP, Default): warnings.warn( "You have set the USE_UVLOOP configuration option, but Sanic " "cannot control the event loop when running in ASGI mode." "This option will be ignored." ) async def shutdown(self) -> None: """ Gather the listeners to fire on server stop. Because we are using a third-party server and not Sanic server, we do not have access to fire anything AFTER the server stops. Therefore, we fire before_server_stop and after_server_stop in sequence since the ASGI lifespan protocol only supports a single shutdown event. 
""" await self.sanic_app._server_event("shutdown", "before") await self.sanic_app._server_event("shutdown", "after") async def __call__(self) -> None: while True: message = await self.receive() if message["type"] == "lifespan.startup": try: await self.startup() except Exception as e: error_logger.exception(e) await self.send( {"type": "lifespan.startup.failed", "message": str(e)} ) else: await self.send({"type": "lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": try: await self.shutdown() except Exception as e: error_logger.exception(e) await self.send( {"type": "lifespan.shutdown.failed", "message": str(e)} ) else: await self.send({"type": "lifespan.shutdown.complete"}) return
Lifespan
python
spack__spack
lib/spack/spack/spec_parser.py
{ "start": 28219, "end": 29468 }
class ____(spack.error.SpecSyntaxError): """Error when parsing tokens""" def __init__(self, message, token, text): message += f"\n{text}" if token: underline = f"\n{' '*token.start}{'^'*(token.end - token.start)}" message += color.colorize(f"@*r{{{underline}}}") super().__init__(message) def strip_quotes_and_unescape(string: str) -> str: """Remove surrounding single or double quotes from string, if present.""" match = STRIP_QUOTES.match(string) if not match: return string # replace any escaped quotes with bare quotes quote, result = match.groups() return result.replace(rf"\{quote}", quote) def quote_if_needed(value: str) -> str: """Add quotes around the value if it requires quotes. This will add quotes around the value unless it matches :data:`NO_QUOTES_NEEDED`. This adds: * single quotes by default * double quotes around any value that contains single quotes If double quotes are used, we json-escape the string. That is, we escape ``\\``, ``"``, and control codes. """ if NO_QUOTES_NEEDED.match(value): return value return json.dumps(value) if "'" in value else f"'{value}'"
SpecParsingError
python
scipy__scipy
scipy/optimize/tests/test__shgo.py
{ "start": 564, "end": 1228 }
class ____: def __init__(self, bounds, expected_x, expected_fun=None, expected_xl=None, expected_funl=None): self.bounds = bounds self.expected_x = expected_x self.expected_fun = expected_fun self.expected_xl = expected_xl self.expected_funl = expected_funl def wrap_constraints(g): cons = [] if g is not None: if not isinstance(g, tuple | list): g = (g,) else: pass for g in g: cons.append({'type': 'ineq', 'fun': g}) cons = tuple(cons) else: cons = None return cons
StructTestFunction
python
lxml__lxml
src/lxml/tests/test_xslt.py
{ "start": 49361, "end": 67131 }
class ____(HelperTestCase): """Tests for extension elements in XSLT.""" def test_extension_element(self): tree = self.parse('<a><b>B</b></a>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns" exclude-result-prefixes="myns"> <xsl:template match="a"> <A><myns:myext>b</myns:myext></A> </xsl:template> </xsl:stylesheet>''') class MyExt(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): child = etree.Element(self_node.text) child.text = 'X' output_parent.append(child) extensions = { ('testns', 'myext') : MyExt() } result = tree.xslt(style, extensions=extensions) self.assertEqual(self._rootstring(result), b'<A><b>X</b></A>') def test_extension_element_doc_context(self): tree = self.parse('<a><b>B</b></a>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns" exclude-result-prefixes="myns"> <xsl:template match="/"> <A><myns:myext>b</myns:myext></A> </xsl:template> </xsl:stylesheet>''') tags = [] class MyExt(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): tags.append(input_node.tag) extensions = { ('testns', 'myext') : MyExt() } result = tree.xslt(style, extensions=extensions) self.assertEqual(tags, ['a']) def test_extension_element_comment_pi_context(self): tree = self.parse('<?test toast?><a><!--a comment--><?another pi?></a>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns" exclude-result-prefixes="myns"> <xsl:template match="/"> <ROOT><xsl:apply-templates /></ROOT> </xsl:template> <xsl:template match="comment()"> <A><myns:myext>b</myns:myext></A> </xsl:template> <xsl:template match="processing-instruction()"> <A><myns:myext>b</myns:myext></A> </xsl:template> </xsl:stylesheet>''') 
text = [] class MyExt(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): text.append(input_node.text) extensions = { ('testns', 'myext') : MyExt() } result = tree.xslt(style, extensions=extensions) self.assertEqual(text, ['toast', 'a comment', 'pi']) def _test_extension_element_attribute_context(self): # currently not supported tree = self.parse('<a test="A"><b attr="B"/></a>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns" exclude-result-prefixes="myns"> <xsl:template match="@test"> <A><myns:myext>b</myns:myext></A> </xsl:template> <xsl:template match="@attr"> <A><myns:myext>b</myns:myext></A> </xsl:template> </xsl:stylesheet>''') text = [] class MyExt(etree.XSLTExtension): def execute(self, context, self_node, attr_value, output_parent): text.append(attr_value) extensions = { ('testns', 'myext') : MyExt() } result = tree.xslt(style, extensions=extensions) self.assertEqual(text, ['A', 'B']) def test_extension_element_content(self): tree = self.parse('<a><b>B</b></a>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns"> <xsl:template match="a"> <A><myns:myext><x>X</x><y>Y</y><z/></myns:myext></A> </xsl:template> </xsl:stylesheet>''') class MyExt(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): output_parent.extend(list(self_node)[1:]) extensions = { ('testns', 'myext') : MyExt() } result = tree.xslt(style, extensions=extensions) self.assertEqual(self._rootstring(result), b'<A><y>Y</y><z/></A>') def test_extension_element_apply_templates(self): tree = self.parse('<a><b>B</b></a>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns"> <xsl:template match="a"> 
<A><myns:myext><x>X</x><y>Y</y><z/></myns:myext></A> </xsl:template> <xsl:template match="x" /> <xsl:template match="z">XYZ</xsl:template> </xsl:stylesheet>''') class MyExt(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): for child in self_node: for result in self.apply_templates(context, child): if isinstance(result, str): el = etree.Element("T") el.text = result else: el = result output_parent.append(el) extensions = { ('testns', 'myext') : MyExt() } result = tree.xslt(style, extensions=extensions) self.assertEqual(self._rootstring(result), b'<A><T>Y</T><T>XYZ</T></A>') def test_extension_element_apply_templates_elements_only(self): tree = self.parse('<a><b>B</b></a>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns"> <xsl:template match="a"> <A><myns:myext><x>X</x><y>Y</y><z/></myns:myext></A> </xsl:template> <xsl:template match="x"><X/></xsl:template> <xsl:template match="z">XYZ</xsl:template> </xsl:stylesheet>''') class MyExt(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): for child in self_node: for result in self.apply_templates(context, child, elements_only=True): assert not isinstance(result, str) output_parent.append(result) extensions = { ('testns', 'myext') : MyExt() } result = tree.xslt(style, extensions=extensions) self.assertEqual(self._rootstring(result), b'<A><X/></A>') def test_extension_element_apply_templates_remove_blank_text(self): tree = self.parse('<a><b>B</b></a>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns"> <xsl:template match="a"> <A><myns:myext><x>X</x><y>Y</y><z/></myns:myext></A> </xsl:template> <xsl:template match="x"><X/></xsl:template> <xsl:template match="y"><xsl:text> </xsl:text></xsl:template> <xsl:template 
match="z">XYZ</xsl:template> </xsl:stylesheet>''') class MyExt(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): for child in self_node: for result in self.apply_templates(context, child, remove_blank_text=True): if isinstance(result, str): assert result.strip() el = etree.Element("T") el.text = result else: el = result output_parent.append(el) extensions = { ('testns', 'myext') : MyExt() } result = tree.xslt(style, extensions=extensions) self.assertEqual(self._rootstring(result), b'<A><X/><T>XYZ</T></A>') def test_extension_element_apply_templates_target_node(self): tree = self.parse('<a><b>B</b></a>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns"> <xsl:template match="a"> <A><myns:myext><x>X</x><y>Y</y><z/></myns:myext></A> </xsl:template> <xsl:template match="x" /> <xsl:template match="z">XYZ</xsl:template> </xsl:stylesheet>''') class MyExt(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): for child in self_node: self.apply_templates(context, child, output_parent) extensions = { ('testns', 'myext') : MyExt() } result = tree.xslt(style, extensions=extensions) self.assertEqual(self._rootstring(result), b'<A>YXYZ</A>') def test_extension_element_apply_templates_target_node_doc(self): tree = self.parse('<a><b>B</b></a>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns"> <xsl:template match="a"> <myns:myext><x>X</x><y>Y</y><z/></myns:myext> </xsl:template> <xsl:template match="x"><xsl:processing-instruction name="test">TEST</xsl:processing-instruction></xsl:template> <xsl:template match="y"><Y>XYZ</Y></xsl:template> <xsl:template match="z"><xsl:comment>TEST</xsl:comment></xsl:template> </xsl:stylesheet>''') class MyExt(etree.XSLTExtension): def execute(self, context, self_node, 
input_node, output_parent): for child in self_node: self.apply_templates(context, child, output_parent) extensions = { ('testns', 'myext') : MyExt() } result = tree.xslt(style, extensions=extensions) self.assertEqual(etree.tostring(result), b'<?test TEST?><Y>XYZ</Y><!--TEST-->') def test_extension_element_process_children(self): tree = self.parse('<a><b>E</b></a>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns"> <xsl:template match="a"> <xsl:variable name="testvar">yo</xsl:variable> <A> <myns:myext> <xsl:attribute name="attr"> <xsl:value-of select="$testvar" /> </xsl:attribute> <B> <xsl:choose> <xsl:when test="1 = 2"><C/></xsl:when> <xsl:otherwise><D><xsl:value-of select="b/text()" /></D></xsl:otherwise> </xsl:choose> </B> </myns:myext> </A> </xsl:template> </xsl:stylesheet>''') class MyExt(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): el = etree.Element('MY') self.process_children(context, el) output_parent.append(el) extensions = { ('testns', 'myext') : MyExt() } result = tree.xslt(style, extensions=extensions) self.assertEqual(self._rootstring(result), b'<A><MYattr="yo"><B><D>E</D></B></MY></A>') def test_extension_element_process_children_to_append_only(self): tree = self.parse('<a/>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns"> <xsl:template match="a"> <myns:myext> <A/> </myns:myext> </xsl:template> </xsl:stylesheet>''') class MyExt(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): self.process_children(context, output_parent) extensions = { ('testns', 'myext') : MyExt() } result = tree.xslt(style, extensions=extensions) self.assertEqual(self._rootstring(result), b'<A/>') def test_extension_element_process_children_to_read_only_raise(self): tree = 
self.parse('<a/>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns"> <xsl:template match="a"> <myns:myext> <A/> </myns:myext> </xsl:template> </xsl:stylesheet>''') class MyExt(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): self.process_children(context, self_node) extensions = { ('testns', 'myext') : MyExt() } self.assertRaises(TypeError, tree.xslt, style, extensions=extensions) def test_extension_element_process_children_with_subextension_element(self): tree = self.parse('<a/>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns"> <xsl:template match="a"> <myns:myext> <A><myns:myext><B/></myns:myext></A> </myns:myext> </xsl:template> </xsl:stylesheet>''') class MyExt(etree.XSLTExtension): callback_call_counter = 0 def execute(self, context, self_node, input_node, output_parent): self.callback_call_counter += 1 el = etree.Element('MY', n=str(self.callback_call_counter)) self.process_children(context, el) output_parent.append(el) extensions = { ('testns', 'myext') : MyExt() } result = tree.xslt(style, extensions=extensions) self.assertEqual(self._rootstring(result), b'<MYn="1"><A><MYn="2"><B/></MY></A></MY>') def test_extension_element_raise(self): tree = self.parse('<a><b>B</b></a>') style = self.parse('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:myns="testns" extension-element-prefixes="myns" exclude-result-prefixes="myns"> <xsl:template match="a"> <A><myns:myext>b</myns:myext></A> </xsl:template> </xsl:stylesheet>''') class MyError(Exception): pass class MyExt(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): raise MyError("expected!") extensions = { ('testns', 'myext') : MyExt() } self.assertRaises(MyError, tree.xslt, style, 
extensions=extensions) # FIXME: DISABLED - implementation seems to be broken # if someone cares enough about this feature, I take pull requests that fix it. def _test_multiple_extension_elements_with_output_parent(self): tree = self.parse("""\ <text> <par>This is <format>arbitrary</format> text in a paragraph</par> </text>""") style = self.parse("""\ <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:my="my" extension-element-prefixes="my" version="1.0"> <xsl:template match="par"> <my:par><xsl:apply-templates /></my:par> </xsl:template> <xsl:template match="format"> <my:format><xsl:apply-templates /></my:format> </xsl:template> </xsl:stylesheet> """) test = self calls = [] class ExtMyPar(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): calls.append('par') p = etree.Element("p") p.attrib["style"] = "color:red" self.process_children(context, p) output_parent.append(p) class ExtMyFormat(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): calls.append('format') content = self.process_children(context) test.assertEqual(1, len(content)) test.assertEqual('arbitrary', content[0]) test.assertEqual('This is ', output_parent.text) output_parent.text += '*-%s-*' % content[0] extensions = {("my", "par"): ExtMyPar(), ("my", "format"): ExtMyFormat()} transform = etree.XSLT(style, extensions=extensions) result = transform(tree) self.assertEqual(['par', 'format'], calls) self.assertEqual( b'<p style="color:red">This is *-arbitrary-* text in a paragraph</p>\n', etree.tostring(result)) def test_extensions_nsmap(self): tree = self.parse("""\ <root> <inner xmlns:sha256="http://www.w3.org/2001/04/xmlenc#sha256"> <data>test</data> </inner> </root> """) style = self.parse("""\ <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:my="extns" extension-element-prefixes="my" version="1.0"> <xsl:template match="node()|@*"> <xsl:copy> <xsl:apply-templates select="node()|@*"/> 
</xsl:copy> </xsl:template> <xsl:template match="data"> <my:show-nsmap/> </xsl:template> </xsl:stylesheet> """) class MyExt(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): output_parent.text = str(input_node.nsmap) extensions = {('extns', 'show-nsmap'): MyExt()} result = tree.xslt(style, extensions=extensions) self.assertEqual(etree.tostring(result, pretty_print=True), b"""\ <root> <inner xmlns:sha256="http://www.w3.org/2001/04/xmlenc#sha256">{'sha256': 'http://www.w3.org/2001/04/xmlenc#sha256'} </inner> </root> """)
ETreeXSLTExtElementTestCase
python
mlflow__mlflow
tests/telemetry/test_tracked_events.py
{ "start": 24510, "end": 33775 }
class ____(mlflow.pyfunc.PythonModel): def predict(self, context, model_input: list[str], params=None) -> list[str]: return model_input set_model(TestModel()) """ model_path = tmp_path / "model.py" model_path.write_text(model_def) model_info = mlflow.pyfunc.log_model( name="model", python_model=model_path, ) mock_telemetry_client.flush() mlflow.pyfunc.load_model(model_info.model_uri) data = validate_telemetry_record( mock_telemetry_client, mock_requests, GetLoggedModelEvent.name, check_params=False ) # test load model after registry mlflow.register_model(model_info.model_uri, name="test") mock_telemetry_client.flush() mlflow.pyfunc.load_model("models:/test/1") data = validate_telemetry_record( mock_telemetry_client, mock_requests, GetLoggedModelEvent.name, check_params=False ) def test_mcp_run(mock_requests, mock_telemetry_client: TelemetryClient): from mlflow.mcp.cli import run runner = CliRunner(catch_exceptions=False) with mock.patch("mlflow.mcp.cli.run_server") as mock_run_server: runner.invoke(run) mock_run_server.assert_called_once() mock_telemetry_client.flush() validate_telemetry_record(mock_telemetry_client, mock_requests, McpRunEvent.name) def test_ai_command_run(mock_requests, mock_telemetry_client: TelemetryClient): from mlflow.ai_commands import commands runner = CliRunner(catch_exceptions=False) # Test CLI context with mock.patch("mlflow.ai_commands.get_command", return_value="---\ntest\n---\nTest command"): result = runner.invoke(commands, ["run", "test_command"]) assert result.exit_code == 0 mock_telemetry_client.flush() validate_telemetry_record( mock_telemetry_client, mock_requests, AiCommandRunEvent.name, {"command_key": "test_command", "context": "cli"}, ) def test_git_model_versioning(mock_requests, mock_telemetry_client): from mlflow.genai import enable_git_model_versioning with enable_git_model_versioning(): pass mock_telemetry_client.flush() validate_telemetry_record(mock_telemetry_client, mock_requests, GitModelVersioningEvent.name) 
@pytest.mark.parametrize( ("model_uri", "expected_provider", "litellm_available", "use_native_provider"), [ ("databricks:/llama-3.1-70b", "databricks", True, False), ("openai:/gpt-4o-mini", "openai", True, False), ("endpoints:/my-endpoint", "endpoints", True, False), ("anthropic:/claude-3-opus", "anthropic", True, False), ], ) def test_invoke_custom_judge_model( mock_requests, mock_telemetry_client: TelemetryClient, model_uri, expected_provider, litellm_available, use_native_provider, ): from mlflow.genai.judges.utils import invoke_judge_model from mlflow.utils.rest_utils import MlflowHostCreds mock_response = json.dumps({"result": 0.8, "rationale": "Test rationale"}) # Mock Databricks credentials for databricks:// URIs mock_creds = MlflowHostCreds(host="https://test.databricks.com", token="test-token") with ( mock.patch( "mlflow.genai.judges.utils._is_litellm_available", return_value=litellm_available ), mock.patch( "mlflow.utils.databricks_utils.get_databricks_host_creds", return_value=mock_creds ), ): if use_native_provider: with ( mock.patch.object( __import__( "mlflow.metrics.genai.model_utils", fromlist=["score_model_on_payload"] ), "score_model_on_payload", return_value=mock_response, ), mock.patch.object( __import__("mlflow.metrics.genai.model_utils", fromlist=["get_endpoint_type"]), "get_endpoint_type", return_value="llm/v1/chat", ), ): invoke_judge_model( model_uri=model_uri, prompt="Test prompt", assessment_name="test_assessment", ) else: with ( mock.patch( "mlflow.genai.judges.utils.invocation_utils._invoke_litellm_and_handle_tools", return_value=(mock_response, 10), ), mock.patch( "mlflow.genai.judges.adapters.databricks_serving_endpoint_adapter._invoke_databricks_serving_endpoint" ) as mock_databricks, ): # For databricks provider, mock the databricks model invocation if expected_provider in ["databricks", "endpoints"]: from mlflow.genai.judges.adapters.databricks_serving_endpoint_adapter import ( InvokeDatabricksModelOutput, ) 
mock_databricks.return_value = InvokeDatabricksModelOutput( response=mock_response, request_id="test-request-id", num_prompt_tokens=10, num_completion_tokens=20, ) invoke_judge_model( model_uri=model_uri, prompt="Test prompt", assessment_name="test_assessment", ) expected_params = {"model_provider": expected_provider} validate_telemetry_record( mock_telemetry_client, mock_requests, InvokeCustomJudgeModelEvent.name, expected_params, ) def test_make_judge(mock_requests, mock_telemetry_client: TelemetryClient): make_judge( name="test_judge", instructions="Evaluate the {{ inputs }} and {{ outputs }}", model="openai:/gpt-4", feedback_value_type=str, ) expected_params = {"model_provider": "openai"} validate_telemetry_record( mock_telemetry_client, mock_requests, MakeJudgeEvent.name, expected_params ) make_judge( name="test_judge", instructions="Evaluate the {{ inputs }} and {{ outputs }}", feedback_value_type=str, ) expected_params = {"model_provider": None} validate_telemetry_record( mock_telemetry_client, mock_requests, MakeJudgeEvent.name, expected_params ) def test_align_judge(mock_requests, mock_telemetry_client: TelemetryClient): judge = make_judge( name="test_judge", instructions="Evaluate the {{ inputs }} and {{ outputs }}", model="openai:/gpt-4", feedback_value_type=str, ) traces = [ mock.MagicMock(spec=Trace), mock.MagicMock(spec=Trace), ] class MockOptimizer(AlignmentOptimizer): def align(self, judge, traces): return judge custom_optimizer = MockOptimizer() judge.align(traces, optimizer=custom_optimizer) expected_params = {"trace_count": 2, "optimizer_type": "MockOptimizer"} validate_telemetry_record( mock_telemetry_client, mock_requests, AlignJudgeEvent.name, expected_params ) def test_autologging(mock_requests, mock_telemetry_client: TelemetryClient): try: mlflow.openai.autolog() mlflow.autolog() mock_telemetry_client.flush() data = [record["data"] for record in mock_requests] params = [event["params"] for event in data if event["event_name"] == 
AutologgingEvent.name] assert ( json.dumps({"flavor": mlflow.openai.FLAVOR_NAME, "log_traces": True, "disable": False}) in params ) assert json.dumps({"flavor": "all", "log_traces": True, "disable": False}) in params finally: mlflow.autolog(disable=True) def test_load_prompt(mock_requests, mock_telemetry_client: TelemetryClient): # Register a prompt first prompt = mlflow.genai.register_prompt( name="test_prompt", template="Hello {{name}}", ) mock_telemetry_client.flush() # Set an alias for testing mlflow.genai.set_prompt_alias(name="test_prompt", version=prompt.version, alias="production") # Test load_prompt with version (no alias) mlflow.genai.load_prompt(name_or_uri="test_prompt", version=prompt.version) validate_telemetry_record( mock_telemetry_client, mock_requests, LoadPromptEvent.name, {"uses_alias": False} ) # Test load_prompt with URI and version (no alias) mlflow.genai.load_prompt(name_or_uri=f"prompts:/test_prompt/{prompt.version}") validate_telemetry_record( mock_telemetry_client, mock_requests, LoadPromptEvent.name, {"uses_alias": False} ) # Test load_prompt with alias mlflow.genai.load_prompt(name_or_uri="prompts:/test_prompt@production") validate_telemetry_record( mock_telemetry_client, mock_requests, LoadPromptEvent.name, {"uses_alias": True} ) # Test load_prompt with @latest (special alias) mlflow.genai.load_prompt(name_or_uri="prompts:/test_prompt@latest") validate_telemetry_record( mock_telemetry_client, mock_requests, LoadPromptEvent.name, {"uses_alias": True} )
TestModel
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/dataclassDescriptors2.py
{ "start": 910, "end": 992 }
class ____: x: Desc[int] y: Desc[str] z: Desc[str] = Desc() @dataclass
B
python
numpy__numpy
numpy/_core/tests/test_ufunc.py
{ "start": 137219, "end": 139707 }
class ____: PARAMS_COMMON = { "casting": "same_kind", "order": "K", "dtype": None, "subok": True, "signature": None, } PARAMS_UFUNC = { "where": True, } | PARAMS_COMMON PARAMS_GUFUNC = { "axes": np._NoValue, "axis": np._NoValue, "keepdims": False, } | PARAMS_COMMON @pytest.mark.parametrize("ufunc", [np.log, np.gcd, np.frexp, np.divmod, np.matvec]) def test_dunder_signature_attr(self, ufunc: np.ufunc): assert hasattr(ufunc, "__signature__") assert isinstance(ufunc.__signature__, inspect.Signature) assert inspect.signature(ufunc) == ufunc.__signature__ @pytest.mark.parametrize("ufunc", [np.exp, np.mod, np.frexp, np.divmod, np.vecmat]) def test_params_common_positional(self, ufunc: np.ufunc): sig = inspect.signature(ufunc) # check positional-only parameters posonly_params = {name: param.default for name, param in sig.parameters.items() if param.kind is param.POSITIONAL_ONLY} assert len(posonly_params) == ufunc.nin assert all(default is inspect.Parameter.empty for default in posonly_params.values()) # check 'out' parameter out_param = sig.parameters.get("out") assert out_param is not None assert out_param.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD @pytest.mark.parametrize("ufunc", [np.sin, np.add, np.frexp, np.divmod]) def test_params_common_ufunc(self, ufunc: np.ufunc): assert ufunc.signature is None # sanity check sig = inspect.signature(ufunc) # check keyword-only parameters keyword_params = {name: param.default for name, param in sig.parameters.items() if param.kind is param.KEYWORD_ONLY} assert keyword_params == self.PARAMS_UFUNC @pytest.mark.parametrize("gufunc", [np.matmul, np.matvec, np.vecdot, np.vecmat]) def test_params_common_gufunc(self, gufunc: np.ufunc): assert gufunc.signature is not None # sanity check sig = inspect.signature(gufunc) # check keyword-only parameters keyword_params = {name: param.default for name, param in sig.parameters.items() if param.kind is param.KEYWORD_ONLY} assert keyword_params == self.PARAMS_GUFUNC
TestUFuncInspectSignature
python
kamyu104__LeetCode-Solutions
Python/elimination-game.py
{ "start": 32, "end": 365 }
class ____(object): def lastRemaining(self, n): """ :type n: int :rtype: int """ start, step, direction = 1, 2, 1 while n > 1: start += direction * (step * (n//2) - step//2) n //= 2 step *= 2 direction *= -1 return start
Solution
python
ansible__ansible
lib/ansible/_internal/_json/_profiles/_inventory_legacy.py
{ "start": 262, "end": 854 }
class ____(_legacy._LegacyVariableVisitor, _json.StateTrackingMixIn): """State-tracking visitor implementation that only applies trust to `_meta.hostvars` and `vars` inventory values.""" # DTFIX5: does the variable visitor need to support conversion of sequence/mapping for inventory? @property def _allow_trust(self) -> bool: stack = self._get_stack() if len(stack) >= 4 and stack[:2] == ['_meta', 'hostvars']: return True if len(stack) >= 3 and stack[1] == 'vars': return True return False
_InventoryVariableVisitor
python
pandas-dev__pandas
pandas/tests/frame/test_arithmetic.py
{ "start": 1218, "end": 1923 }
class ____: def __init__(self, value, dtype) -> None: self.value = value self.dtype = np.dtype(dtype) def __array__(self, dtype=None, copy=None): return np.array(self.value, dtype=self.dtype) def __str__(self) -> str: return f"DummyElement({self.value}, {self.dtype})" def __repr__(self) -> str: return str(self) def astype(self, dtype, copy=False): self.dtype = dtype return self def view(self, dtype): return type(self)(self.value.view(dtype), dtype) def any(self, axis=None): return bool(self.value) # ------------------------------------------------------------------- # Comparisons
DummyElement
python
google__jax
jaxlib/xla_client.py
{ "start": 8790, "end": 11712 }
class ____: def __init__(self, parameter_shapes, result_shape): def parameter_shapes(self) -> [Shape]: def result_shape(self) -> Shape: def __repr__(self): """ DeviceAssignment = _xla.DeviceAssignment DeviceAssignment.__doc__ = """ A DeviceAssignment is a C++ object with the following signature. def create(assignment): '''Builds a device assignment. Args: assignment: a 2D numpy array of device ordinal integers, indexed by [replica][computation_in_replica]. Returns: A device assignment. ''' def replica_count(): '''Returns the number of replicas.''' def computation_count(): '''Returns the number of computations per replica.''' """ Device = _xla.Device CompileOptions = _xla.CompileOptions HostBufferSemantics = _xla.HostBufferSemantics # An Executable is a C++ class that duck types with the following API: # class Executable: # def local_devices(self) -> [Device]: # def execute(self, arguments : [Buffer]) -> Buffer: # """Execute on one replica with Buffer arguments and return value.""" # # def size_of_generated_code_in_bytes(self) -> int: # """Return generated binary size, or -1 if not known.""" # # def execute_sharded_on_local_devices(self, arguments: [[Buffer]]) # -> [Buffer]: # """Execute on many replicas with Buffer arguments and return value. # # Args: # arguments: A sequence of sequences of Buffers. The i'th element of each # sequence comprises the arguments for execution on the i'th local # device. # # Returns: # A list of the computation's outputs as a list of Buffers for each # device. # """ # # There are different implementations of Executable for different backends. 
XlaComputation = _xla.XlaComputation Client = _xla.Client Memory = _xla.Memory Array = _xla.Array ArrayImpl = _xla.ArrayImpl LoadedExecutable = _xla.LoadedExecutable Executable = _xla.Executable DeviceList = _xla.DeviceList OpSharding = _xla.OpSharding HloSharding = _xla.HloSharding Sharding = _xla.Sharding NamedSharding = _xla.NamedSharding SingleDeviceSharding = _xla.SingleDeviceSharding PmapSharding = _xla.PmapSharding GSPMDSharding = _xla.GSPMDSharding PjRtLayout = _xla.PjRtLayout AutotuneCacheMode = _xla.AutotuneCacheMode def LoadedExecutable_execute(self, arguments, device=None): del device results = self.execute_sharded(arguments) return [x[0] for x in results.disassemble_into_single_device_arrays()] def LoadedExecutable_execute_with_token(self, arguments, device=None): del device results = self.execute_sharded(arguments, with_tokens=True) return ( [x[0] for x in results.disassemble_into_single_device_arrays()], results.consume_token().get_token(0), ) LoadedExecutable.execute = LoadedExecutable_execute # type: ignore[method-assign] LoadedExecutable.execute_with_token = LoadedExecutable_execute_with_token # type: ignore[method-assign]
ProgramShape
python
walkccc__LeetCode
solutions/1363. Largest Multiple of Three/1363.py
{ "start": 0, "end": 520 }
class ____: def largestMultipleOfThree(self, digits: list[int]) -> str: ans = '' mod1 = [1, 4, 7, 2, 5, 8] mod2 = [2, 5, 8, 1, 4, 7] count = collections.Counter(digits) summ = sum(digits) while summ % 3 != 0: for digit in (mod1 if summ % 3 == 1 else mod2): if count[digit]: count[digit] -= 1 summ -= digit break for digit in reversed(range(10)): ans += str(digit) * count[digit] return '0' if len(ans) and ans[0] == '0' else ans
Solution
python
airbytehq__airbyte
airbyte-integrations/connectors/source-youtube-analytics/components.py
{ "start": 3156, "end": 4066 }
class ____(StateMigration): def should_migrate(self, stream_state: Mapping[str, Any]) -> bool: return stream_state.get("state") or stream_state.get("date") def migrate(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]: if stream_state.get("date"): # old format state before migration to low code cursor_value = str(stream_state["date"]) stream_state = { "state": {"date": cursor_value}, "parent_state": {"report": {"state": {"date": cursor_value}, "lookback_window": 0}}, } return stream_state cursor_value = stream_state["state"] cursor_value["date"] = str(cursor_value["date"]) stream_state["parent_state"]["report"]["state"] = cursor_value stream_state["parent_state"]["report"]["lookback_window"] = 0 return stream_state
ReportsStateMigration
python
huggingface__transformers
src/transformers/models/reformer/modeling_reformer.py
{ "start": 63727, "end": 69925 }
class ____(nn.Module):
    """One reversible Reformer layer: attention + chunked feed-forward.

    Implements the RevNet coupling (Y_1 = X_1 + f(X_2), Y_2 = X_2 + g(Y_1))
    so activations can be recomputed in `backward_pass` instead of stored.
    Dropout determinism across the recomputation is guaranteed by saving
    and replaying RNG seeds for each sublayer.
    """

    def __init__(self, config, layer_id=0):
        super().__init__()
        self.attention = ReformerAttention(config, layer_id)
        # dropout requires to have the same
        # seed for forward and backward pass
        self.attention_seed = None
        self.feed_forward_seed = None

        self.feed_forward = ChunkReformerFeedForward(config)

    def _init_attention_seed(self):
        """
        This function sets a new seed for the attention layer to make dropout deterministic for both forward calls: 1
        normal forward call and 1 forward call in backward to recalculate activations.
        """
        # randomize seeds
        # use cuda generator if available
        if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0:
            # GPU
            device_idx = torch.cuda.current_device()
            self.attention_seed = torch.cuda.default_generators[device_idx].seed()
        else:
            # CPU
            self.attention_seed = int(torch.seed() % sys.maxsize)

        torch.manual_seed(self.attention_seed)

    def _init_feed_forward_seed(self):
        """
        This function sets a new seed for the feed forward layer to make dropout deterministic for both forward calls:
        1 normal forward call and 1 forward call in backward to recalculate activations.
        """
        # randomize seeds
        # use cuda generator if available
        if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0:
            # GPU
            device_idx = torch.cuda.current_device()
            self.feed_forward_seed = torch.cuda.default_generators[device_idx].seed()
        else:
            # CPU
            self.feed_forward_seed = int(torch.seed() % sys.maxsize)

        torch.manual_seed(self.feed_forward_seed)

    def forward(
        self,
        prev_attn_output,
        hidden_states,
        attention_mask=None,
        num_hashes=None,
        past_buckets_states=None,
        use_cache=False,
        orig_sequence_length=None,
        output_attentions=False,
    ):
        # no_grad: activations are intentionally not tracked here — they
        # are reconstructed in `backward_pass` (reversible residuals).
        with torch.no_grad():
            # every forward pass we sample a different seed
            # for dropout and save for forward fn in backward pass
            # to have correct dropout
            if self.training:
                self._init_attention_seed()

            attn_outputs = self.attention(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                num_hashes=num_hashes,
                past_buckets_states=past_buckets_states,
                use_cache=use_cache,
                orig_sequence_length=orig_sequence_length,
                output_attentions=output_attentions,
            )
            attn_output = attn_outputs.hidden_states

            # Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0)
            # Y_1 = X_1 + f(X_2)
            attn_output = prev_attn_output + attn_output

            # free memory
            del prev_attn_output

            # every forward pass we sample a different seed
            # for dropout and save seed for forward fn in backward
            # to have correct dropout
            if self.training:
                self._init_feed_forward_seed()
            # Y_2 = X_2 + g(Y_1)
            hidden_states = hidden_states + self.feed_forward(attn_output)

        return ReformerOutput(
            attn_output=attn_output,
            hidden_states=hidden_states,
            attention_probs=attn_outputs.attention_probs,
            buckets=attn_outputs.buckets,
        )

    def backward_pass(
        self,
        next_attn_output,
        hidden_states,
        grad_attn_output,
        grad_hidden_states,
        attention_mask=None,
        buckets=None,
    ):
        # Implements the backward pass for reversible ResNets.
        # A good blog post on how this works can be found here:
        # Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0)
        # This code is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py

        assert self.training, (
            "If you want to train `ReformerModel` and its variations, make sure to use `model.train()` to put the"
            " model into training mode."
        )

        with torch.enable_grad():
            next_attn_output.requires_grad = True

            # set seed to have correct dropout
            # (replays the seed saved during the forward pass)
            torch.manual_seed(self.feed_forward_seed)
            # g(Y_1)
            res_hidden_states = self.feed_forward(next_attn_output)
            res_hidden_states.backward(grad_hidden_states, retain_graph=True)

        with torch.no_grad():
            # X_2 = Y_2 - g(Y_1)
            hidden_states = hidden_states - res_hidden_states
            del res_hidden_states

            grad_attn_output = grad_attn_output + next_attn_output.grad
            # clear the accumulated grad so it is not double-counted
            next_attn_output.grad = None

        with torch.enable_grad():
            hidden_states.requires_grad = True

            # set seed to have correct dropout
            torch.manual_seed(self.attention_seed)
            # f(X_2)
            # use cached buckets for backprop if buckets not None for LSHSelfAttention
            output = self.attention(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                buckets=buckets,
            ).hidden_states
            output.backward(grad_attn_output, retain_graph=True)

        with torch.no_grad():
            # X_1 = Y_1 - f(X_2)
            attn_output = next_attn_output - output
            del output, next_attn_output

            grad_hidden_states = grad_hidden_states + hidden_states.grad
            hidden_states.grad = None
            # detach so the reconstructed activation carries no stale graph
            hidden_states = hidden_states.detach()

        return ReformerBackwardOutput(
            attn_output=attn_output,
            hidden_states=hidden_states,
            grad_attn_output=grad_attn_output,
            grad_hidden_states=grad_hidden_states,
        )
ReformerLayer