language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
kamyu104__LeetCode-Solutions
Python/find-the-length-of-the-longest-common-prefix.py
{ "start": 725, "end": 1246 }
class ____(object): def longestCommonPrefix(self, arr1, arr2): """ :type arr1: List[int] :type arr2: List[int] :rtype: int """ lookup = {0} for x in arr1: while x not in lookup: lookup.add(x) x //= 10 result = 0 for x in arr2: l = len(str(x)) while x not in lookup: x //= 10 l -= 1 result = max(result, l) return result
Solution2
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 375071, "end": 478991 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ( "abort_queued_migrations", "accept_enterprise_administrator_invitation", "accept_topic_suggestion", "add_assignees_to_assignable", "add_comment", "add_discussion_comment", "add_discussion_poll_vote", "add_enterprise_support_entitlement", "add_labels_to_labelable", "add_project_card", "add_project_column", "add_project_draft_issue", "add_project_next_item", "add_project_v2_draft_issue", "add_project_v2_item_by_id", "add_pull_request_review", "add_pull_request_review_comment", "add_pull_request_review_thread", "add_reaction", "add_star", "add_upvote", "add_verifiable_domain", "approve_deployments", "approve_verifiable_domain", "archive_repository", "cancel_enterprise_admin_invitation", "cancel_sponsorship", "change_user_status", "clear_labels_from_labelable", "clear_project_v2_item_field_value", "clone_project", "clone_template_repository", "close_issue", "close_pull_request", "convert_project_card_note_to_issue", "convert_pull_request_to_draft", "create_branch_protection_rule", "create_check_run", "create_check_suite", "create_commit_on_branch", "create_discussion", "create_enterprise_organization", "create_environment", "create_ip_allow_list_entry", "create_issue", "create_migration_source", "create_project", "create_project_v2", "create_pull_request", "create_ref", "create_repository", "create_sponsors_tier", "create_sponsorship", "create_team_discussion", "create_team_discussion_comment", "decline_topic_suggestion", "delete_branch_protection_rule", "delete_deployment", "delete_discussion", "delete_discussion_comment", "delete_environment", "delete_ip_allow_list_entry", "delete_issue", "delete_issue_comment", "delete_project", "delete_project_card", "delete_project_column", "delete_project_next_item", "delete_project_v2_item", "delete_pull_request_review", "delete_pull_request_review_comment", "delete_ref", "delete_team_discussion", 
"delete_team_discussion_comment", "delete_verifiable_domain", "disable_pull_request_auto_merge", "dismiss_pull_request_review", "dismiss_repository_vulnerability_alert", "enable_pull_request_auto_merge", "follow_organization", "follow_user", "grant_enterprise_organizations_migrator_role", "grant_migrator_role", "invite_enterprise_admin", "link_repository_to_project", "lock_lockable", "mark_discussion_comment_as_answer", "mark_file_as_viewed", "mark_pull_request_ready_for_review", "merge_branch", "merge_pull_request", "minimize_comment", "move_project_card", "move_project_column", "pin_issue", "regenerate_enterprise_identity_provider_recovery_codes", "regenerate_verifiable_domain_token", "reject_deployments", "remove_assignees_from_assignable", "remove_enterprise_admin", "remove_enterprise_identity_provider", "remove_enterprise_organization", "remove_enterprise_support_entitlement", "remove_labels_from_labelable", "remove_outside_collaborator", "remove_reaction", "remove_star", "remove_upvote", "reopen_issue", "reopen_pull_request", "request_reviews", "rerequest_check_suite", "resolve_review_thread", "revoke_enterprise_organizations_migrator_role", "revoke_migrator_role", "set_enterprise_identity_provider", "set_organization_interaction_limit", "set_repository_interaction_limit", "set_user_interaction_limit", "start_repository_migration", "submit_pull_request_review", "transfer_issue", "unarchive_repository", "unfollow_organization", "unfollow_user", "unlink_repository_from_project", "unlock_lockable", "unmark_discussion_comment_as_answer", "unmark_file_as_viewed", "unmark_issue_as_duplicate", "unminimize_comment", "unpin_issue", "unresolve_review_thread", "update_branch_protection_rule", "update_check_run", "update_check_suite_preferences", "update_discussion", "update_discussion_comment", "update_enterprise_administrator_role", "update_enterprise_allow_private_repository_forking_setting", "update_enterprise_default_repository_permission_setting", 
"update_enterprise_members_can_change_repository_visibility_setting", "update_enterprise_members_can_create_repositories_setting", "update_enterprise_members_can_delete_issues_setting", "update_enterprise_members_can_delete_repositories_setting", "update_enterprise_members_can_invite_collaborators_setting", "update_enterprise_members_can_make_purchases_setting", "update_enterprise_members_can_update_protected_branches_setting", "update_enterprise_members_can_view_dependency_insights_setting", "update_enterprise_organization_projects_setting", "update_enterprise_owner_organization_role", "update_enterprise_profile", "update_enterprise_repository_projects_setting", "update_enterprise_team_discussions_setting", "update_enterprise_two_factor_authentication_required_setting", "update_environment", "update_ip_allow_list_enabled_setting", "update_ip_allow_list_entry", "update_ip_allow_list_for_installed_apps_enabled_setting", "update_issue", "update_issue_comment", "update_notification_restriction_setting", "update_organization_allow_private_repository_forking_setting", "update_organization_web_commit_signoff_setting", "update_project", "update_project_card", "update_project_column", "update_project_draft_issue", "update_project_next", "update_project_next_item_field", "update_project_v2", "update_project_v2_draft_issue", "update_project_v2_item_field_value", "update_project_v2_item_position", "update_pull_request", "update_pull_request_branch", "update_pull_request_review", "update_pull_request_review_comment", "update_ref", "update_repository", "update_repository_web_commit_signoff_setting", "update_sponsorship_preferences", "update_subscription", "update_team_discussion", "update_team_discussion_comment", "update_teams_repository", "update_topics", "verify_verifiable_domain", ) abort_queued_migrations = sgqlc.types.Field( AbortQueuedMigrationsPayload, graphql_name="abortQueuedMigrations", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( 
sgqlc.types.non_null(AbortQueuedMigrationsInput), graphql_name="input", default=None, ), ), ) ), ) accept_enterprise_administrator_invitation = sgqlc.types.Field( AcceptEnterpriseAdministratorInvitationPayload, graphql_name="acceptEnterpriseAdministratorInvitation", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( AcceptEnterpriseAdministratorInvitationInput ), graphql_name="input", default=None, ), ), ) ), ) accept_topic_suggestion = sgqlc.types.Field( AcceptTopicSuggestionPayload, graphql_name="acceptTopicSuggestion", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AcceptTopicSuggestionInput), graphql_name="input", default=None, ), ), ) ), ) add_assignees_to_assignable = sgqlc.types.Field( AddAssigneesToAssignablePayload, graphql_name="addAssigneesToAssignable", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddAssigneesToAssignableInput), graphql_name="input", default=None, ), ), ) ), ) add_comment = sgqlc.types.Field( AddCommentPayload, graphql_name="addComment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddCommentInput), graphql_name="input", default=None, ), ), ) ), ) add_discussion_comment = sgqlc.types.Field( AddDiscussionCommentPayload, graphql_name="addDiscussionComment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddDiscussionCommentInput), graphql_name="input", default=None, ), ), ) ), ) add_discussion_poll_vote = sgqlc.types.Field( AddDiscussionPollVotePayload, graphql_name="addDiscussionPollVote", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddDiscussionPollVoteInput), graphql_name="input", default=None, ), ), ) ), ) add_enterprise_support_entitlement = sgqlc.types.Field( AddEnterpriseSupportEntitlementPayload, graphql_name="addEnterpriseSupportEntitlement", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( 
sgqlc.types.non_null(AddEnterpriseSupportEntitlementInput), graphql_name="input", default=None, ), ), ) ), ) add_labels_to_labelable = sgqlc.types.Field( AddLabelsToLabelablePayload, graphql_name="addLabelsToLabelable", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddLabelsToLabelableInput), graphql_name="input", default=None, ), ), ) ), ) add_project_card = sgqlc.types.Field( AddProjectCardPayload, graphql_name="addProjectCard", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddProjectCardInput), graphql_name="input", default=None, ), ), ) ), ) add_project_column = sgqlc.types.Field( AddProjectColumnPayload, graphql_name="addProjectColumn", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddProjectColumnInput), graphql_name="input", default=None, ), ), ) ), ) add_project_draft_issue = sgqlc.types.Field( AddProjectDraftIssuePayload, graphql_name="addProjectDraftIssue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddProjectDraftIssueInput), graphql_name="input", default=None, ), ), ) ), ) add_project_next_item = sgqlc.types.Field( AddProjectNextItemPayload, graphql_name="addProjectNextItem", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddProjectNextItemInput), graphql_name="input", default=None, ), ), ) ), ) add_project_v2_draft_issue = sgqlc.types.Field( AddProjectV2DraftIssuePayload, graphql_name="addProjectV2DraftIssue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddProjectV2DraftIssueInput), graphql_name="input", default=None, ), ), ) ), ) add_project_v2_item_by_id = sgqlc.types.Field( AddProjectV2ItemByIdPayload, graphql_name="addProjectV2ItemById", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddProjectV2ItemByIdInput), graphql_name="input", default=None, ), ), ) ), ) add_pull_request_review = sgqlc.types.Field( AddPullRequestReviewPayload, 
graphql_name="addPullRequestReview", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddPullRequestReviewInput), graphql_name="input", default=None, ), ), ) ), ) add_pull_request_review_comment = sgqlc.types.Field( AddPullRequestReviewCommentPayload, graphql_name="addPullRequestReviewComment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddPullRequestReviewCommentInput), graphql_name="input", default=None, ), ), ) ), ) add_pull_request_review_thread = sgqlc.types.Field( AddPullRequestReviewThreadPayload, graphql_name="addPullRequestReviewThread", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddPullRequestReviewThreadInput), graphql_name="input", default=None, ), ), ) ), ) add_reaction = sgqlc.types.Field( AddReactionPayload, graphql_name="addReaction", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddReactionInput), graphql_name="input", default=None, ), ), ) ), ) add_star = sgqlc.types.Field( AddStarPayload, graphql_name="addStar", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddStarInput), graphql_name="input", default=None, ), ), ) ), ) add_upvote = sgqlc.types.Field( AddUpvotePayload, graphql_name="addUpvote", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddUpvoteInput), graphql_name="input", default=None, ), ), ) ), ) add_verifiable_domain = sgqlc.types.Field( AddVerifiableDomainPayload, graphql_name="addVerifiableDomain", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(AddVerifiableDomainInput), graphql_name="input", default=None, ), ), ) ), ) approve_deployments = sgqlc.types.Field( ApproveDeploymentsPayload, graphql_name="approveDeployments", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(ApproveDeploymentsInput), graphql_name="input", default=None, ), ), ) ), ) approve_verifiable_domain = sgqlc.types.Field( 
ApproveVerifiableDomainPayload, graphql_name="approveVerifiableDomain", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(ApproveVerifiableDomainInput), graphql_name="input", default=None, ), ), ) ), ) archive_repository = sgqlc.types.Field( ArchiveRepositoryPayload, graphql_name="archiveRepository", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(ArchiveRepositoryInput), graphql_name="input", default=None, ), ), ) ), ) cancel_enterprise_admin_invitation = sgqlc.types.Field( CancelEnterpriseAdminInvitationPayload, graphql_name="cancelEnterpriseAdminInvitation", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CancelEnterpriseAdminInvitationInput), graphql_name="input", default=None, ), ), ) ), ) cancel_sponsorship = sgqlc.types.Field( CancelSponsorshipPayload, graphql_name="cancelSponsorship", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CancelSponsorshipInput), graphql_name="input", default=None, ), ), ) ), ) change_user_status = sgqlc.types.Field( ChangeUserStatusPayload, graphql_name="changeUserStatus", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(ChangeUserStatusInput), graphql_name="input", default=None, ), ), ) ), ) clear_labels_from_labelable = sgqlc.types.Field( ClearLabelsFromLabelablePayload, graphql_name="clearLabelsFromLabelable", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(ClearLabelsFromLabelableInput), graphql_name="input", default=None, ), ), ) ), ) clear_project_v2_item_field_value = sgqlc.types.Field( ClearProjectV2ItemFieldValuePayload, graphql_name="clearProjectV2ItemFieldValue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(ClearProjectV2ItemFieldValueInput), graphql_name="input", default=None, ), ), ) ), ) clone_project = sgqlc.types.Field( CloneProjectPayload, graphql_name="cloneProject", args=sgqlc.types.ArgDict( ( ( "input", 
sgqlc.types.Arg( sgqlc.types.non_null(CloneProjectInput), graphql_name="input", default=None, ), ), ) ), ) clone_template_repository = sgqlc.types.Field( CloneTemplateRepositoryPayload, graphql_name="cloneTemplateRepository", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CloneTemplateRepositoryInput), graphql_name="input", default=None, ), ), ) ), ) close_issue = sgqlc.types.Field( CloseIssuePayload, graphql_name="closeIssue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CloseIssueInput), graphql_name="input", default=None, ), ), ) ), ) close_pull_request = sgqlc.types.Field( ClosePullRequestPayload, graphql_name="closePullRequest", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(ClosePullRequestInput), graphql_name="input", default=None, ), ), ) ), ) convert_project_card_note_to_issue = sgqlc.types.Field( ConvertProjectCardNoteToIssuePayload, graphql_name="convertProjectCardNoteToIssue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(ConvertProjectCardNoteToIssueInput), graphql_name="input", default=None, ), ), ) ), ) convert_pull_request_to_draft = sgqlc.types.Field( ConvertPullRequestToDraftPayload, graphql_name="convertPullRequestToDraft", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(ConvertPullRequestToDraftInput), graphql_name="input", default=None, ), ), ) ), ) create_branch_protection_rule = sgqlc.types.Field( CreateBranchProtectionRulePayload, graphql_name="createBranchProtectionRule", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateBranchProtectionRuleInput), graphql_name="input", default=None, ), ), ) ), ) create_check_run = sgqlc.types.Field( CreateCheckRunPayload, graphql_name="createCheckRun", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateCheckRunInput), graphql_name="input", default=None, ), ), ) ), ) create_check_suite = 
sgqlc.types.Field( CreateCheckSuitePayload, graphql_name="createCheckSuite", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateCheckSuiteInput), graphql_name="input", default=None, ), ), ) ), ) create_commit_on_branch = sgqlc.types.Field( CreateCommitOnBranchPayload, graphql_name="createCommitOnBranch", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateCommitOnBranchInput), graphql_name="input", default=None, ), ), ) ), ) create_discussion = sgqlc.types.Field( CreateDiscussionPayload, graphql_name="createDiscussion", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateDiscussionInput), graphql_name="input", default=None, ), ), ) ), ) create_enterprise_organization = sgqlc.types.Field( CreateEnterpriseOrganizationPayload, graphql_name="createEnterpriseOrganization", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateEnterpriseOrganizationInput), graphql_name="input", default=None, ), ), ) ), ) create_environment = sgqlc.types.Field( CreateEnvironmentPayload, graphql_name="createEnvironment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateEnvironmentInput), graphql_name="input", default=None, ), ), ) ), ) create_ip_allow_list_entry = sgqlc.types.Field( CreateIpAllowListEntryPayload, graphql_name="createIpAllowListEntry", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateIpAllowListEntryInput), graphql_name="input", default=None, ), ), ) ), ) create_issue = sgqlc.types.Field( CreateIssuePayload, graphql_name="createIssue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateIssueInput), graphql_name="input", default=None, ), ), ) ), ) create_migration_source = sgqlc.types.Field( CreateMigrationSourcePayload, graphql_name="createMigrationSource", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( 
sgqlc.types.non_null(CreateMigrationSourceInput), graphql_name="input", default=None, ), ), ) ), ) create_project = sgqlc.types.Field( CreateProjectPayload, graphql_name="createProject", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateProjectInput), graphql_name="input", default=None, ), ), ) ), ) create_project_v2 = sgqlc.types.Field( CreateProjectV2Payload, graphql_name="createProjectV2", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateProjectV2Input), graphql_name="input", default=None, ), ), ) ), ) create_pull_request = sgqlc.types.Field( CreatePullRequestPayload, graphql_name="createPullRequest", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreatePullRequestInput), graphql_name="input", default=None, ), ), ) ), ) create_ref = sgqlc.types.Field( CreateRefPayload, graphql_name="createRef", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateRefInput), graphql_name="input", default=None, ), ), ) ), ) create_repository = sgqlc.types.Field( CreateRepositoryPayload, graphql_name="createRepository", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateRepositoryInput), graphql_name="input", default=None, ), ), ) ), ) create_sponsors_tier = sgqlc.types.Field( CreateSponsorsTierPayload, graphql_name="createSponsorsTier", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateSponsorsTierInput), graphql_name="input", default=None, ), ), ) ), ) create_sponsorship = sgqlc.types.Field( CreateSponsorshipPayload, graphql_name="createSponsorship", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateSponsorshipInput), graphql_name="input", default=None, ), ), ) ), ) create_team_discussion = sgqlc.types.Field( CreateTeamDiscussionPayload, graphql_name="createTeamDiscussion", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( 
sgqlc.types.non_null(CreateTeamDiscussionInput), graphql_name="input", default=None, ), ), ) ), ) create_team_discussion_comment = sgqlc.types.Field( CreateTeamDiscussionCommentPayload, graphql_name="createTeamDiscussionComment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(CreateTeamDiscussionCommentInput), graphql_name="input", default=None, ), ), ) ), ) decline_topic_suggestion = sgqlc.types.Field( DeclineTopicSuggestionPayload, graphql_name="declineTopicSuggestion", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeclineTopicSuggestionInput), graphql_name="input", default=None, ), ), ) ), ) delete_branch_protection_rule = sgqlc.types.Field( DeleteBranchProtectionRulePayload, graphql_name="deleteBranchProtectionRule", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteBranchProtectionRuleInput), graphql_name="input", default=None, ), ), ) ), ) delete_deployment = sgqlc.types.Field( DeleteDeploymentPayload, graphql_name="deleteDeployment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteDeploymentInput), graphql_name="input", default=None, ), ), ) ), ) delete_discussion = sgqlc.types.Field( DeleteDiscussionPayload, graphql_name="deleteDiscussion", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteDiscussionInput), graphql_name="input", default=None, ), ), ) ), ) delete_discussion_comment = sgqlc.types.Field( DeleteDiscussionCommentPayload, graphql_name="deleteDiscussionComment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteDiscussionCommentInput), graphql_name="input", default=None, ), ), ) ), ) delete_environment = sgqlc.types.Field( DeleteEnvironmentPayload, graphql_name="deleteEnvironment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteEnvironmentInput), graphql_name="input", default=None, ), ), ) ), ) delete_ip_allow_list_entry 
= sgqlc.types.Field( DeleteIpAllowListEntryPayload, graphql_name="deleteIpAllowListEntry", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteIpAllowListEntryInput), graphql_name="input", default=None, ), ), ) ), ) delete_issue = sgqlc.types.Field( DeleteIssuePayload, graphql_name="deleteIssue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteIssueInput), graphql_name="input", default=None, ), ), ) ), ) delete_issue_comment = sgqlc.types.Field( DeleteIssueCommentPayload, graphql_name="deleteIssueComment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteIssueCommentInput), graphql_name="input", default=None, ), ), ) ), ) delete_project = sgqlc.types.Field( DeleteProjectPayload, graphql_name="deleteProject", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteProjectInput), graphql_name="input", default=None, ), ), ) ), ) delete_project_card = sgqlc.types.Field( DeleteProjectCardPayload, graphql_name="deleteProjectCard", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteProjectCardInput), graphql_name="input", default=None, ), ), ) ), ) delete_project_column = sgqlc.types.Field( DeleteProjectColumnPayload, graphql_name="deleteProjectColumn", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteProjectColumnInput), graphql_name="input", default=None, ), ), ) ), ) delete_project_next_item = sgqlc.types.Field( DeleteProjectNextItemPayload, graphql_name="deleteProjectNextItem", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteProjectNextItemInput), graphql_name="input", default=None, ), ), ) ), ) delete_project_v2_item = sgqlc.types.Field( DeleteProjectV2ItemPayload, graphql_name="deleteProjectV2Item", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteProjectV2ItemInput), graphql_name="input", default=None, ), ), 
) ), ) delete_pull_request_review = sgqlc.types.Field( DeletePullRequestReviewPayload, graphql_name="deletePullRequestReview", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeletePullRequestReviewInput), graphql_name="input", default=None, ), ), ) ), ) delete_pull_request_review_comment = sgqlc.types.Field( DeletePullRequestReviewCommentPayload, graphql_name="deletePullRequestReviewComment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeletePullRequestReviewCommentInput), graphql_name="input", default=None, ), ), ) ), ) delete_ref = sgqlc.types.Field( DeleteRefPayload, graphql_name="deleteRef", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteRefInput), graphql_name="input", default=None, ), ), ) ), ) delete_team_discussion = sgqlc.types.Field( DeleteTeamDiscussionPayload, graphql_name="deleteTeamDiscussion", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteTeamDiscussionInput), graphql_name="input", default=None, ), ), ) ), ) delete_team_discussion_comment = sgqlc.types.Field( DeleteTeamDiscussionCommentPayload, graphql_name="deleteTeamDiscussionComment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteTeamDiscussionCommentInput), graphql_name="input", default=None, ), ), ) ), ) delete_verifiable_domain = sgqlc.types.Field( DeleteVerifiableDomainPayload, graphql_name="deleteVerifiableDomain", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DeleteVerifiableDomainInput), graphql_name="input", default=None, ), ), ) ), ) disable_pull_request_auto_merge = sgqlc.types.Field( DisablePullRequestAutoMergePayload, graphql_name="disablePullRequestAutoMerge", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DisablePullRequestAutoMergeInput), graphql_name="input", default=None, ), ), ) ), ) dismiss_pull_request_review = sgqlc.types.Field( 
DismissPullRequestReviewPayload, graphql_name="dismissPullRequestReview", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DismissPullRequestReviewInput), graphql_name="input", default=None, ), ), ) ), ) dismiss_repository_vulnerability_alert = sgqlc.types.Field( DismissRepositoryVulnerabilityAlertPayload, graphql_name="dismissRepositoryVulnerabilityAlert", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(DismissRepositoryVulnerabilityAlertInput), graphql_name="input", default=None, ), ), ) ), ) enable_pull_request_auto_merge = sgqlc.types.Field( EnablePullRequestAutoMergePayload, graphql_name="enablePullRequestAutoMerge", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(EnablePullRequestAutoMergeInput), graphql_name="input", default=None, ), ), ) ), ) follow_organization = sgqlc.types.Field( FollowOrganizationPayload, graphql_name="followOrganization", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(FollowOrganizationInput), graphql_name="input", default=None, ), ), ) ), ) follow_user = sgqlc.types.Field( FollowUserPayload, graphql_name="followUser", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(FollowUserInput), graphql_name="input", default=None, ), ), ) ), ) grant_enterprise_organizations_migrator_role = sgqlc.types.Field( GrantEnterpriseOrganizationsMigratorRolePayload, graphql_name="grantEnterpriseOrganizationsMigratorRole", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( GrantEnterpriseOrganizationsMigratorRoleInput ), graphql_name="input", default=None, ), ), ) ), ) grant_migrator_role = sgqlc.types.Field( GrantMigratorRolePayload, graphql_name="grantMigratorRole", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(GrantMigratorRoleInput), graphql_name="input", default=None, ), ), ) ), ) invite_enterprise_admin = sgqlc.types.Field( InviteEnterpriseAdminPayload, 
graphql_name="inviteEnterpriseAdmin", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(InviteEnterpriseAdminInput), graphql_name="input", default=None, ), ), ) ), ) link_repository_to_project = sgqlc.types.Field( LinkRepositoryToProjectPayload, graphql_name="linkRepositoryToProject", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(LinkRepositoryToProjectInput), graphql_name="input", default=None, ), ), ) ), ) lock_lockable = sgqlc.types.Field( LockLockablePayload, graphql_name="lockLockable", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(LockLockableInput), graphql_name="input", default=None, ), ), ) ), ) mark_discussion_comment_as_answer = sgqlc.types.Field( MarkDiscussionCommentAsAnswerPayload, graphql_name="markDiscussionCommentAsAnswer", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(MarkDiscussionCommentAsAnswerInput), graphql_name="input", default=None, ), ), ) ), ) mark_file_as_viewed = sgqlc.types.Field( MarkFileAsViewedPayload, graphql_name="markFileAsViewed", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(MarkFileAsViewedInput), graphql_name="input", default=None, ), ), ) ), ) mark_pull_request_ready_for_review = sgqlc.types.Field( MarkPullRequestReadyForReviewPayload, graphql_name="markPullRequestReadyForReview", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(MarkPullRequestReadyForReviewInput), graphql_name="input", default=None, ), ), ) ), ) merge_branch = sgqlc.types.Field( MergeBranchPayload, graphql_name="mergeBranch", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(MergeBranchInput), graphql_name="input", default=None, ), ), ) ), ) merge_pull_request = sgqlc.types.Field( MergePullRequestPayload, graphql_name="mergePullRequest", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(MergePullRequestInput), 
graphql_name="input", default=None, ), ), ) ), ) minimize_comment = sgqlc.types.Field( MinimizeCommentPayload, graphql_name="minimizeComment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(MinimizeCommentInput), graphql_name="input", default=None, ), ), ) ), ) move_project_card = sgqlc.types.Field( MoveProjectCardPayload, graphql_name="moveProjectCard", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(MoveProjectCardInput), graphql_name="input", default=None, ), ), ) ), ) move_project_column = sgqlc.types.Field( MoveProjectColumnPayload, graphql_name="moveProjectColumn", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(MoveProjectColumnInput), graphql_name="input", default=None, ), ), ) ), ) pin_issue = sgqlc.types.Field( "PinIssuePayload", graphql_name="pinIssue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(PinIssueInput), graphql_name="input", default=None, ), ), ) ), ) regenerate_enterprise_identity_provider_recovery_codes = sgqlc.types.Field( "RegenerateEnterpriseIdentityProviderRecoveryCodesPayload", graphql_name="regenerateEnterpriseIdentityProviderRecoveryCodes", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( RegenerateEnterpriseIdentityProviderRecoveryCodesInput ), graphql_name="input", default=None, ), ), ) ), ) regenerate_verifiable_domain_token = sgqlc.types.Field( "RegenerateVerifiableDomainTokenPayload", graphql_name="regenerateVerifiableDomainToken", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RegenerateVerifiableDomainTokenInput), graphql_name="input", default=None, ), ), ) ), ) reject_deployments = sgqlc.types.Field( "RejectDeploymentsPayload", graphql_name="rejectDeployments", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RejectDeploymentsInput), graphql_name="input", default=None, ), ), ) ), ) remove_assignees_from_assignable = 
sgqlc.types.Field( "RemoveAssigneesFromAssignablePayload", graphql_name="removeAssigneesFromAssignable", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RemoveAssigneesFromAssignableInput), graphql_name="input", default=None, ), ), ) ), ) remove_enterprise_admin = sgqlc.types.Field( "RemoveEnterpriseAdminPayload", graphql_name="removeEnterpriseAdmin", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RemoveEnterpriseAdminInput), graphql_name="input", default=None, ), ), ) ), ) remove_enterprise_identity_provider = sgqlc.types.Field( "RemoveEnterpriseIdentityProviderPayload", graphql_name="removeEnterpriseIdentityProvider", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RemoveEnterpriseIdentityProviderInput), graphql_name="input", default=None, ), ), ) ), ) remove_enterprise_organization = sgqlc.types.Field( "RemoveEnterpriseOrganizationPayload", graphql_name="removeEnterpriseOrganization", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RemoveEnterpriseOrganizationInput), graphql_name="input", default=None, ), ), ) ), ) remove_enterprise_support_entitlement = sgqlc.types.Field( "RemoveEnterpriseSupportEntitlementPayload", graphql_name="removeEnterpriseSupportEntitlement", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RemoveEnterpriseSupportEntitlementInput), graphql_name="input", default=None, ), ), ) ), ) remove_labels_from_labelable = sgqlc.types.Field( "RemoveLabelsFromLabelablePayload", graphql_name="removeLabelsFromLabelable", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RemoveLabelsFromLabelableInput), graphql_name="input", default=None, ), ), ) ), ) remove_outside_collaborator = sgqlc.types.Field( "RemoveOutsideCollaboratorPayload", graphql_name="removeOutsideCollaborator", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RemoveOutsideCollaboratorInput), 
graphql_name="input", default=None, ), ), ) ), ) remove_reaction = sgqlc.types.Field( "RemoveReactionPayload", graphql_name="removeReaction", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RemoveReactionInput), graphql_name="input", default=None, ), ), ) ), ) remove_star = sgqlc.types.Field( "RemoveStarPayload", graphql_name="removeStar", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RemoveStarInput), graphql_name="input", default=None, ), ), ) ), ) remove_upvote = sgqlc.types.Field( "RemoveUpvotePayload", graphql_name="removeUpvote", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RemoveUpvoteInput), graphql_name="input", default=None, ), ), ) ), ) reopen_issue = sgqlc.types.Field( "ReopenIssuePayload", graphql_name="reopenIssue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(ReopenIssueInput), graphql_name="input", default=None, ), ), ) ), ) reopen_pull_request = sgqlc.types.Field( "ReopenPullRequestPayload", graphql_name="reopenPullRequest", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(ReopenPullRequestInput), graphql_name="input", default=None, ), ), ) ), ) request_reviews = sgqlc.types.Field( "RequestReviewsPayload", graphql_name="requestReviews", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RequestReviewsInput), graphql_name="input", default=None, ), ), ) ), ) rerequest_check_suite = sgqlc.types.Field( "RerequestCheckSuitePayload", graphql_name="rerequestCheckSuite", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RerequestCheckSuiteInput), graphql_name="input", default=None, ), ), ) ), ) resolve_review_thread = sgqlc.types.Field( "ResolveReviewThreadPayload", graphql_name="resolveReviewThread", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(ResolveReviewThreadInput), graphql_name="input", default=None, ), ), ) ), ) 
revoke_enterprise_organizations_migrator_role = sgqlc.types.Field( "RevokeEnterpriseOrganizationsMigratorRolePayload", graphql_name="revokeEnterpriseOrganizationsMigratorRole", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( RevokeEnterpriseOrganizationsMigratorRoleInput ), graphql_name="input", default=None, ), ), ) ), ) revoke_migrator_role = sgqlc.types.Field( "RevokeMigratorRolePayload", graphql_name="revokeMigratorRole", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(RevokeMigratorRoleInput), graphql_name="input", default=None, ), ), ) ), ) set_enterprise_identity_provider = sgqlc.types.Field( "SetEnterpriseIdentityProviderPayload", graphql_name="setEnterpriseIdentityProvider", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(SetEnterpriseIdentityProviderInput), graphql_name="input", default=None, ), ), ) ), ) set_organization_interaction_limit = sgqlc.types.Field( "SetOrganizationInteractionLimitPayload", graphql_name="setOrganizationInteractionLimit", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(SetOrganizationInteractionLimitInput), graphql_name="input", default=None, ), ), ) ), ) set_repository_interaction_limit = sgqlc.types.Field( "SetRepositoryInteractionLimitPayload", graphql_name="setRepositoryInteractionLimit", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(SetRepositoryInteractionLimitInput), graphql_name="input", default=None, ), ), ) ), ) set_user_interaction_limit = sgqlc.types.Field( "SetUserInteractionLimitPayload", graphql_name="setUserInteractionLimit", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(SetUserInteractionLimitInput), graphql_name="input", default=None, ), ), ) ), ) start_repository_migration = sgqlc.types.Field( "StartRepositoryMigrationPayload", graphql_name="startRepositoryMigration", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( 
sgqlc.types.non_null(StartRepositoryMigrationInput), graphql_name="input", default=None, ), ), ) ), ) submit_pull_request_review = sgqlc.types.Field( "SubmitPullRequestReviewPayload", graphql_name="submitPullRequestReview", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(SubmitPullRequestReviewInput), graphql_name="input", default=None, ), ), ) ), ) transfer_issue = sgqlc.types.Field( "TransferIssuePayload", graphql_name="transferIssue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(TransferIssueInput), graphql_name="input", default=None, ), ), ) ), ) unarchive_repository = sgqlc.types.Field( "UnarchiveRepositoryPayload", graphql_name="unarchiveRepository", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UnarchiveRepositoryInput), graphql_name="input", default=None, ), ), ) ), ) unfollow_organization = sgqlc.types.Field( "UnfollowOrganizationPayload", graphql_name="unfollowOrganization", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UnfollowOrganizationInput), graphql_name="input", default=None, ), ), ) ), ) unfollow_user = sgqlc.types.Field( "UnfollowUserPayload", graphql_name="unfollowUser", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UnfollowUserInput), graphql_name="input", default=None, ), ), ) ), ) unlink_repository_from_project = sgqlc.types.Field( "UnlinkRepositoryFromProjectPayload", graphql_name="unlinkRepositoryFromProject", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UnlinkRepositoryFromProjectInput), graphql_name="input", default=None, ), ), ) ), ) unlock_lockable = sgqlc.types.Field( "UnlockLockablePayload", graphql_name="unlockLockable", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UnlockLockableInput), graphql_name="input", default=None, ), ), ) ), ) unmark_discussion_comment_as_answer = sgqlc.types.Field( 
"UnmarkDiscussionCommentAsAnswerPayload", graphql_name="unmarkDiscussionCommentAsAnswer", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UnmarkDiscussionCommentAsAnswerInput), graphql_name="input", default=None, ), ), ) ), ) unmark_file_as_viewed = sgqlc.types.Field( "UnmarkFileAsViewedPayload", graphql_name="unmarkFileAsViewed", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UnmarkFileAsViewedInput), graphql_name="input", default=None, ), ), ) ), ) unmark_issue_as_duplicate = sgqlc.types.Field( "UnmarkIssueAsDuplicatePayload", graphql_name="unmarkIssueAsDuplicate", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UnmarkIssueAsDuplicateInput), graphql_name="input", default=None, ), ), ) ), ) unminimize_comment = sgqlc.types.Field( "UnminimizeCommentPayload", graphql_name="unminimizeComment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UnminimizeCommentInput), graphql_name="input", default=None, ), ), ) ), ) unpin_issue = sgqlc.types.Field( "UnpinIssuePayload", graphql_name="unpinIssue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UnpinIssueInput), graphql_name="input", default=None, ), ), ) ), ) unresolve_review_thread = sgqlc.types.Field( "UnresolveReviewThreadPayload", graphql_name="unresolveReviewThread", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UnresolveReviewThreadInput), graphql_name="input", default=None, ), ), ) ), ) update_branch_protection_rule = sgqlc.types.Field( "UpdateBranchProtectionRulePayload", graphql_name="updateBranchProtectionRule", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateBranchProtectionRuleInput), graphql_name="input", default=None, ), ), ) ), ) update_check_run = sgqlc.types.Field( "UpdateCheckRunPayload", graphql_name="updateCheckRun", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( 
sgqlc.types.non_null(UpdateCheckRunInput), graphql_name="input", default=None, ), ), ) ), ) update_check_suite_preferences = sgqlc.types.Field( "UpdateCheckSuitePreferencesPayload", graphql_name="updateCheckSuitePreferences", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateCheckSuitePreferencesInput), graphql_name="input", default=None, ), ), ) ), ) update_discussion = sgqlc.types.Field( "UpdateDiscussionPayload", graphql_name="updateDiscussion", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateDiscussionInput), graphql_name="input", default=None, ), ), ) ), ) update_discussion_comment = sgqlc.types.Field( "UpdateDiscussionCommentPayload", graphql_name="updateDiscussionComment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateDiscussionCommentInput), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_administrator_role = sgqlc.types.Field( "UpdateEnterpriseAdministratorRolePayload", graphql_name="updateEnterpriseAdministratorRole", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateEnterpriseAdministratorRoleInput), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_allow_private_repository_forking_setting = sgqlc.types.Field( "UpdateEnterpriseAllowPrivateRepositoryForkingSettingPayload", graphql_name="updateEnterpriseAllowPrivateRepositoryForkingSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseAllowPrivateRepositoryForkingSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_default_repository_permission_setting = sgqlc.types.Field( "UpdateEnterpriseDefaultRepositoryPermissionSettingPayload", graphql_name="updateEnterpriseDefaultRepositoryPermissionSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseDefaultRepositoryPermissionSettingInput ), graphql_name="input", 
default=None, ), ), ) ), ) update_enterprise_members_can_change_repository_visibility_setting = sgqlc.types.Field( "UpdateEnterpriseMembersCanChangeRepositoryVisibilitySettingPayload", graphql_name="updateEnterpriseMembersCanChangeRepositoryVisibilitySetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseMembersCanChangeRepositoryVisibilitySettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_members_can_create_repositories_setting = sgqlc.types.Field( "UpdateEnterpriseMembersCanCreateRepositoriesSettingPayload", graphql_name="updateEnterpriseMembersCanCreateRepositoriesSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseMembersCanCreateRepositoriesSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_members_can_delete_issues_setting = sgqlc.types.Field( "UpdateEnterpriseMembersCanDeleteIssuesSettingPayload", graphql_name="updateEnterpriseMembersCanDeleteIssuesSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseMembersCanDeleteIssuesSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_members_can_delete_repositories_setting = sgqlc.types.Field( "UpdateEnterpriseMembersCanDeleteRepositoriesSettingPayload", graphql_name="updateEnterpriseMembersCanDeleteRepositoriesSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseMembersCanDeleteRepositoriesSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_members_can_invite_collaborators_setting = sgqlc.types.Field( "UpdateEnterpriseMembersCanInviteCollaboratorsSettingPayload", graphql_name="updateEnterpriseMembersCanInviteCollaboratorsSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseMembersCanInviteCollaboratorsSettingInput ), graphql_name="input", 
default=None, ), ), ) ), ) update_enterprise_members_can_make_purchases_setting = sgqlc.types.Field( "UpdateEnterpriseMembersCanMakePurchasesSettingPayload", graphql_name="updateEnterpriseMembersCanMakePurchasesSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseMembersCanMakePurchasesSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_members_can_update_protected_branches_setting = sgqlc.types.Field( "UpdateEnterpriseMembersCanUpdateProtectedBranchesSettingPayload", graphql_name="updateEnterpriseMembersCanUpdateProtectedBranchesSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseMembersCanUpdateProtectedBranchesSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_members_can_view_dependency_insights_setting = sgqlc.types.Field( "UpdateEnterpriseMembersCanViewDependencyInsightsSettingPayload", graphql_name="updateEnterpriseMembersCanViewDependencyInsightsSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseMembersCanViewDependencyInsightsSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_organization_projects_setting = sgqlc.types.Field( "UpdateEnterpriseOrganizationProjectsSettingPayload", graphql_name="updateEnterpriseOrganizationProjectsSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseOrganizationProjectsSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_owner_organization_role = sgqlc.types.Field( "UpdateEnterpriseOwnerOrganizationRolePayload", graphql_name="updateEnterpriseOwnerOrganizationRole", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseOwnerOrganizationRoleInput ), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_profile = sgqlc.types.Field( 
"UpdateEnterpriseProfilePayload", graphql_name="updateEnterpriseProfile", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateEnterpriseProfileInput), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_repository_projects_setting = sgqlc.types.Field( "UpdateEnterpriseRepositoryProjectsSettingPayload", graphql_name="updateEnterpriseRepositoryProjectsSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseRepositoryProjectsSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_team_discussions_setting = sgqlc.types.Field( "UpdateEnterpriseTeamDiscussionsSettingPayload", graphql_name="updateEnterpriseTeamDiscussionsSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseTeamDiscussionsSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_enterprise_two_factor_authentication_required_setting = sgqlc.types.Field( "UpdateEnterpriseTwoFactorAuthenticationRequiredSettingPayload", graphql_name="updateEnterpriseTwoFactorAuthenticationRequiredSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateEnterpriseTwoFactorAuthenticationRequiredSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_environment = sgqlc.types.Field( "UpdateEnvironmentPayload", graphql_name="updateEnvironment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateEnvironmentInput), graphql_name="input", default=None, ), ), ) ), ) update_ip_allow_list_enabled_setting = sgqlc.types.Field( "UpdateIpAllowListEnabledSettingPayload", graphql_name="updateIpAllowListEnabledSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateIpAllowListEnabledSettingInput), graphql_name="input", default=None, ), ), ) ), ) update_ip_allow_list_entry = sgqlc.types.Field( "UpdateIpAllowListEntryPayload", 
graphql_name="updateIpAllowListEntry", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateIpAllowListEntryInput), graphql_name="input", default=None, ), ), ) ), ) update_ip_allow_list_for_installed_apps_enabled_setting = sgqlc.types.Field( "UpdateIpAllowListForInstalledAppsEnabledSettingPayload", graphql_name="updateIpAllowListForInstalledAppsEnabledSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateIpAllowListForInstalledAppsEnabledSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_issue = sgqlc.types.Field( "UpdateIssuePayload", graphql_name="updateIssue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateIssueInput), graphql_name="input", default=None, ), ), ) ), ) update_issue_comment = sgqlc.types.Field( "UpdateIssueCommentPayload", graphql_name="updateIssueComment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateIssueCommentInput), graphql_name="input", default=None, ), ), ) ), ) update_notification_restriction_setting = sgqlc.types.Field( "UpdateNotificationRestrictionSettingPayload", graphql_name="updateNotificationRestrictionSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateNotificationRestrictionSettingInput), graphql_name="input", default=None, ), ), ) ), ) update_organization_allow_private_repository_forking_setting = sgqlc.types.Field( "UpdateOrganizationAllowPrivateRepositoryForkingSettingPayload", graphql_name="updateOrganizationAllowPrivateRepositoryForkingSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateOrganizationAllowPrivateRepositoryForkingSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_organization_web_commit_signoff_setting = sgqlc.types.Field( "UpdateOrganizationWebCommitSignoffSettingPayload", graphql_name="updateOrganizationWebCommitSignoffSetting", 
args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateOrganizationWebCommitSignoffSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_project = sgqlc.types.Field( "UpdateProjectPayload", graphql_name="updateProject", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateProjectInput), graphql_name="input", default=None, ), ), ) ), ) update_project_card = sgqlc.types.Field( "UpdateProjectCardPayload", graphql_name="updateProjectCard", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateProjectCardInput), graphql_name="input", default=None, ), ), ) ), ) update_project_column = sgqlc.types.Field( "UpdateProjectColumnPayload", graphql_name="updateProjectColumn", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateProjectColumnInput), graphql_name="input", default=None, ), ), ) ), ) update_project_draft_issue = sgqlc.types.Field( "UpdateProjectDraftIssuePayload", graphql_name="updateProjectDraftIssue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateProjectDraftIssueInput), graphql_name="input", default=None, ), ), ) ), ) update_project_next = sgqlc.types.Field( "UpdateProjectNextPayload", graphql_name="updateProjectNext", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateProjectNextInput), graphql_name="input", default=None, ), ), ) ), ) update_project_next_item_field = sgqlc.types.Field( "UpdateProjectNextItemFieldPayload", graphql_name="updateProjectNextItemField", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateProjectNextItemFieldInput), graphql_name="input", default=None, ), ), ) ), ) update_project_v2 = sgqlc.types.Field( "UpdateProjectV2Payload", graphql_name="updateProjectV2", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateProjectV2Input), graphql_name="input", default=None, ), ), ) ), 
) update_project_v2_draft_issue = sgqlc.types.Field( "UpdateProjectV2DraftIssuePayload", graphql_name="updateProjectV2DraftIssue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateProjectV2DraftIssueInput), graphql_name="input", default=None, ), ), ) ), ) update_project_v2_item_field_value = sgqlc.types.Field( "UpdateProjectV2ItemFieldValuePayload", graphql_name="updateProjectV2ItemFieldValue", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateProjectV2ItemFieldValueInput), graphql_name="input", default=None, ), ), ) ), ) update_project_v2_item_position = sgqlc.types.Field( "UpdateProjectV2ItemPositionPayload", graphql_name="updateProjectV2ItemPosition", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateProjectV2ItemPositionInput), graphql_name="input", default=None, ), ), ) ), ) update_pull_request = sgqlc.types.Field( "UpdatePullRequestPayload", graphql_name="updatePullRequest", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdatePullRequestInput), graphql_name="input", default=None, ), ), ) ), ) update_pull_request_branch = sgqlc.types.Field( "UpdatePullRequestBranchPayload", graphql_name="updatePullRequestBranch", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdatePullRequestBranchInput), graphql_name="input", default=None, ), ), ) ), ) update_pull_request_review = sgqlc.types.Field( "UpdatePullRequestReviewPayload", graphql_name="updatePullRequestReview", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdatePullRequestReviewInput), graphql_name="input", default=None, ), ), ) ), ) update_pull_request_review_comment = sgqlc.types.Field( "UpdatePullRequestReviewCommentPayload", graphql_name="updatePullRequestReviewComment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdatePullRequestReviewCommentInput), graphql_name="input", default=None, 
), ), ) ), ) update_ref = sgqlc.types.Field( "UpdateRefPayload", graphql_name="updateRef", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateRefInput), graphql_name="input", default=None, ), ), ) ), ) update_repository = sgqlc.types.Field( "UpdateRepositoryPayload", graphql_name="updateRepository", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateRepositoryInput), graphql_name="input", default=None, ), ), ) ), ) update_repository_web_commit_signoff_setting = sgqlc.types.Field( "UpdateRepositoryWebCommitSignoffSettingPayload", graphql_name="updateRepositoryWebCommitSignoffSetting", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null( UpdateRepositoryWebCommitSignoffSettingInput ), graphql_name="input", default=None, ), ), ) ), ) update_sponsorship_preferences = sgqlc.types.Field( "UpdateSponsorshipPreferencesPayload", graphql_name="updateSponsorshipPreferences", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateSponsorshipPreferencesInput), graphql_name="input", default=None, ), ), ) ), ) update_subscription = sgqlc.types.Field( "UpdateSubscriptionPayload", graphql_name="updateSubscription", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateSubscriptionInput), graphql_name="input", default=None, ), ), ) ), ) update_team_discussion = sgqlc.types.Field( "UpdateTeamDiscussionPayload", graphql_name="updateTeamDiscussion", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateTeamDiscussionInput), graphql_name="input", default=None, ), ), ) ), ) update_team_discussion_comment = sgqlc.types.Field( "UpdateTeamDiscussionCommentPayload", graphql_name="updateTeamDiscussionComment", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateTeamDiscussionCommentInput), graphql_name="input", default=None, ), ), ) ), ) update_teams_repository = sgqlc.types.Field( 
"UpdateTeamsRepositoryPayload", graphql_name="updateTeamsRepository", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateTeamsRepositoryInput), graphql_name="input", default=None, ), ), ) ), ) update_topics = sgqlc.types.Field( "UpdateTopicsPayload", graphql_name="updateTopics", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(UpdateTopicsInput), graphql_name="input", default=None, ), ), ) ), ) verify_verifiable_domain = sgqlc.types.Field( "VerifyVerifiableDomainPayload", graphql_name="verifyVerifiableDomain", args=sgqlc.types.ArgDict( ( ( "input", sgqlc.types.Arg( sgqlc.types.non_null(VerifyVerifiableDomainInput), graphql_name="input", default=None, ), ), ) ), )
Mutation
python
PrefectHQ__prefect
src/prefect/settings/models/server/services.py
{ "start": 1563, "end": 3787 }
class ____(ServicesBaseSetting): """ Settings for controlling the event persister service """ model_config: ClassVar[SettingsConfigDict] = build_settings_config( ("server", "services", "event_persister") ) enabled: bool = Field( default=True, description="Whether or not to start the event persister service in the server application.", validation_alias=AliasChoices( AliasPath("enabled"), "prefect_server_services_event_persister_enabled", "prefect_api_services_event_persister_enabled", ), ) batch_size: int = Field( default=20, gt=0, description="The number of events the event persister will attempt to insert in one batch.", validation_alias=AliasChoices( AliasPath("batch_size"), "prefect_server_services_event_persister_batch_size", "prefect_api_services_event_persister_batch_size", ), ) read_batch_size: int = Field( default=1, gt=0, description="The number of events the event persister will attempt to read from the message broker in one batch.", validation_alias=AliasChoices( AliasPath("read_batch_size"), "prefect_server_services_event_persister_read_batch_size", "prefect_api_services_event_persister_read_batch_size", ), ) flush_interval: float = Field( default=5, gt=0.0, description="The maximum number of seconds between flushes of the event persister.", validation_alias=AliasChoices( AliasPath("flush_interval"), "prefect_server_services_event_persister_flush_interval", "prefect_api_services_event_persister_flush_interval", ), ) batch_size_delete: int = Field( default=10_000, gt=0, description="The number of expired events and event resources the event persister will attempt to delete in one batch.", validation_alias=AliasChoices( AliasPath("batch_size_delete"), "prefect_server_services_event_persister_batch_size_delete", ), )
ServerServicesEventPersisterSettings
python
hynek__structlog
tests/test_threadlocal.py
{ "start": 11271, "end": 13821 }
class ____: def test_cleanup(self): """ Bindings are cleaned up """ with pytest.deprecated_call(), bound_threadlocal(x=42, y="foo"): assert {"x": 42, "y": "foo"} == get_threadlocal() with pytest.deprecated_call(): assert {} == get_threadlocal() def test_cleanup_conflict(self): """ Overwritten keys are restored after the clean up """ with pytest.deprecated_call(): bind_threadlocal(x="original", z="unrelated") with bound_threadlocal(x=42, y="foo"): assert { "x": 42, "y": "foo", "z": "unrelated", } == get_threadlocal() with pytest.deprecated_call(): assert {"x": "original", "z": "unrelated"} == get_threadlocal() def test_preserve_independent_bind(self): """ New bindings inside bound_threadlocal are preserved after the clean up """ with pytest.deprecated_call(), bound_threadlocal(x=42): bind_threadlocal(y="foo") assert {"x": 42, "y": "foo"} == get_threadlocal() with pytest.deprecated_call(): assert {"y": "foo"} == get_threadlocal() def test_nesting_works(self): """ bound_threadlocal binds and unbinds even when nested """ with pytest.deprecated_call(): with bound_threadlocal(l1=1): assert {"l1": 1} == get_threadlocal() with bound_threadlocal(l2=2): assert {"l1": 1, "l2": 2} == get_threadlocal() assert {"l1": 1} == get_threadlocal() assert {} == get_threadlocal() def test_as_decorator(self): """ bound_threadlocal can be used as a decorator and it preserves the name, signature and documentation of the wrapped function. """ @bound_threadlocal(x=42) def wrapped(arg1): """Wrapped documentation""" with pytest.deprecated_call(): bind_threadlocal(y=arg1) with pytest.deprecated_call(): assert {"x": 42, "y": arg1} == get_threadlocal() # I can't find a way for the warnings to be raised from the decorator. with pytest.deprecated_call(): wrapped(23) assert "wrapped" == wrapped.__name__ assert "(arg1)" == str(inspect.signature(wrapped)) assert "Wrapped documentation" == wrapped.__doc__
TestBoundThreadlocal
python
pytorch__pytorch
torch/__init__.py
{ "start": 71423, "end": 71643 }
class ____(_LegacyStorage): @classproperty def dtype(self): _warn_typed_storage_removal(stacklevel=3) return self._dtype @classproperty def _dtype(self): return torch.int
IntStorage
python
django__django
tests/admin_widgets/tests.py
{ "start": 1671, "end": 9991 }
class ____(SimpleTestCase): """ Tests for correct behavior of ModelAdmin.formfield_for_dbfield """ def assertFormfield(self, model, fieldname, widgetclass, **admin_overrides): """ Helper to call formfield_for_dbfield for a given model and field name and verify that the returned formfield is appropriate. """ # Override any settings on the model admin class MyModelAdmin(admin.ModelAdmin): pass for k in admin_overrides: setattr(MyModelAdmin, k, admin_overrides[k]) # Construct the admin, and ask it for a formfield ma = MyModelAdmin(model, admin.site) ff = ma.formfield_for_dbfield(model._meta.get_field(fieldname), request=None) # "unwrap" the widget wrapper, if needed if isinstance(ff.widget, widgets.RelatedFieldWidgetWrapper): widget = ff.widget.widget else: widget = ff.widget self.assertIsInstance(widget, widgetclass) # Return the formfield so that other tests can continue return ff def test_DateField(self): self.assertFormfield(Event, "start_date", widgets.AdminDateWidget) def test_DateTimeField(self): self.assertFormfield(Member, "birthdate", widgets.AdminSplitDateTime) def test_TimeField(self): self.assertFormfield(Event, "start_time", widgets.AdminTimeWidget) def test_TextField(self): self.assertFormfield(Event, "description", widgets.AdminTextareaWidget) def test_URLField(self): self.assertFormfield(Event, "link", widgets.AdminURLFieldWidget) def test_IntegerField(self): self.assertFormfield(Event, "min_age", widgets.AdminIntegerFieldWidget) def test_CharField(self): self.assertFormfield(Member, "name", widgets.AdminTextInputWidget) def test_EmailField(self): self.assertFormfield(Member, "email", widgets.AdminEmailInputWidget) def test_FileField(self): self.assertFormfield(Album, "cover_art", widgets.AdminFileWidget) def test_ForeignKey(self): self.assertFormfield(Event, "main_band", forms.Select) def test_raw_id_ForeignKey(self): self.assertFormfield( Event, "main_band", widgets.ForeignKeyRawIdWidget, raw_id_fields=["main_band"], ) def 
test_radio_fields_ForeignKey(self): ff = self.assertFormfield( Event, "main_band", widgets.AdminRadioSelect, radio_fields={"main_band": admin.VERTICAL}, ) self.assertIsNone(ff.empty_label) def test_radio_fields_foreignkey_formfield_overrides_empty_label(self): class MyModelAdmin(admin.ModelAdmin): radio_fields = {"parent": admin.VERTICAL} formfield_overrides = { ForeignKey: {"empty_label": "Custom empty label"}, } ma = MyModelAdmin(Inventory, admin.site) ff = ma.formfield_for_dbfield(Inventory._meta.get_field("parent"), request=None) self.assertEqual(ff.empty_label, "Custom empty label") def test_many_to_many(self): self.assertFormfield(Band, "members", forms.SelectMultiple) def test_raw_id_many_to_many(self): self.assertFormfield( Band, "members", widgets.ManyToManyRawIdWidget, raw_id_fields=["members"] ) def test_filtered_many_to_many(self): self.assertFormfield( Band, "members", widgets.FilteredSelectMultiple, filter_vertical=["members"] ) def test_formfield_overrides(self): self.assertFormfield( Event, "start_date", forms.TextInput, formfield_overrides={DateField: {"widget": forms.TextInput}}, ) def test_formfield_overrides_widget_instances(self): """ Widget instances in formfield_overrides are not shared between different fields. (#19423) """ class BandAdmin(admin.ModelAdmin): formfield_overrides = { CharField: {"widget": forms.TextInput(attrs={"size": "10"})} } ma = BandAdmin(Band, admin.site) f1 = ma.formfield_for_dbfield(Band._meta.get_field("name"), request=None) f2 = ma.formfield_for_dbfield(Band._meta.get_field("style"), request=None) self.assertNotEqual(f1.widget, f2.widget) self.assertEqual(f1.widget.attrs["maxlength"], "100") self.assertEqual(f2.widget.attrs["maxlength"], "20") self.assertEqual(f2.widget.attrs["size"], "10") def test_formfield_overrides_m2m_filter_widget(self): """ The autocomplete_fields, raw_id_fields, filter_vertical, and filter_horizontal widgets for ManyToManyFields may be overridden by specifying a widget in formfield_overrides. 
""" class BandAdmin(admin.ModelAdmin): filter_vertical = ["members"] formfield_overrides = { ManyToManyField: {"widget": forms.CheckboxSelectMultiple}, } ma = BandAdmin(Band, admin.site) field = ma.formfield_for_dbfield(Band._meta.get_field("members"), request=None) self.assertIsInstance(field.widget.widget, forms.CheckboxSelectMultiple) def test_formfield_overrides_for_datetime_field(self): """ Overriding the widget for DateTimeField doesn't overrides the default form_class for that field (#26449). """ class MemberAdmin(admin.ModelAdmin): formfield_overrides = { DateTimeField: {"widget": widgets.AdminSplitDateTime} } ma = MemberAdmin(Member, admin.site) f1 = ma.formfield_for_dbfield(Member._meta.get_field("birthdate"), request=None) self.assertIsInstance(f1.widget, widgets.AdminSplitDateTime) self.assertIsInstance(f1, forms.SplitDateTimeField) def test_formfield_overrides_for_custom_field(self): """ formfield_overrides works for a custom field class. """ class AlbumAdmin(admin.ModelAdmin): formfield_overrides = {MyFileField: {"widget": forms.TextInput()}} ma = AlbumAdmin(Member, admin.site) f1 = ma.formfield_for_dbfield( Album._meta.get_field("backside_art"), request=None ) self.assertIsInstance(f1.widget, forms.TextInput) def test_field_with_choices(self): self.assertFormfield(Member, "gender", forms.Select) def test_choices_with_radio_fields(self): self.assertFormfield( Member, "gender", widgets.AdminRadioSelect, radio_fields={"gender": admin.VERTICAL}, ) def test_inheritance(self): self.assertFormfield(Album, "backside_art", widgets.AdminFileWidget) def test_m2m_widgets(self): """m2m fields help text as it applies to admin app (#9321).""" class AdvisorAdmin(admin.ModelAdmin): filter_vertical = ["companies"] self.assertFormfield( Advisor, "companies", widgets.FilteredSelectMultiple, filter_vertical=["companies"], ) ma = AdvisorAdmin(Advisor, admin.site) f = ma.formfield_for_dbfield(Advisor._meta.get_field("companies"), request=None) self.assertEqual( 
f.help_text, "Hold down “Control”, or “Command” on a Mac, to select more than one.", ) def test_m2m_widgets_no_allow_multiple_selected(self): class NoAllowMultipleSelectedWidget(forms.SelectMultiple): allow_multiple_selected = False class AdvisorAdmin(admin.ModelAdmin): filter_vertical = ["companies"] formfield_overrides = { ManyToManyField: {"widget": NoAllowMultipleSelectedWidget}, } self.assertFormfield( Advisor, "companies", widgets.FilteredSelectMultiple, filter_vertical=["companies"], ) ma = AdvisorAdmin(Advisor, admin.site) f = ma.formfield_for_dbfield(Advisor._meta.get_field("companies"), request=None) self.assertEqual(f.help_text, "") @override_settings(ROOT_URLCONF="admin_widgets.urls")
AdminFormfieldForDBFieldTests
python
getsentry__sentry
src/sentry/tasks/check_am2_compatibility.py
{ "start": 9214, "end": 26963 }
class ____: @classmethod def get_widget_url(cls, org_slug, dashboard_id, widget_id) -> str: return f"https://{org_slug}.sentry.io/organizations/{org_slug}/dashboard/{dashboard_id}/widget/{widget_id}/" @classmethod def get_alert_url(cls, org_slug, alert_id) -> str: return f"https://{org_slug}.sentry.io/organizations/{org_slug}/alerts/rules/details/{alert_id}/" @classmethod def get_found_sdks_url(cls, org_slug): return ( f"https://{org_slug}.sentry.io/organizations/{org_slug}/discover/homepage/?field=count%28%29&field" f"=project&field=sdk.name&field=sdk.version&query=event.type%3Atransaction&statsPeriod=30d&yAxis=count%28" f"%29" ) @classmethod def compare_versions(cls, version1, version2): # Split the version strings into individual numbers nums1 = version1.split(".") nums2 = version2.split(".") # Pad the shorter version with zeros to ensure equal length length = max(len(nums1), len(nums2)) nums1 = (["0"] * (length - len(nums1))) + nums1 nums2 = (["0"] * (length - len(nums2))) + nums2 # Compare the numbers from left to right for num1, num2 in zip(nums1, nums2): if int(num1) > int(num2): return 1 elif int(num1) < int(num2): return -1 # All numbers are equal return 0 @classmethod def format_results( cls, organization, unsupported_widgets, unsupported_alerts, ondemand_supported_widgets, outdated_sdks_per_project, ): results: dict[str, Any] = {} widgets = [] for dashboard_id, widget_data in unsupported_widgets.items(): unsupported = [] for widget_id, fields, conditions in widget_data: unsupported.append( { "id": widget_id, "url": cls.get_widget_url(organization.slug, dashboard_id, widget_id), "fields": fields, "conditions": conditions, } ) widgets.append({"dashboard_id": dashboard_id, "unsupported": unsupported}) results["widgets"] = widgets ondemand_widgets = [] for dashboard_id, widget_data in ondemand_supported_widgets.items(): ondemand_supported = [] for widget_id, fields, conditions in widget_data: ondemand_supported.append( { "id": widget_id, "url": 
cls.get_widget_url(organization.slug, dashboard_id, widget_id), "fields": fields, "conditions": conditions, } ) ondemand_widgets.append( {"dashboard_id": dashboard_id, "ondemand_supported": ondemand_supported} ) results["ondemand_widgets"] = ondemand_widgets alerts = [] for alert_id, aggregate, query in unsupported_alerts: alerts.append( { "id": alert_id, "url": cls.get_alert_url(organization.slug, alert_id), "aggregate": aggregate, "query": query, } ) results["alerts"] = alerts projects = [] for project, found_sdks in outdated_sdks_per_project.items(): unsupported = [] for sdk_name, sdk_versions in found_sdks.items(): unsupported.append( { "sdk_name": sdk_name, "sdk_versions": [ # Required will be None in case we didn't manage to find the SDK in the compatibility # list. {"found": found, "required": required} for found, required in sdk_versions ], } ) projects.append({"project": project, "unsupported": unsupported}) results["sdks"] = {"url": cls.get_found_sdks_url(organization.slug), "projects": projects} return results @classmethod def extract_sdks_from_data(cls, data): found_sdks_per_project: Mapping[str, Mapping[str, set[str]]] = defaultdict( lambda: defaultdict(set) ) for element in data: project = element.get("project") sdk_name = element.get("sdk.name") sdk_version = element.get("sdk.version") if sdk_name and sdk_version: found_sdks_per_project[project][sdk_name].add(sdk_version) return found_sdks_per_project @classmethod def get_outdated_sdks(cls, found_sdks_per_project): outdated_sdks_per_project: Mapping[str, Mapping[str, set[tuple[str, str | None]]]] = ( defaultdict(lambda: defaultdict(set)) ) for project, found_sdks in found_sdks_per_project.items(): for sdk_name, sdk_versions in found_sdks.items(): # If the SDK is not supporting performance, we don't want to try and check dynamic sampling # compatibility, and we also don't return it as unsupported since it will create noise. 
if sdk_name not in SDKS_SUPPORTING_PERFORMANCE: continue sdk_versions_set: set[tuple[str, str | None]] = set() found_supported_version = False min_sdk_version = SUPPORTED_SDK_VERSIONS.get(sdk_name) for sdk_version in sdk_versions: if min_sdk_version is None: # If we didn't find the SDK, we suppose it doesn't support dynamic sampling. sdk_versions_set.add((sdk_version, None)) else: # We run the semver comparison for the two sdk versions. comparison = cls.compare_versions(sdk_version, min_sdk_version) if comparison == -1: # If the sdk version is less it means that it doesn't support dynamic sampling, and we want # to add it to the unsupported list. sdk_versions_set.add((sdk_version, min_sdk_version)) else: # In case we end up here, it means that the sdk version found is >= than the minimum # version, thus want to skip the iteration since we don't want to show possible unsupported # versions in case at least one supported version is found. found_supported_version = True break # In case we didn't find any supported sdks, we want to return the entire list of unsupported sdks. if not found_supported_version and sdk_versions_set: outdated_sdks_per_project[project][sdk_name].update(sdk_versions_set) return outdated_sdks_per_project @classmethod def get_sdks_version_used(cls, organization, project_objects): # We use the count() operation in order to group by project, sdk.name and sdk.version. 
selected_columns = ["count()", "project", "sdk.name", "sdk.version"] params = SnubaParams( start=datetime.now(tz=timezone.utc) - timedelta(days=QUERY_TIME_RANGE_IN_DAYS), end=datetime.now(tz=timezone.utc), organization=organization, projects=project_objects, ) try: results = discover_query( selected_columns=selected_columns, query="event.type:transaction", snuba_params=params, referrer="api.organization-events", ) found_sdks_per_project = cls.extract_sdks_from_data(results.get("data")) outdated_sdks_per_project = cls.get_outdated_sdks(found_sdks_per_project) return outdated_sdks_per_project except Exception: return None @classmethod def is_metrics_data(cls, organization_id, project_objects, query): selected_columns = ["count()"] params = { "organization_id": organization_id, "project_objects": project_objects, "start": datetime.now(tz=timezone.utc) - timedelta(days=QUERY_TIME_RANGE_IN_DAYS), "end": datetime.now(tz=timezone.utc), } try: builder = MetricsQueryBuilder( params, dataset=Dataset.PerformanceMetrics, query=query, selected_columns=selected_columns, config=QueryBuilderConfig( allow_metric_aggregates=True, auto_fields=False, use_metrics_layer=False, on_demand_metrics_enabled=False, ), ) builder.get_snql_query() return True except IncompatibleMetricsQuery: return False except Exception: return None @classmethod def is_on_demand_metrics_data(cls, aggregate, query): return should_use_on_demand_metrics( Dataset.Transactions.value, aggregate, query, None, True ) @classmethod def get_excluded_conditions(cls): # We want an empty condition as identity for the AND chaining. qs = Q() for condition in EXCLUDED_CONDITIONS: # We want to build an AND condition with multiple negated elements. 
qs &= ~Q(conditions__icontains=condition) qs &= ~Q(fields__icontains=condition) return qs @classmethod def get_all_widgets_of_organization(cls, organization_id): return DashboardWidgetQuery.objects.filter( cls.get_excluded_conditions(), widget__dashboard__organization_id=organization_id, widget__widget_type=0, ).values_list( "widget__id", "widget__dashboard__id", "widget__dashboard__title", "fields", "conditions", ) @classmethod def get_all_alerts_of_organization(cls, organization_id): return ( AlertRule.objects.filter( organization_id=organization_id, snuba_query__dataset=Dataset.Transactions.value, ) .select_related("snuba_query") .values_list("id", "snuba_query__aggregate", "snuba_query__query") ) @classmethod def get_ondemand_widget_ids(cls, organization_id): current_version = OnDemandMetricSpecVersioning.get_query_spec_version(organization_id) widget_ids = DashboardWidgetQueryOnDemand.objects.filter( spec_version=current_version.version, dashboard_widget_query__widget__dashboard__organization_id=organization_id, extraction_state__startswith=ON_DEMAND_ENABLED_KEY, ).values_list("dashboard_widget_query__widget_id", flat=True) return set(widget_ids) @classmethod def run_compatibility_check(cls, org_id): organization = Organization.objects.get(id=org_id) all_projects = list(Project.objects.using_replica().filter(organization=organization)) unsupported_widgets = defaultdict(list) ondemand_supported_widgets = defaultdict(list) ondemand_widget_ids = cls.get_ondemand_widget_ids(org_id) for ( widget_id, dashboard_id, dashboard_title, fields, conditions, ) in cls.get_all_widgets_of_organization(organization.id): # We run this query by selecting all projects, so that the widget query should never fail in case the # `query` contains "project:something". 
supports_metrics = cls.is_metrics_data(organization.id, all_projects, conditions) supports_ondemand = widget_id in ondemand_widget_ids if supports_ondemand: # If it supports on demand it's no longer unsupported, but until all data has begun migrating # we should still be showing the widgets so they can be checked. ondemand_supported_widgets[dashboard_id].append((widget_id, fields, conditions)) if supports_metrics is None: with sentry_sdk.isolation_scope() as scope: scope.set_tag("org_id", organization.id) scope.set_extra("widget_id", widget_id) scope.set_extra("fields", fields) scope.set_extra("conditions", conditions) sentry_sdk.capture_message("Can't figure out AM2 compatibility for widget.") continue if not supports_metrics and not supports_ondemand: # We mark whether a metric is not supported. unsupported_widgets[dashboard_id].append((widget_id, fields, conditions)) unsupported_alerts = [] for alert_id, aggregate, query in cls.get_all_alerts_of_organization(organization.id): supports_metrics = cls.is_on_demand_metrics_data( aggregate, query ) or cls.is_metrics_data(organization.id, all_projects, query) if supports_metrics is None: with sentry_sdk.isolation_scope() as scope: scope.set_tag("org_id", organization.id) scope.set_extra("alert_id", alert_id) scope.set_extra("aggregate", aggregate) scope.set_extra("query", query) sentry_sdk.capture_message("Can't figure out AM2 compatibility for alert.") continue if not supports_metrics: # We mark whether a metric is not supported. 
unsupported_alerts.append((alert_id, aggregate, query)) outdated_sdks_per_project = cls.get_sdks_version_used(organization, all_projects) if outdated_sdks_per_project is None: with sentry_sdk.isolation_scope() as scope: scope.set_tag("org_id", organization.id) sentry_sdk.capture_message("Can't figure out outdated SDKs.") outdated_sdks_per_project = {} return cls.format_results( organization, unsupported_widgets, unsupported_alerts, ondemand_supported_widgets, outdated_sdks_per_project, ) def generate_cache_key_for_async_progress(org_id) -> str: return f"ds::o:{org_id}:check_am2_compatibility_status" def generate_cache_key_for_async_result(org_id) -> str: return f"ds::o:{org_id}:check_am2_compatibility_results" def set_check_status(org_id, status, ttl=CACHING_TTL_IN_SECONDS): redis_client = get_redis_client_for_ds() cache_key = generate_cache_key_for_async_progress(org_id) redis_client.set(cache_key, status.value) redis_client.expire(cache_key, ttl) def get_check_status(org_id): redis_client = get_redis_client_for_ds() cache_key = generate_cache_key_for_async_progress(org_id) try: cached_status = redis_client.get(cache_key) if cached_status: float_cached_status = float(cached_status) return CheckStatus(float_cached_status) except (TypeError, ValueError): pass return None def set_check_results(org_id, results): redis_client = get_redis_client_for_ds() cache_key = generate_cache_key_for_async_result(org_id) redis_client.set(cache_key, json.dumps(results)) redis_client.expire(cache_key, CACHING_TTL_IN_SECONDS) def get_check_results(org_id): redis_client = get_redis_client_for_ds() cache_key = generate_cache_key_for_async_result(org_id) try: serialised_val = redis_client.get(cache_key) # We check if there is a value in cache. 
if serialised_val: return json.loads(serialised_val) except (TypeError, ValueError): return None def refresh_check_state(org_id): redis_client = get_redis_client_for_ds() status_cache_key = generate_cache_key_for_async_progress(org_id) results_cache_key = generate_cache_key_for_async_result(org_id) redis_client.delete(status_cache_key, results_cache_key) @instrumented_task( name="sentry.tasks.check_am2_compatibility", namespace=telemetry_experience_tasks, processing_deadline_duration=TASK_SOFT_LIMIT_IN_SECONDS + 5, retry=Retry(times=1, delay=5), silo_mode=SiloMode.REGION, ) def run_compatibility_check_async(org_id): try: set_check_status(org_id, CheckStatus.IN_PROGRESS) results = CheckAM2Compatibility.run_compatibility_check(org_id) # The expiration of these two cache keys will be arbitrarily different due to the different times in which # Redis might apply the operation, but we don't care, as long as the status outlives the result, since we check # the status for determining if we want to proceed to even read a possible result. set_check_status(org_id, CheckStatus.DONE) set_check_results(org_id, {"results": results}) except Exception as e: sentry_sdk.capture_exception(e) # We want to store the error status for 1 minutes, after that the system will auto reset and we will run the # compatibility check again if follow-up requests happen. set_check_status(org_id, CheckStatus.ERROR, ONE_MINUTE_TTL)
CheckAM2Compatibility
python
getsentry__sentry-python
tests/integrations/django/myapp/views.py
{ "start": 2444, "end": 3279 }
class ____: def __call__(self, request): return HttpResponse("ok") @csrf_exempt def read_body_and_view_exc(request): request.read() 1 / 0 @csrf_exempt def message(request): sentry_sdk.capture_message("hi") return HttpResponse("ok") @csrf_exempt def nomessage(request): return HttpResponse("ok") @csrf_exempt def view_with_signal(request): custom_signal = Signal() custom_signal.send(sender="hello") return HttpResponse("ok") @csrf_exempt def mylogin(request): user = User.objects.create_user("john", "lennon@thebeatles.com", "johnpassword") user.backend = "django.contrib.auth.backends.ModelBackend" login(request, user) return HttpResponse("ok") @csrf_exempt def handler500(request): return HttpResponseServerError("Sentry error.")
SentryClassBasedViewWithCsrf
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/styles/base.py
{ "start": 352, "end": 2833 }
class ____(NamedTuple): color: str | None bgcolor: str | None bold: bool | None underline: bool | None strike: bool | None italic: bool | None blink: bool | None reverse: bool | None hidden: bool | None dim: bool | None """ :param color: Hexadecimal string. E.g. '000000' or Ansi color name: e.g. 'ansiblue' :param bgcolor: Hexadecimal string. E.g. 'ffffff' or Ansi color name: e.g. 'ansired' :param bold: Boolean :param underline: Boolean :param strike: Boolean :param italic: Boolean :param blink: Boolean :param reverse: Boolean :param hidden: Boolean :param dim: Boolean """ #: The default `Attrs`. DEFAULT_ATTRS = Attrs( color="", bgcolor="", bold=False, underline=False, strike=False, italic=False, blink=False, reverse=False, hidden=False, dim=False, ) #: ``Attrs.bgcolor/fgcolor`` can be in either 'ffffff' format, or can be any of #: the following in case we want to take colors from the 8/16 color palette. #: Usually, in that case, the terminal application allows to configure the RGB #: values for these names. #: ISO 6429 colors ANSI_COLOR_NAMES = [ "ansidefault", # Low intensity, dark. (One or two components 0x80, the other 0x00.) "ansiblack", "ansired", "ansigreen", "ansiyellow", "ansiblue", "ansimagenta", "ansicyan", "ansigray", # High intensity, bright. (One or two components 0xff, the other 0x00. Not supported everywhere.) "ansibrightblack", "ansibrightred", "ansibrightgreen", "ansibrightyellow", "ansibrightblue", "ansibrightmagenta", "ansibrightcyan", "ansiwhite", ] # People don't use the same ANSI color names everywhere. In prompt_toolkit 1.0 # we used some unconventional names (which were contributed like that to # Pygments). This is fixed now, but we still support the old names. # The table below maps the old aliases to the current names. 
ANSI_COLOR_NAMES_ALIASES: dict[str, str] = { "ansidarkgray": "ansibrightblack", "ansiteal": "ansicyan", "ansiturquoise": "ansibrightcyan", "ansibrown": "ansiyellow", "ansipurple": "ansimagenta", "ansifuchsia": "ansibrightmagenta", "ansilightgray": "ansigray", "ansidarkred": "ansired", "ansidarkgreen": "ansigreen", "ansidarkblue": "ansiblue", } assert set(ANSI_COLOR_NAMES_ALIASES.values()).issubset(set(ANSI_COLOR_NAMES)) assert not (set(ANSI_COLOR_NAMES_ALIASES.keys()) & set(ANSI_COLOR_NAMES))
Attrs
python
wandb__wandb
wandb/vendor/pygments/filter.py
{ "start": 1069, "end": 1351 }
class ____(object): """ Default filter. Subclass this class or use the `simplefilter` decorator to create own filters. """ def __init__(self, **options): self.options = options def filter(self, lexer, stream): raise NotImplementedError
Filter
python
kamyu104__LeetCode-Solutions
Python/minimum-cost-to-divide-array-into-subarrays.py
{ "start": 180, "end": 339 }
class ____(object): def __init__(self, level=0, val=None): self.val = val self.nexts = [None]*level self.prevs = [None]*level
SkipNode
python
psf__black
tests/util.py
{ "start": 2676, "end": 6344 }
class ____(Exception): """Used to wrap failures when assert_format() runs in an extra mode.""" def assert_format( source: str, expected: str, mode: black.Mode = DEFAULT_MODE, *, fast: bool = False, minimum_version: tuple[int, int] | None = None, lines: Collection[tuple[int, int]] = (), no_preview_line_length_1: bool = False, ) -> None: """Convenience function to check that Black formats as expected. You can pass @minimum_version if you're passing code with newer syntax to guard safety guards so they don't just crash with a SyntaxError. Please note this is separate from TargetVerson Mode configuration. """ _assert_format_inner( source, expected, mode, fast=fast, minimum_version=minimum_version, lines=lines ) # For both preview and non-preview tests, ensure that Black doesn't crash on # this code, but don't pass "expected" because the precise output may differ. try: if mode.unstable: new_mode = replace(mode, unstable=False, preview=False) else: new_mode = replace(mode, preview=not mode.preview) _assert_format_inner( source, None, new_mode, fast=fast, minimum_version=minimum_version, lines=lines, ) except Exception as e: text = ( "unstable" if mode.unstable else "non-preview" if mode.preview else "preview" ) raise FormatFailure( f"Black crashed formatting this case in {text} mode." ) from e # Similarly, setting line length to 1 is a good way to catch # stability bugs. Some tests are known to be broken in preview mode with line length # of 1 though, and have marked that with a flag --no-preview-line-length-1 preview_modes = [False] if not no_preview_line_length_1: preview_modes.append(True) for preview_mode in preview_modes: try: _assert_format_inner( source, None, replace(mode, preview=preview_mode, line_length=1, unstable=False), fast=fast, minimum_version=minimum_version, lines=lines, ) except Exception as e: text = "preview" if preview_mode else "non-preview" raise FormatFailure( f"Black crashed formatting this case in {text} mode with line-length=1." 
) from e def _assert_format_inner( source: str, expected: str | None = None, mode: black.Mode = DEFAULT_MODE, *, fast: bool = False, minimum_version: tuple[int, int] | None = None, lines: Collection[tuple[int, int]] = (), ) -> None: actual = black.format_str(source, mode=mode, lines=lines) if expected is not None: _assert_format_equal(expected, actual) # It's not useful to run safety checks if we're expecting no changes anyway. The # assertion right above will raise if reality does actually make changes. This just # avoids wasted CPU cycles. if not fast and source != actual: # Unfortunately the AST equivalence check relies on the built-in ast module # being able to parse the code being formatted. This doesn't always work out # when checking modern code on older versions. if minimum_version is None or sys.version_info >= minimum_version: black.assert_equivalent(source, actual) black.assert_stable(source, actual, mode=mode, lines=lines) def dump_to_stderr(*output: str) -> str: return "\n" + "\n".join(output) + "\n"
FormatFailure
python
huggingface__transformers
src/transformers/models/glm4v/modular_glm4v.py
{ "start": 70726, "end": 71013 }
class ____(Qwen2VLProcessorKwargs): _defaults = { "text_kwargs": { "padding": False, "return_token_type_ids": False, "return_mm_token_type_ids": False, }, "videos_kwargs": {"return_metadata": True}, }
Glm4vProcessorKwargs
python
kubernetes-client__python
kubernetes/client/models/v1_self_subject_review_status.py
{ "start": 383, "end": 3492 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'user_info': 'V1UserInfo' } attribute_map = { 'user_info': 'userInfo' } def __init__(self, user_info=None, local_vars_configuration=None): # noqa: E501 """V1SelfSubjectReviewStatus - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._user_info = None self.discriminator = None if user_info is not None: self.user_info = user_info @property def user_info(self): """Gets the user_info of this V1SelfSubjectReviewStatus. # noqa: E501 :return: The user_info of this V1SelfSubjectReviewStatus. # noqa: E501 :rtype: V1UserInfo """ return self._user_info @user_info.setter def user_info(self, user_info): """Sets the user_info of this V1SelfSubjectReviewStatus. :param user_info: The user_info of this V1SelfSubjectReviewStatus. 
# noqa: E501 :type: V1UserInfo """ self._user_info = user_info def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1SelfSubjectReviewStatus): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1SelfSubjectReviewStatus): return True return self.to_dict() != other.to_dict()
V1SelfSubjectReviewStatus
python
sympy__sympy
sympy/physics/mechanics/wrapping_geometry.py
{ "start": 17936, "end": 30055 }
class ____(WrappingGeometryBase):
    """A solid (infinite) conical object.

    Explanation
    ===========

    A wrapping geometry that allows for circular arcs to be defined between
    pairs of points on the surface of a cone. These paths are always geodetic
    (the shortest possible) in the sense that they become straight lines on
    the unwrapped conical surface.

    Examples
    ========

    To create a ``WrappingCone`` instance, a ``Symbol`` denoting its
    semi-vertical angle, a ``Point`` defining its apex, and a ``Vector``
    specifying its axis are needed:

    >>> from sympy import symbols
    >>> from sympy.physics.mechanics import Point, ReferenceFrame, WrappingCone
    >>> N = ReferenceFrame('N')
    >>> alpha = symbols('alpha')
    >>> pO = Point('pO')
    >>> ax = N.z

    A cone with semi-vertical angle ``alpha``, apex at ``pO``, and axis
    aligned with ``N.z`` can be instantiated with:

    >>> WrappingCone(alpha, pO, ax)
    WrappingCone(alpha=alpha, apex=pO, axis=N.z)

    Parameters
    ==========

    alpha : Symbol
        The semi-vertical angle of the cone.
    apex : Point
        The tip of the cone where the curved surface meets.
    axis : Vector
        The axis along which the cone is aligned.

    See Also
    ========

    WrappingCylinder
        Cylindrical geometry where wrapping arcs are geodetic on the cylinder.
    WrappingSphere
        Spherical geometry where the wrapping direction is always geodetic.

    """

    def __init__(self, alpha, apex, axis):
        """Initializer for ``WrappingCone``.

        Parameters
        ==========

        alpha: Symbol
            The semi vertical angle of the cone.
        apex: Point
            The tip of the cone where the curved surface meets.
        axis: Vector
            The axis along which the cone is aligned.

        """
        # A numeric alpha must lie strictly in (0, pi/2): a zero angle
        # degenerates to a line and pi/2 degenerates to a plane.
        if alpha.is_number:
            if alpha == 0:
                raise ValueError(
                    "Cone angle alpha must be positive."
                )
            if alpha == pi / 2:
                raise ValueError(
                    "Cone angle alpha must be less than pi/2."
                )
        # For a symbolic alpha, reject only when SymPy can prove it is not a
        # positive real (is_real/is_positive being False, not merely None).
        elif alpha.is_real is False or alpha.is_positive is False:
            raise ValueError(
                "Cone angle alpha must be real and positive."
            )
        self._alpha = alpha

        if not isinstance(apex, Point):
            raise TypeError("The 'apex' must be a Point object.")
        self._apex = apex

        if not isinstance(axis, Vector):
            raise TypeError("The 'axis' must be a Vector object.")
        # Store the axis as a unit vector so downstream dot products give
        # axial components directly.
        self._axis = axis.normalize()

    @property
    def point(self):
        """This method is implemented as required by WrappingGeometryBase,
        use WrappingCone.apex instead."""
        return self._apex

    @property
    def alpha(self):
        """The semi vertical angle of the cone."""
        return self._alpha

    @alpha.setter
    def alpha(self, alpha):
        self._alpha = alpha

    @property
    def apex(self):
        """The tip of the cone where the curved surface meets."""
        return self._apex

    @apex.setter
    def apex(self, apex):
        self._apex = apex

    @property
    def axis(self):
        """The axis along which the cone is aligned."""
        return self._axis

    @axis.setter
    def axis(self, axis):
        self._axis = axis.normalize()

    def point_on_surface(self, point):
        """Returns a symbolic equality for whether a point is on the cone's
        surface.

        Parameters
        ==========

        point : Point
            The point for which the expression is to be generated.

        """
        position = point.pos_from(self.apex)
        # Split the position into components parallel and perpendicular to
        # the cone's axis; on the surface |radial| = |axial| * tan(alpha).
        axis_component = position.dot(self.axis) * self.axis
        radial_component = position - axis_component

        lhs = radial_component.dot(radial_component)
        rhs = axis_component.dot(axis_component) * tan(self.alpha) ** 2
        return Eq(lhs, rhs, evaluate=False)

    def geodesic_length(self, point_1, point_2):
        """The shortest distance between two points on a conical surface.

        Explanation
        ===========

        Computes the geodesic by "unwrapping" the cone into a planar sector
        and measuring the straight line distance between the corresponding
        points in that sector.

        Examples
        ========

        >>> from sympy import pi, sqrt
        >>> from sympy.physics.mechanics import Point, ReferenceFrame, WrappingCone
        >>> N = ReferenceFrame('N')
        >>> alpha = pi/6
        >>> apex = Point('O')
        >>> cone = WrappingCone(alpha, apex, N.z)
        >>> p1 = Point('A')
        >>> p1.set_pos(apex, N.x / sqrt(3) + N.z)
        >>> p2 = Point('B')
        >>> p2.set_pos(apex, N.y / sqrt(3) + N.z)
        >>> cone.geodesic_length(p1, p2)
        sqrt(8/3 - 4*sqrt(2)/3)

        Parameters
        ==========

        point_1 : Point
            Starting point on the cone's surface.
        point_2 : Point
            Ending point on the cone's surface.

        """
        pos1 = point_1.pos_from(self.apex)
        pos2 = point_2.pos_from(self.apex)
        # Axial heights of the two points above the apex.
        z1 = pos1.dot(self.axis)
        z2 = pos2.dot(self.axis)
        # Slant distances from the apex along the surface; these become the
        # polar radii in the unrolled planar sector.
        s1 = z1 / cos(self.alpha)
        s2 = z2 / cos(self.alpha)
        # Unit vectors towards each point's projection perpendicular to the
        # axis, used to measure the central angle between them.
        n1 = (pos1 - z1 * self.axis).normalize()
        n2 = (pos2 - z2 * self.axis).normalize()

        central = _directional_atan(
            cancel((n1.cross(n2)).dot(self.axis)),
            cancel(n1.dot(n2))
        )
        # Take the non-reflex angle so the shorter way around is used.
        central = Piecewise((central, central <= pi), (2 * pi - central, True))
        # Angle between the two generators in the unrolled sector.
        delta_u = central * sin(self.alpha)

        # Law of cosines in the unrolled plane gives the straight-line (and
        # hence geodesic) distance.
        return sqrt(s1**2 + s2**2 - 2*s1*s2*cos(delta_u))

    def geodesic_end_vectors(self, point_1, point_2):
        """Computes the unit tangent vectors for the geodesic path at its
        endpoints.

        The tangent vector of the geodesic is found by considering the
        straight-line path in the unrolled planar sector representation of
        the cone. This vector is then mapped back to the 3D space at each
        endpoint.
        """
        # Get the position vectors of the points relative to the cone's apex.
        # All subsequent calculations are performed in the cone's reference frame.
        pos1 = point_1.pos_from(self.apex)
        pos2 = point_2.pos_from(self.apex)

        # A unique geodesic cannot be defined between two identical points.
        if pos1 == pos2:
            raise ValueError(
                f'No unique geodesic exists for coincident points {point_1} and {point_2}.'
            )

        # If one point is the apex, the geodesic is the straight line along the
        # cone's surface to the other point. The tangent at the apex points
        # towards the other point, and the tangent at the other point points
        # away from the apex.
        if pos1.magnitude() == 0:
            v = pos2.normalize()
            return (v, -v)
        if pos2.magnitude() == 0:
            v = pos1.normalize()
            return (-v, v)

        # Project the position vectors onto the cone's axis to get the z-height.
        z1 = pos1.dot(self.axis)
        z2 = pos2.dot(self.axis)

        # Calculate the slant height (distance from apex to point along the
        # cone's surface). This is R in polar coordinates for the unrolled cone.
        s1 = z1 / cos(self.alpha)
        s2 = z2 / cos(self.alpha)

        # Calculate the geodesic length (shortest distance on the surface).
        # This will be the denominator when normalizing the tangent vectors.
        L = self.geodesic_length(point_1, point_2)

        # If the points are the same, the tangent vectors are zero vectors.
        if L == 0:
            return (Vector(0), Vector(0))

        # Unrolling the cone
        # Find the unit vectors perpendicular to the cone's axis that point
        # towards each point's projection on the xy-plane.
        n1 = (pos1 - z1 * self.axis).normalize()
        n2 = (pos2 - z2 * self.axis).normalize()

        # Calculate the central angle (theta) between the two points in the
        # plane perpendicular to the cone's axis.
        central = _directional_atan(
            cancel((n1.cross(n2)).dot(self.axis)),
            cancel(n1.dot(n2))
        )
        # The shortest path is chosen by ensuring the angle is not reflex.
        central = Piecewise((central, central <= pi), (2 * pi - central, True))

        # Convert the 3D central angle to the corresponding angle (phi) in the
        # unrolled 2D planar sector.
        delta_u = central * sin(self.alpha)

        # At each point, define an orthogonal basis on the tangent plane.
        # All vectors are expressed in the cone's main reference frame.
        # g: The generator vector, pointing radially away from the apex.
        # c: The circumferential vector, tangential to the circular base.
        g1 = pos1.normalize()
        c1 = self.axis.cross(n1)
        g2 = pos2.normalize()
        c2 = self.axis.cross(n2)

        # In the unrolled 2D plane, the tangent vector is constant. We find its
        # components in the local polar basis at each point.
        # v_radial: Component along the generator vector 'g'.
        # v_circ: Component along the circumferential vector 'c'.

        # Components for v1 (tangent vector at point_1)
        v1_radial_comp = (s2 * cos(delta_u) - s1) / L
        v1_circ_comp = (s2 * sin(delta_u)) / L
        v1 = v1_radial_comp * g1 + v1_circ_comp * c1

        # Components for v2 (tangent vector at point_2)
        v2_radial_comp = (s1 * cos(delta_u) - s2) / L
        v2_circ_comp = (-s1 * sin(delta_u)) / L
        v2 = v2_radial_comp * g2 + v2_circ_comp * c2

        return (v1, v2)

    def __repr__(self):
        """Representation of a ``WrappingCone``."""
        return (
            f'{self.__class__.__name__}(alpha={self.alpha}, '
            f'apex={self.apex}, axis={self.axis})'
        )


def _directional_atan(numerator, denominator):
    """Compute atan in a directional sense as required for geodesics.

    Explanation
    ===========

    To be able to control the direction of the geodesic length along the
    surface of a cylinder a dedicated arctangent function is needed that
    properly handles the directionality of different case. This function
    ensures that the central angle is always positive but shifting the case
    where ``atan2`` would return a negative angle to be centered around
    ``2*pi``.

    Notes
    =====

    This function only handles very specific cases, i.e. the ones that are
    expected to be encountered when calculating symbolic geodesics on
    uniformly curved surfaces. As such, ``NotImplemented`` errors can be
    raised in many cases. This function is named with a leader underscore to
    indicate that it only aims to provide very specific functionality within
    the private scope of this module.

    """
    # Fully numeric input: use atan2 and shift negative results into [0, 2*pi).
    if numerator.is_number and denominator.is_number:
        angle = atan2(numerator, denominator)
        if angle < 0:
            angle += 2 * pi
    elif numerator.is_number:
        msg = (
            f'Cannot compute a directional atan when the numerator {numerator} '
            f'is numeric and the denominator {denominator} is symbolic.'
        )
        raise NotImplementedError(msg)
    elif denominator.is_number:
        msg = (
            f'Cannot compute a directional atan when the numerator {numerator} '
            f'is symbolic and the denominator {denominator} is numeric.'
        )
        raise NotImplementedError(msg)
    else:
        # Fully symbolic input: only a ratio that simplifies to tan(angle) or
        # -tan(angle) can be inverted; anything else is unsupported.
        ratio = sympify(trigsimp(numerator / denominator))
        if isinstance(ratio, tan):
            angle = ratio.args[0]
        elif (
            ratio.is_Mul
            and ratio.args[0] == Integer(-1)
            and isinstance(ratio.args[1], tan)
        ):
            angle = 2 * pi - ratio.args[1].args[0]
        else:
            msg = f'Cannot compute a directional atan for the value {ratio}.'
            raise NotImplementedError(msg)

    return angle
WrappingCone
python
instagram__MonkeyType
monkeytype/stubs.py
{ "start": 26251, "end": 33070 }
class ____:
    """Fully-resolved description of a function: location, kind, signature,
    async-ness, and any TypedDict stubs synthesized from traced dict types."""

    # Function kinds whose first parameter is an implicit self/cls receiver.
    _KIND_WITH_SELF = {
        FunctionKind.CLASS,
        FunctionKind.INSTANCE,
        FunctionKind.PROPERTY,
        FunctionKind.DJANGO_CACHED_PROPERTY,
    }

    def __init__(
        self,
        module: str,
        qualname: str,
        kind: FunctionKind,
        sig: inspect.Signature,
        is_async: bool = False,
        typed_dict_class_stubs: Optional[Iterable[ClassStub]] = None,
    ) -> None:
        self.module = module
        self.qualname = qualname
        self.kind = kind
        self.signature = sig
        self.is_async = is_async
        self.typed_dict_class_stubs = typed_dict_class_stubs or []

    @classmethod
    def from_callable(
        cls, func: Callable[..., Any], kind: Optional[FunctionKind] = None
    ) -> "FunctionDefinition":
        """Build a definition by introspecting *func*."""
        # NOTE(review): the `kind` argument is ignored and immediately
        # recomputed from `func` — confirm whether callers rely on passing it.
        kind = FunctionKind.from_callable(func)
        sig = inspect.Signature.from_callable(func)
        is_async = asyncio.iscoroutinefunction(func)
        return FunctionDefinition(
            func.__module__, func.__qualname__, kind, sig, is_async
        )

    @classmethod
    def from_callable_and_traced_types(
        cls,
        func: Callable[..., Any],
        arg_types: Dict[str, type],
        return_type: Optional[type],
        yield_type: Optional[type],
        existing_annotation_strategy: ExistingAnnotationStrategy = ExistingAnnotationStrategy.REPLICATE,
    ) -> "FunctionDefinition":
        """Build a definition for *func* whose signature is annotated with the
        traced types, replacing anonymous TypedDicts with named class stubs."""
        typed_dict_class_stubs: List[ClassStub] = []
        new_arg_types = {}
        # Rewrite each argument type, collecting any TypedDict stubs that the
        # rewrite generates along the way.
        for name, typ in arg_types.items():
            rewritten_type, stubs = ReplaceTypedDictsWithStubs.rewrite_and_get_stubs(
                typ, class_name_hint=name
            )
            new_arg_types[name] = rewritten_type
            typed_dict_class_stubs.extend(stubs)

        if return_type:
            # Replace the dot in a qualified name.
            class_name_hint = func.__qualname__.replace(".", "_")
            return_type, stubs = ReplaceTypedDictsWithStubs.rewrite_and_get_stubs(
                return_type, class_name_hint
            )
            typed_dict_class_stubs.extend(stubs)

        if yield_type:
            # Replace the dot in a qualified name.
            class_name_hint = func.__qualname__.replace(".", "_") + "Yield"
            yield_type, stubs = ReplaceTypedDictsWithStubs.rewrite_and_get_stubs(
                yield_type, class_name_hint
            )
            typed_dict_class_stubs.extend(stubs)

        function = FunctionDefinition.from_callable(func)
        signature = function.signature
        signature = update_signature_args(
            signature, new_arg_types, function.has_self, existing_annotation_strategy
        )
        signature = update_signature_return(
            signature, return_type, yield_type, existing_annotation_strategy
        )
        return FunctionDefinition(
            function.module,
            function.qualname,
            function.kind,
            signature,
            function.is_async,
            typed_dict_class_stubs,
        )

    @property
    def has_self(self) -> bool:
        """Whether the signature's first parameter is an implicit receiver."""
        return self.kind in self._KIND_WITH_SELF

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return NotImplemented

    def __repr__(self) -> str:
        return "FunctionDefinition('%s', '%s', %s, %s, %s, %s)" % (
            self.module,
            self.qualname,
            self.kind,
            self.signature,
            self.is_async,
            self.typed_dict_class_stubs,
        )


def get_updated_definition(
    func: Callable[..., Any],
    traces: Iterable[CallTrace],
    max_typed_dict_size: int,
    rewriter: Optional[TypeRewriter] = None,
    existing_annotation_strategy: ExistingAnnotationStrategy = ExistingAnnotationStrategy.REPLICATE,
) -> FunctionDefinition:
    """Update the definition for func using the types collected in traces."""
    if rewriter is None:
        rewriter = NoOpRewriter()
    # Shrink the traced types to a canonical (arg, return, yield) triple,
    # then run each through the user-supplied rewriter.
    arg_types, return_type, yield_type = shrink_traced_types(
        traces, max_typed_dict_size
    )
    arg_types = {name: rewriter.rewrite(typ) for name, typ in arg_types.items()}
    if return_type is not None:
        return_type = rewriter.rewrite(return_type)
    if yield_type is not None:
        yield_type = rewriter.rewrite(yield_type)
    return FunctionDefinition.from_callable_and_traced_types(
        func, arg_types, return_type, yield_type, existing_annotation_strategy
    )


def build_module_stubs(entries: Iterable[FunctionDefinition]) -> Dict[str, ModuleStub]:
    """Given an iterable of function definitions, build the corresponding stubs"""
    mod_stubs: Dict[str, ModuleStub] = {}
    for entry in entries:
        # Split the qualname into (enclosing class path, function name).
        path = entry.qualname.split(".")
        name = path.pop()
        class_path = path
        # TODO: Handle nested classes
        klass = None
        if len(class_path) > 0:
            klass = ".".join(class_path)
        if entry.module not in mod_stubs:
            mod_stubs[entry.module] = ModuleStub()
        mod_stub = mod_stubs[entry.module]
        imports = get_imports_for_signature(entry.signature)
        # Import TypedDict, if needed.
        if entry.typed_dict_class_stubs:
            imports["mypy_extensions"].add("TypedDict")
        func_stub = FunctionStub(
            name, entry.signature, entry.kind, list(imports.keys()), entry.is_async
        )
        # Don't need to import anything from the same module
        imports.pop(entry.module, None)
        mod_stub.imports_stub.imports.merge(imports)
        if klass is not None:
            if klass not in mod_stub.class_stubs:
                mod_stub.class_stubs[klass] = ClassStub(klass)
            class_stub = mod_stub.class_stubs[klass]
            class_stub.function_stubs[func_stub.name] = func_stub
        else:
            mod_stub.function_stubs[func_stub.name] = func_stub
        mod_stub.typed_dict_class_stubs.extend(entry.typed_dict_class_stubs)
    return mod_stubs


def build_module_stubs_from_traces(
    traces: Iterable[CallTrace],
    max_typed_dict_size: int,
    existing_annotation_strategy: ExistingAnnotationStrategy = ExistingAnnotationStrategy.REPLICATE,
    rewriter: Optional[TypeRewriter] = None,
) -> Dict[str, ModuleStub]:
    """Given an iterable of call traces, build the corresponding stubs."""
    # Group traces by the traced callable so each function gets one definition.
    index: DefaultDict[Callable[..., Any], Set[CallTrace]] = collections.defaultdict(
        set
    )
    for trace in traces:
        index[trace.func].add(trace)
    defns = []
    for func, traces in index.items():
        defn = get_updated_definition(
            func, traces, max_typed_dict_size, rewriter, existing_annotation_strategy
        )
        defns.append(defn)
    return build_module_stubs(defns)
FunctionDefinition
python
django__django
tests/transactions/tests.py
{ "start": 17465, "end": 19135 }
class ____(TransactionTestCase): available_apps = ["transactions"] @skipIf(threading is None, "Test requires threading") def test_implicit_savepoint_rollback(self): """ MySQL implicitly rolls back savepoints when it deadlocks (#22291). """ Reporter.objects.create(id=1) Reporter.objects.create(id=2) main_thread_ready = threading.Event() def other_thread(): try: with transaction.atomic(): Reporter.objects.select_for_update().get(id=1) main_thread_ready.wait() # 1) This line locks... (see below for 2) Reporter.objects.exclude(id=1).update(id=2) finally: # This is the thread-local connection, not the main connection. connection.close() other_thread = threading.Thread(target=other_thread) other_thread.start() with self.assertRaisesMessage(OperationalError, "Deadlock found"): # Double atomic to enter a transaction and create a savepoint. with transaction.atomic(): with transaction.atomic(): Reporter.objects.select_for_update().get(id=2) main_thread_ready.set() # The two threads can't be synchronized with an event here # because the other thread locks. Sleep for a little while. time.sleep(1) # 2) ... and this line deadlocks. (see above for 1) Reporter.objects.exclude(id=2).update(id=1) other_thread.join()
AtomicMySQLTests
python
getsentry__sentry
tests/sentry/integrations/vsts/test_integration.py
{ "start": 1305, "end": 8271 }
class ____(VstsIntegrationTestCase):
    """Tests for migrating VSTS (Azure DevOps) integrations to the new OAuth
    scopes behind the `migrate-azure-devops-integration` feature flag."""

    # Test regular install still works
    @with_feature("organizations:migrate-azure-devops-integration")
    @patch(
        "sentry.integrations.vsts.VstsIntegrationProvider.get_scopes",
        return_value=VstsIntegrationProvider.NEW_SCOPES,
    )
    @patch(
        "sentry.identity.pipeline.IdentityPipeline._get_provider",
        return_value=VSTSNewIdentityProvider(),
    )
    # NOTE(review): @patch decorators are applied bottom-up, so the first mock
    # parameter corresponds to the _get_provider patch — these parameter names
    # appear swapped; confirm and rename if so.
    def test_original_installation_still_works(
        self, mock_get_scopes: MagicMock, mock_get_provider: MagicMock
    ) -> None:
        self.pipeline = Mock()
        self.pipeline.organization = self.organization

        self.assert_installation(new=True)
        integration = Integration.objects.get(provider="vsts")

        assert integration.external_id == self.vsts_account_id
        assert integration.name == self.vsts_account_name

        metadata = integration.metadata
        assert set(metadata["scopes"]) == set(VstsIntegrationProvider.NEW_SCOPES)
        assert metadata["subscription"]["id"] == CREATE_SUBSCRIPTION["id"]
        assert metadata["domain_name"] == self.vsts_base_url

    # Test that install second time doesn't have the metadata and updates the integration object
    # Assert that the Integration object now has the migrated metadata
    @with_feature("organizations:migrate-azure-devops-integration")
    @patch(
        "sentry.integrations.vsts.VstsIntegrationProvider.get_scopes",
        return_value=VstsIntegrationProvider.NEW_SCOPES,
    )
    def test_migration(self, mock_get_scopes: MagicMock) -> None:
        # Old-style integration state: no subscription, jwt-bearer token.
        state = {
            "account": {"accountName": self.vsts_account_name, "accountId": self.vsts_account_id},
            "base_url": self.vsts_base_url,
            "identity": {
                "data": {
                    "access_token": self.access_token,
                    "expires_in": "3600",
                    "refresh_token": self.refresh_token,
                    "token_type": "jwt-bearer",
                }
            },
        }
        external_id = self.vsts_account_id

        # Create the integration with old integration metadata
        old_integraton_obj = self.create_provider_integration(
            metadata=state, provider="vsts", external_id=external_id
        )
        assert old_integraton_obj.metadata.get("subscription", None) is None

        provider = VstsIntegrationProvider()
        pipeline = Mock()
        pipeline.organization = self.organization
        provider.set_pipeline(pipeline)

        # Re-run the install flow; build_integration should attach the new
        # subscription and stamp the migration version.
        data = provider.build_integration(
            {
                "account": {"accountName": self.vsts_account_name, "accountId": external_id},
                "base_url": self.vsts_base_url,
                "identity": {
                    "data": {
                        "access_token": "new_access_token",
                        "expires_in": "3600",
                        "refresh_token": "new_refresh_token",
                        "token_type": "bearer",
                    }
                },
            }
        )

        assert external_id == data["external_id"]
        subscription = data["metadata"]["subscription"]
        assert subscription["id"] is not None and subscription["secret"] is not None

        metadata = data.get("metadata")
        assert metadata is not None
        assert set(metadata["scopes"]) == set(VstsIntegrationProvider.NEW_SCOPES)
        assert metadata["integration_migration_version"] == 1

        # Make sure the integration object is updated
        # ensure_integration will be called in _finish_pipeline
        new_integration_obj = ensure_integration("vsts", data)
        assert new_integration_obj.metadata["integration_migration_version"] == 1
        assert set(new_integration_obj.metadata["scopes"]) == set(
            VstsIntegrationProvider.NEW_SCOPES
        )

    # Test that on reinstall of new migration, we keep the migration version
    @with_feature("organizations:migrate-azure-devops-integration")
    @patch(
        "sentry.integrations.vsts.VstsIntegrationProvider.get_scopes",
        return_value=VstsIntegrationProvider.NEW_SCOPES,
    )
    def test_migration_after_reinstall(self, mock_get_scopes: MagicMock) -> None:
        # Already-migrated state: subscription present, migration version set.
        state = {
            "account": {"accountName": self.vsts_account_name, "accountId": self.vsts_account_id},
            "base_url": self.vsts_base_url,
            "identity": {
                "data": {
                    "access_token": self.access_token,
                    "expires_in": "3600",
                    "refresh_token": self.refresh_token,
                    "token_type": "jwt-bearer",
                }
            },
            "integration_migration_version": 1,
            "subscription": {
                "id": "123",
                "secret": "456",
            },
        }
        external_id = self.vsts_account_id

        # Create the integration with old integration metadata
        integration = self.create_provider_integration(
            metadata=state, provider="vsts", external_id=external_id
        )
        self.create_organization_integration(
            integration_id=integration.id,
            organization_id=self.organization.id,
        )

        provider = VstsIntegrationProvider()
        pipeline = Mock()
        pipeline.organization = self.organization
        provider.set_pipeline(pipeline)

        data = provider.build_integration(
            {
                "account": {"accountName": self.vsts_account_name, "accountId": external_id},
                "base_url": self.vsts_base_url,
                "identity": {
                    "data": {
                        "access_token": "new_access_token",
                        "expires_in": "3600",
                        "refresh_token": "new_refresh_token",
                        "token_type": "bearer",
                    }
                },
                "subscription": {
                    "id": "123",
                    "secret": "456",
                },
                "integration_migration_version": 1,
            }
        )

        assert external_id == data["external_id"]
        subscription = data["metadata"]["subscription"]
        assert subscription["id"] is not None and subscription["secret"] is not None

        metadata = data.get("metadata")
        assert metadata is not None
        assert set(metadata["scopes"]) == set(VstsIntegrationProvider.NEW_SCOPES)
        assert metadata["integration_migration_version"] == 1

        # Make sure the integration object is updated
        # ensure_integration will be called in _finish_pipeline
        new_integration_obj = ensure_integration("vsts", data)
        assert new_integration_obj.metadata["integration_migration_version"] == 1
        assert set(new_integration_obj.metadata["scopes"]) == set(
            VstsIntegrationProvider.NEW_SCOPES
        )


@control_silo_test
VstsIntegrationMigrationTest
python
huggingface__transformers
src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py
{ "start": 18153, "end": 18236 }
class ____(GroundingDinoMLPPredictionHead): pass
MMGroundingDinoMLPPredictionHead
python
django-extensions__django-extensions
django_extensions/management/commands/delete_squashed_migrations.py
{ "start": 410, "end": 7731 }
class ____(BaseCommand):
    help = (
        "Deletes left over migrations that have been replaced by a "
        "squashed migration and converts squashed migration into a normal "
        "migration. Modifies your source tree! Use with care!"
    )

    def add_arguments(self, parser):
        parser.add_argument(
            "app_label",
            help="App label of the application to delete replaced migrations from.",
        )
        parser.add_argument(
            "squashed_migration_name",
            default=None,
            nargs="?",
            help="The squashed migration to replace. "
            "If not specified defaults to the first found.",
        )
        parser.add_argument(
            "--noinput",
            "--no-input",
            action="store_false",
            dest="interactive",
            default=True,
            help="Tells Django to NOT prompt the user for input of any kind.",
        )
        parser.add_argument(
            "--dry-run",
            action="store_true",
            default=False,
            help="Do not actually delete or change any files",
        )
        parser.add_argument(
            "--database",
            default=DEFAULT_DB_ALIAS,
            help=(
                "Nominates a database to run command for. "
                'Defaults to the "%s" database.'
            )
            % DEFAULT_DB_ALIAS,
        )

    def handle(self, **options):
        """Locate the squashed migration, delete the migration files it
        replaces, then strip its `replaces = [...]` attribute so it becomes a
        plain migration."""
        self.verbosity = options["verbosity"]
        self.interactive = options["interactive"]
        self.dry_run = options["dry_run"]
        app_label = options["app_label"]
        squashed_migration_name = options["squashed_migration_name"]
        database = options["database"]

        # Load the current graph state
        # check the app and migration they asked for exists
        loader = MigrationLoader(connections[database])
        if app_label not in loader.migrated_apps:
            raise CommandError(
                "App '%s' does not have migrations (so delete_squashed_migrations on "
                "it makes no sense)" % app_label
            )

        squashed_migration = None
        if squashed_migration_name:
            squashed_migration = self.find_migration(
                loader, app_label, squashed_migration_name
            )
            if not squashed_migration.replaces:
                raise CommandError(
                    "The migration %s %s is not a squashed migration."
                    % (squashed_migration.app_label, squashed_migration.name)
                )
        else:
            # No name given: walk backwards from a leaf migration and take the
            # first squashed migration (one with a non-empty `replaces`).
            leaf_nodes = loader.graph.leaf_nodes(app=app_label)
            migration = loader.get_migration(*leaf_nodes[0])
            previous_migrations = [
                loader.get_migration(al, mn)
                for al, mn in loader.graph.forwards_plan(
                    (migration.app_label, migration.name)
                )
                if al == migration.app_label
            ]
            migrations = previous_migrations + [migration]
            for migration in migrations:
                if migration.replaces:
                    squashed_migration = migration
                    break

            if not squashed_migration:
                raise CommandError(
                    "Cannot find a squashed migration in app '%s'." % (app_label)
                )

        # Collect the on-disk files (both .py and compiled .pyc) of every
        # migration the squashed migration replaces.
        files_to_delete = []
        for al, mn in squashed_migration.replaces:
            try:
                migration = loader.disk_migrations[al, mn]
            except KeyError:
                if self.verbosity > 0:
                    self.stderr.write(
                        "Couldn't find migration file for %s %s\n" % (al, mn)
                    )
            else:
                pyc_file = inspect.getfile(migration.__class__)
                files_to_delete.append(pyc_file)
                if pyc_file.endswith(PYC):
                    py_file = py_from_pyc(pyc_file)
                    files_to_delete.append(py_file)

        # Tell them what we're doing and optionally ask if we should proceed
        if self.verbosity > 0 or self.interactive:
            self.stdout.write(
                self.style.MIGRATE_HEADING("Will delete the following files:")
            )
            for fn in files_to_delete:
                self.stdout.write(" - %s" % fn)

            if not self.confirm():
                return

        for fn in files_to_delete:
            try:
                if not self.dry_run:
                    os.remove(fn)
            except OSError:
                if self.verbosity > 0:
                    self.stderr.write("Couldn't delete %s\n" % (fn,))

        # Try and delete replaces only if it's all on one line
        squashed_migration_fn = inspect.getfile(squashed_migration.__class__)
        if squashed_migration_fn.endswith(PYC):
            squashed_migration_fn = py_from_pyc(squashed_migration_fn)
        with open(squashed_migration_fn) as fp:
            squashed_migration_content = fp.read()
        cleaned_migration_content = re.sub(
            REPLACES_REGEX, "", squashed_migration_content
        )
        # If the regex removed nothing, the source can't be cleaned safely.
        if cleaned_migration_content == squashed_migration_content:
            raise CommandError(
                (
                    "Couldn't find 'replaces =' lines in file %s. "
                    "Please finish cleaning up manually."
                )
                % (squashed_migration_fn,)
            )

        if self.verbosity > 0 or self.interactive:
            # Print the differences between the original and new content
            diff = difflib.unified_diff(
                squashed_migration_content.splitlines(),
                cleaned_migration_content.splitlines(),
                lineterm="",
                fromfile="Original",
                tofile="Modified",
            )

            self.stdout.write(
                self.style.MIGRATE_HEADING(
                    "The squashed migrations file %s will be modified like this :\n\n%s"
                    % (
                        squashed_migration_fn,
                        "\n".join(diff),
                    )
                )
            )

            if not self.confirm():
                return

        if not self.dry_run:
            with open(squashed_migration_fn, "w") as fp:
                fp.write(cleaned_migration_content)

    def confirm(self):
        """Prompt for a yes/no answer when interactive; otherwise proceed."""
        if self.interactive:
            answer = None
            while not answer or answer not in "yn":
                answer = input("Do you wish to proceed? [yN] ")
                if not answer:
                    answer = "n"
                    break
                else:
                    answer = answer[0].lower()
            return answer == "y"
        return True

    def find_migration(self, loader, app_label, name):
        """Resolve a migration by (possibly prefixed) name, with friendly
        errors for ambiguous or missing matches."""
        try:
            return loader.get_migration_by_prefix(app_label, name)
        except AmbiguityError:
            raise CommandError(
                "More than one migration matches '%s' in app '%s'. Please be "
                "more specific." % (name, app_label)
            )
        except KeyError:
            raise CommandError(
                "Cannot find a migration matching '%s' from app '%s'."
                % (name, app_label)
            )
Command
python
faif__python-patterns
patterns/dependency_injection.py
{ "start": 1444, "end": 1805 }
class ____: def __init__(self) -> None: pass def get_current_time_as_html_fragment(self, time_provider: Callable) -> str: current_time = time_provider() current_time_as_html_fragment = '<span class="tinyBoldText">{}</span>'.format( current_time ) return current_time_as_html_fragment
ParameterInjection
python
redis__redis-py
redis/asyncio/multidb/healthcheck.py
{ "start": 5753, "end": 6308 }
class ____(HealthCheck): """ Health check based on PING command. """ async def check_health(self, database) -> bool: if isinstance(database.client, Redis): return await database.client.execute_command("PING") else: # For a cluster checks if all nodes are healthy. all_nodes = database.client.get_nodes() for node in all_nodes: if not await node.redis_connection.execute_command("PING"): return False return True
PingHealthCheck
python
ray-project__ray
python/ray/data/expressions.py
{ "start": 3782, "end": 6544 }
class ____(_ExprVisitor["pyarrow.compute.Expression"]):
    """Visitor that converts Ray Data expressions to PyArrow compute expressions."""

    def visit_column(self, expr: "ColumnExpr") -> "pyarrow.compute.Expression":
        # A column reference becomes a field selector.
        return pc.field(expr.name)

    def visit_literal(self, expr: "LiteralExpr") -> "pyarrow.compute.Expression":
        return pc.scalar(expr.value)

    def visit_binary(self, expr: "BinaryExpr") -> "pyarrow.compute.Expression":
        import pyarrow as pa

        # IN / NOT_IN need special handling: the right-hand side must be a
        # literal, which is wrapped into a PyArrow array for pc.is_in.
        if expr.op in (Operation.IN, Operation.NOT_IN):
            left = self.visit(expr.left)
            if isinstance(expr.right, LiteralExpr):
                right_value = expr.right.value
                # A scalar literal is treated as a one-element list.
                right = (
                    pa.array(right_value)
                    if isinstance(right_value, list)
                    else pa.array([right_value])
                )
            else:
                raise ValueError(
                    f"is_in/not_in operations require the right operand to be a "
                    f"literal list, got {type(expr.right).__name__}."
                )
            result = pc.is_in(left, right)
            # NOT_IN is IN followed by a logical inversion.
            return pc.invert(result) if expr.op == Operation.NOT_IN else result

        left = self.visit(expr.left)
        right = self.visit(expr.right)

        # All other binary operators are dispatched through the shared
        # Operation -> pyarrow.compute function table.
        from ray.data._internal.planner.plan_expression.expression_evaluator import (
            _ARROW_EXPR_OPS_MAP,
        )

        if expr.op in _ARROW_EXPR_OPS_MAP:
            return _ARROW_EXPR_OPS_MAP[expr.op](left, right)
        raise ValueError(f"Unsupported binary operation for PyArrow: {expr.op}")

    def visit_unary(self, expr: "UnaryExpr") -> "pyarrow.compute.Expression":
        operand = self.visit(expr.operand)

        from ray.data._internal.planner.plan_expression.expression_evaluator import (
            _ARROW_EXPR_OPS_MAP,
        )

        if expr.op in _ARROW_EXPR_OPS_MAP:
            return _ARROW_EXPR_OPS_MAP[expr.op](operand)
        raise ValueError(f"Unsupported unary operation for PyArrow: {expr.op}")

    def visit_alias(self, expr: "AliasExpr") -> "pyarrow.compute.Expression":
        # Aliases carry no computation; convert the wrapped expression.
        return self.visit(expr.expr)

    def visit_udf(self, expr: "UDFExpr") -> "pyarrow.compute.Expression":
        raise TypeError("UDF expressions cannot be converted to PyArrow expressions")

    def visit_download(self, expr: "DownloadExpr") -> "pyarrow.compute.Expression":
        raise TypeError(
            "Download expressions cannot be converted to PyArrow expressions"
        )

    def visit_star(self, expr: "StarExpr") -> "pyarrow.compute.Expression":
        raise TypeError("Star expressions cannot be converted to PyArrow expressions")


@DeveloperAPI(stability="alpha")
@dataclass(frozen=True)
_PyArrowExpressionVisitor
python
huggingface__transformers
src/transformers/models/deit/modeling_deit.py
{ "start": 11642, "end": 12298 }
class ____(nn.Module): def __init__(self, config: DeiTConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->DeiT
DeiTIntermediate
python
walkccc__LeetCode
solutions/3048. Earliest Second to Mark Indices I/3048.py
{ "start": 0, "end": 1093 }
class ____: def earliestSecondToMarkIndices( self, nums: list[int], changeIndices: list[int], ) -> int: def canMark(second: int) -> bool: """ Returns True if all indices of `nums` can be marked within `second`. """ numMarked = 0 decrement = 0 indexToLastSecond = {} for i in range(second): indexToLastSecond[changeIndices[i] - 1] = i for i in range(second): index = changeIndices[i] - 1 # Convert to 0-indexed if i == indexToLastSecond[index]: # Reach the last occurrence of the number. # So, the current second will be used to mark the index. if nums[index] > decrement: # The decrement is less than the number to be marked. return False decrement -= nums[index] numMarked += 1 else: decrement += 1 return numMarked == len(nums) l = 1 r = len(changeIndices) + 1 ans = bisect.bisect_left(range(l, r), True, key=canMark) + l return ans if ans <= len(changeIndices) else -1
Solution
python
getsentry__sentry
src/sentry/sdk_updates.py
{ "start": 330, "end": 1328 }
class ____: def __init__(self, sdk_name, sdk_version, modules, integrations): self.sdk_name = sdk_name self.sdk_version = sdk_version self.modules = dict(modules or ()) self.integrations = list(integrations or ()) def copy(self): return type(self)( sdk_name=self.sdk_name, sdk_version=self.sdk_version, modules=self.modules, integrations=self.integrations, ) @classmethod def from_event_json(cls, event_data): sdk_name = get_path(event_data, "sdk", "name") if sdk_name: sdk_name = sdk_name.lower().rsplit(":", 1)[0] if sdk_name == "sentry-python": sdk_name = "sentry.python" return cls( sdk_name=sdk_name, sdk_version=get_path(event_data, "sdk", "version"), modules=get_path(event_data, "modules"), integrations=get_path(event_data, "sdk", "integrations"), )
SdkSetupState
python
doocs__leetcode
solution/0600-0699/0673.Number of Longest Increasing Subsequence/Solution2.py
{ "start": 677, "end": 1018 }
class ____: def findNumberOfLIS(self, nums: List[int]) -> int: arr = sorted(set(nums)) m = len(arr) tree = BinaryIndexedTree(m) for x in nums: i = bisect_left(arr, x) + 1 v, cnt = tree.query(i - 1) tree.update(i, v + 1, max(cnt, 1)) return tree.query(m)[1]
Solution
python
marshmallow-code__marshmallow
src/marshmallow/fields.py
{ "start": 36829, "end": 40169 }
class ____(Number[decimal.Decimal]): """A field that (de)serializes to the Python ``decimal.Decimal`` type. It's safe to use when dealing with money values, percentages, ratios or other numbers where precision is critical. .. warning:: This field serializes to a `decimal.Decimal` object by default. If you need to render your data as JSON, keep in mind that the `json` module from the standard library does not encode `decimal.Decimal`. Therefore, you must use a JSON library that can handle decimals, such as `simplejson`, or serialize to a string by passing ``as_string=True``. .. warning:: If a JSON `float` value is passed to this field for deserialization it will first be cast to its corresponding `string` value before being deserialized to a `decimal.Decimal` object. The default `__str__` implementation of the built-in Python `float` type may apply a destructive transformation upon its input data and therefore cannot be relied upon to preserve precision. To avoid this, you can instead pass a JSON `string` to be deserialized directly. :param places: How many decimal places to quantize the value. If `None`, does not quantize the value. :param rounding: How to round the value during quantize, for example `decimal.ROUND_UP`. If `None`, uses the rounding value from the current thread's context. :param allow_nan: If `True`, `NaN`, `Infinity` and `-Infinity` are allowed, even though they are illegal according to the JSON specification. :param as_string: If `True`, serialize to a string instead of a Python `decimal.Decimal` type. :param kwargs: The same keyword arguments that :class:`Number` receives. """ num_type = decimal.Decimal #: Default error messages. default_error_messages = { "special": "Special numeric values (nan or infinity) are not permitted." 
} def __init__( self, places: int | None = None, rounding: str | None = None, *, allow_nan: bool = False, as_string: bool = False, **kwargs: Unpack[_BaseFieldKwargs], ): self.places = ( decimal.Decimal((0, (1,), -places)) if places is not None else None ) self.rounding = rounding self.allow_nan = allow_nan super().__init__(as_string=as_string, **kwargs) # override Number def _format_num(self, value): num = decimal.Decimal(str(value)) if self.allow_nan: if num.is_nan(): return decimal.Decimal("NaN") # avoid sNaN, -sNaN and -NaN if self.places is not None and num.is_finite(): num = num.quantize(self.places, rounding=self.rounding) return num # override Number def _validated(self, value: typing.Any) -> decimal.Decimal: try: num = super()._validated(value) except decimal.InvalidOperation as error: raise self.make_error("invalid") from error if not self.allow_nan and (num.is_nan() or num.is_infinite()): raise self.make_error("special") return num # override Number def _to_string(self, value: decimal.Decimal) -> str: return format(value, "f")
Decimal
python
wireservice__csvkit
csvkit/utilities/csvformat.py
{ "start": 143, "end": 4308 }
class ____(CSVKitUtility): description = 'Convert a CSV file to a custom output format.' override_flags = ['I'] def add_arguments(self): self.argparser.add_argument( '-E', '--skip-header', dest='skip_header', action='store_true', help='Do not output a header row.') self.argparser.add_argument( '-D', '--out-delimiter', dest='out_delimiter', help='Delimiting character of the output file.') self.argparser.add_argument( '-T', '--out-tabs', dest='out_tabs', action='store_true', help='Specify that the output file is delimited with tabs. Overrides "-D".') self.argparser.add_argument( '-A', '--out-asv', dest='out_asv', action='store_true', help='Specify that the output file is delimited with the ASCII unit separator and record separator. ' 'Overrides "-T", "-D" and "-M".') self.argparser.add_argument( '-Q', '--out-quotechar', dest='out_quotechar', help='Character used to quote strings in the output file.') self.argparser.add_argument( '-U', '--out-quoting', dest='out_quoting', type=int, choices=QUOTING_CHOICES, help='Quoting style used in the output file: 0 quote minimal, 1 quote all, ' '2 quote non-numeric, 3 quote none.') self.argparser.add_argument( '-B', '--out-no-doublequote', dest='out_doublequote', action='store_false', help='Whether or not double quotes are doubled in the output file.') self.argparser.add_argument( '-P', '--out-escapechar', dest='out_escapechar', help='Character used to escape the delimiter in the output file if --quoting 3 ("Quote None") is ' 'specified and to escape the QUOTECHAR if --out-no-doublequote is specified.') self.argparser.add_argument( '-M', '--out-lineterminator', dest='out_lineterminator', help='Character used to terminate lines in the output file.') def _extract_csv_writer_kwargs(self): kwargs = {} if self.args.line_numbers: kwargs['line_numbers'] = True if self.args.out_asv: kwargs['delimiter'] = '\x1f' elif self.args.out_tabs: kwargs['delimiter'] = '\t' elif self.args.out_delimiter: kwargs['delimiter'] = self.args.out_delimiter 
if self.args.out_asv: kwargs['lineterminator'] = '\x1e' elif self.args.out_lineterminator: kwargs['lineterminator'] = self.args.out_lineterminator for arg in ('quotechar', 'quoting', 'doublequote', 'escapechar'): value = getattr(self.args, f'out_{arg}') if value is not None: kwargs[arg] = value return kwargs def main(self): if self.additional_input_expected(): sys.stderr.write('No input file or piped data provided. Waiting for standard input:\n') writer = agate.csv.writer(self.output_file, **self.writer_kwargs) if self.args.out_quoting == 2: table = agate.Table.from_csv( self.input_file, skip_lines=self.args.skip_lines, column_types=self.get_column_types(), **self.reader_kwargs, ) # table.to_csv() has no option to omit the column names. if not self.args.skip_header: writer.writerow(table.column_names) writer.writerows(table.rows) else: reader = agate.csv.reader(self.skip_lines(), **self.reader_kwargs) if self.args.no_header_row: # Peek at a row to get the number of columns. _row = next(reader) headers = make_default_headers(len(_row)) reader = itertools.chain([headers, _row], reader) if self.args.skip_header: next(reader) writer.writerows(reader) def launch_new_instance(): utility = CSVFormat() utility.run() if __name__ == '__main__': launch_new_instance()
CSVFormat
python
PrefectHQ__prefect
tests/server/schemas/test_schedules.py
{ "start": 15993, "end": 21306 }
class ____: async def test_interval_schedule_always_has_the_right_offset(self): """ Tests the situation where a long duration has passed since the start date that crosses a DST boundary; for very short intervals this occasionally could result in "next" scheduled times that are in the past by one hour. """ anchor_date = ( Instant.from_timestamp(1582002945.964696).to_tz("US/Pacific").py_datetime() ) current_date = ( Instant.from_timestamp(1593643144.233938).to_tz("UTC").py_datetime() ) s = IntervalSchedule( interval=timedelta(minutes=1, seconds=15), anchor_date=anchor_date ) dates = await s.get_dates(n=4, start=current_date) assert all(d > current_date for d in dates) async def test_interval_schedule_hourly_daylight_savings_time_forward_with_UTC( self, ): """ On 3/11/2018, at 2am, America/New_York switched clocks forward an hour. """ dt = datetime(2018, 3, 10, 23, tzinfo=ZoneInfo("America/New_York")) s = IntervalSchedule(interval=timedelta(hours=1)) dates = await s.get_dates(n=5, start=dt) # skip 2am assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [ 23, 0, 1, 3, 4, ] # constant hourly clock in utc time assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [4, 5, 6, 7, 8] async def test_interval_schedule_hourly_daylight_savings_time_forward(self): """ On 3/11/2018, at 2am, America/New_York switched clocks forward an hour. """ dt = datetime(2018, 3, 10, 23, tzinfo=ZoneInfo("America/New_York")) s = IntervalSchedule(interval=timedelta(hours=1), timezone="America/New_York") dates = await s.get_dates(n=5, start=dt) # skip 2am assert [d.hour for d in dates] == [ 23, 0, 1, 3, 4, ] # constant hourly clock in utc time assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [4, 5, 6, 7, 8] async def test_interval_schedule_hourly_daylight_savings_time_backward(self): """ 11/4/2018, at 2am, America/New_York switched clocks back an hour. 
""" dt = datetime(2018, 11, 3, 23, tzinfo=ZoneInfo("America/New_York")) s = IntervalSchedule(interval=timedelta(hours=1), timezone="America/New_York") dates = await s.get_dates(n=5, start=dt) if sys.version_info >= (3, 13): # Hour is repeated because the interval is 1 hour assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [ 23, 0, 1, 1, 2, ] # Runs on every UTC hour assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [ 3, 4, 5, 6, 7, ] else: assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [ 23, 0, 1, 2, 3, ] # skips an hour UTC - note interval clocks skip the "6" assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [ 3, 4, 5, 7, 8, ] async def test_interval_schedule_daily_start_daylight_savings_time_forward(self): """ On 3/11/2018, at 2am, America/New_York switched clocks forward an hour. Confirm that a clock for 9am America/New_York stays 9am through the switch. """ dt = datetime(2018, 3, 8, 9, tzinfo=ZoneInfo("America/New_York")) s = IntervalSchedule(interval=timedelta(days=1), anchor_date=dt) dates = await s.get_dates(n=5, start=dt) # constant 9am start assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [ 9, 9, 9, 9, 9, ] # utc time shifts assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [ 14, 14, 14, 13, 13, ] async def test_interval_schedule_daily_start_daylight_savings_time_backward(self): """ On 11/4/2018, at 2am, America/New_York switched clocks back an hour. Confirm that a clock for 9am America/New_York stays 9am through the switch. """ dt = datetime(2018, 11, 1, 9, tzinfo=ZoneInfo("America/New_York")) s = IntervalSchedule(interval=timedelta(days=1), anchor_date=dt) dates = await s.get_dates(n=5, start=dt) # constant 9am start assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [ 9, 9, 9, 9, 9, ] assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [ 13, 13, 13, 14, 14, ]
TestIntervalScheduleDaylightSavingsTime
python
pytorch__pytorch
test/torch_np/test_basic.py
{ "start": 2381, "end": 3209 }
class ____(TestCase): @parametrize("func", one_arg_axis_funcs) @parametrize("axis", [0, 1, -1, None]) def test_andaxis_tensor(self, func, axis): t = torch.Tensor([[1.0, 2, 3], [4, 5, 6]]) ta = func(t, axis=axis) assert isinstance(ta, w.ndarray) @parametrize("func", one_arg_axis_funcs) @parametrize("axis", [0, 1, -1, None]) def test_andaxis_list(self, func, axis): t = [[1.0, 2, 3], [4, 5, 6]] ta = func(t, axis=axis) assert isinstance(ta, w.ndarray) @parametrize("func", one_arg_axis_funcs) @parametrize("axis", [0, 1, -1, None]) def test_andaxis_array(self, func, axis): t = w.asarray([[1.0, 2, 3], [4, 5, 6]]) ta = func(t, axis=axis) assert isinstance(ta, w.ndarray) @instantiate_parametrized_tests
TestOneArrAndAxis
python
spyder-ide__spyder
spyder/plugins/run/tests/test_run.py
{ "start": 6265, "end": 7957 }
class ____(RunExecutorConfigurationGroup): def __init__(self, parent: QWidget, context: Context, input_extension: str, input_metadata: RunConfigurationMetadata): super().__init__(parent, context, input_extension, input_metadata) default_conf = self.get_default_configuration() self.widgets = {} layout = QVBoxLayout() for key_name in default_conf: default_value = default_conf[key_name] if isinstance(default_value, bool): widget = QCheckBox(key_name) layout.addWidget(widget) elif isinstance(default_value, str): temp_layout = QHBoxLayout() label = QLabel(key_name) temp_layout.addWidget(label) widget = QLineEdit() temp_layout.addWidget(widget) layout.addLayout(temp_layout) self.widgets[key_name] = widget self.setLayout(layout) def get_configuration(self) -> dict: conf = {} for key_name in self.widgets: widget = self.widgets[key_name] if isinstance(widget, QCheckBox): conf[key_name] = widget.isChecked() elif isinstance(widget, QLineEdit): conf[key_name] = widget.text() return conf def set_configuration(self, config: dict): for key_name in config: value = config[key_name] widget = self.widgets[key_name] if isinstance(widget, QCheckBox): widget.setChecked(value) elif isinstance(widget, QLineEdit): widget.setText(value)
GenExampleRunExecutorConf
python
scikit-learn__scikit-learn
sklearn/multiclass.py
{ "start": 3917, "end": 5919 }
class ____(BaseEstimator): """Helper predictor to be used when only one class is present.""" def fit(self, X, y): check_params = dict( ensure_all_finite=False, dtype=None, ensure_2d=False, accept_sparse=True ) validate_data( self, X, y, reset=True, validate_separately=(check_params, check_params) ) self.y_ = y return self def predict(self, X): check_is_fitted(self) validate_data( self, X, ensure_all_finite=False, dtype=None, accept_sparse=True, ensure_2d=False, reset=False, ) return np.repeat(self.y_, _num_samples(X)) def decision_function(self, X): check_is_fitted(self) validate_data( self, X, ensure_all_finite=False, dtype=None, accept_sparse=True, ensure_2d=False, reset=False, ) return np.repeat(self.y_, _num_samples(X)) def predict_proba(self, X): check_is_fitted(self) validate_data( self, X, ensure_all_finite=False, dtype=None, accept_sparse=True, ensure_2d=False, reset=False, ) y_ = self.y_.astype(np.float64) return np.repeat([np.hstack([1 - y_, y_])], _num_samples(X), axis=0) def _estimators_has(attr): """Check if self.estimator or self.estimators_[0] has attr. If `self.estimators_[0]` has the attr, then its safe to assume that other estimators have it too. We raise the original `AttributeError` if `attr` does not exist. This function is used together with `available_if`. """ def check(self): if hasattr(self, "estimators_"): getattr(self.estimators_[0], attr) else: getattr(self.estimator, attr) return True return check
_ConstantPredictor
python
ray-project__ray
doc/external/pytorch_tutorials_hyperparameter_tuning_tutorial.py
{ "start": 3077, "end": 19391 }
class ____(nn.Module): def __init__(self, l1=120, l2=84): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, l1) self.fc2 = nn.Linear(l1, l2) self.fc3 = nn.Linear(l2, 10) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = torch.flatten(x, 1) # flatten all dimensions except batch x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x ###################################################################### # The train function # ------------------ # Now it gets interesting, because we introduce some changes to the example `from the PyTorch # documentation <https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html>`_. # # We wrap the training script in a function ``train_cifar(config, data_dir=None)``. # The ``config`` parameter will receive the hyperparameters we would like to # train with. The ``data_dir`` specifies the directory where we load and store the data, # so that multiple runs can share the same data source. # We also load the model and optimizer state at the start of the run, if a checkpoint # is provided. Further down in this tutorial you will find information on how # to save the checkpoint and what it is used for. # # .. code-block:: python # # net = Net(config["l1"], config["l2"]) # # checkpoint = train.get_checkpoint() # # if checkpoint: # checkpoint_dir = checkpoint.to_directory() # checkpoint_path = os.path.join(checkpoint_dir, "checkpoint.pt") # checkpoint_state = torch.load(checkpoint_path) # start_epoch = checkpoint_state["epoch"] # net.load_state_dict(checkpoint_state["net_state_dict"]) # optimizer.load_state_dict(checkpoint_state["optimizer_state_dict"]) # else: # start_epoch = 0 # # The learning rate of the optimizer is made configurable, too: # # .. 
code-block:: python # # optimizer = optim.SGD(net.parameters(), lr=config["lr"], momentum=0.9) # # We also split the training data into a training and validation subset. We thus train on # 80% of the data and calculate the validation loss on the remaining 20%. The batch sizes # with which we iterate through the training and test sets are configurable as well. # # Adding (multi) GPU support with DataParallel # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Image classification benefits largely from GPUs. Luckily, we can continue to use # PyTorch's abstractions in Ray Tune. Thus, we can wrap our model in ``nn.DataParallel`` # to support data parallel training on multiple GPUs: # # .. code-block:: python # # device = "cpu" # if torch.cuda.is_available(): # device = "cuda:0" # if torch.cuda.device_count() > 1: # net = nn.DataParallel(net) # net.to(device) # # By using a ``device`` variable we make sure that training also works when we have # no GPUs available. PyTorch requires us to send our data to the GPU memory explicitly, # like this: # # .. code-block:: python # # for i, data in enumerate(trainloader, 0): # inputs, labels = data # inputs, labels = inputs.to(device), labels.to(device) # # The code now supports training on CPUs, on a single GPU, and on multiple GPUs. Notably, Ray # also supports `fractional GPUs <https://docs.ray.io/en/master/using-ray-with-gpus.html#fractional-gpus>`_ # so we can share GPUs among trials, as long as the model still fits on the GPU memory. We'll come back # to that later. # # Communicating with Ray Tune # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # The most interesting part is the communication with Ray Tune: # # .. 
code-block:: python # checkpoint_data = { # "epoch": epoch, # "net_state_dict": net.state_dict(), # "optimizer_state_dict": optimizer.state_dict(), # } # with TemporaryDirectory() as tmpdir: # torch.save(checkpoint_data, os.path.join(tmpdir, "checkpoint.pt")) # train.report( # {"loss": val_loss / val_steps, "accuracy": correct / total}, # checkpoint=Checkpoint.from_directory(tmpdir), # ) # # Here we first save a checkpoint and then report some metrics back to Ray Tune. Specifically, # we send the validation loss and accuracy back to Ray Tune. Ray Tune can then use these metrics # to decide which hyperparameter configuration lead to the best results. These metrics # can also be used to stop bad performing trials early in order to avoid wasting # resources on those trials. # # The checkpoint saving is optional, however, it is necessary if we wanted to use advanced # schedulers like # `Population Based Training <https://docs.ray.io/en/master/tune/tutorials/tune-advanced-tutorial.html>`_. # Also, by saving the checkpoint we can later load the trained models and validate them # on a test set. Lastly, saving checkpoints is useful for fault tolerance, and it allows # us to interrupt training and continue training later. 
# # Full training function # ~~~~~~~~~~~~~~~~~~~~~~ # # The full code example looks like this: def train_cifar(config, data_dir=None): net = Net(config["l1"], config["l2"]) device = "cpu" if torch.cuda.is_available(): device = "cuda:0" if torch.cuda.device_count() > 1: net = nn.DataParallel(net) net.to(device) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=config["lr"], momentum=0.9) checkpoint = train.get_checkpoint() if checkpoint: checkpoint_dir = checkpoint.to_directory() checkpoint_path = os.path.join(checkpoint_dir, "checkpoint.pt") checkpoint_state = torch.load(checkpoint_path) start_epoch = checkpoint_state["epoch"] net.load_state_dict(checkpoint_state["net_state_dict"]) optimizer.load_state_dict(checkpoint_state["optimizer_state_dict"]) else: start_epoch = 0 trainset, testset = load_data(data_dir) test_abs = int(len(trainset) * 0.8) train_subset, val_subset = random_split( trainset, [test_abs, len(trainset) - test_abs] ) trainloader = torch.utils.data.DataLoader( train_subset, batch_size=int(config["batch_size"]), shuffle=True, num_workers=8 ) valloader = torch.utils.data.DataLoader( val_subset, batch_size=int(config["batch_size"]), shuffle=True, num_workers=8 ) for epoch in range(start_epoch, 10): # loop over the dataset multiple times running_loss = 0.0 epoch_steps = 0 for i, data in enumerate(trainloader, 0): # get the inputs; data is a list of [inputs, labels] inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics running_loss += loss.item() epoch_steps += 1 if i % 2000 == 1999: # print every 2000 mini-batches print( "[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / epoch_steps) ) running_loss = 0.0 # Validation loss val_loss = 0.0 val_steps = 0 total = 0 correct = 0 for i, data in enumerate(valloader, 
0): with torch.no_grad(): inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) outputs = net(inputs) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() loss = criterion(outputs, labels) val_loss += loss.cpu().numpy() val_steps += 1 checkpoint_data = { "epoch": epoch, "net_state_dict": net.state_dict(), "optimizer_state_dict": optimizer.state_dict(), } with TemporaryDirectory() as tmpdir: torch.save(checkpoint_data, os.path.join(tmpdir, "checkpoint.pt")) train.report( {"loss": val_loss / val_steps, "accuracy": correct / total}, checkpoint=Checkpoint.from_directory(tmpdir), ) print("Finished Training") ###################################################################### # As you can see, most of the code is adapted directly from the original example. # # Test set accuracy # ----------------- # Commonly the performance of a machine learning model is tested on a hold-out test # set with data that has not been used for training the model. We also wrap this in a # function: def test_accuracy(net, device="cpu"): trainset, testset = load_data() testloader = torch.utils.data.DataLoader( testset, batch_size=4, shuffle=False, num_workers=2 ) correct = 0 total = 0 with torch.no_grad(): for data in testloader: images, labels = data images, labels = images.to(device), labels.to(device) outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() return correct / total ###################################################################### # The function also expects a ``device`` parameter, so we can do the # test set validation on a GPU. # # Configuring the search space # ---------------------------- # Lastly, we need to define Ray Tune's search space. Here is an example: # # .. 
code-block:: python # # config = { # "l1": tune.choice([2 ** i for i in range(9)]), # "l2": tune.choice([2 ** i for i in range(9)]), # "lr": tune.loguniform(1e-4, 1e-1), # "batch_size": tune.choice([2, 4, 8, 16]) # } # # The ``tune.choice()`` accepts a list of values that are uniformly sampled from. # In this example, the ``l1`` and ``l2`` parameters # should be powers of 2 between 4 and 256, so either 4, 8, 16, 32, 64, 128, or 256. # The ``lr`` (learning rate) should be uniformly sampled between 0.0001 and 0.1. Lastly, # the batch size is a choice between 2, 4, 8, and 16. # # At each trial, Ray Tune will now randomly sample a combination of parameters from these # search spaces. It will then train a number of models in parallel and find the best # performing one among these. We also use the ``ASHAScheduler`` which will terminate bad # performing trials early. # # We wrap the ``train_cifar`` function with ``functools.partial`` to set the constant # ``data_dir`` parameter. We can also tell Ray Tune what resources should be # available for each trial: # # .. code-block:: python # # gpus_per_trial = 2 # # ... # result = tune.run( # partial(train_cifar, data_dir=data_dir), # resources_per_trial={"cpu": 8, "gpu": gpus_per_trial}, # config=config, # num_samples=num_samples, # scheduler=scheduler, # checkpoint_at_end=True) # # You can specify the number of CPUs, which are then available e.g. # to increase the ``num_workers`` of the PyTorch ``DataLoader`` instances. The selected # number of GPUs are made visible to PyTorch in each trial. Trials do not have access to # GPUs that haven't been requested for them - so you don't have to care about two trials # using the same set of resources. # # Here we can also specify fractional GPUs, so something like ``gpus_per_trial=0.5`` is # completely valid. The trials will then share GPUs among each other. # You just have to make sure that the models still fit in the GPU memory. 
# # After training the models, we will find the best performing one and load the trained # network from the checkpoint file. We then obtain the test set accuracy and report # everything by printing. # # The full main function looks like this: def main(num_samples=10, max_num_epochs=10, gpus_per_trial=2): data_dir = os.path.abspath("./data") load_data(data_dir) config = { "l1": tune.choice([2**i for i in range(9)]), "l2": tune.choice([2**i for i in range(9)]), "lr": tune.loguniform(1e-4, 1e-1), "batch_size": tune.choice([2, 4, 8, 16]), } scheduler = ASHAScheduler( metric="loss", mode="min", max_t=max_num_epochs, grace_period=1, reduction_factor=2, ) result = tune.run( partial(train_cifar, data_dir=data_dir), resources_per_trial={"cpu": 2, "gpu": gpus_per_trial}, config=config, num_samples=num_samples, scheduler=scheduler, ) best_trial = result.get_best_trial("loss", "min", "last") print(f"Best trial config: {best_trial.config}") print(f"Best trial final validation loss: {best_trial.last_result['loss']}") print(f"Best trial final validation accuracy: {best_trial.last_result['accuracy']}") best_trained_model = Net(best_trial.config["l1"], best_trial.config["l2"]) device = "cpu" if torch.cuda.is_available(): device = "cuda:0" if gpus_per_trial > 1: best_trained_model = nn.DataParallel(best_trained_model) best_trained_model.to(device) best_checkpoint = best_trial.checkpoint best_checkpoint_dir = best_checkpoint.to_directory() best_checkpoint_path = os.path.join(best_checkpoint_dir, "checkpoint.pt") best_checkpoint_data = torch.load(best_checkpoint_path) best_trained_model.load_state_dict(best_checkpoint_data["net_state_dict"]) test_acc = test_accuracy(best_trained_model, device) print("Best trial test set accuracy: {}".format(test_acc)) if __name__ == "__main__": # sphinx_gallery_start_ignore # Fixes ``AttributeError: '_LoggingTee' object has no attribute 'fileno'``. # This is only needed to run with sphinx-build. 
import sys sys.stdout.fileno = lambda: False # sphinx_gallery_end_ignore # You can change the number of GPUs per trial here: main(num_samples=10, max_num_epochs=10, gpus_per_trial=0) ###################################################################### # If you run the code, an example output could look like this: # # :: # # Number of trials: 10/10 (10 TERMINATED) # +-----+--------------+------+------+-------------+--------+---------+------------+ # | ... | batch_size | l1 | l2 | lr | iter | loss | accuracy | # |-----+--------------+------+------+-------------+--------+---------+------------| # | ... | 2 | 1 | 256 | 0.000668163 | 1 | 2.31479 | 0.0977 | # | ... | 4 | 64 | 8 | 0.0331514 | 1 | 2.31605 | 0.0983 | # | ... | 4 | 2 | 1 | 0.000150295 | 1 | 2.30755 | 0.1023 | # | ... | 16 | 32 | 32 | 0.0128248 | 10 | 1.66912 | 0.4391 | # | ... | 4 | 8 | 128 | 0.00464561 | 2 | 1.7316 | 0.3463 | # | ... | 8 | 256 | 8 | 0.00031556 | 1 | 2.19409 | 0.1736 | # | ... | 4 | 16 | 256 | 0.00574329 | 2 | 1.85679 | 0.3368 | # | ... | 8 | 2 | 2 | 0.00325652 | 1 | 2.30272 | 0.0984 | # | ... | 2 | 2 | 2 | 0.000342987 | 2 | 1.76044 | 0.292 | # | ... | 4 | 64 | 32 | 0.003734 | 8 | 1.53101 | 0.4761 | # +-----+--------------+------+------+-------------+--------+---------+------------+ # # Best trial config: {'l1': 64, 'l2': 32, 'lr': 0.0037339984519545164, 'batch_size': 4} # Best trial final validation loss: 1.5310075663924216 # Best trial final validation accuracy: 0.4761 # Best trial test set accuracy: 0.4737 # # Most trials have been stopped early in order to avoid wasting resources. # The best performing trial achieved a validation accuracy of about 47%, which could # be confirmed on the test set. # # So that's it! You can now tune the parameters of your PyTorch models.
Net
python
PrefectHQ__prefect
tests/client/test_collections_metadata_client.py
{ "start": 231, "end": 2076 }
class ____: async def test_returns_cloud_client_when_server_type_is_cloud(self, monkeypatch): mock_get_client = MagicMock() mock_get_cloud_client = MagicMock() monkeypatch.setattr("prefect.client.collections.get_client", mock_get_client) monkeypatch.setattr( "prefect.client.collections.get_cloud_client", mock_get_cloud_client ) mock_get_client.return_value.server_type = ServerType.CLOUD mock_get_cloud_client.return_value = CloudClient( host="test-host", api_key="test-api-key" ) result = get_collections_metadata_client() mock_get_client.assert_called_once() mock_get_cloud_client.assert_called_once() assert isinstance(result, CloudClient) async def test_returns_orchestration_client_when_server_type_is_server( self, monkeypatch ): mock_get_client = MagicMock() monkeypatch.setattr("prefect.client.collections.get_client", mock_get_client) mock_get_client.return_value = PrefectClient(api="test-api") mock_get_client.return_value.server_type = ServerType.SERVER result = get_collections_metadata_client() mock_get_client.assert_called_once() assert isinstance(result, PrefectClient) async def test_returns_orchestration_client_when_server_type_is_ephemeral( self, monkeypatch ): mock_get_client = MagicMock() monkeypatch.setattr("prefect.client.collections.get_client", mock_get_client) mock_get_client.return_value = PrefectClient(api="test-api") mock_get_client.return_value.server_type = ServerType.EPHEMERAL result = get_collections_metadata_client() mock_get_client.assert_called_once() assert isinstance(result, PrefectClient)
TestGetCollectionsMetadataClient
python
pytorch__pytorch
torch/nn/modules/container.py
{ "start": 27819, "end": 37672 }
class ____(Module): r"""Holds parameters in a dictionary. ParameterDict can be indexed like a regular Python dictionary, but Parameters it contains are properly registered, and will be visible by all Module methods. Other objects are treated as would be done by a regular Python dictionary :class:`~torch.nn.ParameterDict` is an **ordered** dictionary. :meth:`~torch.nn.ParameterDict.update` with other unordered mapping types (e.g., Python's plain ``dict``) does not preserve the order of the merged mapping. On the other hand, ``OrderedDict`` or another :class:`~torch.nn.ParameterDict` will preserve their ordering. Note that the constructor, assigning an element of the dictionary and the :meth:`~torch.nn.ParameterDict.update` method will convert any :class:`~torch.Tensor` into :class:`~torch.nn.Parameter`. Args: values (iterable, optional): a mapping (dictionary) of (string : Any) or an iterable of key-value pairs of type (string, Any) Example:: class MyModule(nn.Module): def __init__(self) -> None: super().__init__() self.params = nn.ParameterDict( { "left": nn.Parameter(torch.randn(5, 10)), "right": nn.Parameter(torch.randn(5, 10)), } ) def forward(self, x, choice): x = self.params[choice].mm(x) return x """ def __init__(self, parameters: Any = None) -> None: super().__init__() self._keys: dict[str, None] = {} if parameters is not None: self.update(parameters) def _key_to_attr(self, key: str) -> str: if not isinstance(key, str): raise TypeError( "Index given to ParameterDict cannot be used as a key as it is " f"not a string (type is '{type(key).__name__}'). Open an issue on " "github if you need non-string keys." ) else: # Use the key as-is so that `.named_parameters()` returns the right thing return key def __getitem__(self, key: str) -> Any: attr = self._key_to_attr(key) return getattr(self, attr) def __setitem__(self, key: str, value: Any) -> None: # Note that all other function that add an entry to the dictionary part of # the ParameterDict end up here. 
So this is the only place where we need # to wrap things into Parameter if needed. # Objects added via setattr() are not in the dictionary part and thus won't # call into this function. self._keys[key] = None attr = self._key_to_attr(key) if isinstance(value, torch.Tensor) and not isinstance(value, Parameter): value = Parameter(value) setattr(self, attr, value) def __delitem__(self, key: str) -> None: del self._keys[key] attr = self._key_to_attr(key) delattr(self, attr) def __len__(self) -> int: return len(self._keys) def __iter__(self) -> Iterator[str]: return iter(self._keys) def __reversed__(self) -> Iterator[str]: return reversed(self._keys) def copy(self) -> ParameterDict: """Return a copy of this :class:`~torch.nn.ParameterDict` instance.""" # We have to use an OrderedDict because the ParameterDict constructor # behaves differently on plain dict vs OrderedDict return ParameterDict(OrderedDict((k, self[k]) for k in self._keys)) def __contains__(self, key: str) -> bool: return key in self._keys def setdefault(self, key: str, default: Optional[Any] = None) -> Any: """Set the default for a key in the Parameterdict. If key is in the ParameterDict, return its value. If not, insert `key` with a parameter `default` and return `default`. `default` defaults to `None`. Args: key (str): key to set default for default (Any): the parameter set to the key """ if key not in self: self[key] = default return self[key] def clear(self) -> None: """Remove all items from the ParameterDict.""" for k in self._keys.copy(): del self[k] def pop(self, key: str) -> Any: r"""Remove key from the ParameterDict and return its parameter. 
Args: key (str): key to pop from the ParameterDict """ v = self[key] del self[key] return v def popitem(self) -> tuple[str, Any]: """Remove and return the last inserted `(key, parameter)` pair from the ParameterDict.""" k, _ = self._keys.popitem() # We need the key in the _keys to be able to access/del self._keys[k] = None val = self[k] del self[k] return k, val def get(self, key: str, default: Optional[Any] = None) -> Any: r"""Return the parameter associated with key if present. Otherwise return default if provided, None if not. Args: key (str): key to get from the ParameterDict default (Parameter, optional): value to return if key not present """ return self[key] if key in self else default # noqa: SIM401 def fromkeys( self, keys: Iterable[str], default: Optional[Any] = None ) -> ParameterDict: r"""Return a new ParameterDict with the keys provided. Args: keys (iterable, string): keys to make the new ParameterDict from default (Parameter, optional): value to set for all keys """ return ParameterDict((k, default) for k in keys) def keys(self) -> container_abcs.KeysView[str]: r"""Return an iterable of the ParameterDict keys.""" return self._keys.keys() def items(self) -> Iterable[tuple[str, Any]]: r"""Return an iterable of the ParameterDict key/value pairs.""" return ((k, self[k]) for k in self._keys) def values(self) -> Iterable[Any]: r"""Return an iterable of the ParameterDict values.""" return (self[k] for k in self._keys) def update(self, parameters: Mapping[str, Any] | ParameterDict) -> None: r"""Update the :class:`~torch.nn.ParameterDict` with key-value pairs from ``parameters``, overwriting existing keys. .. note:: If :attr:`parameters` is an ``OrderedDict``, a :class:`~torch.nn.ParameterDict`, or an iterable of key-value pairs, the order of new elements in it is preserved. 
Args: parameters (iterable): a mapping (dictionary) from string to :class:`~torch.nn.Parameter`, or an iterable of key-value pairs of type (string, :class:`~torch.nn.Parameter`) """ if not isinstance(parameters, container_abcs.Iterable): raise TypeError( "ParametersDict.update should be called with an " "iterable of key/value pairs, but got " + type(parameters).__name__ ) if isinstance(parameters, (OrderedDict, ParameterDict)): for key, parameter in parameters.items(): self[key] = parameter elif isinstance(parameters, container_abcs.Mapping): for key, parameter in sorted(parameters.items()): self[key] = parameter else: for j, p in enumerate(parameters): if not isinstance(p, container_abcs.Iterable): raise TypeError( "ParameterDict update sequence element " "#" + str(j) + " should be Iterable; is" + type(p).__name__ ) # pyrefly: ignore [bad-argument-type] if not len(p) == 2: raise ValueError( "ParameterDict update sequence element " # pyrefly: ignore [bad-argument-type] "#" + str(j) + " has length " + str(len(p)) + "; 2 is required" ) # parameters as length-2 list too cumbersome to type, see ModuleDict.update comment self[p[0]] = p[1] # type: ignore[assignment] def extra_repr(self) -> str: child_lines = [] for k, p in self.items(): if isinstance(p, torch.Tensor): size_str = "x".join(str(size) for size in p.size()) if p.device.type in ["cuda", torch._C._get_privateuse1_backend_name()]: device_str = f" ({p.device})" else: device_str = "" parastr = "{} containing: [{} of size {}{}]".format( "Parameter" if isinstance(p, Parameter) else "Tensor", torch.typename(p), size_str, device_str, ) # pyrefly: ignore [bad-argument-type] child_lines.append(" (" + str(k) + "): " + parastr) else: child_lines.append( # pyrefly: ignore [bad-argument-type] " (" + str(k) + "): Object of type: " + type(p).__name__ ) tmpstr = "\n".join(child_lines) return tmpstr def __call__(self, input): raise RuntimeError("ParameterDict should not be called.") def __or__(self, other: ParameterDict) -> 
ParameterDict: copy = self.copy() copy.update(other) return copy def __ror__(self, other: ParameterDict) -> ParameterDict: copy = other.copy() copy.update(self) return copy def __ior__(self, other: ParameterDict) -> Self: self.update(other) return self
ParameterDict
python
airbytehq__airbyte
airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_product_search_query_performance_report.py
{ "start": 23456, "end": 30934 }
class ____(TestBaseProductSearchQueryPerformanceReport): stream_name = "product_search_query_performance_report_monthly" report_file = "product_search_query_performance_report_monthly" records_number = 6 incremental_report_file = "product_search_query_performance_report_monthly_incremental" incremental_report_file_with_records_further_cursor = ( "product_search_query_performance_report_monthly_incremental_with_records_further_cursor" ) report_file_with_records_further_start_date = "product_search_query_performance_report_monthly_with_records_further_start_date" state_file = "product_search_query_performance_report_state" state_file_legacy = "product_search_query_performance_report_state" def mock_report_apis(self): super().mock_report_apis() self.mock_generate_report_api( endpoint="Submit", response_template="generate_report", body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductSearchQueryPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductSearchQueryPerformanceReportRequest", "Aggregation": "Monthly", "Columns": ["TimePeriod", "AccountId", "AccountNumber", "AccountName", "AdId", "AdGroupId", "AdGroupName", "CampaignId", "CampaignName", "DestinationUrl", "DeviceType", "DeviceOS", "Language", "SearchQuery", "Network", "MerchantProductId", "Title", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "AdGroupCriterionId", "ProductGroup", "PartitionType", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Assists", "CostPerAssist", "Revenue", "CostPerConversion", "RevenuePerConversion", "RevenuePerAssist", "CustomerId", "CustomerName", "AssistedImpressions", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllRevenuePerConversion", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "AverageCpm", 
"ConversionsQualified", "AssistedConversionsQualified", "AllConversionsQualified", "CampaignType", "AssetGroupId", "AssetGroupName"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}', ) self.mock_generate_report_api( endpoint="Submit", response_template="generate_report", body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductSearchQueryPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductSearchQueryPerformanceReportRequest", "Aggregation": "Monthly", "Columns": ["TimePeriod", "AccountId", "AccountNumber", "AccountName", "AdId", "AdGroupId", "AdGroupName", "CampaignId", "CampaignName", "DestinationUrl", "DeviceType", "DeviceOS", "Language", "SearchQuery", "Network", "MerchantProductId", "Title", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "AdGroupCriterionId", "ProductGroup", "PartitionType", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Assists", "CostPerAssist", "Revenue", "CostPerConversion", "RevenuePerConversion", "RevenuePerAssist", "CustomerId", "CustomerName", "AssistedImpressions", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllRevenuePerConversion", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "AllConversionsQualified", "CampaignType", "AssetGroupId", "AssetGroupName"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}', ) 
self.mock_generate_report_api( endpoint="Submit", response_template="generate_report", body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductSearchQueryPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductSearchQueryPerformanceReportRequest", "Aggregation": "Monthly", "Columns": ["TimePeriod", "AccountId", "AccountNumber", "AccountName", "AdId", "AdGroupId", "AdGroupName", "CampaignId", "CampaignName", "DestinationUrl", "DeviceType", "DeviceOS", "Language", "SearchQuery", "Network", "MerchantProductId", "Title", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "AdGroupCriterionId", "ProductGroup", "PartitionType", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Assists", "CostPerAssist", "Revenue", "CostPerConversion", "RevenuePerConversion", "RevenuePerAssist", "CustomerId", "CustomerName", "AssistedImpressions", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllRevenuePerConversion", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "AllConversionsQualified", "CampaignType", "AssetGroupId", "AssetGroupName"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2023}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}', ) self.mock_generate_report_api( endpoint="Submit", response_template="generate_report", body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "ProductSearchQueryPerformanceReport", "ReturnOnlyCompleteData": false, "Type": "ProductSearchQueryPerformanceReportRequest", 
"Aggregation": "Monthly", "Columns": ["TimePeriod", "AccountId", "AccountNumber", "AccountName", "AdId", "AdGroupId", "AdGroupName", "CampaignId", "CampaignName", "DestinationUrl", "DeviceType", "DeviceOS", "Language", "SearchQuery", "Network", "MerchantProductId", "Title", "ClickTypeId", "TotalClicksOnAdElements", "ClickType", "AdGroupCriterionId", "ProductGroup", "PartitionType", "Impressions", "Clicks", "Ctr", "AverageCpc", "Spend", "Conversions", "ConversionRate", "Assists", "CostPerAssist", "Revenue", "CostPerConversion", "RevenuePerConversion", "RevenuePerAssist", "CustomerId", "CustomerName", "AssistedImpressions", "AssistedClicks", "AssistedConversions", "AllConversions", "AllRevenue", "AllConversionRate", "AllCostPerConversion", "AllRevenuePerConversion", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "AverageCpm", "ConversionsQualified", "AssistedConversionsQualified", "AllConversionsQualified", "CampaignType", "AssetGroupId", "AssetGroupName"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 6, "Month": 5, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}', )
TestProductSearchQueryPerformanceReportMonthlyStream
python
getsentry__sentry
src/sentry/utils/kvstore/memory.py
{ "start": 217, "end": 297 }
class ____(Generic[V]): value: V expires_at: datetime | None = None
Record
python
oauthlib__oauthlib
tests/openid/connect/core/grant_types/test_base.py
{ "start": 210, "end": 391 }
class ____(GrantTypeBase): """Class to test GrantTypeBase""" def __init__(self, request_validator=None, **kwargs): self.request_validator = request_validator
GrantBase
python
joke2k__faker
tests/providers/test_address.py
{ "start": 56924, "end": 58736 }
class ____: """Test zh_TW address provider methods""" def test_postcode(self, faker, num_samples): for _ in range(num_samples): postcode = faker.postcode() assert isinstance(postcode, str) assert re.fullmatch(r"[1-9]\d{2}(?:\d{2})?", postcode) def test_city_name(self, faker, num_samples): for _ in range(num_samples): city_name = faker.city_name() assert isinstance(city_name, str) assert city_name in ZhTwAddressProvider.cities def test_city_suffix(self, faker, num_samples): for _ in range(num_samples): city_suffix = faker.city_suffix() assert isinstance(city_suffix, str) assert city_suffix in ZhTwAddressProvider.city_suffixes def test_city(self, faker, num_samples): city_pattern: Pattern = re.compile(r"(?P<city_name>.*?)[市縣]?") for _ in range(num_samples): city = faker.city() assert isinstance(city, str) match = city_pattern.fullmatch(city) assert match assert match.group("city_name") in ZhTwAddressProvider.cities def test_country(self, faker, num_samples): for _ in range(num_samples): country = faker.country() assert isinstance(country, str) assert country in ZhTwAddressProvider.countries def test_street_name(self, faker, num_samples): for _ in range(num_samples): street_name = faker.street_name() assert isinstance(street_name, str) assert street_name in ZhTwAddressProvider.street_names def test_address(self, faker, num_samples): for _ in range(num_samples): address = faker.address() assert isinstance(address, str)
TestZhTw
python
tensorflow__tensorflow
tensorflow/python/training/server_lib_test.py
{ "start": 16029, "end": 19418 }
class ____(test.TestCase): def testLocalServer(self): cluster_def = server_lib.ClusterSpec( {"local": ["localhost:2222"]} ).as_cluster_def() server_def = tensorflow_server_pb2.ServerDef( cluster=cluster_def, job_name="local", task_index=0, protocol="grpc" ) self.assertProtoEquals( """ cluster { job { name: 'local' tasks { key: 0 value: 'localhost:2222' } } } job_name: 'local' task_index: 0 protocol: 'grpc' """, server_def, ) # Verifies round trip from Proto->Spec->Proto is correct. cluster_spec = server_lib.ClusterSpec(cluster_def) self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def()) def testTwoProcesses(self): cluster_def = server_lib.ClusterSpec( {"local": ["localhost:2222", "localhost:2223"]} ).as_cluster_def() server_def = tensorflow_server_pb2.ServerDef( cluster=cluster_def, job_name="local", task_index=1, protocol="grpc" ) self.assertProtoEquals( """ cluster { job { name: 'local' tasks { key: 0 value: 'localhost:2222' } tasks { key: 1 value: 'localhost:2223' } } } job_name: 'local' task_index: 1 protocol: 'grpc' """, server_def, ) # Verifies round trip from Proto->Spec->Proto is correct. cluster_spec = server_lib.ClusterSpec(cluster_def) self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def()) def testTwoJobs(self): cluster_def = server_lib.ClusterSpec({ "ps": ["ps0:2222", "ps1:2222"], "worker": ["worker0:2222", "worker1:2222", "worker2:2222"], }).as_cluster_def() server_def = tensorflow_server_pb2.ServerDef( cluster=cluster_def, job_name="worker", task_index=2, protocol="grpc" ) self.assertProtoEquals( """ cluster { job { name: 'ps' tasks { key: 0 value: 'ps0:2222' } tasks { key: 1 value: 'ps1:2222' } } job { name: 'worker' tasks { key: 0 value: 'worker0:2222' } tasks { key: 1 value: 'worker1:2222' } tasks { key: 2 value: 'worker2:2222' } } } job_name: 'worker' task_index: 2 protocol: 'grpc' """, server_def, ) # Verifies round trip from Proto->Spec->Proto is correct. 
cluster_spec = server_lib.ClusterSpec(cluster_def) self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def()) def testDenseAndSparseJobs(self): cluster_def = server_lib.ClusterSpec({ "ps": ["ps0:2222", "ps1:2222"], "worker": {0: "worker0:2222", 2: "worker2:2222"}, }).as_cluster_def() server_def = tensorflow_server_pb2.ServerDef( cluster=cluster_def, job_name="worker", task_index=2, protocol="grpc" ) self.assertProtoEquals( """ cluster { job { name: 'ps' tasks { key: 0 value: 'ps0:2222' } tasks { key: 1 value: 'ps1:2222' } } job { name: 'worker' tasks { key: 0 value: 'worker0:2222' } tasks { key: 2 value: 'worker2:2222' } } } job_name: 'worker' task_index: 2 protocol: 'grpc' """, server_def, ) # Verifies round trip from Proto->Spec->Proto is correct. cluster_spec = server_lib.ClusterSpec(cluster_def) self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())
ServerDefTest
python
cython__cython
Cython/Compiler/Nodes.py
{ "start": 59961, "end": 60460 }
class ____(CBaseTypeNode): # base_type CBaseTypeNode # is_const boolean # is_volatile boolean child_attrs = ["base_type"] def analyse(self, env, could_be_name=False): base = self.base_type.analyse(env, could_be_name) if base.is_pyobject: error(self.pos, "Const/volatile base type cannot be a Python object") return PyrexTypes.c_const_or_volatile_type(base, self.is_const, self.is_volatile)
CConstOrVolatileTypeNode
python
google__jax
jax/_src/core.py
{ "start": 140691, "end": 151295 }
class ____: var_names: defaultdict[Var, str] # Shared jaxprs are those that are used multiple times and are printed # first. shared_jaxprs: MutableMapping[Jaxpr, str] # maps shared jaxpr to its name shared_jaxpr_names: MutableSet[str] def __init__(self) -> None: self.shared_jaxprs = {} self.shared_jaxpr_names = set() fresh_names: Iterator[str] = ( name for i in it.count() if (name := _encode_digits_alphabetic(i)) not in self.shared_jaxpr_names ) self.var_names = defaultdict(fresh_names.__next__) def suggest_same_var_names(self, for_vars: Sequence[Atom], like_vars: Sequence[Atom]) -> None: """Suggests the names for `for_vars` to match those of `like_vars`. `for_vars` are distinct Vars, and are aliased with `like_vars`. """ used_like_vars: set[Var] = set() if len(for_vars) != len(like_vars): # The mismatch can happen if a primitive containing a subjaxpr is invoked # with the wrong number of arguments, e.g., when printing an invalid Jaxpr. return for for_v, like_v in zip(for_vars, like_vars): if (isinstance(like_v, Var) and like_v not in used_like_vars and isinstance(for_v, Var) and for_v not in self.var_names): used_like_vars.add(like_v) self.var_names[for_v] = pp_var(like_v, self) def pp_var(v: Var | Literal, context: JaxprPpContext, *, print_literal_dtype: bool = True) -> str: return v.pretty_print(context, print_dtype=print_literal_dtype) def pp_aval(a: AbstractValue, context: JaxprPpContext) -> str: if isinstance(a, DShapedArray): shape = [pp_var(d, context) if type(d) is Var else str(d) for d in a.shape] dtype = dtypes.short_dtype_name(a.dtype) return f'{dtype}[{",".join(shape)}]' else: return a.str_short(short_dtypes=True) def pp_vars(vs: Sequence[Atom], context: JaxprPpContext, *, separator="", print_shapes: bool = False) -> pp.Doc: if print_shapes: return pp.nest(2, pp.group( pp.join(pp.text(separator) + pp.group(pp.brk()), [ pp.text(pp_var(v, context)) + pp.type_annotation(pp.text(":" + pp_aval(v.aval, context))) for v in vs ]) )) else: return pp.nest(2, 
pp.group( pp.join(pp.text(separator) + pp.group(pp.brk()), [pp.text(pp_var(v, context)) for v in vs]) )) def pp_kv_pair(k:str, v: Any, context: JaxprPpContext, settings: JaxprPpSettings) -> pp.Doc: if type(v) is tuple and all(isinstance(j, (Jaxpr, ClosedJaxpr)) for j in v): pp_v = pp_jaxprs(v, context, settings) elif isinstance(v, Jaxpr): pp_v = pp_jaxpr(v, context, settings) elif isinstance(v, ClosedJaxpr): pp_v = pp_jaxpr(v.jaxpr, context, settings) else: pp_v = pp.text(str(v)) return pp.text(f'{k}=') + pp_v def pp_kv_pairs(kv_pairs, context: JaxprPpContext, settings: JaxprPpSettings) -> pp.Doc: if not kv_pairs: return pp.nil() return pp.group(pp.concat([ pp.nest(2, pp.concat([ pp.text("["), pp.brk(""), pp.join(pp.brk(), [pp_kv_pair(k, v, context, settings) for k, v in kv_pairs]) ])), pp.brk(""), pp.text("]") ])) def pp_eqn(eqn: JaxprEqn, context: JaxprPpContext, settings: JaxprPpSettings ) -> pp.Doc: rule = (_pp_eqn if not settings.custom_pp_eqn_rules else pp_eqn_rules.get(eqn.primitive, _pp_eqn)) doc = rule(eqn, context, settings) user_frame = source_info_util.user_frame(eqn.source_info.traceback) return doc if user_frame is None else pp.source_map(doc, user_frame) def _pp_eqn(eqn: JaxprEqn, context: JaxprPpContext, settings: JaxprPpSettings, params: Sequence[str] | None = None) -> pp.Doc: annotation = (source_info_util.summarize(eqn.source_info) if settings.source_info else None) if params is None: params = sorted(eqn.params) name_stack_annotation = f'[{eqn.source_info.name_stack}]' if settings.name_stack else None lhs = pp_vars(eqn.outvars, context, print_shapes=settings.print_shapes) rhs = [pp.text(eqn.primitive.name, annotation=name_stack_annotation), pp_kv_pairs([(p, eqn.params[p]) for p in params], context, settings), pp.text(" ") + pp_vars(eqn.invars, context)] if eqn.outvars: return pp.concat([lhs, pp.text(" = ", annotation=annotation), *rhs]) else: return pp.concat(rhs) CustomPpEqnRule = Callable[[JaxprEqn, JaxprPpContext, JaxprPpSettings], pp.Doc] 
pp_eqn_rules: dict[Primitive, CustomPpEqnRule] = {} def pp_eqns(eqns: Sequence[JaxprEqn], context: JaxprPpContext, settings: JaxprPpSettings) -> pp.Doc: return pp.join( pp.brk("; "), [pp_eqn(e, context, settings) for e in eqns]) def _compact_eqn_should_include(k: str, v: Any) -> bool: if k == 'branches': return False if isinstance(v, (Jaxpr, ClosedJaxpr)): return False if (isinstance(v, tuple) and any(isinstance(e, (Jaxpr, ClosedJaxpr)) for e in v)): return False return True def str_eqn_compact(primitive: Primitive, params: dict[Any, Any]) -> str: "Compact equation to string conversion used in HLO metadata." if primitive in custom_str_eqn_compact_rules: return custom_str_eqn_compact_rules[primitive](primitive, params) primitive_name = primitive.name kvs = " ".join(f"{k}={v}" for k, v in params.items() if _compact_eqn_should_include(k, v)) return f"{primitive_name}[{kvs}]" if len(kvs) > 0 else primitive_name custom_str_eqn_compact_rules: dict[ Primitive, Callable[[Primitive, dict[Any, Any]], str] ] = {} def pp_jaxpr_skeleton(jaxpr: Jaxpr, eqns_fn, context: JaxprPpContext, settings: JaxprPpSettings) -> pp.Doc: constvars = pp_vars(jaxpr.constvars, context, print_shapes=settings.print_shapes) invars = pp_vars(jaxpr.invars, context, print_shapes=settings.print_shapes) eqns = eqns_fn() outvars = pp.concat([ pp.text("("), pp_vars(jaxpr.outvars, context, separator=","), pp.text(")" if len(jaxpr.outvars) != 1 else ",)")]) if settings.print_effects: # TODO(sharadmv): render an entire signature here eff_text = [pp.text(" : { ")] for i, eff in enumerate(jaxpr.effects): if i > 0: eff_text.append(pp.text(", ")) if isinstance(eff, effects.JaxprInputEffect): index = eff.input_index all_vars = [*jaxpr.constvars, *jaxpr.invars] eff_text.append(pp_effect(eff.replace(input_index=all_vars[index]), context)) else: eff_text.append(pp_effect(eff, context)) eff_text.append(pp.text(" }")) else: eff_text = [] return pp.group(pp.nest(2, pp.concat([ pp.text("{ "), pp.keyword(pp.text("lambda 
")), constvars, pp.text("; "), invars, pp.text(". "), pp.keyword(pp.text("let")), pp.nest(2, pp.brk() + eqns), pp.brk(), pp.keyword(pp.text("in ")), outvars, pp.concat(eff_text) ])) + pp.text(" }")) def pp_shared_jaxpr( name: str, jaxpr: Jaxpr, context: JaxprPpContext, settings: JaxprPpSettings, ) -> pp.Doc: return pp.concat([ pp.text("let " + name + " = "), pp_jaxpr(jaxpr, context, settings), pp.text(" in"), pp.brk(), ]) def pp_jaxpr( jaxpr: Jaxpr, context: JaxprPpContext, settings: JaxprPpSettings, ) -> pp.Doc: if name := context.shared_jaxprs.get(jaxpr): return pp.text(name) eqns_fn = lambda: pp_eqns(jaxpr.eqns, context, settings) return pp_jaxpr_skeleton(jaxpr, eqns_fn, context, settings) def pp_jaxprs(jaxprs: Sequence[ClosedJaxpr | Jaxpr], context: JaxprPpContext, settings: JaxprPpSettings) -> pp.Doc: jaxprs = [j.jaxpr if isinstance(j, ClosedJaxpr) else j for j in jaxprs] return pp.group(pp.concat([pp.nest(2, pp.concat([ pp.text('('), pp.brk(""), pp.join(pp.brk(), map(lambda x: pp_jaxpr(x, context, settings), jaxprs))] )), pp.brk(""), pp.text(')')]) ) def pp_jaxpr_eqn_range(jaxpr: Jaxpr, lo: int, hi: int, context: JaxprPpContext, settings: JaxprPpSettings) -> pp.Doc: lo = max(lo, 0) hi = max(lo, min(hi, len(jaxpr.eqns))) eqns = jaxpr.eqns[lo:hi] def eqns_fn(): pps = [] if len(eqns) == 0 and len(jaxpr.eqns) != 0: pps.append(pp.text('...')) else: if lo != 0: pps.append(pp.text('...')) pps.extend(map((lambda e: pp_eqn(e, context, settings)), eqns)) if hi != len(jaxpr.eqns): pps.append(pp.text('...')) return pp.join(pp.brk("; "), pps) return pp_jaxpr_skeleton(jaxpr, eqns_fn, context, settings) def pp_effect(effect: Effect, context: JaxprPpContext) -> pp.Doc: if hasattr(effect, "_pretty_print"): return effect._pretty_print(context) return pp.text(str(effect)) # ------------------- Jaxpr util ------------------- def last_used(jaxpr: Jaxpr) -> dict[Var, JaxprEqn | None]: """Returns a mapping from every var in jaxpr to what equation uses it last.""" last_used: 
dict[Var, JaxprEqn | None] = { v: None for v in jaxpr.outvars if not isinstance(v, Literal)} for eqn in reversed(jaxpr.eqns): for v in eqn.invars: if not isinstance(v, Literal) and v not in last_used: last_used[v] = eqn return last_used def clean_up_dead_vars(eqn: JaxprEqn, env: dict[Var, Any], last_used: dict[Var, JaxprEqn | None]): """Remove all eqn.invars from env if eqn is the last time they were used.""" for v in {v for v in eqn.invars if not isinstance(v, Literal)}: if last_used[v] is eqn: # Delete ref to variable when it is no longer needed by next equations. del env[v] # Used in shard_map for converting avals shard_aval_handlers = {} # type: ignore unshard_aval_handlers = {} # type: ignore def shard_aval(mesh, manual_axes, check_vma, spec, aval: AbstractValue ) -> AbstractValue: if type(aval) in shard_aval_handlers: return shard_aval_handlers[type(aval)](mesh, manual_axes, check_vma, spec, aval) raise NotImplementedError(f"Unsupported aval type: {type(aval)}") def unshard_aval(mesh, check_vma, spec, aval: AbstractValue ) -> AbstractValue: if type(aval) in unshard_aval_handlers: return unshard_aval_handlers[type(aval)](mesh, check_vma, spec, aval) else: raise NotImplementedError(f"Unsupported aval type: {type(aval)}") # ----------------- external APIs for querying tracing context ----------------- # TODO(dougalm, jakevdp): expose these via jax.extend # Comparable object for checking whether JAX's trace state has changed.
JaxprPpContext
python
numba__numba
numba/cuda/tests/cudadrv/test_emm_plugins.py
{ "start": 6683, "end": 7094 }
class ____(CUDATestCase): """ Ensure that Numba rejects EMM Plugins with incompatible version numbers. """ def test_bad_plugin_version(self): with self.assertRaises(RuntimeError) as raises: cuda.set_memory_manager(BadVersionEMMPlugin) self.assertIn('version 1 required', str(raises.exception)) if __name__ == '__main__': unittest.main()
TestBadEMMPluginVersion
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/triggers/glue_crawler.py
{ "start": 1103, "end": 2372 }
class ____(AwsBaseWaiterTrigger): """ Watches for a glue crawl, triggers when it finishes. :param crawler_name: name of the crawler to watch :param aws_conn_id: The Airflow connection used for AWS credentials. """ def __init__( self, crawler_name: str, aws_conn_id: str | None = "aws_default", waiter_delay: int = 5, waiter_max_attempts: int = 1500, **kwargs, ): super().__init__( serialized_fields={"crawler_name": crawler_name}, waiter_name="crawler_ready", waiter_args={"Name": crawler_name}, failure_message="Error while waiting for glue crawl to complete", status_message="Status of glue crawl is", status_queries=["Crawler.State", "Crawler.LastCrawl"], return_value=None, waiter_delay=waiter_delay, waiter_max_attempts=waiter_max_attempts, aws_conn_id=aws_conn_id, **kwargs, ) def hook(self) -> AwsGenericHook: return GlueCrawlerHook( aws_conn_id=self.aws_conn_id, region_name=self.region_name, verify=self.verify, config=self.botocore_config, )
GlueCrawlerCompleteTrigger
python
ipython__ipython
tests/test_pretty.py
{ "start": 972, "end": 1034 }
class ____(object): def somemethod(self): pass
MyObj
python
sphinx-doc__sphinx
sphinx/directives/admonitions.py
{ "start": 1750, "end": 1808 }
class ____(SphinxAdmonition): node_class = nodes.tip
Tip
python
Lightning-AI__lightning
tests/tests_pytorch/trainer/logging_/test_logger_connector.py
{ "start": 5338, "end": 6080 }
class ____(Callback): def __init__(self, not_supported): def call(hook, trainer=None, model=None, *_, **__): if trainer is None: # `state_dict`, `load_state_dict` do not have the `Trainer` available assert hook in ("state_dict", "load_state_dict") return lightning_module = trainer.lightning_module or model if hook in not_supported: with pytest.raises(MisconfigurationException, match=not_supported[hook]): lightning_module.log("anything", 1) else: lightning_module.log(hook, 1) for h in get_members(Callback): setattr(self, h, partial(call, h))
HookedCallback
python
openai__openai-python
src/openai/types/beta/assistant_tool_choice_function.py
{ "start": 165, "end": 269 }
class ____(BaseModel): name: str """The name of the function to call."""
AssistantToolChoiceFunction
python
pytorch__pytorch
torch/testing/_internal/common_quantization.py
{ "start": 61310, "end": 61718 }
class ____(torch.nn.Module): def __init__(self, mod_type): super().__init__() self.qconfig = default_dynamic_qconfig if mod_type == "GRU": self.mod = torch.nn.GRU(2, 2).to(dtype=torch.float) if mod_type == "LSTM": self.mod = torch.nn.LSTM(2, 2).to(dtype=torch.float) def forward(self, x): x = self.mod(x) return x
RNNDynamicModel
python
scrapy__scrapy
tests/test_feedexport.py
{ "start": 28488, "end": 61637 }
class ____(TestFeedExportBase): async def run_and_export( self, spider_cls: type[Spider], settings: dict[str, Any] ) -> dict[str, Any]: """Run spider with specified settings; return exported data.""" FEEDS = settings.get("FEEDS") or {} settings["FEEDS"] = { printf_escape(path_to_url(file_path)): feed_options for file_path, feed_options in FEEDS.items() } content: dict[str, Any] = {} try: spider_cls.start_urls = [self.mockserver.url("/")] crawler = get_crawler(spider_cls, settings) await maybe_deferred_to_future(crawler.crawl()) for file_path, feed_options in FEEDS.items(): content[feed_options["format"]] = ( Path(file_path).read_bytes() if Path(file_path).exists() else None ) finally: for file_path in FEEDS: if not Path(file_path).exists(): continue Path(file_path).unlink() return content async def assertExportedCsv( self, items: Iterable[Any], header: Iterable[str], rows: Iterable[dict[str, Any]], settings: dict[str, Any] | None = None, ) -> None: settings = settings or {} settings.update( { "FEEDS": { self._random_temp_filename(): {"format": "csv"}, }, } ) data = await self.exported_data(items, settings) reader = csv.DictReader(to_unicode(data["csv"]).splitlines()) assert reader.fieldnames == list(header) assert rows == list(reader) async def assertExportedJsonLines( self, items: Iterable[Any], rows: Iterable[dict[str, Any]], settings: dict[str, Any] | None = None, ) -> None: settings = settings or {} settings.update( { "FEEDS": { self._random_temp_filename(): {"format": "jl"}, }, } ) data = await self.exported_data(items, settings) parsed = [json.loads(to_unicode(line)) for line in data["jl"].splitlines()] rows = [{k: v for k, v in row.items() if v} for row in rows] assert rows == parsed async def assertExportedXml( self, items: Iterable[Any], rows: Iterable[dict[str, Any]], settings: dict[str, Any] | None = None, ) -> None: settings = settings or {} settings.update( { "FEEDS": { self._random_temp_filename(): {"format": "xml"}, }, } ) data = await 
self.exported_data(items, settings) rows = [{k: v for k, v in row.items() if v} for row in rows] root = lxml.etree.fromstring(data["xml"]) got_rows = [{e.tag: e.text for e in it} for it in root.findall("item")] assert rows == got_rows async def assertExportedMultiple( self, items: Iterable[Any], rows: Iterable[dict[str, Any]], settings: dict[str, Any] | None = None, ) -> None: settings = settings or {} settings.update( { "FEEDS": { self._random_temp_filename(): {"format": "xml"}, self._random_temp_filename(): {"format": "json"}, }, } ) data = await self.exported_data(items, settings) rows = [{k: v for k, v in row.items() if v} for row in rows] # XML root = lxml.etree.fromstring(data["xml"]) xml_rows = [{e.tag: e.text for e in it} for it in root.findall("item")] assert rows == xml_rows # JSON json_rows = json.loads(to_unicode(data["json"])) assert rows == json_rows async def assertExportedPickle( self, items: Iterable[Any], rows: Iterable[dict[str, Any]], settings: dict[str, Any] | None = None, ) -> None: settings = settings or {} settings.update( { "FEEDS": { self._random_temp_filename(): {"format": "pickle"}, }, } ) data = await self.exported_data(items, settings) expected = [{k: v for k, v in row.items() if v} for row in rows] result = self._load_until_eof(data["pickle"], load_func=pickle.load) assert result == expected async def assertExportedMarshal( self, items: Iterable[Any], rows: Iterable[dict[str, Any]], settings: dict[str, Any] | None = None, ) -> None: settings = settings or {} settings.update( { "FEEDS": { self._random_temp_filename(): {"format": "marshal"}, }, } ) data = await self.exported_data(items, settings) expected = [{k: v for k, v in row.items() if v} for row in rows] result = self._load_until_eof(data["marshal"], load_func=marshal.load) assert result == expected @inlineCallbacks def test_stats_file_success(self): settings = { "FEEDS": { printf_escape(path_to_url(str(self._random_temp_filename()))): { "format": "json", } }, } crawler = 
get_crawler(ItemSpider, settings) yield crawler.crawl(mockserver=self.mockserver) assert "feedexport/success_count/FileFeedStorage" in crawler.stats.get_stats() assert crawler.stats.get_value("feedexport/success_count/FileFeedStorage") == 1 @inlineCallbacks def test_stats_file_failed(self): settings = { "FEEDS": { printf_escape(path_to_url(str(self._random_temp_filename()))): { "format": "json", } }, } crawler = get_crawler(ItemSpider, settings) with mock.patch( "scrapy.extensions.feedexport.FileFeedStorage.store", side_effect=KeyError("foo"), ): yield crawler.crawl(mockserver=self.mockserver) assert "feedexport/failed_count/FileFeedStorage" in crawler.stats.get_stats() assert crawler.stats.get_value("feedexport/failed_count/FileFeedStorage") == 1 @inlineCallbacks def test_stats_multiple_file(self): settings = { "FEEDS": { printf_escape(path_to_url(str(self._random_temp_filename()))): { "format": "json", }, "stdout:": { "format": "xml", }, }, } crawler = get_crawler(ItemSpider, settings) with mock.patch.object(S3FeedStorage, "store"): yield crawler.crawl(mockserver=self.mockserver) assert "feedexport/success_count/FileFeedStorage" in crawler.stats.get_stats() assert "feedexport/success_count/StdoutFeedStorage" in crawler.stats.get_stats() assert crawler.stats.get_value("feedexport/success_count/FileFeedStorage") == 1 assert ( crawler.stats.get_value("feedexport/success_count/StdoutFeedStorage") == 1 ) @deferred_f_from_coro_f async def test_export_items(self): # feed exporters use field names from Item items = [ self.MyItem({"foo": "bar1", "egg": "spam1"}), self.MyItem({"foo": "bar2", "egg": "spam2", "baz": "quux2"}), ] rows = [ {"egg": "spam1", "foo": "bar1", "baz": ""}, {"egg": "spam2", "foo": "bar2", "baz": "quux2"}, ] header = self.MyItem.fields.keys() await self.assertExported(items, header, rows) @deferred_f_from_coro_f async def test_export_no_items_not_store_empty(self): for fmt in ("json", "jsonlines", "xml", "csv"): settings = { "FEEDS": { 
self._random_temp_filename(): {"format": fmt}, }, "FEED_STORE_EMPTY": False, } data = await self.exported_no_data(settings) assert data[fmt] is None @deferred_f_from_coro_f async def test_start_finish_exporting_items(self): items = [ self.MyItem({"foo": "bar1", "egg": "spam1"}), ] settings = { "FEEDS": { self._random_temp_filename(): {"format": "json"}, }, "FEED_EXPORT_INDENT": None, } listener = IsExportingListener() InstrumentedFeedSlot.subscribe__listener(listener) with mock.patch("scrapy.extensions.feedexport.FeedSlot", InstrumentedFeedSlot): await self.exported_data(items, settings) assert not listener.start_without_finish assert not listener.finish_without_start @deferred_f_from_coro_f async def test_start_finish_exporting_no_items(self): items = [] settings = { "FEEDS": { self._random_temp_filename(): {"format": "json"}, }, "FEED_EXPORT_INDENT": None, } listener = IsExportingListener() InstrumentedFeedSlot.subscribe__listener(listener) with mock.patch("scrapy.extensions.feedexport.FeedSlot", InstrumentedFeedSlot): await self.exported_data(items, settings) assert not listener.start_without_finish assert not listener.finish_without_start @deferred_f_from_coro_f async def test_start_finish_exporting_items_exception(self): items = [ self.MyItem({"foo": "bar1", "egg": "spam1"}), ] settings = { "FEEDS": { self._random_temp_filename(): {"format": "json"}, }, "FEED_EXPORTERS": {"json": ExceptionJsonItemExporter}, "FEED_EXPORT_INDENT": None, } listener = IsExportingListener() InstrumentedFeedSlot.subscribe__listener(listener) with mock.patch("scrapy.extensions.feedexport.FeedSlot", InstrumentedFeedSlot): await self.exported_data(items, settings) assert not listener.start_without_finish assert not listener.finish_without_start @deferred_f_from_coro_f async def test_start_finish_exporting_no_items_exception(self): items = [] settings = { "FEEDS": { self._random_temp_filename(): {"format": "json"}, }, "FEED_EXPORTERS": {"json": ExceptionJsonItemExporter}, 
"FEED_EXPORT_INDENT": None, } listener = IsExportingListener() InstrumentedFeedSlot.subscribe__listener(listener) with mock.patch("scrapy.extensions.feedexport.FeedSlot", InstrumentedFeedSlot): await self.exported_data(items, settings) assert not listener.start_without_finish assert not listener.finish_without_start @deferred_f_from_coro_f async def test_export_no_items_store_empty(self): formats = ( ("json", b"[]"), ("jsonlines", b""), ("xml", b'<?xml version="1.0" encoding="utf-8"?>\n<items></items>'), ("csv", b""), ) for fmt, expctd in formats: settings = { "FEEDS": { self._random_temp_filename(): {"format": fmt}, }, "FEED_STORE_EMPTY": True, "FEED_EXPORT_INDENT": None, } data = await self.exported_no_data(settings) assert expctd == data[fmt] @deferred_f_from_coro_f async def test_export_no_items_multiple_feeds(self): """Make sure that `storage.store` is called for every feed.""" settings = { "FEEDS": { self._random_temp_filename(): {"format": "json"}, self._random_temp_filename(): {"format": "xml"}, self._random_temp_filename(): {"format": "csv"}, }, "FEED_STORAGES": {"file": LogOnStoreFileStorage}, "FEED_STORE_EMPTY": False, } with LogCapture() as log: await self.exported_no_data(settings) assert str(log).count("Storage.store is called") == 0 @deferred_f_from_coro_f async def test_export_multiple_item_classes(self): items = [ self.MyItem({"foo": "bar1", "egg": "spam1"}), self.MyItem2({"hello": "world2", "foo": "bar2"}), self.MyItem({"foo": "bar3", "egg": "spam3", "baz": "quux3"}), {"hello": "world4", "egg": "spam4"}, ] # by default, Scrapy uses fields of the first Item for CSV and # all fields for JSON Lines header = self.MyItem.fields.keys() rows_csv = [ {"egg": "spam1", "foo": "bar1", "baz": ""}, {"egg": "", "foo": "bar2", "baz": ""}, {"egg": "spam3", "foo": "bar3", "baz": "quux3"}, {"egg": "spam4", "foo": "", "baz": ""}, ] rows_jl = [dict(row) for row in items] await self.assertExportedCsv(items, header, rows_csv) await self.assertExportedJsonLines(items, 
rows_jl) @deferred_f_from_coro_f async def test_export_items_empty_field_list(self): # FEED_EXPORT_FIELDS==[] means the same as default None items = [{"foo": "bar"}] header = ["foo"] rows = [{"foo": "bar"}] settings = {"FEED_EXPORT_FIELDS": []} await self.assertExportedCsv(items, header, rows) await self.assertExportedJsonLines(items, rows, settings) @deferred_f_from_coro_f async def test_export_items_field_list(self): items = [{"foo": "bar"}] header = ["foo", "baz"] rows = [{"foo": "bar", "baz": ""}] settings = {"FEED_EXPORT_FIELDS": header} await self.assertExported(items, header, rows, settings=settings) @deferred_f_from_coro_f async def test_export_items_comma_separated_field_list(self): items = [{"foo": "bar"}] header = ["foo", "baz"] rows = [{"foo": "bar", "baz": ""}] settings = {"FEED_EXPORT_FIELDS": ",".join(header)} await self.assertExported(items, header, rows, settings=settings) @deferred_f_from_coro_f async def test_export_items_json_field_list(self): items = [{"foo": "bar"}] header = ["foo", "baz"] rows = [{"foo": "bar", "baz": ""}] settings = {"FEED_EXPORT_FIELDS": json.dumps(header)} await self.assertExported(items, header, rows, settings=settings) @deferred_f_from_coro_f async def test_export_items_field_names(self): items = [{"foo": "bar"}] header = {"foo": "Foo"} rows = [{"Foo": "bar"}] settings = {"FEED_EXPORT_FIELDS": header} await self.assertExported(items, list(header.values()), rows, settings=settings) @deferred_f_from_coro_f async def test_export_items_dict_field_names(self): items = [{"foo": "bar"}] header = { "baz": "Baz", "foo": "Foo", } rows = [{"Baz": "", "Foo": "bar"}] settings = {"FEED_EXPORT_FIELDS": header} await self.assertExported(items, ["Baz", "Foo"], rows, settings=settings) @deferred_f_from_coro_f async def test_export_items_json_field_names(self): items = [{"foo": "bar"}] header = {"foo": "Foo"} rows = [{"Foo": "bar"}] settings = {"FEED_EXPORT_FIELDS": json.dumps(header)} await self.assertExported(items, 
list(header.values()), rows, settings=settings) @deferred_f_from_coro_f async def test_export_based_on_item_classes(self): items = [ self.MyItem({"foo": "bar1", "egg": "spam1"}), self.MyItem2({"hello": "world2", "foo": "bar2"}), {"hello": "world3", "egg": "spam3"}, ] formats = { "csv": b"baz,egg,foo\r\n,spam1,bar1\r\n", "json": b'[\n{"hello": "world2", "foo": "bar2"}\n]', "jsonlines": ( b'{"foo": "bar1", "egg": "spam1"}\n{"hello": "world2", "foo": "bar2"}\n' ), "xml": ( b'<?xml version="1.0" encoding="utf-8"?>\n<items>\n<item>' b"<foo>bar1</foo><egg>spam1</egg></item>\n<item><hello>" b"world2</hello><foo>bar2</foo></item>\n<item><hello>world3" b"</hello><egg>spam3</egg></item>\n</items>" ), } settings = { "FEEDS": { self._random_temp_filename(): { "format": "csv", "item_classes": [self.MyItem], }, self._random_temp_filename(): { "format": "json", "item_classes": [self.MyItem2], }, self._random_temp_filename(): { "format": "jsonlines", "item_classes": [self.MyItem, self.MyItem2], }, self._random_temp_filename(): { "format": "xml", }, }, } data = await self.exported_data(items, settings) for fmt, expected in formats.items(): assert data[fmt] == expected @deferred_f_from_coro_f async def test_export_based_on_custom_filters(self): items = [ self.MyItem({"foo": "bar1", "egg": "spam1"}), self.MyItem2({"hello": "world2", "foo": "bar2"}), {"hello": "world3", "egg": "spam3"}, ] MyItem = self.MyItem class CustomFilter1: def __init__(self, feed_options): pass def accepts(self, item): return isinstance(item, MyItem) class CustomFilter2(scrapy.extensions.feedexport.ItemFilter): def accepts(self, item): return "foo" in item.fields class CustomFilter3(scrapy.extensions.feedexport.ItemFilter): def accepts(self, item): return ( isinstance(item, tuple(self.item_classes)) and item["foo"] == "bar1" ) formats = { "json": b'[\n{"foo": "bar1", "egg": "spam1"}\n]', "xml": ( b'<?xml version="1.0" encoding="utf-8"?>\n<items>\n<item>' b"<foo>bar1</foo><egg>spam1</egg></item>\n<item><hello>" 
b"world2</hello><foo>bar2</foo></item>\n</items>" ), "jsonlines": b'{"foo": "bar1", "egg": "spam1"}\n', } settings = { "FEEDS": { self._random_temp_filename(): { "format": "json", "item_filter": CustomFilter1, }, self._random_temp_filename(): { "format": "xml", "item_filter": CustomFilter2, }, self._random_temp_filename(): { "format": "jsonlines", "item_classes": [self.MyItem, self.MyItem2], "item_filter": CustomFilter3, }, }, } data = await self.exported_data(items, settings) for fmt, expected in formats.items(): assert data[fmt] == expected @deferred_f_from_coro_f async def test_export_dicts(self): # When dicts are used, only keys from the first row are used as # a header for CSV, and all fields are used for JSON Lines. items = [ {"foo": "bar", "egg": "spam"}, {"foo": "bar", "egg": "spam", "baz": "quux"}, ] rows_csv = [{"egg": "spam", "foo": "bar"}, {"egg": "spam", "foo": "bar"}] rows_jl = items await self.assertExportedCsv(items, ["foo", "egg"], rows_csv) await self.assertExportedJsonLines(items, rows_jl) @deferred_f_from_coro_f async def test_export_tuple(self): items = [ {"foo": "bar1", "egg": "spam1"}, {"foo": "bar2", "egg": "spam2", "baz": "quux"}, ] settings = {"FEED_EXPORT_FIELDS": ("foo", "baz")} rows = [{"foo": "bar1", "baz": ""}, {"foo": "bar2", "baz": "quux"}] await self.assertExported(items, ["foo", "baz"], rows, settings=settings) @deferred_f_from_coro_f async def test_export_feed_export_fields(self): # FEED_EXPORT_FIELDS option allows to order export fields # and to select a subset of fields to export, both for Items and dicts. 
for item_cls in [self.MyItem, dict]: items = [ item_cls({"foo": "bar1", "egg": "spam1"}), item_cls({"foo": "bar2", "egg": "spam2", "baz": "quux2"}), ] # export all columns settings = {"FEED_EXPORT_FIELDS": "foo,baz,egg"} rows = [ {"egg": "spam1", "foo": "bar1", "baz": ""}, {"egg": "spam2", "foo": "bar2", "baz": "quux2"}, ] await self.assertExported( items, ["foo", "baz", "egg"], rows, settings=settings ) # export a subset of columns settings = {"FEED_EXPORT_FIELDS": "egg,baz"} rows = [{"egg": "spam1", "baz": ""}, {"egg": "spam2", "baz": "quux2"}] await self.assertExported(items, ["egg", "baz"], rows, settings=settings) @deferred_f_from_coro_f async def test_export_encoding(self): items = [{"foo": "Test\xd6"}] formats = { "json": b'[{"foo": "Test\\u00d6"}]', "jsonlines": b'{"foo": "Test\\u00d6"}\n', "xml": ( '<?xml version="1.0" encoding="utf-8"?>\n' "<items><item><foo>Test\xd6</foo></item></items>" ).encode(), "csv": "foo\r\nTest\xd6\r\n".encode(), } for fmt, expected in formats.items(): settings = { "FEEDS": { self._random_temp_filename(): {"format": fmt}, }, "FEED_EXPORT_INDENT": None, } data = await self.exported_data(items, settings) assert data[fmt] == expected formats = { "json": b'[{"foo": "Test\xd6"}]', "jsonlines": b'{"foo": "Test\xd6"}\n', "xml": ( b'<?xml version="1.0" encoding="latin-1"?>\n' b"<items><item><foo>Test\xd6</foo></item></items>" ), "csv": b"foo\r\nTest\xd6\r\n", } for fmt, expected in formats.items(): settings = { "FEEDS": { self._random_temp_filename(): {"format": fmt}, }, "FEED_EXPORT_INDENT": None, "FEED_EXPORT_ENCODING": "latin-1", } data = await self.exported_data(items, settings) assert data[fmt] == expected @deferred_f_from_coro_f async def test_export_multiple_configs(self): items = [{"foo": "FOO", "bar": "BAR"}] formats = { "json": b'[\n{"bar": "BAR"}\n]', "xml": ( b'<?xml version="1.0" encoding="latin-1"?>\n' b"<items>\n <item>\n <foo>FOO</foo>\n </item>\n</items>" ), "csv": b"bar,foo\r\nBAR,FOO\r\n", } settings = { "FEEDS": { 
self._random_temp_filename(): { "format": "json", "indent": 0, "fields": ["bar"], "encoding": "utf-8", }, self._random_temp_filename(): { "format": "xml", "indent": 2, "fields": ["foo"], "encoding": "latin-1", }, self._random_temp_filename(): { "format": "csv", "indent": None, "fields": ["bar", "foo"], "encoding": "utf-8", }, }, } data = await self.exported_data(items, settings) for fmt, expected in formats.items(): assert data[fmt] == expected @deferred_f_from_coro_f async def test_export_indentation(self): items = [ {"foo": ["bar"]}, {"key": "value"}, ] test_cases = [ # JSON { "format": "json", "indent": None, "expected": b'[{"foo": ["bar"]},{"key": "value"}]', }, { "format": "json", "indent": -1, "expected": b"""[ {"foo": ["bar"]}, {"key": "value"} ]""", }, { "format": "json", "indent": 0, "expected": b"""[ {"foo": ["bar"]}, {"key": "value"} ]""", }, { "format": "json", "indent": 2, "expected": b"""[ { "foo": [ "bar" ] }, { "key": "value" } ]""", }, { "format": "json", "indent": 4, "expected": b"""[ { "foo": [ "bar" ] }, { "key": "value" } ]""", }, { "format": "json", "indent": 5, "expected": b"""[ { "foo": [ "bar" ] }, { "key": "value" } ]""", }, # XML { "format": "xml", "indent": None, "expected": b"""<?xml version="1.0" encoding="utf-8"?> <items><item><foo><value>bar</value></foo></item><item><key>value</key></item></items>""", }, { "format": "xml", "indent": -1, "expected": b"""<?xml version="1.0" encoding="utf-8"?> <items> <item><foo><value>bar</value></foo></item> <item><key>value</key></item> </items>""", }, { "format": "xml", "indent": 0, "expected": b"""<?xml version="1.0" encoding="utf-8"?> <items> <item><foo><value>bar</value></foo></item> <item><key>value</key></item> </items>""", }, { "format": "xml", "indent": 2, "expected": b"""<?xml version="1.0" encoding="utf-8"?> <items> <item> <foo> <value>bar</value> </foo> </item> <item> <key>value</key> </item> </items>""", }, { "format": "xml", "indent": 4, "expected": b"""<?xml version="1.0" 
encoding="utf-8"?> <items> <item> <foo> <value>bar</value> </foo> </item> <item> <key>value</key> </item> </items>""", }, { "format": "xml", "indent": 5, "expected": b"""<?xml version="1.0" encoding="utf-8"?> <items> <item> <foo> <value>bar</value> </foo> </item> <item> <key>value</key> </item> </items>""", }, ] for row in test_cases: settings = { "FEEDS": { self._random_temp_filename(): { "format": row["format"], "indent": row["indent"], }, }, } data = await self.exported_data(items, settings) assert data[row["format"]] == row["expected"] @deferred_f_from_coro_f async def test_init_exporters_storages_with_crawler(self): settings = { "FEED_EXPORTERS": {"csv": FromCrawlerCsvItemExporter}, "FEED_STORAGES": {"file": FromCrawlerFileFeedStorage}, "FEEDS": { self._random_temp_filename(): {"format": "csv"}, }, } await self.exported_data(items=[], settings=settings) assert FromCrawlerCsvItemExporter.init_with_crawler assert FromCrawlerFileFeedStorage.init_with_crawler @deferred_f_from_coro_f async def test_str_uri(self): settings = { "FEED_STORE_EMPTY": True, "FEEDS": {str(self._random_temp_filename()): {"format": "csv"}}, } data = await self.exported_no_data(settings) assert data["csv"] == b"" @deferred_f_from_coro_f async def test_multiple_feeds_success_logs_blocking_feed_storage(self): settings = { "FEEDS": { self._random_temp_filename(): {"format": "json"}, self._random_temp_filename(): {"format": "xml"}, self._random_temp_filename(): {"format": "csv"}, }, "FEED_STORAGES": {"file": DummyBlockingFeedStorage}, } items = [ {"foo": "bar1", "baz": ""}, {"foo": "bar2", "baz": "quux"}, ] with LogCapture() as log: await self.exported_data(items, settings) for fmt in ["json", "xml", "csv"]: assert f"Stored {fmt} feed (2 items)" in str(log) @deferred_f_from_coro_f async def test_multiple_feeds_failing_logs_blocking_feed_storage(self): settings = { "FEEDS": { self._random_temp_filename(): {"format": "json"}, self._random_temp_filename(): {"format": "xml"}, 
self._random_temp_filename(): {"format": "csv"}, }, "FEED_STORAGES": {"file": FailingBlockingFeedStorage}, } items = [ {"foo": "bar1", "baz": ""}, {"foo": "bar2", "baz": "quux"}, ] with LogCapture() as log: await self.exported_data(items, settings) for fmt in ["json", "xml", "csv"]: assert f"Error storing {fmt} feed (2 items)" in str(log) @deferred_f_from_coro_f async def test_extend_kwargs(self): items = [{"foo": "FOO", "bar": "BAR"}] expected_with_title_csv = b"foo,bar\r\nFOO,BAR\r\n" expected_without_title_csv = b"FOO,BAR\r\n" test_cases = [ # with title { "options": { "format": "csv", "item_export_kwargs": {"include_headers_line": True}, }, "expected": expected_with_title_csv, }, # without title { "options": { "format": "csv", "item_export_kwargs": {"include_headers_line": False}, }, "expected": expected_without_title_csv, }, ] for row in test_cases: feed_options = row["options"] settings = { "FEEDS": { self._random_temp_filename(): feed_options, }, "FEED_EXPORT_INDENT": None, } data = await self.exported_data(items, settings) assert data[feed_options["format"]] == row["expected"] @deferred_f_from_coro_f async def test_storage_file_no_postprocessing(self): @implementer(IFeedStorage) class Storage: def __init__(self, uri, *, feed_options=None): pass def open(self, spider): Storage.open_file = tempfile.NamedTemporaryFile(prefix="feed-") return Storage.open_file def store(self, file): Storage.store_file = file file.close() settings = { "FEEDS": {self._random_temp_filename(): {"format": "jsonlines"}}, "FEED_STORAGES": {"file": Storage}, } await self.exported_no_data(settings) assert Storage.open_file is Storage.store_file @deferred_f_from_coro_f async def test_storage_file_postprocessing(self): @implementer(IFeedStorage) class Storage: def __init__(self, uri, *, feed_options=None): pass def open(self, spider): Storage.open_file = tempfile.NamedTemporaryFile(prefix="feed-") return Storage.open_file def store(self, file): Storage.store_file = file 
Storage.file_was_closed = file.closed file.close() settings = { "FEEDS": { self._random_temp_filename(): { "format": "jsonlines", "postprocessing": [ "scrapy.extensions.postprocessing.GzipPlugin", ], }, }, "FEED_STORAGES": {"file": Storage}, } await self.exported_no_data(settings) assert Storage.open_file is Storage.store_file assert not Storage.file_was_closed
TestFeedExport
python
python__mypy
mypy/fastparse.py
{ "start": 84921, "end": 86233 }
class ____(TraverserVisitor): """Check if an AST contains attribute assignments (e.g. self.x = 0).""" def __init__(self) -> None: self.lvalue = False self.found = False def visit_assignment_stmt(self, s: AssignmentStmt) -> None: self.lvalue = True for lv in s.lvalues: lv.accept(self) self.lvalue = False def visit_with_stmt(self, s: WithStmt) -> None: self.lvalue = True for lv in s.target: if lv is not None: lv.accept(self) self.lvalue = False s.body.accept(self) def visit_for_stmt(self, s: ForStmt) -> None: self.lvalue = True s.index.accept(self) self.lvalue = False s.body.accept(self) if s.else_body: s.else_body.accept(self) def visit_expression_stmt(self, s: ExpressionStmt) -> None: # No need to look inside these pass def visit_call_expr(self, e: CallExpr) -> None: # No need to look inside these pass def visit_index_expr(self, e: IndexExpr) -> None: # No need to look inside these pass def visit_member_expr(self, e: MemberExpr) -> None: if self.lvalue and isinstance(e.expr, NameExpr): self.found = True
FindAttributeAssign
python
great-expectations__great_expectations
contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_average_to_be_within_range_of_given_point.py
{ "start": 1337, "end": 2632 }
class ____(ColumnAggregateMetricProvider): # This is the id string that will be used to reference your Metric. metric_name = "column.coordinates.distance" value_keys = ("center_point",) # This method implements the core logic for the PandasExecutionEngine @column_aggregate_value(engine=PandasExecutionEngine) def _pandas(cls, column, **kwargs): center_point = kwargs.get("center_point") avg_lat = mean([point[0] for point in column]) avg_lon = mean([point[1] for point in column]) distance = cls.fcc_projection((avg_lat, avg_lon), center_point) return distance @staticmethod def fcc_projection(loc1, loc2): lat1, lat2 = float(loc1[0]), float(loc2[0]) lon1, lon2 = float(loc1[1]), float(loc2[1]) mean_lat = (lat1 + lat2) / 2 delta_lat = lat2 - lat1 delta_lon = lon2 - lon1 k1 = 111.13209 - (0.56605 * cos(2 * mean_lat)) + (0.0012 * cos(4 * mean_lat)) k2 = ( (111.41513 * cos(mean_lat)) - (0.09455 * cos(3 * mean_lat)) + (0.00012 * cos(5 * mean_lat)) ) distance = sqrt((k1 * delta_lat) ** 2 + (k2 * delta_lon) ** 2) return distance # This class defines the Expectation itself
ColumnCoordinatesDistance
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/coercions.py
{ "start": 14430, "end": 14504 }
class ____: __slots__ = () _resolve_literal_only = True
_StringOnly
python
kamyu104__LeetCode-Solutions
Python/subarrays-distinct-element-sum-of-squares-ii.py
{ "start": 2015, "end": 6244 }
class ____(object): def sumCounts(self, nums): """ :type nums: List[int] :rtype: int """ MOD = 10**9+7 # Template: # https://github.com/kamyu104/LeetCode-Solutions/blob/master/Python/longest-substring-of-one-repeating-character.py class SegmentTree(object): def __init__(self, N, build_fn=None, query_fn=lambda x, y: y if x is None else x if y is None else (x+y)%MOD, update_fn=lambda x, y: y if x is None else (x+y)%MOD): self.tree = [None]*(1<<((N-1).bit_length()+1)) self.base = len(self.tree)>>1 self.lazy = [None]*self.base self.query_fn = query_fn self.update_fn = update_fn if build_fn is not None: for i in xrange(self.base, self.base+N): self.tree[i] = build_fn(i-self.base) for i in reversed(xrange(1, self.base)): self.tree[i] = query_fn(self.tree[i<<1], self.tree[(i<<1)+1]) self.count = [1]*len(self.tree) # added for i in reversed(xrange(1, self.base)): # added self.count[i] = self.count[i<<1] + self.count[(i<<1)+1] def __apply(self, x, val): self.tree[x] = self.update_fn(self.tree[x], val*self.count[x]) # modified if x < self.base: self.lazy[x] = self.update_fn(self.lazy[x], val) def __push(self, x): for h in reversed(xrange(1, x.bit_length())): y = x>>h if self.lazy[y] is not None: self.__apply(y<<1, self.lazy[y]) self.__apply((y<<1)+1, self.lazy[y]) self.lazy[y] = None def update(self, L, R, h): # Time: O(logN), Space: O(N) def pull(x): while x > 1: x >>= 1 self.tree[x] = self.query_fn(self.tree[x<<1], self.tree[(x<<1)+1]) if self.lazy[x] is not None: self.tree[x] = self.update_fn(self.tree[x], self.lazy[x]*self.count[x]) # modified L += self.base R += self.base # self.__push(L) # enable if range assignment # self.__push(R) # enable if range assignment L0, R0 = L, R while L <= R: if L & 1: # is right child self.__apply(L, h) L += 1 if R & 1 == 0: # is left child self.__apply(R, h) R -= 1 L >>= 1 R >>= 1 pull(L0) pull(R0) def query(self, L, R): if L > R: return None L += self.base R += self.base self.__push(L) self.__push(R) left = right = None while L <= R: if 
L & 1: left = self.query_fn(left, self.tree[L]) L += 1 if R & 1 == 0: right = self.query_fn(self.tree[R], right) R -= 1 L >>= 1 R >>= 1 return self.query_fn(left, right) result = accu = 0 sl = {} st = SegmentTree(len(nums)) for i in xrange(len(nums)): j = sl[nums[i]] if nums[i] in sl else -1 # sum(count(k, i)^2 for k in range(i+1)) - sum(count(k, i-1)^2 for k in range(i)) # = sum(2*count(k, i-1)+1 for k in range(j+1, i+1)) # = (i-j) + sum(2*count(k, i-1) for k in range(j+1, i+1)) accu = (accu+((i-j)+2*max(st.query(j+1, i), 0)))%MOD result = (result+accu)%MOD st.update(j+1, i, 1) # count(k, i) = count(k, i-1)+(1 if k >= j+1 else 0) for k in range(i+1) sl[nums[i]] = i return result
Solution2
python
huggingface__transformers
src/transformers/models/splinter/modeling_splinter.py
{ "start": 12919, "end": 13090 }
class ____(PreTrainedModel): config: SplinterConfig base_model_prefix = "splinter" supports_gradient_checkpointing = True @auto_docstring
SplinterPreTrainedModel
python
django__django
tests/template_tests/syntax_tests/i18n/test_blocktranslate.py
{ "start": 26535, "end": 26638 }
class ____(TranslationBlockTranslateTagTests): tag_name = "blocktrans"
TranslationBlockTransnTagTests
python
numba__numba
numba/tests/test_numpy_support.py
{ "start": 8426, "end": 9647 }
class ____(object): __slots__ = ('nin', 'nout', 'types', 'ntypes') __name__ = "fake ufunc" def __init__(self, types): self.types = types in_, out = self.types[0].split('->') self.nin = len(in_) self.nout = len(out) self.ntypes = len(types) for tp in types: in_, out = self.types[0].split('->') assert len(in_) == self.nin assert len(out) == self.nout # Typical types for np.add, np.multiply, np.isnan _add_types = ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'Mm->M', 'mm->m', 'mM->M', 'OO->O'] _mul_types = ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'mq->m', 'qm->m', 'md->m', 'dm->m', 'OO->O'] # Those ones only have floating-point loops _isnan_types = ['e->?', 'f->?', 'd->?', 'g->?', 'F->?', 'D->?', 'G->?'] _sqrt_types = ['e->e', 'f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
FakeUFunc
python
run-llama__llama_index
llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/llama_index/voice_agents/elevenlabs/events.py
{ "start": 390, "end": 508 }
class ____(BaseVoiceAgentEvent): model_config = ConfigDict(extra="allow") agent_response: str
AgentResponseEvent
python
pyqtgraph__pyqtgraph
pyqtgraph/multiprocess/remoteproxy.py
{ "start": 208, "end": 384 }
class ____(Exception): """Raised when an event handler receives a request to close the connection or discovers that the connection has been closed.""" pass
ClosedError
python
kamyu104__LeetCode-Solutions
Python/longest-even-odd-subarray-with-threshold.py
{ "start": 37, "end": 506 }
class ____(object): def longestAlternatingSubarray(self, nums, threshold): """ :type nums: List[int] :type threshold: int :rtype: int """ result = l = 0 for x in nums: if x > threshold: l = 0 continue if l%2 == x%2: l += 1 else: l = int(x%2 == 0) result = max(result, l) return result
Solution
python
ansible__ansible
test/lib/ansible_test/_internal/commands/integration/coverage.py
{ "start": 12681, "end": 15601 }
class ____: """Manager for code coverage configuration and state.""" def __init__(self, args: IntegrationConfig, host_state: HostState, inventory_path: str) -> None: self.args = args self.host_state = host_state self.inventory_path = inventory_path if self.args.coverage: handler_types = set(get_handler_type(type(profile.config)) for profile in host_state.profiles) handler_types.discard(None) else: handler_types = set() handlers = [handler_type(args=args, host_state=host_state, inventory_path=inventory_path) for handler_type in handler_types] self.handlers = [handler for handler in handlers if handler.is_active] def setup(self) -> None: """Perform setup for code coverage.""" if not self.args.coverage: return for handler in self.handlers: handler.setup() def teardown(self) -> None: """Perform teardown for code coverage.""" if not self.args.coverage: return for handler in self.handlers: handler.teardown() def get_environment(self, target_name: str, aliases: tuple[str, ...]) -> dict[str, str]: """Return a dictionary of environment variables for running tests with code coverage.""" if not self.args.coverage or 'non_local/' in aliases: return {} env = {} for handler in self.handlers: env.update(handler.get_environment(target_name, aliases)) return env @cache def get_config_handler_type_map() -> dict[t.Type[HostConfig], t.Type[CoverageHandler]]: """Create and return a mapping of HostConfig types to CoverageHandler types.""" return get_type_map(CoverageHandler, HostConfig) def get_handler_type(config_type: t.Type[HostConfig]) -> t.Optional[t.Type[CoverageHandler]]: """Return the coverage handler type associated with the given host config type if found, otherwise return None.""" queue = [config_type] type_map = get_config_handler_type_map() while queue: config_type = queue.pop(0) handler_type = type_map.get(config_type) if handler_type: return handler_type queue.extend(config_type.__bases__) return None def update_coverage_filename(original_filename: str, platform: str) -> 
str: """Validate the given filename and insert the specified platform, then return the result.""" parts = original_filename.split('=') if original_filename != os.path.basename(original_filename) or len(parts) != 5 or parts[2] != 'platform': raise Exception(f'Unexpected coverage filename: {original_filename}') parts[2] = platform updated_filename = '='.join(parts) display.info(f'Coverage file for platform "{platform}": {original_filename} -> {updated_filename}', verbosity=3) return updated_filename
CoverageManager
python
spack__spack
lib/spack/spack/fetch_strategy.py
{ "start": 66106, "end": 66220 }
class ____(spack.error.FetchError): """Raised after attempt to checksum when URL has no digest."""
NoDigestError
python
kamyu104__LeetCode-Solutions
Python/rectangle-area-ii.py
{ "start": 1002, "end": 1797 }
class ____(object): def rectangleArea(self, rectangles): """ :type rectangles: List[List[int]] :rtype: int """ OPEN, CLOSE = 1, -1 events = [] X = set() for x1, y1, x2, y2 in rectangles: events.append((y1, OPEN, x1, x2)) events.append((y2, CLOSE, x1, x2)) X.add(x1) X.add(x2) events.sort() X = sorted(X) Xi = {x: i for i, x in enumerate(X)} st = SegmentTreeNode(0, len(X)-1) result = 0 cur_x_sum = 0 cur_y = events[0][0] for y, typ, x1, x2 in events: result += cur_x_sum * (y-cur_y) cur_x_sum = st.update(X, Xi[x1], Xi[x2], typ) cur_y = y return result % (10**9+7)
Solution
python
TheAlgorithms__Python
data_structures/heap/binomial_heap.py
{ "start": 73, "end": 1268 }
class ____: """ Node in a doubly-linked binomial tree, containing: - value - size of left subtree - link to left, right and parent nodes """ def __init__(self, val): self.val = val # Number of nodes in left subtree self.left_tree_size = 0 self.left = None self.right = None self.parent = None def merge_trees(self, other): """ In-place merge of two binomial trees of equal size. Returns the root of the resulting tree """ assert self.left_tree_size == other.left_tree_size, "Unequal Sizes of Blocks" if self.val < other.val: other.left = self.right other.parent = None if self.right: self.right.parent = other self.right = other self.left_tree_size = self.left_tree_size * 2 + 1 return self else: self.left = other.right self.parent = None if other.right: other.right.parent = self other.right = self other.left_tree_size = other.left_tree_size * 2 + 1 return other
Node
python
neetcode-gh__leetcode
python/0513-find-bottom-left-tree-value.py
{ "start": 204, "end": 734 }
class ____: def findBottomLeftValue(self, root: Optional[TreeNode]) -> int: res = [] q = deque() q.append(root) while q: qlen = len(q) level = [] for i in range(qlen): node = q.popleft() if node: q.append(node.left) q.append(node.right) level.append(node.val) if level: res.append(level) return res[-1][0] # recursive
Solution
python
cython__cython
Cython/Compiler/ExprNodes.py
{ "start": 393452, "end": 395261 }
class ____(ExprNode): # An inlined generator expression for which the result is calculated # inside of the loop and returned as a single, first and only Generator # return value. # This will only be created by transforms when replacing safe builtin # calls on generator expressions. # # gen GeneratorExpressionNode the generator, not containing any YieldExprNodes # orig_func String the name of the builtin function this node replaces # target ExprNode or None a 'target' for a ComprehensionAppend node subexprs = ["gen"] orig_func = None target = None is_temp = True type = py_object_type def __init__(self, pos, gen, comprehension_type=None, **kwargs): gbody = gen.def_node.gbody gbody.is_inlined = True if comprehension_type is not None: assert comprehension_type in (list_type, set_type, dict_type), comprehension_type gbody.inlined_comprehension_type = comprehension_type kwargs.update( target=RawCNameExprNode(pos, comprehension_type, Naming.retval_cname), type=comprehension_type, ) super().__init__(pos, gen=gen, **kwargs) def may_be_none(self): return self.orig_func not in ('any', 'all', 'sorted') def infer_type(self, env): return self.type def analyse_types(self, env): self.gen = self.gen.analyse_expressions(env) return self def generate_result_code(self, code): code.putln("%s = __Pyx_Generator_GetInlinedResult(%s); %s" % ( self.result(), self.gen.result(), code.error_goto_if_null(self.result(), self.pos))) self.generate_gotref(code)
InlinedGeneratorExpressionNode
python
numba__numba
numba/parfors/parfor_lowering_utils.py
{ "start": 170, "end": 5638 }
class ____: """Helper class for building Numba-IR and lowering for Parfor. """ def __init__(self, lowerer, scope, loc): self._lowerer = lowerer self._scope = scope self._loc = loc @property def _context(self): return self._lowerer.context @property def _typingctx(self): return self._context.typing_context @property def _typemap(self): return self._lowerer.fndesc.typemap @property def _calltypes(self): return self._lowerer.fndesc.calltypes def bind_global_function(self, fobj, ftype, args, kws=None): """Binds a global function to a variable. Parameters ---------- fobj : object The function to be bound. ftype : types.Type args : Sequence[types.Type] kws : Mapping[str, types.Type] Returns ------- callable: _CallableNode """ if kws is None: kws = {} loc = self._loc varname = f"{fobj.__name__}_func" gvname = f"{fobj.__name__}" func_sig = self._typingctx.resolve_function_type(ftype, args, kws) func_var = self.assign( rhs=ir.Global(gvname, fobj, loc=loc), typ=ftype, name=varname ) return _CallableNode(func=func_var, sig=func_sig) def make_const_variable(self, cval, typ, name="pf_const") -> ir.Var: """Makes a constant variable Parameters ---------- cval : object The constant value typ : types.Type type of the value name : str variable name to store to Returns ------- res : ir.Var """ return self.assign( rhs=ir.Const(cval, loc=self._loc), typ=typ, name=name ) def make_tuple_variable(self, varlist, name="pf_tuple") -> ir.Var: """Makes a tuple variable Parameters ---------- varlist : Sequence[ir.Var] Variables containing the values to be stored. 
name : str variable name to store to Returns ------- res : ir.Var """ loc = self._loc vartys = [self._typemap[x.name] for x in varlist] tupty = types.Tuple.from_types(vartys) return self.assign( rhs=ir.Expr.build_tuple(varlist, loc), typ=tupty, name=name ) def assign(self, rhs, typ, name="pf_assign") -> ir.Var: """Assign a value to a new variable Parameters ---------- rhs : object The value typ : types.Type type of the value name : str variable name to store to Returns ------- res : ir.Var """ loc = self._loc var = self._scope.redefine(name, loc) self._typemap[var.name] = typ assign = ir.Assign(rhs, var, loc) self._lowerer.lower_inst(assign) return var def assign_inplace(self, rhs, typ, name) -> ir.Var: """Assign a value to a new variable or inplace if it already exist Parameters ---------- rhs : object The value typ : types.Type type of the value name : str variable name to store to Returns ------- res : ir.Var """ loc = self._loc var = ir.Var(self._scope, name, loc) assign = ir.Assign(rhs, var, loc) self._typemap.setdefault(var.name, typ) self._lowerer.lower_inst(assign) return var def call(self, callable_node, args, kws=None) -> ir.Expr: """Call a bound callable Parameters ---------- callable_node : _CallableNode The callee args : Sequence[ir.Var] kws : Mapping[str, ir.Var] Returns ------- res : ir.Expr The expression node for the return value of the call """ if kws is None: kws = {} call = ir.Expr.call(callable_node.func, args, kws, loc=self._loc) self._calltypes[call] = callable_node.sig return call def setitem(self, obj, index, val) -> ir.SetItem: """Makes a setitem call Parameters ---------- obj : ir.Var the object being indexed index : ir.Var the index val : ir.Var the value to be stored Returns ------- res : ir.SetItem """ loc = self._loc tm = self._typemap setitem = ir.SetItem(obj, index, val, loc=loc) self._lowerer.fndesc.calltypes[setitem] = signature( types.none, tm[obj.name], tm[index.name], tm[val.name] ) self._lowerer.lower_inst(setitem) return 
setitem def getitem(self, obj, index, typ) -> ir.Expr: """Makes a getitem call Parameters ---------- obj : ir.Var the object being indexed index : ir.Var the index val : ir.Var the ty Returns ------- res : ir.Expr the retrieved value """ tm = self._typemap getitem = ir.Expr.getitem(obj, index, loc=self._loc) self._lowerer.fndesc.calltypes[getitem] = signature( typ, tm[obj.name], tm[index.name], ) return getitem
ParforLoweringBuilder
python
spyder-ide__spyder
external-deps/qtconsole/qtconsole/pygments_highlighter.py
{ "start": 3208, "end": 3744 }
class ____(QtGui.QTextBlockUserData): """ Storage for the user data associated with each line. """ syntax_stack = ('root',) def __init__(self, **kwds): for key, value in kwds.items(): setattr(self, key, value) QtGui.QTextBlockUserData.__init__(self) def __repr__(self): attrs = ['syntax_stack'] kwds = ', '.join([ '%s=%r' % (attr, getattr(self, attr)) for attr in attrs ]) return 'PygmentsBlockUserData(%s)' % kwds
PygmentsBlockUserData
python
PyCQA__pylint
tests/functional/d/dataclass/dataclass_with_default_factory.py
{ "start": 552, "end": 1118 }
class ____: """Test dataclass that uses a renamed import of dataclasses""" int_prop: int = dc.field(default=10) list_prop: list = dc.field(default_factory=list) dict_prop: dict = dc.field(default_factory=dict) TEST2 = Test2() for _ in TEST2.list_prop: # This is okay pass TEST2.dict_prop["key"] = "value" # This is okay # Test2.int_prop is inferred as 10, not a Field print(Test2.int_prop + 1) for _ in Test2.int_prop: # [not-an-iterable] pass Test2.int_prop["key"] = "value" # [unsupported-assignment-operation] @dc.dataclass
Test2
python
pallets__werkzeug
src/werkzeug/exceptions.py
{ "start": 5891, "end": 6196 }
class ____(HTTPException): """*400* `Bad Request` Raise if the browser sends something to the application the application or server cannot handle. """ code = 400 description = ( "The browser (or proxy) sent a request that this server could not understand." )
BadRequest
python
huggingface__transformers
src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
{ "start": 43394, "end": 51209 }
class ____(PreTrainedModel): config: OmDetTurboConfig base_model_prefix = "model" main_input_name = "pixel_values" input_modalities = ("image", "text") @torch.no_grad() def _init_weights(self, module): def linear_init_(module_to_init): bound = 1 / math.sqrt(module_to_init.weight.shape[0]) init.uniform_(module_to_init.weight, -bound, bound) if hasattr(module_to_init, "bias") and module_to_init.bias is not None: init.uniform_(module_to_init.bias, -bound, bound) if isinstance(module, OmDetTurboEncoderLayer): linear_init_(module.fc1) linear_init_(module.fc2) elif isinstance(module, OmDetTurboDecoder): init.constant_(module.encoder_bbox_head.layers[-1].weight, 0.0) init.constant_(module.encoder_bbox_head.layers[-1].bias, 0.0) for mlp in module.decoder_bbox_head: init.constant_(mlp.layers[-1].weight, 0.0) init.constant_(mlp.layers[-1].bias, 0.0) linear_init_(module.encoder_vision_features[0]) init.xavier_uniform_(module.encoder_vision_features[0].weight) if module.learn_initial_query: init.xavier_uniform_(module.tgt_embed.weight) init.xavier_uniform_(module.query_position_head.layers[0].weight) init.xavier_uniform_(module.query_position_head.layers[1].weight) for layer in module.channel_projection_layers: init.xavier_uniform_(layer[0].weight) elif isinstance(module, OmDetTurboLanguageBackbone): init.normal_(module.text_projection, std=self.config.text_projection_in_dim**-0.5) elif isinstance(module, (nn.Linear, nn.Conv2d)): init.normal_(module.weight, mean=0.0, std=self.config.init_std) if module.bias is not None: init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d)): init.ones_(module.weight) init.zeros_(module.bias) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, OmDetTurboDecoder): module.gradient_checkpointing = value @staticmethod def _get_cache_key_at_index(input_ids, attention_mask, index): input_ids = input_ids[index] input_mask = attention_mask[index] cache_key = tuple(input_ids[input_mask != 
0].tolist()) return cache_key def get_cached_class_embeddings(self, classes_input_ids, classes_attention_mask): not_cached_index = [] not_cached_classes = [] total_embeddings = [] for idx, _ in enumerate(classes_input_ids): cache_key = self._get_cache_key_at_index(classes_input_ids, classes_attention_mask, idx) if self.language_cache_class.has(cache_key): total_embeddings.append(self.language_cache_class.get(cache_key)) else: total_embeddings.append(None) not_cached_index.append(idx) not_cached_classes.append(cache_key) if not_cached_classes: not_cached_classes_ids = torch.stack([classes_input_ids[idx] for idx in not_cached_index]) embeddings = self.language_backbone(not_cached_classes_ids, encode_type="class") for idx, emb in enumerate(embeddings): idx_to_put = not_cached_index[idx] total_embeddings[idx_to_put] = emb self.language_cache_class.put(not_cached_classes[idx], emb) total_class_embs = torch.stack(total_embeddings).to(self.device) return total_class_embs def get_cached_task_embeddings(self, tasks_input_ids, tasks_attention_mask): not_cached_index = [] not_cached_tasks = [] total_task_features = [] total_task_masks = [] for idx, _ in enumerate(tasks_input_ids): cache_key = self._get_cache_key_at_index(tasks_input_ids, tasks_attention_mask, idx) if self.language_cache_prompt.has(cache_key): task_feature, task_mask = self.language_cache_prompt.get(cache_key) total_task_features.append(task_feature) total_task_masks.append(task_mask) else: total_task_features.append(None) total_task_masks.append(None) not_cached_index.append(idx) not_cached_tasks.append(cache_key) if not_cached_tasks: not_cached_index_ids = torch.stack([tasks_input_ids[idx] for idx in not_cached_index]) not_cached_mask = torch.stack([tasks_attention_mask[idx] for idx in not_cached_index]) embeddings, masks = self.language_backbone(not_cached_index_ids, mask=not_cached_mask, encode_type="task") for idx in range(embeddings.shape[1]): emb = embeddings[:, [idx], :] idx_to_put = 
not_cached_index[idx] cur_mask = torch.unsqueeze(masks[idx], dim=0).to(self.device) total_task_features[idx_to_put] = emb total_task_masks[idx_to_put] = cur_mask self.language_cache_prompt.put(not_cached_tasks[idx], (emb, cur_mask)) # pad before concat if needed max_len = max(task.shape[0] for task in total_task_features) for idx, task in enumerate(total_task_features): if task.shape[0] < max_len: pad_size = max_len - task.shape[0] total_task_features[idx] = F.pad(task, (0, 0, 0, 0, 0, pad_size)) total_task_masks[idx] = F.pad(total_task_masks[idx], (0, pad_size)) total_task_features = torch.cat(total_task_features, dim=1).to(self.device) total_task_masks = torch.cat(total_task_masks, dim=0).to(self.device) return total_task_features, total_task_masks def get_language_embedding( self, classes_input_ids, classes_attention_mask, tasks_input_ids, tasks_attention_mask, classes_structure, ): batched_classes_embeddings = self.get_cached_class_embeddings(classes_input_ids, classes_attention_mask) # regroup class embeddings using saved structure max_class_size = torch.max(classes_structure) class_embeddings_regrouped = [] start = 0 for size in classes_structure: pad_size = max_class_size - size class_embeddings_regrouped.append( F.pad(batched_classes_embeddings[start : start + size], (0, 0, 0, pad_size)).unsqueeze(1) ) start += size class_embeddings = torch.cat(class_embeddings_regrouped, dim=1) task_embeddings, task_mask = self.get_cached_task_embeddings(tasks_input_ids, tasks_attention_mask) return class_embeddings, task_embeddings, task_mask def _cosine_similarity_scaled(a, b, logit_scale): a = a / a.norm(dim=2, keepdim=True).clamp_min(1e-12) b = b / b.norm(dim=1, keepdim=True).clamp_min(1e-12) logit_scale = logit_scale.exp() logits_per_image = logit_scale * torch.bmm(a, b) return logits_per_image def get_class_similarity(class_distance_type, cls_feature, class_proj): logit_scale = torch.tensor(1 / 0.07).log() if class_distance_type == "cosine": class_logits = 
_cosine_similarity_scaled(cls_feature, class_proj, logit_scale) elif class_distance_type == "dot": class_logits = torch.bmm(cls_feature, class_proj) else: raise Exception(f"Unknown class_distance_type {class_distance_type}") return class_logits def _inverse_sigmoid(x, eps=1e-5): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1 / x2)
OmDetTurboPreTrainedModel
python
pytorch__pytorch
test/dynamo/test_repros.py
{ "start": 29300, "end": 29368 }
class ____: def __init__(self, x): self.x = x + 1
IncByOne
python
pallets__werkzeug
src/werkzeug/exceptions.py
{ "start": 21837, "end": 22845 }
class ____(HTTPException): """*500* `Internal Server Error` Raise if an internal server error occurred. This is a good fallback if an unknown error occurred in the dispatcher. .. versionchanged:: 1.0.0 Added the :attr:`original_exception` attribute. """ code = 500 description = ( "The server encountered an internal error and was unable to" " complete your request. Either the server is overloaded or" " there is an error in the application." ) def __init__( self, description: str | None = None, response: SansIOResponse | None = None, original_exception: BaseException | None = None, ) -> None: #: The original exception that caused this 500 error. Can be #: used by frameworks to provide context when handling #: unexpected errors. self.original_exception = original_exception super().__init__(description=description, response=response)
InternalServerError
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/internal/conjecture/junkdrawer.py
{ "start": 5671, "end": 9013 }
class ____(Generic[T]): """A "copy" of a sequence that works by inserting a mask in front of the underlying sequence, so that you can mutate it without changing the underlying sequence. Effectively behaves as if you could do list(x) in O(1) time. The full list API is not supported yet but there's no reason in principle it couldn't be.""" def __init__(self, values: Sequence[T]): self.__values = values self.__len = len(values) self.__mask: dict[int, T] | None = None self.__popped_indices: SortedList[int] | None = None def __len__(self) -> int: if self.__popped_indices is None: return self.__len return self.__len - len(self.__popped_indices) def pop(self, i: int = -1) -> T: if len(self) == 0: raise IndexError("Cannot pop from empty list") i = self.__underlying_index(i) v = None if self.__mask is not None: v = self.__mask.pop(i, None) if v is None: v = self.__values[i] if self.__popped_indices is None: self.__popped_indices = SortedList() self.__popped_indices.add(i) return v def swap(self, i: int, j: int) -> None: """Swap the elements ls[i], ls[j].""" if i == j: return self[i], self[j] = self[j], self[i] def __getitem__(self, i: int) -> T: i = self.__underlying_index(i) default = self.__values[i] if self.__mask is None: return default else: return self.__mask.get(i, default) def __setitem__(self, i: int, v: T) -> None: i = self.__underlying_index(i) if self.__mask is None: self.__mask = {} self.__mask[i] = v def __underlying_index(self, i: int) -> int: n = len(self) if i < -n or i >= n: raise IndexError(f"Index {i} out of range [0, {n})") if i < 0: i += n assert 0 <= i < n if self.__popped_indices is not None: # given an index i in the popped representation of the list, compute # its corresponding index in the underlying list. given # l = [1, 4, 2, 10, 188] # l.pop(3) # l.pop(1) # assert l == [1, 2, 188] # # we want l[i] == self.__values[f(i)], where f is this function. 
assert len(self.__popped_indices) <= len(self.__values) for idx in self.__popped_indices: if idx > i: break i += 1 return i # even though we have len + getitem, mypyc requires iter. def __iter__(self) -> Iterable[T]: for i in range(len(self)): yield self[i] def stack_depth_of_caller() -> int: """Get stack size for caller's frame. From https://stackoverflow.com/a/47956089/9297601 , this is a simple but much faster alternative to `len(inspect.stack(0))`. We use it with get/set recursionlimit to make stack overflows non-flaky; see https://github.com/HypothesisWorks/hypothesis/issues/2494 for details. """ frame = sys._getframe(2) size = 1 while frame: frame = frame.f_back # type: ignore[assignment] size += 1 return size
LazySequenceCopy
python
google__pytype
pytype/tests/test_dataclass_transform.py
{ "start": 97, "end": 997 }
class ____(test_base.BaseTest): """Tests for the @dataclass_transform decorator.""" def test_invalid_target(self): self.CheckWithErrors(""" from typing_extensions import dataclass_transform x = 10 dataclass_transform()(x) # dataclass-error """) def test_args(self): self.CheckWithErrors(""" from typing_extensions import dataclass_transform dataclass_transform(eq_default=True) # not-supported-yet def f(cls): return cls """) def test_pyi_args(self): # Args directly in a pyi file are silently ignored. with self.DepTree([( "foo.pyi", """ from typing import dataclass_transform @dataclass_transform(eq_default=True) def dc(cls): ... """, )]): self.Check(""" import foo @foo.dc class A: x: int a = A(x=10) """)
TestDecorator
python
tensorflow__tensorflow
tensorflow/python/data/experimental/ops/cardinality.py
{ "start": 3824, "end": 4578 }
class ____(dataset_ops.UnaryUnchangedStructureDataset): """A `Dataset` that assert the cardinality of its input.""" def __init__(self, input_dataset, expected_cardinality): self._input_dataset = input_dataset self._expected_cardinality = ops.convert_to_tensor( expected_cardinality, dtype=dtypes.int64, name="expected_cardinality") # pylint: enable=protected-access variant_tensor = ged_ops.assert_cardinality_dataset( self._input_dataset._variant_tensor, # pylint: disable=protected-access self._expected_cardinality, **self._flat_structure) super(_AssertCardinalityDataset, self).__init__(input_dataset, variant_tensor)
_AssertCardinalityDataset
python
pypa__warehouse
tests/unit/test_tasks.py
{ "start": 12034, "end": 17320 }
class ____: def test_gets_task(self): task_func = pretend.stub(__name__="task_func", __module__="tests.foo") task_obj = pretend.stub() celery_app = pretend.stub( gen_task_name=lambda func, module: module + "." + func, tasks={"tests.foo.task_func": task_obj}, ) assert tasks._get_task(celery_app, task_func) is task_obj def test_get_task_via_request(self): task_func = pretend.stub(__name__="task_func", __module__="tests.foo") task_obj = pretend.stub() celery_app = pretend.stub( gen_task_name=lambda func, module: module + "." + func, tasks={"tests.foo.task_func": task_obj}, ) request = pretend.stub(registry={"celery.app": celery_app}) get_task = tasks._get_task_from_request(request) assert get_task(task_func) is task_obj def test_get_task_via_config(self): task_func = pretend.stub(__name__="task_func", __module__="tests.foo") task_obj = pretend.stub() celery_app = pretend.stub( gen_task_name=lambda func, module: module + "." + func, tasks={"tests.foo.task_func": task_obj}, ) config = pretend.stub(registry={"celery.app": celery_app}) assert tasks._get_task_from_config(config, task_func) def test_add_periodic_task(): signature = pretend.stub() task_obj = pretend.stub(s=lambda: signature) celery_app = pretend.stub( add_periodic_task=pretend.call_recorder(lambda *a, **k: None) ) actions = [] config = pretend.stub( action=pretend.call_recorder(lambda d, f, order: actions.append(f)), registry={"celery.app": celery_app}, task=pretend.call_recorder(lambda t: task_obj), ) schedule = pretend.stub() func = pretend.stub() tasks._add_periodic_task(config, schedule, func) for action in actions: action() assert config.action.calls == [pretend.call(None, mock.ANY, order=100)] assert config.task.calls == [pretend.call(func)] assert celery_app.add_periodic_task.calls == [ pretend.call(schedule, signature, args=(), kwargs=(), name=None) ] def test_make_celery_app(): celery_app = pretend.stub() config = pretend.stub(registry={"celery.app": celery_app}) assert tasks._get_celery_app(config) 
is celery_app @pytest.mark.parametrize( ( "env", "ssl", "broker_redis_url", "expected_url", "transport_options", ), [ ( Environment.production, True, "redis://127.0.0.1:6379/10", "redis://127.0.0.1:6379/10", {}, ), ( Environment.production, True, ( "rediss://user:pass@redis.example.com:6379/10" "?socket_timeout=5&irreleveant=0" "&ssl_cert_reqs=required&ssl_ca_certs=/p/a/t/h/cacert.pem" ), ( "rediss://user:pass@redis.example.com:6379/10" "?ssl_cert_reqs=required&ssl_ca_certs=/p/a/t/h/cacert.pem" ), { "socket_timeout": 5, }, ), ], ) def test_includeme(env, ssl, broker_redis_url, expected_url, transport_options): registry_dict = {} config = pretend.stub( action=pretend.call_recorder(lambda *a, **kw: None), add_directive=pretend.call_recorder(lambda *a, **kw: None), add_request_method=pretend.call_recorder(lambda *a, **kw: None), registry=pretend.stub( __getitem__=registry_dict.__getitem__, __setitem__=registry_dict.__setitem__, settings={ "warehouse.env": env, "celery.broker_redis_url": broker_redis_url, "celery.result_url": pretend.stub(), "celery.scheduler_url": pretend.stub(), }, ), ) tasks.includeme(config) app = config.registry["celery.app"] assert app.Task is tasks.WarehouseTask assert app.pyramid_config is config for key, value in { "broker_transport_options": transport_options, "broker_url": expected_url, "broker_use_ssl": ssl, "worker_disable_rate_limits": True, "task_default_queue": "default", "task_default_routing_key": "task.default", "task_serializer": "json", "accept_content": ["json", "msgpack"], "task_queue_ha_policy": "all", "task_queues": (Queue("default", routing_key="task.#"),), "task_routes": {}, "REDBEAT_REDIS_URL": (config.registry.settings["celery.scheduler_url"]), }.items(): assert app.conf[key] == value assert config.action.calls == [pretend.call(("celery", "finalize"), app.finalize)] assert config.add_directive.calls == [ pretend.call("add_periodic_task", tasks._add_periodic_task, action_wrap=False), pretend.call("make_celery_app", 
tasks._get_celery_app, action_wrap=False), pretend.call("task", tasks._get_task_from_config, action_wrap=False), ] assert config.add_request_method.calls == [ pretend.call(tasks._get_task_from_request, name="task", reify=True) ]
TestCeleryTaskGetter
python
joke2k__faker
tests/test_generator.py
{ "start": 487, "end": 5224 }
class ____: """Test Generator class""" def test_get_formatter_returns_correct_formatter(self, generator): foo_provider = generator.providers[0] formatter = generator.get_formatter("foo_formatter") assert callable(formatter) and formatter == foo_provider.foo_formatter def test_get_formatter_with_unknown_formatter(self, generator): with pytest.raises(AttributeError) as excinfo: generator.get_formatter("barFormatter") assert str(excinfo.value) == "Unknown formatter 'barFormatter'" fake = Faker("it_IT") with pytest.raises(AttributeError) as excinfo: fake.get_formatter("barFormatter") assert str(excinfo.value) == "Unknown formatter 'barFormatter' with locale 'it_IT'" def test_format_calls_formatter_on_provider(self, generator): assert generator.format("foo_formatter") == "foobar" def test_format_passes_arguments_to_formatter(self, generator): result = generator.format("foo_formatter_with_arguments", "foo", append="!") assert result == "bazfoo!" def test_add_provider_overrides_old_provider(self, generator): assert generator.format("foo_formatter") == "foobar" generator.add_provider(BarProvider()) assert generator.format("foo_formatter") == "barfoo" def test_parse_without_formatter_tokens(self, generator): assert generator.parse("fooBar#?") == "fooBar#?" 
def test_parse_with_valid_formatter_tokens(self, generator): result = generator.parse('This is {{foo_formatter}} a text with "{{ foo_formatter }}"') assert result == 'This is foobar a text with "foobar"' def test_arguments_group_with_values(self, generator): generator.set_arguments("group1", "argument1", 1) generator.set_arguments("group1", "argument2", 2) assert generator.get_arguments("group1", "argument1") == 1 assert generator.del_arguments("group1", "argument2") == 2 assert generator.get_arguments("group1", "argument2") is None assert generator.get_arguments("group1") == {"argument1": 1} def test_arguments_group_with_dictionaries(self, generator): generator.set_arguments("group2", {"argument1": 3, "argument2": 4}) assert generator.get_arguments("group2") == {"argument1": 3, "argument2": 4} assert generator.del_arguments("group2") == {"argument1": 3, "argument2": 4} assert generator.get_arguments("group2") is None def test_arguments_group_with_invalid_name(self, generator): assert generator.get_arguments("group3") is None assert generator.del_arguments("group3") is None def test_arguments_group_with_invalid_argument_type(self, generator): with pytest.raises(ValueError) as excinfo: generator.set_arguments("group", ["foo", "bar"]) assert str(excinfo.value) == "Arguments must be either a string or dictionary" def test_parse_with_valid_formatter_arguments(self, generator): generator.set_arguments("format_name", {"param": "foo", "append": "bar"}) result = generator.parse('This is "{{foo_formatter_with_arguments:format_name}}"') generator.del_arguments("format_name") assert result == 'This is "bazfoobar"' def test_parse_with_unknown_arguments_group(self, generator): with pytest.raises(AttributeError) as excinfo: generator.parse('This is "{{foo_formatter_with_arguments:unknown}}"') assert str(excinfo.value) == "Unknown argument group 'unknown'" def test_parse_with_unknown_formatter_token(self, generator): with pytest.raises(AttributeError) as excinfo: 
generator.parse("{{barFormatter}}") assert str(excinfo.value) == "Unknown formatter 'barFormatter'" def test_magic_call_calls_format(self, generator): assert generator.foo_formatter() == "foobar" def test_magic_call_calls_format_with_arguments(self, generator): assert generator.foo_formatter_with_arguments("foo") == "bazfoo" @patch("faker.generator.random_module.getstate") def test_get_random(self, mock_system_random, generator): random_instance = generator.random random_instance.getstate() mock_system_random.assert_not_called() @patch("faker.generator.random_module.seed") def test_random_seed_doesnt_seed_system_random(self, mock_system_random, generator): # Save original state of shared random instance to avoid affecting other tests state = generator.random.getstate() generator.seed(0) mock_system_random.assert_not_called() # Restore state of shared random instance generator.random.setstate(state)
TestGenerator
python
dagster-io__dagster
python_modules/libraries/dagster-k8s/dagster_k8s/client.py
{ "start": 2856, "end": 8627 }
class ____(ApiClient): # Forked from ApiClient implementation to pass configuration object down into created model # objects, avoiding lock contention issues. See https://github.com/kubernetes-client/python/issues/2284 # Intentionally circumventing private name mangling # (https://docs.python.org/3/reference/expressions.html#private-name-mangling) of the __deserialize_model method on ApiClient def _ApiClient__deserialize_model(self, data, klass): """Deserializes list or dict to model. :param data: dict, list. :param klass: class literal. :return: model object. """ if not klass.openapi_types and not hasattr(klass, "get_real_child_model"): return data # Below is the only change from the base ApiClient implementation - pass through the # Configuration object to each newly created model so that each one does not have to create # one and acquire a lock kwargs = {"local_vars_configuration": self.configuration} if data is not None and klass.openapi_types is not None and isinstance(data, (list, dict)): for attr, attr_type in six.iteritems(klass.openapi_types): if klass.attribute_map[attr] in data: value = data[klass.attribute_map[attr]] kwargs[attr] = self._ApiClient__deserialize(value, attr_type) instance = klass(**kwargs) if hasattr(instance, "get_real_child_model"): klass_name = instance.get_real_child_model(data) if klass_name: instance = self._ApiClient__deserialize(data, klass_name) return instance def k8s_api_retry( fn: Callable[..., T], max_retries: int, timeout: float, msg_fn=lambda: "Unexpected error encountered in Kubernetes API Client.", ) -> T: check.callable_param(fn, "fn") check.int_param(max_retries, "max_retries") check.numeric_param(timeout, "timeout") remaining_attempts = 1 + max_retries while remaining_attempts > 0: remaining_attempts -= 1 try: return fn() except kubernetes.client.rest.ApiException as e: # Only catch whitelisted ApiExceptions status = e.status # Check if the status code is generally whitelisted whitelisted = status in 
WHITELISTED_TRANSIENT_K8S_STATUS_CODES # If there are remaining attempts, swallow the error if whitelisted and remaining_attempts > 0: time.sleep(timeout) elif whitelisted and remaining_attempts == 0: raise DagsterK8sAPIRetryLimitExceeded( msg_fn(), k8s_api_exception=e, max_retries=max_retries, original_exc_info=sys.exc_info(), ) from e else: raise DagsterK8sUnrecoverableAPIError( msg_fn(), k8s_api_exception=e, original_exc_info=sys.exc_info(), ) from e check.failed("Unreachable.") def k8s_api_retry_creation_mutation( fn: Callable[..., None], max_retries: int, timeout: float, msg_fn=lambda: "Unexpected error encountered in Kubernetes API Client.", ) -> None: """Like k8s_api_retry, but ensures idempotence by allowing a 409 error after a failure, which indicates that the desired mutation actually went through. Also has an empty return type since we can't guarantee on being able to return anything as a result of this case. """ check.callable_param(fn, "fn") check.int_param(max_retries, "max_retries") check.numeric_param(timeout, "timeout") remaining_attempts = 1 + max_retries retry_count = 0 while remaining_attempts > 0: remaining_attempts -= 1 try: fn() return except kubernetes.client.rest.ApiException as e: retry_count = retry_count + 1 # Only catch whitelisted ApiExceptions status = e.status # 409 (Conflict) here indicates that hte object actually was created # during a previous attempt, despite logging a failure if retry_count > 1 and status == 409: return # Check if the status code is generally whitelisted whitelisted = status in WHITELISTED_TRANSIENT_K8S_STATUS_CODES # If there are remaining attempts, swallow the error if whitelisted and remaining_attempts > 0: time.sleep(timeout) elif whitelisted and remaining_attempts == 0: raise DagsterK8sAPIRetryLimitExceeded( msg_fn(), k8s_api_exception=e, max_retries=max_retries, original_exc_info=sys.exc_info(), ) from e else: raise DagsterK8sUnrecoverableAPIError( msg_fn(), k8s_api_exception=e, 
original_exc_info=sys.exc_info(), ) from e except urllib3.exceptions.HTTPError as e: # Temporary for recovery detection logger.error( f"k8s_api_retry_creation_mutation: {e.__module__}.{e.__class__.__name__}: {e!s}" ) if remaining_attempts > 0: time.sleep(timeout) else: raise DagsterK8sAPIRetryLimitExceeded( msg_fn(), k8s_api_exception=e, max_retries=max_retries, original_exc_info=sys.exc_info(), ) from e check.failed("Unreachable.")
PatchedApiClient
python
MongoEngine__mongoengine
mongoengine/fields.py
{ "start": 70971, "end": 75982 }
class ____(BaseField): """Provides a sequential counter see: https://www.mongodb.com/docs/manual/reference/method/ObjectId/#ObjectIDs-SequenceNumbers .. note:: Although traditional databases often use increasing sequence numbers for primary keys. In MongoDB, the preferred approach is to use Object IDs instead. The concept is that in a very large cluster of machines, it is easier to create an object ID than have global, uniformly increasing sequence numbers. :param collection_name: Name of the counter collection (default 'mongoengine.counters') :param sequence_name: Name of the sequence in the collection (default 'ClassName.counter') :param value_decorator: Any callable to use as a counter (default int) Use any callable as `value_decorator` to transform calculated counter into any value suitable for your needs, e.g. string or hexadecimal representation of the default integer counter value. .. note:: In case the counter is defined in the abstract document, it will be common to all inherited documents and the default sequence name will be the class name of the abstract document. 
""" _auto_gen = True COLLECTION_NAME = "mongoengine.counters" VALUE_DECORATOR = int def __init__( self, collection_name=None, db_alias=None, sequence_name=None, value_decorator=None, *args, **kwargs, ): self.collection_name = collection_name or self.COLLECTION_NAME self.db_alias = db_alias or DEFAULT_CONNECTION_NAME self.sequence_name = sequence_name self.value_decorator = ( value_decorator if callable(value_decorator) else self.VALUE_DECORATOR ) super().__init__(*args, **kwargs) def generate(self): """ Generate and Increment the counter """ sequence_name = self.get_sequence_name() sequence_id = f"{sequence_name}.{self.name}" collection = get_db(alias=self.db_alias)[self.collection_name] counter = collection.find_one_and_update( filter={"_id": sequence_id}, update={"$inc": {"next": 1}}, return_document=ReturnDocument.AFTER, upsert=True, session=_get_session(), ) return self.value_decorator(counter["next"]) def set_next_value(self, value): """Helper method to set the next sequence value""" sequence_name = self.get_sequence_name() sequence_id = f"{sequence_name}.{self.name}" collection = get_db(alias=self.db_alias)[self.collection_name] counter = collection.find_one_and_update( filter={"_id": sequence_id}, update={"$set": {"next": value}}, return_document=ReturnDocument.AFTER, upsert=True, session=_get_session(), ) return self.value_decorator(counter["next"]) def get_next_value(self): """Helper method to get the next value for previewing. .. warning:: There is no guarantee this will be the next value as it is only fixed on set. 
""" sequence_name = self.get_sequence_name() sequence_id = f"{sequence_name}.{self.name}" collection = get_db(alias=self.db_alias)[self.collection_name] data = collection.find_one({"_id": sequence_id}, session=_get_session()) if data: return self.value_decorator(data["next"] + 1) return self.value_decorator(1) def get_sequence_name(self): if self.sequence_name: return self.sequence_name owner = self.owner_document if issubclass(owner, Document) and not owner._meta.get("abstract"): return owner._get_collection_name() else: return ( "".join("_%s" % c if c.isupper() else c for c in owner._class_name) .strip("_") .lower() ) def __get__(self, instance, owner): value = super().__get__(instance, owner) if value is None and instance._initialised: value = self.generate() instance._data[self.name] = value instance._mark_as_changed(self.name) return value def __set__(self, instance, value): if value is None and instance._initialised: value = self.generate() return super().__set__(instance, value) def prepare_query_value(self, op, value): """ This method is overridden in order to convert the query value into to required type. We need to do this in order to be able to successfully compare query values passed as string, the base implementation returns the value as is. """ return self.value_decorator(value) def to_python(self, value): if value is None: value = self.generate() return value
SequenceField
python
anthropics__anthropic-sdk-python
src/anthropic/types/beta/message_count_tokens_params.py
{ "start": 2364, "end": 10063 }
class ____(TypedDict, total=False): messages: Required[Iterable[BetaMessageParam]] """Input messages. Our models are trained to operate on alternating `user` and `assistant` conversational turns. When creating a new `Message`, you specify the prior conversational turns with the `messages` parameter, and the model then generates the next `Message` in the conversation. Consecutive `user` or `assistant` turns in your request will be combined into a single turn. Each input message must be an object with a `role` and `content`. You can specify a single `user`-role message, or you can include multiple `user` and `assistant` messages. If the final message uses the `assistant` role, the response content will continue immediately from the content in that message. This can be used to constrain part of the model's response. Example with a single `user` message: ```json [{ "role": "user", "content": "Hello, Claude" }] ``` Example with multiple conversational turns: ```json [ { "role": "user", "content": "Hello there." }, { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, { "role": "user", "content": "Can you explain LLMs in plain English?" } ] ``` Example with a partially-filled response from Claude: ```json [ { "role": "user", "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" }, { "role": "assistant", "content": "The best answer is (" } ] ``` Each input message `content` may be either a single `string` or an array of content blocks, where each block has a specific `type`. Using a `string` for `content` is shorthand for an array of one content block of type `"text"`. The following input messages are equivalent: ```json { "role": "user", "content": "Hello, Claude" } ``` ```json { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } ``` See [input examples](https://docs.claude.com/en/api/messages-examples). 
Note that if you want to include a [system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the top-level `system` parameter — there is no `"system"` role for input messages in the Messages API. There is a limit of 100,000 messages in a single request. """ model: Required[ModelParam] """ The model that will complete your prompt.\n\nSee [models](https://docs.anthropic.com/en/docs/models-overview) for additional details and options. """ context_management: Optional[BetaContextManagementConfigParam] """Context management configuration. This allows you to control how Claude manages context across multiple requests, such as whether to clear function results or not. """ mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] """MCP servers to be utilized in this request""" output_config: BetaOutputConfigParam """Configuration options for the model's output. Controls aspects like how much effort the model puts into its response. """ output_format: Optional[BetaJSONOutputFormatParam] """A schema to specify Claude's output format in responses.""" system: Union[str, Iterable[BetaTextBlockParam]] """System prompt. A system prompt is a way of providing context and instructions to Claude, such as specifying a particular goal or role. See our [guide to system prompts](https://docs.claude.com/en/docs/system-prompts). """ thinking: BetaThinkingConfigParam """Configuration for enabling Claude's extended thinking. When enabled, responses include `thinking` content blocks showing Claude's thinking process before the final answer. Requires a minimum budget of 1,024 tokens and counts towards your `max_tokens` limit. See [extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking) for details. """ tool_choice: BetaToolChoiceParam """How the model should use the provided tools. The model can use a specific tool, any available tool, decide by itself, or not use tools at all. 
""" tools: Iterable[Tool] """Definitions of tools that the model may use. If you include `tools` in your API request, the model may return `tool_use` content blocks that represent the model's use of those tools. You can then run those tools using the tool input generated by the model and then optionally return results back to the model using `tool_result` content blocks. There are two types of tools: **client tools** and **server tools**. The behavior described below applies to client tools. For [server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools), see their individual documentation as each has its own behavior (e.g., the [web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)). Each tool definition includes: - `name`: Name of the tool. - `description`: Optional, but strongly-recommended description of the tool. - `input_schema`: [JSON schema](https://json-schema.org/draft/2020-12) for the tool `input` shape that the model will produce in `tool_use` output content blocks. For example, if you defined `tools` as: ```json [ { "name": "get_stock_price", "description": "Get the current stock price for a given ticker symbol.", "input_schema": { "type": "object", "properties": { "ticker": { "type": "string", "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." 
} }, "required": ["ticker"] } } ] ``` And then asked the model "What's the S&P 500 at today?", the model might produce `tool_use` content blocks in the response like this: ```json [ { "type": "tool_use", "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", "name": "get_stock_price", "input": { "ticker": "^GSPC" } } ] ``` You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an input, and return the following back to the model in a subsequent `user` message: ```json [ { "type": "tool_result", "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", "content": "259.75 USD" } ] ``` Tools can be used for workflows that include running client-side tools and functions, or more generally whenever you want the model to produce a particular JSON structure of output. See our [guide](https://docs.claude.com/en/docs/tool-use) for more details. """ betas: Annotated[List[AnthropicBetaParam], PropertyInfo(alias="anthropic-beta")] """Optional header to specify the beta version(s) you want to use.""" Tool: TypeAlias = Union[ BetaToolParam, BetaToolBash20241022Param, BetaToolBash20250124Param, BetaCodeExecutionTool20250522Param, BetaCodeExecutionTool20250825Param, BetaToolComputerUse20241022Param, BetaMemoryTool20250818Param, BetaToolComputerUse20250124Param, BetaToolTextEditor20241022Param, BetaToolComputerUse20251124Param, BetaToolTextEditor20250124Param, BetaToolTextEditor20250429Param, BetaToolTextEditor20250728Param, BetaWebSearchTool20250305Param, BetaWebFetchTool20250910Param, BetaToolSearchToolBm25_20251119Param, BetaToolSearchToolRegex20251119Param, BetaMCPToolsetParam, ]
MessageCountTokensParams
python
doocs__leetcode
solution/1500-1599/1544.Make The String Great/Solution.py
{ "start": 0, "end": 254 }
class ____: def makeGood(self, s: str) -> str: stk = [] for c in s: if not stk or abs(ord(stk[-1]) - ord(c)) != 32: stk.append(c) else: stk.pop() return "".join(stk)
Solution
python
python-markdown__markdown
tests/test_syntax/inline/test_autolinks.py
{ "start": 781, "end": 2689 }
class ____(TestCase): def test_email_address(self): self.assertMarkdownRenders( 'asdfasdfadsfasd <yuri@freewisdom.org> or you can say ', '<p>asdfasdfadsfasd <a href="&#109;&#97;&#105;&#108;&#116;&#111;&#58;&#121;&#117;&#114;' '&#105;&#64;&#102;&#114;&#101;&#101;&#119;&#105;&#115;&#100;&#111;&#109;&#46;&#111;&#114;' '&#103;">&#121;&#117;&#114;&#105;&#64;&#102;&#114;&#101;&#101;&#119;&#105;&#115;&#100;' '&#111;&#109;&#46;&#111;&#114;&#103;</a> or you can say </p>' ) def test_mailto_email_address(self): self.assertMarkdownRenders( 'instead <mailto:yuri@freewisdom.org>', '<p>instead <a href="&#109;&#97;&#105;&#108;&#116;&#111;&#58;&#121;&#117;&#114;&#105;&#64;' '&#102;&#114;&#101;&#101;&#119;&#105;&#115;&#100;&#111;&#109;&#46;&#111;&#114;&#103;">' '&#121;&#117;&#114;&#105;&#64;&#102;&#114;&#101;&#101;&#119;&#105;&#115;&#100;&#111;&#109;' '&#46;&#111;&#114;&#103;</a></p>' ) def test_email_address_with_ampersand(self): self.assertMarkdownRenders( '<bob&sue@example.com>', '<p><a href="&#109;&#97;&#105;&#108;&#116;&#111;&#58;&#98;&#111;&#98;&#38;&#115;&#117;&#101;' '&#64;&#101;&#120;&#97;&#109;&#112;&#108;&#101;&#46;&#99;&#111;&#109;">&#98;&#111;&#98;&amp;' '&#115;&#117;&#101;&#64;&#101;&#120;&#97;&#109;&#112;&#108;&#101;&#46;&#99;&#111;&#109;</a></p>' ) def test_invalid_email_address_local_part(self): self.assertMarkdownRenders( 'Missing local-part <@domain>', '<p>Missing local-part &lt;@domain&gt;</p>' ) def test_invalid_email_address_domain(self): self.assertMarkdownRenders( 'Missing domain <local-part@>', '<p>Missing domain &lt;local-part@&gt;</p>' )
TestAutomaticLinks
python
viewflow__viewflow
viewflow/workflow/flow/viewset.py
{ "start": 423, "end": 5495 }
class ____(metaclass=ViewsetMeta): """Common Views for Flow and FlowApp viewsets""" def __init__(self, flow_class, **kwargs): super().__init__(**kwargs) self._flow_class = flow_class def filter_kwargs(self, view_class, **kwargs): return super().filter_kwargs( view_class, **{"flow_class": self._flow_class, "viewset": self, **kwargs} ) """ Permissions """ def has_view_permission(self, user, obj=None): return self._flow_class.instance.has_view_permission(user) """ Dashboard """ dashboard_view_class = views.DashboardView def get_dashboard_view_kwargs(self, **kwargs): return self.filter_kwargs(self.dashboard_view_class, **kwargs) @viewprop def dashboard_view(self): return self.dashboard_view_class.as_view(**self.get_dashboard_view_kwargs()) @property def index_path(self): return path("", self.dashboard_view, name="index") """ Process list """ process_list_view_class = views.DashboardProcessListView def get_process_list_view_kwargs(self, **kwargs): return self.filter_kwargs(self.process_list_view_class, **kwargs) @viewprop def process_list_view(self): return self.process_list_view_class.as_view( **self.get_process_list_view_kwargs() ) @property def process_list_path(self): return path("flows/", self.process_list_view, name="process_list") """ Process detail """ process_detail_view_class = views.DetailProcessView def get_process_detail_view_kwargs(self, **kwargs): return self.filter_kwargs(self.process_detail_view_class, **kwargs) @viewprop def process_detail_view(self): return self.process_detail_view_class.as_view( **self.get_process_detail_view_kwargs() ) @property def process_detail_path(self): return path( "<int:process_pk>/", self.process_detail_view, name="process_detail" ) """ Process cancel """ process_cancel_view_class = views.CancelProcessView def get_process_cancel_view_kwargs(self, **kwargs): return self.filter_kwargs(self.process_cancel_view_class, **kwargs) @viewprop def process_cancel_view(self): return self.process_cancel_view_class.as_view( 
**self.get_process_cancel_view_kwargs() ) @property def process_cancel_path(self): return path( "<int:process_pk>/cancel/", self.process_cancel_view, name="process_cancel" ) """ Task list """ task_list_view_class = views.DashboardTaskListView def get_task_list_view_kwargs(self, **kwargs): return self.filter_kwargs(self.task_list_view_class, **kwargs) @viewprop def task_list_view(self): return self.task_list_view_class.as_view(**self.get_task_list_view_kwargs()) @property def task_list_path(self): return path("tasks/", self.task_list_view, name="task_list") """ Chart View """ chart_view_class = views.FlowChartView def get_chart_view_kwargs(self, **kwargs): return self.filter_kwargs(self.chart_view_class, **kwargs) @viewprop def chart_view(self): return self.chart_view_class.as_view(**self.get_chart_view_kwargs()) @property def chart_path(self): return path("chart/", self.chart_view, name="chart") @property def flow_chart_path(self): return path("<int:process_pk>/chart/", self.chart_view, name="process_chart") """ Utils """ def get_success_url(self, request): if not hasattr(request, "activation"): return self.reverse("index") if "_continue" in request.POST: manager = self._flow_class.task_class._default_manager next_user_task = request.activation.task if next_user_task.status == STATUS.DONE: next_user_task = manager.next_user_task( request.activation.process, request.user, ) if next_user_task: return next_user_task.flow_task.reverse( "index", args=[next_user_task.process_id, next_user_task.pk] ) else: return self.reverse( "process_detail", args=[request.activation.process.pk] ) if "back" in request.GET: back_url = request.GET["back"] if not is_safe_url(url=back_url, allowed_hosts={request.get_host()}): back_url = "/" return back_url if hasattr(request, "session") and "vf-pin-location" in request.session: back_url = request.session.get("vf-pin-location", "") if not is_safe_url(url=back_url, allowed_hosts={request.get_host()}): back_url = "/" return back_url return 
self.reverse("process_detail", args=[request.activation.process.pk])
BaseFlowViewsMixin
python
doocs__leetcode
solution/0700-0799/0766.Toeplitz Matrix/Solution.py
{ "start": 0, "end": 298 }
class ____: def isToeplitzMatrix(self, matrix: List[List[int]]) -> bool: m, n = len(matrix), len(matrix[0]) for i in range(1, m): for j in range(1, n): if matrix[i][j] != matrix[i - 1][j - 1]: return False return True
Solution
python
walkccc__LeetCode
solutions/3330. Find the Original Typed String I/3330.py
{ "start": 0, "end": 150 }
class ____: def possibleStringCount(self, word: str) -> int: return 1 + sum(a == b for a, b in itertools.pairwise(word))
Solution
python
tiangolo__fastapi
tests/test_response_by_alias.py
{ "start": 267, "end": 11379 }
class ____(BaseModel): name: str if PYDANTIC_V2: model_config = ConfigDict( json_schema_extra={ "description": ( "response_model_by_alias=False is basically a quick hack, to support " "proper OpenAPI use another model with the correct field names" ) } ) else: class Config: schema_extra = { "description": ( "response_model_by_alias=False is basically a quick hack, to support " "proper OpenAPI use another model with the correct field names" ) } @app.get("/dict", response_model=Model, response_model_by_alias=False) def read_dict(): return {"alias": "Foo"} @app.get("/model", response_model=Model, response_model_by_alias=False) def read_model(): return Model(alias="Foo") @app.get("/list", response_model=List[Model], response_model_by_alias=False) def read_list(): return [{"alias": "Foo"}, {"alias": "Bar"}] @app.get("/by-alias/dict", response_model=Model) def by_alias_dict(): return {"alias": "Foo"} @app.get("/by-alias/model", response_model=Model) def by_alias_model(): return Model(alias="Foo") @app.get("/by-alias/list", response_model=List[Model]) def by_alias_list(): return [{"alias": "Foo"}, {"alias": "Bar"}] @app.get("/no-alias/dict", response_model=ModelNoAlias) def no_alias_dict(): return {"name": "Foo"} @app.get("/no-alias/model", response_model=ModelNoAlias) def no_alias_model(): return ModelNoAlias(name="Foo") @app.get("/no-alias/list", response_model=List[ModelNoAlias]) def no_alias_list(): return [{"name": "Foo"}, {"name": "Bar"}] client = TestClient(app) def test_read_dict(): response = client.get("/dict") assert response.status_code == 200, response.text assert response.json() == {"name": "Foo"} def test_read_model(): response = client.get("/model") assert response.status_code == 200, response.text assert response.json() == {"name": "Foo"} def test_read_list(): response = client.get("/list") assert response.status_code == 200, response.text assert response.json() == [ {"name": "Foo"}, {"name": "Bar"}, ] def test_read_dict_by_alias(): response = 
client.get("/by-alias/dict") assert response.status_code == 200, response.text assert response.json() == {"alias": "Foo"} def test_read_model_by_alias(): response = client.get("/by-alias/model") assert response.status_code == 200, response.text assert response.json() == {"alias": "Foo"} def test_read_list_by_alias(): response = client.get("/by-alias/list") assert response.status_code == 200, response.text assert response.json() == [ {"alias": "Foo"}, {"alias": "Bar"}, ] def test_read_dict_no_alias(): response = client.get("/no-alias/dict") assert response.status_code == 200, response.text assert response.json() == {"name": "Foo"} def test_read_model_no_alias(): response = client.get("/no-alias/model") assert response.status_code == 200, response.text assert response.json() == {"name": "Foo"} def test_read_list_no_alias(): response = client.get("/no-alias/list") assert response.status_code == 200, response.text assert response.json() == [ {"name": "Foo"}, {"name": "Bar"}, ] def test_openapi_schema(): response = client.get("/openapi.json") assert response.status_code == 200, response.text assert response.json() == { "openapi": "3.1.0", "info": {"title": "FastAPI", "version": "0.1.0"}, "paths": { "/dict": { "get": { "summary": "Read Dict", "operationId": "read_dict_dict_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/Model"} } }, } }, } }, "/model": { "get": { "summary": "Read Model", "operationId": "read_model_model_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/Model"} } }, } }, } }, "/list": { "get": { "summary": "Read List", "operationId": "read_list_list_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "title": "Response Read List List Get", "type": "array", "items": {"$ref": "#/components/schemas/Model"}, } 
} }, } }, } }, "/by-alias/dict": { "get": { "summary": "By Alias Dict", "operationId": "by_alias_dict_by_alias_dict_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/Model"} } }, } }, } }, "/by-alias/model": { "get": { "summary": "By Alias Model", "operationId": "by_alias_model_by_alias_model_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": {"$ref": "#/components/schemas/Model"} } }, } }, } }, "/by-alias/list": { "get": { "summary": "By Alias List", "operationId": "by_alias_list_by_alias_list_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "title": "Response By Alias List By Alias List Get", "type": "array", "items": {"$ref": "#/components/schemas/Model"}, } } }, } }, } }, "/no-alias/dict": { "get": { "summary": "No Alias Dict", "operationId": "no_alias_dict_no_alias_dict_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ModelNoAlias" } } }, } }, } }, "/no-alias/model": { "get": { "summary": "No Alias Model", "operationId": "no_alias_model_no_alias_model_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ModelNoAlias" } } }, } }, } }, "/no-alias/list": { "get": { "summary": "No Alias List", "operationId": "no_alias_list_no_alias_list_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { "title": "Response No Alias List No Alias List Get", "type": "array", "items": { "$ref": "#/components/schemas/ModelNoAlias" }, } } }, } }, } }, }, "components": { "schemas": { "Model": { "title": "Model", "required": ["alias"], "type": "object", "properties": {"alias": {"title": "Alias", "type": "string"}}, }, 
"ModelNoAlias": { "title": "ModelNoAlias", "required": ["name"], "type": "object", "properties": {"name": {"title": "Name", "type": "string"}}, "description": "response_model_by_alias=False is basically a quick hack, to support proper OpenAPI use another model with the correct field names", }, } }, }
ModelNoAlias
python
explosion__spaCy
spacy/util.py
{ "start": 10280, "end": 11119 }
class ____(dict): """Simplified implementation of a frozen dict, mainly used as default function or method argument (for arguments that should default to empty dictionary). Will raise an error if user or spaCy attempts to add to dict. """ def __init__(self, *args, error: str = Errors.E095, **kwargs) -> None: """Initialize the frozen dict. Can be initialized with pre-defined values. error (str): The error message when user tries to assign to dict. """ super().__init__(*args, **kwargs) self.error = error def __setitem__(self, key, value): raise NotImplementedError(self.error) def pop(self, key, default=None): raise NotImplementedError(self.error) def update(self, other): raise NotImplementedError(self.error)
SimpleFrozenDict
python
keras-team__keras
keras/src/ops/numpy.py
{ "start": 200588, "end": 202144 }
class ____(Operation): def __init__(self, repeats, *, name=None): super().__init__(name=name) self.repeats = repeats def call(self, x): return backend.numpy.tile(x, self.repeats) def compute_output_spec(self, x): x_shape = list(x.shape) repeats = self.repeats if isinstance(repeats, int): repeats = [repeats] if len(x_shape) > len(repeats): repeats = [1] * (len(x_shape) - len(repeats)) + repeats else: x_shape = [1] * (len(repeats) - len(x_shape)) + x_shape output_shape = [] for x_size, repeat in zip(x_shape, repeats): if x_size is None: output_shape.append(None) else: output_shape.append(x_size * repeat) return KerasTensor(output_shape, dtype=x.dtype) @keras_export(["keras.ops.tile", "keras.ops.numpy.tile"]) def tile(x, repeats): """Repeat `x` the number of times given by `repeats`. If `repeats` has length `d`, the result will have dimension of `max(d, x.ndim)`. If `x.ndim < d`, `x` is promoted to be d-dimensional by prepending new axes. If `x.ndim > d`, `repeats` is promoted to `x.ndim` by prepending 1's to it. Args: x: Input tensor. repeats: The number of repetitions of `x` along each axis. Returns: The tiled output tensor. """ if any_symbolic_tensors((x,)): return Tile( repeats, ).symbolic_call(x) return backend.numpy.tile(x, repeats)
Tile
python
readthedocs__readthedocs.org
readthedocs/projects/migrations/0083_init_generic_webhooks.py
{ "start": 214, "end": 3904 }
class ____(migrations.Migration): safe = Safe.after_deploy() dependencies = [ ("projects", "0082_add_extra_history_fields"), ] operations = [ migrations.CreateModel( name="WebHookEvent", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "name", models.CharField( choices=[ ("build:triggered", "Build triggered"), ("build:passed", "Build passed"), ("build:failed", "Build failed"), ], max_length=256, unique=True, ), ), ], ), migrations.AddField( model_name="emailhook", name="created", field=django_extensions.db.fields.CreationDateTimeField( auto_now_add=True, default=django.utils.timezone.now, verbose_name="created", null=True, ), preserve_default=False, ), migrations.AddField( model_name="emailhook", name="modified", field=django_extensions.db.fields.ModificationDateTimeField( auto_now=True, verbose_name="modified", null=True ), ), migrations.AddField( model_name="webhook", name="created", field=django_extensions.db.fields.CreationDateTimeField( auto_now_add=True, default=django.utils.timezone.now, verbose_name="created", null=True, ), preserve_default=False, ), migrations.AddField( model_name="webhook", name="modified", field=django_extensions.db.fields.ModificationDateTimeField( auto_now=True, verbose_name="modified", null=True ), ), migrations.AddField( model_name="webhook", name="payload", field=models.TextField( blank=True, help_text='JSON payload to send to the webhook. 
Check <a href="https://docs.readthedocs.io/page/build-notifications.html#variable-substitutions-reference">the docs</a> for available substitutions.', max_length=25000, null=True, verbose_name="JSON payload", ), ), migrations.AddField( model_name="webhook", name="secret", field=models.CharField( blank=True, help_text="Secret used to sign the payload of the webhook", max_length=255, null=True, ), ), migrations.AlterField( model_name="webhook", name="url", field=models.URLField( help_text="URL to send the webhook to", max_length=600, verbose_name="URL", ), ), migrations.AddField( model_name="webhook", name="events", field=models.ManyToManyField( help_text="Events to subscribe", related_name="webhooks", to="projects.WebHookEvent", ), ), ]
Migration
python
tiangolo__fastapi
tests/test_include_router_defaults_overrides.py
{ "start": 176, "end": 255 }
class ____(JSONResponse): media_type = "application/x-level-0"
ResponseLevel0
python
Netflix__metaflow
test/unit/inheritance/flows/mutator_with_base_config_base.py
{ "start": 216, "end": 1854 }
class ____(FlowMutator): """ Mutator that uses config values from base class to inject parameters. This mutator looks for a 'mutator_config' and injects parameters based on its values. """ def init(self, config_name): self.config_name = config_name def pre_mutate(self, mutable_flow): """Add parameters based on config values from base class""" print(f"ConfigBasedMutator: Looking for config '{self.config_name}'") # Find the config in the flow config_value = None for name, config in mutable_flow.configs: if name == self.config_name: # Config is a dictionary-like object config_value = dict(config) print(f"Found config: {name} with value {config_value}") break if config_value: # Inject parameters based on config if "param_to_inject" in config_value: param_name = config_value["param_to_inject"] default_val = config_value.get("default_value", 999) print(f"Injecting parameter: {param_name} with default {default_val}") mutable_flow.add_parameter( param_name, Parameter(param_name, default=default_val) ) if "inject_count" in config_value: count_val = config_value["inject_count"] print(f"Injecting count parameter with value {count_val}") mutable_flow.add_parameter( "injected_count", Parameter("injected_count", default=count_val) )
ConfigBasedMutator
python
ipython__ipython
docs/autogen_shortcuts.py
{ "start": 542, "end": 710 }
class ____: #: a sequence of keys (each element on the list corresponds to pressing one or more keys) keys_sequence: List[str] filter: str @dataclass
Shortcut
python
tornadoweb__tornado
tornado/test/web_test.py
{ "start": 86021, "end": 86412 }
class ____(WebTestCase): def get_handlers(self): return [("/foo", RequestHandler)] def get_app_kwargs(self): return dict( default_handler_class=ErrorHandler, default_handler_args=dict(status_code=403), ) def test_403(self): response = self.fetch("/") self.assertEqual(response.code, 403)
DefaultHandlerArgumentsTest
python
allegroai__clearml
clearml/backend_api/services/v2_20/queues.py
{ "start": 82618, "end": 83517 }
class ____(Request): """ Peek the next task from a given queue :param queue: ID of the queue :type queue: str """ _service = "queues" _action = "peek_task" _version = "2.20" _schema = { "definitions": {}, "properties": {"queue": {"description": "ID of the queue", "type": "string"}}, "required": ["queue"], "type": "object", } def __init__(self, queue: str, **kwargs: Any) -> None: super(PeekTaskRequest, self).__init__(**kwargs) self.queue = queue @schema_property("queue") def queue(self) -> str: return self._property_queue @queue.setter def queue(self, value: str) -> None: if value is None: self._property_queue = None return self.assert_isinstance(value, "queue", six.string_types) self._property_queue = value
PeekTaskRequest
python
mlflow__mlflow
mlflow/utils/proto_json_utils.py
{ "start": 10690, "end": 27044 }
class ____(MlflowInvalidInputException): def __init__(self, col_name, col_type, ex): super().__init__( message=f"Data is not compatible with model signature. " f"Failed to convert column {col_name} to type '{col_type}'. Error: '{ex!r}'" ) def cast_df_types_according_to_schema(pdf, schema): import numpy as np from mlflow.models.utils import _enforce_array, _enforce_map, _enforce_object from mlflow.types.schema import AnyType, Array, DataType, Map, Object actual_cols = set(pdf.columns) if schema.has_input_names(): dtype_list = zip(schema.input_names(), schema.input_types()) elif schema.is_tensor_spec() and len(schema.input_types()) == 1: dtype_list = zip(actual_cols, [schema.input_types()[0] for _ in actual_cols]) else: n = min(len(schema.input_types()), len(pdf.columns)) dtype_list = zip(pdf.columns[:n], schema.input_types()[:n]) required_input_names = set(schema.required_input_names()) for col_name, col_type_spec in dtype_list: if isinstance(col_type_spec, DataType): col_type = col_type_spec.to_pandas() else: col_type = col_type_spec if col_name in actual_cols: required = col_name in required_input_names try: if isinstance(col_type_spec, DataType) and col_type_spec == DataType.binary: # NB: We expect binary data to be passed base64 encoded pdf[col_name] = pdf[col_name].map( lambda x: base64.decodebytes(bytes(x, "utf8")) ) elif col_type == np.dtype(bytes): pdf[col_name] = pdf[col_name].map(lambda x: bytes(x, "utf8")) elif schema.is_tensor_spec() and isinstance(pdf[col_name].iloc[0], list): # For dataframe with multidimensional column, it contains # list type values, we cannot convert # its type by `astype`, skip conversion. # The conversion will be done in `_enforce_schema` while # `PyFuncModel.predict` being called. 
pass elif isinstance(col_type_spec, Array): pdf[col_name] = pdf[col_name].map( lambda x: _enforce_array(x, col_type_spec, required=required) ) elif isinstance(col_type_spec, Object): pdf[col_name] = pdf[col_name].map( lambda x: _enforce_object(x, col_type_spec, required=required) ) elif isinstance(col_type_spec, Map): pdf[col_name] = pdf[col_name].map( lambda x: _enforce_map(x, col_type_spec, required=required) ) elif isinstance(col_type_spec, AnyType): pass else: pdf[col_name] = pdf[col_name].astype(col_type, copy=False) except Exception as ex: raise MlflowFailedTypeConversion(col_name, col_type, ex) return pdf def dataframe_from_parsed_json(decoded_input, pandas_orient, schema=None): """Convert parsed json into pandas.DataFrame. If schema is provided this methods will attempt to cast data types according to the schema. This include base64 decoding for binary columns. Args: decoded_input: Parsed json - either a list or a dictionary. pandas_orient: pandas data frame convention used to store the data. schema: MLflow schema used when parsing the data. Returns: pandas.DataFrame. """ import pandas as pd if pandas_orient == "records": if not isinstance(decoded_input, list): if isinstance(decoded_input, dict): typemessage = "dictionary" else: typemessage = f"type {type(decoded_input)}" raise MlflowInvalidInputException( f"Dataframe records format must be a list of records. Got {typemessage}." ) try: pdf = pd.DataFrame(data=decoded_input) except Exception as ex: raise MlflowInvalidInputException( f"Provided dataframe_records field is not a valid dataframe representation in " f"'records' format. Error: '{ex}'" ) elif pandas_orient == "split": if not isinstance(decoded_input, dict): if isinstance(decoded_input, list): typemessage = "list" else: typemessage = f"type {type(decoded_input)}" raise MlflowInvalidInputException( f"Dataframe split format must be a dictionary. Got {typemessage}." 
) keys = set(decoded_input.keys()) missing_data = "data" not in keys extra_keys = keys.difference({"columns", "data", "index"}) if missing_data or extra_keys: raise MlflowInvalidInputException( f"Dataframe split format must have 'data' field and optionally 'columns' " f"and 'index' fields. Got {keys}.'" ) try: pdf = pd.DataFrame( index=decoded_input.get("index"), columns=decoded_input.get("columns"), data=decoded_input["data"], ) except Exception as ex: raise MlflowInvalidInputException( f"Provided dataframe_split field is not a valid dataframe representation in " f"'split' format. Error: '{ex}'" ) if schema is not None: pdf = cast_df_types_according_to_schema(pdf, schema) return pdf def dataframe_from_raw_json(path_or_str, schema=None, pandas_orient: str = "split"): """Parse raw json into a pandas.Dataframe. If schema is provided this methods will attempt to cast data types according to the schema. This include base64 decoding for binary columns. Args: path_or_str: Path to a json file or a json string. schema: MLflow schema used when parsing the data. pandas_orient: pandas data frame convention used to store the data. Returns: pandas.DataFrame. """ if os.path.exists(path_or_str): with open(path_or_str) as f: parsed_json = json.load(f) else: parsed_json = json.loads(path_or_str) return dataframe_from_parsed_json(parsed_json, pandas_orient, schema) def _get_jsonable_obj(data, pandas_orient="records"): """Attempt to make the data json-able via standard library. Look for some commonly used types that are not jsonable and convert them into json-able ones. Unknown data types are returned as is. Args: data: Data to be converted, works with pandas and numpy, rest will be returned as is. pandas_orient: If `data` is a Pandas DataFrame, it will be converted to a JSON dictionary using this Pandas serialization orientation. 
""" import numpy as np import pandas as pd if isinstance(data, np.ndarray): return data.tolist() if isinstance(data, pd.DataFrame): return data.to_dict(orient=pandas_orient) if isinstance(data, pd.Series): return pd.DataFrame(data).to_dict(orient=pandas_orient) else: # by default just return whatever this is and hope for the best return data def convert_data_type(data, spec): """ Convert input data to the type specified in the spec. Args: data: Input data. spec: ColSpec or TensorSpec. """ import numpy as np from mlflow.models.utils import _enforce_array, _enforce_map, _enforce_object from mlflow.types.schema import AnyType, Array, ColSpec, DataType, Map, Object, TensorSpec try: if spec is None: return np.array(data) if isinstance(spec, TensorSpec): return np.array(data, dtype=spec.type) if isinstance(spec, ColSpec): if isinstance(spec.type, DataType): return ( np.array(data, spec.type.to_numpy()) if isinstance(data, (list, np.ndarray)) else np.array([data], spec.type.to_numpy())[0] ) elif isinstance(spec.type, Array): # convert to numpy array for backwards compatibility return np.array(_enforce_array(data, spec.type, required=spec.required)) elif isinstance(spec.type, Object): return _enforce_object(data, spec.type, required=spec.required) elif isinstance(spec.type, Map): return _enforce_map(data, spec.type, required=spec.required) elif isinstance(spec.type, AnyType): return data except MlflowException as e: raise MlflowInvalidInputException(e.message) except Exception as ex: raise MlflowInvalidInputException(f"{ex}") raise MlflowInvalidInputException( f"Failed to convert data type for data `{data}` with spec `{spec}`." 
) def _cast_schema_type(input_data, schema=None): import numpy as np input_data = deepcopy(input_data) # spec_name -> spec mapping types_dict = schema.input_dict() if schema and schema.has_input_names() else {} if schema is not None: if ( len(types_dict) == 1 and isinstance(input_data, list) and not any(isinstance(x, dict) for x in input_data) ): # for data with a single column (not List[Dict]), match input with column input_data = {next(iter(types_dict)): input_data} # Un-named schema should only contain a single column or a single value elif not schema.has_input_names() and not ( isinstance(input_data, list) or np.isscalar(input_data) ): raise MlflowInvalidInputException( "Failed to parse input data. This model contains an un-named " " model signature which expects a single n-dimensional array or " "a single value as input, however, an input of type " f"{type(input_data)} was found." ) if isinstance(input_data, dict): # each key corresponds to a column, values should be # checked against the schema input_data = { col: convert_data_type(data, types_dict.get(col)) for col, data in input_data.items() } elif isinstance(input_data, list): # List of dictionaries of column_name -> value mapping # List[Dict] must correspond to a schema with named columns if all(isinstance(x, dict) for x in input_data): input_data = [ {col: convert_data_type(value, types_dict.get(col)) for col, value in data.items()} for data in input_data ] # List of values else: spec = schema.inputs[0] if schema else None input_data = convert_data_type(input_data, spec) else: spec = schema.inputs[0] if schema else None try: input_data = convert_data_type(input_data, spec) except Exception as e: raise MlflowInvalidInputException( f"Failed to convert data `{input_data}` to type `{spec}` defined " "in the model signature." 
) from e return input_data def parse_instances_data(data, schema=None): import numpy as np from mlflow.types.schema import Array if "instances" not in data: raise MlflowInvalidInputException("Expecting data to have `instances` as key.") data = data["instances"] # List[Dict] if isinstance(data, list) and len(data) > 0 and isinstance(data[0], dict): # convert items to column format (map column/input name to tensor) data_dict = defaultdict(list) types_dict = schema.input_dict() if schema and schema.has_input_names() else {} for item in data: for col, v in item.items(): data_dict[col].append(convert_data_type(v, types_dict.get(col))) # convert to numpy array for backwards compatibility data = {col: np.array(v) for col, v in data_dict.items()} else: data = _cast_schema_type(data, schema) # Sanity check inputted data. This check will only be applied # when the row-format `instances` is used since it requires # same 0-th dimension for all items. if isinstance(data, dict): # ensure all columns have the same number of items # Only check the data when it's a list or numpy array check_data = {k: v for k, v in data.items() if isinstance(v, (list, np.ndarray))} if schema and schema.has_input_names(): # Only check required columns required_cols = schema.required_input_names() # For Array schema we should not check the length of the data matching check_cols = { col for col, spec in schema.input_dict().items() if not isinstance(spec.type, Array) } check_cols = list(set(required_cols) & check_cols & set(check_data.keys())) else: check_cols = list(check_data.keys()) if check_cols: expected_len = len(check_data[check_cols[0]]) if not all(len(check_data[col]) == expected_len for col in check_cols[1:]): raise MlflowInvalidInputException( "The length of values for each input/column name are not the same" ) return data # TODO: Reuse this function for `inputs` key data parsing in serving, and # add `convert_to_numpy` param to avoid converting data to numpy arrays for # genAI flavors. 
def parse_inputs_data(inputs_data_or_path, schema=None): """ Helper function to cast inputs_data based on the schema. Inputs data must be able to pass to the model for pyfunc predict directly. Args: inputs_data_or_path: A json-serializable object or path to a json file schema: data schema to cast to. Be of type `mlflow.types.Schema`. """ if isinstance(inputs_data_or_path, str) and os.path.exists(inputs_data_or_path): with open(inputs_data_or_path) as handle: inputs_data = json.load(handle) else: inputs_data = inputs_data_or_path return _cast_schema_type(inputs_data, schema) def parse_tf_serving_input(inp_dict, schema=None): """ Args: inp_dict: A dict deserialized from a JSON string formatted as described in TF's serving API doc (https://www.tensorflow.org/tfx/serving/api_rest#request_format_2) schema: MLflow schema used when parsing the data. """ if "signature_name" in inp_dict: raise MlflowInvalidInputException('"signature_name" parameter is currently not supported') if not (list(inp_dict.keys()) == ["instances"] or list(inp_dict.keys()) == ["inputs"]): raise MlflowInvalidInputException( 'One of "instances" and "inputs" must be specified (not both or any other keys).' f"Received: {list(inp_dict.keys())}" ) # Read the JSON try: # objects & arrays schema for List[Dict] and Dict[List] are different # so the conversion for `instances` below changes the schema. # e.g. # [{"col1": 1, "col2": 2}, {"col1": 3, "col2": 4}] -> {"col1": [1, 3], "col2": [2, 4]} # Schema([ColSpec(long, "col1"), ColSpec(long, "col2")]) -> # Schema([ColSpec(Array(long), "col1"), ColSpec(Array(long), "col2")]) # To avoid this, we shouldn't use `instances` for such data. 
if "instances" in inp_dict: return parse_instances_data(inp_dict, schema) else: # items already in column format, convert values to tensor return _cast_schema_type(inp_dict["inputs"], schema) except MlflowException as e: raise e except Exception as e: # Add error into message to provide details for serving usage raise MlflowInvalidInputException( f"Ensure that the input is a valid JSON-formatted string.\nError: {e!r}" ) from e # Reference: https://stackoverflow.com/a/12126976
MlflowFailedTypeConversion
python
ray-project__ray
python/ray/serve/tests/unit/test_deployment_class.py
{ "start": 1075, "end": 2138 }
class ____: def test_empty(self): assert get_random_dict_combos({}, 1) == [{}] def test_basic(self): d = {"a": 1, "b": 2, "c": 3} combos = get_random_dict_combos(d, 8) # Sort combos for comparison (sort by length, break ties by value sum) combos.sort(key=lambda d: len(d) * 100 + sum(d.values())) assert combos == [ # Dictionaries of length 0 {}, # Dictionaries of length 1 *({"a": 1}, {"b": 2}, {"c": 3}), # Dictionaries of length 2 *({"a": 1, "b": 2}, {"a": 1, "c": 3}, {"b": 2, "c": 3}), # Dictionaries of length 3 {"a": 1, "b": 2, "c": 3}, ] def test_len(self): d = {i: i + 1 for i in range(50)} assert len(get_random_dict_combos(d, 1000)) == 1000 def test_randomness(self): d = {i: i + 1 for i in range(1000)} combo1 = get_random_dict_combos(d, 1000)[0] combo2 = get_random_dict_combos(d, 1000)[0] assert combo1 != combo2
TestGetDictCombos
python
facebookresearch__faiss
demos/index_pq_flat_separate_codes_from_codebook.py
{ "start": 5253, "end": 8362 }
class ____: type_: str index: faiss.Index args: tuple recall: float results = [] for m, nbits in pq_m_nbits: print("pq", m, nbits) index = faiss.index_factory(d, f"IDMap2,PQ{m}x{nbits}") index.train(training_data) index.add_with_ids(database_vector_float32s, database_vector_ids) _, result_ids = index.search(query_vector_float32s, k=1) recall = sum(result_ids == ground_truth_result_ids) results.append(Record("pq", index, (m, nbits), recall)) for nbits in lsh_nbits: print("lsh", nbits) index = faiss.IndexIDMap2(faiss.IndexLSH(d, nbits)) index.add_with_ids(database_vector_float32s, database_vector_ids) _, result_ids = index.search(query_vector_float32s, k=1) recall = sum(result_ids == ground_truth_result_ids) results.append(Record("lsh", index, (nbits,), recall)) """:py '556918346720794'""" import matplotlib.pyplot as plt import numpy as np def create_grouped_bar_chart(x_values, y_values_list, labels_list, xlabel, ylabel, title): num_bars_per_group = len(x_values) plt.figure(figsize=(12, 6)) for x, y_values, labels in zip(x_values, y_values_list, labels_list): num_bars = len(y_values) bar_width = 0.08 * x bar_positions = np.arange(num_bars) * bar_width - (num_bars - 1) * bar_width / 2 + x bars = plt.bar(bar_positions, y_values, width=bar_width) for bar, label in zip(bars, labels): height = bar.get_height() plt.annotate( label, xy=(bar.get_x() + bar.get_width() / 2, height), xytext=(0, 3), textcoords="offset points", ha='center', va='bottom' ) plt.xscale('log') plt.xlabel(xlabel) plt.ylabel(ylabel) plt.title(title) plt.xticks(x_values, labels=[str(x) for x in x_values]) plt.tight_layout() plt.show() # # Example usage: # x_values = [1, 2, 4, 8, 16, 32] # y_values_list = [ # [2.5, 3.6, 1.8], # [3.0, 2.8], # [2.5, 3.5, 4.0, 1.0], # [4.2], # [3.0, 5.5, 2.2], # [6.0, 4.5] # ] # labels_list = [ # ['A1', 'B1', 'C1'], # ['A2', 'B2'], # ['A3', 'B3', 'C3', 'D3'], # ['A4'], # ['A5', 'B5', 'C5'], # ['A6', 'B6'] # ] # create_grouped_bar_chart(x_values, y_values_list, labels_list, 
"x axis", "y axis", "title") """:py '1630106834206134'""" # x-axis: compression ratio # y-axis: recall@1 from collections import defaultdict x = defaultdict(list) x[1].append(("flat", 1.00)) for r in results: y_value = r.recall[0] / n x_value = int(d * 4 / r.index.sa_code_size()) label = None if r.type_ == "pq": label = f"PQ{r.args[0]}x{r.args[1]}" if r.type_ == "lsh": label = f"LSH{r.args[0]}" x[x_value].append((label, y_value)) x_values = sorted(list(x.keys())) create_grouped_bar_chart( x_values, [[e[1] for e in x[x_value]] for x_value in x_values], [[e[0] for e in x[x_value]] for x_value in x_values], "compression ratio", "recall@1 q=1,000 queries", "recall@1 for a database of n=1,000 d=768 vectors", )
Record
python
python-visualization__folium
folium/plugins/side_by_side.py
{ "start": 119, "end": 1574 }
class ____(JSCSSMixin, MacroElement): """ Creates a SideBySideLayers that takes two Layers and adds a sliding control with the leaflet-side-by-side plugin. Uses the Leaflet leaflet-side-by-side plugin https://github.com/digidem/leaflet-side-by-side Parameters ---------- layer_left: Layer. The left Layer within the side by side control. Must be created and added to the map before being passed to this class. layer_right: Layer. The right Layer within the side by side control. Must be created and added to the map before being passed to this class. Examples -------- >>> sidebyside = SideBySideLayers(layer_left, layer_right) >>> sidebyside.add_to(m) """ _template = Template( """ {% macro script(this, kwargs) %} var {{ this.get_name() }} = L.control.sideBySide( {{ this.layer_left.get_name() }}, {{ this.layer_right.get_name() }} ).addTo({{ this._parent.get_name() }}); {% endmacro %} """ ) default_js = [ ( "leaflet.sidebyside", "https://cdn.jsdelivr.net/gh/digidem/leaflet-side-by-side@2.0.0/leaflet-side-by-side.min.js", ), ] def __init__(self, layer_left, layer_right): super().__init__() self._name = "SideBySideLayers" self.layer_left = layer_left self.layer_right = layer_right
SideBySideLayers