Column summary (name, dtype, reported value range):

    body                     string   26 to 98.2k characters
    body_hash                int64    -9,222,864,604,528,158,000 to 9,221,803,474B (viewer abbreviation; roughly the full int64 range)
    docstring                string   1 to 16.8k characters
    path                     string   5 to 230 characters
    name                     string   1 to 96 characters
    repository_name          string   7 to 89 characters
    lang                     stringclasses, 1 value ("python")
    body_without_docstring   string   20 to 98.2k characters

Rows appear below in this column order: body, body_hash, docstring, path, name, repository_name, lang, body_without_docstring. The body_without_docstring cell is shown only where it actually differs from body.
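This schema matches a code-search style corpus of Python functions paired with their docstrings. A minimal sketch of iterating such a dataset with the Hugging Face datasets library; the dataset id below is a hypothetical placeholder, not taken from this document:

from datasets import load_dataset

ds = load_dataset('user/python-functions-with-docstrings', split='train', streaming=True)
for row in ds:
    print(row['repository_name'], row['path'], row['name'])
    print(row['docstring'][:80])
    break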
def __init__(self, temboo_session):
    """
    Create a new instance of the ListMembers Choreo. A TembooSession object, containing a valid
    set of Temboo credentials, must be supplied.
    """
    super(ListMembers, self).__init__(temboo_session, '/Library/MailChimp/ListMembers')
7,404,257,545,218,842,000
Create a new instance of the ListMembers Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied.
temboo/Library/MailChimp/ListMembers.py
__init__
jordanemedlock/psychtruths
python
def set_APIKey(self, value):
    """
    Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Mailchimp.)
    """
    super(ListMembersInputSet, self)._set_input('APIKey', value)
2,786,447,955,702,532,000
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Mailchimp.)
temboo/Library/MailChimp/ListMembers.py
set_APIKey
jordanemedlock/psychtruths
python
def set_APIKey(self, value):
    super(ListMembersInputSet, self)._set_input('APIKey', value)
def set_Limit(self, value):
    """
    Set the value of the Limit input for this Choreo. ((optional, integer) Specifies the number of records in a page to be returned. Must be greater than zero and less than or equal to 15000. Defaults to 100.)
    """
    super(ListMembersInputSet, self)._set_input('Limit', value)
-6,993,926,749,332,148,000
Set the value of the Limit input for this Choreo. ((optional, integer) Specifies the number of records in a page to be returned. Must be greater than zero and less than or equal to 15000. Defaults to 100.)
temboo/Library/MailChimp/ListMembers.py
set_Limit
jordanemedlock/psychtruths
python
def set_Limit(self, value):
    super(ListMembersInputSet, self)._set_input('Limit', value)
def set_ListId(self, value):
    """
    Set the value of the ListId input for this Choreo. ((required, string) The id of the Mailchimp list to retrieve members from.)
    """
    super(ListMembersInputSet, self)._set_input('ListId', value)
3,846,483,881,471,627,000
Set the value of the ListId input for this Choreo. ((required, string) The id of the Mailchimp list to retrieve members from.)
temboo/Library/MailChimp/ListMembers.py
set_ListId
jordanemedlock/psychtruths
python
def set_ListId(self, value):
    super(ListMembersInputSet, self)._set_input('ListId', value)
def set_ResponseFormat(self, value):
    """
    Set the value of the ResponseFormat input for this Choreo. ((optional, string) Indicates the desired format for the response. Accepted values are "json" or "xml" (the default).)
    """
    super(ListMembersInputSet, self)._set_input('ResponseFormat', value)
-5,770,926,192,589,782,000
Set the value of the ResponseFormat input for this Choreo. ((optional, string) Indicates the desired format for the response. Accepted values are "json" or "xml" (the default).)
temboo/Library/MailChimp/ListMembers.py
set_ResponseFormat
jordanemedlock/psychtruths
python
def set_ResponseFormat(self, value):
    super(ListMembersInputSet, self)._set_input('ResponseFormat', value)
def set_Since(self, value): "\n Set the value of the Since input for this Choreo. ((optional, date) Retrieves records that have changed since this date/time. Formatted like 'YYYY-MM-DD HH:MM:SS.)\n " super(ListMembersInputSet, self)._set_input('Since', value)
-1,359,020,157,680,741,400
Set the value of the Since input for this Choreo. ((optional, date) Retrieves records that have changed since this date/time. Formatted like 'YYYY-MM-DD HH:MM:SS'.)
temboo/Library/MailChimp/ListMembers.py
set_Since
jordanemedlock/psychtruths
python
def set_Since(self, value): "\n \n " super(ListMembersInputSet, self)._set_input('Since', value)
def set_Start(self, value):
    """
    Set the value of the Start input for this Choreo. ((optional, integer) Specifies the page at which to begin returning records. Page size is defined by the limit argument. Must be zero or greater. Defaults to 0.)
    """
    super(ListMembersInputSet, self)._set_input('Start', value)
3,804,596,894,647,427,000
Set the value of the Start input for this Choreo. ((optional, integer) Specifies the page at which to begin returning records. Page size is defined by the limit argument. Must be zero or greater. Defaults to 0.)
temboo/Library/MailChimp/ListMembers.py
set_Start
jordanemedlock/psychtruths
python
def set_Start(self, value):
    super(ListMembersInputSet, self)._set_input('Start', value)
def set_Status(self, value): "\n Set the value of the Status input for this Choreo. ((optional, string) Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'updated'. Defaults to 'subscribed'.)\n " super(ListMembersInputSet, self)._set_input('Status', value)
-2,203,140,081,308,092,000
Set the value of the Status input for this Choreo. ((optional, string) Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'updated'. Defaults to 'subscribed'.)
temboo/Library/MailChimp/ListMembers.py
set_Status
jordanemedlock/psychtruths
python
def set_Status(self, value): "\n \n " super(ListMembersInputSet, self)._set_input('Status', value)
def get_Response(self):
    """
    Retrieve the value for the "Response" output from this Choreo execution. (The response from Mailchimp. Corresponds to the format specified in the ResponseFormat parameter. Defaults to "xml".)
    """
    return self._output.get('Response', None)
1,283,719,462,627,130,400
Retrieve the value for the "Response" output from this Choreo execution. (The response from Mailchimp. Corresponds to the format specified in the ResponseFormat parameter. Defaults to "xml".)
temboo/Library/MailChimp/ListMembers.py
get_Response
jordanemedlock/psychtruths
python
def get_Response(self):
    return self._output.get('Response', None)
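Taken together, these rows show the generated Temboo Choreo surface for MailChimp's ListMembers: a constructor bound to a session, typed input setters, and an output getter. A minimal usage sketch, assuming the standard Temboo SDK helpers new_input_set and execute_with_results (they do not appear in the rows above):

from temboo.core.session import TembooSession
from temboo.Library.MailChimp.ListMembers import ListMembers

session = TembooSession('ACCOUNT', 'APP_KEY_NAME', 'APP_KEY_VALUE')  # placeholder credentials
choreo = ListMembers(session)
inputs = choreo.new_input_set()                  # assumed SDK helper
inputs.set_APIKey('MAILCHIMP_API_KEY')           # required
inputs.set_ListId('abc123def4')                  # required; hypothetical list id
inputs.set_ResponseFormat('json')                # optional, defaults to "xml"
results = choreo.execute_with_results(inputs)    # assumed SDK helper
print(results.get_Response())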
def description_of(lines, name='stdin'):
    """
    Return a string describing the probable encoding of a file or
    list of strings.

    :param lines: The lines to get the encoding of.
    :type lines: Iterable of bytes
    :param name: Name of file or collection of lines
    :type name: str
    """
    u = UniversalDetector()
    for line in lines:
        line = bytearray(line)
        u.feed(line)
        if u.done:
            break
    u.close()
    result = u.result
    if PY2:
        name = name.decode(sys.getfilesystemencoding(), 'ignore')
    if result['encoding']:
        return '{0}: {1} with confidence {2}'.format(name, result['encoding'], result['confidence'])
    else:
        return '{0}: no result'.format(name)
-4,948,760,566,063,939,000
Return a string describing the probable encoding of a file or list of strings. :param lines: The lines to get the encoding of. :type lines: Iterable of bytes :param name: Name of file or collection of lines :type name: str
venv/lib/python3.8/site-packages/pip/_vendor/chardet/cli/chardetect.py
description_of
fortbox/leetcode-solve
python
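Since description_of accepts any iterable of byte strings, a file opened in binary mode can be passed straight in (the filename is a placeholder):

with open('mystery.txt', 'rb') as f:
    print(description_of(f, name='mystery.txt'))
# prints e.g. "mystery.txt: utf-8 with confidence 0.99", or "mystery.txt: no result"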
def main(argv=None):
    """
    Handles command line arguments and gets things started.

    :param argv: List of arguments, as if specified on the command-line.
                 If None, ``sys.argv[1:]`` is used instead.
    :type argv: list of str
    """
    parser = argparse.ArgumentParser(description='Takes one or more file paths and reports their detected encodings')
    parser.add_argument('input',
                        help='File whose encoding we would like to determine. (default: stdin)',
                        type=argparse.FileType('rb'), nargs='*',
                        default=[sys.stdin if PY2 else sys.stdin.buffer])
    parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(__version__))
    args = parser.parse_args(argv)

    for f in args.input:
        if f.isatty():
            print('You are running chardetect interactively. Press '
                  'CTRL-D twice at the start of a blank line to signal the '
                  'end of your input. If you want help, run chardetect '
                  '--help\n', file=sys.stderr)
        print(description_of(f, f.name))
1,331,823,930,218,164,700
Handles command line arguments and gets things started. :param argv: List of arguments, as if specified on the command-line. If None, ``sys.argv[1:]`` is used instead. :type argv: list of str
venv/lib/python3.8/site-packages/pip/_vendor/chardet/cli/chardetect.py
main
fortbox/leetcode-solve
python
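Because main takes an explicit argv list, it can be exercised programmatically as well as through the chardetect console script:

main(['mystery.txt', 'other.txt'])   # placeholder file names
# equivalent to the shell invocation: chardetect mystery.txt other.txt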
def MediaBackend():
    """Media storage backend."""
    return S3Boto3Storage(location='media')
-1,886,577,899,193,967,900
Media storage backend.
aws/backends.py
MediaBackend
florimondmanca/personal-api
python
def MediaBackend():
    return S3Boto3Storage(location='media')
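Because Django resolves its storage setting by importing the dotted path and calling it, this zero-argument factory can stand in for a storage class. A sketch of the wiring, assuming a classic django-storages setup (the setting name predates Django 4.2's STORAGES dict):

# settings.py
DEFAULT_FILE_STORAGE = 'aws.backends.MediaBackend'  # imported and called by Django; yields S3Boto3Storage(location='media')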
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             action_group_name: Optional[pulumi.Input[str]] = None,
             automation_runbook_receivers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AutomationRunbookReceiverArgs']]]]] = None,
             azure_app_push_receivers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AzureAppPushReceiverArgs']]]]] = None,
             email_receivers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EmailReceiverArgs']]]]] = None,
             enabled: Optional[pulumi.Input[bool]] = None,
             group_short_name: Optional[pulumi.Input[str]] = None,
             itsm_receivers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ItsmReceiverArgs']]]]] = None,
             location: Optional[pulumi.Input[str]] = None,
             resource_group_name: Optional[pulumi.Input[str]] = None,
             sms_receivers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SmsReceiverArgs']]]]] = None,
             tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             webhook_receivers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WebhookReceiverArgs']]]]] = None,
             __props__=None, __name__=None, __opts__=None):
    """
    An action group resource.

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] action_group_name: The name of the action group.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AutomationRunbookReceiverArgs']]]] automation_runbook_receivers: The list of AutomationRunbook receivers that are part of this action group.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AzureAppPushReceiverArgs']]]] azure_app_push_receivers: The list of AzureAppPush receivers that are part of this action group.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EmailReceiverArgs']]]] email_receivers: The list of email receivers that are part of this action group.
    :param pulumi.Input[bool] enabled: Indicates whether this action group is enabled. If an action group is not enabled, then none of its receivers will receive communications.
    :param pulumi.Input[str] group_short_name: The short name of the action group. This will be used in SMS messages.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ItsmReceiverArgs']]]] itsm_receivers: The list of ITSM receivers that are part of this action group.
    :param pulumi.Input[str] location: Resource location
    :param pulumi.Input[str] resource_group_name: The name of the resource group.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SmsReceiverArgs']]]] sms_receivers: The list of SMS receivers that are part of this action group.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WebhookReceiverArgs']]]] webhook_receivers: The list of webhook receivers that are part of this action group.
    """
    if __name__ is not None:
        warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning)
        resource_name = __name__
    if __opts__ is not None:
        warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
        opts = __opts__
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = dict()
        __props__['action_group_name'] = action_group_name
        __props__['automation_runbook_receivers'] = automation_runbook_receivers
        __props__['azure_app_push_receivers'] = azure_app_push_receivers
        __props__['email_receivers'] = email_receivers
        if enabled is None:
            enabled = True
        if enabled is None and not opts.urn:
            raise TypeError("Missing required property 'enabled'")
        __props__['enabled'] = enabled
        if group_short_name is None and not opts.urn:
            raise TypeError("Missing required property 'group_short_name'")
        __props__['group_short_name'] = group_short_name
        __props__['itsm_receivers'] = itsm_receivers
        __props__['location'] = location
        if resource_group_name is None and not opts.urn:
            raise TypeError("Missing required property 'resource_group_name'")
        __props__['resource_group_name'] = resource_group_name
        __props__['sms_receivers'] = sms_receivers
        __props__['tags'] = tags
        __props__['webhook_receivers'] = webhook_receivers
        __props__['name'] = None
        __props__['type'] = None
    alias_opts = pulumi.ResourceOptions(aliases=[
        pulumi.Alias(type_='azure-nextgen:insights/v20170401:ActionGroup'),
        pulumi.Alias(type_='azure-native:insights:ActionGroup'),
        pulumi.Alias(type_='azure-nextgen:insights:ActionGroup'),
        pulumi.Alias(type_='azure-native:insights/latest:ActionGroup'),
        pulumi.Alias(type_='azure-nextgen:insights/latest:ActionGroup'),
        pulumi.Alias(type_='azure-native:insights/v20180301:ActionGroup'),
        pulumi.Alias(type_='azure-nextgen:insights/v20180301:ActionGroup'),
        pulumi.Alias(type_='azure-native:insights/v20180901:ActionGroup'),
        pulumi.Alias(type_='azure-nextgen:insights/v20180901:ActionGroup'),
        pulumi.Alias(type_='azure-native:insights/v20190301:ActionGroup'),
        pulumi.Alias(type_='azure-nextgen:insights/v20190301:ActionGroup'),
        pulumi.Alias(type_='azure-native:insights/v20190601:ActionGroup'),
        pulumi.Alias(type_='azure-nextgen:insights/v20190601:ActionGroup')])
    opts = pulumi.ResourceOptions.merge(opts, alias_opts)
    super(ActionGroup, __self__).__init__('azure-native:insights/v20170401:ActionGroup', resource_name, __props__, opts)
7,079,130,205,066,804,000
An action group resource. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] action_group_name: The name of the action group. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AutomationRunbookReceiverArgs']]]] automation_runbook_receivers: The list of AutomationRunbook receivers that are part of this action group. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AzureAppPushReceiverArgs']]]] azure_app_push_receivers: The list of AzureAppPush receivers that are part of this action group. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EmailReceiverArgs']]]] email_receivers: The list of email receivers that are part of this action group. :param pulumi.Input[bool] enabled: Indicates whether this action group is enabled. If an action group is not enabled, then none of its receivers will receive communications. :param pulumi.Input[str] group_short_name: The short name of the action group. This will be used in SMS messages. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ItsmReceiverArgs']]]] itsm_receivers: The list of ITSM receivers that are part of this action group. :param pulumi.Input[str] location: Resource location :param pulumi.Input[str] resource_group_name: The name of the resource group. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SmsReceiverArgs']]]] sms_receivers: The list of SMS receivers that are part of this action group. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WebhookReceiverArgs']]]] webhook_receivers: The list of webhook receivers that are part of this action group.
sdk/python/pulumi_azure_native/insights/v20170401/action_group.py
__init__
pulumi-bot/pulumi-azure-native
python
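A sketch of declaring the resource in a Pulumi program; the EmailReceiverArgs field names are assumed here, not taken from the rows above:

import pulumi
from pulumi_azure_native.insights.v20170401 import ActionGroup, EmailReceiverArgs

group = ActionGroup(
    'alerts',                          # Pulumi resource name
    resource_group_name='my-rg',       # required; hypothetical resource group
    group_short_name='alerts',         # required; used in SMS messages
    enabled=True,
    location='Global',
    email_receivers=[EmailReceiverArgs(name='ops', email_address='ops@example.com')],  # field names assumed
)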
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None) -> 'ActionGroup':
    """
    Get an existing ActionGroup resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = dict()
    __props__['automation_runbook_receivers'] = None
    __props__['azure_app_push_receivers'] = None
    __props__['email_receivers'] = None
    __props__['enabled'] = None
    __props__['group_short_name'] = None
    __props__['itsm_receivers'] = None
    __props__['location'] = None
    __props__['name'] = None
    __props__['sms_receivers'] = None
    __props__['tags'] = None
    __props__['type'] = None
    return ActionGroup(resource_name, opts=opts, __props__=__props__)
2,591,240,440,721,210,400
Get an existing ActionGroup resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_azure_native/insights/v20170401/action_group.py
get
pulumi-bot/pulumi-azure-native
python
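A matching sketch for adopting existing state via the static get method; the Azure resource id is a placeholder:

existing = ActionGroup.get(
    'imported-group',
    id='/subscriptions/.../resourceGroups/my-rg/providers/microsoft.insights/actionGroups/alerts',  # hypothetical id
)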
@property
@pulumi.getter(name='automationRunbookReceivers')
def automation_runbook_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.AutomationRunbookReceiverResponse']]]:
    """
    The list of AutomationRunbook receivers that are part of this action group.
    """
    return pulumi.get(self, 'automation_runbook_receivers')
-5,235,239,916,796,044,000
The list of AutomationRunbook receivers that are part of this action group.
sdk/python/pulumi_azure_native/insights/v20170401/action_group.py
automation_runbook_receivers
pulumi-bot/pulumi-azure-native
python
@property
@pulumi.getter(name='automationRunbookReceivers')
def automation_runbook_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.AutomationRunbookReceiverResponse']]]:
    return pulumi.get(self, 'automation_runbook_receivers')
@property
@pulumi.getter(name='azureAppPushReceivers')
def azure_app_push_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.AzureAppPushReceiverResponse']]]:
    """
    The list of AzureAppPush receivers that are part of this action group.
    """
    return pulumi.get(self, 'azure_app_push_receivers')
-1,548,000,125,861,752,600
The list of AzureAppPush receivers that are part of this action group.
sdk/python/pulumi_azure_native/insights/v20170401/action_group.py
azure_app_push_receivers
pulumi-bot/pulumi-azure-native
python
@property
@pulumi.getter(name='azureAppPushReceivers')
def azure_app_push_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.AzureAppPushReceiverResponse']]]:
    return pulumi.get(self, 'azure_app_push_receivers')
@property
@pulumi.getter(name='emailReceivers')
def email_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.EmailReceiverResponse']]]:
    """
    The list of email receivers that are part of this action group.
    """
    return pulumi.get(self, 'email_receivers')
46,405,372,373,859,880
The list of email receivers that are part of this action group.
sdk/python/pulumi_azure_native/insights/v20170401/action_group.py
email_receivers
pulumi-bot/pulumi-azure-native
python
@property
@pulumi.getter(name='emailReceivers')
def email_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.EmailReceiverResponse']]]:
    return pulumi.get(self, 'email_receivers')
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[bool]:
    """
    Indicates whether this action group is enabled. If an action group is not enabled, then none of its receivers will receive communications.
    """
    return pulumi.get(self, 'enabled')
-3,715,311,298,690,319,400
Indicates whether this action group is enabled. If an action group is not enabled, then none of its receivers will receive communications.
sdk/python/pulumi_azure_native/insights/v20170401/action_group.py
enabled
pulumi-bot/pulumi-azure-native
python
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[bool]:
    return pulumi.get(self, 'enabled')
@property
@pulumi.getter(name='groupShortName')
def group_short_name(self) -> pulumi.Output[str]:
    """
    The short name of the action group. This will be used in SMS messages.
    """
    return pulumi.get(self, 'group_short_name')
-600,390,329,182,447,400
The short name of the action group. This will be used in SMS messages.
sdk/python/pulumi_azure_native/insights/v20170401/action_group.py
group_short_name
pulumi-bot/pulumi-azure-native
python
@property
@pulumi.getter(name='groupShortName')
def group_short_name(self) -> pulumi.Output[str]:
    return pulumi.get(self, 'group_short_name')
@property
@pulumi.getter(name='itsmReceivers')
def itsm_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.ItsmReceiverResponse']]]:
    """
    The list of ITSM receivers that are part of this action group.
    """
    return pulumi.get(self, 'itsm_receivers')
2,912,169,956,100,402,700
The list of ITSM receivers that are part of this action group.
sdk/python/pulumi_azure_native/insights/v20170401/action_group.py
itsm_receivers
pulumi-bot/pulumi-azure-native
python
@property
@pulumi.getter(name='itsmReceivers')
def itsm_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.ItsmReceiverResponse']]]:
    return pulumi.get(self, 'itsm_receivers')
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
    """
    Resource location
    """
    return pulumi.get(self, 'location')
2,974,713,878,710,662,000
Resource location
sdk/python/pulumi_azure_native/insights/v20170401/action_group.py
location
pulumi-bot/pulumi-azure-native
python
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
    return pulumi.get(self, 'location')
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    Azure resource name
    """
    return pulumi.get(self, 'name')
-1,714,126,423,700,497,000
Azure resource name
sdk/python/pulumi_azure_native/insights/v20170401/action_group.py
name
pulumi-bot/pulumi-azure-native
python
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    return pulumi.get(self, 'name')
@property
@pulumi.getter(name='smsReceivers')
def sms_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.SmsReceiverResponse']]]:
    """
    The list of SMS receivers that are part of this action group.
    """
    return pulumi.get(self, 'sms_receivers')
-7,178,998,211,520,635,000
The list of SMS receivers that are part of this action group.
sdk/python/pulumi_azure_native/insights/v20170401/action_group.py
sms_receivers
pulumi-bot/pulumi-azure-native
python
@property
@pulumi.getter(name='smsReceivers')
def sms_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.SmsReceiverResponse']]]:
    return pulumi.get(self, 'sms_receivers')
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    Resource tags
    """
    return pulumi.get(self, 'tags')
-1,239,552,863,427,208,400
Resource tags
sdk/python/pulumi_azure_native/insights/v20170401/action_group.py
tags
pulumi-bot/pulumi-azure-native
python
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    return pulumi.get(self, 'tags')
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
    """
    Azure resource type
    """
    return pulumi.get(self, 'type')
-3,038,610,106,204,977,000
Azure resource type
sdk/python/pulumi_azure_native/insights/v20170401/action_group.py
type
pulumi-bot/pulumi-azure-native
python
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
    return pulumi.get(self, 'type')
@property
@pulumi.getter(name='webhookReceivers')
def webhook_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.WebhookReceiverResponse']]]:
    """
    The list of webhook receivers that are part of this action group.
    """
    return pulumi.get(self, 'webhook_receivers')
-4,920,229,458,121,997
The list of webhook receivers that are part of this action group.
sdk/python/pulumi_azure_native/insights/v20170401/action_group.py
webhook_receivers
pulumi-bot/pulumi-azure-native
python
@property
@pulumi.getter(name='webhookReceivers')
def webhook_receivers(self) -> pulumi.Output[Optional[Sequence['outputs.WebhookReceiverResponse']]]:
    return pulumi.get(self, 'webhook_receivers')
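All of these property getters return pulumi.Output values, which resolve asynchronously during deployment and are consumed with pulumi.export or Output.apply rather than read directly. For example, with the group resource from the creation sketch above:

pulumi.export('short_name', group.group_short_name)      # surfaces the value in `pulumi stack output`
group.enabled.apply(lambda e: print('enabled:', e))      # runs once the output resolves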
def _area_tables_binning_parallel(source_df, target_df, n_jobs=-1):
    """Construct area allocation and source-target correspondence tables using
    a parallel spatial indexing approach
    ...

    NOTE: currently, the largest df is chunked and the other one is shipped in
    full to each core; within each process, the spatial index is built for the
    largest set of geometries, and the other one used for `query_bulk`

    Parameters
    ----------
    source_df : geopandas.GeoDataFrame
        GeoDataFrame containing input data and polygons
    target_df : geopandas.GeoDataFrame
        GeoDataFrame defining the output geometries
    n_jobs : int
        [Optional. Default=-1] Number of processes to run in parallel. If -1,
        this is set to the number of CPUs available

    Returns
    -------
    tables : scipy.sparse.dok_matrix
    """
    from joblib import Parallel, delayed, parallel_backend

    if not _check_crs(source_df, target_df):
        return None
    if n_jobs == -1:
        n_jobs = os.cpu_count()

    df1 = source_df.copy()
    df2 = target_df.copy()
    # Chunk the larger frame; ship the smaller one whole to each worker.
    # (The upstream source compares df1.shape[0] against df2.shape[1],
    # i.e. rows against columns; row counts are clearly intended.)
    if df1.shape[0] > df2.shape[0]:
        to_chunk = df1
        df_full = df2
    else:
        to_chunk = df2
        df_full = df1
    to_workers = _chunk_dfs(
        gpd.GeoSeries(to_chunk.geometry.values, crs=to_chunk.crs),
        gpd.GeoSeries(df_full.geometry.values, crs=df_full.crs),
        n_jobs,
    )
    with parallel_backend('loky', inner_max_num_threads=1):
        worker_out = Parallel(n_jobs=n_jobs)(
            delayed(_index_n_query)(*chunk_pair) for chunk_pair in to_workers
        )
    ids_src, ids_tgt = np.concatenate(worker_out).T
    chunks_to_intersection = _chunk_polys(
        np.vstack([ids_src, ids_tgt]).T, df1.geometry, df2.geometry, n_jobs
    )
    with parallel_backend('loky', inner_max_num_threads=1):
        worker_out = Parallel(n_jobs=n_jobs)(
            delayed(_intersect_area_on_chunk)(*chunk_pair)
            for chunk_pair in chunks_to_intersection
        )
    areas = np.concatenate(worker_out)
    table = coo_matrix(
        (areas, (ids_src, ids_tgt)),
        shape=(df1.shape[0], df2.shape[0]),
        dtype=np.float32,
    )
    return table.todok()
-2,854,547,330,361,908,000
Construct area allocation and source-target correspondence tables using a parallel spatial indexing approach ... NOTE: currently, the largest df is chunked and the other one is shipped in full to each core; within each process, the spatial index is built for the largest set of geometries, and the other one used for `query_bulk` Parameters ---------- source_df : geopandas.GeoDataFrame GeoDataFrame containing input data and polygons target_df : geopandas.GeoDataFrame GeoDataFrame defining the output geometries n_jobs : int [Optional. Default=-1] Number of processes to run in parallel. If -1, this is set to the number of CPUs available Returns ------- tables : scipy.sparse.dok_matrix
tobler/area_weighted/area_interpolate.py
_area_tables_binning_parallel
AnGWar26/tobler
python
def _area_tables_binning(source_df, target_df, spatial_index):
    """Construct area allocation and source-target correspondence tables using a spatial indexing approach
    ...

    NOTE: this currently relies on Geopandas' spatial index machinery

    Parameters
    ----------
    source_df : geopandas.GeoDataFrame
        GeoDataFrame containing input data and polygons
    target_df : geopandas.GeoDataFrame
        GeoDataFrame defining the output geometries
    spatial_index : str
        Spatial index to use to build the allocation of area from source to
        target tables. It currently supports the following values:
        - "source": build the spatial index on `source_df`
        - "target": build the spatial index on `target_df`
        - "auto": attempts to guess the most efficient alternative.
          Currently, this option uses the largest table to build the
          index, and performs a `bulk_query` on the shorter table.

    Returns
    -------
    tables : scipy.sparse.dok_matrix
    """
    if not _check_crs(source_df, target_df):
        return None
    df1 = source_df.copy()
    df2 = target_df.copy()

    if spatial_index == 'auto':
        spatial_index = 'source' if df1.shape[0] > df2.shape[0] else 'target'
    if spatial_index == 'source':
        ids_tgt, ids_src = df1.sindex.query_bulk(df2.geometry, predicate='intersects')
    elif spatial_index == 'target':
        ids_src, ids_tgt = df2.sindex.query_bulk(df1.geometry, predicate='intersects')
    else:
        raise ValueError(f"'{spatial_index}' is not a valid option. Use 'auto', 'source' or 'target'.")

    areas = df1.geometry.values[ids_src].intersection(df2.geometry.values[ids_tgt]).area
    table = coo_matrix(
        (areas, (ids_src, ids_tgt)),
        shape=(df1.shape[0], df2.shape[0]),
        dtype=np.float32,
    )
    return table.todok()
975,538,696,487,234,300
Construct area allocation and source-target correspondence tables using a spatial indexing approach ... NOTE: this currently relies on Geopandas' spatial index machinery Parameters ---------- source_df : geopandas.GeoDataFrame GeoDataFrame containing input data and polygons target_df : geopandas.GeoDataFrame GeoDataFrame defining the output geometries spatial_index : str Spatial index to use to build the allocation of area from source to target tables. It currently supports the following values: - "source": build the spatial index on `source_df` - "target": build the spatial index on `target_df` - "auto": attempts to guess the most efficient alternative. Currently, this option uses the largest table to build the index, and performs a `bulk_query` on the shorter table. Returns ------- tables : scipy.sparse.dok_matrix
tobler/area_weighted/area_interpolate.py
_area_tables_binning
AnGWar26/tobler
python
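Both binning helpers return the same scipy.sparse.dok_matrix of pairwise intersection areas, so callers can switch between them freely. A toy sketch with two unit squares and one straddling target polygon (the frames and column names here are illustrative, not from the dataset):

import geopandas as gpd
import numpy as np
from shapely.geometry import box

src = gpd.GeoDataFrame({'pop': [100, 200]},
                       geometry=[box(0, 0, 1, 1), box(1, 0, 2, 1)], crs='EPSG:3857')
tgt = gpd.GeoDataFrame(geometry=[box(0.5, 0, 1.5, 1)], crs='EPSG:3857')

table = _area_tables_binning(src, tgt, spatial_index='auto')        # sequential
# table = _area_tables_binning_parallel(src, tgt, n_jobs=2)         # parallel variant
print(table.toarray())  # [[0.5], [0.5]]: overlap area of each source with the target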
def _area_tables(source_df, target_df):
    """
    Construct area allocation and source-target correspondence tables.

    Parameters
    ----------
    source_df : geopandas.GeoDataFrame
    target_df : geopandas.GeoDataFrame

    Returns
    -------
    tables : tuple (optional)
        two 2-D numpy arrays
        SU: area of intersection of source geometry i with union geometry j
        UT: binary mapping of union geometry j to target geometry t

    Notes
    -----
    The assumption is both dataframes have the same coordinate reference system.

    Union geometry is a geometry formed by the intersection of a source geometry and a target geometry.

    SU maps source geometry to union geometry, UT maps union geometry to target geometry.
    """
    if not _check_crs(source_df, target_df):
        return None
    source_df = source_df.copy()
    target_df = target_df.copy()  # upstream copies source_df twice here; copying target_df is clearly intended
    n_s = source_df.shape[0]
    n_t = target_df.shape[0]
    _left = np.arange(n_s)
    _right = np.arange(n_t)
    source_df.loc[:, '_left'] = _left
    target_df.loc[:, '_right'] = _right
    res_union = gpd.overlay(source_df, target_df, how='union')
    n_u, _ = res_union.shape
    SU = np.zeros((n_s, n_u))  # SU[i, j]: area of source i inside union piece j
    UT = np.zeros((n_u, n_t))  # UT[j, t]: 1 if union piece j falls in target t
    for index, row in res_union.iterrows():
        if not np.isnan(row['_left']) and not np.isnan(row['_right']):
            s_id = int(row['_left'])
            t_id = int(row['_right'])
            SU[s_id, index] = row[row.geometry.name].area
            UT[index, t_id] = 1
    source_df.drop(['_left'], axis=1, inplace=True)
    target_df.drop(['_right'], axis=1, inplace=True)
    return (SU, UT)
5,719,887,546,585,006,000
Construct area allocation and source-target correspondence tables. Parameters ---------- source_df : geopandas.GeoDataFrame target_df : geopandas.GeoDataFrame Returns ------- tables : tuple (optional) two 2-D numpy arrays SU: area of intersection of source geometry i with union geometry j UT: binary mapping of union geometry j to target geometry t Notes ----- The assumption is both dataframes have the same coordinate reference system. Union geometry is a geometry formed by the intersection of a source geometry and a target geometry SU Maps source geometry to union geometry, UT maps union geometry to target geometry
tobler/area_weighted/area_interpolate.py
_area_tables
AnGWar26/tobler
python
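The two dense arrays compose: multiplying SU by UT collapses the union pieces into direct source-to-target intersection areas, which is exactly what _area_interpolate computes internally as ST:

SU, UT = _area_tables(src, tgt)   # src/tgt from the sketch above
ST = np.dot(SU, UT)               # ST[i, t]: area of source i inside target t; matches table.toarray() up to dtype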
def _area_interpolate_binning(source_df, target_df, extensive_variables=None,
                              intensive_variables=None, table=None,
                              allocate_total=True, spatial_index='auto',
                              n_jobs=1, categorical_variables=None):
    """
    Area interpolation for extensive, intensive and categorical variables.

    Parameters
    ----------
    source_df : geopandas.GeoDataFrame
    target_df : geopandas.GeoDataFrame
    extensive_variables : list
        [Optional. Default=None] Columns in dataframes for extensive variables
    intensive_variables : list
        [Optional. Default=None] Columns in dataframes for intensive variables
    table : scipy.sparse.dok_matrix
        [Optional. Default=None] Area allocation source-target correspondence
        table. If not provided, it will be built from `source_df` and
        `target_df` using `tobler.area_interpolate._area_tables_binning`
    allocate_total : boolean
        [Optional. Default=True] True if total value of source area should be
        allocated. False if denominator is area of i. Note that the two cases
        would be identical when the area of the source polygon is exhausted by
        intersections. See Notes for more details.
    spatial_index : str
        [Optional. Default="auto"] Spatial index to use to build the
        allocation of area from source to target tables. It currently supports
        the following values:
        - "source": build the spatial index on `source_df`
        - "target": build the spatial index on `target_df`
        - "auto": attempts to guess the most efficient alternative.
          Currently, this option uses the largest table to build the
          index, and performs a `bulk_query` on the shorter table.
        This argument is ignored if n_jobs>1 (or n_jobs=-1).
    n_jobs : int
        [Optional. Default=1] Number of processes to run in parallel to
        generate the area allocation. If -1, this is set to the number of CPUs
        available. If `table` is passed, this is ignored.
        NOTE: as of Jan'21 multi-core functionality requires master versions
        of `pygeos` and `geopandas`.
    categorical_variables : list
        [Optional. Default=None] Columns in dataframes for categorical variables

    Returns
    -------
    estimates : geopandas.GeoDataFrame
        new geodataframe with interpolated variables as columns and target_df
        geometry as output geometry

    Notes
    -----
    The assumption is both dataframes have the same coordinate reference system.
    For an extensive variable, the estimate at target polygon j (default case) is:

    .. math::
        v_j = \\sum_i v_i w_{i,j}

        w_{i,j} = a_{i,j} / \\sum_k a_{i,k}

    If the area of the source polygon is not exhausted by intersections with
    target polygons and there is reason to not allocate the complete value of
    an extensive attribute, then setting allocate_total=False will use the
    following weights:

    .. math::
        v_j = \\sum_i v_i w_{i,j}

        w_{i,j} = a_{i,j} / a_i

    where a_i is the total area of source polygon i.
    For an intensive variable, the estimate at target polygon j is:

    .. math::
        v_j = \\sum_i v_i w_{i,j}

        w_{i,j} = a_{i,j} / \\sum_k a_{k,j}

    For categorical variables, the estimate returns ratio of presence of each
    unique category.
    """
    source_df = source_df.copy()
    target_df = target_df.copy()

    if not _check_crs(source_df, target_df):
        return None

    if table is None:
        if n_jobs == 1:
            table = _area_tables_binning(source_df, target_df, spatial_index)
        else:
            table = _area_tables_binning_parallel(source_df, target_df, n_jobs=n_jobs)

    # Row-normalise intersection areas to get extensive-variable weights.
    den = source_df[source_df.geometry.name].area.values
    if allocate_total:
        den = np.asarray(table.sum(axis=1))
    den = den + (den == 0)  # guard against division by zero
    den = 1.0 / den
    n = den.shape[0]
    den = den.reshape((n,))
    den = diags([den], [0])
    weights = den.dot(table)

    dfs = []
    extensive = []
    if extensive_variables:
        for variable in extensive_variables:
            vals = _nan_check(source_df, variable)
            vals = _inf_check(source_df, variable)
            estimates = diags([vals], [0]).dot(weights)
            estimates = estimates.sum(axis=0)
            extensive.append(estimates.tolist()[0])
        extensive = np.asarray(extensive)
        extensive = np.array(extensive)
        extensive = pd.DataFrame(extensive.T, columns=extensive_variables)

    # Column-normalise to get intensive-variable weights.
    area = np.asarray(table.sum(axis=0))
    den = 1.0 / (area + (area == 0))
    n, k = den.shape
    den = den.reshape((k,))
    den = diags([den], [0])
    weights = table.dot(den)

    intensive = []
    if intensive_variables:
        for variable in intensive_variables:
            vals = _nan_check(source_df, variable)
            vals = _inf_check(source_df, variable)
            n = vals.shape[0]
            vals = vals.reshape((n,))
            estimates = diags([vals], [0])
            estimates = estimates.dot(weights).sum(axis=0)
            intensive.append(estimates.tolist()[0])
        intensive = np.asarray(intensive)
        intensive = pd.DataFrame(intensive.T, columns=intensive_variables)

    if categorical_variables:
        categorical = {}
        for variable in categorical_variables:
            unique = source_df[variable].unique()
            for value in unique:
                mask = source_df[variable] == value
                categorical[f'{variable}_{value}'] = np.asarray(table[mask].sum(axis=0))[0]
        categorical = pd.DataFrame(categorical)
        categorical = categorical.div(target_df.area, axis='rows')

    if extensive_variables:
        dfs.append(extensive)
    if intensive_variables:
        dfs.append(intensive)
    if categorical_variables:
        dfs.append(categorical)

    df = pd.concat(dfs, axis=1)
    df['geometry'] = target_df[target_df.geometry.name].reset_index(drop=True)
    df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))
    return df
-5,018,229,989,345,004,000
Area interpolation for extensive, intensive and categorical variables. Parameters ---------- source_df : geopandas.GeoDataFrame target_df : geopandas.GeoDataFrame extensive_variables : list [Optional. Default=None] Columns in dataframes for extensive variables intensive_variables : list [Optional. Default=None] Columns in dataframes for intensive variables table : scipy.sparse.dok_matrix [Optional. Default=None] Area allocation source-target correspondence table. If not provided, it will be built from `source_df` and `target_df` using `tobler.area_interpolate._area_tables_binning` allocate_total : boolean [Optional. Default=True] True if total value of source area should be allocated. False if denominator is area of i. Note that the two cases would be identical when the area of the source polygon is exhausted by intersections. See Notes for more details. spatial_index : str [Optional. Default="auto"] Spatial index to use to build the allocation of area from source to target tables. It currently supports the following values: - "source": build the spatial index on `source_df` - "target": build the spatial index on `target_df` - "auto": attempts to guess the most efficient alternative. Currently, this option uses the largest table to build the index, and performs a `bulk_query` on the shorter table. This argument is ignored if n_jobs>1 (or n_jobs=-1). n_jobs : int [Optional. Default=1] Number of processes to run in parallel to generate the area allocation. If -1, this is set to the number of CPUs available. If `table` is passed, this is ignored. NOTE: as of Jan'21 multi-core functionality requires master versions of `pygeos` and `geopandas`. categorical_variables : list [Optional. Default=None] Columns in dataframes for categorical variables Returns ------- estimates : geopandas.GeoDataFrame new geodataframe with interpolated variables as columns and target_df geometry as output geometry Notes ----- The assumption is both dataframes have the same coordinate reference system. For an extensive variable, the estimate at target polygon j (default case) is: .. math:: v_j = \sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / \sum_k a_{i,k} If the area of the source polygon is not exhausted by intersections with target polygons and there is reason to not allocate the complete value of an extensive attribute, then setting allocate_total=False will use the following weights: .. math:: v_j = \sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / a_i where a_i is the total area of source polygon i. For an intensive variable, the estimate at target polygon j is: .. math:: v_j = \sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / \sum_k a_{k,j} For categorical variables, the estimate returns ratio of presence of each unique category.
tobler/area_weighted/area_interpolate.py
_area_interpolate_binning
AnGWar26/tobler
python
def _area_interpolate_binning(source_df, target_df, extensive_variables=None, intensive_variables=None, table=None, allocate_total=True, spatial_index='auto', n_jobs=1, categorical_variables=None): '\n Area interpolation for extensive, intensive and categorical variables.\n\n Parameters\n ----------\n source_df : geopandas.GeoDataFrame\n target_df : geopandas.GeoDataFrame\n extensive_variables : list\n [Optional. Default=None] Columns in dataframes for extensive variables\n intensive_variables : list\n [Optional. Default=None] Columns in dataframes for intensive variables\n table : scipy.sparse.dok_matrix\n [Optional. Default=None] Area allocation source-target correspondence\n table. If not provided, it will be built from `source_df` and\n `target_df` using `tobler.area_interpolate._area_tables_binning`\n allocate_total : boolean\n [Optional. Default=True] True if total value of source area should be\n allocated. False if denominator is area of i. Note that the two cases\n would be identical when the area of the source polygon is exhausted by\n intersections. See Notes for more details.\n spatial_index : str\n [Optional. Default="auto"] Spatial index to use to build the\n allocation of area from source to target tables. It currently supports\n the following values:\n - "source": build the spatial index on `source_df`\n - "target": build the spatial index on `target_df`\n - "auto": attempts to guess the most efficient alternative.\n Currently, this option uses the largest table to build the\n index, and performs a `bulk_query` on the shorter table.\n This argument is ignored if n_jobs>1 (or n_jobs=-1).\n n_jobs : int\n [Optional. Default=1] Number of processes to run in parallel to\n generate the area allocation. If -1, this is set to the number of CPUs\n available. If `table` is passed, this is ignored.\n NOTE: as of Jan\'21 multi-core functionality requires master versions\n of `pygeos` and `geopandas`.\n categorical_variables : list\n [Optional. Default=None] Columns in dataframes for categorical variables\n\n Returns\n -------\n estimates : geopandas.GeoDataFrame\n new geodataframe with interpolated variables as columns and target_df geometry\n as output geometry\n\n Notes\n -----\n The assumption is both dataframes have the same coordinate reference system.\n For an extensive variable, the estimate at target polygon j (default case) is:\n\n .. math::\n v_j = \\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / \\sum_k a_{i,k}\n\n If the area of the source polygon is not exhausted by intersections with\n target polygons and there is reason to not allocate the complete value of\n an extensive attribute, then setting allocate_total=False will use the\n following weights:\n\n .. math::\n v_j = \\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / a_i\n\n where a_i is the total area of source polygon i.\n For an intensive variable, the estimate at target polygon j is:\n\n .. math::\n v_j = \\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / \\sum_k a_{k,j}\n\n For categorical variables, the estimate returns the ratio of presence of each\n unique category.\n ' source_df = source_df.copy() target_df = target_df.copy() if _check_crs(source_df, target_df): pass else: return None if (table is None): if (n_jobs == 1): table = _area_tables_binning(source_df, target_df, spatial_index) else: table = _area_tables_binning_parallel(source_df, target_df, n_jobs=n_jobs) den = source_df[source_df.geometry.name].area.values if allocate_total: den = np.asarray(table.sum(axis=1)) den = (den + (den == 0)) den = (1.0 / den) n = den.shape[0] den = den.reshape((n,)) den = diags([den], [0]) weights = den.dot(table) dfs = [] extensive = [] if extensive_variables: for variable in extensive_variables: vals = _nan_check(source_df, variable) vals = _inf_check(source_df, variable) estimates = diags([vals], [0]).dot(weights) estimates = estimates.sum(axis=0) extensive.append(estimates.tolist()[0]) extensive = np.asarray(extensive) extensive = np.array(extensive) extensive = pd.DataFrame(extensive.T, columns=extensive_variables) area = np.asarray(table.sum(axis=0)) den = (1.0 / (area + (area == 0))) (n, k) = den.shape den = den.reshape((k,)) den = diags([den], [0]) weights = table.dot(den) intensive = [] if intensive_variables: for variable in intensive_variables: vals = _nan_check(source_df, variable) vals = _inf_check(source_df, variable) n = vals.shape[0] vals = vals.reshape((n,)) estimates = diags([vals], [0]) estimates = estimates.dot(weights).sum(axis=0) intensive.append(estimates.tolist()[0]) intensive = np.asarray(intensive) intensive = pd.DataFrame(intensive.T, columns=intensive_variables) if categorical_variables: categorical = {} for variable in categorical_variables: unique = source_df[variable].unique() for value in unique: mask = (source_df[variable] == value) categorical[f'{variable}_{value}'] = np.asarray(table[mask].sum(axis=0))[0] categorical = pd.DataFrame(categorical) categorical = categorical.div(target_df.area, axis='rows') if extensive_variables: dfs.append(extensive) if intensive_variables: dfs.append(intensive) if categorical_variables: dfs.append(categorical) df = pd.concat(dfs, axis=1) df['geometry'] = target_df[target_df.geometry.name].reset_index(drop=True) df = gpd.GeoDataFrame(df.replace(np.inf, np.nan)) return df
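A minimal usage sketch for the binning-based interpolator above. The toy GeoDataFrames, CRS, and the `population` column are assumptions for illustration; with allocate_total=False, each source contributes in proportion to the share of its own area that intersects the target (the w_{i,j} = a_{i,j} / a_i case).

import geopandas as gpd
from shapely.geometry import box

# Two unit-square sources, each carrying an extensive count.
source = gpd.GeoDataFrame({'population': [100.0, 200.0]}, geometry=[box(0, 0, 1, 1), box(1, 0, 2, 1)], crs='EPSG:3857')
# One target polygon straddling both sources, overlapping half of each.
target = gpd.GeoDataFrame(geometry=[box(0.5, 0, 1.5, 1)], crs='EPSG:3857')
est = _area_interpolate_binning(source, target, extensive_variables=['population'], allocate_total=False)
print(est['population'].iloc[0])  # expected: 0.5*100 + 0.5*200 = 150.0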
def _area_interpolate(source_df, target_df, extensive_variables=None, intensive_variables=None, tables=None, allocate_total=True): '\n Area interpolation for extensive and intensive variables.\n\n Parameters\n ----------\n source_df : geopandas.GeoDataFrame (required)\n geodataframe with polygon geometries\n target_df : geopandas.GeoDataFrame (required)\n geodataframe with polygon geometries\n extensive_variables : list, (optional)\n columns in dataframes for extensive variables\n intensive_variables : list, (optional)\n columns in dataframes for intensive variables\n tables : tuple (optional)\n two 2-D numpy arrays\n SU: area of intersection of source geometry i with union geometry j\n UT: binary mapping of union geometry j to target geometry t\n allocate_total : boolean\n True if total value of source area should be allocated.\n False if denominator is area of i. Note that the two cases\n would be identical when the area of the source polygon is\n exhausted by intersections. See Notes for more details.\n\n Returns\n -------\n estimates : geopandas.GeoDataFrame\n new geodaraframe with interpolated variables as columns and target_df geometry\n as output geometry\n\n Notes\n -----\n The assumption is both dataframes have the same coordinate reference system.\n\n\n For an extensive variable, the estimate at target polygon j (default case) is:\n\n v_j = \\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / \\sum_k a_{i,k}\n\n\n If the area of the source polygon is not exhausted by intersections with\n target polygons and there is reason to not allocate the complete value of\n an extensive attribute, then setting allocate_total=False will use the\n following weights:\n\n\n v_j = \\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / a_i\n\n where a_i is the total area of source polygon i.\n\n\n For an intensive variable, the estimate at target polygon j is:\n\n v_j = \\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / \\sum_k a_{k,j}\n\n ' source_df = source_df.copy() target_df = target_df.copy() if _check_crs(source_df, target_df): pass else: return None if (tables is None): (SU, UT) = _area_tables(source_df, target_df) else: (SU, UT) = tables den = source_df[source_df.geometry.name].area.values if allocate_total: den = SU.sum(axis=1) den = (den + (den == 0)) weights = np.dot(np.diag((1 / den)), SU) dfs = [] extensive = [] if extensive_variables: for variable in extensive_variables: vals = _nan_check(source_df, variable) vals = _inf_check(source_df, variable) estimates = np.dot(np.diag(vals), weights) estimates = np.dot(estimates, UT) estimates = estimates.sum(axis=0) extensive.append(estimates) extensive = np.array(extensive) extensive = pd.DataFrame(extensive.T, columns=extensive_variables) ST = np.dot(SU, UT) area = ST.sum(axis=0) den = np.diag((1.0 / (area + (area == 0)))) weights = np.dot(ST, den) intensive = [] if intensive_variables: for variable in intensive_variables: vals = _nan_check(source_df, variable) vals = _inf_check(source_df, variable) vals.shape = (len(vals), 1) est = (vals * weights).sum(axis=0) intensive.append(est) intensive = np.array(intensive) intensive = pd.DataFrame(intensive.T, columns=intensive_variables) if extensive_variables: dfs.append(extensive) if intensive_variables: dfs.append(intensive) df = pd.concat(dfs, axis=1) df['geometry'] = target_df[target_df.geometry.name].reset_index(drop=True) df = gpd.GeoDataFrame(df.replace(np.inf, np.nan)) return df
7,904,624,371,789,951,000
Area interpolation for extensive and intensive variables. Parameters ---------- source_df : geopandas.GeoDataFrame (required) geodataframe with polygon geometries target_df : geopandas.GeoDataFrame (required) geodataframe with polygon geometries extensive_variables : list, (optional) columns in dataframes for extensive variables intensive_variables : list, (optional) columns in dataframes for intensive variables tables : tuple (optional) two 2-D numpy arrays SU: area of intersection of source geometry i with union geometry j UT: binary mapping of union geometry j to target geometry t allocate_total : boolean True if total value of source area should be allocated. False if denominator is area of i. Note that the two cases would be identical when the area of the source polygon is exhausted by intersections. See Notes for more details. Returns ------- estimates : geopandas.GeoDataFrame new geodataframe with interpolated variables as columns and target_df geometry as output geometry Notes ----- The assumption is both dataframes have the same coordinate reference system. For an extensive variable, the estimate at target polygon j (default case) is: v_j = \sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / \sum_k a_{i,k} If the area of the source polygon is not exhausted by intersections with target polygons and there is reason to not allocate the complete value of an extensive attribute, then setting allocate_total=False will use the following weights: v_j = \sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / a_i where a_i is the total area of source polygon i. For an intensive variable, the estimate at target polygon j is: v_j = \sum_i v_i w_{i,j} w_{i,j} = a_{i,j} / \sum_k a_{k,j}
tobler/area_weighted/area_interpolate.py
_area_interpolate
AnGWar26/tobler
python
def _area_interpolate(source_df, target_df, extensive_variables=None, intensive_variables=None, tables=None, allocate_total=True): '\n Area interpolation for extensive and intensive variables.\n\n Parameters\n ----------\n source_df : geopandas.GeoDataFrame (required)\n geodataframe with polygon geometries\n target_df : geopandas.GeoDataFrame (required)\n geodataframe with polygon geometries\n extensive_variables : list, (optional)\n columns in dataframes for extensive variables\n intensive_variables : list, (optional)\n columns in dataframes for intensive variables\n tables : tuple (optional)\n two 2-D numpy arrays\n SU: area of intersection of source geometry i with union geometry j\n UT: binary mapping of union geometry j to target geometry t\n allocate_total : boolean\n True if total value of source area should be allocated.\n False if denominator is area of i. Note that the two cases\n would be identical when the area of the source polygon is\n exhausted by intersections. See Notes for more details.\n\n Returns\n -------\n estimates : geopandas.GeoDataFrame\n new geodaraframe with interpolated variables as columns and target_df geometry\n as output geometry\n\n Notes\n -----\n The assumption is both dataframes have the same coordinate reference system.\n\n\n For an extensive variable, the estimate at target polygon j (default case) is:\n\n v_j = \\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / \\sum_k a_{i,k}\n\n\n If the area of the source polygon is not exhausted by intersections with\n target polygons and there is reason to not allocate the complete value of\n an extensive attribute, then setting allocate_total=False will use the\n following weights:\n\n\n v_j = \\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / a_i\n\n where a_i is the total area of source polygon i.\n\n\n For an intensive variable, the estimate at target polygon j is:\n\n v_j = \\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / \\sum_k a_{k,j}\n\n ' source_df = source_df.copy() target_df = target_df.copy() if _check_crs(source_df, target_df): pass else: return None if (tables is None): (SU, UT) = _area_tables(source_df, target_df) else: (SU, UT) = tables den = source_df[source_df.geometry.name].area.values if allocate_total: den = SU.sum(axis=1) den = (den + (den == 0)) weights = np.dot(np.diag((1 / den)), SU) dfs = [] extensive = [] if extensive_variables: for variable in extensive_variables: vals = _nan_check(source_df, variable) vals = _inf_check(source_df, variable) estimates = np.dot(np.diag(vals), weights) estimates = np.dot(estimates, UT) estimates = estimates.sum(axis=0) extensive.append(estimates) extensive = np.array(extensive) extensive = pd.DataFrame(extensive.T, columns=extensive_variables) ST = np.dot(SU, UT) area = ST.sum(axis=0) den = np.diag((1.0 / (area + (area == 0)))) weights = np.dot(ST, den) intensive = [] if intensive_variables: for variable in intensive_variables: vals = _nan_check(source_df, variable) vals = _inf_check(source_df, variable) vals.shape = (len(vals), 1) est = (vals * weights).sum(axis=0) intensive.append(est) intensive = np.array(intensive) intensive = pd.DataFrame(intensive.T, columns=intensive_variables) if extensive_variables: dfs.append(extensive) if intensive_variables: dfs.append(intensive) df = pd.concat(dfs, axis=1) df['geometry'] = target_df[target_df.geometry.name].reset_index(drop=True) df = gpd.GeoDataFrame(df.replace(np.inf, np.nan)) return df
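To make the intensive-variable weights w_{i,j} = a_{i,j} / \sum_k a_{k,j} concrete, here is a hand-computed sketch of the dense-table path (`tables=(SU, UT)`) using plain numpy; the SU/UT values are made up for illustration.

import numpy as np

# 2 sources x 3 union pieces, 3 union pieces x 1 target (hypothetical areas).
SU = np.array([[0.5, 0.5, 0.0],
               [0.0, 0.0, 1.0]])
UT = np.array([[1], [0], [1]])
ST = SU @ UT                       # source-target intersection areas: [[0.5], [1.0]]
weights = ST / ST.sum(axis=0)      # intensive weights a_ij / sum_k a_kj: [[1/3], [2/3]]
vals = np.array([[10.0], [30.0]])  # an intensive variable, e.g. density
print((vals * weights).sum(axis=0))  # area-weighted mean: [23.333...]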
def _area_tables_raster(source_df, target_df, raster_path, codes=[21, 22, 23, 24], force_crs_match=True): "\n Construct area allocation and source-target correspondence tables according to a raster 'populated' areas\n\n Parameters\n ----------\n source_df : geopandas.GeoDataFrame\n geeodataframe with geometry column of polygon type\n target_df : geopandas.GeoDataFrame\n geodataframe with geometry column of polygon type\n raster_path : str\n the path to the associated raster image.\n codes : list\n list of integer code values that should be considered as 'populated'.\n Since this draw inspiration using the National Land Cover Database (NLCD), the default is 21 (Developed, Open Space), 22 (Developed, Low Intensity), 23 (Developed, Medium Intensity) and 24 (Developed, High Intensity).\n The description of each code can be found here: https://www.mrlc.gov/sites/default/files/metadata/landcover.html\n Only taken into consideration for harmonization raster based.\n force_crs_match : bool (default is True)\n Whether the Coordinate Reference System (CRS) of the polygon will be reprojected to the CRS of the raster file.\n It is recommended to let this argument as True.\n\n Returns\n -------\n tables: tuple (optional)\n two 2-D numpy arrays\n SU: area of intersection of source geometry i with union geometry j\n UT: binary mapping of union geometry j to target geometry t\n\n Notes\n -----\n The assumption is both dataframes have the same coordinate reference system.\n\n Union geometry is a geometry formed by the intersection of a source geometry and a target geometry\n\n SU Maps source geometry to union geometry, UT maps union geometry to target geometry\n\n\n\n " if _check_crs(source_df, target_df): pass else: return None source_df = source_df.copy() target_df = target_df.copy() n_s = source_df.shape[0] n_t = target_df.shape[0] _left = np.arange(n_s) _right = np.arange(n_t) source_df.loc[:, '_left'] = _left target_df.loc[:, '_right'] = _right res_union_pre = gpd.overlay(source_df, target_df, how='union') warnings.warn('The CRS for the generated union will be set to be the same as source_df.') res_union_pre.crs = source_df.crs res_union = _fast_append_profile_in_gdf(res_union_pre, raster_path, force_crs_match=force_crs_match) str_codes = [str(i) for i in codes] str_list = [('Type_' + i) for i in str_codes] str_list_ok = [col for col in res_union.columns if (col in str_list)] res_union['Populated_Pixels'] = res_union[str_list_ok].sum(axis=1) (n_u, _) = res_union.shape SU = np.zeros((n_s, n_u)) UT = np.zeros((n_u, n_t)) for (index, row) in res_union.iterrows(): if ((not np.isnan(row['_left'])) and (not np.isnan(row['_right']))): s_id = int(row['_left']) t_id = int(row['_right']) SU[(s_id, index)] = row['Populated_Pixels'] UT[(index, t_id)] = 1 source_df.drop(['_left'], axis=1, inplace=True) target_df.drop(['_right'], axis=1, inplace=True) return (SU, UT)
8,537,103,575,296,378,000
Construct area allocation and source-target correspondence tables according to a raster's 'populated' areas Parameters ---------- source_df : geopandas.GeoDataFrame geodataframe with geometry column of polygon type target_df : geopandas.GeoDataFrame geodataframe with geometry column of polygon type raster_path : str the path to the associated raster image. codes : list list of integer code values that should be considered as 'populated'. Since this draws inspiration from the National Land Cover Database (NLCD), the default is 21 (Developed, Open Space), 22 (Developed, Low Intensity), 23 (Developed, Medium Intensity) and 24 (Developed, High Intensity). The description of each code can be found here: https://www.mrlc.gov/sites/default/files/metadata/landcover.html Only taken into consideration for raster-based harmonization. force_crs_match : bool (default is True) Whether the Coordinate Reference System (CRS) of the polygon will be reprojected to the CRS of the raster file. It is recommended to leave this argument as True. Returns ------- tables: tuple (optional) two 2-D numpy arrays SU: area of intersection of source geometry i with union geometry j UT: binary mapping of union geometry j to target geometry t Notes ----- The assumption is both dataframes have the same coordinate reference system. Union geometry is a geometry formed by the intersection of a source geometry and a target geometry SU maps source geometry to union geometry, UT maps union geometry to target geometry
tobler/area_weighted/area_interpolate.py
_area_tables_raster
AnGWar26/tobler
python
def _area_tables_raster(source_df, target_df, raster_path, codes=[21, 22, 23, 24], force_crs_match=True): "\n Construct area allocation and source-target correspondence tables according to a raster 'populated' areas\n\n Parameters\n ----------\n source_df : geopandas.GeoDataFrame\n geeodataframe with geometry column of polygon type\n target_df : geopandas.GeoDataFrame\n geodataframe with geometry column of polygon type\n raster_path : str\n the path to the associated raster image.\n codes : list\n list of integer code values that should be considered as 'populated'.\n Since this draw inspiration using the National Land Cover Database (NLCD), the default is 21 (Developed, Open Space), 22 (Developed, Low Intensity), 23 (Developed, Medium Intensity) and 24 (Developed, High Intensity).\n The description of each code can be found here: https://www.mrlc.gov/sites/default/files/metadata/landcover.html\n Only taken into consideration for harmonization raster based.\n force_crs_match : bool (default is True)\n Whether the Coordinate Reference System (CRS) of the polygon will be reprojected to the CRS of the raster file.\n It is recommended to let this argument as True.\n\n Returns\n -------\n tables: tuple (optional)\n two 2-D numpy arrays\n SU: area of intersection of source geometry i with union geometry j\n UT: binary mapping of union geometry j to target geometry t\n\n Notes\n -----\n The assumption is both dataframes have the same coordinate reference system.\n\n Union geometry is a geometry formed by the intersection of a source geometry and a target geometry\n\n SU Maps source geometry to union geometry, UT maps union geometry to target geometry\n\n\n\n " if _check_crs(source_df, target_df): pass else: return None source_df = source_df.copy() target_df = target_df.copy() n_s = source_df.shape[0] n_t = target_df.shape[0] _left = np.arange(n_s) _right = np.arange(n_t) source_df.loc[:, '_left'] = _left target_df.loc[:, '_right'] = _right res_union_pre = gpd.overlay(source_df, target_df, how='union') warnings.warn('The CRS for the generated union will be set to be the same as source_df.') res_union_pre.crs = source_df.crs res_union = _fast_append_profile_in_gdf(res_union_pre, raster_path, force_crs_match=force_crs_match) str_codes = [str(i) for i in codes] str_list = [('Type_' + i) for i in str_codes] str_list_ok = [col for col in res_union.columns if (col in str_list)] res_union['Populated_Pixels'] = res_union[str_list_ok].sum(axis=1) (n_u, _) = res_union.shape SU = np.zeros((n_s, n_u)) UT = np.zeros((n_u, n_t)) for (index, row) in res_union.iterrows(): if ((not np.isnan(row['_left'])) and (not np.isnan(row['_right']))): s_id = int(row['_left']) t_id = int(row['_right']) SU[(s_id, index)] = row['Populated_Pixels'] UT[(index, t_id)] = 1 source_df.drop(['_left'], axis=1, inplace=True) target_df.drop(['_right'], axis=1, inplace=True) return (SU, UT)
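A hedged sketch of wiring the raster-masked tables into `_area_interpolate`; the raster path is hypothetical, and `source`/`target` are assumed to be polygon GeoDataFrames sharing a CRS.

# Allocate by 'populated' NLCD pixels instead of raw polygon area.
SU, UT = _area_tables_raster(source, target, raster_path='nlcd_2016.tif', codes=[21, 22, 23, 24])  # hypothetical file
est = _area_interpolate(source, target, extensive_variables=['population'], tables=(SU, UT))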
def list(self, filter: Optional[str]=None, **kwargs: Any) -> AsyncIterable['_models.JobCollection']: 'Gets the list of jobs.\n\n Gets the list of Azure Site Recovery Jobs for the vault.\n\n :param filter: OData filter options.\n :type filter: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either JobCollection or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.recoveryservicessiterecovery.models.JobCollection]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2021-06-01' accept = 'application/json' def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') if (not next_link): url = self.list.metadata['url'] path_format_arguments = {'resourceName': self._serialize.url('self._config.resource_name', self._config.resource_name, 'str'), 'resourceGroupName': self._serialize.url('self._config.resource_group_name', self._config.resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') if (filter is not None): query_parameters['$filter'] = self._serialize.query('filter', filter, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('JobCollection', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), AsyncList(list_of_elem)) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)
-7,047,998,206,408,130,000
Gets the list of jobs. Gets the list of Azure Site Recovery Jobs for the vault. :param filter: OData filter options. :type filter: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either JobCollection or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.recoveryservicessiterecovery.models.JobCollection] :raises: ~azure.core.exceptions.HttpResponseError
sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/aio/operations/_replication_jobs_operations.py
list
AFengKK/azure-sdk-for-python
python
def list(self, filter: Optional[str]=None, **kwargs: Any) -> AsyncIterable['_models.JobCollection']: 'Gets the list of jobs.\n\n Gets the list of Azure Site Recovery Jobs for the vault.\n\n :param filter: OData filter options.\n :type filter: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either JobCollection or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.recoveryservicessiterecovery.models.JobCollection]\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2021-06-01' accept = 'application/json' def prepare_request(next_link=None): header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') if (not next_link): url = self.list.metadata['url'] path_format_arguments = {'resourceName': self._serialize.url('self._config.resource_name', self._config.resource_name, 'str'), 'resourceGroupName': self._serialize.url('self._config.resource_group_name', self._config.resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') if (filter is not None): query_parameters['$filter'] = self._serialize.query('filter', filter, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('JobCollection', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return ((deserialized.next_link or None), AsyncList(list_of_elem)) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged(get_next, extract_data)
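A minimal consumption sketch for the pager above, assuming an already-configured aio SiteRecoveryManagementClient instance named `client` (its construction is not shown here).

import asyncio

async def print_job_names(client):
    # `list` returns an AsyncItemPaged; async iteration fetches pages lazily.
    async for job in client.replication_jobs.list():
        print(job.name)

# asyncio.run(print_job_names(client))  # with a configured client instance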
async def get(self, job_name: str, **kwargs: Any) -> '_models.Job': 'Gets the job details.\n\n Get the details of an Azure Site Recovery job.\n\n :param job_name: Job identifier.\n :type job_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: Job, or the result of cls(response)\n :rtype: ~azure.mgmt.recoveryservicessiterecovery.models.Job\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2021-06-01' accept = 'application/json' url = self.get.metadata['url'] path_format_arguments = {'resourceName': self._serialize.url('self._config.resource_name', self._config.resource_name, 'str'), 'resourceGroupName': self._serialize.url('self._config.resource_group_name', self._config.resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'jobName': self._serialize.url('job_name', job_name, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('Job', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
7,681,439,816,165,434,000
Gets the job details. Get the details of an Azure Site Recovery job. :param job_name: Job identifier. :type job_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Job, or the result of cls(response) :rtype: ~azure.mgmt.recoveryservicessiterecovery.models.Job :raises: ~azure.core.exceptions.HttpResponseError
sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/aio/operations/_replication_jobs_operations.py
get
AFengKK/azure-sdk-for-python
python
async def get(self, job_name: str, **kwargs: Any) -> '_models.Job': 'Gets the job details.\n\n Get the details of an Azure Site Recovery job.\n\n :param job_name: Job identifier.\n :type job_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: Job, or the result of cls(response)\n :rtype: ~azure.mgmt.recoveryservicessiterecovery.models.Job\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2021-06-01' accept = 'application/json' url = self.get.metadata['url'] path_format_arguments = {'resourceName': self._serialize.url('self._config.resource_name', self._config.resource_name, 'str'), 'resourceGroupName': self._serialize.url('self._config.resource_group_name', self._config.resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'jobName': self._serialize.url('job_name', job_name, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('Job', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
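Fetching a single job by identifier is a plain awaitable, under the same hypothetical `client` assumption:

async def show_job(client, job_name):
    job = await client.replication_jobs.get(job_name)  # deserialized Job model
    print(job.name)
    return job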
async def begin_cancel(self, job_name: str, **kwargs: Any) -> AsyncLROPoller['_models.Job']: 'Cancels the specified job.\n\n The operation to cancel an Azure Site Recovery job.\n\n :param job_name: Job identifier.\n :type job_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either Job or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._cancel_initial(job_name=job_name, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('Job', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'resourceName': self._serialize.url('self._config.resource_name', self._config.resource_name, 'str'), 'resourceGroupName': self._serialize.url('self._config.resource_group_name', self._config.resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'jobName': self._serialize.url('job_name', job_name, 'str')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
978,010,739,571,960,300
Cancels the specified job. The operation to cancel an Azure Site Recovery job. :param job_name: Job identifier. :type job_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either Job or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job] :raises ~azure.core.exceptions.HttpResponseError:
sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/aio/operations/_replication_jobs_operations.py
begin_cancel
AFengKK/azure-sdk-for-python
python
async def begin_cancel(self, job_name: str, **kwargs: Any) -> AsyncLROPoller['_models.Job']: 'Cancels the specified job.\n\n The operation to cancel an Azure Site Recovery job.\n\n :param job_name: Job identifier.\n :type job_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either Job or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._cancel_initial(job_name=job_name, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('Job', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'resourceName': self._serialize.url('self._config.resource_name', self._config.resource_name, 'str'), 'resourceGroupName': self._serialize.url('self._config.resource_group_name', self._config.resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'jobName': self._serialize.url('job_name', job_name, 'str')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
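The `begin_*` operations return an AsyncLROPoller; awaiting `result()` suspends until the long-running cancellation finishes. A sketch under the same client assumption:

async def cancel_and_wait(client, job_name):
    poller = await client.replication_jobs.begin_cancel(job_name)
    return await poller.result()  # the final Job model once the LRO completes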
async def begin_restart(self, job_name: str, **kwargs: Any) -> AsyncLROPoller['_models.Job']: 'Restarts the specified job.\n\n The operation to restart an Azure Site Recovery job.\n\n :param job_name: Job identifier.\n :type job_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either Job or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._restart_initial(job_name=job_name, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('Job', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'resourceName': self._serialize.url('self._config.resource_name', self._config.resource_name, 'str'), 'resourceGroupName': self._serialize.url('self._config.resource_group_name', self._config.resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'jobName': self._serialize.url('job_name', job_name, 'str')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
-4,034,072,986,809,026,000
Restarts the specified job. The operation to restart an Azure Site Recovery job. :param job_name: Job identifier. :type job_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either Job or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job] :raises ~azure.core.exceptions.HttpResponseError:
sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/aio/operations/_replication_jobs_operations.py
begin_restart
AFengKK/azure-sdk-for-python
python
async def begin_restart(self, job_name: str, **kwargs: Any) -> AsyncLROPoller['_models.Job']: 'Restarts the specified job.\n\n The operation to restart an Azure Site Recovery job.\n\n :param job_name: Job identifier.\n :type job_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either Job or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._restart_initial(job_name=job_name, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('Job', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'resourceName': self._serialize.url('self._config.resource_name', self._config.resource_name, 'str'), 'resourceGroupName': self._serialize.url('self._config.resource_group_name', self._config.resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'jobName': self._serialize.url('job_name', job_name, 'str')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def begin_resume(self, job_name: str, resume_job_params: '_models.ResumeJobParams', **kwargs: Any) -> AsyncLROPoller['_models.Job']: 'Resumes the specified job.\n\n The operation to resume an Azure Site Recovery job.\n\n :param job_name: Job identifier.\n :type job_name: str\n :param resume_job_params: Resume rob comments.\n :type resume_job_params: ~azure.mgmt.recoveryservicessiterecovery.models.ResumeJobParams\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either Job or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._resume_initial(job_name=job_name, resume_job_params=resume_job_params, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('Job', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'resourceName': self._serialize.url('self._config.resource_name', self._config.resource_name, 'str'), 'resourceGroupName': self._serialize.url('self._config.resource_group_name', self._config.resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'jobName': self._serialize.url('job_name', job_name, 'str')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
7,726,238,360,745,979,000
Resumes the specified job. The operation to resume an Azure Site Recovery job. :param job_name: Job identifier. :type job_name: str :param resume_job_params: Resume job comments. :type resume_job_params: ~azure.mgmt.recoveryservicessiterecovery.models.ResumeJobParams :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either Job or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job] :raises ~azure.core.exceptions.HttpResponseError:
sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/aio/operations/_replication_jobs_operations.py
begin_resume
AFengKK/azure-sdk-for-python
python
async def begin_resume(self, job_name: str, resume_job_params: '_models.ResumeJobParams', **kwargs: Any) -> AsyncLROPoller['_models.Job']: 'Resumes the specified job.\n\n The operation to resume an Azure Site Recovery job.\n\n :param job_name: Job identifier.\n :type job_name: str\n :param resume_job_params: Resume rob comments.\n :type resume_job_params: ~azure.mgmt.recoveryservicessiterecovery.models.ResumeJobParams\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either Job or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._resume_initial(job_name=job_name, resume_job_params=resume_job_params, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('Job', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'resourceName': self._serialize.url('self._config.resource_name', self._config.resource_name, 'str'), 'resourceGroupName': self._serialize.url('self._config.resource_group_name', self._config.resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str'), 'jobName': self._serialize.url('job_name', job_name, 'str')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def begin_export(self, job_query_parameter: '_models.JobQueryParameter', **kwargs: Any) -> AsyncLROPoller['_models.Job']: 'Exports the details of the Azure Site Recovery jobs of the vault.\n\n The operation to export the details of the Azure Site Recovery jobs of the vault.\n\n :param job_query_parameter: job query filter.\n :type job_query_parameter: ~azure.mgmt.recoveryservicessiterecovery.models.JobQueryParameter\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either Job or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._export_initial(job_query_parameter=job_query_parameter, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('Job', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'resourceName': self._serialize.url('self._config.resource_name', self._config.resource_name, 'str'), 'resourceGroupName': self._serialize.url('self._config.resource_group_name', self._config.resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
4,578,152,469,486,086,000
Exports the details of the Azure Site Recovery jobs of the vault. The operation to export the details of the Azure Site Recovery jobs of the vault. :param job_query_parameter: job query filter. :type job_query_parameter: ~azure.mgmt.recoveryservicessiterecovery.models.JobQueryParameter :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either Job or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job] :raises ~azure.core.exceptions.HttpResponseError:
sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/aio/operations/_replication_jobs_operations.py
begin_export
AFengKK/azure-sdk-for-python
python
async def begin_export(self, job_query_parameter: '_models.JobQueryParameter', **kwargs: Any) -> AsyncLROPoller['_models.Job']: 'Exports the details of the Azure Site Recovery jobs of the vault.\n\n The operation to export the details of the Azure Site Recovery jobs of the vault.\n\n :param job_query_parameter: job query filter.\n :type job_query_parameter: ~azure.mgmt.recoveryservicessiterecovery.models.JobQueryParameter\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: By default, your polling method will be AsyncARMPolling.\n Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either Job or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.recoveryservicessiterecovery.models.Job]\n :raises ~azure.core.exceptions.HttpResponseError:\n ' polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop('polling_interval', self._config.polling_interval) cont_token = kwargs.pop('continuation_token', None) if (cont_token is None): raw_result = (await self._export_initial(job_query_parameter=job_query_parameter, cls=(lambda x, y, z: x), **kwargs)) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('Job', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = {'resourceName': self._serialize.url('self._config.resource_name', self._config.resource_name, 'str'), 'resourceGroupName': self._serialize.url('self._config.resource_group_name', self._config.resource_group_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')} if (polling is True): polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif (polling is False): polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
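Because each poller exposes `continuation_token()`, a long-running export can be resumed later (even in another process) by passing the token back, as the `cont_token` branch above shows. Hypothetical sketch, again assuming a configured `client`:

async def start_export(client, query):
    poller = await client.replication_jobs.begin_export(query)
    return poller.continuation_token()  # persist somewhere durable

async def resume_export(client, query, token):
    poller = await client.replication_jobs.begin_export(query, continuation_token=token)
    return await poller.result()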
def build_model(layers, model=None, input_dim=None): "\n Build and return a Sequential model with Dense layers given by the layers argument.\n\n Arguments\n model (keras.Sequential) model to which layers will be added\n input_dim (int) dimension of input\n layers (tuple) sequence of 2-ples, one per layer, such as ((64, 'relu'), (64, 'relu'), (1, 'sigmoid'))\n\n Return\n model_name (str) a name for the model\n model (Model) a compiled model\n " if (model is None): model = Sequential() model_name = io.StringIO() (layer_type, kwargs) = layers[0] if (input_dim is None): pass else: kwargs['input_dim'] = input_dim for (layer_type, kwargs) in layers: layer = build_layer(model_name, layer_type, kwargs) model.add(layer) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) return (model_name.getvalue()[1:], model)
-200,597,198,694,877,470
Build and return a Sequential model with Dense layers given by the layers argument. Arguments model (keras.Sequential) model to which layers will be added input_dim (int) dimension of input layers (tuple) sequence of (layer_type, kwargs) 2-tuples, one per layer, as consumed by build_layer Return model_name (str) a name for the model model (Model) a compiled model
vl/model/training.py
build_model
hurwitzlab/viral-learning
python
def build_model(layers, model=None, input_dim=None): "\n Build and return a Sequential model with Dense layers given by the layers argument.\n\n Arguments\n model (keras.Sequential) model to which layers will be added\n input_dim (int) dimension of input\n layers (tuple) sequence of 2-ples, one per layer, such as ((64, 'relu'), (64, 'relu'), (1, 'sigmoid'))\n\n Return\n model_name (str) a name for the model\n model (Model) a compiled model\n " if (model is None): model = Sequential() model_name = io.StringIO() (layer_type, kwargs) = layers[0] if (input_dim is None): pass else: kwargs['input_dim'] = input_dim for (layer_type, kwargs) in layers: layer = build_layer(model_name, layer_type, kwargs) model.add(layer) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) return (model_name.getvalue()[1:], model)
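For reference, the network `build_model` assembles for a two-layer binary classifier is equivalent to the following plain Keras code (`build_layer` and its `layer_type` vocabulary live elsewhere in the module, so this sidesteps them; the input dimension of 100 is illustrative):

from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(64, activation='relu', input_dim=100))  # input_dim only on the first layer
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])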
def parse_byteOrder(byteOrder): 'convert byteOrder to enum' if ((byteOrder is None) or (byteOrder == '')): return None value = STRING_ENUM_MAP.get(byteOrder) if (value is None): raise ValueError(f'invalid byteOrder {repr(byteOrder)}, expected one of {list(STRING_ENUM_MAP.keys())}') return value
-3,941,613,680,548,913,000
convert byteOrder to enum
pysbe/parser/fix_parser.py
parse_byteOrder
bkc/pysbe
python
def parse_byteOrder(byteOrder): if ((byteOrder is None) or (byteOrder == '')): return None value = STRING_ENUM_MAP.get(byteOrder) if (value is None): raise ValueError(f'invalid byteOrder {repr(byteOrder)}, expected one of {list(STRING_ENUM_MAP.keys())}') return value
def parse_version(version): 'convert version to int' if (version is None): raise ValueError('sbe:messageSchema/@version is required') return int(version)
311,950,630,180,861,000
convert version to int
pysbe/parser/fix_parser.py
parse_version
bkc/pysbe
python
def parse_version(version): if (version is None): raise ValueError('sbe:messageSchema/@version is required') return int(version)
def parse_optionalString(value): 'parse an optional string' if (not value): return None return value
-9,117,855,241,927,928,000
parse an optional string
pysbe/parser/fix_parser.py
parse_optionalString
bkc/pysbe
python
def parse_optionalString(value): if (not value): return None return value
def parse_common_attributes(self, element, attributes): 'parse and return dict of common attributes' result_attributes = {} for attribute in attributes: attrib_info = ALL_ATTRIBUTES_MAP[attribute] if (attrib_info.get('default', MISSING) is not MISSING): default_value = attrib_info['default'] else: default_value = MISSING attribute_name = attrib_info.get('attribute_name', attribute) value = element.attrib.get(attribute_name, default_value) if ((value is MISSING) or (value == '')): if (attrib_info.get('use') == 'optional'): continue else: raise ValueError(f'element {element.tag} missing required attribute {attribute_name}') if attrib_info.get('type'): try: value = attrib_info['type'](value) except ValueError as exc: raise ValueError(f'element {element.tag} invalid value {repr(value)} for attribute {attribute_name}') from exc if attrib_info.get('minimumValue'): if (value < attrib_info['minimumValue']): raise ValueError(f"element {element.tag} invalid value {repr(value)} for attribute {attribute_name}, less than allowed minimum {repr(attrib_info['minimumValue'])}") if attrib_info.get('pattern'): if (not attrib_info['pattern'].match(value)): raise ValueError(f"element {element.tag} invalid value {repr(value)} for attribute {attribute_name}, does not match expected pattern {repr(attrib_info['pattern'])}") if attrib_info.get('map'): try: value = attrib_info['map'][value] except (KeyError, IndexError) as exc: raise ValueError(f"element {element.tag} invalid value {repr(value)} for attribute {attribute_name}, must be one of {repr(attrib_info['map'].keys())}") from exc if attrib_info.get('rename'): attribute = attrib_info['rename'] result_attributes[attribute] = value return result_attributes
6,420,430,953,369,956,000
parse and return dict of common attributes
pysbe/parser/fix_parser.py
parse_common_attributes
bkc/pysbe
python
def parse_common_attributes(self, element, attributes): result_attributes = {} for attribute in attributes: attrib_info = ALL_ATTRIBUTES_MAP[attribute] if (attrib_info.get('default', MISSING) is not MISSING): default_value = attrib_info['default'] else: default_value = MISSING attribute_name = attrib_info.get('attribute_name', attribute) value = element.attrib.get(attribute_name, default_value) if ((value is MISSING) or (value == '')): if (attrib_info.get('use') == 'optional'): continue else: raise ValueError(f'element {element.tag} missing required attribute {attribute_name}') if attrib_info.get('type'): try: value = attrib_info['type'](value) except ValueError as exc: raise ValueError(f'element {element.tag} invalid value {repr(value)} for attribute {attribute_name}') from exc if attrib_info.get('minimumValue'): if (value < attrib_info['minimumValue']): raise ValueError(f"element {element.tag} invalid value {repr(value)} for attribute {attribute_name}, less than allowed minimum {repr(attrib_info['minimumValue'])}") if attrib_info.get('pattern'): if (not attrib_info['pattern'].match(value)): raise ValueError(f"element {element.tag} invalid value {repr(value)} for attribute {attribute_name}, does not match expected pattern {repr(attrib_info['pattern'])}") if attrib_info.get('map'): try: value = attrib_info['map'][value] except (KeyError, IndexError) as exc: raise ValueError(f"element {element.tag} invalid value {repr(value)} for attribute {attribute_name}, must be one of {repr(attrib_info['map'].keys())}") from exc if attrib_info.get('rename'): attribute = attrib_info['rename'] result_attributes[attribute] = value return result_attributes
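The descriptor shape parse_common_attributes expects in ALL_ATTRIBUTES_MAP, sketched as a hypothetical entry; the keys are exactly the ones the loop consults, but every concrete value below is an assumption.

import re

MISSING = object()  # sentinel so an absent attribute is distinct from a falsy one

# Hypothetical descriptor for a 'name' attribute; only the keys come from the code above.
NAME_DESCRIPTOR = {
    'use': 'required',                # 'optional' entries are skipped when absent
    'type': str,                      # callable that coerces the raw XML string
    'pattern': re.compile(r'[A-Za-z_][A-Za-z0-9_]*'),  # value must match
    # 'default': 'unnamed',           # substituted when the attribute is missing
    # 'minimumValue': 0,              # lower bound checked after coercion
    # 'map': {'yes': True},           # translation table for the coerced value
    # 'rename': 'sbe_name',           # key under which the result is stored
    # 'attribute_name': 'name',       # XML attribute if it differs from the map key
}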
def parseFile(self, file_or_object): 'parse a file' root = etree.parse(file_or_object) element_name = ('{%s}messageSchema' % SBE_NS) messageSchema_element = root.getroot() if (messageSchema_element.tag != element_name): raise ValueError(f'root element is not sbe:messageSchema, found {repr(messageSchema_element)} instead') return self.processSchema(messageSchema_element)
-9,178,720,531,024,859,000
parse a file
pysbe/parser/fix_parser.py
parseFile
bkc/pysbe
python
def parseFile(self, file_or_object): root = etree.parse(file_or_object) element_name = ('{%s}messageSchema' % SBE_NS) messageSchema_element = root.getroot() if (messageSchema_element.tag != element_name): raise ValueError(f'root element is not sbe:messageSchema, found {repr(messageSchema_element)} instead') return self.processSchema(messageSchema_element)
def processSchema(self, messageSchema_element): 'process xml elements beginning with root messageSchema_element' attrib = messageSchema_element.attrib version = parse_version(attrib.get('version')) byteOrder = parse_byteOrder((attrib.get('byteOrder') or 'littleEndian')) package = parse_optionalString(attrib.get('package')) semanticVersion = parse_optionalString(attrib.get('semanticVersion')) description = parse_optionalString(attrib.get('description')) headerType = parse_optionalString((attrib.get('headerType') or 'messageHeader')) messageSchema = createMessageSchema(version=version, byteOrder=byteOrder, package=package, semanticVersion=semanticVersion, description=description, headerType=headerType) types_elements = messageSchema_element.findall('types') types_parser = TypesParser() for element in types_elements: types_parser.parse_types(messageSchema, element) message_elements = messageSchema_element.findall('sbe:message', namespaces=self.NS) message_parser = MessageParser() for element in message_elements: message_parser.parse_message(messageSchema, element) return messageSchema
5,891,823,265,714,278,000
process xml elements beginning with root messageSchema_element
pysbe/parser/fix_parser.py
processSchema
bkc/pysbe
python
def processSchema(self, messageSchema_element): attrib = messageSchema_element.attrib version = parse_version(attrib.get('version')) byteOrder = parse_byteOrder((attrib.get('byteOrder') or 'littleEndian')) package = parse_optionalString(attrib.get('package')) semanticVersion = parse_optionalString(attrib.get('semanticVersion')) description = parse_optionalString(attrib.get('description')) headerType = parse_optionalString((attrib.get('headerType') or 'messageHeader')) messageSchema = createMessageSchema(version=version, byteOrder=byteOrder, package=package, semanticVersion=semanticVersion, description=description, headerType=headerType) types_elements = messageSchema_element.findall('types') types_parser = TypesParser() for element in types_elements: types_parser.parse_types(messageSchema, element) message_elements = messageSchema_element.findall('sbe:message', namespaces=self.NS) message_parser = MessageParser() for element in message_elements: message_parser.parse_message(messageSchema, element) return messageSchema
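An end-to-end sketch of the entry point, assuming the hosting parser class is importable as SBEParser and that SBE_NS is the 2016 FIX SBE namespace; the schema XML is a minimal fabrication.

import io
# Hypothetical import; the class name hosting parseFile is an assumption.
from pysbe.parser.fix_parser import SBEParser

XML = b'''<sbe:messageSchema xmlns:sbe="http://fixprotocol.io/2016/sbe"
    package="demo" id="1" version="0" byteOrder="littleEndian">
  <types/>
</sbe:messageSchema>'''

parser = SBEParser()
schema = parser.parseFile(io.BytesIO(XML))  # validates the root tag, then processSchema
print(schema.version, schema.byteOrder)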
def parse_types(self, messageSchema, element): 'parse type, can be repeated' for child_element in element: if (child_element.tag not in self.VALID_TYPES_ELEMENTS): raise ValueError(f'invalid types child element {repr(child_element.tag)}') parser = getattr(self, f'parse_types_{child_element.tag}', None) if (not parser): raise RuntimeError(f'unsupported types parser {repr(child_element.tag)}') parser(messageSchema, child_element)
932,864,072,484,365,200
parse type, can be repeated
pysbe/parser/fix_parser.py
parse_types
bkc/pysbe
python
def parse_types(self, messageSchema, element): for child_element in element: if (child_element.tag not in self.VALID_TYPES_ELEMENTS): raise ValueError(f'invalid types child element {repr(child_element.tag)}') parser = getattr(self, f'parse_types_{child_element.tag}', None) if (not parser): raise RuntimeError(f'unsupported types parser {repr(child_element.tag)}') parser(messageSchema, child_element)
def parse_types_type(self, parent: TypeCollection, element): 'parse types/type' attributes = self.parse_common_attributes(element, attributes=TYPE_ATTRIBUTES_LIST) sbe_type = createType(**attributes) parent.addType(sbe_type)
-1,194,256,432,256,632,800
parse types/type
pysbe/parser/fix_parser.py
parse_types_type
bkc/pysbe
python
def parse_types_type(self, parent: TypeCollection, element): attributes = self.parse_common_attributes(element, attributes=TYPE_ATTRIBUTES_LIST) sbe_type = createType(**attributes) parent.addType(sbe_type)
def parse_types_ref(self, parent: TypeCollection, element): 'parse composite / ref' attributes = self.parse_common_attributes(element, attributes=REF_ATTRIBUTES_LIST) sbe_ref = createRef(**attributes) reference_type = parent.lookupName(sbe_ref.type) if (not reference_type): raise UnknownReference(f'composite {parent.name} ref {sbe_ref.name} references unknown encodingType {sbe_ref.type}') parent.addType(sbe_ref)
-8,382,981,341,107,380,000
parse composite / ref
pysbe/parser/fix_parser.py
parse_types_ref
bkc/pysbe
python
def parse_types_ref(self, parent: TypeCollection, element): attributes = self.parse_common_attributes(element, attributes=REF_ATTRIBUTES_LIST) sbe_ref = createRef(**attributes) reference_type = parent.lookupName(sbe_ref.type) if (not reference_type): raise UnknownReference(f'composite {parent.name} ref {sbe_ref.name} references unknown encodingType {sbe_ref.type}') parent.addType(sbe_ref)
def parse_types_composite(self, parent: TypeCollection, element): 'parse types/composite' attributes = self.parse_common_attributes(element, attributes=COMPOSITE_ATTRIBUTES_LIST) sbe_composite = createComposite(**attributes) parent.addType(sbe_composite) for child_element in element: tag = child_element.tag if (tag not in VALID_COMPOSITE_CHILD_ELEMENTS): raise ValueError(f'invalid child element {repr(tag)} in composite element {repr(sbe_composite.name)}') parser = getattr(self, f'parse_types_{tag}', None) if (not parser): raise RuntimeError(f'unsupported types parser {repr(child_element.tag)}') parser(sbe_composite, child_element)
8,407,362,604,885,788,000
parse types/composite
pysbe/parser/fix_parser.py
parse_types_composite
bkc/pysbe
python
def parse_types_composite(self, parent: TypeCollection, element): attributes = self.parse_common_attributes(element, attributes=COMPOSITE_ATTRIBUTES_LIST) sbe_composite = createComposite(**attributes) parent.addType(sbe_composite) for child_element in element: tag = child_element.tag if (tag not in VALID_COMPOSITE_CHILD_ELEMENTS): raise ValueError(f'invalid child element {repr(tag)} in composite element {repr(sbe_composite.name)}') parser = getattr(self, f'parse_types_{tag}', None) if (not parser): raise RuntimeError(f'unsupported types parser {repr(child_element.tag)}') parser(sbe_composite, child_element)
def parse_types_set(self, parent: TypeCollection, element): 'parse types/set' attributes = self.parse_common_attributes(element, attributes=SET_ATTRIBUTES_LIST) sbe_set = createSet(**attributes) parent.addType(sbe_set) for child_element in element.findall('choice'): choice = self.parse_set_choice(sbe_set=sbe_set, element=child_element) sbe_set.addChoice(choice)
3,417,518,372,095,596,000
parse types/set
pysbe/parser/fix_parser.py
parse_types_set
bkc/pysbe
python
def parse_types_set(self, parent: TypeCollection, element): attributes = self.parse_common_attributes(element, attributes=SET_ATTRIBUTES_LIST) sbe_set = createSet(**attributes) parent.addType(sbe_set) for child_element in element.findall('choice'): choice = self.parse_set_choice(sbe_set=sbe_set, element=child_element) sbe_set.addChoice(choice)
def parse_set_choice(self, sbe_set, element): 'parse and return a set choice' attributes = self.parse_common_attributes(element, attributes=SET_CHOICE_ATTRIBUTES_LIST) try: value = int(element.text) except (TypeError, ValueError) as exc: raise ValueError(f"invalid value for set {sbe_set.name} choice {attributes.get('name')}") from exc choice = createChoice(value=value, **attributes) return choice
-7,322,725,966,345,624,000
parse and return a set choice
pysbe/parser/fix_parser.py
parse_set_choice
bkc/pysbe
python
def parse_set_choice(self, sbe_set, element): attributes = self.parse_common_attributes(element, attributes=SET_CHOICE_ATTRIBUTES_LIST) try: value = int(element.text) except (TypeError, ValueError) as exc: raise ValueError(f"invalid value for set {sbe_set.name} choice {attributes.get('name')}") from exc choice = createChoice(value=value, **attributes) return choice
def parse_types_enum(self, parent: TypeCollection, element): 'parse types/enum' attributes = self.parse_common_attributes(element, attributes=ENUM_ATTRIBUTES_LIST) sbe_enum = createEnum(**attributes) parent.addType(sbe_enum) for child_element in element.findall('validValue'): valid_value = self.parse_enum_valid_value(sbe_enum=sbe_enum, element=child_element) sbe_enum.addValidValue(valid_value)
-8,241,963,529,179,988,000
parse types/enum
pysbe/parser/fix_parser.py
parse_types_enum
bkc/pysbe
python
def parse_types_enum(self, parent: TypeCollection, element): attributes = self.parse_common_attributes(element, attributes=ENUM_ATTRIBUTES_LIST) sbe_enum = createEnum(**attributes) parent.addType(sbe_enum) for child_element in element.findall('validValue'): valid_value = self.parse_enum_valid_value(sbe_enum=sbe_enum, element=child_element) sbe_enum.addValidValue(valid_value)
def parse_enum_valid_value(self, sbe_enum, element): 'parse and return an enum validvalue' attributes = self.parse_common_attributes(element, attributes=ENUM_VALID_VALUES_ATTRIBUTES_LIST) value = element.text enum_valid_value = createValidValue(value=value, **attributes) return enum_valid_value
-1,096,921,606,398,139,300
parse and return an enum validvalue
pysbe/parser/fix_parser.py
parse_enum_valid_value
bkc/pysbe
python
def parse_enum_valid_value(self, sbe_enum, element): attributes = self.parse_common_attributes(element, attributes=ENUM_VALID_VALUES_ATTRIBUTES_LIST) value = element.text enum_valid_value = createValidValue(value=value, **attributes) return enum_valid_value
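The XML fragments these type handlers walk, reconstructed from the tag and attribute names used above; the encodingType values and names are invented.

from lxml import etree

# Hypothetical <enum> and <set> fragments matching the parsers above.
enum_el = etree.fromstring(
    '<enum name="Side" encodingType="char">'
    '<validValue name="BUY">1</validValue>'
    '<validValue name="SELL">2</validValue>'
    '</enum>'
)
set_el = etree.fromstring(
    '<set name="Flags" encodingType="uint8">'
    '<choice name="urgent">0</choice>'   # choice text must parse as int
    '<choice name="resend">1</choice>'
    '</set>'
)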
def parse_message(self, messageSchema, element): 'parse message, can be repeated' attributes = self.parse_common_attributes(element, attributes=MESSAGE_ATTRIBUTES_LIST) message = createMessage(**attributes) messageSchema.addMessage(message) self.parse_field_children(messageSchema, message, element)
-6,473,633,594,660,156,000
parse message, can be repeated
pysbe/parser/fix_parser.py
parse_message
bkc/pysbe
python
def parse_message(self, messageSchema, element): attributes = self.parse_common_attributes(element, attributes=MESSAGE_ATTRIBUTES_LIST) message = createMessage(**attributes) messageSchema.addMessage(message) self.parse_field_children(messageSchema, message, element)
def parse_field_children(self, messageSchema, parent: FieldCollection, element): 'parse child elements that fit in a fieldCollection' for child_element in element: if (child_element.tag not in self.VALID_MESSAGE_TYPES): raise ValueError(f'invalid message/group child element {repr(child_element.tag)}') parser = getattr(self, f'parse_message_{child_element.tag}', None) if (not parser): raise RuntimeError(f'unsupported message parser {repr(child_element.tag)}') parser(messageSchema, parent, child_element)
-4,653,781,715,196,204,000
parse child elements that fit in a fieldCollection
pysbe/parser/fix_parser.py
parse_field_children
bkc/pysbe
python
def parse_field_children(self, messageSchema, parent: FieldCollection, element): for child_element in element: if (child_element.tag not in self.VALID_MESSAGE_TYPES): raise ValueError(f'invalid message/group child element {repr(child_element.tag)}') parser = getattr(self, f'parse_message_{child_element.tag}', None) if (not parser): raise RuntimeError(f'unsupported message parser {repr(child_element.tag)}') parser(messageSchema, parent, child_element)
def parse_message_field(self, messageSchema, parent: FieldCollection, element) -> None: 'parse field Type' attributes = self.parse_common_attributes(element, attributes=FIELD_ATTRIBUTES_LIST) field = createField(**attributes) field.validate(messageSchema) parent.addField(field)
-1,781,270,552,327,019,000
parse field Type
pysbe/parser/fix_parser.py
parse_message_field
bkc/pysbe
python
def parse_message_field(self, messageSchema, parent: FieldCollection, element) -> None: attributes = self.parse_common_attributes(element, attributes=FIELD_ATTRIBUTES_LIST) field = createField(**attributes) field.validate(messageSchema) parent.addField(field)
def parse_message_group(self, messageSchema, parent: FieldCollection, element) -> None: 'parse group Type' attributes = self.parse_common_attributes(element, attributes=GROUP_ATTRIBUTES_LIST) group = createGroup(**attributes) group.validate(messageSchema) parent.addField(group) self.parse_field_children(messageSchema, group, element)
-5,951,012,801,918,320,000
parse group Type
pysbe/parser/fix_parser.py
parse_message_group
bkc/pysbe
python
def parse_message_group(self, messageSchema, parent: FieldCollection, element) -> None: attributes = self.parse_common_attributes(element, attributes=GROUP_ATTRIBUTES_LIST) group = createGroup(**attributes) group.validate(messageSchema) parent.addField(group) self.parse_field_children(messageSchema, group, element)
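And the message layout that parse_message / parse_field_children recurse over, again with invented names and ids; field and group are the child tags dispatched to parse_message_field and parse_message_group.

# Hypothetical <sbe:message> body; a group nests fields one level down.
MESSAGE_XML = '''
<sbe:message xmlns:sbe="http://fixprotocol.io/2016/sbe" name="NewOrder" id="1">
  <field name="price" id="10" type="int64"/>
  <group name="legs" id="20" dimensionType="groupSizeEncoding">
    <field name="legPrice" id="21" type="int64"/>
  </group>
</sbe:message>
'''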
def parse_duration(time_str, log=logging.getLogger('{}.time_utils'.format(LOG_PREFIX))): 'Parse a time string e.g. (2h13m) into a timedelta object\n https://stackoverflow.com/questions/4628122/how-to-construct-a-timedelta-object-from-a-simple-string\n\n Arguments:\n - time_str: A string identifying a duration. Use\n - d: days\n - h: hours\n - m: minutes\n - s: seconds\n All options are optional but at least one needs to be supplied. Float\n values are allowed (e.g. "1.5d" is the same as "1d12h"). Spaces\n between each field is allowed. Examples:\n - 1h 30m 45s\n - 1h05s\n - 55h 59m 12s\n - log: optional, logger object for logging a warning if the passed in\n string is not parsable. A "time_utils" logger will be used if not\n supplied.\n\n Returns:\n A ``datetime.timedelta`` object representing the supplied time duration\n or ``None`` if ``time_str`` cannot be parsed.\n ' parts = duration_regex.match(time_str) if (parts is None): log.warn("Could not parse any time information from '{}'. Examples of valid strings: '8h', '2d8h5m20s', '2m 4s'".format(time_str)) return None else: time_params = {name: float(param) for (name, param) in parts.groupdict().items() if param} return timedelta(**time_params)
-4,127,285,100,476,708,400
Parse a time string e.g. (2h13m) into a timedelta object https://stackoverflow.com/questions/4628122/how-to-construct-a-timedelta-object-from-a-simple-string Arguments: - time_str: A string identifying a duration. Use - d: days - h: hours - m: minutes - s: seconds All options are optional but at least one needs to be supplied. Float values are allowed (e.g. "1.5d" is the same as "1d12h"). Spaces between each field is allowed. Examples: - 1h 30m 45s - 1h05s - 55h 59m 12s - log: optional, logger object for logging a warning if the passed in string is not parsable. A "time_utils" logger will be used if not supplied. Returns: A ``datetime.timedelta`` object representing the supplied time duration or ``None`` if ``time_str`` cannot be parsed.
time_utils/automation/lib/python/community/time_utils.py
parse_duration
cherub-i/openhab-rules-tools
python
def parse_duration(time_str, log=logging.getLogger('{}.time_utils'.format(LOG_PREFIX))): 'Parse a time string e.g. (2h13m) into a timedelta object\n https://stackoverflow.com/questions/4628122/how-to-construct-a-timedelta-object-from-a-simple-string\n\n Arguments:\n - time_str: A string identifying a duration. Use\n - d: days\n - h: hours\n - m: minutes\n - s: seconds\n All options are optional but at least one needs to be supplied. Float\n values are allowed (e.g. "1.5d" is the same as "1d12h"). Spaces\n between each field is allowed. Examples:\n - 1h 30m 45s\n - 1h05s\n - 55h 59m 12s\n - log: optional, logger object for logging a warning if the passed in\n string is not parsable. A "time_utils" logger will be used if not\n supplied.\n\n Returns:\n A ``datetime.timedelta`` object representing the supplied time duration\n or ``None`` if ``time_str`` cannot be parsed.\n ' parts = duration_regex.match(time_str) if (parts is None): log.warn("Could not parse any time information from '{}'. Examples of valid strings: '8h', '2d8h5m20s', '2m 4s'".format(time_str)) return None else: time_params = {name: float(param) for (name, param) in parts.groupdict().items() if param} return timedelta(**time_params)
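duration_regex is defined elsewhere in the module; a plausible reconstruction from the docstring (optional d/h/m/s fields, float values, tolerated spaces) whose group names double as the timedelta keyword arguments:

import re
from datetime import timedelta

# Assumed shape of duration_regex; group names must be valid timedelta kwargs.
duration_regex = re.compile(
    r'^\s*'
    r'((?P<days>[\d.]+)\s*d)?\s*'
    r'((?P<hours>[\d.]+)\s*h)?\s*'
    r'((?P<minutes>[\d.]+)\s*m)?\s*'
    r'((?P<seconds>[\d.]+)\s*s)?\s*$'
)

parts = duration_regex.match('1h 30m 45s')
print(timedelta(**{k: float(v) for k, v in parts.groupdict().items() if v}))
# -> 1:30:45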
def delta_to_datetime(td): 'Takes a Python timedelta Object and converts it to a ZonedDateTime from now.\n\n Arguments:\n - td: The Python datetime.timedelta Object\n\n Returns:\n A ZonedDateTime td from now.\n ' return ZonedDateTime.now().plusDays(td.days).plusSeconds(td.seconds).plusNanos(((td.microseconds // 1000) * 1000000))
-1,148,001,927,989,460,700
Takes a Python timedelta Object and converts it to a ZonedDateTime from now. Arguments: - td: The Python datetime.timedelta Object Returns: A ZonedDateTime td from now.
time_utils/automation/lib/python/community/time_utils.py
delta_to_datetime
cherub-i/openhab-rules-tools
python
def delta_to_datetime(td): 'Takes a Python timedelta Object and converts it to a ZonedDateTime from now.\n\n Arguments:\n - td: The Python datetime.timedelta Object\n\n Returns:\n A ZonedDateTime td from now.\n ' return ZonedDateTime.now().plusDays(td.days).plusSeconds(td.seconds).plusNanos(((td.microseconds // 1000) * 1000000))
def parse_duration_to_datetime(time_str, log=logging.getLogger('{}.time_utils'.format(LOG_PREFIX))): 'Parses the passed in time string (see parse_duration) and returns a\n ZonedDateTime that amount of time from now.\n\n Arguments:\n - time_str: A string identifying a duration. See parse_duration above\n\n Returns:\n A ZonedDateTime time_str from now\n ' return delta_to_datetime(parse_duration(time_str, log))
2,824,344,283,550,933,000
Parses the passed in time string (see parse_duration) and returns a ZonedDateTime that amount of time from now. Arguments: - time_str: A string identifying a duration. See parse_duration above Returns: A ZonedDateTime time_str from now
time_utils/automation/lib/python/community/time_utils.py
parse_duration_to_datetime
cherub-i/openhab-rules-tools
python
def parse_duration_to_datetime(time_str, log=logging.getLogger('{}.time_utils'.format(LOG_PREFIX))): 'Parses the passed in time string (see parse_duration) and returns a\n ZonedDateTime that amount of time from now.\n\n Arguments:\n - time_str: A string identifying a duration. See parse_duration above\n\n Returns:\n A ZonedDateTime time_str from now\n ' return delta_to_datetime(parse_duration(time_str, log))
def is_iso8601(dt_str): 'Returns True if dt_str conforms to ISO 8601\n Arguments:\n - dt_str: the String to check\n Returns:\n True if dt_str conforms to ISO 8601 and False otherwise\n ' try: if (iso8601_regex.match(dt_str) is not None): return True except: pass return False
6,834,307,859,551,796,000
Returns True if dt_str conforms to ISO 8601 Arguments: - dt_str: the String to check Returns: True if dt_str conforms to ISO 8601 and False otherwise
time_utils/automation/lib/python/community/time_utils.py
is_iso8601
cherub-i/openhab-rules-tools
python
def is_iso8601(dt_str): 'Returns True if dt_str conforms to ISO 8601\n Arguments:\n - dt_str: the String to check\n Returns:\n True if dt_str conforms to ISO 8601 and False otherwise\n ' try: if (iso8601_regex.match(dt_str) is not None): return True except: pass return False
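iso8601_regex is likewise module-level and not shown; a plausible reconstruction (date, 'T', time, optional fraction and zone offset) rather than the repo's actual pattern:

import re

# Assumed shape of iso8601_regex: date, time, optional fraction and offset.
iso8601_regex = re.compile(
    r'^\d{4}-\d{2}-\d{2}'
    r'T\d{2}:\d{2}:\d{2}(\.\d+)?'
    r'(Z|[+-]\d{2}:?\d{2})?$'
)

for s in ('2021-03-01T12:00:00Z', '2021-03-01T12:00:00+01:00', 'tomorrow'):
    print(s, bool(iso8601_regex.match(s)))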
def to_datetime(when, log=logging.getLogger('{}.time_utils'.format(LOG_PREFIX)), output='Java'): "Based on what type when is, converts when to a Python DateTime object.\n Type:\n - int: returns now.plusMillis(when)\n - openHAB number type: returns now.plusMillis(when.intValue())\n - ISO8601 string: DateTime(when)\n - Duration definition: see parse_duration_to_datetime\n - java ZonedDateTime\n For python make sure the datetime object is not assigned to a variable when this function is called,\n otherwise a java.time.sql object will be returned due to a bug in Jython\n - Python datetime\n - Python time: returns DateTime with today date and system timezone\n\n Arguments:\n - when: the Object to convert to a DateTime\n - log: optional logger, when not supplied one is created for logging errors\n - output: object returned as a string. If not specified returns a ZonedDateTime object\n 'Python': return datetime object\n 'Java': return a ZonedDateTime object\n\n Returns:\n - ZonedDateTime specified by when\n - datetime specified by when if output = 'Python'\n - ZonedDateTime specified by when if output = 'Java'\n " log.debug(((('when is: ' + str(when)) + ' output is ') + str(output))) dt_python = None dt_java = None try: if isinstance(when, (str, unicode)): if is_iso8601(when): log.debug(('when is iso8601: ' + str(when))) dt_java = ZonedDateTime.parse(str(when)) else: log.debug(('when is duration: ' + str(when))) dt_python = (datetime.now() + parse_duration(when, log)) elif isinstance(when, int): log.debug(('when is int: ' + str(when))) dt_java = ZonedDateTime.now().plus(when, ChronoUnit.MILLIS) elif isinstance(when, scope.DateTimeType): log.debug(('when is DateTimeType: ' + str(when))) dt_java = when.getZonedDateTime() elif isinstance(when, (scope.DecimalType, scope.PercentType, scope.QuantityType)): log.debug(('when is decimal, percent or quantity type: ' + str(when))) dt_python = (datetime.now() + timedelta(milliseconds=when.intValue())) elif isinstance(when, datetime): log.debug(('when is datetime: ' + str(when))) dt_python = when elif isinstance(when, ZonedDateTime): log.debug(('when is ZonedDateTime: ' + str(when))) dt_java = when elif isinstance(when, time): log.debug(('when is python time object: ' + str(when))) dt_java = ZonedDateTime.now().withHour(when.hour).withMinute(when.minute).withSecond(when.second).withNano((when.microsecond * 1000)) else: log.warn('When is an unknown type {}'.format(type(when))) return None except: log.error('Exception: {}'.format(traceback.format_exc())) if (output == 'Python'): log.debug('returning dt python') return (dt_python if (dt_python is not None) else to_python_datetime(dt_java)) elif (output == 'Java'): log.debug('returning dt java') return (dt_java if (dt_java is not None) else to_java_zoneddatetime(dt_python)) elif (output == 'Joda'): log.error("to_datetime trying to return dt joda - use output = 'Python' or output = 'Java' instead") else: log.error('to_datetime cannot output [{}]'.format(output))
7,924,184,960,481,924,000
Based on what type when is, converts when to a Python DateTime object. Type: - int: returns now.plusMillis(when) - openHAB number type: returns now.plusMillis(when.intValue()) - ISO8601 string: DateTime(when) - Duration definition: see parse_duration_to_datetime - java ZonedDateTime For python make sure the datetime object is not assigned to a variable when this function is called, otherwise a java.time.sql object will be returned due to a bug in Jython - Python datetime - Python time: returns DateTime with today date and system timezone Arguments: - when: the Object to convert to a DateTime - log: optional logger, when not supplied one is created for logging errors - output: object returned as a string. If not specified returns a ZonedDateTime object 'Python': return datetime object 'Java': return a ZonedDateTime object Returns: - ZonedDateTime specified by when - datetime specified by when if output = 'Python' - ZonedDateTime specified by when if output = 'Java'
time_utils/automation/lib/python/community/time_utils.py
to_datetime
cherub-i/openhab-rules-tools
python
def to_datetime(when, log=logging.getLogger('{}.time_utils'.format(LOG_PREFIX)), output='Java'): "Based on what type when is, converts when to a Python DateTime object.\n Type:\n - int: returns now.plusMillis(when)\n - openHAB number type: returns now.plusMillis(when.intValue())\n - ISO8601 string: DateTime(when)\n - Duration definition: see parse_duration_to_datetime\n - java ZonedDateTime\n For python make sure the datetime object is not assigned to a variable when this function is called,\n otherwise a java.time.sql object will be returned due to a bug in Jython\n - Python datetime\n - Python time: returns DateTime with today date and system timezone\n\n Arguments:\n - when: the Object to convert to a DateTime\n - log: optional logger, when not supplied one is created for logging errors\n - output: object returned as a string. If not specified returns a ZonedDateTime object\n 'Python': return datetime object\n 'Java': return a ZonedDateTime object\n\n Returns:\n - ZonedDateTime specified by when\n - datetime specified by when if output = 'Python'\n - ZonedDateTime specified by when if output = 'Java'\n " log.debug(((('when is: ' + str(when)) + ' output is ') + str(output))) dt_python = None dt_java = None try: if isinstance(when, (str, unicode)): if is_iso8601(when): log.debug(('when is iso8601: ' + str(when))) dt_java = ZonedDateTime.parse(str(when)) else: log.debug(('when is duration: ' + str(when))) dt_python = (datetime.now() + parse_duration(when, log)) elif isinstance(when, int): log.debug(('when is int: ' + str(when))) dt_java = ZonedDateTime.now().plus(when, ChronoUnit.MILLIS) elif isinstance(when, scope.DateTimeType): log.debug(('when is DateTimeType: ' + str(when))) dt_java = when.getZonedDateTime() elif isinstance(when, (scope.DecimalType, scope.PercentType, scope.QuantityType)): log.debug(('when is decimal, percent or quantity type: ' + str(when))) dt_python = (datetime.now() + timedelta(milliseconds=when.intValue())) elif isinstance(when, datetime): log.debug(('when is datetime: ' + str(when))) dt_python = when elif isinstance(when, ZonedDateTime): log.debug(('when is ZonedDateTime: ' + str(when))) dt_java = when elif isinstance(when, time): log.debug(('when is python time object: ' + str(when))) dt_java = ZonedDateTime.now().withHour(when.hour).withMinute(when.minute).withSecond(when.second).withNano((when.microsecond * 1000)) else: log.warn('When is an unknown type {}'.format(type(when))) return None except: log.error('Exception: {}'.format(traceback.format_exc())) if (output == 'Python'): log.debug('returning dt python') return (dt_python if (dt_python is not None) else to_python_datetime(dt_java)) elif (output == 'Java'): log.debug('returning dt java') return (dt_java if (dt_java is not None) else to_java_zoneddatetime(dt_python)) elif (output == 'Joda'): log.error("to_datetime trying to return dt joda - use output = 'Python' or output = 'Java' instead") else: log.error('to_datetime cannot output [{}]'.format(output))
def to_today(when, log=logging.getLogger('{}.time_utils'.format(LOG_PREFIX)), output='Java'): "Takes a when (see to_datetime) and updates the date to today.\n Arguments:\n - when : One of the types or formats supported by to_datetime\n - log: optional logger, when not supplied one is created for logging errors\n Returns:\n - ZonedDateTime specified by when with today's date.\n - datetime specified by when with today's date if output = 'Python'\n - ZonedDateTime specified by when with today's date if output = 'Java'\n " log.debug(('output is: ' + str(output))) if (output == 'Python'): dt = to_datetime(when, log=log, output='Python') return datetime.combine(date.today(), dt.timetz()) elif (output == 'Java'): dt = to_datetime(when, log=log, output='Java') now = dt.now() return now.withHour(dt.getHour()).withMinute(dt.getMinute()).withSecond(dt.getSecond()).withNano(dt.getNano()) elif (output == 'Joda'): log.error("to_today trying to return dt joda - use output = 'Python' or output = 'Java' instead") else: log.error('to_today cannot output [{}]'.format(output))
-304,641,836,008,533,200
Takes a when (see to_datetime) and updates the date to today. Arguments: - when : One of the types or formats supported by to_datetime - log: optional logger, when not supplied one is created for logging errors Returns: - ZonedDateTime specified by when with today's date. - datetime specified by when with today's date if output = 'Python' - ZonedDateTime specified by when with today's date if output = 'Java'
time_utils/automation/lib/python/community/time_utils.py
to_today
cherub-i/openhab-rules-tools
python
def to_today(when, log=logging.getLogger('{}.time_utils'.format(LOG_PREFIX)), output='Java'): "Takes a when (see to_datetime) and updates the date to today.\n Arguments:\n - when : One of the types or formats supported by to_datetime\n - log: optional logger, when not supplied one is created for logging errors\n Returns:\n - ZonedDateTime specified by when with today's date.\n - datetime specified by when with today's date if output = 'Python'\n - ZonedDateTime specified by when with today's date if output = 'Java'\n " log.debug(('output is: ' + str(output))) if (output == 'Python'): dt = to_datetime(when, log=log, output='Python') return datetime.combine(date.today(), dt.timetz()) elif (output == 'Java'): dt = to_datetime(when, log=log, output='Java') now = dt.now() return now.withHour(dt.getHour()).withMinute(dt.getMinute()).withSecond(dt.getSecond()).withNano(dt.getNano()) elif (output == 'Joda'): log.error("to_today trying to return dt joda - use output = 'Python' or output = 'Java' instead") else: log.error('to_today cannot output [{}]'.format(output))
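A hedged usage sketch; these calls only run inside an openHAB Jython runtime where the module's ZonedDateTime and scope imports resolve, and the argument values are invented.

later = to_datetime('2h 30m')                    # ZonedDateTime 2.5 hours from now
as_py = to_datetime('2h 30m', output='Python')   # plain datetime instead
alarm = to_today('2021-03-01T06:45:00Z')         # keeps the time, moves the date to today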
def get_template(name): 'Given the name of a template (an entire folder in the directory here)\n Return the full path to the folder, with the intention to copy it somewhere.\n ' template = os.path.join(here, name) if os.path.exists(template): return template
-8,811,136,433,118,892,000
Given the name of a template (an entire folder in the directory here) Return the full path to the folder, with the intention to copy it somewhere.
gridtest/templates/__init__.py
get_template
khinsen/gridtest
python
def get_template(name): 'Given the name of a template (an entire folder in the directory here)\n Return the full path to the folder, with the intention to copy it somewhere.\n ' template = os.path.join(here, name) if os.path.exists(template): return template
def copy_template(name, dest): 'Given a template name and a destination directory, copy the template\n to the desination directory.\n ' template = get_template(name) dest_dir = os.path.dirname(dest) if (template and os.path.exists(dest_dir)): shutil.copytree(template, dest) return dest
5,936,665,553,016,513,000
Given a template name and a destination directory, copy the template to the destination directory.
gridtest/templates/__init__.py
copy_template
khinsen/gridtest
python
def copy_template(name, dest): 'Given a template name and a destination directory, copy the template\n to the destination directory.\n ' template = get_template(name) dest_dir = os.path.dirname(dest) if (template and os.path.exists(dest_dir)): shutil.copytree(template, dest) return dest
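Usage sketch for the pair; the template name 'python' is an assumption (any folder shipped under gridtest/templates qualifies), and copy_template requires the destination's parent to exist.

from gridtest.templates import get_template, copy_template

src = get_template('python')   # full path, or None if no such folder ships
if src:
    dest = copy_template('python', '/tmp/my-gridtest')  # must not already exist
    print('copied to', dest)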
def draw_what_sheet(image: Image.Image) -> None: 'Draw a calendar page for a WHAT display.\n\n Args:\n image: The image to be drawn on to\n\n ' draw = ImageDraw.Draw(image) draw.line([(7, 3), (392, 3)], fill=1) for line in range(8): draw.line([(((line * 55) + 7), 3), (((line * 55) + 7), 296)], fill=1) for line in range(7): draw.line([(7, ((line * 45) + 26)), (392, ((line * 45) + 26))], fill=1)
-982,108,587,062,840,300
Draw a calendar page for a WHAT display. Args: image: The image to be drawn on to
inky_calendar.py
draw_what_sheet
nukes327/inky_monitor
python
def draw_what_sheet(image: Image.Image) -> None: 'Draw a calendar page for a WHAT display.\n\n Args:\n image: The image to be drawn on to\n\n ' draw = ImageDraw.Draw(image) draw.line([(7, 3), (392, 3)], fill=1) for line in range(8): draw.line([(((line * 55) + 7), 3), (((line * 55) + 7), 296)], fill=1) for line in range(7): draw.line([(7, ((line * 45) + 26)), (392, ((line * 45) + 26))], fill=1)
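A usage sketch: the Inky wHAT panel is 400x300, which fits the 392x296 grid drawn above; mode 'P' with palette index 1 for ink is an assumption about this project's conventions.

from PIL import Image

img = Image.new('P', (400, 300), color=0)  # background at palette index 0
draw_what_sheet(img)                       # calendar grid drawn in index 1
img.save('calendar_grid.png')              # or pass to the display driver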
def get_shape(tensor, dynamic=False): ' Return shape of the input tensor without batch size.\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n dynamic : bool\n If True, returns tensor which represents shape. If False, returns list of ints and/or Nones.\n\n Returns\n -------\n shape : tf.Tensor or list\n ' if dynamic: shape = tf.shape(tensor) else: shape = tensor.get_shape().as_list() return shape[1:]
8,010,232,841,342,494,000
Return shape of the input tensor without batch size. Parameters ---------- tensor : tf.Tensor dynamic : bool If True, returns tensor which represents shape. If False, returns list of ints and/or Nones. Returns ------- shape : tf.Tensor or list
batchflow/models/tf/utils.py
get_shape
bestetc/batchflow
python
def get_shape(tensor, dynamic=False): ' Return shape of the input tensor without batch size.\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n dynamic : bool\n If True, returns tensor which represents shape. If False, returns list of ints and/or Nones.\n\n Returns\n -------\n shape : tf.Tensor or list\n ' if dynamic: shape = tf.shape(tensor) else: shape = tensor.get_shape().as_list() return shape[1:]
def get_num_dims(tensor): ' Return the number of semantic dimensions (i.e. excluding batch and channels axes)' shape = get_shape(tensor) dim = len(shape) return max(1, (dim - 2))
505,323,808,096,459,400
Return the number of semantic dimensions (i.e. excluding batch and channels axes)
batchflow/models/tf/utils.py
get_num_dims
bestetc/batchflow
python
def get_num_dims(tensor): ' ' shape = get_shape(tensor) dim = len(shape) return max(1, (dim - 2))
def get_channels_axis(data_format='channels_last'): ' Return the integer channels axis based on string data format. ' return (1 if ((data_format == 'channels_first') or data_format.startswith('NC')) else (- 1))
1,287,573,240,049,828,000
Return the integer channels axis based on string data format.
batchflow/models/tf/utils.py
get_channels_axis
bestetc/batchflow
python
def get_channels_axis(data_format='channels_last'): ' ' return (1 if ((data_format == 'channels_first') or data_format.startswith('NC')) else (- 1))
def get_num_channels(tensor, data_format='channels_last'): ' Return number of channels in the input tensor.\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n Returns\n -------\n shape : tuple of ints\n ' shape = tensor.get_shape().as_list() axis = get_channels_axis(data_format) return shape[axis]
8,800,103,316,646,686
Return number of channels in the input tensor. Parameters ---------- tensor : tf.Tensor Returns ------- shape : tuple of ints
batchflow/models/tf/utils.py
get_num_channels
bestetc/batchflow
python
def get_num_channels(tensor, data_format='channels_last'): ' Return number of channels in the input tensor.\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n Returns\n -------\n shape : tuple of ints\n ' shape = tensor.get_shape().as_list() axis = get_channels_axis(data_format) return shape[axis]
def get_batch_size(tensor, dynamic=False): ' Return batch size (the length of the first dimension) of the input tensor.\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n Returns\n -------\n batch size : int or None\n ' if dynamic: return tf.shape(tensor)[0] return tensor.get_shape().as_list()[0]
3,089,443,516,940,477,000
Return batch size (the length of the first dimension) of the input tensor. Parameters ---------- tensor : tf.Tensor Returns ------- batch size : int or None
batchflow/models/tf/utils.py
get_batch_size
bestetc/batchflow
python
def get_batch_size(tensor, dynamic=False): ' Return batch size (the length of the first dimension) of the input tensor.\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n Returns\n -------\n batch size : int or None\n ' if dynamic: return tf.shape(tensor)[0] return tensor.get_shape().as_list()[0]
def get_spatial_dim(tensor): ' Return spatial dim of the input tensor (without channels and batch dimension).\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n Returns\n -------\n dim : int\n ' return (len(tensor.get_shape().as_list()) - 2)
7,268,385,630,642,926,000
Return spatial dim of the input tensor (without channels and batch dimension). Parameters ---------- tensor : tf.Tensor Returns ------- dim : int
batchflow/models/tf/utils.py
get_spatial_dim
bestetc/batchflow
python
def get_spatial_dim(tensor): ' Return spatial dim of the input tensor (without channels and batch dimension).\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n Returns\n -------\n dim : int\n ' return (len(tensor.get_shape().as_list()) - 2)
def get_spatial_shape(tensor, data_format='channels_last', dynamic=False): ' Return the tensor spatial shape (without batch and channels dimensions).\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n dynamic : bool\n If True, returns tensor which represents shape. If False, returns list of ints and/or Nones.\n\n Returns\n -------\n shape : tf.Tensor or list\n ' if dynamic: shape = tf.shape(tensor) else: shape = tensor.get_shape().as_list() axis = (slice(1, (- 1)) if (data_format == 'channels_last') else slice(2, None)) return shape[axis]
1,553,030,828,239,486
Return the tensor spatial shape (without batch and channels dimensions). Parameters ---------- tensor : tf.Tensor dynamic : bool If True, returns tensor which represents shape. If False, returns list of ints and/or Nones. Returns ------- shape : tf.Tensor or list
batchflow/models/tf/utils.py
get_spatial_shape
bestetc/batchflow
python
def get_spatial_shape(tensor, data_format='channels_last', dynamic=False): ' Return the tensor spatial shape (without batch and channels dimensions).\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n dynamic : bool\n If True, returns tensor which represents shape. If False, returns list of ints and/or Nones.\n\n Returns\n -------\n shape : tf.Tensor or list\n ' if dynamic: shape = tf.shape(tensor) else: shape = tensor.get_shape().as_list() axis = (slice(1, (- 1)) if (data_format == 'channels_last') else slice(2, None)) return shape[axis]
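How the helpers compose, sketched with a TF 1.x placeholder (the utilities use tensor.get_shape() and tf.shape, i.e. graph mode); shapes are illustrative.

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(None, 64, 64, 3))   # NHWC image batch
print(get_shape(x))                        # [64, 64, 3] -- batch size dropped
print(get_channels_axis('channels_last'))  # -1
print(get_num_channels(x))                 # 3
print(get_spatial_dim(x))                  # 2
print(get_spatial_shape(x))                # [64, 64]
print(get_batch_size(x))                   # None statically; a tensor if dynamic=True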
def stub_out_db_instance_api(stubs): 'Stubs out the db API for creating Instances.' INSTANCE_TYPES = {'m1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1), 'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2), 'm1.medium': dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3), 'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4), 'm1.xlarge': dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)} class FakeModel(object): 'Stubs out for model.' def __init__(self, values): self.values = values def __getattr__(self, name): return self.values[name] def __getitem__(self, key): if (key in self.values): return self.values[key] else: raise NotImplementedError() def fake_instance_create(context, values): 'Stubs out the db.instance_create method.' type_data = INSTANCE_TYPES[values['instance_type']] base_options = {'name': values['name'], 'id': values['id'], 'uuid': utils.gen_uuid(), 'reservation_id': utils.generate_uid('r'), 'image_ref': values['image_ref'], 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], 'vm_state': vm_states.BUILDING, 'task_state': task_states.SCHEDULING, 'user_id': values['user_id'], 'project_id': values['project_id'], 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), 'instance_type': values['instance_type'], 'memory_mb': type_data['memory_mb'], 'vcpus': type_data['vcpus'], 'mac_addresses': [{'address': values['mac_address']}], 'root_gb': type_data['root_gb']} return FakeModel(base_options) def fake_network_get_by_instance(context, instance_id): 'Stubs out the db.network_get_by_instance method.' fields = {'bridge': 'vmnet0', 'netmask': '255.255.255.0', 'gateway': '10.10.10.1', 'broadcast': '10.10.10.255', 'dns1': 'fake', 'vlan': 100} return FakeModel(fields) def fake_instance_type_get_all(context, inactive=0, filters=None): return INSTANCE_TYPES.values() def fake_instance_type_get_by_name(context, name): return INSTANCE_TYPES[name] stubs.Set(db, 'instance_create', fake_instance_create) stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance) stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all) stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
3,721,215,118,227,447,300
Stubs out the db API for creating Instances.
nova/tests/vmwareapi/db_fakes.py
stub_out_db_instance_api
bopopescu/openstack-12
python
def stub_out_db_instance_api(stubs): INSTANCE_TYPES = {'m1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1), 'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2), 'm1.medium': dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3), 'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4), 'm1.xlarge': dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)} class FakeModel(object): 'Stubs out for model.' def __init__(self, values): self.values = values def __getattr__(self, name): return self.values[name] def __getitem__(self, key): if (key in self.values): return self.values[key] else: raise NotImplementedError() def fake_instance_create(context, values): 'Stubs out the db.instance_create method.' type_data = INSTANCE_TYPES[values['instance_type']] base_options = {'name': values['name'], 'id': values['id'], 'uuid': utils.gen_uuid(), 'reservation_id': utils.generate_uid('r'), 'image_ref': values['image_ref'], 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], 'vm_state': vm_states.BUILDING, 'task_state': task_states.SCHEDULING, 'user_id': values['user_id'], 'project_id': values['project_id'], 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), 'instance_type': values['instance_type'], 'memory_mb': type_data['memory_mb'], 'vcpus': type_data['vcpus'], 'mac_addresses': [{'address': values['mac_address']}], 'root_gb': type_data['root_gb']} return FakeModel(base_options) def fake_network_get_by_instance(context, instance_id): 'Stubs out the db.network_get_by_instance method.' fields = {'bridge': 'vmnet0', 'netmask': '255.255.255.0', 'gateway': '10.10.10.1', 'broadcast': '10.10.10.255', 'dns1': 'fake', 'vlan': 100} return FakeModel(fields) def fake_instance_type_get_all(context, inactive=0, filters=None): return INSTANCE_TYPES.values() def fake_instance_type_get_by_name(context, name): return INSTANCE_TYPES[name] stubs.Set(db, 'instance_create', fake_instance_create) stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance) stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all) stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
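How the stub is typically installed, assuming the mox stubout harness the old Nova test suite used; everything here is a sketch.

import stubout  # ships with mox; an assumption about the harness in use

stubs = stubout.StubOutForTesting()
stub_out_db_instance_api(stubs)
# db.instance_create and friends now return FakeModel data for the test
stubs.UnsetAll()  # restore the real db API afterwards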
def fake_instance_create(context, values): 'Stubs out the db.instance_create method.' type_data = INSTANCE_TYPES[values['instance_type']] base_options = {'name': values['name'], 'id': values['id'], 'uuid': utils.gen_uuid(), 'reservation_id': utils.generate_uid('r'), 'image_ref': values['image_ref'], 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], 'vm_state': vm_states.BUILDING, 'task_state': task_states.SCHEDULING, 'user_id': values['user_id'], 'project_id': values['project_id'], 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), 'instance_type': values['instance_type'], 'memory_mb': type_data['memory_mb'], 'vcpus': type_data['vcpus'], 'mac_addresses': [{'address': values['mac_address']}], 'root_gb': type_data['root_gb']} return FakeModel(base_options)
-2,338,449,893,014,719,500
Stubs out the db.instance_create method.
nova/tests/vmwareapi/db_fakes.py
fake_instance_create
bopopescu/openstack-12
python
def fake_instance_create(context, values): type_data = INSTANCE_TYPES[values['instance_type']] base_options = {'name': values['name'], 'id': values['id'], 'uuid': utils.gen_uuid(), 'reservation_id': utils.generate_uid('r'), 'image_ref': values['image_ref'], 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], 'vm_state': vm_states.BUILDING, 'task_state': task_states.SCHEDULING, 'user_id': values['user_id'], 'project_id': values['project_id'], 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), 'instance_type': values['instance_type'], 'memory_mb': type_data['memory_mb'], 'vcpus': type_data['vcpus'], 'mac_addresses': [{'address': values['mac_address']}], 'root_gb': type_data['root_gb']} return FakeModel(base_options)
def fake_network_get_by_instance(context, instance_id): 'Stubs out the db.network_get_by_instance method.' fields = {'bridge': 'vmnet0', 'netmask': '255.255.255.0', 'gateway': '10.10.10.1', 'broadcast': '10.10.10.255', 'dns1': 'fake', 'vlan': 100} return FakeModel(fields)
-3,446,393,075,820,079,000
Stubs out the db.network_get_by_instance method.
nova/tests/vmwareapi/db_fakes.py
fake_network_get_by_instance
bopopescu/openstack-12
python
def fake_network_get_by_instance(context, instance_id): fields = {'bridge': 'vmnet0', 'netmask': '255.255.255.0', 'gateway': '10.10.10.1', 'broadcast': '10.10.10.255', 'dns1': 'fake', 'vlan': 100} return FakeModel(fields)
def test_udp_port(self): ' test UDP ports\n Check if there are no udp listeners before gtpu is enabled\n ' self._check_udp_port_ip4(False) self._check_udp_port_ip6(False) r = self.vapi.gtpu_add_del_tunnel(is_add=True, mcast_sw_if_index=4294967295, decap_next_index=4294967295, src_address=self.pg0.local_ip4, dst_address=self.pg0.remote_ip4) self._check_udp_port_ip4() r = self.vapi.gtpu_add_del_tunnel(is_add=True, mcast_sw_if_index=4294967295, decap_next_index=4294967295, src_address=self.pg0.local_ip6, dst_address=self.pg0.remote_ip6) self._check_udp_port_ip6() r = self.vapi.gtpu_add_del_tunnel(is_add=False, mcast_sw_if_index=4294967295, decap_next_index=4294967295, src_address=self.pg0.local_ip4, dst_address=self.pg0.remote_ip4) r = self.vapi.gtpu_add_del_tunnel(is_add=False, mcast_sw_if_index=4294967295, decap_next_index=4294967295, src_address=self.pg0.local_ip6, dst_address=self.pg0.remote_ip6)
573,172,237,140,440,800
test UDP ports Check if there are no udp listeners before gtpu is enabled
test/test_gtpu.py
test_udp_port
B4dM4n/vpp
python
def test_udp_port(self): ' test UDP ports\n Check if there are no udp listeners before gtpu is enabled\n ' self._check_udp_port_ip4(False) self._check_udp_port_ip6(False) r = self.vapi.gtpu_add_del_tunnel(is_add=True, mcast_sw_if_index=4294967295, decap_next_index=4294967295, src_address=self.pg0.local_ip4, dst_address=self.pg0.remote_ip4) self._check_udp_port_ip4() r = self.vapi.gtpu_add_del_tunnel(is_add=True, mcast_sw_if_index=4294967295, decap_next_index=4294967295, src_address=self.pg0.local_ip6, dst_address=self.pg0.remote_ip6) self._check_udp_port_ip6() r = self.vapi.gtpu_add_del_tunnel(is_add=False, mcast_sw_if_index=4294967295, decap_next_index=4294967295, src_address=self.pg0.local_ip4, dst_address=self.pg0.remote_ip4) r = self.vapi.gtpu_add_del_tunnel(is_add=False, mcast_sw_if_index=4294967295, decap_next_index=4294967295, src_address=self.pg0.local_ip6, dst_address=self.pg0.remote_ip6)
def encapsulate(self, pkt, vni): '\n Encapsulate the original payload frame by adding GTPU header with its\n UDP, IP and Ethernet fields\n ' return ((((Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) / IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4)) / UDP(sport=self.dport, dport=self.dport, chksum=0)) / GTP_U_Header(teid=vni, gtp_type=self.gtp_type, length=150)) / pkt)
8,759,027,516,574,093,000
Encapsulate the original payload frame by adding GTPU header with its UDP, IP and Ethernet fields
test/test_gtpu.py
encapsulate
B4dM4n/vpp
python
def encapsulate(self, pkt, vni): '\n Encapsulate the original payload frame by adding GTPU header with its\n UDP, IP and Ethernet fields\n ' return ((((Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) / IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4)) / UDP(sport=self.dport, dport=self.dport, chksum=0)) / GTP_U_Header(teid=vni, gtp_type=self.gtp_type, length=150)) / pkt)
def ip_range(self, start, end): " range of remote IPs " return ip4_range(self.pg0.remote_ip4, start, end)
1,119,192,004,210,344,300
range of remote IPs
test/test_gtpu.py
ip_range
B4dM4n/vpp
python
def ip_range(self, start, end): " " return ip4_range(self.pg0.remote_ip4, start, end)
def encap_mcast(self, pkt, src_ip, src_mac, vni): '\n Encapsulate the original payload frame by adding GTPU header with its\n UDP, IP and Ethernet fields\n ' return ((((Ether(src=src_mac, dst=self.mcast_mac) / IP(src=src_ip, dst=self.mcast_ip4)) / UDP(sport=self.dport, dport=self.dport, chksum=0)) / GTP_U_Header(teid=vni, gtp_type=self.gtp_type, length=150)) / pkt)
-5,837,505,097,817,672,000
Encapsulate the original payload frame by adding GTPU header with its UDP, IP and Ethernet fields
test/test_gtpu.py
encap_mcast
B4dM4n/vpp
python
def encap_mcast(self, pkt, src_ip, src_mac, vni): '\n Encapsulate the original payload frame by adding GTPU header with its\n UDP, IP and Ethernet fields\n ' return ((((Ether(src=src_mac, dst=self.mcast_mac) / IP(src=src_ip, dst=self.mcast_ip4)) / UDP(sport=self.dport, dport=self.dport, chksum=0)) / GTP_U_Header(teid=vni, gtp_type=self.gtp_type, length=150)) / pkt)
def decapsulate(self, pkt): '\n Decapsulate the original payload frame by removing GTPU header\n ' return pkt[GTP_U_Header].payload
-5,324,461,435,278,782,000
Decapsulate the original payload frame by removing GTPU header
test/test_gtpu.py
decapsulate
B4dM4n/vpp
python
def decapsulate(self, pkt): '\n \n ' return pkt[GTP_U_Header].payload
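A scapy round trip mirroring encapsulate/decapsulate outside the test harness; the addresses and teid are invented, 2152 is the GTP-U port, and gtp_type 255 is a G-PDU.

from scapy.contrib.gtp import GTP_U_Header
from scapy.layers.inet import IP, UDP
from scapy.layers.l2 import Ether

inner = Ether() / IP(dst='10.0.0.1') / UDP() / b'payload'
outer = (Ether(src='02:00:00:00:00:01', dst='02:00:00:00:00:02') /
         IP(src='192.168.1.1', dst='192.168.1.2') /
         UDP(sport=2152, dport=2152, chksum=0) /
         GTP_U_Header(teid=42, gtp_type=255) /
         inner)
decapped = outer[GTP_U_Header].payload      # what decapsulate() returns
assert bytes(decapped) == bytes(inner)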
def test_encap(self): ' Encapsulation test\n Send frames from pg1\n Verify receipt of encapsulated frames on pg0\n ' self.pg1.add_stream([self.frame_reply]) self.pg0.enable_capture() self.pg_start() out = self.pg0.get_capture(1) pkt = out[0] self.check_encapsulation(pkt, self.single_tunnel_vni)
-1,844,543,995,700,587,500
Encapsulation test Send frames from pg1 Verify receipt of encapsulated frames on pg0
test/test_gtpu.py
test_encap
B4dM4n/vpp
python
def test_encap(self): ' Encapsulation test\n Send frames from pg1\n Verify receipt of encapsulated frames on pg0\n ' self.pg1.add_stream([self.frame_reply]) self.pg0.enable_capture() self.pg_start() out = self.pg0.get_capture(1) pkt = out[0] self.check_encapsulation(pkt, self.single_tunnel_vni)
def test_ucast_flood(self): ' Unicast flood test\n Send frames from pg3\n Verify receipt of encapsulated frames on pg0\n ' self.pg3.add_stream([self.frame_reply]) self.pg0.enable_capture() self.pg_start() out = self.pg0.get_capture(self.n_ucast_tunnels) for pkt in out: self.check_encapsulation(pkt, self.ucast_flood_bd, True)
-7,930,271,950,726,271,000
Unicast flood test Send frames from pg3 Verify receipt of encapsulated frames on pg0
test/test_gtpu.py
test_ucast_flood
B4dM4n/vpp
python
def test_ucast_flood(self): ' Unicast flood test\n Send frames from pg3\n Verify receipt of encapsulated frames on pg0\n ' self.pg3.add_stream([self.frame_reply]) self.pg0.enable_capture() self.pg_start() out = self.pg0.get_capture(self.n_ucast_tunnels) for pkt in out: self.check_encapsulation(pkt, self.ucast_flood_bd, True)
def test_mcast_flood(self): ' Multicast flood test\n Send frames from pg2\n Verify receipt of encapsulated frames on pg0\n ' self.pg2.add_stream([self.frame_reply]) self.pg0.enable_capture() self.pg_start() out = self.pg0.get_capture(1) pkt = out[0] self.check_encapsulation(pkt, self.mcast_flood_bd, local_only=False, mcast_pkt=True)
-8,496,937,217,177,894,000
Multicast flood test Send frames from pg2 Verify receipt of encapsulated frames on pg0
test/test_gtpu.py
test_mcast_flood
B4dM4n/vpp
python
def test_mcast_flood(self): ' Multicast flood test\n Send frames from pg2\n Verify receipt of encapsulated frames on pg0\n ' self.pg2.add_stream([self.frame_reply]) self.pg0.enable_capture() self.pg_start() out = self.pg0.get_capture(1) pkt = out[0] self.check_encapsulation(pkt, self.mcast_flood_bd, local_only=False, mcast_pkt=True)
@classmethod def add_del_shared_mcast_dst_load(cls, is_add): '\n add or del tunnels sharing the same mcast dst\n to test gtpu ref_count mechanism\n ' n_shared_dst_tunnels = 20 teid_start = 1000 teid_end = (teid_start + n_shared_dst_tunnels) for teid in range(teid_start, teid_end): r = cls.vapi.gtpu_add_del_tunnel(decap_next_index=4294967295, src_address=cls.pg0.local_ip4, dst_address=cls.mcast_ip4, mcast_sw_if_index=1, teid=teid, is_add=is_add) if (r.sw_if_index == 4294967295): raise ValueError('bad sw_if_index: ~0')
-6,297,277,622,027,447,000
add or del tunnels sharing the same mcast dst to test gtpu ref_count mechanism
test/test_gtpu.py
add_del_shared_mcast_dst_load
B4dM4n/vpp
python
@classmethod def add_del_shared_mcast_dst_load(cls, is_add): '\n add or del tunnels sharing the same mcast dst\n to test gtpu ref_count mechanism\n ' n_shared_dst_tunnels = 20 teid_start = 1000 teid_end = (teid_start + n_shared_dst_tunnels) for teid in range(teid_start, teid_end): r = cls.vapi.gtpu_add_del_tunnel(decap_next_index=4294967295, src_address=cls.pg0.local_ip4, dst_address=cls.mcast_ip4, mcast_sw_if_index=1, teid=teid, is_add=is_add) if (r.sw_if_index == 4294967295): raise ValueError('bad sw_if_index: ~0')
@classmethod def add_del_mcast_tunnels_load(cls, is_add): '\n add or del tunnels to test gtpu stability\n ' n_distinct_dst_tunnels = 20 ip_range_start = 10 ip_range_end = (ip_range_start + n_distinct_dst_tunnels) for dest_ip4 in ip4_range(cls.mcast_ip4, ip_range_start, ip_range_end): teid = int(dest_ip4.split('.')[3]) cls.vapi.gtpu_add_del_tunnel(decap_next_index=4294967295, src_address=cls.pg0.local_ip4, dst_address=dest_ip4, mcast_sw_if_index=1, teid=teid, is_add=is_add)
-505,654,480,933,667,500
add or del tunnels to test gtpu stability
test/test_gtpu.py
add_del_mcast_tunnels_load
B4dM4n/vpp
python
@classmethod def add_del_mcast_tunnels_load(cls, is_add): '\n \n ' n_distinct_dst_tunnels = 20 ip_range_start = 10 ip_range_end = (ip_range_start + n_distinct_dst_tunnels) for dest_ip4 in ip4_range(cls.mcast_ip4, ip_range_start, ip_range_end): teid = int(dest_ip4.split('.')[3]) cls.vapi.gtpu_add_del_tunnel(decap_next_index=4294967295, src_address=cls.pg0.local_ip4, dst_address=dest_ip4, mcast_sw_if_index=1, teid=teid, is_add=is_add)
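A sketch of how the two load helpers above are typically driven from a test method; the method name and the add/del sequencing are illustrative assumptions.

def test_mcast_load(self):
    # 20 tunnels sharing one mcast dst: exercises the gtpu ref_count path
    self.add_del_shared_mcast_dst_load(is_add=1)
    self.add_del_shared_mcast_dst_load(is_add=0)
    # 20 tunnels with distinct mcast dsts: exercises add/del stability
    self.add_del_mcast_tunnels_load(is_add=1)
    self.add_del_mcast_tunnels_load(is_add=0)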
def default_handlers(handlers=[], **handler_names): 'Tornado handlers' gist_handler = _load_handler_from_location(handler_names['gist_handler']) user_gists_handler = _load_handler_from_location(handler_names['user_gists_handler']) return (handlers + [('/gist/([^\\/]+/)?([0-9]+|[0-9a-f]{20,})', gist_handler, {}), ('/gist/([^\\/]+/)?([0-9]+|[0-9a-f]{20,})/(?:files/)?(.*)', gist_handler, {}), ('/([0-9]+|[0-9a-f]{20,})', GistRedirectHandler, {}), ('/([0-9]+|[0-9a-f]{20,})/(.*)', GistRedirectHandler, {}), ('/gist/([^\\/]+)/?', user_gists_handler, {})])
6,064,056,627,304,257,000
Tornado handlers
nbviewer/providers/gist/handlers.py
default_handlers
cybergis/nbviewer
python
def default_handlers(handlers=[], **handler_names): gist_handler = _load_handler_from_location(handler_names['gist_handler']) user_gists_handler = _load_handler_from_location(handler_names['user_gists_handler']) return (handlers + [('/gist/([^\\/]+/)?([0-9]+|[0-9a-f]{20,})', gist_handler, {}), ('/gist/([^\\/]+/)?([0-9]+|[0-9a-f]{20,})/(?:files/)?(.*)', gist_handler, {}), ('/([0-9]+|[0-9a-f]{20,})', GistRedirectHandler, {}), ('/([0-9]+|[0-9a-f]{20,})/(.*)', GistRedirectHandler, {}), ('/gist/([^\\/]+)/?', user_gists_handler, {})])
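A minimal wiring sketch for the handler table above. The dotted handler locations are assumptions about where nbviewer keeps its classes; default_handlers resolves them through _load_handler_from_location, and Tornado accepts the resulting (pattern, handler, kwargs) triples directly.

import tornado.ioloop
import tornado.web

handlers = default_handlers(
    gist_handler='nbviewer.providers.gist.handlers.GistHandler',             # assumed location
    user_gists_handler='nbviewer.providers.gist.handlers.UserGistsHandler',  # assumed location
)
app = tornado.web.Application(handlers)
app.listen(8888)  # arbitrary local port
# tornado.ioloop.IOLoop.current().start()  # uncomment to actually serve requests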
def render_usergists_template(self, entries, user, provider_url, prev_url, next_url, **namespace): '\n provider_url: str\n URL to the notebook document upstream at the provider (e.g., GitHub)\n executor_url: str, optional (kwarg passed into `namespace`)\n URL to execute the notebook document (e.g., Binder)\n ' return self.render_template('usergists.html', entries=entries, user=user, provider_url=provider_url, prev_url=prev_url, next_url=next_url, **self.PROVIDER_CTX, **namespace)
-1,520,432,892,554,863,600
provider_url: str URL to the notebook document upstream at the provider (e.g., GitHub) executor_url: str, optional (kwarg passed into `namespace`) URL to execute the notebook document (e.g., Binder)
nbviewer/providers/gist/handlers.py
render_usergists_template
cybergis/nbviewer
python
def render_usergists_template(self, entries, user, provider_url, prev_url, next_url, **namespace): '\n provider_url: str\n URL to the notebook document upstream at the provider (e.g., GitHub)\n executor_url: str, optional (kwarg passed into `namespace`)\n URL to execute the notebook document (e.g., Binder)\n ' return self.render_template('usergists.html', entries=entries, user=user, provider_url=provider_url, prev_url=prev_url, next_url=next_url, **self.PROVIDER_CTX, **namespace)
async def tree_get(self, user, gist_id, gist, files): '\n user, gist_id, gist, and files are (most) of the values returned by parse_gist\n ' entries = [] ipynbs = [] others = [] for file in files.values(): e = {} e['name'] = file['filename'] if file['filename'].endswith('.ipynb'): e['url'] = quote(('/%s/%s' % (gist_id, file['filename']))) e['class'] = 'fa-book' ipynbs.append(e) else: if (self.github_url == 'https://github.com/'): gist_base_url = 'https://gist.github.com/' else: gist_base_url = url_path_join(self.github_url, 'gist/') provider_url = url_path_join(gist_base_url, '{user}/{gist_id}#file-{clean_name}'.format(user=user, gist_id=gist_id, clean_name=clean_filename(file['filename']))) e['url'] = provider_url e['class'] = 'fa-share' others.append(e) entries.extend(ipynbs) entries.extend(others) executor_url = (self.BINDER_TMPL.format(binder_base_url=self.binder_base_url, user=user.rstrip('/'), gist_id=gist_id) if self.binder_base_url else None) html = self.render_template('treelist.html', entries=entries, tree_type='gist', tree_label='gists', user=user.rstrip('/'), provider_url=gist['html_url'], executor_url=executor_url, **self.PROVIDER_CTX) (await self.cache_and_finish(html))
5,717,998,368,181,414,000
user, gist_id, gist, and files are (most) of the values returned by parse_gist
nbviewer/providers/gist/handlers.py
tree_get
cybergis/nbviewer
python
async def tree_get(self, user, gist_id, gist, files): '\n \n ' entries = [] ipynbs = [] others = [] for file in files.values(): e = {} e['name'] = file['filename'] if file['filename'].endswith('.ipynb'): e['url'] = quote(('/%s/%s' % (gist_id, file['filename']))) e['class'] = 'fa-book' ipynbs.append(e) else: if (self.github_url == 'https://github.com/'): gist_base_url = 'https://gist.github.com/' else: gist_base_url = url_path_join(self.github_url, 'gist/') provider_url = url_path_join(gist_base_url, '{user}/{gist_id}#file-{clean_name}'.format(user=user, gist_id=gist_id, clean_name=clean_filename(file['filename']))) e['url'] = provider_url e['class'] = 'fa-share' others.append(e) entries.extend(ipynbs) entries.extend(others) executor_url = (self.BINDER_TMPL.format(binder_base_url=self.binder_base_url, user=user.rstrip('/'), gist_id=gist_id) if self.binder_base_url else None) html = self.render_template('treelist.html', entries=entries, tree_type='gist', tree_label='gists', user=user.rstrip('/'), provider_url=gist['html_url'], executor_url=executor_url, **self.PROVIDER_CTX) (await self.cache_and_finish(html))
async def get_notebook_data(self, gist_id, filename, many_files_gist, file): '\n gist_id, filename, many_files_gist, file are all passed to file_get\n ' if (file['type'] or '').startswith('image/'): self.log.debug('Fetching raw image (%s) %s/%s: %s', file['type'], gist_id, filename, file['raw_url']) response = (await self.fetch(file['raw_url'])) content = response.body elif file['truncated']: self.log.debug('Gist %s/%s truncated, fetching %s', gist_id, filename, file['raw_url']) response = (await self.fetch(file['raw_url'])) content = response_text(response, encoding='utf-8') else: content = file['content'] if (many_files_gist and (not filename.endswith('.ipynb'))): self.set_header('Content-Type', (file.get('type') or 'text/plain')) self.finish(content) return else: return content
-7,604,107,273,506,377,000
gist_id, filename, many_files_gist, file are all passed to file_get
nbviewer/providers/gist/handlers.py
get_notebook_data
cybergis/nbviewer
python
async def get_notebook_data(self, gist_id, filename, many_files_gist, file): '\n \n ' if (file['type'] or '').startswith('image/'): self.log.debug('Fetching raw image (%s) %s/%s: %s', file['type'], gist_id, filename, file['raw_url']) response = (await self.fetch(file['raw_url'])) content = response.body elif file['truncated']: self.log.debug('Gist %s/%s truncated, fetching %s', gist_id, filename, file['raw_url']) response = (await self.fetch(file['raw_url'])) content = response_text(response, encoding='utf-8') else: content = file['content'] if (many_files_gist and (not filename.endswith('.ipynb'))): self.set_header('Content-Type', (file.get('type') or 'text/plain')) self.finish(content) return else: return content
async def deliver_notebook(self, user, gist_id, filename, gist, file, content): '\n user, gist_id, filename, gist, file are the same values as those\n passed into file_get, whereas content is returned from\n get_notebook_data using user, gist_id, filename, gist, and file.\n ' executor_url = (self.BINDER_PATH_TMPL.format(binder_base_url=self.binder_base_url, user=user.rstrip('/'), gist_id=gist_id, path=quote(filename)) if self.binder_base_url else None) (await self.finish_notebook(content, file['raw_url'], msg=('gist: %s' % gist_id), public=gist['public'], provider_url=gist['html_url'], executor_url=executor_url, **self.PROVIDER_CTX))
6,513,723,941,740,857,000
user, gist_id, filename, gist, file are the same values as those passed into file_get, whereas content is returned from get_notebook_data using user, gist_id, filename, gist, and file.
nbviewer/providers/gist/handlers.py
deliver_notebook
cybergis/nbviewer
python
async def deliver_notebook(self, user, gist_id, filename, gist, file, content): '\n user, gist_id, filename, gist, file are the same values as those\n passed into file_get, whereas content is returned from\n get_notebook_data using user, gist_id, filename, gist, and file.\n ' executor_url = (self.BINDER_PATH_TMPL.format(binder_base_url=self.binder_base_url, user=user.rstrip('/'), gist_id=gist_id, path=quote(filename)) if self.binder_base_url else None) (await self.finish_notebook(content, file['raw_url'], msg=('gist: %s' % gist_id), public=gist['public'], provider_url=gist['html_url'], executor_url=executor_url, **self.PROVIDER_CTX))
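The executor_url above is produced by plain string templating. BINDER_PATH_TMPL itself is defined elsewhere on the class; the template below is an assumed stand-in with the same fields, shown only to make the substitution concrete.

from urllib.parse import quote

BINDER_PATH_TMPL = '{binder_base_url}/v2/gist/{user}/{gist_id}/master?filepath={path}'  # assumed shape

url = BINDER_PATH_TMPL.format(
    binder_base_url='https://mybinder.org',  # assumed deployment
    user='someuser',
    gist_id='abc123',
    path=quote('analysis.ipynb'),
)
# -> https://mybinder.org/v2/gist/someuser/abc123/master?filepath=analysis.ipynb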
@cached async def get(self, user, gist_id, filename=''): '\n Encompasses both the case of a single file gist, handled by\n `file_get`, as well as a many-file gist, handled by `tree_get`.\n ' parsed_gist = (await self.parse_gist(user, gist_id, filename)) if (parsed_gist is not None): (user, gist_id, gist, files, many_files_gist) = parsed_gist else: return if (many_files_gist and (not filename)): (await self.tree_get(user, gist_id, gist, files)) else: if ((not many_files_gist) and (not filename)): filename = list(files.keys())[0] if (filename not in files): raise web.HTTPError(404, 'No such file in gist: %s (%s)', filename, list(files.keys())) file = files[filename] (await self.file_get(user, gist_id, filename, gist, many_files_gist, file))
-9,108,773,499,648,490,000
Encompasses both the case of a single file gist, handled by `file_get`, as well as a many-file gist, handled by `tree_get`.
nbviewer/providers/gist/handlers.py
get
cybergis/nbviewer
python
@cached async def get(self, user, gist_id, filename=''): '\n Encompasses both the case of a single file gist, handled by\n `file_get`, as well as a many-file gist, handled by `tree_get`.\n ' parsed_gist = (await self.parse_gist(user, gist_id, filename)) if (parsed_gist is not None): (user, gist_id, gist, files, many_files_gist) = parsed_gist else: return if (many_files_gist and (not filename)): (await self.tree_get(user, gist_id, gist, files)) else: if ((not many_files_gist) and (not filename)): filename = list(files.keys())[0] if (filename not in files): raise web.HTTPError(404, 'No such file in gist: %s (%s)', filename, list(files.keys())) file = files[filename] (await self.file_get(user, gist_id, filename, gist, many_files_gist, file))
def map_and_load(self, path: str, exec_now: bool=False): "Map and load a module into memory.\n\n The specified module will be mapped and loaded into the address set\n in the `next_image_base` member. It is the caller's responsibility to\n make sure that the memory is available.\n\n On success, `next_image_base` will be updated accordingly.\n\n Args:\n path : path of the module binary to load\n exec_now : execute module right away; will be enqueued if not\n\n Raises:\n QlMemoryMappedError : when `next_image_base` is not available\n " ql = self.ql pe = PE(path, fast_load=True) image_base = (pe.OPTIONAL_HEADER.ImageBase or self.next_image_base) image_size = ql.mem.align(pe.OPTIONAL_HEADER.SizeOfImage, 4096) assert ((image_base % 4096) == 0), 'image base is expected to be page-aligned' if (image_base != pe.OPTIONAL_HEADER.ImageBase): pe.relocate_image(image_base) pe.parse_data_directories() data = bytes(pe.get_memory_mapped_image()) ql.mem.map(image_base, image_size, info='[module]') ql.mem.write(image_base, data) ql.log.info(f'Module {path} loaded to {image_base:#x}') entry_point = (image_base + pe.OPTIONAL_HEADER.AddressOfEntryPoint) ql.log.info(f'Module entry point at {entry_point:#x}') if (self.entry_point == 0): self.entry_point = entry_point self.install_loaded_image_protocol(image_base, image_size) self.images.append(self.coverage_image(image_base, (image_base + image_size), path)) self.next_image_base = (image_base + image_size) module_info = (path, image_base, entry_point) if exec_now: self.execute_module(*module_info, eoe_trap=None) else: self.modules.append(module_info)
4,001,441,927,666,838,500
Map and load a module into memory. The specified module will be mapped and loaded into the address set in the `next_image_base` member. It is the caller's responsibility to make sure that the memory is available. On success, `next_image_base` will be updated accordingly. Args: path : path of the module binary to load exec_now : execute module right away; will be enqueued if not Raises: QlMemoryMappedError : when `next_image_base` is not available
qiling/qiling/loader/pe_uefi.py
map_and_load
mrTavas/owasp-fstm-auto
python
def map_and_load(self, path: str, exec_now: bool=False): "Map and load a module into memory.\n\n The specified module will be mapped and loaded into the address set\n in the `next_image_base` member. It is the caller's responsibility to\n make sure that the memory is available.\n\n On success, `next_image_base` will be updated accordingly.\n\n Args:\n path : path of the module binary to load\n exec_now : execute module right away; will be enqueued if not\n\n Raises:\n QlMemoryMappedError : when `next_image_base` is not available\n " ql = self.ql pe = PE(path, fast_load=True) image_base = (pe.OPTIONAL_HEADER.ImageBase or self.next_image_base) image_size = ql.mem.align(pe.OPTIONAL_HEADER.SizeOfImage, 4096) assert ((image_base % 4096) == 0), 'image base is expected to be page-aligned' if (image_base != pe.OPTIONAL_HEADER.ImageBase): pe.relocate_image(image_base) pe.parse_data_directories() data = bytes(pe.get_memory_mapped_image()) ql.mem.map(image_base, image_size, info='[module]') ql.mem.write(image_base, data) ql.log.info(f'Module {path} loaded to {image_base:#x}') entry_point = (image_base + pe.OPTIONAL_HEADER.AddressOfEntryPoint) ql.log.info(f'Module entry point at {entry_point:#x}') if (self.entry_point == 0): self.entry_point = entry_point self.install_loaded_image_protocol(image_base, image_size) self.images.append(self.coverage_image(image_base, (image_base + image_size), path)) self.next_image_base = (image_base + image_size) module_info = (path, image_base, entry_point) if exec_now: self.execute_module(*module_info, eoe_trap=None) else: self.modules.append(module_info)
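For context, a hedged usage sketch of the loader above: running a single UEFI module under Qiling. The rootfs and module paths are illustrative assumptions and must point at a real EFI rootfs on an actual setup.

from qiling import Qiling

# Qiling's UEFI loader calls map_and_load() for the module named in argv;
# additional drivers queued in self.modules are executed the same way.
ql = Qiling(['rootfs/x8664_efi/bin/TcgPlatformSetupPolicy.efi'],  # assumed module path
            'rootfs/x8664_efi')                                   # assumed EFI rootfs
ql.run()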
def call_function(self, addr: int, args: Sequence[int], ret: int): 'Call a function after properly setting up its arguments and return address.\n\n Args:\n addr : function address\n args : a sequence of arguments to pass to the function; may be empty\n ret : return address; may be None\n ' regs = ('rcx', 'rdx', 'r8', 'r9') assert (len(args) <= len(regs)), f'currently supporting up to {len(regs)} arguments' for (reg, arg) in zip(regs, args): self.ql.reg.write(reg, arg) if (ret is not None): self.ql.stack_push(ret) self.ql.reg.rip = addr
7,733,981,989,651,165,000
Call a function after properly setting up its arguments and return address. Args: addr : function address args : a sequence of arguments to pass to the function; may be empty ret : return address; may be None
qiling/qiling/loader/pe_uefi.py
call_function
mrTavas/owasp-fstm-auto
python
def call_function(self, addr: int, args: Sequence[int], ret: int): 'Call a function after properly setting up its arguments and return address.\n\n Args:\n addr : function address\n args : a sequence of arguments to pass to the function; may be empty\n ret : return address; may be None\n ' regs = ('rcx', 'rdx', 'r8', 'r9') assert (len(args) <= len(regs)), f'currently supporting up to {len(regs)} arguments' for (reg, arg) in zip(regs, args): self.ql.reg.write(reg, arg) if (ret is not None): self.ql.stack_push(ret) self.ql.reg.rip = addr
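Grounded only in the signature above, a call-site sketch: call_function follows the Microsoft x64 convention, so up to four integer arguments land in rcx, rdx, r8 and r9, and the return address is pushed on the stack. The addresses below are illustrative assumptions.

FUNC_ADDR = 0x401000  # assumed address of the emulated function
RET_TRAP = 0x500000   # assumed trap address used to regain control on return

loader.call_function(FUNC_ADDR, [0x1, 0x2], RET_TRAP)  # loader: the pe_uefi loader instance
# Afterwards: rcx=1, rdx=2, top-of-stack=RET_TRAP, rip=FUNC_ADDR; resuming
# emulation runs the function and stops when it returns into the trap.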
def execute_module(self, path: str, image_base: int, entry_point: int, eoe_trap: int): 'Start the execution of a UEFI module.\n\n Args:\n image_base : module base address\n entry_point : module entry point address\n eoe_trap : end-of-execution trap address; may be None\n ' ImageHandle = image_base SystemTable = self.gST self.call_function(entry_point, [ImageHandle, SystemTable], eoe_trap) self.ql.os.entry_point = entry_point self.ql.log.info(f'Running from {entry_point:#010x} of {path}')
5,655,954,296,241,916,000
Start the execution of a UEFI module. Args: image_base : module base address entry_point : module entry point address eoe_trap : end-of-execution trap address; may be None
qiling/qiling/loader/pe_uefi.py
execute_module
mrTavas/owasp-fstm-auto
python
def execute_module(self, path: str, image_base: int, entry_point: int, eoe_trap: int): 'Start the execution of a UEFI module.\n\n Args:\n image_base : module base address\n entry_point : module entry point address\n eoe_trap : end-of-execution trap address; may be None\n ' ImageHandle = image_base SystemTable = self.gST self.call_function(entry_point, [ImageHandle, SystemTable], eoe_trap) self.ql.os.entry_point = entry_point self.ql.log.info(f'Running from {entry_point:#010x} of {path}')
def _assert_setitem_series_conversion(self, original_series, loc_value, expected_series, expected_dtype): "test series value's coercion triggered by assignment" temp = original_series.copy() temp[1] = loc_value tm.assert_series_equal(temp, expected_series) assert (temp.dtype == expected_dtype)
-620,337,551,841,168,800
test series value's coercion triggered by assignment
pandas/tests/indexing/test_coercion.py
_assert_setitem_series_conversion
701KHK1915/8-PANDAS
python
def _assert_setitem_series_conversion(self, original_series, loc_value, expected_series, expected_dtype): temp = original_series.copy() temp[1] = loc_value tm.assert_series_equal(temp, expected_series) assert (temp.dtype == expected_dtype)
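A concrete instance of the coercion this helper asserts, with illustrative values: assigning a float into an int64 Series upcasts the whole Series (recent pandas may emit a FutureWarning before upcasting).

import pandas as pd

temp = pd.Series([1, 2, 3, 4])   # dtype: int64
temp[1] = 1.1                    # setitem triggers int64 -> float64
assert temp.dtype == 'float64'
assert temp.tolist() == [1.0, 1.1, 3.0, 4.0]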
def _assert_setitem_index_conversion(self, original_series, loc_key, expected_index, expected_dtype): "test index's coercion triggered by assign key" temp = original_series.copy() temp[loc_key] = 5 exp = pd.Series([1, 2, 3, 4, 5], index=expected_index) tm.assert_series_equal(temp, exp) assert (temp.index.dtype == expected_dtype) temp = original_series.copy() temp.loc[loc_key] = 5 exp = pd.Series([1, 2, 3, 4, 5], index=expected_index) tm.assert_series_equal(temp, exp) assert (temp.index.dtype == expected_dtype)
-979,596,171,037,217,800
test index's coercion triggered by assign key
pandas/tests/indexing/test_coercion.py
_assert_setitem_index_conversion
701KHK1915/8-PANDAS
python
def _assert_setitem_index_conversion(self, original_series, loc_key, expected_index, expected_dtype): temp = original_series.copy() temp[loc_key] = 5 exp = pd.Series([1, 2, 3, 4, 5], index=expected_index) tm.assert_series_equal(temp, exp) assert (temp.index.dtype == expected_dtype) temp = original_series.copy() temp.loc[loc_key] = 5 exp = pd.Series([1, 2, 3, 4, 5], index=expected_index) tm.assert_series_equal(temp, exp) assert (temp.index.dtype == expected_dtype)
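The index-side analogue, again with illustrative values: assigning through a new non-integer key enlarges the Series and coerces its int64 index to float64 (the behavior asserted by the tests that use this helper; exact semantics vary across pandas versions).

import pandas as pd

s = pd.Series([1, 2, 3, 4])      # index dtype: int64
s[1.1] = 5                       # new float key -> index coerced to float64
assert s.index.dtype == 'float64'
assert s.index.tolist() == [0.0, 1.0, 2.0, 3.0, 1.1]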
def _assert_insert_conversion(self, original, value, expected, expected_dtype): 'test coercion triggered by insert' target = original.copy() res = target.insert(1, value) tm.assert_index_equal(res, expected) assert (res.dtype == expected_dtype)
-525,057,754,933,178,100
test coercion triggered by insert
pandas/tests/indexing/test_coercion.py
_assert_insert_conversion
701KHK1915/8-PANDAS
python
def _assert_insert_conversion(self, original, value, expected, expected_dtype): target = original.copy() res = target.insert(1, value) tm.assert_index_equal(res, expected) assert (res.dtype == expected_dtype)
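Insert-driven coercion in one line (illustrative values): inserting a float into an int64 Index yields a float64 Index.

import pandas as pd

idx = pd.Index([1, 2, 3, 4])     # dtype: int64
res = idx.insert(1, 1.1)         # -> Index([1.0, 1.1, 2.0, 3.0, 4.0])
assert res.dtype == 'float64'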
def _assert_where_conversion(self, original, cond, values, expected, expected_dtype): 'test coercion triggered by where' target = original.copy() res = target.where(cond, values) tm.assert_equal(res, expected) assert (res.dtype == expected_dtype)
6,380,131,108,986,973,000
test coercion triggered by where
pandas/tests/indexing/test_coercion.py
_assert_where_conversion
701KHK1915/8-PANDAS
python
def _assert_where_conversion(self, original, cond, values, expected, expected_dtype): target = original.copy() res = target.where(cond, values) tm.assert_equal(res, expected) assert (res.dtype == expected_dtype)
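where-driven coercion (illustrative values): filling the False positions of an int64 Series with a float upcasts the result.

import pandas as pd

s = pd.Series([1, 2, 3, 4])
res = s.where([True, False, True, False], 1.1)  # -> [1.0, 1.1, 3.0, 1.1]
assert res.dtype == 'float64'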
def _assert_fillna_conversion(self, original, value, expected, expected_dtype): 'test coercion triggered by fillna' target = original.copy() res = target.fillna(value) tm.assert_equal(res, expected) assert (res.dtype == expected_dtype)
-1,228,754,256,084,262,400
test coercion triggered by fillna
pandas/tests/indexing/test_coercion.py
_assert_fillna_conversion
701KHK1915/8-PANDAS
python
def _assert_fillna_conversion(self, original, value, expected, expected_dtype): target = original.copy() res = target.fillna(value) tm.assert_equal(res, expected) assert (res.dtype == expected_dtype)
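fillna-driven coercion (illustrative values): filling NaN in a float64 Series with a complex value upcasts to complex128.

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, 3.0])  # dtype: float64
res = s.fillna(1 + 1j)             # -> complex128
assert res.dtype == 'complex128'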