body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
|---|---|---|---|---|---|---|---|
def share_entity_group_using_post_with_http_info(self, entity_group_id, **kwargs):
    """Share the Entity Group (shareEntityGroup)  # noqa: E501

    Share the entity group with certain user group based on the provided
    Share Group Request. The request is quite flexible and processing of the
    request involves multiple security checks using platform RBAC feature.
    Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.
    Security check is performed to verify that the user has 'WRITE'
    permission for specified group.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.share_entity_group_using_post_with_http_info(entity_group_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :param ShareGroupRequest body:
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint, plus the generic
    # client options shared by every generated method.
    accepted_params = ['entity_group_id', 'body',
                       'async_req', '_return_http_data_only',
                       '_preload_content', '_request_timeout']

    params = {'entity_group_id': entity_group_id}
    for arg_name, arg_value in six.iteritems(kwargs):
        if arg_name not in accepted_params:
            raise TypeError(("Got an unexpected keyword argument '%s' to method share_entity_group_using_post" % arg_name))
        params[arg_name] = arg_value

    # The entity group id is a mandatory path parameter.
    if params.get('entity_group_id') is None:
        raise ValueError('Missing the required parameter `entity_group_id` when calling `share_entity_group_using_post`')

    collection_formats = {}
    path_params = {'entityGroupId': params['entity_group_id']}
    query_params = []
    # Accept is negotiated first, then the request Content-Type,
    # mirroring the generated client's call order.
    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),
        'Content-Type': self.api_client.select_header_content_type(['application/json']),
    }
    form_params = []
    local_var_files = {}
    body_params = params.get('body')
    auth_settings = ['X-Authorization']

    return self.api_client.call_api(
        '/api/entityGroup/{entityGroupId}/share', 'POST',
        path_params, query_params, header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 4,922,334,354,604,789,000
|
Share the Entity Group (shareEntityGroup) # noqa: E501
Share the entity group with certain user group based on the provided Share Group Request. The request is quite flexible and processing of the request involves multiple security checks using platform RBAC feature. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for specified group. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.share_entity_group_using_post_with_http_info(entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param ShareGroupRequest body:
:return: None
If the method is called asynchronously,
returns the request thread.
|
tb_rest_client/api/api_pe/entity_group_controller_api.py
|
share_entity_group_using_post_with_http_info
|
D34DPlayer/thingsboard-python-rest-client
|
python
|
def share_entity_group_using_post_with_http_info(self, entity_group_id, **kwargs):
    """Share the Entity Group (shareEntityGroup)  # noqa: E501

    Share the entity group with certain user group based on the provided
    Share Group Request. The request is quite flexible and processing of the
    request involves multiple security checks using platform RBAC feature.
    Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.
    Security check is performed to verify that the user has 'WRITE'
    permission for specified group.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.share_entity_group_using_post_with_http_info(entity_group_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :param ShareGroupRequest body:
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint-specific kwargs plus the standard client control options.
    valid_kwargs = ['entity_group_id', 'body',
                    'async_req', '_return_http_data_only',
                    '_preload_content', '_request_timeout']

    call_params = {'entity_group_id': entity_group_id}
    for key, val in six.iteritems(kwargs):
        if key not in valid_kwargs:
            raise TypeError(("Got an unexpected keyword argument '%s' to method share_entity_group_using_post" % key))
        call_params[key] = val

    # Required path parameter must be present and non-None.
    if call_params.get('entity_group_id') is None:
        raise ValueError('Missing the required parameter `entity_group_id` when calling `share_entity_group_using_post`')

    path_params = {'entityGroupId': call_params['entity_group_id']}
    header_params = {}
    header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])

    return self.api_client.call_api(
        '/api/entityGroup/{entityGroupId}/share', 'POST',
        path_params,
        [],                      # no query parameters for this endpoint
        header_params,
        body=call_params.get('body'),
        post_params=[],          # no form parameters
        files={},                # no file uploads
        response_type=None,
        auth_settings=['X-Authorization'],
        async_req=call_params.get('async_req'),
        _return_http_data_only=call_params.get('_return_http_data_only'),
        _preload_content=call_params.get('_preload_content', True),
        _request_timeout=call_params.get('_request_timeout'),
        collection_formats={})
|
def unassign_entity_group_from_edge_using_delete(self, edge_id, group_type, entity_group_id, **kwargs):
    """Unassign entity group from edge (unassignEntityGroupFromEdge)  # noqa: E501

    Clears assignment of the entity group to the edge. Unassignment works in
    async way - first, 'unassign' notification event pushed to edge queue on
    platform. Second, remote edge service will receive an 'unassign' command
    to remove entity group (Edge will receive this instantly, if it's
    currently connected, or once it's going to be connected to platform).
    Third, once 'unassign' command will be delivered to edge service, it's
    going to remove entity group and entities inside this group locally.
    Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.
    Security check is performed to verify that the user has 'WRITE'
    permission for the entity (entities).  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.unassign_entity_group_from_edge_using_delete(edge_id, group_type, entity_group_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :param str group_type: EntityGroup type (required)
    :param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :return: EntityGroup
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always wants just the deserialized data,
    # not the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The delegate returns the request thread when async_req is set and the
    # plain result otherwise, so a single return covers both paths.
    return self.unassign_entity_group_from_edge_using_delete_with_http_info(
        edge_id, group_type, entity_group_id, **kwargs)
| -3,184,272,104,746,827,300
|
Unassign entity group from edge (unassignEntityGroupFromEdge) # noqa: E501
Clears assignment of the entity group to the edge. Unassignment works in async way - first, 'unassign' notification event pushed to edge queue on platform. Second, remote edge service will receive an 'unassign' command to remove entity group (Edge will receive this instantly, if it's currently connected, or once it's going to be connected to platform). Third, once 'unassign' command will be delivered to edge service, it's going to remove entity group and entities inside this group locally. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_entity_group_from_edge_using_delete(edge_id, group_type, entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str group_type: EntityGroup type (required)
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityGroup
If the method is called asynchronously,
returns the request thread.
|
tb_rest_client/api/api_pe/entity_group_controller_api.py
|
unassign_entity_group_from_edge_using_delete
|
D34DPlayer/thingsboard-python-rest-client
|
python
|
def unassign_entity_group_from_edge_using_delete(self, edge_id, group_type, entity_group_id, **kwargs):
    """Unassign entity group from edge (unassignEntityGroupFromEdge)  # noqa: E501

    Clears assignment of the entity group to the edge. Unassignment works in
    async way - first, 'unassign' notification event pushed to edge queue on
    platform. Second, remote edge service will receive an 'unassign' command
    to remove entity group (Edge will receive this instantly, if it's
    currently connected, or once it's going to be connected to platform).
    Third, once 'unassign' command will be delivered to edge service, it's
    going to remove entity group and entities inside this group locally.
    Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.
    Security check is performed to verify that the user has 'WRITE'
    permission for the entity (entities).  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.unassign_entity_group_from_edge_using_delete(edge_id, group_type, entity_group_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :param str group_type: EntityGroup type (required)
    :param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :return: EntityGroup
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force the delegate to return only the response payload.
    kwargs['_return_http_data_only'] = True
    outcome = self.unassign_entity_group_from_edge_using_delete_with_http_info(
        edge_id, group_type, entity_group_id, **kwargs)
    # outcome is already either the request thread (async) or the data (sync).
    return outcome
|
def unassign_entity_group_from_edge_using_delete_with_http_info(self, edge_id, group_type, entity_group_id, **kwargs):
    """Unassign entity group from edge (unassignEntityGroupFromEdge)  # noqa: E501

    Clears assignment of the entity group to the edge. Unassignment works in
    async way - first, 'unassign' notification event pushed to edge queue on
    platform. Second, remote edge service will receive an 'unassign' command
    to remove entity group (Edge will receive this instantly, if it's
    currently connected, or once it's going to be connected to platform).
    Third, once 'unassign' command will be delivered to edge service, it's
    going to remove entity group and entities inside this group locally.
    Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.
    Security check is performed to verify that the user has 'WRITE'
    permission for the entity (entities).  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.unassign_entity_group_from_edge_using_delete_with_http_info(edge_id, group_type, entity_group_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :param str group_type: EntityGroup type (required)
    :param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :return: EntityGroup
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the generic client control options.
    accepted_params = ['edge_id', 'group_type', 'entity_group_id',
                       'async_req', '_return_http_data_only',
                       '_preload_content', '_request_timeout']

    params = {'edge_id': edge_id,
              'group_type': group_type,
              'entity_group_id': entity_group_id}
    for arg_name, arg_value in six.iteritems(kwargs):
        if arg_name not in accepted_params:
            raise TypeError(("Got an unexpected keyword argument '%s' to method unassign_entity_group_from_edge_using_delete" % arg_name))
        params[arg_name] = arg_value

    # All three path parameters are mandatory.
    if params.get('edge_id') is None:
        raise ValueError('Missing the required parameter `edge_id` when calling `unassign_entity_group_from_edge_using_delete`')
    if params.get('group_type') is None:
        raise ValueError('Missing the required parameter `group_type` when calling `unassign_entity_group_from_edge_using_delete`')
    if params.get('entity_group_id') is None:
        raise ValueError('Missing the required parameter `entity_group_id` when calling `unassign_entity_group_from_edge_using_delete`')

    collection_formats = {}
    path_params = {'edgeId': params['edge_id'],
                   'groupType': params['group_type'],
                   'entityGroupId': params['entity_group_id']}
    query_params = []
    header_params = {'Accept': self.api_client.select_header_accept(['application/json'])}
    form_params = []
    local_var_files = {}
    auth_settings = ['X-Authorization']

    return self.api_client.call_api(
        '/api/edge/{edgeId}/entityGroup/{entityGroupId}/{groupType}', 'DELETE',
        path_params, query_params, header_params,
        body=None,               # DELETE carries no request body here
        post_params=form_params,
        files=local_var_files,
        response_type='EntityGroup',
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| -8,425,441,640,010,802,000
|
Unassign entity group from edge (unassignEntityGroupFromEdge) # noqa: E501
Clears assignment of the entity group to the edge. Unassignment works in async way - first, 'unassign' notification event pushed to edge queue on platform. Second, remote edge service will receive an 'unassign' command to remove entity group (Edge will receive this instantly, if it's currently connected, or once it's going to be connected to platform). Third, once 'unassign' command will be delivered to edge service, it's going to remove entity group and entities inside this group locally. Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority. Security check is performed to verify that the user has 'WRITE' permission for the entity (entities). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unassign_entity_group_from_edge_using_delete_with_http_info(edge_id, group_type, entity_group_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:param str group_type: EntityGroup type (required)
:param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: EntityGroup
If the method is called asynchronously,
returns the request thread.
|
tb_rest_client/api/api_pe/entity_group_controller_api.py
|
unassign_entity_group_from_edge_using_delete_with_http_info
|
D34DPlayer/thingsboard-python-rest-client
|
python
|
def unassign_entity_group_from_edge_using_delete_with_http_info(self, edge_id, group_type, entity_group_id, **kwargs):
    """Unassign entity group from edge (unassignEntityGroupFromEdge)  # noqa: E501

    Clears assignment of the entity group to the edge. Unassignment works in
    async way - first, 'unassign' notification event pushed to edge queue on
    platform. Second, remote edge service will receive an 'unassign' command
    to remove entity group (Edge will receive this instantly, if it's
    currently connected, or once it's going to be connected to platform).
    Third, once 'unassign' command will be delivered to edge service, it's
    going to remove entity group and entities inside this group locally.
    Available for users with 'TENANT_ADMIN' or 'CUSTOMER_USER' authority.
    Security check is performed to verify that the user has 'WRITE'
    permission for the entity (entities).  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.unassign_entity_group_from_edge_using_delete_with_http_info(edge_id, group_type, entity_group_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str edge_id: A string value representing the edge id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :param str group_type: EntityGroup type (required)
    :param str entity_group_id: A string value representing the Entity Group Id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
    :return: EntityGroup
             If the method is called asynchronously,
             returns the request thread.
    """
    valid_kwargs = ['edge_id', 'group_type', 'entity_group_id',
                    'async_req', '_return_http_data_only',
                    '_preload_content', '_request_timeout']

    call_params = {'edge_id': edge_id,
                   'group_type': group_type,
                   'entity_group_id': entity_group_id}
    for key, val in six.iteritems(kwargs):
        if key not in valid_kwargs:
            raise TypeError(("Got an unexpected keyword argument '%s' to method unassign_entity_group_from_edge_using_delete" % key))
        call_params[key] = val

    # Validate the mandatory path parameters individually so each error
    # message names the missing argument.
    if call_params.get('edge_id') is None:
        raise ValueError('Missing the required parameter `edge_id` when calling `unassign_entity_group_from_edge_using_delete`')
    if call_params.get('group_type') is None:
        raise ValueError('Missing the required parameter `group_type` when calling `unassign_entity_group_from_edge_using_delete`')
    if call_params.get('entity_group_id') is None:
        raise ValueError('Missing the required parameter `entity_group_id` when calling `unassign_entity_group_from_edge_using_delete`')

    path_params = {}
    path_params['edgeId'] = call_params['edge_id']
    path_params['groupType'] = call_params['group_type']
    path_params['entityGroupId'] = call_params['entity_group_id']

    header_params = {}
    header_params['Accept'] = self.api_client.select_header_accept(['application/json'])

    return self.api_client.call_api(
        '/api/edge/{edgeId}/entityGroup/{entityGroupId}/{groupType}', 'DELETE',
        path_params,
        [],                      # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='EntityGroup',
        auth_settings=['X-Authorization'],
        async_req=call_params.get('async_req'),
        _return_http_data_only=call_params.get('_return_http_data_only'),
        _preload_content=call_params.get('_preload_content', True),
        _request_timeout=call_params.get('_request_timeout'),
        collection_formats={})
|
def _third_octave_levels(sig, fs):
    """3rd octave filtering, squaring, smoothing, level calculation and
    downsampling to temporal resolution: 0,5 ms, i.e. sampling rate: 2 kHz

    See ISO 532-1 section 6.3

    Parameters
    ----------
    sig : numpy.ndarray
        time signal sampled at 48 kHz [pa]
    fs : int
        time signal sampling frequency; must be 48000

    Returns
    -------
    third_octave_level : numpy.ndarray
        (28, n_time) array of levels in dB, one row per third-octave band
    time_axis : numpy.ndarray
        time axis of the decimated (2 kHz) output
    freq : list
        third-octave band center frequencies in Hz (25 Hz .. 12.5 kHz)
    """
    # The filter coefficient tables below are tabulated for 48 kHz only.
    if (fs != 48000):
        raise ValueError('ERROR: Sampling frequency shall be equal to 48 kHz')
    # 28 third-octave bands, 6 coefficients per second-order section.
    n_level_band = 28
    n_filter_coeff = 6
    # Decimation factor to reach a 2 kHz output rate (0.5 ms resolution).
    dec_factor = int((fs / 2000))
    # Pre-allocation only; `coeff` is overwritten inside the loop before use.
    coeff = np.zeros(n_filter_coeff)
    # Reference second-order sections; per-band corrections are subtracted
    # from these to obtain each band's filter (see `coeff` in the loop).
    third_octave_filter_ref = np.array([[1, 2, 1, 1, (- 2), 1], [1, 0, (- 1), 1, (- 2), 1], [1, (- 2), 1, 1, (- 2), 1]])
    # Per-band correction terms for the three cascaded second-order sections
    # (ISO 532-1 tabulated values, one (3, 6) matrix per band).
    third_octave_filter = np.array([[[0, 0, 0, 0, (- 0.00067026), 0.000659453], [0, 0, 0, 0, (- 0.000375071), 0.000361926], [0, 0, 0, 0, (- 0.000306523), 0.000297634]], [[0, 0, 0, 0, (- 0.000847258), 0.000830131], [0, 0, 0, 0, (- 0.000476448), 0.000455616], [0, 0, 0, 0, (- 0.000388773), 0.000374685]], [[0, 0, 0, 0, (- 0.0010721), 0.00104496], [0, 0, 0, 0, (- 0.000606567), 0.000573553], [0, 0, 0, 0, (- 0.000494004), 0.000471677]], [[0, 0, 0, 0, (- 0.00135836), 0.00131535], [0, 0, 0, 0, (- 0.000774327), 0.000722007], [0, 0, 0, 0, (- 0.000629154), 0.000593771]], [[0, 0, 0, 0, (- 0.0017238), 0.00165564], [0, 0, 0, 0, (- 0.00099178), 0.000908866], [0, 0, 0, 0, (- 0.000803529), 0.000747455]], [[0, 0, 0, 0, (- 0.00219188), 0.00208388], [0, 0, 0, 0, (- 0.00127545), 0.00114406], [0, 0, 0, 0, (- 0.00102976), 0.0009409]], [[0, 0, 0, 0, (- 0.00279386), 0.00262274], [0, 0, 0, 0, (- 0.00164828), 0.00144006], [0, 0, 0, 0, (- 0.0013252), 0.00118438]], [[0, 0, 0, 0, (- 0.00357182), 0.00330071], [0, 0, 0, 0, (- 0.00214252), 0.00181258], [0, 0, 0, 0, (- 0.00171397), 0.00149082]], [[0, 0, 0, 0, (- 0.00458305), 0.00415355], [0, 0, 0, 0, (- 0.00280413), 0.00228135], [0, 0, 0, 0, (- 0.00223006), 0.00187646]], [[0, 0, 0, 0, (- 0.00590655), 0.00522622], [0, 0, 0, 0, (- 0.00369947), 0.00287118], [0, 0, 0, 0, (- 0.00292205), 0.00236178]], [[0, 0, 0, 0, (- 0.00765243), 0.00657493], [0, 0, 0, 0, (- 0.0049254), 0.00361318], [0, 0, 0, 0, (- 0.00386007), 0.0029724]], [[0, 0, 0, 0, (- 0.0100023), 0.0082961], [0, 0, 0, 0, (- 0.00663788), 0.00455999], [0, 0, 0, 0, (- 0.00515982), 0.00375306]], [[0, 0, 0, 0, (- 0.013123), 0.010422], [0, 0, 0, 0, (- 0.00902274), 0.00573132], [0, 0, 0, 0, (- 0.00694543), 0.00471734]], [[0, 0, 0, 0, (- 0.0173693), 0.0130947], [0, 0, 0, 0, (- 0.0124176), 0.00720526], [0, 0, 0, 0, (- 0.00946002), 0.00593145]], [[0, 0, 0, 0, (- 0.0231934), 0.0164308], [0, 0, 0, 0, (- 0.0173009), 0.00904761], [0, 0, 0, 0, (- 0.0130358), 0.00744926]], [[0, 0, 0, 0, (- 0.0313292), 0.020637], [0, 0, 0, 0, (- 0.0244342), 0.0113731], [0, 0, 0, 0, (- 0.0182108), 0.00936778]], [[0, 0, 0, 0, (- 0.0428261), 0.0259325], [0, 0, 0, 0, (- 0.0349619), 0.0143046], [0, 0, 0, 0, (- 0.0257855), 0.0117912]], [[0, 0, 0, 0, (- 0.0591733), 0.0325054], [0, 0, 0, 0, (- 0.0506072), 0.0179513], [0, 0, 0, 0, (- 0.0369401), 0.0148094]], [[0, 0, 0, 0, (- 0.0826348), 0.0405894], [0, 0, 0, 0, (- 0.0740348), 0.0224476], [0, 0, 0, 0, (- 0.0534977), 0.0185371]], [[0, 0, 0, 0, (- 0.117018), 0.0508116], [0, 0, 0, 0, (- 0.109516), 0.0281387], [0, 0, 0, 0, (- 0.0785097), 0.0232872]], [[0, 0, 0, 0, (- 0.167714), 0.0637872], [0, 0, 0, 0, (- 0.163378), 0.0353729], [0, 0, 0, 0, (- 0.116419), 0.0293723]], [[0, 0, 0, 0, (- 0.242528), 0.0798576], [0, 0, 0, 0, (- 0.245161), 0.044337], [0, 0, 0, 0, (- 0.173972), 0.0370015]], [[0, 0, 0, 0, (- 0.353142), 0.099633], [0, 0, 0, 0, (- 0.369163), 0.0553535], [0, 0, 0, 0, (- 0.261399), 0.0465428]], [[0, 0, 0, 0, (- 0.516316), 0.124177], [0, 0, 0, 0, (- 0.555473), 0.0689403], [0, 0, 0, 0, (- 0.393998), 0.0586715]], [[0, 0, 0, 0, (- 0.756635), 0.155023], [0, 0, 0, 0, (- 0.834281), 0.0858123], [0, 0, 0, 0, (- 0.594547), 0.074396]], [[0, 0, 0, 0, (- 1.10165), 0.191713], [0, 0, 0, 0, (- 1.23939), 0.105243], [0, 0, 0, 0, (- 0.891666), 0.0940354]], [[0, 0, 0, 0, (- 1.58477), 0.239049], [0, 0, 0, 0, (- 1.80505), 0.128794], [0, 0, 0, 0, (- 1.325), 0.121333]], [[0, 0, 0, 0, (- 2.5063), 0.142308], [0, 0, 0, 0, (- 2.19464), 0.27647], [0, 0, 0, 0, (- 1.90231), 0.147304]]])
    # Per-band filter gains (tabulated, one per third-octave band).
    filter_gain = np.array([4.30764e-11, 8.5934e-11, 1.71424e-10, 3.41944e-10, 6.82035e-10, 1.36026e-09, 2.71261e-09, 5.4087e-09, 1.07826e-08, 2.1491e-08, 4.28228e-08, 8.54316e-08, 1.70009e-07, 3.38215e-07, 6.7199e-07, 1.33531e-06, 2.65172e-06, 5.25477e-06, 1.0378e-05, 2.0487e-05, 4.05198e-05, 7.97914e-05, 0.000156511, 0.000304954, 0.000599157, 0.00116544, 0.00227488, 0.00391006])
    # Nominal third-octave center frequencies, 25 Hz .. 12.5 kHz.
    freq = [25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000, 12500]
    # Length of the decimated output and the matching time axis.
    n_time = len(sig[::dec_factor])
    time_axis = np.linspace(0, (len(sig) / fs), num=n_time)
    third_octave_level = np.zeros((n_level_band, n_time))
    for i_bands in range(n_level_band):
        # Floor added before log10 to avoid log of zero.
        tiny_value = (10 ** (- 12))
        # Reference value for the dB conversion (4e-10; loop-invariant,
        # recomputed each pass).
        i_ref = (4 * (10 ** (- 10)))
        # Band filter = reference sections minus per-band correction,
        # in scipy 'sos' (second-order sections) layout.
        coeff = (third_octave_filter_ref - third_octave_filter[i_bands, :, :])
        sig_filt = (filter_gain[i_bands] * signal.sosfilt(coeff, sig))
        # Exact (non-nominal) band center frequency: 1 kHz at i_bands == 16,
        # spaced by a factor 10**(1/10) per band.
        center_freq = ((10 ** ((i_bands - 16) / 10)) * 1000)
        # Squaring + smoothing per ISO 532-1 (helper defined elsewhere
        # in this module; assumes a 48 kHz rate — hence the literal).
        sig_filt = _square_and_smooth(sig_filt, center_freq, 48000)
        # Decimate, then convert to dB relative to i_ref.
        third_octave_level[i_bands, :] = (10 * np.log10(((sig_filt[::dec_factor] + tiny_value) / i_ref)))
    return (third_octave_level, time_axis, freq)
| 4,043,975,176,146,884,000
|
3rd octave filtering, squaring, smoothing, level calculation and
downsampling to temporal resolution: 0,5 ms, i.e. sampling rate: 2 kHz
See ISO 532-1 section 6.3
Parameters
----------
sig : numpy.ndarray
time signal sampled at 48 kHz[pa]
fs : int
time signal sampling frequency
Outputs
-------
third_octave_levels : numpy.ndarray
Set of time signals filtered per third octave bands
|
mosqito/sq_metrics/loudness/loudness_zwtv/_third_octave_levels.py
|
_third_octave_levels
|
Igarciac117/MoSQITo
|
python
|
def _third_octave_levels(sig, fs):
    """3rd octave filtering, squaring, smoothing, level calculation and
    downsampling to temporal resolution: 0.5 ms, i.e. sampling rate: 2 kHz.

    See ISO 532-1 section 6.3.

    Parameters
    ----------
    sig : numpy.ndarray
        Time signal sampled at 48 kHz [Pa].
    fs : int
        Time signal sampling frequency; must be 48000 Hz.

    Returns
    -------
    third_octave_level : numpy.ndarray
        Time-varying third-octave band levels in dB, shape (28, n_time).
    time_axis : numpy.ndarray
        Time axis of the decimated output [s].
    freq : list
        Third-octave band centre frequencies [Hz].

    Raises
    ------
    ValueError
        If ``fs`` is not 48 kHz (the hard-coded filter coefficients below are
        only valid at that rate).
    """
    if (fs != 48000):
        raise ValueError('ERROR: Sampling frequency shall be equal to 48 kHz')
    # Number of third-octave bands (25 Hz .. 12.5 kHz).
    n_level_band = 28
    # Decimate down to a 2 kHz output rate, i.e. 0.5 ms temporal resolution.
    dec_factor = int(fs / 2000)
    # Reference second-order-sections template; each band's correction matrix
    # below is subtracted from it to obtain that band's actual SOS filter.
    third_octave_filter_ref = np.array([[1, 2, 1, 1, (- 2), 1], [1, 0, (- 1), 1, (- 2), 1], [1, (- 2), 1, 1, (- 2), 1]])
    # Per-band SOS coefficient corrections: 28 bands x 3 sections x 6 coefficients.
    third_octave_filter = np.array([[[0, 0, 0, 0, (- 0.00067026), 0.000659453], [0, 0, 0, 0, (- 0.000375071), 0.000361926], [0, 0, 0, 0, (- 0.000306523), 0.000297634]], [[0, 0, 0, 0, (- 0.000847258), 0.000830131], [0, 0, 0, 0, (- 0.000476448), 0.000455616], [0, 0, 0, 0, (- 0.000388773), 0.000374685]], [[0, 0, 0, 0, (- 0.0010721), 0.00104496], [0, 0, 0, 0, (- 0.000606567), 0.000573553], [0, 0, 0, 0, (- 0.000494004), 0.000471677]], [[0, 0, 0, 0, (- 0.00135836), 0.00131535], [0, 0, 0, 0, (- 0.000774327), 0.000722007], [0, 0, 0, 0, (- 0.000629154), 0.000593771]], [[0, 0, 0, 0, (- 0.0017238), 0.00165564], [0, 0, 0, 0, (- 0.00099178), 0.000908866], [0, 0, 0, 0, (- 0.000803529), 0.000747455]], [[0, 0, 0, 0, (- 0.00219188), 0.00208388], [0, 0, 0, 0, (- 0.00127545), 0.00114406], [0, 0, 0, 0, (- 0.00102976), 0.0009409]], [[0, 0, 0, 0, (- 0.00279386), 0.00262274], [0, 0, 0, 0, (- 0.00164828), 0.00144006], [0, 0, 0, 0, (- 0.0013252), 0.00118438]], [[0, 0, 0, 0, (- 0.00357182), 0.00330071], [0, 0, 0, 0, (- 0.00214252), 0.00181258], [0, 0, 0, 0, (- 0.00171397), 0.00149082]], [[0, 0, 0, 0, (- 0.00458305), 0.00415355], [0, 0, 0, 0, (- 0.00280413), 0.00228135], [0, 0, 0, 0, (- 0.00223006), 0.00187646]], [[0, 0, 0, 0, (- 0.00590655), 0.00522622], [0, 0, 0, 0, (- 0.00369947), 0.00287118], [0, 0, 0, 0, (- 0.00292205), 0.00236178]], [[0, 0, 0, 0, (- 0.00765243), 0.00657493], [0, 0, 0, 0, (- 0.0049254), 0.00361318], [0, 0, 0, 0, (- 0.00386007), 0.0029724]], [[0, 0, 0, 0, (- 0.0100023), 0.0082961], [0, 0, 0, 0, (- 0.00663788), 0.00455999], [0, 0, 0, 0, (- 0.00515982), 0.00375306]], [[0, 0, 0, 0, (- 0.013123), 0.010422], [0, 0, 0, 0, (- 0.00902274), 0.00573132], [0, 0, 0, 0, (- 0.00694543), 0.00471734]], [[0, 0, 0, 0, (- 0.0173693), 0.0130947], [0, 0, 0, 0, (- 0.0124176), 0.00720526], [0, 0, 0, 0, (- 0.00946002), 0.00593145]], [[0, 0, 0, 0, (- 0.0231934), 0.0164308], [0, 0, 0, 0, (- 0.0173009), 0.00904761], [0, 0, 0, 0, (- 0.0130358), 0.00744926]], [[0, 0, 0, 0, (- 0.0313292), 0.020637], [0, 0, 0, 0, (- 0.0244342), 0.0113731], [0, 0, 0, 0, (- 0.0182108), 0.00936778]], [[0, 0, 0, 0, (- 0.0428261), 0.0259325], [0, 0, 0, 0, (- 0.0349619), 0.0143046], [0, 0, 0, 0, (- 0.0257855), 0.0117912]], [[0, 0, 0, 0, (- 0.0591733), 0.0325054], [0, 0, 0, 0, (- 0.0506072), 0.0179513], [0, 0, 0, 0, (- 0.0369401), 0.0148094]], [[0, 0, 0, 0, (- 0.0826348), 0.0405894], [0, 0, 0, 0, (- 0.0740348), 0.0224476], [0, 0, 0, 0, (- 0.0534977), 0.0185371]], [[0, 0, 0, 0, (- 0.117018), 0.0508116], [0, 0, 0, 0, (- 0.109516), 0.0281387], [0, 0, 0, 0, (- 0.0785097), 0.0232872]], [[0, 0, 0, 0, (- 0.167714), 0.0637872], [0, 0, 0, 0, (- 0.163378), 0.0353729], [0, 0, 0, 0, (- 0.116419), 0.0293723]], [[0, 0, 0, 0, (- 0.242528), 0.0798576], [0, 0, 0, 0, (- 0.245161), 0.044337], [0, 0, 0, 0, (- 0.173972), 0.0370015]], [[0, 0, 0, 0, (- 0.353142), 0.099633], [0, 0, 0, 0, (- 0.369163), 0.0553535], [0, 0, 0, 0, (- 0.261399), 0.0465428]], [[0, 0, 0, 0, (- 0.516316), 0.124177], [0, 0, 0, 0, (- 0.555473), 0.0689403], [0, 0, 0, 0, (- 0.393998), 0.0586715]], [[0, 0, 0, 0, (- 0.756635), 0.155023], [0, 0, 0, 0, (- 0.834281), 0.0858123], [0, 0, 0, 0, (- 0.594547), 0.074396]], [[0, 0, 0, 0, (- 1.10165), 0.191713], [0, 0, 0, 0, (- 1.23939), 0.105243], [0, 0, 0, 0, (- 0.891666), 0.0940354]], [[0, 0, 0, 0, (- 1.58477), 0.239049], [0, 0, 0, 0, (- 1.80505), 0.128794], [0, 0, 0, 0, (- 1.325), 0.121333]], [[0, 0, 0, 0, (- 2.5063), 0.142308], [0, 0, 0, 0, (- 2.19464), 0.27647], [0, 0, 0, 0, (- 1.90231), 0.147304]]])
    # Per-band filter gain applied after filtering.
    filter_gain = np.array([4.30764e-11, 8.5934e-11, 1.71424e-10, 3.41944e-10, 6.82035e-10, 1.36026e-09, 2.71261e-09, 5.4087e-09, 1.07826e-08, 2.1491e-08, 4.28228e-08, 8.54316e-08, 1.70009e-07, 3.38215e-07, 6.7199e-07, 1.33531e-06, 2.65172e-06, 5.25477e-06, 1.0378e-05, 2.0487e-05, 4.05198e-05, 7.97914e-05, 0.000156511, 0.000304954, 0.000599157, 0.00116544, 0.00227488, 0.00391006])
    # Band centre frequencies [Hz].
    freq = [25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000, 12500]
    n_time = len(sig[::dec_factor])
    time_axis = np.linspace(0, (len(sig) / fs), num=n_time)
    third_octave_level = np.zeros((n_level_band, n_time))
    # Loop-invariant constants, hoisted out of the band loop (previous revision
    # recomputed them on every iteration).
    tiny_value = (10 ** (- 12))  # avoids log10(0) on silent segments
    i_ref = (4 * (10 ** (- 10)))  # presumably (2e-5 Pa)**2 acoustic reference -- TODO confirm
    for i_bands in range(n_level_band):
        # Actual SOS filter for this band.
        coeff = (third_octave_filter_ref - third_octave_filter[i_bands, :, :])
        sig_filt = (filter_gain[i_bands] * signal.sosfilt(coeff, sig))
        # Centre frequency: 10**((i-16)/10) kHz, so band index 16 -> 1 kHz.
        center_freq = ((10 ** ((i_bands - 16) / 10)) * 1000)
        # fs is guaranteed == 48000 by the check above; pass it through instead
        # of a duplicated literal.
        sig_filt = _square_and_smooth(sig_filt, center_freq, fs)
        # Level in dB re. i_ref, sampled at the decimated (2 kHz) rate.
        third_octave_level[i_bands, :] = (10 * np.log10(((sig_filt[::dec_factor] + tiny_value) / i_ref)))
    return (third_octave_level, time_axis, freq)
|
def vap(x, a, b, c):
    """Vapor pressure model.

    Parameters
    ----------
    x : int
    a : float
    b : float
    c : float

    Returns
    -------
    float
        np.exp(a + b / x + c * np.log(x))
    """
    exponent = a + b / x + c * np.log(x)
    return np.exp(exponent)
| -7,761,664,097,999,010,000
|
Vapor pressure model
Parameters
----------
x : int
a : float
b : float
c : float
Returns
-------
float
np.exp(a+b/x+c*np.log(x))
|
src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py
|
vap
|
Ascarshen/nni
|
python
|
def vap(x, a, b, c):
    """Vapor pressure model: exp(a + b/x + c*log(x)).

    :param x: evaluation point (int)
    :param a: float coefficient
    :param b: float coefficient
    :param c: float coefficient
    :return: float, np.exp(a + b/x + c*np.log(x))
    """
    return np.exp(a + b / x + c * np.log(x))
|
def pow3(x, c, a, alpha):
    """Power-law curve: c - a * x**(-alpha).

    Parameters
    ----------
    x : int
    c : float
    a : float
    alpha : float

    Returns
    -------
    float
        c - a * x**(-alpha)
    """
    decay = a * x ** (- alpha)
    return c - decay
| -4,558,576,408,274,066,400
|
pow3
Parameters
----------
x : int
c : float
a : float
alpha : float
Returns
-------
float
c - a * x**(-alpha)
|
src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py
|
pow3
|
Ascarshen/nni
|
python
|
def pow3(x, c, a, alpha):
    """pow3 learning-curve model.

    :param x: int, evaluation point
    :param c: float, asymptote
    :param a: float, scale
    :param alpha: float, decay exponent
    :return: float, c - a * x**(-alpha)
    """
    return c - a / x ** alpha
|
def linear(x, a, b):
    """Linear model.

    Parameters
    ----------
    x : int
    a : float
    b : float

    Returns
    -------
    float
        a*x + b
    """
    return b + a * x
| -1,092,216,930,129,213,400
|
linear
Parameters
----------
x : int
a : float
b : float
Returns
-------
float
a*x + b
|
src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py
|
linear
|
Ascarshen/nni
|
python
|
def linear(x, a, b):
    """Straight line a*x + b.

    :param x: int, evaluation point
    :param a: float, slope
    :param b: float, intercept
    :return: float, a*x + b
    """
    slope_term = a * x
    return slope_term + b
|
def logx_linear(x, a, b):
    """Linear model in log(x).

    Parameters
    ----------
    x : int
    a : float
    b : float

    Returns
    -------
    float
        a * np.log(x) + b
    """
    log_x = np.log(x)
    return a * log_x + b
| 4,003,974,454,394,717
|
logx linear
Parameters
----------
x : int
a : float
b : float
Returns
-------
float
a * np.log(x) + b
|
src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py
|
logx_linear
|
Ascarshen/nni
|
python
|
def logx_linear(x, a, b):
    """Straight line in log-x space: a * np.log(x) + b.

    :param x: int, evaluation point (must be > 0)
    :param a: float, slope
    :param b: float, intercept
    :return: float, a * np.log(x) + b
    """
    return a * np.log(x) + b
|
def dr_hill_zero_background(x, theta, eta, kappa):
    """Dose-response Hill model with zero background.

    Parameters
    ----------
    x : int
    theta : float
    eta : float
    kappa : float

    Returns
    -------
    float
        (theta * x**eta) / (kappa**eta + x**eta)
    """
    x_eta = x ** eta
    return theta * x_eta / (kappa ** eta + x_eta)
| -2,401,082,026,529,337,300
|
dr hill zero background
Parameters
----------
x : int
theta : float
eta : float
kappa : float
Returns
-------
float
(theta* x**eta) / (kappa**eta + x**eta)
|
src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py
|
dr_hill_zero_background
|
Ascarshen/nni
|
python
|
def dr_hill_zero_background(x, theta, eta, kappa):
    """Hill saturation curve (theta*x**eta) / (kappa**eta + x**eta).

    :param x: int, dose
    :param theta: float, maximum response
    :param eta: float, Hill exponent
    :param kappa: float, half-saturation constant
    :return: float
    """
    numerator = theta * x ** eta
    denominator = kappa ** eta + x ** eta
    return numerator / denominator
|
def log_power(x, a, b, c):
    """Logistic power model.

    Parameters
    ----------
    x : int
    a : float
    b : float
    c : float

    Returns
    -------
    float
        a / (1. + (x / np.exp(b))**c)
    """
    scaled = (x / np.exp(b)) ** c
    return a / (1.0 + scaled)
| 8,904,214,296,093,443,000
|
"logistic power
Parameters
----------
x : int
a : float
b : float
c : float
Returns
-------
float
a/(1.+(x/np.exp(b))**c)
|
src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py
|
log_power
|
Ascarshen/nni
|
python
|
def log_power(x, a, b, c):
    """Logistic power curve a / (1 + (x/exp(b))**c).

    :param x: int, evaluation point
    :param a: float, upper asymptote
    :param b: float, log of the midpoint
    :param c: float, shape exponent
    :return: float
    """
    midpoint = np.exp(b)
    return a / (1.0 + (x / midpoint) ** c)
|
def pow4(x, alpha, a, b, c):
    """Four-parameter power-law model.

    Parameters
    ----------
    x : int
    alpha : float
    a : float
    b : float
    c : float

    Returns
    -------
    float
        c - (a*x + b)**-alpha
    """
    base = a * x + b
    return c - base ** (- alpha)
| 1,150,530,309,531,869,000
|
pow4
Parameters
----------
x : int
alpha : float
a : float
b : float
c : float
Returns
-------
float
c - (a*x+b)**-alpha
|
src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py
|
pow4
|
Ascarshen/nni
|
python
|
def pow4(x, alpha, a, b, c):
    """Shifted power law: c - (a*x + b)**(-alpha).

    :param x: int, evaluation point
    :param alpha: float, decay exponent
    :param a: float, slope inside the base
    :param b: float, offset inside the base
    :param c: float, asymptote
    :return: float
    """
    return c - 1.0 / (a * x + b) ** alpha
|
def mmf(x, alpha, beta, kappa, delta):
    """Morgan-Mercer-Flodin growth model.
    http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm

    Parameters
    ----------
    x : int
    alpha : float
    beta : float
    kappa : float
    delta : float

    Returns
    -------
    float
        alpha - (alpha - beta) / (1. + (kappa * x)**delta)
    """
    denom = 1.0 + (kappa * x) ** delta
    return alpha - (alpha - beta) / denom
| -5,394,674,920,945,372,000
|
Morgan-Mercer-Flodin
http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm
Parameters
----------
x : int
alpha : float
beta : float
kappa : float
delta : float
Returns
-------
float
alpha - (alpha - beta) / (1. + (kappa * x)**delta)
|
src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py
|
mmf
|
Ascarshen/nni
|
python
|
def mmf(x, alpha, beta, kappa, delta):
    """Morgan-Mercer-Flodin: alpha - (alpha - beta)/(1 + (kappa*x)**delta).
    http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm

    :param x: int, evaluation point
    :param alpha: float, upper asymptote
    :param beta: float, value at x = 0
    :param kappa: float, rate constant
    :param delta: float, shape exponent
    :return: float
    """
    span = alpha - beta
    return alpha - span / (1.0 + (kappa * x) ** delta)
|
def exp4(x, c, a, b, alpha):
    """Four-parameter exponential model.

    Parameters
    ----------
    x : int
    c : float
    a : float
    b : float
    alpha : float

    Returns
    -------
    float
        c - np.exp(-a*(x**alpha) + b)
    """
    exponent = b - a * x ** alpha
    return c - np.exp(exponent)
| 7,751,033,301,510,852,000
|
exp4
Parameters
----------
x : int
c : float
a : float
b : float
alpha : float
Returns
-------
float
c - np.exp(-a*(x**alpha)+b)
|
src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py
|
exp4
|
Ascarshen/nni
|
python
|
def exp4(x, c, a, b, alpha):
    """exp4 model: c - exp(-a*x**alpha + b).

    :param x: int, evaluation point
    :param c: float, asymptote
    :param a: float, rate
    :param b: float, offset in the exponent
    :param alpha: float, power on x
    :return: float
    """
    return c - np.exp(b - a * x ** alpha)
|
def ilog2(x, c, a):
    """Inverse-log model.

    Parameters
    ----------
    x : int
    c : float
    a : float

    Returns
    -------
    float
        c - a / np.log(x)
    """
    correction = a / np.log(x)
    return c - correction
| -1,638,633,845,434,928,600
|
ilog2
Parameters
----------
x : int
c : float
a : float
Returns
-------
float
c - a / np.log(x)
|
src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py
|
ilog2
|
Ascarshen/nni
|
python
|
def ilog2(x, c, a):
    """ilog2 model: c - a / np.log(x).

    :param x: int, evaluation point (must be > 1 for a finite positive log)
    :param c: float, asymptote
    :param a: float, scale of the correction term
    :return: float
    """
    return c - a / np.log(x)
|
def weibull(x, alpha, beta, kappa, delta):
    """Weibull growth model.
    http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm

    Parameters
    ----------
    x : int
    alpha : float
    beta : float
    kappa : float
    delta : float

    Returns
    -------
    float
        alpha - (alpha - beta) * np.exp(-(kappa * x)**delta)
    """
    decay = np.exp(- ((kappa * x) ** delta))
    return alpha - (alpha - beta) * decay
| 6,887,383,915,405,984,000
|
Weibull model
http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm
Parameters
----------
x : int
alpha : float
beta : float
kappa : float
delta : float
Returns
-------
float
alpha - (alpha - beta) * np.exp(-(kappa * x)**delta)
|
src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py
|
weibull
|
Ascarshen/nni
|
python
|
def weibull(x, alpha, beta, kappa, delta):
    """Weibull curve: alpha - (alpha - beta) * exp(-(kappa*x)**delta).
    http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm

    :param x: int, evaluation point
    :param alpha: float, upper asymptote
    :param beta: float, value at x = 0
    :param kappa: float, rate constant
    :param delta: float, shape exponent
    :return: float
    """
    span = alpha - beta
    return alpha - span * np.exp(-(kappa * x) ** delta)
|
def janoschek(x, a, beta, k, delta):
    """Janoschek growth model.
    http://www.pisces-conservation.com/growthhelp/janoschek.htm

    Parameters
    ----------
    x : int
    a : float
    beta : float
    k : float
    delta : float

    Returns
    -------
    float
        a - (a - beta) * np.exp(-k * x**delta)
    """
    decay = np.exp(- k * x ** delta)
    return a - (a - beta) * decay
| -8,035,998,551,225,298,000
|
http://www.pisces-conservation.com/growthhelp/janoschek.htm
Parameters
----------
x : int
a : float
beta : float
k : float
delta : float
Returns
-------
float
a - (a - beta) * np.exp(-k*x**delta)
|
src/sdk/pynni/nni/curvefitting_assessor/curvefunctions.py
|
janoschek
|
Ascarshen/nni
|
python
|
def janoschek(x, a, beta, k, delta):
    """Janoschek curve: a - (a - beta) * exp(-k * x**delta).
    http://www.pisces-conservation.com/growthhelp/janoschek.htm

    :param x: int, evaluation point
    :param a: float, upper asymptote
    :param beta: float, value at x = 0
    :param k: float, rate constant
    :param delta: float, shape exponent
    :return: float
    """
    span = a - beta
    return a - span * np.exp(-k * x ** delta)
|
def our_colours(colours=()):
    """
    Extract hexcodes for our colours.

    If passed a string, returns the matching hexcode.
    If passed a list, returns a list of hexcodes.
    Method from https://drsimonj.svbtle.com/creating-corporate-colour-palettes-for-ggplot2.

    - colours: a single colour name, or an iterable of colour names

    Examples:
        our_colours()                      # full name -> hex mapping
        our_colours('blue')                # one hexcode
        our_colours(['green', 'blue'])     # list of hexcodes

    Raises KeyError for a name not present in the palette.
    """
    # Default is an immutable tuple rather than a mutable `[]` (avoids the
    # shared-mutable-default pitfall); behaviour is unchanged since the
    # argument is only inspected, never mutated.
    if len(colours) == 0:
        # No selection -> return the whole palette mapping.
        return data.our_colours_raw
    elif isinstance(colours, str):
        # A single colour name -> its hexcode.
        return data.our_colours_raw[colours]
    else:
        # An iterable of names -> list of hexcodes, order preserved.
        return [data.our_colours_raw[i] for i in colours]
| -2,128,949,553,459,649,800
|
Extract hexcodes for our colours
If passed a string, returns the matching hexcode.
If passed a list, returns a list of hexcodes.
Method from https://drsimonj.svbtle.com/creating-corporate-colour-palettes-for-ggplot2.
- colours, list of strings
Examples:
data.our_colours_raw
our_colours()
our_colours('green', 'blue', 'green')
our_colours('not a colour', 'also not a colour', 'green')
our_colors('blue')
|
ourstylePy/our_colours.py
|
our_colours
|
PeterGrahamJersey/ourstylePy
|
python
|
def our_colours(colours=()):
    """
    Extract hexcodes for our colours.

    If passed a string, returns the matching hexcode.
    If passed a list, returns a list of hexcodes.
    Method from https://drsimonj.svbtle.com/creating-corporate-colour-palettes-for-ggplot2.

    - colours: a single colour name, or an iterable of colour names

    Examples:
        our_colours()                      # full name -> hex mapping
        our_colours('blue')                # one hexcode
        our_colours(['green', 'blue'])     # list of hexcodes

    Raises KeyError for a name not present in the palette.
    """
    # Immutable default `()` replaces the mutable `[]`; only len() is taken,
    # so callers see identical behaviour.
    if len(colours) == 0:
        return data.our_colours_raw
    elif isinstance(colours, str):
        return data.our_colours_raw[colours]
    else:
        return [data.our_colours_raw[i] for i in colours]
|
# US-spelling alias; forwards its argument to our_colours() unchanged.
# NOTE(review): shares the mutable-default `[]` with the main function; the
# list is never mutated here, so it is harmless as written.
def our_colors(colours=[]):
    '\n    Alias for our_colours()\n    '
    return our_colours(colours)
| -1,348,029,345,276,553,000
|
Alias for our_colours()
|
ourstylePy/our_colours.py
|
our_colors
|
PeterGrahamJersey/ourstylePy
|
python
|
# US-spelling alias; forwards its argument to our_colours() unchanged.
def our_colors(colours=[]):
    '\n    Alias for our_colours()\n    '
    return our_colours(colours)
|
@property
def access_control_allow_credentials(self):
    """Whether credentials can be shared by the browser to
    JavaScript code. As part of the preflight request it indicates
    whether credentials can be used on the cross origin request.
    """
    header_present = 'Access-Control-Allow-Credentials' in self.headers
    return header_present
| -2,807,124,438,663,061,000
|
Whether credentials can be shared by the browser to
JavaScript code. As part of the preflight request it indicates
whether credentials can be used on the cross origin request.
|
venv/Lib/site-packages/werkzeug/wrappers/cors.py
|
access_control_allow_credentials
|
997Yi/Flask-web
|
python
|
@property
def access_control_allow_credentials(self):
    """True when the 'Access-Control-Allow-Credentials' header is present,
    i.e. the browser may expose this credentialed cross-origin response to
    JavaScript (see the CORS preflight exchange).
    """
    headers = self.headers
    return 'Access-Control-Allow-Credentials' in headers
|
def __init__(__self__, *, resource_group_name: pulumi.Input[str], storage_account: pulumi.Input['StorageAccountArgs'], workspace_name: pulumi.Input[str], containers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, e_tag: Optional[pulumi.Input[str]]=None, storage_insight_name: Optional[pulumi.Input[str]]=None, tables: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None):
    """
    The set of arguments for constructing a StorageInsightConfig resource.

    :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
    :param pulumi.Input['StorageAccountArgs'] storage_account: The storage account connection details
    :param pulumi.Input[str] workspace_name: The name of the workspace.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] containers: The names of the blob containers that the workspace should read
    :param pulumi.Input[str] e_tag: The ETag of the storage insight.
    :param pulumi.Input[str] storage_insight_name: Name of the storageInsightsConfigs resource
    :param pulumi.Input[Sequence[pulumi.Input[str]]] tables: The names of the Azure tables that the workspace should read
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
    """
    # Required properties are always recorded.
    pulumi.set(__self__, 'resource_group_name', resource_group_name)
    pulumi.set(__self__, 'storage_account', storage_account)
    pulumi.set(__self__, 'workspace_name', workspace_name)
    # Optional properties are recorded only when explicitly provided.
    optional_props = (('containers', containers), ('e_tag', e_tag), ('storage_insight_name', storage_insight_name), ('tables', tables), ('tags', tags))
    for prop_name, prop_value in optional_props:
        if prop_value is not None:
            pulumi.set(__self__, prop_name, prop_value)
| -41,640,019,047,830,790
|
The set of arguments for constructing a StorageInsightConfig resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input['StorageAccountArgs'] storage_account: The storage account connection details
:param pulumi.Input[str] workspace_name: The name of the workspace.
:param pulumi.Input[Sequence[pulumi.Input[str]]] containers: The names of the blob containers that the workspace should read
:param pulumi.Input[str] e_tag: The ETag of the storage insight.
:param pulumi.Input[str] storage_insight_name: Name of the storageInsightsConfigs resource
:param pulumi.Input[Sequence[pulumi.Input[str]]] tables: The names of the Azure tables that the workspace should read
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
__init__
|
polivbr/pulumi-azure-native
|
python
|
def __init__(__self__, *, resource_group_name: pulumi.Input[str], storage_account: pulumi.Input['StorageAccountArgs'], workspace_name: pulumi.Input[str], containers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, e_tag: Optional[pulumi.Input[str]]=None, storage_insight_name: Optional[pulumi.Input[str]]=None, tables: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None):
    """
    The set of arguments for constructing a StorageInsightConfig resource.

    :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
    :param pulumi.Input['StorageAccountArgs'] storage_account: The storage account connection details
    :param pulumi.Input[str] workspace_name: The name of the workspace.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] containers: The names of the blob containers that the workspace should read
    :param pulumi.Input[str] e_tag: The ETag of the storage insight.
    :param pulumi.Input[str] storage_insight_name: Name of the storageInsightsConfigs resource
    :param pulumi.Input[Sequence[pulumi.Input[str]]] tables: The names of the Azure tables that the workspace should read
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
    """
    # The three required inputs.
    for required_name, required_value in (('resource_group_name', resource_group_name), ('storage_account', storage_account), ('workspace_name', workspace_name)):
        pulumi.set(__self__, required_name, required_value)
    # Optional inputs: skip anything the caller left as None.
    optional = {'containers': containers, 'e_tag': e_tag, 'storage_insight_name': storage_insight_name, 'tables': tables, 'tags': tags}
    for key, value in optional.items():
        if value is not None:
            pulumi.set(__self__, key, value)
|
# Required input: the resource group containing the workspace
# ('resourceGroupName' in the wire format).
@property
@pulumi.getter(name='resourceGroupName')
def resource_group_name(self) -> pulumi.Input[str]:
    '\n        The name of the resource group. The name is case insensitive.\n        '
    return pulumi.get(self, 'resource_group_name')
| 9,099,428,823,929,783,000
|
The name of the resource group. The name is case insensitive.
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
resource_group_name
|
polivbr/pulumi-azure-native
|
python
|
# Required input: the resource group containing the workspace
# ('resourceGroupName' in the wire format).
@property
@pulumi.getter(name='resourceGroupName')
def resource_group_name(self) -> pulumi.Input[str]:
    '\n        The name of the resource group. The name is case insensitive.\n        '
    return pulumi.get(self, 'resource_group_name')
|
# Required input: storage account connection details ('storageAccount').
@property
@pulumi.getter(name='storageAccount')
def storage_account(self) -> pulumi.Input['StorageAccountArgs']:
    '\n        The storage account connection details\n        '
    return pulumi.get(self, 'storage_account')
| 507,877,174,712,349,700
|
The storage account connection details
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
storage_account
|
polivbr/pulumi-azure-native
|
python
|
# Required input: storage account connection details ('storageAccount').
@property
@pulumi.getter(name='storageAccount')
def storage_account(self) -> pulumi.Input['StorageAccountArgs']:
    '\n        The storage account connection details\n        '
    return pulumi.get(self, 'storage_account')
|
# Required input: target Log Analytics workspace name ('workspaceName').
@property
@pulumi.getter(name='workspaceName')
def workspace_name(self) -> pulumi.Input[str]:
    '\n        The name of the workspace.\n        '
    return pulumi.get(self, 'workspace_name')
| -6,043,356,629,165,876,000
|
The name of the workspace.
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
workspace_name
|
polivbr/pulumi-azure-native
|
python
|
# Required input: target Log Analytics workspace name ('workspaceName').
@property
@pulumi.getter(name='workspaceName')
def workspace_name(self) -> pulumi.Input[str]:
    '\n        The name of the workspace.\n        '
    return pulumi.get(self, 'workspace_name')
|
# Optional input: blob containers the workspace should read.
@property
@pulumi.getter
def containers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    '\n        The names of the blob containers that the workspace should read\n        '
    return pulumi.get(self, 'containers')
| 2,516,808,853,289,985,000
|
The names of the blob containers that the workspace should read
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
containers
|
polivbr/pulumi-azure-native
|
python
|
# Optional input: blob containers the workspace should read.
@property
@pulumi.getter
def containers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    '\n        The names of the blob containers that the workspace should read\n        '
    return pulumi.get(self, 'containers')
|
# Optional input: ETag of the storage insight ('eTag' in the wire format).
@property
@pulumi.getter(name='eTag')
def e_tag(self) -> Optional[pulumi.Input[str]]:
    '\n        The ETag of the storage insight.\n        '
    return pulumi.get(self, 'e_tag')
| 5,386,400,399,290,158,000
|
The ETag of the storage insight.
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
e_tag
|
polivbr/pulumi-azure-native
|
python
|
# Optional input: ETag of the storage insight ('eTag' in the wire format).
@property
@pulumi.getter(name='eTag')
def e_tag(self) -> Optional[pulumi.Input[str]]:
    '\n        The ETag of the storage insight.\n        '
    return pulumi.get(self, 'e_tag')
|
# Optional input: name of the storageInsightsConfigs resource
# ('storageInsightName' in the wire format).
@property
@pulumi.getter(name='storageInsightName')
def storage_insight_name(self) -> Optional[pulumi.Input[str]]:
    '\n        Name of the storageInsightsConfigs resource\n        '
    return pulumi.get(self, 'storage_insight_name')
| -9,068,494,032,015,256,000
|
Name of the storageInsightsConfigs resource
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
storage_insight_name
|
polivbr/pulumi-azure-native
|
python
|
# Optional input: name of the storageInsightsConfigs resource
# ('storageInsightName' in the wire format).
@property
@pulumi.getter(name='storageInsightName')
def storage_insight_name(self) -> Optional[pulumi.Input[str]]:
    '\n        Name of the storageInsightsConfigs resource\n        '
    return pulumi.get(self, 'storage_insight_name')
|
# Optional input: Azure tables the workspace should read.
@property
@pulumi.getter
def tables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    '\n        The names of the Azure tables that the workspace should read\n        '
    return pulumi.get(self, 'tables')
| -5,734,022,118,253,810,000
|
The names of the Azure tables that the workspace should read
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
tables
|
polivbr/pulumi-azure-native
|
python
|
# Optional input: Azure tables the workspace should read.
@property
@pulumi.getter
def tables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    '\n        The names of the Azure tables that the workspace should read\n        '
    return pulumi.get(self, 'tables')
|
# Optional input: resource tags (name -> value mapping).
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]:
    '\n        Resource tags.\n        '
    return pulumi.get(self, 'tags')
| -2,047,115,851,061,118,500
|
Resource tags.
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
tags
|
polivbr/pulumi-azure-native
|
python
|
# Optional input: resource tags (name -> value mapping).
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]:
    '\n        Resource tags.\n        '
    return pulumi.get(self, 'tags')
|
# Typing-only @overload stub (body is intentionally `...`): documents the
# keyword-arguments construction form. The dispatching implementation lives
# elsewhere in this class.
@overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, containers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, e_tag: Optional[pulumi.Input[str]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, storage_account: Optional[pulumi.Input[pulumi.InputType['StorageAccountArgs']]]=None, storage_insight_name: Optional[pulumi.Input[str]]=None, tables: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, workspace_name: Optional[pulumi.Input[str]]=None, __props__=None):
    "\n        The top level storage insight resource container.\n        API Version: 2020-08-01.\n\n        :param str resource_name: The name of the resource.\n        :param pulumi.ResourceOptions opts: Options for the resource.\n        :param pulumi.Input[Sequence[pulumi.Input[str]]] containers: The names of the blob containers that the workspace should read\n        :param pulumi.Input[str] e_tag: The ETag of the storage insight.\n        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.\n        :param pulumi.Input[pulumi.InputType['StorageAccountArgs']] storage_account: The storage account connection details\n        :param pulumi.Input[str] storage_insight_name: Name of the storageInsightsConfigs resource\n        :param pulumi.Input[Sequence[pulumi.Input[str]]] tables: The names of the Azure tables that the workspace should read\n        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n        :param pulumi.Input[str] workspace_name: The name of the workspace.\n        "
    ...
| 8,636,932,789,713,882,000
|
The top level storage insight resource container.
API Version: 2020-08-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] containers: The names of the blob containers that the workspace should read
:param pulumi.Input[str] e_tag: The ETag of the storage insight.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[pulumi.InputType['StorageAccountArgs']] storage_account: The storage account connection details
:param pulumi.Input[str] storage_insight_name: Name of the storageInsightsConfigs resource
:param pulumi.Input[Sequence[pulumi.Input[str]]] tables: The names of the Azure tables that the workspace should read
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] workspace_name: The name of the workspace.
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
__init__
|
polivbr/pulumi-azure-native
|
python
|
# Typing-only @overload stub (body is intentionally `...`): documents the
# keyword-arguments construction form. The dispatching implementation lives
# elsewhere in this class.
@overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, containers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, e_tag: Optional[pulumi.Input[str]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, storage_account: Optional[pulumi.Input[pulumi.InputType['StorageAccountArgs']]]=None, storage_insight_name: Optional[pulumi.Input[str]]=None, tables: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, workspace_name: Optional[pulumi.Input[str]]=None, __props__=None):
    "\n        The top level storage insight resource container.\n        API Version: 2020-08-01.\n\n        :param str resource_name: The name of the resource.\n        :param pulumi.ResourceOptions opts: Options for the resource.\n        :param pulumi.Input[Sequence[pulumi.Input[str]]] containers: The names of the blob containers that the workspace should read\n        :param pulumi.Input[str] e_tag: The ETag of the storage insight.\n        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.\n        :param pulumi.Input[pulumi.InputType['StorageAccountArgs']] storage_account: The storage account connection details\n        :param pulumi.Input[str] storage_insight_name: Name of the storageInsightsConfigs resource\n        :param pulumi.Input[Sequence[pulumi.Input[str]]] tables: The names of the Azure tables that the workspace should read\n        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n        :param pulumi.Input[str] workspace_name: The name of the workspace.\n        "
    ...
|
# Typing-only @overload stub (body is intentionally `...`): documents the
# args-object construction form. The dispatching implementation lives
# elsewhere in this class.
@overload
def __init__(__self__, resource_name: str, args: StorageInsightConfigArgs, opts: Optional[pulumi.ResourceOptions]=None):
    "\n        The top level storage insight resource container.\n        API Version: 2020-08-01.\n\n        :param str resource_name: The name of the resource.\n        :param StorageInsightConfigArgs args: The arguments to use to populate this resource's properties.\n        :param pulumi.ResourceOptions opts: Options for the resource.\n        "
    ...
| 3,297,964,980,969,051,000
|
The top level storage insight resource container.
API Version: 2020-08-01.
:param str resource_name: The name of the resource.
:param StorageInsightConfigArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
__init__
|
polivbr/pulumi-azure-native
|
python
|
@overload
def __init__(__self__, resource_name: str, args: StorageInsightConfigArgs, opts: Optional[pulumi.ResourceOptions]=None):
"\n The top level storage insight resource container.\n API Version: 2020-08-01.\n\n :param str resource_name: The name of the resource.\n :param StorageInsightConfigArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
...
|
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'StorageInsightConfig':
    """Look up an existing StorageInsightConfig resource's state by name and
    provider id, with optional extra options to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = StorageInsightConfigArgs.__new__(StorageInsightConfigArgs)
    # All properties start unset; the engine fills them from provider state.
    for prop_name in ('containers', 'e_tag', 'name', 'status',
                      'storage_account', 'tables', 'tags', 'type'):
        __props__.__dict__[prop_name] = None
    return StorageInsightConfig(resource_name, opts=opts, __props__=__props__)
| 4,728,537,262,257,571,000
|
Get an existing StorageInsightConfig resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
get
|
polivbr/pulumi-azure-native
|
python
|
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'StorageInsightConfig':
"\n Get an existing StorageInsightConfig resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = StorageInsightConfigArgs.__new__(StorageInsightConfigArgs)
__props__.__dict__['containers'] = None
__props__.__dict__['e_tag'] = None
__props__.__dict__['name'] = None
__props__.__dict__['status'] = None
__props__.__dict__['storage_account'] = None
__props__.__dict__['tables'] = None
__props__.__dict__['tags'] = None
__props__.__dict__['type'] = None
return StorageInsightConfig(resource_name, opts=opts, __props__=__props__)
|
@property
@pulumi.getter
def containers(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """The names of the blob containers that the workspace should read."""
    return pulumi.get(self, 'containers')
| 5,895,872,450,965,376,000
|
The names of the blob containers that the workspace should read
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
containers
|
polivbr/pulumi-azure-native
|
python
|
@property
@pulumi.getter
def containers(self) -> pulumi.Output[Optional[Sequence[str]]]:
'\n \n '
return pulumi.get(self, 'containers')
|
@property
@pulumi.getter(name='eTag')
def e_tag(self) -> pulumi.Output[Optional[str]]:
    """The ETag of the storage insight."""
    return pulumi.get(self, 'e_tag')
| 6,580,174,356,673,608,000
|
The ETag of the storage insight.
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
e_tag
|
polivbr/pulumi-azure-native
|
python
|
@property
@pulumi.getter(name='eTag')
def e_tag(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'e_tag')
|
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """The name of the resource."""
    return pulumi.get(self, 'name')
| 2,231,345,607,626,165,800
|
The name of the resource
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
name
|
polivbr/pulumi-azure-native
|
python
|
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'name')
|
@property
@pulumi.getter
def status(self) -> pulumi.Output['outputs.StorageInsightStatusResponse']:
    """The status of the storage insight."""
    return pulumi.get(self, 'status')
| -5,288,598,013,457,447,000
|
The status of the storage insight
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
status
|
polivbr/pulumi-azure-native
|
python
|
@property
@pulumi.getter
def status(self) -> pulumi.Output['outputs.StorageInsightStatusResponse']:
'\n \n '
return pulumi.get(self, 'status')
|
@property
@pulumi.getter(name='storageAccount')
def storage_account(self) -> pulumi.Output['outputs.StorageAccountResponse']:
    """The storage account connection details."""
    return pulumi.get(self, 'storage_account')
| -4,159,935,955,763,377,000
|
The storage account connection details
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
storage_account
|
polivbr/pulumi-azure-native
|
python
|
@property
@pulumi.getter(name='storageAccount')
def storage_account(self) -> pulumi.Output['outputs.StorageAccountResponse']:
'\n \n '
return pulumi.get(self, 'storage_account')
|
@property
@pulumi.getter
def tables(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """The names of the Azure tables that the workspace should read."""
    return pulumi.get(self, 'tables')
| 6,806,337,111,924,012,000
|
The names of the Azure tables that the workspace should read
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
tables
|
polivbr/pulumi-azure-native
|
python
|
@property
@pulumi.getter
def tables(self) -> pulumi.Output[Optional[Sequence[str]]]:
'\n \n '
return pulumi.get(self, 'tables')
|
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]:
    """Resource tags."""
    return pulumi.get(self, 'tags')
| -2,929,197,049,816,896,000
|
Resource tags.
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
tags
|
polivbr/pulumi-azure-native
|
python
|
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]:
'\n \n '
return pulumi.get(self, 'tags')
|
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
    """The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"."""
    return pulumi.get(self, 'type')
| -5,449,551,391,296,740,000
|
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
type
|
polivbr/pulumi-azure-native
|
python
|
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'type')
|
def interpolate1d(x, values, tangents):
    """Cubic Hermite spline interpolation on a 1D spline.

    The knots sit at implicit x-coordinates [0, 1, ..., len(values) - 1].
    Queries outside that range are linearly extrapolated from the end knots.
    See https://en.wikipedia.org/wiki/Cubic_Hermite_spline, where "x"
    corresponds to `x`, "p" to `values`, and "m" to `tangents`.

    Args:
      x: Tensor (any shape) of single or double precision query points.
      values: 1-D tensor with the value of each knot; same dtype as `x`
        and same length as `tangents`.
      tangents: 1-D tensor with the tangent (derivative) at each knot;
        same dtype as `x` and same length as `values`.

    Returns:
      The spline evaluated at `x`; same shape and dtype as `x`.
    """
    assert torch.is_tensor(x)
    assert torch.is_tensor(values)
    assert torch.is_tensor(tangents)
    dtype = x.dtype
    assert values.dtype == dtype
    assert tangents.dtype == dtype
    assert values.ndim == 1
    assert tangents.ndim == 1
    assert values.shape[0] == tangents.shape[0]
    # Index of the knot at/below each query, clamped to a valid segment.
    lo = torch.floor(torch.clamp(x, torch.as_tensor(0), values.shape[0] - 2)).type(torch.int64)
    hi = lo + 1
    # Fractional position within the segment (outside [0, 1] beyond the ends).
    frac = x - lo.type(dtype)
    frac_sq = frac ** 2
    frac_cu = frac * frac_sq
    # Hermite basis polynomials.
    h01 = (-2.0) * frac_cu + 3.0 * frac_sq
    h00 = 1.0 - h01
    h11 = frac_cu - frac_sq
    h10 = (h11 - frac_sq) + frac
    # Linear extrapolation using the first / last knot and tangent.
    below = tangents[0] * frac + values[0]
    above = tangents[-1] * (frac - 1.0) + values[-1]
    # Cubic Hermite interpolation inside the spline.
    mid = (((values[lo] * h00) + (values[hi] * h01))
           + (tangents[lo] * h10)) + (tangents[hi] * h11)
    return torch.where(frac < 0.0, below, torch.where(frac > 1.0, above, mid))
| -6,282,021,684,821,428,000
|
Perform cubic hermite spline interpolation on a 1D spline.
The x coordinates of the spline knots are at [0 : 1 : len(values)-1].
Queries outside of the range of the spline are computed using linear
extrapolation. See https://en.wikipedia.org/wiki/Cubic_Hermite_spline
for details, where "x" corresponds to `x`, "p" corresponds to `values`, and
"m" corresponds to `tangents`.
Args:
x: A tensor of any size of single or double precision floats containing the
set of values to be used for interpolation into the spline.
values: A vector of single or double precision floats containing the value
of each knot of the spline being interpolated into. Must be the same
length as `tangents` and the same type as `x`.
tangents: A vector of single or double precision floats containing the
tangent (derivative) of each knot of the spline being interpolated into.
Must be the same length as `values` and the same type as `x`.
Returns:
The result of interpolating along the spline defined by `values`, and
`tangents`, using `x` as the query values. Will be the same length and type
as `x`.
|
pioneer/robust_loss_pytorch/cubic_spline.py
|
interpolate1d
|
AaltoVision/automodulator
|
python
|
def interpolate1d(x, values, tangents):
'Perform cubic hermite spline interpolation on a 1D spline.\n\n The x coordinates of the spline knots are at [0 : 1 : len(values)-1].\n Queries outside of the range of the spline are computed using linear\n extrapolation. See https://en.wikipedia.org/wiki/Cubic_Hermite_spline\n for details, where "x" corresponds to `x`, "p" corresponds to `values`, and\n "m" corresponds to `tangents`.\n\n Args:\n x: A tensor of any size of single or double precision floats containing the\n set of values to be used for interpolation into the spline.\n values: A vector of single or double precision floats containing the value\n of each knot of the spline being interpolated into. Must be the same\n length as `tangents` and the same type as `x`.\n tangents: A vector of single or double precision floats containing the\n tangent (derivative) of each knot of the spline being interpolated into.\n Must be the same length as `values` and the same type as `x`.\n\n Returns:\n The result of interpolating along the spline defined by `values`, and\n `tangents`, using `x` as the query values. Will be the same length and type\n as `x`.\n '
assert torch.is_tensor(x)
assert torch.is_tensor(values)
assert torch.is_tensor(tangents)
float_dtype = x.dtype
assert (values.dtype == float_dtype)
assert (tangents.dtype == float_dtype)
assert (len(values.shape) == 1)
assert (len(tangents.shape) == 1)
assert (values.shape[0] == tangents.shape[0])
x_lo = torch.floor(torch.clamp(x, torch.as_tensor(0), (values.shape[0] - 2))).type(torch.int64)
x_hi = (x_lo + 1)
t = (x - x_lo.type(float_dtype))
t_sq = (t ** 2)
t_cu = (t * t_sq)
h01 = (((- 2.0) * t_cu) + (3.0 * t_sq))
h00 = (1.0 - h01)
h11 = (t_cu - t_sq)
h10 = ((h11 - t_sq) + t)
value_before = ((tangents[0] * t) + values[0])
value_after = ((tangents[(- 1)] * (t - 1.0)) + values[(- 1)])
neighbor_values_lo = values[x_lo]
neighbor_values_hi = values[x_hi]
neighbor_tangents_lo = tangents[x_lo]
neighbor_tangents_hi = tangents[x_hi]
value_mid = ((((neighbor_values_lo * h00) + (neighbor_values_hi * h01)) + (neighbor_tangents_lo * h10)) + (neighbor_tangents_hi * h11))
return torch.where((t < 0.0), value_before, torch.where((t > 1.0), value_after, value_mid))
|
def print_data(data: list) -> None:
    """Print each line of 'data/exam.csv' with surrounding whitespace stripped.

    NOTE(review): despite the signature and original (Korean) docstring
    describing "print the contents of a 2-D list", the implementation ignores
    `data` entirely and echoes the CSV file instead. The parameter is kept
    for backward compatibility; confirm intent with the caller.

    :param data: unused (kept for interface compatibility)
    :return: None
    """
    # `with` guarantees the file handle is closed even if printing raises;
    # the original left the file open on any exception mid-loop.
    with open('data/exam.csv', mode='r', encoding='utf-8') as readcsv:
        for line in readcsv:
            print(line.strip())
| -3,626,549,826,081,273,000
|
2차원 리스트의 내용을 출력
1 10 20 30 40
2 11 21 31 41
...
:param data: 2차원 행렬 형태의 리스트
:return: None
|
lec07_file/file07.py
|
print_data
|
SOOIN-KIM/lab-python
|
python
|
def print_data(data: list) -> None:
'\n 2차원 리스트의 내용을 출력\n 1 10 20 30 40\n 2 11 21 31 41\n ...\n\n\n :param data: 2차원 행렬 형태의 리스트\n :return: None\n '
readcsv = open('data/exam.csv', mode='r', encoding='utf-8')
line = readcsv.readline()
while line:
print(line.strip())
line = readcsv.readline()
readcsv.close()
|
def construct_k_colored_graph(k, n, p):
    """Build a random k-colored graph of n nodes.

    Each unordered pair of nodes is wired (in both directions) with
    probability 0 <= p <= 1.

    Note: this code is for demonstrative purposes only; a solution for such
    a problem will not necessarily exist, in which case the concretization
    process will throw an exception.
    """
    with coopy.scope():
        nodes = [Node() for _ in range(n)]
        # Visit every unordered pair exactly once.
        for idx, first in enumerate(nodes[:-1]):
            for second in nodes[idx + 1:]:
                if random.uniform(0, 1) < p:
                    first.direct_edge_towards(second)
                    second.direct_edge_towards(first)
        for node in nodes:
            # Each node must take one of the k colors and keep its
            # connection constraints satisfiable.
            coopy.any([(node.color == c) for c in range(k)]).require()
            node.has_valid_connections.require()
        coopy.concretize()
        return nodes
| 8,291,928,439,992,019,000
|
Constructs a k colored graph of n nodes in which a pair
of nodes shares an edge with probability 0 <= p <= 1.
Note: this code is for demonstrative purposes only; the
solution for such a problem will not necessarily exist,
in which case the concretization process will throw
an exception.
|
examples/example-5.py
|
construct_k_colored_graph
|
abarreal/coopy
|
python
|
def construct_k_colored_graph(k, n, p):
'\n Constructs a k colored graph of n nodes in which a pair\n of nodes shares an edge with probability 0 <= p <= 1.\n\n Note: this code is for demonstrative purposes only; the\n solution for such a problem will not necessarily exist,\n in which case the concretization process will throw\n an exception.\n '
with coopy.scope():
nodes = [Node() for i in range(n)]
for i in range((n - 1)):
for j in range((i + 1), n):
a = nodes[i]
b = nodes[j]
if (random.uniform(0, 1) < p):
a.direct_edge_towards(b)
b.direct_edge_towards(a)
for node in nodes:
coopy.any([(node.color == i) for i in range(k)]).require()
node.has_valid_connections.require()
coopy.concretize()
return nodes
|
def close(self):
    """Close the underlying OS-level socket held by this wrapper."""
    underlying = self.sock
    underlying.close()
| 4,806,724,708,709,453,000
|
Close the socket.
|
python/lib/socket.py
|
close
|
TpmKranz/netsec-scion
|
python
|
def close(self):
'\n \n '
self.sock.close()
|
def __init__(self, bind=None, addr_type=AddrType.IPV6, reuse=False):
    """Create a UDP socket, apply socket options, and optionally bind it.

    :param tuple bind:
        Optional tuple of (`str`, `int`, `str`) describing respectively the
        address and port to bind to, and an optional description.
    :param addr_type:
        Socket domain. Must be one of :const:`~lib.types.AddrType.IPV4`,
        :const:`~lib.types.AddrType.IPV6` (default).
    :param reuse:
        Boolean value indicating whether SO_REUSEADDR option should be set.
    """
    assert addr_type in (AddrType.IPV4, AddrType.IPV6)
    self._addr_type = addr_type
    # Pick the OS socket domain matching the requested address family.
    domain = AF_INET if addr_type == AddrType.IPV4 else AF_INET6
    self.sock = socket(domain, SOCK_DGRAM)
    if reuse:
        self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    self.port = None
    if bind:
        self.bind(*bind)
    self.active = True
| -3,654,269,377,549,289,000
|
Initialize a UDP socket, then call superclass init for socket options
and binding.
:param tuple bind:
Optional tuple of (`str`, `int`, `str`) describing respectively the
address and port to bind to, and an optional description.
:param addr_type:
Socket domain. Must be one of :const:`~lib.types.AddrType.IPV4`,
:const:`~lib.types.AddrType.IPV6` (default).
:param reuse:
Boolean value indicating whether SO_REUSEADDR option should be set.
|
python/lib/socket.py
|
__init__
|
TpmKranz/netsec-scion
|
python
|
def __init__(self, bind=None, addr_type=AddrType.IPV6, reuse=False):
'\n Initialize a UDP socket, then call superclass init for socket options\n and binding.\n\n :param tuple bind:\n Optional tuple of (`str`, `int`, `str`) describing respectively the\n address and port to bind to, and an optional description.\n :param addr_type:\n Socket domain. Must be one of :const:`~lib.types.AddrType.IPV4`,\n :const:`~lib.types.AddrType.IPV6` (default).\n :param reuse:\n Boolean value indicating whether SO_REUSEADDR option should be set.\n '
assert (addr_type in (AddrType.IPV4, AddrType.IPV6))
self._addr_type = addr_type
af_domain = AF_INET6
if (self._addr_type == AddrType.IPV4):
af_domain = AF_INET
self.sock = socket(af_domain, SOCK_DGRAM)
if reuse:
self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.port = None
if bind:
self.bind(*bind)
self.active = True
|
def bind(self, addr, port=0, desc=None):
    """Bind the socket to the given address and port.

    If `addr` is ``None`` the socket binds to all interfaces (the wildcard
    form depends on the address family).

    :param str addr: Address to bind to (can be ``None``, see above).
    :param int port: Port to bind to.
    :param str desc: Optional purpose of the port.
    """
    if addr is None:
        # Wildcard address: '' for IPv4, '::' for IPv6.
        addr = '' if self._addr_type == AddrType.IPV4 else '::'
    try:
        self.sock.bind((addr, port))
    except OSError as e:
        logging.critical('Error binding to [%s]:%s: %s', addr, port, e)
        kill_self()
    self.port = self.sock.getsockname()[1]
    if desc:
        logging.debug('%s bound to %s:%d', desc, addr, self.port)
| 6,334,852,750,349,397,000
|
Bind socket to the specified address & port. If `addr` is ``None``, the
socket will bind to all interfaces.
:param str addr: Address to bind to (can be ``None``, see above).
:param int port: Port to bind to.
:param str desc: Optional purpose of the port.
|
python/lib/socket.py
|
bind
|
TpmKranz/netsec-scion
|
python
|
def bind(self, addr, port=0, desc=None):
'\n Bind socket to the specified address & port. If `addr` is ``None``, the\n socket will bind to all interfaces.\n\n :param str addr: Address to bind to (can be ``None``, see above).\n :param int port: Port to bind to.\n :param str desc: Optional purpose of the port.\n '
if (addr is None):
addr = '::'
if (self._addr_type == AddrType.IPV4):
addr = ''
try:
self.sock.bind((addr, port))
except OSError as e:
logging.critical('Error binding to [%s]:%s: %s', addr, port, e)
kill_self()
self.port = self.sock.getsockname()[1]
if desc:
logging.debug('%s bound to %s:%d', desc, addr, self.port)
|
def send(self, data, dst=None):
    """Send a datagram to the specified destination.

    :param bytes data: Data to send.
    :param tuple dst:
        Tuple of (`str`, `int`) describing the destination address and port,
        respectively.
    :returns: True if the full payload was sent, False otherwise.
    :raises SCMPUnreachNet/SCMPUnreachHost: on unreachable network/host.
    """
    try:
        sent = self.sock.sendto(data, dst)
    except OSError as exc:
        logging.error('Error sending %dB to %s: %s', len(data), dst, exc)
        err = exc.args[0]
        if err == ENETUNREACH:
            raise SCMPUnreachNet(dst)
        elif err == EHOSTUNREACH:
            raise SCMPUnreachHost(dst)
        return False
    if sent == len(data):
        return True
    logging.error('Wanted to send %dB, only sent %dB', len(data), sent)
    return False
| -7,940,587,675,143,595,000
|
Send data to a specified destination.
:param bytes data: Data to send.
:param tuple dst:
Tuple of (`str`, `int`) describing the destination address and port,
respectively.
|
python/lib/socket.py
|
send
|
TpmKranz/netsec-scion
|
python
|
def send(self, data, dst=None):
'\n Send data to a specified destination.\n\n :param bytes data: Data to send.\n :param tuple dst:\n Tuple of (`str`, `int`) describing the destination address and port,\n respectively.\n '
try:
ret = self.sock.sendto(data, dst)
except OSError as e:
errno = e.args[0]
logging.error('Error sending %dB to %s: %s', len(data), dst, e)
if (errno == ENETUNREACH):
raise SCMPUnreachNet(dst)
elif (errno == EHOSTUNREACH):
raise SCMPUnreachHost(dst)
return False
if (ret != len(data)):
logging.error('Wanted to send %dB, only sent %dB', len(data), ret)
return False
return True
|
def recv(self, block=True):
    """Read one datagram from the socket, retrying on EINTR.

    :param bool block: If False, use MSG_DONTWAIT (non-blocking read).
    :returns:
        Tuple of (`bytes`, (`str`, `int`)) containing the data, and remote
        host/port respectively.
    """
    flags = MSG_DONTWAIT if not block else 0
    while True:
        try:
            return self.sock.recvfrom(SCION_BUFLEN, flags)
        except InterruptedError:
            # Interrupted by a signal; retry the read.
            pass
| -3,302,182,164,630,096,400
|
Read data from socket.
:returns:
Tuple of (`bytes`, (`str`, `int`) containing the data, and remote
host/port respectively.
|
python/lib/socket.py
|
recv
|
TpmKranz/netsec-scion
|
python
|
def recv(self, block=True):
'\n Read data from socket.\n\n :returns:\n Tuple of (`bytes`, (`str`, `int`) containing the data, and remote\n host/port respectively.\n '
flags = 0
if (not block):
flags = MSG_DONTWAIT
while True:
try:
return self.sock.recvfrom(SCION_BUFLEN, flags)
except InterruptedError:
pass
|
def __init__(self, reg=None, bind_ip=(), bind_unix=None, sock=None):
    """Initialise a unix-domain stream socket, optionally registering with
    the dispatcher and/or binding it.

    :param tuple reg:
        Optional tuple of (`SCIONAddr`, `int`, `SVCType`, `bool`)
        describing respectively the address, port, SVC type, and init value
        to register with the dispatcher. In sockets that do not connect to
        the dispatcher, this argument is None.
    :param tuple bind_ip:
        Optional tuple of (`SCIONAddr`, `int`) describing the address and port
        of the bind address. Only needed if the bind address is different from
        the public address.
    :param tuple bind_unix:
        Optional tuple of (`str`, `str`) describing path to bind to, and an
        optional description.
    :param sock:
        Optional socket file object to build instance around.
    """
    self.sock = sock if sock else socket(AF_UNIX, SOCK_STREAM)
    self.addr = None
    if reg:
        addr, port, init, svc = reg
        self.registered = reg_dispatcher(self, addr, port, bind_ip, init, svc)
    if bind_unix:
        self.bind(*bind_unix)
    self.active = True
| 6,635,898,148,288,106,000
|
Initialise a socket of the specified type, and optionally bind it to an
address/port.
:param tuple reg:
Optional tuple of (`SCIONAddr`, `int`, `SVCType`, `bool`)
describing respectively the address, port, SVC type, and init value
to register with the dispatcher. In sockets that do not connect to
the dispatcher, this argument is None.
:param tuple bind_ip:
Optional tuple of (`SCIONAddr`, `int`) describing the address and port
of the bind address. Only needed if the bind address is different from
the public address.
:param tuple bind_unix:
Optional tuple of (`str`, `str`) describing path to bind to, and an
optional description.
:param sock:
Optional socket file object to build instance around.
|
python/lib/socket.py
|
__init__
|
TpmKranz/netsec-scion
|
python
|
def __init__(self, reg=None, bind_ip=(), bind_unix=None, sock=None):
'\n Initialise a socket of the specified type, and optionally bind it to an\n address/port.\n\n :param tuple reg:\n Optional tuple of (`SCIONAddr`, `int`, `SVCType`, `bool`)\n describing respectively the address, port, SVC type, and init value\n to register with the dispatcher. In sockets that do not connect to\n the dispatcher, this argument is None.\n :param tuple bind_ip:\n Optional tuple of (`SCIONAddr`, `int`) describing the address and port\n of the bind address. Only needed if the bind address is different from\n the public address.\n :param tuple bind_unix:\n Optional tuple of (`str`, `str`) describing path to bind to, and an\n optional description.\n :param sock:\n Optional socket file object to build instance around.\n '
self.sock = (sock or socket(AF_UNIX, SOCK_STREAM))
self.addr = None
if reg:
(addr, port, init, svc) = reg
self.registered = reg_dispatcher(self, addr, port, bind_ip, init, svc)
if bind_unix:
self.bind(*bind_unix)
self.active = True
|
def send(self, data, dst=None):
'\n Send data through the socket.\n\n :param bytes data: Data to send.\n '
if dst:
(dst_addr, dst_port) = dst
if isinstance(dst_addr, str):
dst_addr = haddr_parse_interface(dst_addr)
addr_type = struct.pack('B', dst_addr.TYPE)
packed_dst = (dst_addr.pack() + struct.pack('!H', dst_port))
else:
addr_type = struct.pack('B', AddrType.NONE)
packed_dst = b''
data_len = struct.pack('!I', len(data))
data = b''.join([self.COOKIE, addr_type, data_len, packed_dst, data])
try:
self.sock.sendall(data)
return True
except OSError as e:
logging.error('error in send: %s', e)
return False
| 1,264,137,445,995,965,200
|
Send data through the socket.
:param bytes data: Data to send.
|
python/lib/socket.py
|
send
|
TpmKranz/netsec-scion
|
python
|
def send(self, data, dst=None):
'\n Send data through the socket.\n\n :param bytes data: Data to send.\n '
if dst:
(dst_addr, dst_port) = dst
if isinstance(dst_addr, str):
dst_addr = haddr_parse_interface(dst_addr)
addr_type = struct.pack('B', dst_addr.TYPE)
packed_dst = (dst_addr.pack() + struct.pack('!H', dst_port))
else:
addr_type = struct.pack('B', AddrType.NONE)
packed_dst = b''
data_len = struct.pack('!I', len(data))
data = b''.join([self.COOKIE, addr_type, data_len, packed_dst, data])
try:
self.sock.sendall(data)
return True
except OSError as e:
logging.error('error in send: %s', e)
return False
|
def recv(self, block=True):
    """Read one framed message from the socket.

    Frame layout: 8-byte cookie, 1-byte address type, 4-byte payload length,
    then (for non-NONE address types) the sender address and 2-byte port,
    followed by the payload.

    :param bool block: If False, use MSG_DONTWAIT for the header read.
    :returns: (payload_bytes, (host, port)) on success; (None, None) on EOF.
    :raises SCIONIOError: if the cookie does not match (stream out of sync).
    """
    flags = 0
    if (not block):
        flags = MSG_DONTWAIT
    buf = recv_all(self.sock, (self.COOKIE_LEN + 5), flags)
    if (not buf):
        return (None, None)
    (cookie, addr_type, packet_len) = struct.unpack('!8sBI', buf)
    if (cookie != self.COOKIE):
        raise SCIONIOError('Dispatcher socket out of sync')
    port_len = 0
    if (addr_type != AddrType.NONE):
        port_len = 2
    addr_len = haddr_get_type(addr_type).LEN
    buf = recv_all(self.sock, ((addr_len + port_len) + packet_len), 0)
    if (addr_len > 0):
        addr = buf[:addr_len]
        # BUG FIX: struct.unpack returns a tuple; take element [0] so `port`
        # is an int (the original put a 1-tuple into `sender`).
        port = struct.unpack('!H', buf[addr_len:(addr_len + port_len)])[0]
        sender = (str(ipaddress.ip_address(addr)), port)
    else:
        addr = ''
        port = 0
        sender = (None, None)
    packet = buf[(addr_len + port_len):]
    return (packet, sender)
| 334,733,081,014,915,200
|
Read data from socket.
:returns: bytestring containing received data.
|
python/lib/socket.py
|
recv
|
TpmKranz/netsec-scion
|
python
|
def recv(self, block=True):
'\n Read data from socket.\n\n :returns: bytestring containing received data.\n '
flags = 0
if (not block):
flags = MSG_DONTWAIT
buf = recv_all(self.sock, (self.COOKIE_LEN + 5), flags)
if (not buf):
return (None, None)
(cookie, addr_type, packet_len) = struct.unpack('!8sBI', buf)
if (cookie != self.COOKIE):
raise SCIONIOError('Dispatcher socket out of sync')
port_len = 0
if (addr_type != AddrType.NONE):
port_len = 2
addr_len = haddr_get_type(addr_type).LEN
buf = recv_all(self.sock, ((addr_len + port_len) + packet_len), 0)
if (addr_len > 0):
addr = buf[:addr_len]
port = struct.unpack('!H', buf[addr_len:(addr_len + port_len)])
sender = (str(ipaddress.ip_address(addr)), port)
else:
addr = ''
port = 0
sender = (None, None)
packet = buf[(addr_len + port_len):]
return (packet, sender)
|
def add(self, sock, callback):
    """Register a socket for read events.

    :param UDPSocket sock: UDPSocket to add.
    :param callback: Callable associated with the socket in the selector data.
    """
    # Inactive sockets are silently ignored.
    if sock.is_active():
        self._sel.register(sock.sock, selectors.EVENT_READ, (sock, callback))
| 558,938,295,974,006,900
|
Add new socket.
:param UDPSocket sock: UDPSocket to add.
|
python/lib/socket.py
|
add
|
TpmKranz/netsec-scion
|
python
|
def add(self, sock, callback):
'\n Add new socket.\n\n :param UDPSocket sock: UDPSocket to add.\n '
if (not sock.is_active()):
return
self._sel.register(sock.sock, selectors.EVENT_READ, (sock, callback))
|
def remove(self, sock):
    """Unregister a previously added socket.

    :param UDPSocket sock: UDPSocket to remove.
    """
    raw = sock.sock
    self._sel.unregister(raw)
| -9,222,864,604,528,158,000
|
Remove socket.
:param UDPSocket sock: UDPSocket to remove.
|
python/lib/socket.py
|
remove
|
TpmKranz/netsec-scion
|
python
|
def remove(self, sock):
'\n Remove socket.\n\n :param UDPSocket sock: UDPSocket to remove.\n '
self._sel.unregister(sock.sock)
|
def select_(self, timeout=None):
    """Yield the (socket, callback) data of UDPSockets with pending data.

    :param float timeout:
        Number of seconds to wait for at least one UDPSocket to become
        ready. ``None`` means wait forever.
    """
    ready = self._sel.select(timeout=timeout)
    for key, _events in ready:
        yield key.data
| -1,330,899,738,019,222,300
|
Return the set of UDPSockets that have data pending.
:param float timeout:
Number of seconds to wait for at least one UDPSocket to become
ready. ``None`` means wait forever.
|
python/lib/socket.py
|
select_
|
TpmKranz/netsec-scion
|
python
|
def select_(self, timeout=None):
'\n Return the set of UDPSockets that have data pending.\n\n :param float timeout:\n Number of seconds to wait for at least one UDPSocket to become\n ready. ``None`` means wait forever.\n '
for (key, _) in self._sel.select(timeout=timeout):
(yield key.data)
|
def close(self):
    """Remove and close every registered socket, then close the selector."""
    mapping = self._sel.get_map()
    if mapping:
        # Snapshot the entries first: remove() mutates the map while we
        # iterate over it.
        for entry in list(mapping.values()):
            member = entry.data[0]
            self.remove(member)
            member.close()
    self._sel.close()
| 5,736,516,918,493,458,000
|
Close all sockets.
|
python/lib/socket.py
|
close
|
TpmKranz/netsec-scion
|
python
|
def close(self):
'\n \n '
mapping = self._sel.get_map()
if mapping:
for entry in list(mapping.values()):
sock = entry.data[0]
self.remove(sock)
sock.close()
self._sel.close()
|
@classmethod
def ReceivePayload(cls, socket):
    """Read one framed message from `socket` and return only its payload.

    Frame layout: uint32 magic, uint32 payload size, then the payload.
    Returns None on EOF, bad magic, or a short read.
    socket: a blocking socket for read data.
    """
    rfile = socket.makefile('rb', 0)  # unbuffered reader
    _L.debug('read raw_magic %s', threading.current_thread().name)
    try:
        raw_magic = rfile.read(4)
    except Exception as e:
        _L.debug('Fail to read raw_magic, %s', e)
        raw_magic = None
    _L.debug('read raw_magic %s done: %s', threading.current_thread().name, repr(raw_magic))
    if (not raw_magic):
        return None
    magic = struct.unpack(fmt, raw_magic)[0]
    if (magic != cls.magic):
        _L.error('Error: receive a malformat message, the message should start from a four bytes uint32 magic number')
        return None
    _L.debug('read payload')
    payload_size = struct.unpack('I', rfile.read(4))[0]
    _L.debug('Receive payload size %d', payload_size)
    # Accumulate chunks until the full payload has been read; a zero-length
    # read means the peer closed early.
    chunks = []
    remaining = payload_size
    while (remaining > 0):
        chunk = rfile.read(remaining)
        if (not chunk):
            return None
        chunks.append(chunk)
        assert (len(chunk) <= remaining)
        remaining -= len(chunk)
    rfile.close()
    return b''.join(chunks)
| 4,808,921,762,618,945,000
|
Return only payload, not the raw message, None if failed.
socket: a blocking socket for read data.
|
client/python/unrealcv/__init__.py
|
ReceivePayload
|
AI-cecream/unrealcv
|
python
|
@classmethod
def ReceivePayload(cls, socket):
    """Read one framed message from *socket*; return its payload bytes.

    Frame layout: 4-byte magic (must equal ``cls.magic``), 4-byte
    unsigned payload length, then the payload. Returns None on
    disconnect, bad magic, or a truncated payload.

    socket: a blocking socket to read from.
    """
    rbufsize = 0  # unbuffered reads
    rfile = socket.makefile('rb', rbufsize)
    try:
        _L.debug('read raw_magic %s', threading.current_thread().name)
        try:
            raw_magic = rfile.read(4)
        except Exception as e:
            _L.debug('Fail to read raw_magic, %s', e)
            raw_magic = None
        _L.debug('read raw_magic %s done: %s', threading.current_thread().name, repr(raw_magic))
        if (not raw_magic):  # EOF: peer closed the connection
            return None
        magic = struct.unpack(fmt, raw_magic)[0]
        if (magic != cls.magic):
            _L.error('Error: receive a malformat message, the message should start from a four bytes uint32 magic number')
            return None
        _L.debug('read payload')
        raw_payload_size = rfile.read(4)
        payload_size = struct.unpack('I', raw_payload_size)[0]
        _L.debug('Receive payload size %d', payload_size)
        # Fix: original had `payload = b`, a NameError at runtime; the
        # accumulator must start as an empty bytes object.
        payload = b''
        remain_size = payload_size
        while (remain_size > 0):
            data = rfile.read(remain_size)
            if (not data):  # connection closed mid-payload
                return None
            payload += data
            bytes_read = len(data)
            assert (bytes_read <= remain_size)
            remain_size -= bytes_read
        return payload
    finally:
        # Close on every exit path (original leaked rfile on early returns).
        rfile.close()
|
@classmethod
def WrapAndSendPayload(cls, socket, payload):
    """Frame *payload* (magic + length prefix) and send it over *socket*.

    Returns True on success, False on any failure.
    """
    try:
        wbufsize = (- 1)  # -1: use the default (fully buffered) mode
        # SocketMessage computes the magic number and payload_size header.
        socket_message = SocketMessage(payload)
        wfile = socket.makefile('wb', wbufsize)
        wfile.write(struct.pack(fmt, socket_message.magic))
        wfile.write(struct.pack(fmt, socket_message.payload_size))
        wfile.write(payload)
        wfile.flush()
        wfile.close()
        return True
    except Exception as e:
        # Broad catch is deliberate: any send failure maps to False.
        _L.error('Fail to send message %s', e)
        return False
| -1,074,405,196,215,218,600
|
Send payload, true if success, false if failed
|
client/python/unrealcv/__init__.py
|
WrapAndSendPayload
|
AI-cecream/unrealcv
|
python
|
@classmethod
def WrapAndSendPayload(cls, socket, payload):
    """Frame *payload* (magic + length prefix) and send it over *socket*.

    Returns True on success, False on any failure.
    """
    try:
        wbufsize = (- 1)  # -1: default fully-buffered mode
        socket_message = SocketMessage(payload)
        wfile = socket.makefile('wb', wbufsize)
        wfile.write(struct.pack(fmt, socket_message.magic))
        wfile.write(struct.pack(fmt, socket_message.payload_size))
        wfile.write(payload)
        wfile.flush()
        wfile.close()
        return True
    except Exception as e:
        # Broad catch is deliberate: any send failure maps to False.
        _L.error('Fail to send message %s', e)
        return False
|
def __init__(self, endpoint, raw_message_handler):
    """Start a background receiving thread for a message client.

    Parameters:
        endpoint: a tuple (ip, port)
        raw_message_handler: a function ``def handler(msg)`` invoked for
            each incoming raw message.
    """
    self.endpoint = endpoint
    self.raw_message_handler = raw_message_handler
    self.socket = None  # set once connect() succeeds
    self.wait_connected = threading.Event()
    receiving_thread = threading.Thread(target=self.__receiving)
    # Fix: setDaemon(1) is deprecated (removed path since Python 3.10);
    # the daemon attribute is the supported spelling and behaves the same.
    receiving_thread.daemon = True
    receiving_thread.start()
| -1,795,471,374,995,031,600
|
Parameters:
endpoint: a tuple (ip, port)
message_handler: a function defined as `def message_handler(msg)` to handle incoming message, msg is a string
|
client/python/unrealcv/__init__.py
|
__init__
|
AI-cecream/unrealcv
|
python
|
def __init__(self, endpoint, raw_message_handler):
    """Start a background receiving thread for a message client.

    Parameters:
        endpoint: a tuple (ip, port)
        raw_message_handler: a function ``def handler(msg)`` invoked for
            each incoming raw message.
    """
    self.endpoint = endpoint
    self.raw_message_handler = raw_message_handler
    self.socket = None  # set once connect() succeeds
    self.wait_connected = threading.Event()
    # Daemon thread so it never blocks interpreter shutdown.
    receiving_thread = threading.Thread(target=self.__receiving)
    receiving_thread.setDaemon(1)
    receiving_thread.start()
|
def connect(self, timeout=1):
    """Try to connect to the server; return whether connection succeeded.

    A TCP socket alone is not enough: the receiving thread must also see
    the server's confirmation message within *timeout* seconds.
    """
    if self.isconnected():
        return True
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(self.endpoint)
        # Publish the socket first so the receiving thread can read the
        # confirmation, then wait for it to set the event.
        self.socket = s
        _L.debug('BaseClient: wait for connection confirm')
        self.wait_connected.clear()
        isset = self.wait_connected.wait(timeout)
        assert (isset != None)  # Event.wait returns a bool, never None
        if isset:
            return True
        else:
            # No confirmation arrived: roll back so isconnected() is False.
            self.socket = None
            _L.error('Socket is created, but can not get connection confirm from %s, timeout after %.2f seconds', self.endpoint, timeout)
            return False
    except Exception as e:
        _L.error('Can not connect to %s', str(self.endpoint))
        _L.error('Error %s', e)
        self.socket = None
        return False
| -721,923,439,331,663,600
|
Try to connect to server, return whether connection successful
|
client/python/unrealcv/__init__.py
|
connect
|
AI-cecream/unrealcv
|
python
|
def connect(self, timeout=1):
    """Try to connect to the server; return whether connection succeeded.

    Waits up to *timeout* seconds for the receiving thread to observe the
    server's confirmation message.
    """
    if self.isconnected():
        return True
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(self.endpoint)
        self.socket = s
        _L.debug('BaseClient: wait for connection confirm')
        self.wait_connected.clear()
        isset = self.wait_connected.wait(timeout)
        assert (isset != None)  # Event.wait returns a bool, never None
        if isset:
            return True
        else:
            # No confirmation: roll back so isconnected() is False.
            self.socket = None
            _L.error('Socket is created, but can not get connection confirm from %s, timeout after %.2f seconds', self.endpoint, timeout)
            return False
    except Exception as e:
        _L.error('Can not connect to %s', str(self.endpoint))
        _L.error('Error %s', e)
        self.socket = None
        return False
|
def __receiving(self):
    """Receive packages and extract messages from them.

    Runs forever on the daemon receiving thread:
    - a falsy payload means the remote disconnected (socket is reset);
    - a message starting with b'connected' is the handshake confirmation
      and releases connect() via the wait_connected event;
    - anything else is passed to self.raw_message_handler.
    """
    _L.debug('BaseClient start receiving in %s', threading.current_thread().name)
    while True:
        # NOTE(review): when not connected this loop spins without
        # sleeping — busy-waits until connect() sets self.socket.
        if self.isconnected():
            message = SocketMessage.ReceivePayload(self.socket)
            _L.debug('Got server raw message %s', message)
            if (not message):
                _L.debug('BaseClient: remote disconnected, no more message')
                self.socket = None
                continue
            if message.startswith(b'connected'):
                _L.info('Got connection confirm: %s', repr(message))
                self.wait_connected.set()
                continue
            if self.raw_message_handler:
                self.raw_message_handler(message)
            else:
                _L.error('No message handler for raw message %s', message)
| -5,815,084,445,951,740,000
|
Receive packages, Extract message from packages
Call self.message_handler if got a message
Also check whether client is still connected
|
client/python/unrealcv/__init__.py
|
__receiving
|
AI-cecream/unrealcv
|
python
|
def __receiving(self):
    """Receive packages, extract messages, dispatch to the handler.

    Also detects disconnection (falsy payload) and the b'connected'
    handshake message, which releases connect().
    """
    _L.debug('BaseClient start receiving in %s', threading.current_thread().name)
    while True:
        # NOTE(review): busy-spins (no sleep) while disconnected.
        if self.isconnected():
            message = SocketMessage.ReceivePayload(self.socket)
            _L.debug('Got server raw message %s', message)
            if (not message):
                _L.debug('BaseClient: remote disconnected, no more message')
                self.socket = None
                continue
            if message.startswith(b'connected'):
                _L.info('Got connection confirm: %s', repr(message))
                self.wait_connected.set()
                continue
            if self.raw_message_handler:
                self.raw_message_handler(message)
            else:
                _L.error('No message handler for raw message %s', message)
|
def send(self, message):
    """Send *message* to the server.

    Returns True when the client is connected and the payload was handed
    to the socket layer; False when there is no live connection.
    """
    if not self.isconnected():
        _L.error('Fail to send message, client is not connected')
        return False
    _L.debug('BaseClient: Send message %s', self.socket)
    SocketMessage.WrapAndSendPayload(self.socket, message)
    return True
| 4,428,672,945,703,160,000
|
Send message out, return whether the message was successfully sent
|
client/python/unrealcv/__init__.py
|
send
|
AI-cecream/unrealcv
|
python
|
def send(self, message):
    """Send *message* out; return whether it was handed to the socket.

    Returns False (and logs) when the client is not connected.
    """
    if self.isconnected():
        _L.debug('BaseClient: Send message %s', self.socket)
        SocketMessage.WrapAndSendPayload(self.socket, message)
        return True
    else:
        _L.error('Fail to send message, client is not connected')
        return False
|
def request(self, message, timeout=5):
    """Send a request to the server and wait until a response arrives or
    *timeout* seconds elapse.

    Parameters
    ----------
    message : str or bytes
        Command to send; str is encoded as UTF-8 on Python 3.
    timeout : float
        Seconds to wait for the response event.

    Returns
    -------
    The server response (set by the response handler on another thread),
    or None on send failure / timeout.

    Examples
    --------
    >>> client = Client('localhost', 9000)
    >>> client.connect()
    >>> response = client.request('vget /camera/0/view')
    """
    # Normalize to bytes so the wire format is consistent on Python 3.
    if (sys.version_info[0] == 3):
        if (not isinstance(message, bytes)):
            message = message.encode('utf-8')
    def do_request():
        # Prefix the message id so responses can be matched to requests.
        raw_message = (b'%d:%s' % (self.message_id, message))
        _L.debug('Request: %s', raw_message.decode('utf-8'))
        if (not self.message_client.send(raw_message)):
            return None
    # Run inline on the owning thread; otherwise hand off via the queue
    # so all sends happen on one thread.
    if (threading.current_thread().name == self.main_thread.name):
        do_request()
    else:
        self.queue.put(do_request)
    self.wait_response.clear()
    isset = self.wait_response.wait(timeout)
    self.message_id += 1
    assert (isset != None)  # Event.wait returns a bool, never None
    if isset:
        return self.response
    else:
        _L.error('Can not receive a response from server, timeout after %.2f seconds', timeout)
        return None
| -3,716,979,843,409,303,000
|
Send a request to server and wait util get a response from server or timeout.
Parameters
----------
cmd : str
command to control the game. More info can be seen from http://docs.unrealcv.org/en/master/reference/commands.html
Returns
-------
str
plain text message from server
Examples
--------
>>> client = Client('localhost', 9000)
>>> client.connect()
>>> response = client.request('vget /camera/0/view')
|
client/python/unrealcv/__init__.py
|
request
|
AI-cecream/unrealcv
|
python
|
def request(self, message, timeout=5):
    """Send a request to the server and wait for a response or timeout.

    str messages are UTF-8 encoded on Python 3; the request is tagged
    with self.message_id so the response can be matched. Returns
    self.response on success, None on send failure or timeout.
    """
    if (sys.version_info[0] == 3):
        if (not isinstance(message, bytes)):
            message = message.encode('utf-8')
    def do_request():
        # id-prefix lets the response handler pair replies with requests.
        raw_message = (b'%d:%s' % (self.message_id, message))
        _L.debug('Request: %s', raw_message.decode('utf-8'))
        if (not self.message_client.send(raw_message)):
            return None
    # Run inline on the owning thread; otherwise hand off via the queue.
    if (threading.current_thread().name == self.main_thread.name):
        do_request()
    else:
        self.queue.put(do_request)
    self.wait_response.clear()
    isset = self.wait_response.wait(timeout)
    self.message_id += 1
    assert (isset != None)  # Event.wait returns a bool, never None
    if isset:
        return self.response
    else:
        _L.error('Can not receive a response from server, timeout after %.2f seconds', timeout)
        return None
|
def get_listener(arn: Optional[str]=None, load_balancer_arn: Optional[str]=None, port: Optional[int]=None, tags: Optional[Mapping[(str, str)]]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetListenerResult:
    """Provide information about a Load Balancer Listener.

    > **Note:** `alb.Listener` is known as `lb.Listener`. The functionality
    is identical.

    Useful when a module accepts an LB Listener as an input variable and
    needs to know the LB it is attached to, or other listener details.

    :param str arn: ARN of the listener. Required if `load_balancer_arn`
        and `port` is not set.
    :param str load_balancer_arn: ARN of the load balancer. Required if
        `arn` is not set.
    :param int port: Port of the listener. Required if `arn` is not set.
    """
    pulumi.log.warn('get_listener is deprecated: aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener')
    # Provider-side argument names are camelCase.
    __args__ = {
        'arn': arn,
        'loadBalancerArn': load_balancer_arn,
        'port': port,
        'tags': tags,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws:elasticloadbalancingv2/getListener:getListener', __args__, opts=opts, typ=GetListenerResult).value
    return AwaitableGetListenerResult(
        alpn_policy=__ret__.alpn_policy,
        arn=__ret__.arn,
        certificate_arn=__ret__.certificate_arn,
        default_actions=__ret__.default_actions,
        id=__ret__.id,
        load_balancer_arn=__ret__.load_balancer_arn,
        port=__ret__.port,
        protocol=__ret__.protocol,
        ssl_policy=__ret__.ssl_policy,
        tags=__ret__.tags,
    )
| 3,149,790,035,484,996,000
|
> **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.
Provides information about a Load Balancer Listener.
This data source can prove useful when a module accepts an LB Listener as an input variable and needs to know the LB it is attached to, or other information specific to the listener in question.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
config = pulumi.Config()
listener_arn = config.require("listenerArn")
listener = aws.lb.get_listener(arn=listener_arn)
selected = aws.lb.get_load_balancer(name="default-public")
selected443 = aws.lb.get_listener(load_balancer_arn=selected.arn,
port=443)
```
:param str arn: ARN of the listener. Required if `load_balancer_arn` and `port` is not set.
:param str load_balancer_arn: ARN of the load balancer. Required if `arn` is not set.
:param int port: Port of the listener. Required if `arn` is not set.
|
sdk/python/pulumi_aws/elasticloadbalancingv2/get_listener.py
|
get_listener
|
RafalSumislawski/pulumi-aws
|
python
|
def get_listener(arn: Optional[str]=None, load_balancer_arn: Optional[str]=None, port: Optional[int]=None, tags: Optional[Mapping[(str, str)]]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetListenerResult:
    '\n > **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.\n\n Provides information about a Load Balancer Listener.\n\n This data source can prove useful when a module accepts an LB Listener as an input variable and needs to know the LB it is attached to, or other information specific to the listener in question.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n config = pulumi.Config()\n listener_arn = config.require("listenerArn")\n listener = aws.lb.get_listener(arn=listener_arn)\n selected = aws.lb.get_load_balancer(name="default-public")\n selected443 = aws.lb.get_listener(load_balancer_arn=selected.arn,\n port=443)\n ```\n\n\n :param str arn: ARN of the listener. Required if `load_balancer_arn` and `port` is not set.\n :param str load_balancer_arn: ARN of the load balancer. Required if `arn` is not set.\n :param int port: Port of the listener. Required if `arn` is not set.\n '
    pulumi.log.warn('get_listener is deprecated: aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener')
    # Provider-side argument names are camelCase.
    __args__ = dict()
    __args__['arn'] = arn
    __args__['loadBalancerArn'] = load_balancer_arn
    __args__['port'] = port
    __args__['tags'] = tags
    if (opts is None):
        opts = pulumi.InvokeOptions()
    if (opts.version is None):
        opts.version = _utilities.get_version()
    # Invoke the provider synchronously and unwrap the typed result.
    __ret__ = pulumi.runtime.invoke('aws:elasticloadbalancingv2/getListener:getListener', __args__, opts=opts, typ=GetListenerResult).value
    return AwaitableGetListenerResult(alpn_policy=__ret__.alpn_policy, arn=__ret__.arn, certificate_arn=__ret__.certificate_arn, default_actions=__ret__.default_actions, id=__ret__.id, load_balancer_arn=__ret__.load_balancer_arn, port=__ret__.port, protocol=__ret__.protocol, ssl_policy=__ret__.ssl_policy, tags=__ret__.tags)
|
@_utilities.lift_output_func(get_listener)
def get_listener_output(arn: Optional[pulumi.Input[Optional[str]]]=None, load_balancer_arn: Optional[pulumi.Input[Optional[str]]]=None, port: Optional[pulumi.Input[Optional[int]]]=None, tags: Optional[pulumi.Input[Optional[Mapping[(str, str)]]]]=None, opts: Optional[pulumi.InvokeOptions]=None) -> pulumi.Output[GetListenerResult]:
    '\n > **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.\n\n Provides information about a Load Balancer Listener.\n\n This data source can prove useful when a module accepts an LB Listener as an input variable and needs to know the LB it is attached to, or other information specific to the listener in question.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n config = pulumi.Config()\n listener_arn = config.require("listenerArn")\n listener = aws.lb.get_listener(arn=listener_arn)\n selected = aws.lb.get_load_balancer(name="default-public")\n selected443 = aws.lb.get_listener(load_balancer_arn=selected.arn,\n port=443)\n ```\n\n\n :param str arn: ARN of the listener. Required if `load_balancer_arn` and `port` is not set.\n :param str load_balancer_arn: ARN of the load balancer. Required if `arn` is not set.\n :param int port: Port of the listener. Required if `arn` is not set.\n '
    pulumi.log.warn('get_listener is deprecated: aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener')
    # Body is intentionally empty: lift_output_func delegates to
    # get_listener, lifting its plain inputs to pulumi Outputs.
    ...
| 1,704,198,829,280,914,400
|
> **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.
Provides information about a Load Balancer Listener.
This data source can prove useful when a module accepts an LB Listener as an input variable and needs to know the LB it is attached to, or other information specific to the listener in question.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
config = pulumi.Config()
listener_arn = config.require("listenerArn")
listener = aws.lb.get_listener(arn=listener_arn)
selected = aws.lb.get_load_balancer(name="default-public")
selected443 = aws.lb.get_listener(load_balancer_arn=selected.arn,
port=443)
```
:param str arn: ARN of the listener. Required if `load_balancer_arn` and `port` is not set.
:param str load_balancer_arn: ARN of the load balancer. Required if `arn` is not set.
:param int port: Port of the listener. Required if `arn` is not set.
|
sdk/python/pulumi_aws/elasticloadbalancingv2/get_listener.py
|
get_listener_output
|
RafalSumislawski/pulumi-aws
|
python
|
@_utilities.lift_output_func(get_listener)
def get_listener_output(arn: Optional[pulumi.Input[Optional[str]]]=None, load_balancer_arn: Optional[pulumi.Input[Optional[str]]]=None, port: Optional[pulumi.Input[Optional[int]]]=None, tags: Optional[pulumi.Input[Optional[Mapping[(str, str)]]]]=None, opts: Optional[pulumi.InvokeOptions]=None) -> pulumi.Output[GetListenerResult]:
    '\n > **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.\n\n Provides information about a Load Balancer Listener.\n\n This data source can prove useful when a module accepts an LB Listener as an input variable and needs to know the LB it is attached to, or other information specific to the listener in question.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n config = pulumi.Config()\n listener_arn = config.require("listenerArn")\n listener = aws.lb.get_listener(arn=listener_arn)\n selected = aws.lb.get_load_balancer(name="default-public")\n selected443 = aws.lb.get_listener(load_balancer_arn=selected.arn,\n port=443)\n ```\n\n\n :param str arn: ARN of the listener. Required if `load_balancer_arn` and `port` is not set.\n :param str load_balancer_arn: ARN of the load balancer. Required if `arn` is not set.\n :param int port: Port of the listener. Required if `arn` is not set.\n '
    pulumi.log.warn('get_listener is deprecated: aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener')
    # Body intentionally empty: lift_output_func delegates to get_listener.
    ...
|
@property
@pulumi.getter
def id(self) -> str:
    """The provider-assigned unique ID for this managed resource."""
    return pulumi.get(self, 'id')
| 3,214,403,723,836,065,300
|
The provider-assigned unique ID for this managed resource.
|
sdk/python/pulumi_aws/elasticloadbalancingv2/get_listener.py
|
id
|
RafalSumislawski/pulumi-aws
|
python
|
@property
@pulumi.getter
def id(self) -> str:
    """The provider-assigned unique ID for this managed resource."""
    return pulumi.get(self, 'id')
|
def get_ami_ids(executable_users: Optional[Sequence[str]]=None, filters: Optional[Sequence[pulumi.InputType['GetAmiIdsFilterArgs']]]=None, name_regex: Optional[str]=None, owners: Optional[Sequence[str]]=None, sort_ascending: Optional[bool]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetAmiIdsResult:
    """Get a list of AMI IDs matching the specified criteria.

    :param Sequence[str] executable_users: Limit search to users with
        *explicit* launch permission on the image. Valid items are the
        numeric account ID or `self`.
    :param Sequence[pulumi.InputType['GetAmiIdsFilterArgs']] filters: One
        or more name/value pairs to filter off of.
    :param str name_regex: A regex applied locally to the AMI list AWS
        returns; combine with other options to keep the result small.
    :param Sequence[str] owners: List of AMI owners to limit search. At
        least 1 value must be specified.
    :param bool sort_ascending: Used to sort AMIs by creation time.
    """
    pulumi.log.warn('get_ami_ids is deprecated: aws.getAmiIds has been deprecated in favor of aws.ec2.getAmiIds')
    # Provider-side argument names are camelCase.
    __args__ = {
        'executableUsers': executable_users,
        'filters': filters,
        'nameRegex': name_regex,
        'owners': owners,
        'sortAscending': sort_ascending,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws:index/getAmiIds:getAmiIds', __args__, opts=opts, typ=GetAmiIdsResult).value
    return AwaitableGetAmiIdsResult(
        executable_users=__ret__.executable_users,
        filters=__ret__.filters,
        id=__ret__.id,
        ids=__ret__.ids,
        name_regex=__ret__.name_regex,
        owners=__ret__.owners,
        sort_ascending=__ret__.sort_ascending,
    )
| 7,074,052,594,644,177,000
|
Use this data source to get a list of AMI IDs matching the specified criteria.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
ubuntu = aws.ec2.get_ami_ids(filters=[aws.ec2.GetAmiIdsFilterArgs(
name="name",
values=["ubuntu/images/ubuntu-*-*-amd64-server-*"],
)],
owners=["099720109477"])
```
:param Sequence[str] executable_users: Limit search to users with *explicit* launch
permission on the image. Valid items are the numeric account ID or `self`.
:param Sequence[pulumi.InputType['GetAmiIdsFilterArgs']] filters: One or more name/value pairs to filter off of. There
are several valid keys, for a full reference, check out
[describe-images in the AWS CLI reference][1].
:param str name_regex: A regex string to apply to the AMI list returned
by AWS. This allows more advanced filtering not supported from the AWS API.
This filtering is done locally on what AWS returns, and could have a performance
impact if the result is large. It is recommended to combine this with other
options to narrow down the list AWS returns.
:param Sequence[str] owners: List of AMI owners to limit search. At least 1 value must be specified. Valid values: an AWS account ID, `self` (the current account), or an AWS owner alias (e.g. `amazon`, `aws-marketplace`, `microsoft`).
:param bool sort_ascending: Used to sort AMIs by creation time.
|
sdk/python/pulumi_aws/get_ami_ids.py
|
get_ami_ids
|
elad-snyk/pulumi-aws
|
python
|
def get_ami_ids(executable_users: Optional[Sequence[str]]=None, filters: Optional[Sequence[pulumi.InputType['GetAmiIdsFilterArgs']]]=None, name_regex: Optional[str]=None, owners: Optional[Sequence[str]]=None, sort_ascending: Optional[bool]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetAmiIdsResult:
    '\n Use this data source to get a list of AMI IDs matching the specified criteria.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n ubuntu = aws.ec2.get_ami_ids(filters=[aws.ec2.GetAmiIdsFilterArgs(\n name="name",\n values=["ubuntu/images/ubuntu-*-*-amd64-server-*"],\n )],\n owners=["099720109477"])\n ```\n\n\n :param Sequence[str] executable_users: Limit search to users with *explicit* launch\n permission on the image. Valid items are the numeric account ID or `self`.\n :param Sequence[pulumi.InputType[\'GetAmiIdsFilterArgs\']] filters: One or more name/value pairs to filter off of. There\n are several valid keys, for a full reference, check out\n [describe-images in the AWS CLI reference][1].\n :param str name_regex: A regex string to apply to the AMI list returned\n by AWS. This allows more advanced filtering not supported from the AWS API.\n This filtering is done locally on what AWS returns, and could have a performance\n impact if the result is large. It is recommended to combine this with other\n options to narrow down the list AWS returns.\n :param Sequence[str] owners: List of AMI owners to limit search. At least 1 value must be specified. Valid values: an AWS account ID, `self` (the current account), or an AWS owner alias (e.g. `amazon`, `aws-marketplace`, `microsoft`).\n :param bool sort_ascending: Used to sort AMIs by creation time.\n '
    pulumi.log.warn('get_ami_ids is deprecated: aws.getAmiIds has been deprecated in favor of aws.ec2.getAmiIds')
    # Provider-side argument names are camelCase.
    __args__ = dict()
    __args__['executableUsers'] = executable_users
    __args__['filters'] = filters
    __args__['nameRegex'] = name_regex
    __args__['owners'] = owners
    __args__['sortAscending'] = sort_ascending
    if (opts is None):
        opts = pulumi.InvokeOptions()
    if (opts.version is None):
        opts.version = _utilities.get_version()
    # Invoke the provider synchronously and unwrap the typed result.
    __ret__ = pulumi.runtime.invoke('aws:index/getAmiIds:getAmiIds', __args__, opts=opts, typ=GetAmiIdsResult).value
    return AwaitableGetAmiIdsResult(executable_users=__ret__.executable_users, filters=__ret__.filters, id=__ret__.id, ids=__ret__.ids, name_regex=__ret__.name_regex, owners=__ret__.owners, sort_ascending=__ret__.sort_ascending)
|
@property
@pulumi.getter
def id(self) -> str:
    """The provider-assigned unique ID for this managed resource."""
    return pulumi.get(self, 'id')
| 3,214,403,723,836,065,300
|
The provider-assigned unique ID for this managed resource.
|
sdk/python/pulumi_aws/get_ami_ids.py
|
id
|
elad-snyk/pulumi-aws
|
python
|
@property
@pulumi.getter
def id(self) -> str:
    """The provider-assigned unique ID for this managed resource."""
    return pulumi.get(self, 'id')
|
def __init__(self, path_name, surffix, path_surffix):
    """Set up the sub-scheduler: cluster env, three policy networks
    restored from checkpoints, and per-network trajectory buffers.

    path_name / path_surffix: combined to locate checkpoint directories
        '<path_surffix><path_name>{1,2,3}/model.ckpt'.
    surffix: suffix used to namespace each PolicyGradient's variables.
    """
    self.NUM_NODES = params['number of nodes in the cluster']
    self.env = LraClusterEnv(num_nodes=self.NUM_NODES)
    # One checkpoint per policy network.
    ckpt_path_1 = (((path_surffix + path_name) + '1') + '/model.ckpt')
    ckpt_path_2 = (((path_surffix + path_name) + '2') + '/model.ckpt')
    ckpt_path_3 = (((path_surffix + path_name) + '3') + '/model.ckpt')
    self.nodes_per_group = int(params['nodes per group'])
    '\n Build Network\n '
    self.n_actions = self.nodes_per_group
    # Feature vector: per-action app stats plus global app counts (see
    # the arithmetic below; exact layout defined by the env).
    self.n_features = int((((self.n_actions * ((self.env.NUM_APPS + 1) + self.env.NUM_APPS)) + 1) + self.env.NUM_APPS))
    self.RL_1 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=(surffix + '1a'))
    self.RL_2 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=(surffix + '2a'))
    self.RL_3 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=(surffix + '3a'))
    # Restore trained weights for each network.
    self.RL_1.restore_session(ckpt_path_1)
    self.RL_2.restore_session(ckpt_path_2)
    self.RL_3.restore_session(ckpt_path_3)
    # Per-network buffers: current-episode and best-so-far trajectories.
    (self.observation_episode_1, self.action_episode_1, self.reward_episode_1, self.safety_episode_1) = ([], [], [], [])
    (self.observation_optimal_1, self.action_optimal_1, self.reward_optimal_1, self.safety_optimal_1) = ([], [], [], [])
    (self.observation_episode_2, self.action_episode_2, self.reward_episode_2, self.safety_episode_2) = ([], [], [], [])
    (self.observation_optimal_2, self.action_optimal_2, self.reward_optimal_2, self.safety_optimal_2) = ([], [], [], [])
    (self.observation_episode_3, self.action_episode_3, self.reward_episode_3, self.safety_episode_3) = ([], [], [], [])
    (self.observation_optimal_3, self.action_optimal_3, self.reward_optimal_3, self.safety_optimal_3) = ([], [], [], [])
| 4,691,257,712,958,490,000
|
parameters set
|
testbed/SubScheduler.py
|
__init__
|
George-RL-based-container-sche/George
|
python
|
def __init__(self, path_name, surffix, path_surffix):
    """Set up the sub-scheduler: cluster env, three policy networks
    restored from checkpoints, and per-network trajectory buffers.
    """
    self.NUM_NODES = params['number of nodes in the cluster']
    self.env = LraClusterEnv(num_nodes=self.NUM_NODES)
    # One checkpoint per policy network.
    ckpt_path_1 = (((path_surffix + path_name) + '1') + '/model.ckpt')
    ckpt_path_2 = (((path_surffix + path_name) + '2') + '/model.ckpt')
    ckpt_path_3 = (((path_surffix + path_name) + '3') + '/model.ckpt')
    self.nodes_per_group = int(params['nodes per group'])
    '\n Build Network\n '
    self.n_actions = self.nodes_per_group
    self.n_features = int((((self.n_actions * ((self.env.NUM_APPS + 1) + self.env.NUM_APPS)) + 1) + self.env.NUM_APPS))
    self.RL_1 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=(surffix + '1a'))
    self.RL_2 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=(surffix + '2a'))
    self.RL_3 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=(surffix + '3a'))
    # Restore trained weights for each network.
    self.RL_1.restore_session(ckpt_path_1)
    self.RL_2.restore_session(ckpt_path_2)
    self.RL_3.restore_session(ckpt_path_3)
    # Per-network buffers: current-episode and best-so-far trajectories.
    (self.observation_episode_1, self.action_episode_1, self.reward_episode_1, self.safety_episode_1) = ([], [], [], [])
    (self.observation_optimal_1, self.action_optimal_1, self.reward_optimal_1, self.safety_optimal_1) = ([], [], [], [])
    (self.observation_episode_2, self.action_episode_2, self.reward_episode_2, self.safety_episode_2) = ([], [], [], [])
    (self.observation_optimal_2, self.action_optimal_2, self.reward_optimal_2, self.safety_optimal_2) = ([], [], [], [])
    (self.observation_episode_3, self.action_episode_3, self.reward_episode_3, self.safety_episode_3) = ([], [], [], [])
    (self.observation_optimal_3, self.action_optimal_3, self.reward_optimal_3, self.safety_optimal_3) = ([], [], [], [])
|
def create_passmanager(self, coupling_map, initial_layout=None):
    """Return a PassManager using self.pass_class(coupling_map, initial_layout).

    When *initial_layout* is given, a SetLayout pass is prepended so the
    mapper starts from that layout.
    """
    manager = PassManager()
    if initial_layout:
        manager.append(SetLayout(Layout(initial_layout)))
    mapper_pass = self.pass_class(CouplingMap(coupling_map), **self.additional_args)
    manager.append(mapper_pass)
    return manager
| 9,120,436,214,806,114,000
|
Returns a PassManager using self.pass_class(coupling_map, initial_layout)
|
test/python/transpiler/test_mappers.py
|
create_passmanager
|
7338/qiskit-terra
|
python
|
def create_passmanager(self, coupling_map, initial_layout=None):
    """Return a PassManager using self.pass_class(coupling_map, initial_layout)."""
    passmanager = PassManager()
    if initial_layout:
        # Prepend the requested starting layout for the mapper pass.
        passmanager.append(SetLayout(Layout(initial_layout)))
    passmanager.append(self.pass_class(CouplingMap(coupling_map), **self.additional_args))
    return passmanager
|
def create_backend(self):
    """Return a Backend (the BasicAer QASM simulator)."""
    return BasicAer.get_backend('qasm_simulator')
| 4,351,215,274,467,167,700
|
Returns a Backend.
|
test/python/transpiler/test_mappers.py
|
create_backend
|
7338/qiskit-terra
|
python
|
def create_backend(self):
    """Return a Backend (the BasicAer QASM simulator)."""
    return BasicAer.get_backend('qasm_simulator')
|
def generate_ground_truth(self, transpiled_result, filename):
"Generates the expected result into a file.\n\n Checks if transpiled_result matches self.counts by running in a backend\n (self.create_backend()). That's saved in a QASM in filename.\n\n Args:\n transpiled_result (DAGCircuit): The DAGCircuit to execute.\n filename (string): Where the QASM is saved.\n "
sim_backend = self.create_backend()
job = execute(transpiled_result, sim_backend, seed_simulator=self.seed_simulator, seed_transpiler=self.seed_transpiler, shots=self.shots)
self.assertDictAlmostEqual(self.counts, job.result().get_counts(), delta=self.delta)
transpiled_result.qasm(formatted=False, filename=filename)
| -8,147,350,874,310,590,000
|
Generates the expected result into a file.
Checks if transpiled_result matches self.counts by running in a backend
(self.create_backend()). That's saved in a QASM in filename.
Args:
transpiled_result (DAGCircuit): The DAGCircuit to execute.
filename (string): Where the QASM is saved.
|
test/python/transpiler/test_mappers.py
|
generate_ground_truth
|
7338/qiskit-terra
|
python
|
def generate_ground_truth(self, transpiled_result, filename):
"Generates the expected result into a file.\n\n Checks if transpiled_result matches self.counts by running in a backend\n (self.create_backend()). That's saved in a QASM in filename.\n\n Args:\n transpiled_result (DAGCircuit): The DAGCircuit to execute.\n filename (string): Where the QASM is saved.\n "
sim_backend = self.create_backend()
job = execute(transpiled_result, sim_backend, seed_simulator=self.seed_simulator, seed_transpiler=self.seed_transpiler, shots=self.shots)
self.assertDictAlmostEqual(self.counts, job.result().get_counts(), delta=self.delta)
transpiled_result.qasm(formatted=False, filename=filename)
|
def assertResult(self, result, circuit):
'Fetches the QASM in circuit.name file and compares it with result.'
qasm_name = ('%s_%s.qasm' % (type(self).__name__, circuit.name))
filename = os.path.join(DIRNAME, qasm_name)
if self.regenerate_expected:
self.generate_ground_truth(result, filename)
expected = QuantumCircuit.from_qasm_file(filename)
self.assertEqual(result, expected)
| 5,088,906,843,568,885,000
|
Fetches the QASM in circuit.name file and compares it with result.
|
test/python/transpiler/test_mappers.py
|
assertResult
|
7338/qiskit-terra
|
python
|
def assertResult(self, result, circuit):
qasm_name = ('%s_%s.qasm' % (type(self).__name__, circuit.name))
filename = os.path.join(DIRNAME, qasm_name)
if self.regenerate_expected:
self.generate_ground_truth(result, filename)
expected = QuantumCircuit.from_qasm_file(filename)
self.assertEqual(result, expected)
|
def test_a_cx_to_map(self):
"A single CX needs to be remapped.\n\n q0:----------m-----\n |\n q1:-[H]-(+)--|-m---\n | | |\n q2:------.---|-|-m-\n | | |\n c0:----------.-|-|-\n c1:------------.-|-\n c2:--------------.-\n\n CouplingMap map: [1]<-[0]->[2]\n\n expected count: '000': 50%\n '110': 50%\n "
self.counts = {'000': 512, '110': 512}
self.shots = 1024
self.delta = 5
coupling_map = [[0, 1], [0, 2]]
qr = QuantumRegister(3, 'q')
cr = ClassicalRegister(3, 'c')
circuit = QuantumCircuit(qr, cr, name='a_cx_to_map')
circuit.h(qr[1])
circuit.cx(qr[1], qr[2])
circuit.measure(qr, cr)
result = self.create_passmanager(coupling_map).run(circuit)
self.assertResult(result, circuit)
| -8,152,201,310,096,919,000
|
A single CX needs to be remapped.
q0:----------m-----
|
q1:-[H]-(+)--|-m---
| | |
q2:------.---|-|-m-
| | |
c0:----------.-|-|-
c1:------------.-|-
c2:--------------.-
CouplingMap map: [1]<-[0]->[2]
expected count: '000': 50%
'110': 50%
|
test/python/transpiler/test_mappers.py
|
test_a_cx_to_map
|
7338/qiskit-terra
|
python
|
def test_a_cx_to_map(self):
"A single CX needs to be remapped.\n\n q0:----------m-----\n |\n q1:-[H]-(+)--|-m---\n | | |\n q2:------.---|-|-m-\n | | |\n c0:----------.-|-|-\n c1:------------.-|-\n c2:--------------.-\n\n CouplingMap map: [1]<-[0]->[2]\n\n expected count: '000': 50%\n '110': 50%\n "
self.counts = {'000': 512, '110': 512}
self.shots = 1024
self.delta = 5
coupling_map = [[0, 1], [0, 2]]
qr = QuantumRegister(3, 'q')
cr = ClassicalRegister(3, 'c')
circuit = QuantumCircuit(qr, cr, name='a_cx_to_map')
circuit.h(qr[1])
circuit.cx(qr[1], qr[2])
circuit.measure(qr, cr)
result = self.create_passmanager(coupling_map).run(circuit)
self.assertResult(result, circuit)
|
def test_initial_layout(self):
"Using a non-trivial initial_layout.\n\n q3:----------------m--\n q0:----------m-----|--\n | |\n q1:-[H]-(+)--|-m---|--\n | | | |\n q2:------.---|-|-m-|--\n | | | |\n c0:----------.-|-|-|--\n c1:------------.-|-|--\n c2:--------------.-|--\n c3:----------------.--\n CouplingMap map: [1]<-[0]->[2]->[3]\n\n expected count: '000': 50%\n '110': 50%\n "
self.counts = {'0000': 512, '0110': 512}
self.shots = 1024
self.delta = 5
coupling_map = [[0, 1], [0, 2], [2, 3]]
qr = QuantumRegister(4, 'q')
cr = ClassicalRegister(4, 'c')
circuit = QuantumCircuit(qr, cr, name='initial_layout')
circuit.h(qr[1])
circuit.cx(qr[1], qr[2])
circuit.measure(qr, cr)
layout = {qr[3]: 0, qr[0]: 1, qr[1]: 2, qr[2]: 3}
result = self.create_passmanager(coupling_map, layout).run(circuit)
self.assertResult(result, circuit)
| -6,708,405,234,380,215,000
|
Using a non-trivial initial_layout.
q3:----------------m--
q0:----------m-----|--
| |
q1:-[H]-(+)--|-m---|--
| | | |
q2:------.---|-|-m-|--
| | | |
c0:----------.-|-|-|--
c1:------------.-|-|--
c2:--------------.-|--
c3:----------------.--
CouplingMap map: [1]<-[0]->[2]->[3]
expected count: '000': 50%
'110': 50%
|
test/python/transpiler/test_mappers.py
|
test_initial_layout
|
7338/qiskit-terra
|
python
|
def test_initial_layout(self):
"Using a non-trivial initial_layout.\n\n q3:----------------m--\n q0:----------m-----|--\n | |\n q1:-[H]-(+)--|-m---|--\n | | | |\n q2:------.---|-|-m-|--\n | | | |\n c0:----------.-|-|-|--\n c1:------------.-|-|--\n c2:--------------.-|--\n c3:----------------.--\n CouplingMap map: [1]<-[0]->[2]->[3]\n\n expected count: '000': 50%\n '110': 50%\n "
self.counts = {'0000': 512, '0110': 512}
self.shots = 1024
self.delta = 5
coupling_map = [[0, 1], [0, 2], [2, 3]]
qr = QuantumRegister(4, 'q')
cr = ClassicalRegister(4, 'c')
circuit = QuantumCircuit(qr, cr, name='initial_layout')
circuit.h(qr[1])
circuit.cx(qr[1], qr[2])
circuit.measure(qr, cr)
layout = {qr[3]: 0, qr[0]: 1, qr[1]: 2, qr[2]: 3}
result = self.create_passmanager(coupling_map, layout).run(circuit)
self.assertResult(result, circuit)
|
def test_handle_measurement(self):
"Handle measurement correctly.\n\n q0:--.-----(+)-m-------\n | | |\n q1:-(+)-(+)-|--|-m-----\n | | | |\n q2:------|--|--|-|-m---\n | | | | |\n q3:-[H]--.--.--|-|-|-m-\n | | | |\n c0:------------.-|-|-|-\n c1:--------------.-|-|-\n c2:----------------.-|-\n c3:------------------.-\n\n CouplingMap map: [0]->[1]->[2]->[3]\n\n expected count: '0000': 50%\n '1011': 50%\n "
self.counts = {'1011': 512, '0000': 512}
self.shots = 1024
self.delta = 5
coupling_map = [[0, 1], [1, 2], [2, 3]]
qr = QuantumRegister(4, 'q')
cr = ClassicalRegister(4, 'c')
circuit = QuantumCircuit(qr, cr, name='handle_measurement')
circuit.h(qr[3])
circuit.cx(qr[0], qr[1])
circuit.cx(qr[3], qr[1])
circuit.cx(qr[3], qr[0])
circuit.measure(qr, cr)
result = self.create_passmanager(coupling_map).run(circuit)
self.assertResult(result, circuit)
| 1,909,281,407,744,232,700
|
Handle measurement correctly.
q0:--.-----(+)-m-------
| | |
q1:-(+)-(+)-|--|-m-----
| | | |
q2:------|--|--|-|-m---
| | | | |
q3:-[H]--.--.--|-|-|-m-
| | | |
c0:------------.-|-|-|-
c1:--------------.-|-|-
c2:----------------.-|-
c3:------------------.-
CouplingMap map: [0]->[1]->[2]->[3]
expected count: '0000': 50%
'1011': 50%
|
test/python/transpiler/test_mappers.py
|
test_handle_measurement
|
7338/qiskit-terra
|
python
|
def test_handle_measurement(self):
"Handle measurement correctly.\n\n q0:--.-----(+)-m-------\n | | |\n q1:-(+)-(+)-|--|-m-----\n | | | |\n q2:------|--|--|-|-m---\n | | | | |\n q3:-[H]--.--.--|-|-|-m-\n | | | |\n c0:------------.-|-|-|-\n c1:--------------.-|-|-\n c2:----------------.-|-\n c3:------------------.-\n\n CouplingMap map: [0]->[1]->[2]->[3]\n\n expected count: '0000': 50%\n '1011': 50%\n "
self.counts = {'1011': 512, '0000': 512}
self.shots = 1024
self.delta = 5
coupling_map = [[0, 1], [1, 2], [2, 3]]
qr = QuantumRegister(4, 'q')
cr = ClassicalRegister(4, 'c')
circuit = QuantumCircuit(qr, cr, name='handle_measurement')
circuit.h(qr[3])
circuit.cx(qr[0], qr[1])
circuit.cx(qr[3], qr[1])
circuit.cx(qr[3], qr[0])
circuit.measure(qr, cr)
result = self.create_passmanager(coupling_map).run(circuit)
self.assertResult(result, circuit)
|
def build_holder_map(self) -> None:
'\n Build a mapping of `HoldableObject` types to their corresponding\n `ObjectHolder`s. This mapping is used in `InterpreterBase` to automatically\n holderify all returned values from methods and functions.\n '
self.holder_map.update({list: P_OBJ.ArrayHolder, dict: P_OBJ.DictHolder, int: P_OBJ.IntegerHolder, bool: P_OBJ.BooleanHolder, str: P_OBJ.StringHolder, P_OBJ.MesonVersionString: P_OBJ.MesonVersionStringHolder, mesonlib.File: OBJ.FileHolder, build.SharedLibrary: OBJ.SharedLibraryHolder, build.StaticLibrary: OBJ.StaticLibraryHolder, build.BothLibraries: OBJ.BothLibrariesHolder, build.SharedModule: OBJ.SharedModuleHolder, build.Executable: OBJ.ExecutableHolder, build.Jar: OBJ.JarHolder, build.CustomTarget: OBJ.CustomTargetHolder, build.CustomTargetIndex: OBJ.CustomTargetIndexHolder, build.Generator: OBJ.GeneratorHolder, build.GeneratedList: OBJ.GeneratedListHolder, build.ExtractedObjects: OBJ.GeneratedObjectsHolder, build.RunTarget: OBJ.RunTargetHolder, build.AliasTarget: OBJ.AliasTargetHolder, build.Headers: OBJ.HeadersHolder, build.Man: OBJ.ManHolder, build.EmptyDir: OBJ.EmptyDirHolder, build.Data: OBJ.DataHolder, build.SymlinkData: OBJ.SymlinkDataHolder, build.InstallDir: OBJ.InstallDirHolder, build.IncludeDirs: OBJ.IncludeDirsHolder, build.EnvironmentVariables: OBJ.EnvironmentVariablesHolder, build.StructuredSources: OBJ.StructuredSourcesHolder, compilers.RunResult: compilerOBJ.TryRunResultHolder, dependencies.ExternalLibrary: OBJ.ExternalLibraryHolder, coredata.UserFeatureOption: OBJ.FeatureOptionHolder, envconfig.MachineInfo: OBJ.MachineHolder, build.ConfigurationData: OBJ.ConfigurationDataHolder})
'\n Build a mapping of `HoldableObject` base classes to their\n corresponding `ObjectHolder`s. The difference to `self.holder_map`\n is that the keys here define an upper bound instead of requiring an\n exact match.\n\n The mappings defined here are only used when there was no direct hit\n found in `self.holder_map`.\n '
self.bound_holder_map.update({dependencies.Dependency: OBJ.DependencyHolder, ExternalProgram: OBJ.ExternalProgramHolder, compilers.Compiler: compilerOBJ.CompilerHolder, ModuleObject: OBJ.ModuleObjectHolder, MutableModuleObject: OBJ.MutableModuleObjectHolder})
| 2,333,314,712,237,210,600
|
Build a mapping of `HoldableObject` types to their corresponding
`ObjectHolder`s. This mapping is used in `InterpreterBase` to automatically
holderify all returned values from methods and functions.
|
mesonbuild/interpreter/interpreter.py
|
build_holder_map
|
val-verde/python-meson
|
python
|
def build_holder_map(self) -> None:
'\n Build a mapping of `HoldableObject` types to their corresponding\n `ObjectHolder`s. This mapping is used in `InterpreterBase` to automatically\n holderify all returned values from methods and functions.\n '
self.holder_map.update({list: P_OBJ.ArrayHolder, dict: P_OBJ.DictHolder, int: P_OBJ.IntegerHolder, bool: P_OBJ.BooleanHolder, str: P_OBJ.StringHolder, P_OBJ.MesonVersionString: P_OBJ.MesonVersionStringHolder, mesonlib.File: OBJ.FileHolder, build.SharedLibrary: OBJ.SharedLibraryHolder, build.StaticLibrary: OBJ.StaticLibraryHolder, build.BothLibraries: OBJ.BothLibrariesHolder, build.SharedModule: OBJ.SharedModuleHolder, build.Executable: OBJ.ExecutableHolder, build.Jar: OBJ.JarHolder, build.CustomTarget: OBJ.CustomTargetHolder, build.CustomTargetIndex: OBJ.CustomTargetIndexHolder, build.Generator: OBJ.GeneratorHolder, build.GeneratedList: OBJ.GeneratedListHolder, build.ExtractedObjects: OBJ.GeneratedObjectsHolder, build.RunTarget: OBJ.RunTargetHolder, build.AliasTarget: OBJ.AliasTargetHolder, build.Headers: OBJ.HeadersHolder, build.Man: OBJ.ManHolder, build.EmptyDir: OBJ.EmptyDirHolder, build.Data: OBJ.DataHolder, build.SymlinkData: OBJ.SymlinkDataHolder, build.InstallDir: OBJ.InstallDirHolder, build.IncludeDirs: OBJ.IncludeDirsHolder, build.EnvironmentVariables: OBJ.EnvironmentVariablesHolder, build.StructuredSources: OBJ.StructuredSourcesHolder, compilers.RunResult: compilerOBJ.TryRunResultHolder, dependencies.ExternalLibrary: OBJ.ExternalLibraryHolder, coredata.UserFeatureOption: OBJ.FeatureOptionHolder, envconfig.MachineInfo: OBJ.MachineHolder, build.ConfigurationData: OBJ.ConfigurationDataHolder})
'\n Build a mapping of `HoldableObject` base classes to their\n corresponding `ObjectHolder`s. The difference to `self.holder_map`\n is that the keys here define an upper bound instead of requiring an\n exact match.\n\n The mappings defined here are only used when there was no direct hit\n found in `self.holder_map`.\n '
self.bound_holder_map.update({dependencies.Dependency: OBJ.DependencyHolder, ExternalProgram: OBJ.ExternalProgramHolder, compilers.Compiler: compilerOBJ.CompilerHolder, ModuleObject: OBJ.ModuleObjectHolder, MutableModuleObject: OBJ.MutableModuleObjectHolder})
|
def append_holder_map(self, held_type: T.Type[mesonlib.HoldableObject], holder_type: T.Type[ObjectHolder]) -> None:
'\n Adds one additional mapping to the `holder_map`.\n\n The intended use for this function is in the `initialize` method of\n modules to register custom object holders.\n '
self.holder_map.update({held_type: holder_type})
| 2,906,162,101,009,650,700
|
Adds one additional mapping to the `holder_map`.
The intended use for this function is in the `initialize` method of
modules to register custom object holders.
|
mesonbuild/interpreter/interpreter.py
|
append_holder_map
|
val-verde/python-meson
|
python
|
def append_holder_map(self, held_type: T.Type[mesonlib.HoldableObject], holder_type: T.Type[ObjectHolder]) -> None:
'\n Adds one additional mapping to the `holder_map`.\n\n The intended use for this function is in the `initialize` method of\n modules to register custom object holders.\n '
self.holder_map.update({held_type: holder_type})
|
@staticmethod
def _validate_custom_target_outputs(has_multi_in: bool, outputs: T.Iterable[str], name: str) -> None:
'Checks for additional invalid values in a custom_target output.\n\n This cannot be done with typed_kwargs because it requires the number of\n inputs.\n '
for out in outputs:
if (has_multi_in and (('@PLAINNAME@' in out) or ('@BASENAME@' in out))):
raise InvalidArguments(f"""{name}: output cannot containe "@PLAINNAME@" or "@BASENAME@" when there is more than one input (we can't know which to use)""")
| 215,060,965,166,665,920
|
Checks for additional invalid values in a custom_target output.
This cannot be done with typed_kwargs because it requires the number of
inputs.
|
mesonbuild/interpreter/interpreter.py
|
_validate_custom_target_outputs
|
val-verde/python-meson
|
python
|
@staticmethod
def _validate_custom_target_outputs(has_multi_in: bool, outputs: T.Iterable[str], name: str) -> None:
'Checks for additional invalid values in a custom_target output.\n\n This cannot be done with typed_kwargs because it requires the number of\n inputs.\n '
for out in outputs:
if (has_multi_in and (('@PLAINNAME@' in out) or ('@BASENAME@' in out))):
raise InvalidArguments(f"{name}: output cannot containe "@PLAINNAME@" or "@BASENAME@" when there is more than one input (we can't know which to use)")
|
def install_data_impl(self, sources: T.List[mesonlib.File], install_dir: str, install_mode: FileMode, rename: T.Optional[str], tag: T.Optional[str], install_dir_name: T.Optional[str]=None, install_data_type: T.Optional[str]=None) -> build.Data:
'Just the implementation with no validation.'
data = build.Data(sources, install_dir, (install_dir_name or install_dir), install_mode, self.subproject, rename, tag, install_data_type)
self.build.data.append(data)
return data
| 7,161,431,886,592,338,000
|
Just the implementation with no validation.
|
mesonbuild/interpreter/interpreter.py
|
install_data_impl
|
val-verde/python-meson
|
python
|
def install_data_impl(self, sources: T.List[mesonlib.File], install_dir: str, install_mode: FileMode, rename: T.Optional[str], tag: T.Optional[str], install_dir_name: T.Optional[str]=None, install_data_type: T.Optional[str]=None) -> build.Data:
data = build.Data(sources, install_dir, (install_dir_name or install_dir), install_mode, self.subproject, rename, tag, install_data_type)
self.build.data.append(data)
return data
|
def source_strings_to_files(self, sources: T.List['SourceInputs'], strict: bool=True) -> T.List['SourceOutputs']:
'Lower inputs to a list of Targets and Files, replacing any strings.\n\n :param sources: A raw (Meson DSL) list of inputs (targets, files, and\n strings)\n :raises InterpreterException: if any of the inputs are of an invalid type\n :return: A list of Targets and Files\n '
mesonlib.check_direntry_issues(sources)
if (not isinstance(sources, list)):
sources = [sources]
results: T.List['SourceOutputs'] = []
for s in sources:
if isinstance(s, str):
if ((not strict) and s.startswith(self.environment.get_build_dir())):
results.append(s)
mlog.warning(f'Source item {s!r} cannot be converted to File object, because it is a generated file. This will become a hard error in the future.', location=self.current_node)
else:
self.validate_within_subproject(self.subdir, s)
results.append(mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s))
elif isinstance(s, mesonlib.File):
results.append(s)
elif isinstance(s, (build.GeneratedList, build.BuildTarget, build.CustomTargetIndex, build.CustomTarget, build.ExtractedObjects, build.StructuredSources)):
results.append(s)
else:
raise InterpreterException(f'Source item is {s!r} instead of string or File-type object')
return results
| -1,622,654,216,923,596,800
|
Lower inputs to a list of Targets and Files, replacing any strings.
:param sources: A raw (Meson DSL) list of inputs (targets, files, and
strings)
:raises InterpreterException: if any of the inputs are of an invalid type
:return: A list of Targets and Files
|
mesonbuild/interpreter/interpreter.py
|
source_strings_to_files
|
val-verde/python-meson
|
python
|
def source_strings_to_files(self, sources: T.List['SourceInputs'], strict: bool=True) -> T.List['SourceOutputs']:
'Lower inputs to a list of Targets and Files, replacing any strings.\n\n :param sources: A raw (Meson DSL) list of inputs (targets, files, and\n strings)\n :raises InterpreterException: if any of the inputs are of an invalid type\n :return: A list of Targets and Files\n '
mesonlib.check_direntry_issues(sources)
if (not isinstance(sources, list)):
sources = [sources]
results: T.List['SourceOutputs'] = []
for s in sources:
if isinstance(s, str):
if ((not strict) and s.startswith(self.environment.get_build_dir())):
results.append(s)
mlog.warning(f'Source item {s!r} cannot be converted to File object, because it is a generated file. This will become a hard error in the future.', location=self.current_node)
else:
self.validate_within_subproject(self.subdir, s)
results.append(mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s))
elif isinstance(s, mesonlib.File):
results.append(s)
elif isinstance(s, (build.GeneratedList, build.BuildTarget, build.CustomTargetIndex, build.CustomTarget, build.ExtractedObjects, build.StructuredSources)):
results.append(s)
else:
raise InterpreterException(f'Source item is {s!r} instead of string or File-type object')
return results
|
def is_locked(hass, entity_id=None):
'Return if the lock is locked based on the statemachine.'
entity_id = (entity_id or ENTITY_ID_ALL_LOCKS)
return hass.states.is_state(entity_id, STATE_LOCKED)
| 5,302,125,805,284,089,000
|
Return if the lock is locked based on the statemachine.
|
homeassistant/components/lock/__init__.py
|
is_locked
|
Norien/Home-Assistant
|
python
|
def is_locked(hass, entity_id=None):
entity_id = (entity_id or ENTITY_ID_ALL_LOCKS)
return hass.states.is_state(entity_id, STATE_LOCKED)
|
def lock(hass, entity_id=None, code=None):
'Lock all or specified locks.'
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_LOCK, data)
| 4,340,812,531,294,071,300
|
Lock all or specified locks.
|
homeassistant/components/lock/__init__.py
|
lock
|
Norien/Home-Assistant
|
python
|
def lock(hass, entity_id=None, code=None):
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_LOCK, data)
|
def unlock(hass, entity_id=None, code=None):
'Unlock all or specified locks.'
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_UNLOCK, data)
| -934,216,263,176,914,000
|
Unlock all or specified locks.
|
homeassistant/components/lock/__init__.py
|
unlock
|
Norien/Home-Assistant
|
python
|
def unlock(hass, entity_id=None, code=None):
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_UNLOCK, data)
|
@asyncio.coroutine
def async_setup(hass, config):
'Track states and offer events for locks.'
component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_LOCKS)
(yield from component.async_setup(config))
@asyncio.coroutine
def async_handle_lock_service(service):
'Handle calls to the lock services.'
target_locks = component.async_extract_from_service(service)
code = service.data.get(ATTR_CODE)
for entity in target_locks:
if (service.service == SERVICE_LOCK):
(yield from entity.async_lock(code=code))
else:
(yield from entity.async_unlock(code=code))
update_tasks = []
for entity in target_locks:
if (not entity.should_poll):
continue
update_coro = hass.loop.create_task(entity.async_update_ha_state(True))
if hasattr(entity, 'async_update'):
update_tasks.append(update_coro)
else:
(yield from update_coro)
if update_tasks:
(yield from asyncio.wait(update_tasks, loop=hass.loop))
descriptions = (yield from hass.loop.run_in_executor(None, load_yaml_config_file, os.path.join(os.path.dirname(__file__), 'services.yaml')))
hass.services.async_register(DOMAIN, SERVICE_UNLOCK, async_handle_lock_service, descriptions.get(SERVICE_UNLOCK), schema=LOCK_SERVICE_SCHEMA)
hass.services.async_register(DOMAIN, SERVICE_LOCK, async_handle_lock_service, descriptions.get(SERVICE_LOCK), schema=LOCK_SERVICE_SCHEMA)
return True
| 6,413,752,585,001,510,000
|
Track states and offer events for locks.
|
homeassistant/components/lock/__init__.py
|
async_setup
|
Norien/Home-Assistant
|
python
|
@asyncio.coroutine
def async_setup(hass, config):
component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_LOCKS)
(yield from component.async_setup(config))
@asyncio.coroutine
def async_handle_lock_service(service):
'Handle calls to the lock services.'
target_locks = component.async_extract_from_service(service)
code = service.data.get(ATTR_CODE)
for entity in target_locks:
if (service.service == SERVICE_LOCK):
(yield from entity.async_lock(code=code))
else:
(yield from entity.async_unlock(code=code))
update_tasks = []
for entity in target_locks:
if (not entity.should_poll):
continue
update_coro = hass.loop.create_task(entity.async_update_ha_state(True))
if hasattr(entity, 'async_update'):
update_tasks.append(update_coro)
else:
(yield from update_coro)
if update_tasks:
(yield from asyncio.wait(update_tasks, loop=hass.loop))
descriptions = (yield from hass.loop.run_in_executor(None, load_yaml_config_file, os.path.join(os.path.dirname(__file__), 'services.yaml')))
hass.services.async_register(DOMAIN, SERVICE_UNLOCK, async_handle_lock_service, descriptions.get(SERVICE_UNLOCK), schema=LOCK_SERVICE_SCHEMA)
hass.services.async_register(DOMAIN, SERVICE_LOCK, async_handle_lock_service, descriptions.get(SERVICE_LOCK), schema=LOCK_SERVICE_SCHEMA)
return True
|
@asyncio.coroutine
def async_handle_lock_service(service):
'Handle calls to the lock services.'
target_locks = component.async_extract_from_service(service)
code = service.data.get(ATTR_CODE)
for entity in target_locks:
if (service.service == SERVICE_LOCK):
(yield from entity.async_lock(code=code))
else:
(yield from entity.async_unlock(code=code))
update_tasks = []
for entity in target_locks:
if (not entity.should_poll):
continue
update_coro = hass.loop.create_task(entity.async_update_ha_state(True))
if hasattr(entity, 'async_update'):
update_tasks.append(update_coro)
else:
(yield from update_coro)
if update_tasks:
(yield from asyncio.wait(update_tasks, loop=hass.loop))
| 2,997,979,547,901,405,700
|
Handle calls to the lock services.
|
homeassistant/components/lock/__init__.py
|
async_handle_lock_service
|
Norien/Home-Assistant
|
python
|
@asyncio.coroutine
def async_handle_lock_service(service):
target_locks = component.async_extract_from_service(service)
code = service.data.get(ATTR_CODE)
for entity in target_locks:
if (service.service == SERVICE_LOCK):
(yield from entity.async_lock(code=code))
else:
(yield from entity.async_unlock(code=code))
update_tasks = []
for entity in target_locks:
if (not entity.should_poll):
continue
update_coro = hass.loop.create_task(entity.async_update_ha_state(True))
if hasattr(entity, 'async_update'):
update_tasks.append(update_coro)
else:
(yield from update_coro)
if update_tasks:
(yield from asyncio.wait(update_tasks, loop=hass.loop))
|
@property
def changed_by(self):
'Last change triggered by.'
return None
| -9,118,920,222,303,797,000
|
Last change triggered by.
|
homeassistant/components/lock/__init__.py
|
changed_by
|
Norien/Home-Assistant
|
python
|
@property
def changed_by(self):
return None
|
@property
def code_format(self):
'Regex for code format or None if no code is required.'
return None
| -1,164,223,299,001,269,500
|
Regex for code format or None if no code is required.
|
homeassistant/components/lock/__init__.py
|
code_format
|
Norien/Home-Assistant
|
python
|
@property
def code_format(self):
return None
|
@property
def is_locked(self):
'Return true if the lock is locked.'
return None
| 2,237,670,600,384,488,200
|
Return true if the lock is locked.
|
homeassistant/components/lock/__init__.py
|
is_locked
|
Norien/Home-Assistant
|
python
|
@property
def is_locked(self):
return None
|
def lock(self, **kwargs):
'Lock the lock.'
raise NotImplementedError()
| 8,987,824,800,343,890,000
|
Lock the lock.
|
homeassistant/components/lock/__init__.py
|
lock
|
Norien/Home-Assistant
|
python
|
def lock(self, **kwargs):
raise NotImplementedError()
|
def async_lock(self, **kwargs):
'Lock the lock.\n\n This method must be run in the event loop and returns a coroutine.\n '
return self.hass.loop.run_in_executor(None, ft.partial(self.lock, **kwargs))
| -7,589,919,978,103,788,000
|
Lock the lock.
This method must be run in the event loop and returns a coroutine.
|
homeassistant/components/lock/__init__.py
|
async_lock
|
Norien/Home-Assistant
|
python
|
def async_lock(self, **kwargs):
'Lock the lock.\n\n This method must be run in the event loop and returns a coroutine.\n '
return self.hass.loop.run_in_executor(None, ft.partial(self.lock, **kwargs))
|
def unlock(self, **kwargs):
'Unlock the lock.'
raise NotImplementedError()
| -4,919,582,497,115,918,000
|
Unlock the lock.
|
homeassistant/components/lock/__init__.py
|
unlock
|
Norien/Home-Assistant
|
python
|
def unlock(self, **kwargs):
raise NotImplementedError()
|
def async_unlock(self, **kwargs):
'Unlock the lock.\n\n This method must be run in the event loop and returns a coroutine.\n '
return self.hass.loop.run_in_executor(None, ft.partial(self.unlock, **kwargs))
| -4,903,250,694,778,949,000
|
Unlock the lock.
This method must be run in the event loop and returns a coroutine.
|
homeassistant/components/lock/__init__.py
|
async_unlock
|
Norien/Home-Assistant
|
python
|
def async_unlock(self, **kwargs):
'Unlock the lock.\n\n This method must be run in the event loop and returns a coroutine.\n '
return self.hass.loop.run_in_executor(None, ft.partial(self.unlock, **kwargs))
|
@property
def state_attributes(self):
'Return the state attributes.'
if (self.code_format is None):
return None
state_attr = {ATTR_CODE_FORMAT: self.code_format, ATTR_CHANGED_BY: self.changed_by}
return state_attr
| -6,797,728,062,003,370,000
|
Return the state attributes.
|
homeassistant/components/lock/__init__.py
|
state_attributes
|
Norien/Home-Assistant
|
python
|
@property
def state_attributes(self):
if (self.code_format is None):
return None
state_attr = {ATTR_CODE_FORMAT: self.code_format, ATTR_CHANGED_BY: self.changed_by}
return state_attr
|
@property
def state(self):
'Return the state.'
locked = self.is_locked
if (locked is None):
return STATE_UNKNOWN
return (STATE_LOCKED if locked else STATE_UNLOCKED)
| 7,724,984,683,789,897,000
|
Return the state.
|
homeassistant/components/lock/__init__.py
|
state
|
Norien/Home-Assistant
|
python
|
@property
def state(self):
locked = self.is_locked
if (locked is None):
return STATE_UNKNOWN
return (STATE_LOCKED if locked else STATE_UNLOCKED)
|
@computed_property
def system_wide_role(self):
'For choosing the role string to show to the user; of all the roles in\n the system-wide context, it shows the highest ranked one (if there are\n multiple) or "No Access" if there are none.\n '
if (self.email in getattr(settings, 'BOOTSTRAP_ADMIN_USERS', [])):
return u'Superuser'
ROLE_HIERARCHY = {u'gGRC Admin': 0, u'Editor': 1, u'Reader': 2, u'Creator': 3}
system_wide_roles = ROLE_HIERARCHY.keys()
unique_roles = set([user_role.role.name for user_role in self.user_roles if (user_role.role.name in system_wide_roles)])
if (len(unique_roles) == 0):
return u'No Access'
else:
sorted_roles = sorted(unique_roles, key=(lambda x: ROLE_HIERARCHY.get(x, (- 1))))
return sorted_roles[0]
| 2,767,158,828,240,276,000
|
For choosing the role string to show to the user; of all the roles in
the system-wide context, it shows the highest ranked one (if there are
multiple) or "No Access" if there are none.
|
src/ggrc/models/person.py
|
system_wide_role
|
mikecb/ggrc-core
|
python
|
@computed_property
def system_wide_role(self):
'For choosing the role string to show to the user; of all the roles in\n the system-wide context, it shows the highest ranked one (if there are\n multiple) or "No Access" if there are none.\n '
if (self.email in getattr(settings, 'BOOTSTRAP_ADMIN_USERS', [])):
return u'Superuser'
ROLE_HIERARCHY = {u'gGRC Admin': 0, u'Editor': 1, u'Reader': 2, u'Creator': 3}
system_wide_roles = ROLE_HIERARCHY.keys()
unique_roles = set([user_role.role.name for user_role in self.user_roles if (user_role.role.name in system_wide_roles)])
if (len(unique_roles) == 0):
return u'No Access'
else:
sorted_roles = sorted(unique_roles, key=(lambda x: ROLE_HIERARCHY.get(x, (- 1))))
return sorted_roles[0]
|
def _get_column_by_index(tensor, indices):
'Returns columns from a 2-D tensor by index.'
shape = array_ops.shape(tensor)
p_flat = array_ops.reshape(tensor, [(- 1)])
i_flat = array_ops.reshape((array_ops.reshape((math_ops.range(0, shape[0]) * shape[1]), [(- 1), 1]) + indices), [(- 1)])
return array_ops.reshape(array_ops.gather(p_flat, i_flat), [shape[0], (- 1)])
| 5,450,827,665,698,653,000
|
Returns columns from a 2-D tensor by index.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
_get_column_by_index
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def _get_column_by_index(tensor, indices):
shape = array_ops.shape(tensor)
p_flat = array_ops.reshape(tensor, [(- 1)])
i_flat = array_ops.reshape((array_ops.reshape((math_ops.range(0, shape[0]) * shape[1]), [(- 1), 1]) + indices), [(- 1)])
return array_ops.reshape(array_ops.gather(p_flat, i_flat), [shape[0], (- 1)])
|
def _make_predictions_dict(stamp, logits, partition_ids, ensemble_stats, used_handlers, leaf_index=None):
    """Assemble the standard predictions dict for the ensemble.

    Args:
      stamp: The ensemble stamp.
      logits: A rank 2 `Tensor` with shape [batch_size, n_classes - 1] that
        contains predictions when no dropout was applied.
      partition_ids: A rank 1 `Tensor` with shape [batch_size].
      ensemble_stats: A TreeEnsembleStatsOp result tuple.
      used_handlers: A TreeEnsembleUsedHandlerOp result tuple of an int and a
        boolean mask.
      leaf_index: A rank 2 `Tensor` with shape [batch_size, number of trees]
        that contains the leaf id for each example prediction, or None.

    Returns:
      A dict of predictions.
    """
    predictions = {
        ENSEMBLE_STAMP: stamp,
        PREDICTIONS: logits,
        PARTITION_IDS: partition_ids,
        NUM_LAYERS_ATTEMPTED: ensemble_stats.attempted_layers,
        NUM_TREES_ATTEMPTED: ensemble_stats.attempted_trees,
        NUM_USED_HANDLERS: used_handlers.num_used_handlers,
        USED_HANDLERS_MASK: used_handlers.used_handlers_mask,
    }
    # Leaf indices are only attached when the caller supplied them.
    if leaf_index is not None:
        predictions[LEAF_INDEX] = leaf_index
    return predictions
| -2,629,575,622,720,228,000
|
Returns predictions for the given logits and n_classes.
Args:
stamp: The ensemble stamp.
logits: A rank 2 `Tensor` with shape [batch_size, n_classes - 1]. that
contains predictions when no dropout was applied.
partition_ids: A rank 1 `Tensor` with shape [batch_size].
ensemble_stats: A TreeEnsembleStatsOp result tuple.
used_handlers: A TreeEnsembleUsedHandlerOp result tuple of an int and a
boolean mask.
leaf_index: A rank 2 `Tensor` with shape [batch_size, number of trees]. that
contains leaf id for each example prediction.
Returns:
A dict of predictions.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
_make_predictions_dict
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def _make_predictions_dict(stamp, logits, partition_ids, ensemble_stats, used_handlers, leaf_index=None):
    'Returns predictions for the given logits and n_classes.\n\n Args:\n stamp: The ensemble stamp.\n logits: A rank 2 `Tensor` with shape [batch_size, n_classes - 1]. that\n contains predictions when no dropout was applied.\n partition_ids: A rank 1 `Tensor` with shape [batch_size].\n ensemble_stats: A TreeEnsembleStatsOp result tuple.\n used_handlers: A TreeEnsembleUsedHandlerOp result tuple of an int and a\n boolean mask.\n leaf_index: A rank 2 `Tensor` with shape [batch_size, number of trees]. that\n contains leaf id for each example prediction.\n\n Returns:\n A dict of predictions.\n '
    result = {}
    result[ENSEMBLE_STAMP] = stamp
    result[PREDICTIONS] = logits
    result[PARTITION_IDS] = partition_ids
    result[NUM_LAYERS_ATTEMPTED] = ensemble_stats.attempted_layers
    result[NUM_TREES_ATTEMPTED] = ensemble_stats.attempted_trees
    result[NUM_USED_HANDLERS] = used_handlers.num_used_handlers
    result[USED_HANDLERS_MASK] = used_handlers.used_handlers_mask
    # Leaf indices are optional; only included when the caller supplied them.
    if (leaf_index is not None):
        result[LEAF_INDEX] = leaf_index
    return result
|
def extract_features(features, feature_columns, use_core_columns):
    """Extracts columns from a dictionary of features.

    Args:
      features: `dict` of `Tensor` objects.
      feature_columns: A list of feature_columns.
      use_core_columns: Whether to transform via the core feature-column API
        instead of the contrib feature-column ops.

    Returns:
      Eight values:
        - A list of all feature column names.
        - A list of dense floats.
        - A list of sparse float feature indices.
        - A list of sparse float feature values.
        - A list of sparse float feature shapes.
        - A list of sparse int feature indices.
        - A list of sparse int feature values.
        - A list of sparse int feature shapes.

    Raises:
      ValueError: if features is not valid.
    """
    if (not features):
        raise ValueError('Features dictionary must be specified.')
    # Shallow copy so the transformations below do not mutate the caller's dict.
    features = copy.copy(features)
    if feature_columns:
        scope = 'gbdt'
        with variable_scope.variable_scope(scope):
            feature_columns = list(feature_columns)
            transformed_features = collections.OrderedDict()
            for fc in feature_columns:
                if use_core_columns:
                    # Core feature-column path (uses a private API).
                    tensor = fc_core._transform_features(features, [fc])[fc]
                    transformed_features[fc.name] = tensor
                elif isinstance(fc, feature_column_lib._EmbeddingColumn):
                    # Embedding columns go through input_layer so their
                    # variables land in this scope's weight collection.
                    transformed_features[fc.name] = fc_core.input_layer(features, [fc], weight_collections=[scope])
                else:
                    result = feature_column_ops.transform_features(features, [fc])
                    if (len(result) > 1):
                        raise ValueError('Unexpected number of output features')
                    transformed_features[fc.name] = result[list(result.keys())[0]]
            features = transformed_features
    dense_float_names = []
    dense_floats = []
    sparse_float_names = []
    sparse_float_indices = []
    sparse_float_values = []
    sparse_float_shapes = []
    sparse_int_names = []
    sparse_int_indices = []
    sparse_int_values = []
    sparse_int_shapes = []
    # Sorted iteration gives a deterministic feature ordering.
    for key in sorted(features.keys()):
        tensor = features[key]
        if isinstance(tensor, tuple):
            # (categorical ids, weights) pair: fold the ids into the sparse
            # indices so the weights become a weighted sparse float feature.
            categorical_tensor = tensor[0]
            weight_tensor = tensor[1]
            shape = categorical_tensor.dense_shape
            indices = array_ops.concat([array_ops.slice(categorical_tensor.indices, [0, 0], [(- 1), 1]), array_ops.expand_dims(math_ops.to_int64(categorical_tensor.values), (- 1))], 1)
            tensor = sparse_tensor.SparseTensor(indices=indices, values=weight_tensor.values, dense_shape=shape)
        if isinstance(tensor, sparse_tensor.SparseTensor):
            if (tensor.values.dtype == dtypes.float32):
                sparse_float_names.append(key)
                sparse_float_indices.append(tensor.indices)
                sparse_float_values.append(tensor.values)
                sparse_float_shapes.append(tensor.dense_shape)
            elif (tensor.values.dtype == dtypes.int64):
                sparse_int_names.append(key)
                sparse_int_indices.append(tensor.indices)
                sparse_int_values.append(tensor.values)
                sparse_int_shapes.append(tensor.dense_shape)
            else:
                raise ValueError(('Unsupported sparse feature %s with dtype %s.' % (tensor.indices.name, tensor.dtype)))
        elif (tensor.dtype == dtypes.float32):
            if ((len(tensor.shape) > 1) and (tensor.shape[1] > 1)):
                # Multi-column dense tensors are split into one named column each.
                unstacked = array_ops.unstack(tensor, axis=1)
                for i in range(len(unstacked)):
                    dense_float_names.append((_FEATURE_NAME_TEMPLATE % (key, i)))
                    dense_floats.append(array_ops.reshape(unstacked[i], [(- 1), 1]))
            else:
                dense_float_names.append(key)
                dense_floats.append(tensor)
        else:
            raise ValueError(('Unsupported dense feature %s with dtype %s.' % (tensor.name, tensor.dtype)))
    fc_names = ((dense_float_names + sparse_float_names) + sparse_int_names)
    return (fc_names, dense_floats, sparse_float_indices, sparse_float_values, sparse_float_shapes, sparse_int_indices, sparse_int_values, sparse_int_shapes)
| -7,117,044,286,999,141,000
|
Extracts columns from a dictionary of features.
Args:
features: `dict` of `Tensor` objects.
feature_columns: A list of feature_columns.
Returns:
Seven values:
- A list of all feature column names.
- A list of dense floats.
- A list of sparse float feature indices.
- A list of sparse float feature values.
- A list of sparse float feature shapes.
- A list of sparse int feature indices.
- A list of sparse int feature values.
- A list of sparse int feature shapes.
Raises:
ValueError: if features is not valid.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
extract_features
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def extract_features(features, feature_columns, use_core_columns):
    """Extracts columns from a dictionary of features.

    Args:
      features: `dict` of `Tensor` objects.
      feature_columns: A list of feature_columns.
      use_core_columns: Whether to transform via the core feature-column API
        instead of the contrib feature-column ops.

    Returns:
      Eight values:
        - A list of all feature column names.
        - A list of dense floats.
        - A list of sparse float feature indices.
        - A list of sparse float feature values.
        - A list of sparse float feature shapes.
        - A list of sparse int feature indices.
        - A list of sparse int feature values.
        - A list of sparse int feature shapes.

    Raises:
      ValueError: if features is not valid.
    """
    if (not features):
        raise ValueError('Features dictionary must be specified.')
    # Shallow copy so the transformations below do not mutate the caller's dict.
    features = copy.copy(features)
    if feature_columns:
        scope = 'gbdt'
        with variable_scope.variable_scope(scope):
            feature_columns = list(feature_columns)
            transformed_features = collections.OrderedDict()
            for fc in feature_columns:
                if use_core_columns:
                    # Core feature-column path (uses a private API).
                    tensor = fc_core._transform_features(features, [fc])[fc]
                    transformed_features[fc.name] = tensor
                elif isinstance(fc, feature_column_lib._EmbeddingColumn):
                    # Embedding columns go through input_layer so their
                    # variables land in this scope's weight collection.
                    transformed_features[fc.name] = fc_core.input_layer(features, [fc], weight_collections=[scope])
                else:
                    result = feature_column_ops.transform_features(features, [fc])
                    if (len(result) > 1):
                        raise ValueError('Unexpected number of output features')
                    transformed_features[fc.name] = result[list(result.keys())[0]]
            features = transformed_features
    dense_float_names = []
    dense_floats = []
    sparse_float_names = []
    sparse_float_indices = []
    sparse_float_values = []
    sparse_float_shapes = []
    sparse_int_names = []
    sparse_int_indices = []
    sparse_int_values = []
    sparse_int_shapes = []
    # Sorted iteration gives a deterministic feature ordering.
    for key in sorted(features.keys()):
        tensor = features[key]
        if isinstance(tensor, tuple):
            # (categorical ids, weights) pair: fold the ids into the sparse
            # indices so the weights become a weighted sparse float feature.
            categorical_tensor = tensor[0]
            weight_tensor = tensor[1]
            shape = categorical_tensor.dense_shape
            indices = array_ops.concat([array_ops.slice(categorical_tensor.indices, [0, 0], [(- 1), 1]), array_ops.expand_dims(math_ops.to_int64(categorical_tensor.values), (- 1))], 1)
            tensor = sparse_tensor.SparseTensor(indices=indices, values=weight_tensor.values, dense_shape=shape)
        if isinstance(tensor, sparse_tensor.SparseTensor):
            if (tensor.values.dtype == dtypes.float32):
                sparse_float_names.append(key)
                sparse_float_indices.append(tensor.indices)
                sparse_float_values.append(tensor.values)
                sparse_float_shapes.append(tensor.dense_shape)
            elif (tensor.values.dtype == dtypes.int64):
                sparse_int_names.append(key)
                sparse_int_indices.append(tensor.indices)
                sparse_int_values.append(tensor.values)
                sparse_int_shapes.append(tensor.dense_shape)
            else:
                raise ValueError(('Unsupported sparse feature %s with dtype %s.' % (tensor.indices.name, tensor.dtype)))
        elif (tensor.dtype == dtypes.float32):
            if ((len(tensor.shape) > 1) and (tensor.shape[1] > 1)):
                # Multi-column dense tensors are split into one named column each.
                unstacked = array_ops.unstack(tensor, axis=1)
                for i in range(len(unstacked)):
                    dense_float_names.append((_FEATURE_NAME_TEMPLATE % (key, i)))
                    dense_floats.append(array_ops.reshape(unstacked[i], [(- 1), 1]))
            else:
                dense_float_names.append(key)
                dense_floats.append(tensor)
        else:
            raise ValueError(('Unsupported dense feature %s with dtype %s.' % (tensor.name, tensor.dtype)))
    fc_names = ((dense_float_names + sparse_float_names) + sparse_int_names)
    return (fc_names, dense_floats, sparse_float_indices, sparse_float_values, sparse_float_shapes, sparse_int_indices, sparse_int_values, sparse_int_shapes)
|
def _dropout_params(mode, ensemble_stats):
    """Return parameters relevant for dropout.

    Args:
      mode: Train/Eval/Infer.
      ensemble_stats: A TreeEnsembleStatsOp result tuple.

    Returns:
      A (apply_dropout, seed) pair: dropout is applied only in TRAIN mode,
      where the seed is the number of attempted trees; otherwise seed is -1.
    """
    is_training = (mode == learn.ModeKeys.TRAIN)
    if is_training:
        return (True, ensemble_stats.attempted_trees)
    return (False, (- 1))
| 6,727,304,262,766,258,000
|
Returns parameters relevant for dropout.
Args:
mode: Train/Eval/Infer
ensemble_stats: A TreeEnsembleStatsOp result tuple.
Returns:
Whether to apply dropout and a dropout seed.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
_dropout_params
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def _dropout_params(mode, ensemble_stats):
    'Returns parameters relevant for dropout.\n\n Args:\n mode: Train/Eval/Infer\n ensemble_stats: A TreeEnsembleStatsOp result tuple.\n\n Returns:\n Whether to apply dropout and a dropout seed.\n '
    if (mode == learn.ModeKeys.TRAIN):
        apply_dropout = True
        # Seeding with the attempted-tree count ties the dropout mask to the
        # current ensemble state.
        seed = ensemble_stats.attempted_trees
    else:
        # Dropout is disabled outside of training; -1 marks "no seed".
        seed = (- 1)
        apply_dropout = False
    return (apply_dropout, seed)
|
def __init__(self, ps_ops, num_tasks):
    """Create a new `_RoundRobinStrategy`.

    Args:
      ps_ops: List of Op types to place on PS.
      num_tasks: Number of ps tasks to cycle among.
    """
    self._num_tasks = num_tasks
    self._next_task_per_op = {}
    # Stagger each op type's starting task so placements spread across
    # tasks from the very first assignment.
    task = 0
    for op_type in ps_ops:
        self._next_task_per_op[op_type] = task
        task = ((task + 1) % num_tasks) if num_tasks else 0
| -2,634,130,208,861,780,500
|
Create a new `_RoundRobinStrategy`.
Args:
ps_ops: List of Op types to place on PS.
num_tasks: Number of ps tasks to cycle among.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
__init__
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def __init__(self, ps_ops, num_tasks):
    'Create a new `_RoundRobinStrategy`.\n\n Args:\n ps_ops: List of Op types to place on PS.\n num_tasks: Number of ps tasks to cycle among.\n '
    next_task = 0
    self._next_task_per_op = {}
    # Each op type starts at a staggered task index, so assignments are
    # spread across tasks from the first placement onward. The `if num_tasks`
    # guard avoids a modulo-by-zero when there are no ps tasks.
    for op in ps_ops:
        self._next_task_per_op[op] = next_task
        next_task = (((next_task + 1) % num_tasks) if num_tasks else 0)
    self._num_tasks = num_tasks
|
def __call__(self, op):
    """Choose a ps task index for the given `Operation`.

    Args:
      op: An `Operation` to be placed on ps.

    Returns:
      The next ps task index to use for the `Operation`, advancing the
      per-op-type round-robin counter.

    Raises:
      ValueError: If attempting to place non-PS Op.
    """
    op_type = op.type
    if op_type not in self._next_task_per_op:
        raise ValueError(("Unknown op type '%s' for placement:" % op_type))
    chosen = self._next_task_per_op[op_type]
    # Advance the counter; guard against modulo-by-zero when num_tasks == 0.
    advanced = ((chosen + 1) % self._num_tasks) if self._num_tasks else 0
    self._next_task_per_op[op_type] = advanced
    return chosen
| -5,665,164,402,685,172,000
|
Choose a ps task index for the given `Operation`.
Args:
op: An `Operation` to be placed on ps.
Returns:
The next ps task index to use for the `Operation`. Returns the next
index, in the range `[offset, offset + num_tasks)`.
Raises:
ValueError: If attempting to place non-PS Op.
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
__call__
|
JustinACoder/H22-GR3-UnrealAI
|
python
|
def __call__(self, op):
    'Choose a ps task index for the given `Operation`.\n\n Args:\n op: An `Operation` to be placed on ps.\n\n Returns:\n The next ps task index to use for the `Operation`. Returns the next\n index, in the range `[offset, offset + num_tasks)`.\n\n Raises:\n ValueError: If attempting to place non-PS Op.\n '
    if (op.type not in self._next_task_per_op):
        raise ValueError(("Unknown op type '%s' for placement:" % op.type))
    task = self._next_task_per_op[op.type]
    # Round-robin: advance this op type's counter for the next placement.
    # The `if self._num_tasks` guard avoids a modulo-by-zero.
    self._next_task_per_op[op.type] = (((task + 1) % self._num_tasks) if self._num_tasks else 0)
    return task
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.