| body (string, 26–98.2k chars) | body_hash (int64) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (string, 1 value: python) | body_without_docstring (string, 20–98.2k chars) |
|---|---|---|---|---|---|---|---|
def device_snapshot_rollback_with_http_info(self, scope_id, device_id, snapshot_id, **kwargs):
'Rolls back a device snapshot # noqa: E501\n\n Updates the configuration of a device by rolling back to a given snapshot ID. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.device_snapshot_rollback_with_http_info(scope_id, device_id, snapshot_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str scope_id: The ScopeId of the device (required)\n :param str device_id: The id of the device (required)\n :param str snapshot_id: the ID of the snapshot to roll back to (required)\n :param int timeout: The timeout of the operation\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = ['scope_id', 'device_id', 'snapshot_id', 'timeout']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in six.iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method device_snapshot_rollback" % key))
params[key] = val
del params['kwargs']
if (('scope_id' not in params) or (params['scope_id'] is None)):
raise ValueError('Missing the required parameter `scope_id` when calling `device_snapshot_rollback`')
if (('device_id' not in params) or (params['device_id'] is None)):
raise ValueError('Missing the required parameter `device_id` when calling `device_snapshot_rollback`')
if (('snapshot_id' not in params) or (params['snapshot_id'] is None)):
raise ValueError('Missing the required parameter `snapshot_id` when calling `device_snapshot_rollback`')
collection_formats = {}
path_params = {}
if ('scope_id' in params):
path_params['scopeId'] = params['scope_id']
if ('device_id' in params):
path_params['deviceId'] = params['device_id']
if ('snapshot_id' in params):
path_params['snapshotId'] = params['snapshot_id']
query_params = []
if ('timeout' in params):
query_params.append(('timeout', params['timeout']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/xml', 'application/json'])
auth_settings = ['kapuaAccessToken']
return self.api_client.call_api('/{scopeId}/devices/{deviceId}/snapshots/{snapshotId}/_rollback', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
| 3,986,388,578,949,759,500
|
Rolls back a device snapshot # noqa: E501
Updates the configuration of a device by rolling back to a given snapshot ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_snapshot_rollback_with_http_info(scope_id, device_id, snapshot_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device (required)
:param str device_id: The id of the device (required)
:param str snapshot_id: the ID of the snapshot to roll back to (required)
:param int timeout: The timeout of the operation
:return: None
If the method is called asynchronously,
returns the request thread.
|
kapua-client/python-client/swagger_client/api/devices_api.py
|
device_snapshot_rollback_with_http_info
|
liang-faan/SmartIOT-Diec
|
python
|
def device_snapshot_rollback_with_http_info(self, scope_id, device_id, snapshot_id, **kwargs):
'Rolls back a device snapshot # noqa: E501\n\n Updates the configuration of a device by rolling back to a given snapshot ID. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.device_snapshot_rollback_with_http_info(scope_id, device_id, snapshot_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str scope_id: The ScopeId of the device (required)\n :param str device_id: The id of the device (required)\n :param str snapshot_id: the ID of the snapshot to roll back to (required)\n :param int timeout: The timeout of the operation\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = ['scope_id', 'device_id', 'snapshot_id', 'timeout']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in six.iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method device_snapshot_rollback" % key))
params[key] = val
del params['kwargs']
if (('scope_id' not in params) or (params['scope_id'] is None)):
raise ValueError('Missing the required parameter `scope_id` when calling `device_snapshot_rollback`')
if (('device_id' not in params) or (params['device_id'] is None)):
raise ValueError('Missing the required parameter `device_id` when calling `device_snapshot_rollback`')
if (('snapshot_id' not in params) or (params['snapshot_id'] is None)):
raise ValueError('Missing the required parameter `snapshot_id` when calling `device_snapshot_rollback`')
collection_formats = {}
path_params = {}
if ('scope_id' in params):
path_params['scopeId'] = params['scope_id']
if ('device_id' in params):
path_params['deviceId'] = params['device_id']
if ('snapshot_id' in params):
path_params['snapshotId'] = params['snapshot_id']
query_params = []
if ('timeout' in params):
query_params.append(('timeout', params['timeout']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/xml', 'application/json'])
auth_settings = ['kapuaAccessToken']
return self.api_client.call_api('/{scopeId}/devices/{deviceId}/snapshots/{snapshotId}/_rollback', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
|
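
The generated Kapua client methods above follow the standard swagger-codegen calling convention. A minimal usage sketch, assuming a configured `swagger_client.ApiClient` and that the method lives on a `DevicesApi` class (the class name is inferred from `devices_api.py`; the IDs are placeholders):

```python
import swagger_client

# Assumed setup: host and credentials are configured elsewhere on the client.
api = swagger_client.DevicesApi(swagger_client.ApiClient())

# Synchronous call (default): blocks until the rollback request returns.
api.device_snapshot_rollback_with_http_info('scope-1', 'device-1', 'snap-1')

# Asynchronous call: returns a thread-like handle; .get() joins and yields the result.
thread = api.device_snapshot_rollback_with_http_info(
    'scope-1', 'device-1', 'snap-1', async_req=True)
result = thread.get()
```
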
def device_update(self, scope_id, device_id, body, **kwargs):
'Update a Device # noqa: E501\n\n Updates a Device based on the information provided in the Device parameter. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.device_update(scope_id, device_id, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str scope_id: The ScopeId of the requested Device. (required)\n :param str device_id: The id of the requested Device (required)\n :param Device body: The modified Device whose attributes need to be updated (required)\n :return: Device\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_update_with_http_info(scope_id, device_id, body, **kwargs)
else:
data = self.device_update_with_http_info(scope_id, device_id, body, **kwargs)
return data
| -4,749,977,442,186,164,000
|
Update a Device # noqa: E501
Updates a Device based on the information provided in the Device parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_update(scope_id, device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the requested Device. (required)
:param str device_id: The id of the requested Device (required)
:param Device body: The modified Device whose attributes need to be updated (required)
:return: Device
If the method is called asynchronously,
returns the request thread.
|
kapua-client/python-client/swagger_client/api/devices_api.py
|
device_update
|
liang-faan/SmartIOT-Diec
|
python
|
def device_update(self, scope_id, device_id, body, **kwargs):
'Update a Device # noqa: E501\n\n Updates a Device based on the information provided in the Device parameter. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.device_update(scope_id, device_id, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str scope_id: The ScopeId of the requested Device. (required)\n :param str device_id: The id of the requested Device (required)\n :param Device body: The modified Device whose attributes need to be updated (required)\n :return: Device\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_update_with_http_info(scope_id, device_id, body, **kwargs)
else:
data = self.device_update_with_http_info(scope_id, device_id, body, **kwargs)
return data
|
def device_update_with_http_info(self, scope_id, device_id, body, **kwargs):
'Update a Device # noqa: E501\n\n Updates a Device based on the information provided in the Device parameter. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.device_update_with_http_info(scope_id, device_id, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str scope_id: The ScopeId of the requested Device. (required)\n :param str device_id: The id of the requested Device (required)\n :param Device body: The modified Device whose attributes need to be updated (required)\n :return: Device\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = ['scope_id', 'device_id', 'body']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in six.iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method device_update" % key))
params[key] = val
del params['kwargs']
if (('scope_id' not in params) or (params['scope_id'] is None)):
raise ValueError('Missing the required parameter `scope_id` when calling `device_update`')
if (('device_id' not in params) or (params['device_id'] is None)):
raise ValueError('Missing the required parameter `device_id` when calling `device_update`')
if (('body' not in params) or (params['body'] is None)):
raise ValueError('Missing the required parameter `body` when calling `device_update`')
collection_formats = {}
path_params = {}
if ('scope_id' in params):
path_params['scopeId'] = params['scope_id']
if ('device_id' in params):
path_params['deviceId'] = params['device_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('body' in params):
body_params = params['body']
header_params['Accept'] = self.api_client.select_header_accept(['application/xml', 'application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/xml', 'application/json'])
auth_settings = ['kapuaAccessToken']
return self.api_client.call_api('/{scopeId}/devices/{deviceId}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Device', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
| 8,073,674,989,738,873,000
|
Update a Device # noqa: E501
Updates a Device based on the information provided in the Device parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_update_with_http_info(scope_id, device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the requested Device. (required)
:param str device_id: The id of the requested Device (required)
:param Device body: The modified Device whose attributes need to be updated (required)
:return: Device
If the method is called asynchronously,
returns the request thread.
|
kapua-client/python-client/swagger_client/api/devices_api.py
|
device_update_with_http_info
|
liang-faan/SmartIOT-Diec
|
python
|
def device_update_with_http_info(self, scope_id, device_id, body, **kwargs):
'Update a Device # noqa: E501\n\n Updates a Device based on the information provided in the Device parameter. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.device_update_with_http_info(scope_id, device_id, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str scope_id: The ScopeId of the requested Device. (required)\n :param str device_id: The id of the requested Device (required)\n :param Device body: The modified Device whose attributes need to be updated (required)\n :return: Device\n If the method is called asynchronously,\n returns the request thread.\n '
all_params = ['scope_id', 'device_id', 'body']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for (key, val) in six.iteritems(params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method device_update" % key))
params[key] = val
del params['kwargs']
if (('scope_id' not in params) or (params['scope_id'] is None)):
raise ValueError('Missing the required parameter `scope_id` when calling `device_update`')
if (('device_id' not in params) or (params['device_id'] is None)):
raise ValueError('Missing the required parameter `device_id` when calling `device_update`')
if (('body' not in params) or (params['body'] is None)):
raise ValueError('Missing the required parameter `body` when calling `device_update`')
collection_formats = {}
path_params = {}
if ('scope_id' in params):
path_params['scopeId'] = params['scope_id']
if ('device_id' in params):
path_params['deviceId'] = params['device_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if ('body' in params):
body_params = params['body']
header_params['Accept'] = self.api_client.select_header_accept(['application/xml', 'application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/xml', 'application/json'])
auth_settings = ['kapuaAccessToken']
return self.api_client.call_api('/{scopeId}/devices/{deviceId}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Device', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
|
def length_conversion(value: float, from_type: str, to_type: str) -> float:
'\n Conversion between length units.\n\n >>> length_conversion(4, "METER", "FEET")\n 13.12336\n >>> length_conversion(4, "M", "FT")\n 13.12336\n >>> length_conversion(1, "meter", "kilometer")\n 0.001\n >>> length_conversion(1, "kilometer", "inch")\n 39370.1\n >>> length_conversion(3, "kilometer", "mile")\n 1.8641130000000001\n >>> length_conversion(2, "feet", "meter")\n 0.6096\n >>> length_conversion(4, "feet", "yard")\n 1.333329312\n >>> length_conversion(1, "inch", "meter")\n 0.0254\n >>> length_conversion(2, "inch", "mile")\n 3.15656468e-05\n >>> length_conversion(2, "centimeter", "millimeter")\n 20.0\n >>> length_conversion(2, "centimeter", "yard")\n 0.0218722\n >>> length_conversion(4, "yard", "meter")\n 3.6576\n >>> length_conversion(4, "yard", "kilometer")\n 0.0036576\n >>> length_conversion(3, "foot", "meter")\n 0.9144000000000001\n >>> length_conversion(3, "foot", "inch")\n 36.00001944\n >>> length_conversion(4, "mile", "kilometer")\n 6.43736\n >>> length_conversion(2, "miles", "InChEs")\n 126719.753468\n >>> length_conversion(3, "millimeter", "centimeter")\n 0.3\n >>> length_conversion(3, "mm", "in")\n 0.1181103\n >>> length_conversion(4, "wrongUnit", "inch")\n Traceback (most recent call last):\n ...\n ValueError: Invalid \'from_type\' value: \'wrongUnit\'.\n Conversion abbreviations are: mm, cm, m, km, in, ft, yd, mi\n '
new_from = from_type.lower().rstrip('s')
new_from = TYPE_CONVERSION.get(new_from, new_from)
new_to = to_type.lower().rstrip('s')
new_to = TYPE_CONVERSION.get(new_to, new_to)
if (new_from not in METRIC_CONVERSION):
raise ValueError(f'''Invalid 'from_type' value: {from_type!r}.
Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}''')
if (new_to not in METRIC_CONVERSION):
raise ValueError(f'''Invalid 'to_type' value: {to_type!r}.
Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}''')
return ((value * METRIC_CONVERSION[new_from].from_) * METRIC_CONVERSION[new_to].to)
| -2,713,754,555,307,478,500
|
Conversion between length units.
>>> length_conversion(4, "METER", "FEET")
13.12336
>>> length_conversion(4, "M", "FT")
13.12336
>>> length_conversion(1, "meter", "kilometer")
0.001
>>> length_conversion(1, "kilometer", "inch")
39370.1
>>> length_conversion(3, "kilometer", "mile")
1.8641130000000001
>>> length_conversion(2, "feet", "meter")
0.6096
>>> length_conversion(4, "feet", "yard")
1.333329312
>>> length_conversion(1, "inch", "meter")
0.0254
>>> length_conversion(2, "inch", "mile")
3.15656468e-05
>>> length_conversion(2, "centimeter", "millimeter")
20.0
>>> length_conversion(2, "centimeter", "yard")
0.0218722
>>> length_conversion(4, "yard", "meter")
3.6576
>>> length_conversion(4, "yard", "kilometer")
0.0036576
>>> length_conversion(3, "foot", "meter")
0.9144000000000001
>>> length_conversion(3, "foot", "inch")
36.00001944
>>> length_conversion(4, "mile", "kilometer")
6.43736
>>> length_conversion(2, "miles", "InChEs")
126719.753468
>>> length_conversion(3, "millimeter", "centimeter")
0.3
>>> length_conversion(3, "mm", "in")
0.1181103
>>> length_conversion(4, "wrongUnit", "inch")
Traceback (most recent call last):
...
ValueError: Invalid 'from_type' value: 'wrongUnit'.
Conversion abbreviations are: mm, cm, m, km, in, ft, yd, mi
|
conversions/length_conversion.py
|
length_conversion
|
04n0/TheAlgorithms-Python
|
python
|
def length_conversion(value: float, from_type: str, to_type: str) -> float:
'\n Conversion between length units.\n\n >>> length_conversion(4, "METER", "FEET")\n 13.12336\n >>> length_conversion(4, "M", "FT")\n 13.12336\n >>> length_conversion(1, "meter", "kilometer")\n 0.001\n >>> length_conversion(1, "kilometer", "inch")\n 39370.1\n >>> length_conversion(3, "kilometer", "mile")\n 1.8641130000000001\n >>> length_conversion(2, "feet", "meter")\n 0.6096\n >>> length_conversion(4, "feet", "yard")\n 1.333329312\n >>> length_conversion(1, "inch", "meter")\n 0.0254\n >>> length_conversion(2, "inch", "mile")\n 3.15656468e-05\n >>> length_conversion(2, "centimeter", "millimeter")\n 20.0\n >>> length_conversion(2, "centimeter", "yard")\n 0.0218722\n >>> length_conversion(4, "yard", "meter")\n 3.6576\n >>> length_conversion(4, "yard", "kilometer")\n 0.0036576\n >>> length_conversion(3, "foot", "meter")\n 0.9144000000000001\n >>> length_conversion(3, "foot", "inch")\n 36.00001944\n >>> length_conversion(4, "mile", "kilometer")\n 6.43736\n >>> length_conversion(2, "miles", "InChEs")\n 126719.753468\n >>> length_conversion(3, "millimeter", "centimeter")\n 0.3\n >>> length_conversion(3, "mm", "in")\n 0.1181103\n >>> length_conversion(4, "wrongUnit", "inch")\n Traceback (most recent call last):\n ...\n ValueError: Invalid \'from_type\' value: \'wrongUnit\'.\n Conversion abbreviations are: mm, cm, m, km, in, ft, yd, mi\n '
new_from = from_type.lower().rstrip('s')
new_from = TYPE_CONVERSION.get(new_from, new_from)
new_to = to_type.lower().rstrip('s')
new_to = TYPE_CONVERSION.get(new_to, new_to)
if (new_from not in METRIC_CONVERSION):
raise ValueError(f'''Invalid 'from_type' value: {from_type!r}.
Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}''')
if (new_to not in METRIC_CONVERSION):
raise ValueError(f'''Invalid 'to_type' value: {to_type!r}.
Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}''')
return ((value * METRIC_CONVERSION[new_from].from_) * METRIC_CONVERSION[new_to].to)
|
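
`length_conversion` relies on two module-level tables that are not shown in this record. A reconstruction sketch, inferred from the error message (`mm, cm, m, km, in, ft, yd, mi`) and checked against the doctest values; treat the exact table contents as assumptions:

```python
from collections import namedtuple

from_to = namedtuple('from_to', 'from_ to')

# Full unit names (lowercased, trailing 's' stripped) -> abbreviation.
# Note 'inche': 'inches'.lower().rstrip('s') yields 'inche', per the doctests.
TYPE_CONVERSION = {
    'millimeter': 'mm', 'centimeter': 'cm', 'meter': 'm', 'kilometer': 'km',
    'inch': 'in', 'inche': 'in', 'feet': 'ft', 'foot': 'ft',
    'yard': 'yd', 'mile': 'mi',
}

# Abbreviation -> (factor into meters, factor out of meters).
METRIC_CONVERSION = {
    'mm': from_to(0.001, 1000),
    'cm': from_to(0.01, 100),
    'm': from_to(1, 1),
    'km': from_to(1000, 0.001),
    'in': from_to(0.0254, 39.3701),
    'ft': from_to(0.3048, 3.28084),
    'yd': from_to(0.9144, 1.09361),
    'mi': from_to(1609.34, 0.000621371),
}
```

With these tables in scope, the doctests above (e.g. `length_conversion(4, "METER", "FEET") == 13.12336`) pass as written.
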
def integrate(self):
' See abstract method. '
self.data = LDTransformData(self, self.integrand, self.true_measure, self.discrete_distrib, self.coefv, self.m_min, self.m_max, self.fudge, self.check_cone, ptransform=self.ptransform, cast_complex=self.cast_complex, control_variates=self.cv, control_variate_means=self.cv_mu, update_beta=self.ub)
t_start = time()
while True:
self.data.update_data()
self.data.error_bound = (self.data.fudge(self.data.m) * self.data.stilde)
ub = max(self.abs_tol, (self.rel_tol * abs((self.data.solution + self.data.error_bound))))
lb = max(self.abs_tol, (self.rel_tol * abs((self.data.solution - self.data.error_bound))))
self.data.solution = (self.data.solution - ((self.data.error_bound * (ub - lb)) / (ub + lb)))
if (((4 * (self.data.error_bound ** 2.0)) / ((ub + lb) ** 2.0)) <= 1.0):
break
elif (self.data.m == self.data.m_max):
warning_s = ('\n Already generated %d samples.\n Trying to generate %d new samples would exceed n_max = %d.\n No more samples will be generated.\n Note that error tolerances may no longer be satisfied' % (int((2 ** self.data.m)), int((2 ** self.data.m)), int((2 ** self.data.m_max))))
warnings.warn(warning_s, MaxSamplesWarning)
break
else:
self.data.m += 1.0
self.data.time_integrate = (time() - t_start)
return (self.data.solution, self.data)
| -8,699,421,854,595,669,000
|
See abstract method.
|
qmcpy/stopping_criterion/_cub_qmc_ld_g.py
|
integrate
|
QMCSoftware/QMCSoftware
|
python
|
def integrate(self):
' '
self.data = LDTransformData(self, self.integrand, self.true_measure, self.discrete_distrib, self.coefv, self.m_min, self.m_max, self.fudge, self.check_cone, ptransform=self.ptransform, cast_complex=self.cast_complex, control_variates=self.cv, control_variate_means=self.cv_mu, update_beta=self.ub)
t_start = time()
while True:
self.data.update_data()
self.data.error_bound = (self.data.fudge(self.data.m) * self.data.stilde)
ub = max(self.abs_tol, (self.rel_tol * abs((self.data.solution + self.data.error_bound))))
lb = max(self.abs_tol, (self.rel_tol * abs((self.data.solution - self.data.error_bound))))
self.data.solution = (self.data.solution - ((self.data.error_bound * (ub - lb)) / (ub + lb)))
if (((4 * (self.data.error_bound ** 2.0)) / ((ub + lb) ** 2.0)) <= 1.0):
break
elif (self.data.m == self.data.m_max):
warning_s = ('\n Already generated %d samples.\n Trying to generate %d new samples would exceed n_max = %d.\n No more samples will be generated.\n Note that error tolerances may no longer be satisfied' % (int((2 ** self.data.m)), int((2 ** self.data.m)), int((2 ** self.data.m_max))))
warnings.warn(warning_s, MaxSamplesWarning)
break
else:
self.data.m += 1.0
self.data.time_integrate = (time() - t_start)
return (self.data.solution, self.data)
|
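
The loop's stopping test `4 * error_bound**2 / (ub + lb)**2 <= 1.0` is algebraically the condition `2 * error_bound <= ub + lb`, since both sides are nonnegative. A one-line illustrative check (not QMCPy code):

```python
# Squaring preserves the inequality for nonnegative quantities.
err, ub, lb = 0.004, 0.01, 0.002
assert (4 * err**2 / (ub + lb)**2 <= 1.0) == (2 * err <= ub + lb)
```
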
def set_tolerance(self, abs_tol=None, rel_tol=None):
'\n See abstract method. \n \n Args:\n abs_tol (float): absolute tolerance. Reset if supplied, ignored if not. \n rel_tol (float): relative tolerance. Reset if supplied, ignored if not. \n '
if (abs_tol != None):
self.abs_tol = abs_tol
if (rel_tol != None):
self.rel_tol = rel_tol
| 366,003,947,805,066,500
|
See abstract method.
Args:
abs_tol (float): absolute tolerance. Reset if supplied, ignored if not.
rel_tol (float): relative tolerance. Reset if supplied, ignored if not.
|
qmcpy/stopping_criterion/_cub_qmc_ld_g.py
|
set_tolerance
|
QMCSoftware/QMCSoftware
|
python
|
def set_tolerance(self, abs_tol=None, rel_tol=None):
'\n See abstract method. \n \n Args:\n abs_tol (float): absolute tolerance. Reset if supplied, ignored if not. \n rel_tol (float): relative tolerance. Reset if supplied, ignored if not. \n '
if (abs_tol != None):
self.abs_tol = abs_tol
if (rel_tol != None):
self.rel_tol = rel_tol
|
def create_device_name(node: OZWNode):
'Generate sensible (short) default device name from an OZWNode.'
if node.node_name:
return node.node_name
if (node.meta_data and node.meta_data.get('Name')):
return node.meta_data['Name']
if node.node_product_name:
return node.node_product_name
if node.node_device_type_string:
return node.node_device_type_string
if node.node_specific_string:
return node.node_specific_string
return f'Node {node.id}'
| 7,304,968,344,675,889,000
|
Generate sensible (short) default device name from an OZWNode.
|
homeassistant/components/ozw/entity.py
|
create_device_name
|
2Fake/core
|
python
|
def create_device_name(node: OZWNode):
if node.node_name:
return node.node_name
if (node.meta_data and node.meta_data.get('Name')):
return node.meta_data['Name']
if node.node_product_name:
return node.node_product_name
if node.node_device_type_string:
return node.node_device_type_string
if node.node_specific_string:
return node.node_specific_string
return f'Node {node.id}'
|
def create_device_id(node: OZWNode, node_instance: int=1):
'Generate unique device_id from an OZWNode.'
ozw_instance = node.parent.id
dev_id = f'{ozw_instance}.{node.node_id}.{node_instance}'
return dev_id
| 7,974,858,200,362,401,000
|
Generate unique device_id from an OZWNode.
|
homeassistant/components/ozw/entity.py
|
create_device_id
|
2Fake/core
|
python
|
def create_device_id(node: OZWNode, node_instance: int=1):
ozw_instance = node.parent.id
dev_id = f'{ozw_instance}.{node.node_id}.{node_instance}'
return dev_id
|
def create_value_id(value: OZWValue):
'Generate unique value_id from an OZWValue.'
return f'{value.node.parent.id}-{value.node.id}-{value.value_id_key}'
| 1,264,765,362,012,980,000
|
Generate unique value_id from an OZWValue.
|
homeassistant/components/ozw/entity.py
|
create_value_id
|
2Fake/core
|
python
|
def create_value_id(value: OZWValue):
return f'{value.node.parent.id}-{value.node.id}-{value.value_id_key}'
|
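
Together, `create_device_name`, `create_device_id`, and `create_value_id` define the identifier scheme for OZW devices and values. A hypothetical illustration of the resulting id shapes, using stand-in objects rather than real `OZWNode`/`OZWValue` instances (attribute names come from the source; the values are placeholders):

```python
from types import SimpleNamespace

instance = SimpleNamespace(id=1)                          # ozw_instance
node = SimpleNamespace(parent=instance, node_id=7, id=7)  # OZWNode stand-in
value = SimpleNamespace(node=node, value_id_key=123456)   # OZWValue stand-in

# device_id format: "<ozw_instance>.<node_id>.<node_instance>"
assert f'{node.parent.id}.{node.node_id}.1' == '1.7.1'
# value_id format: "<ozw_instance>-<node_id>-<value_id_key>"
assert f'{value.node.parent.id}-{value.node.id}-{value.value_id_key}' == '1-7-123456'
```
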
def __init__(self, hass, options, schema, primary_value):
'Initialize the values object with the passed entity schema.'
self._hass = hass
self._entity_created = False
self._schema = copy.deepcopy(schema)
self._values = {}
self.options = options
for (name, disc_settings) in self._schema[const.DISC_VALUES].items():
self._values[name] = None
disc_settings[const.DISC_INSTANCE] = (primary_value.instance,)
self._values[const.DISC_PRIMARY] = primary_value
self._node = primary_value.node
self._schema[const.DISC_NODE_ID] = [self._node.node_id]
| -5,097,259,871,609,913,000
|
Initialize the values object with the passed entity schema.
|
homeassistant/components/ozw/entity.py
|
__init__
|
2Fake/core
|
python
|
def __init__(self, hass, options, schema, primary_value):
self._hass = hass
self._entity_created = False
self._schema = copy.deepcopy(schema)
self._values = {}
self.options = options
for (name, disc_settings) in self._schema[const.DISC_VALUES].items():
self._values[name] = None
disc_settings[const.DISC_INSTANCE] = (primary_value.instance,)
self._values[const.DISC_PRIMARY] = primary_value
self._node = primary_value.node
self._schema[const.DISC_NODE_ID] = [self._node.node_id]
|
def async_setup(self):
'Set up values instance.'
for value in self._node.values():
self.async_check_value(value)
self._async_check_entity_ready()
| -3,300,220,210,320,531,000
|
Set up values instance.
|
homeassistant/components/ozw/entity.py
|
async_setup
|
2Fake/core
|
python
|
def async_setup(self):
for value in self._node.values():
self.async_check_value(value)
self._async_check_entity_ready()
|
def __getattr__(self, name):
'Get the specified value for this entity.'
return self._values.get(name, None)
| -7,461,882,825,756,319,000
|
Get the specified value for this entity.
|
homeassistant/components/ozw/entity.py
|
__getattr__
|
2Fake/core
|
python
|
def __getattr__(self, name):
return self._values.get(name, None)
|
def __iter__(self):
'Allow iteration over all values.'
return iter(self._values.values())
| 6,936,969,451,779,372,000
|
Allow iteration over all values.
|
homeassistant/components/ozw/entity.py
|
__iter__
|
2Fake/core
|
python
|
def __iter__(self):
return iter(self._values.values())
|
def __contains__(self, name):
'Check if the specified name/key exists in the values.'
return (name in self._values)
| 7,321,282,956,213,657,000
|
Check if the specified name/key exists in the values.
|
homeassistant/components/ozw/entity.py
|
__contains__
|
2Fake/core
|
python
|
def __contains__(self, name):
return (name in self._values)
|
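
The three dunder methods above (`__getattr__`, `__iter__`, `__contains__`) make the values object behave like a small read-only mapping with attribute access. A standalone stand-in showing that behavior (illustrative only, not the Home Assistant class):

```python
class _Values:
    """Minimal stand-in mirroring the dunder semantics shown above."""
    def __init__(self, values):
        self._values = values

    def __getattr__(self, name):
        # Only called for attributes not found normally; missing keys -> None.
        return self._values.get(name, None)

    def __iter__(self):
        return iter(self._values.values())

    def __contains__(self, name):
        return name in self._values

v = _Values({'primary': 42, 'extra': None})
assert v.primary == 42 and v.missing is None   # attribute access via __getattr__
assert 'extra' in v and list(v) == [42, None]  # membership and iteration
```
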
@callback
def async_check_value(self, value):
'Check if the new value matches a missing value for this entity.\n\n If a match is found, it is added to the values mapping.\n '
if (not check_node_schema(value.node, self._schema)):
return
for (name, name_value) in self._values.items():
if (name_value is not None):
continue
if (not check_value_schema(value, self._schema[const.DISC_VALUES][name])):
continue
self._values[name] = value
if self._entity_created:
async_dispatcher_send(self._hass, f'{DOMAIN}_{self.values_id}_value_added')
self._async_check_entity_ready()
| 8,416,800,283,286,510,000
|
Check if the new value matches a missing value for this entity.
If a match is found, it is added to the values mapping.
|
homeassistant/components/ozw/entity.py
|
async_check_value
|
2Fake/core
|
python
|
@callback
def async_check_value(self, value):
'Check if the new value matches a missing value for this entity.\n\n If a match is found, it is added to the values mapping.\n '
if (not check_node_schema(value.node, self._schema)):
return
for (name, name_value) in self._values.items():
if (name_value is not None):
continue
if (not check_value_schema(value, self._schema[const.DISC_VALUES][name])):
continue
self._values[name] = value
if self._entity_created:
async_dispatcher_send(self._hass, f'{DOMAIN}_{self.values_id}_value_added')
self._async_check_entity_ready()
|
@callback
def _async_check_entity_ready(self):
'Check if all required values are discovered and create entity.'
if self._entity_created:
return
for (name, disc_settings) in self._schema[const.DISC_VALUES].items():
if ((self._values[name] is None) and (not disc_settings.get(const.DISC_OPTIONAL))):
return
component = self._schema[const.DISC_COMPONENT]
_LOGGER.debug('Adding Node_id=%s Generic_command_class=%s, Specific_command_class=%s, Command_class=%s, Index=%s, Value type=%s, Genre=%s as %s', self._node.node_id, self._node.node_generic, self._node.node_specific, self.primary.command_class, self.primary.index, self.primary.type, self.primary.genre, component)
self._entity_created = True
if (component in PLATFORMS):
async_dispatcher_send(self._hass, f'{DOMAIN}_new_{component}', self)
| 4,784,856,119,720,975,000
|
Check if all required values are discovered and create entity.
|
homeassistant/components/ozw/entity.py
|
_async_check_entity_ready
|
2Fake/core
|
python
|
@callback
def _async_check_entity_ready(self):
if self._entity_created:
return
for (name, disc_settings) in self._schema[const.DISC_VALUES].items():
if ((self._values[name] is None) and (not disc_settings.get(const.DISC_OPTIONAL))):
return
component = self._schema[const.DISC_COMPONENT]
_LOGGER.debug('Adding Node_id=%s Generic_command_class=%s, Specific_command_class=%s, Command_class=%s, Index=%s, Value type=%s, Genre=%s as %s', self._node.node_id, self._node.node_generic, self._node.node_specific, self.primary.command_class, self.primary.index, self.primary.type, self.primary.genre, component)
self._entity_created = True
if (component in PLATFORMS):
async_dispatcher_send(self._hass, f'{DOMAIN}_new_{component}', self)
|
@property
def values_id(self):
'Identification for this values collection.'
return create_value_id(self.primary)
| -4,657,473,676,736,909,000
|
Identification for this values collection.
|
homeassistant/components/ozw/entity.py
|
values_id
|
2Fake/core
|
python
|
@property
def values_id(self):
return create_value_id(self.primary)
|
def __init__(self, values):
'Initialize a generic Z-Wave device entity.'
self.values = values
self.options = values.options
| -8,800,890,720,828,710,000
|
Initialize a generic Z-Wave device entity.
|
homeassistant/components/ozw/entity.py
|
__init__
|
2Fake/core
|
python
|
def __init__(self, values):
self.values = values
self.options = values.options
|
@callback
def on_value_update(self):
'Call when a value is added/updated in the entity EntityValues Collection.\n\n To be overridden by platforms needing this event.\n '
| 991,291,505,622,625,800
|
Call when a value is added/updated in the entity EntityValues Collection.
To be overridden by platforms needing this event.
|
homeassistant/components/ozw/entity.py
|
on_value_update
|
2Fake/core
|
python
|
@callback
def on_value_update(self):
'Call when a value is added/updated in the entity EntityValues Collection.\n\n To be overridden by platforms needing this event.\n '
|
async def async_added_to_hass(self):
'Call when entity is added.'
self.async_on_remove(self.options.listen(EVENT_VALUE_CHANGED, self._value_changed))
self.async_on_remove(self.options.listen(EVENT_INSTANCE_STATUS_CHANGED, self._instance_updated))
self.async_on_remove(async_dispatcher_connect(self.hass, const.SIGNAL_DELETE_ENTITY, self._delete_callback))
self.async_on_remove(async_dispatcher_connect(self.hass, f'{DOMAIN}_{self.values.values_id}_value_added', self._value_added))
| 4,368,566,727,181,420,500
|
Call when entity is added.
|
homeassistant/components/ozw/entity.py
|
async_added_to_hass
|
2Fake/core
|
python
|
async def async_added_to_hass(self):
self.async_on_remove(self.options.listen(EVENT_VALUE_CHANGED, self._value_changed))
self.async_on_remove(self.options.listen(EVENT_INSTANCE_STATUS_CHANGED, self._instance_updated))
self.async_on_remove(async_dispatcher_connect(self.hass, const.SIGNAL_DELETE_ENTITY, self._delete_callback))
self.async_on_remove(async_dispatcher_connect(self.hass, f'{DOMAIN}_{self.values.values_id}_value_added', self._value_added))
|
@property
def device_info(self) -> DeviceInfo:
'Return device information for the device registry.'
node = self.values.primary.node
node_instance = self.values.primary.instance
dev_id = create_device_id(node, self.values.primary.instance)
node_firmware = node.get_value(CommandClass.VERSION, ValueIndex.VERSION_APPLICATION)
device_info = DeviceInfo(identifiers={(DOMAIN, dev_id)}, name=create_device_name(node), manufacturer=node.node_manufacturer_name, model=node.node_product_name)
if (node_firmware is not None):
device_info[ATTR_SW_VERSION] = node_firmware.value
if (node_instance > 1):
parent_dev_id = create_device_id(node)
device_info[ATTR_NAME] += f' - Instance {node_instance}'
device_info[ATTR_VIA_DEVICE] = (DOMAIN, parent_dev_id)
return device_info
| -7,016,024,995,082,802,000
|
Return device information for the device registry.
|
homeassistant/components/ozw/entity.py
|
device_info
|
2Fake/core
|
python
|
@property
def device_info(self) -> DeviceInfo:
node = self.values.primary.node
node_instance = self.values.primary.instance
dev_id = create_device_id(node, self.values.primary.instance)
node_firmware = node.get_value(CommandClass.VERSION, ValueIndex.VERSION_APPLICATION)
device_info = DeviceInfo(identifiers={(DOMAIN, dev_id)}, name=create_device_name(node), manufacturer=node.node_manufacturer_name, model=node.node_product_name)
if (node_firmware is not None):
device_info[ATTR_SW_VERSION] = node_firmware.value
if (node_instance > 1):
parent_dev_id = create_device_id(node)
device_info[ATTR_NAME] += f' - Instance {node_instance}'
device_info[ATTR_VIA_DEVICE] = (DOMAIN, parent_dev_id)
return device_info
|
@property
def extra_state_attributes(self):
'Return the device specific state attributes.'
return {const.ATTR_NODE_ID: self.values.primary.node.node_id}
| 8,201,373,440,509,317,000
|
Return the device specific state attributes.
|
homeassistant/components/ozw/entity.py
|
extra_state_attributes
|
2Fake/core
|
python
|
@property
def extra_state_attributes(self):
return {const.ATTR_NODE_ID: self.values.primary.node.node_id}
|
@property
def name(self):
'Return the name of the entity.'
node = self.values.primary.node
return f'{create_device_name(node)}: {self.values.primary.label}'
| -2,945,267,306,644,348,400
|
Return the name of the entity.
|
homeassistant/components/ozw/entity.py
|
name
|
2Fake/core
|
python
|
@property
def name(self):
node = self.values.primary.node
return f'{create_device_name(node)}: {self.values.primary.label}'
|
@property
def unique_id(self):
'Return the unique_id of the entity.'
return self.values.values_id
| 2,640,360,077,620,196,400
|
Return the unique_id of the entity.
|
homeassistant/components/ozw/entity.py
|
unique_id
|
2Fake/core
|
python
|
@property
def unique_id(self):
return self.values.values_id
|
@property
def available(self) -> bool:
'Return entity availability.'
instance_status = self.values.primary.ozw_instance.get_status()
return (instance_status and (instance_status.status in OZW_READY_STATES_VALUES))
| 1,111,907,310,407,255,400
|
Return entity availability.
|
homeassistant/components/ozw/entity.py
|
available
|
2Fake/core
|
python
|
@property
def available(self) -> bool:
instance_status = self.values.primary.ozw_instance.get_status()
return (instance_status and (instance_status.status in OZW_READY_STATES_VALUES))
|
@callback
def _value_changed(self, value):
'Call when a value from ZWaveDeviceEntityValues is changed.\n\n Should not be overridden by subclasses.\n '
if (value.value_id_key in (v.value_id_key for v in self.values if v)):
self.on_value_update()
self.async_write_ha_state()
| -9,045,919,858,291,650,000
|
Call when a value from ZWaveDeviceEntityValues is changed.
Should not be overridden by subclasses.
|
homeassistant/components/ozw/entity.py
|
_value_changed
|
2Fake/core
|
python
|
@callback
def _value_changed(self, value):
'Call when a value from ZWaveDeviceEntityValues is changed.\n\n Should not be overridden by subclasses.\n '
if (value.value_id_key in (v.value_id_key for v in self.values if v)):
self.on_value_update()
self.async_write_ha_state()
|
@callback
def _value_added(self):
'Call when a value from ZWaveDeviceEntityValues is added.\n\n Should not be overridden by subclasses.\n '
self.on_value_update()
| -9,171,750,733,512,999,000
|
Call when a value from ZWaveDeviceEntityValues is added.
Should not be overridden by subclasses.
|
homeassistant/components/ozw/entity.py
|
_value_added
|
2Fake/core
|
python
|
@callback
def _value_added(self):
'Call when a value from ZWaveDeviceEntityValues is added.\n\n Should not be overridden by subclasses.\n '
self.on_value_update()
|
@callback
def _instance_updated(self, new_status):
'Call when the instance status changes.\n\n Should not be overridden by subclasses.\n '
self.on_value_update()
self.async_write_ha_state()
| 6,747,436,077,086,713,000
|
Call when the instance status changes.
Should not be overridden by subclasses.
|
homeassistant/components/ozw/entity.py
|
_instance_updated
|
2Fake/core
|
python
|
@callback
def _instance_updated(self, new_status):
'Call when the instance status changes.\n\n Should not be overridden by subclasses.\n '
self.on_value_update()
self.async_write_ha_state()
|
@property
def should_poll(self):
'No polling needed.'
return False
| 8,786,248,581,820,519,000
|
No polling needed.
|
homeassistant/components/ozw/entity.py
|
should_poll
|
2Fake/core
|
python
|
@property
def should_poll(self):
return False
|
async def _delete_callback(self, values_id):
'Remove this entity.'
if (not self.values):
return
if (values_id == self.values.values_id):
(await self.async_remove(force_remove=True))
| 369,757,128,528,806,000
|
Remove this entity.
|
homeassistant/components/ozw/entity.py
|
_delete_callback
|
2Fake/core
|
python
|
async def _delete_callback(self, values_id):
if (not self.values):
return
if (values_id == self.values.values_id):
(await self.async_remove(force_remove=True))
|
def get_event_channel(event_channel_name: Optional[str]=None, partner_namespace_name: Optional[str]=None, resource_group_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetEventChannelResult:
"\n Event Channel.\n API Version: 2020-04-01-preview.\n\n\n :param str event_channel_name: Name of the event channel.\n :param str partner_namespace_name: Name of the partner namespace.\n :param str resource_group_name: The name of the resource group within the user's subscription.\n "
__args__ = dict()
__args__['eventChannelName'] = event_channel_name
__args__['partnerNamespaceName'] = partner_namespace_name
__args__['resourceGroupName'] = resource_group_name
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid:getEventChannel', __args__, opts=opts, typ=GetEventChannelResult).value
return AwaitableGetEventChannelResult(destination=__ret__.destination, expiration_time_if_not_activated_utc=__ret__.expiration_time_if_not_activated_utc, filter=__ret__.filter, id=__ret__.id, name=__ret__.name, partner_topic_friendly_description=__ret__.partner_topic_friendly_description, partner_topic_readiness_state=__ret__.partner_topic_readiness_state, provisioning_state=__ret__.provisioning_state, source=__ret__.source, type=__ret__.type)
| -6,736,609,495,564,944,000
|
Event Channel.
API Version: 2020-04-01-preview.
:param str event_channel_name: Name of the event channel.
:param str partner_namespace_name: Name of the partner namespace.
:param str resource_group_name: The name of the resource group within the user's subscription.
|
sdk/python/pulumi_azure_nextgen/eventgrid/get_event_channel.py
|
get_event_channel
|
pulumi/pulumi-azure-nextgen
|
python
|
def get_event_channel(event_channel_name: Optional[str]=None, partner_namespace_name: Optional[str]=None, resource_group_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetEventChannelResult:
"\n Event Channel.\n API Version: 2020-04-01-preview.\n\n\n :param str event_channel_name: Name of the event channel.\n :param str partner_namespace_name: Name of the partner namespace.\n :param str resource_group_name: The name of the resource group within the user's subscription.\n "
__args__ = dict()
__args__['eventChannelName'] = event_channel_name
__args__['partnerNamespaceName'] = partner_namespace_name
__args__['resourceGroupName'] = resource_group_name
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid:getEventChannel', __args__, opts=opts, typ=GetEventChannelResult).value
return AwaitableGetEventChannelResult(destination=__ret__.destination, expiration_time_if_not_activated_utc=__ret__.expiration_time_if_not_activated_utc, filter=__ret__.filter, id=__ret__.id, name=__ret__.name, partner_topic_friendly_description=__ret__.partner_topic_friendly_description, partner_topic_readiness_state=__ret__.partner_topic_readiness_state, provisioning_state=__ret__.provisioning_state, source=__ret__.source, type=__ret__.type)
|
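
A hypothetical invocation sketch for `get_event_channel` inside a Pulumi program (the resource names are placeholders):

```python
import pulumi

result = get_event_channel(
    event_channel_name='exampleEventChannel',
    partner_namespace_name='examplePartnerNamespace',
    resource_group_name='example-rg')

# Fields mirror the GetEventChannelResult properties listed below.
pulumi.export('eventChannelProvisioningState', result.provisioning_state)
```
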
@property
@pulumi.getter
def destination(self) -> Optional['outputs.EventChannelDestinationResponse']:
'\n Represents the destination of an event channel.\n '
return pulumi.get(self, 'destination')
| -4,506,886,846,389,863,400
|
Represents the destination of an event channel.
|
sdk/python/pulumi_azure_nextgen/eventgrid/get_event_channel.py
|
destination
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter
def destination(self) -> Optional['outputs.EventChannelDestinationResponse']:
'\n \n '
return pulumi.get(self, 'destination')
|
@property
@pulumi.getter(name='expirationTimeIfNotActivatedUtc')
def expiration_time_if_not_activated_utc(self) -> Optional[str]:
'\n Expiration time of the event channel. If this timer expires while the corresponding partner topic is never activated,\n the event channel and corresponding partner topic are deleted.\n '
return pulumi.get(self, 'expiration_time_if_not_activated_utc')
| 1,019,862,384,949,718,000
|
Expiration time of the event channel. If this timer expires while the corresponding partner topic is never activated,
the event channel and corresponding partner topic are deleted.
|
sdk/python/pulumi_azure_nextgen/eventgrid/get_event_channel.py
|
expiration_time_if_not_activated_utc
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter(name='expirationTimeIfNotActivatedUtc')
def expiration_time_if_not_activated_utc(self) -> Optional[str]:
'\n Expiration time of the event channel. If this timer expires while the corresponding partner topic is never activated,\n the event channel and corresponding partner topic are deleted.\n '
return pulumi.get(self, 'expiration_time_if_not_activated_utc')
|
@property
@pulumi.getter
def filter(self) -> Optional['outputs.EventChannelFilterResponse']:
'\n Information about the filter for the event channel.\n '
return pulumi.get(self, 'filter')
| 4,082,275,425,832,576,000
|
Information about the filter for the event channel.
|
sdk/python/pulumi_azure_nextgen/eventgrid/get_event_channel.py
|
filter
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter
def filter(self) -> Optional['outputs.EventChannelFilterResponse']:
'\n \n '
return pulumi.get(self, 'filter')
|
@property
@pulumi.getter
def id(self) -> str:
'\n Fully qualified identifier of the resource.\n '
return pulumi.get(self, 'id')
| -797,042,907,259,989,800
|
Fully qualified identifier of the resource.
|
sdk/python/pulumi_azure_nextgen/eventgrid/get_event_channel.py
|
id
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter
def id(self) -> str:
'\n \n '
return pulumi.get(self, 'id')
|
@property
@pulumi.getter
def name(self) -> str:
'\n Name of the resource\n '
return pulumi.get(self, 'name')
| 8,397,791,671,501,956,000
|
Name of the resource
|
sdk/python/pulumi_azure_nextgen/eventgrid/get_event_channel.py
|
name
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter
def name(self) -> str:
'\n \n '
return pulumi.get(self, 'name')
|
@property
@pulumi.getter(name='partnerTopicFriendlyDescription')
def partner_topic_friendly_description(self) -> Optional[str]:
'\n Friendly description about the topic. This can be set by the publisher/partner to show custom description for the customer partner topic.\n This will be helpful to remove any ambiguity of the origin of creation of the partner topic for the customer.\n '
return pulumi.get(self, 'partner_topic_friendly_description')
| -2,828,560,881,684,123,600
|
Friendly description about the topic. This can be set by the publisher/partner to show custom description for the customer partner topic.
This will be helpful to remove any ambiguity of the origin of creation of the partner topic for the customer.
|
sdk/python/pulumi_azure_nextgen/eventgrid/get_event_channel.py
|
partner_topic_friendly_description
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter(name='partnerTopicFriendlyDescription')
def partner_topic_friendly_description(self) -> Optional[str]:
'\n Friendly description about the topic. This can be set by the publisher/partner to show custom description for the customer partner topic.\n This will be helpful to remove any ambiguity of the origin of creation of the partner topic for the customer.\n '
return pulumi.get(self, 'partner_topic_friendly_description')
|
@property
@pulumi.getter(name='partnerTopicReadinessState')
def partner_topic_readiness_state(self) -> str:
'\n The readiness state of the corresponding partner topic.\n '
return pulumi.get(self, 'partner_topic_readiness_state')
| -6,023,202,783,094,649,000
|
The readiness state of the corresponding partner topic.
|
sdk/python/pulumi_azure_nextgen/eventgrid/get_event_channel.py
|
partner_topic_readiness_state
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter(name='partnerTopicReadinessState')
def partner_topic_readiness_state(self) -> str:
'\n \n '
return pulumi.get(self, 'partner_topic_readiness_state')
|
@property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> str:
'\n Provisioning state of the event channel.\n '
return pulumi.get(self, 'provisioning_state')
| -8,108,073,650,365,843,000
|
Provisioning state of the event channel.
|
sdk/python/pulumi_azure_nextgen/eventgrid/get_event_channel.py
|
provisioning_state
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> str:
'\n \n '
return pulumi.get(self, 'provisioning_state')
|
@property
@pulumi.getter
def source(self) -> Optional['outputs.EventChannelSourceResponse']:
"\n Source of the event channel. This represents a unique resource in the partner's resource model.\n "
return pulumi.get(self, 'source')
| 6,002,450,563,591,865,000
|
Source of the event channel. This represents a unique resource in the partner's resource model.
|
sdk/python/pulumi_azure_nextgen/eventgrid/get_event_channel.py
|
source
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter
def source(self) -> Optional['outputs.EventChannelSourceResponse']:
"\n \n "
return pulumi.get(self, 'source')
|
@property
@pulumi.getter
def type(self) -> str:
'\n Type of the resource\n '
return pulumi.get(self, 'type')
| -7,565,207,814,351,393,000
|
Type of the resource
|
sdk/python/pulumi_azure_nextgen/eventgrid/get_event_channel.py
|
type
|
pulumi/pulumi-azure-nextgen
|
python
|
@property
@pulumi.getter
def type(self) -> str:
'\n \n '
return pulumi.get(self, 'type')
|
def _ExtendDefaultOptions(**kwargs):
'Extend DEFAULT_OPTIONS with keys/values in kwargs.'
options_kwargs = DEFAULT_OPTIONS.copy()
options_kwargs.update(kwargs)
return cros_test_lib.EasyAttr(**options_kwargs)
| 730,533,219,309,188,200
|
Extend DEFAULT_OPTIONS with keys/values in kwargs.
|
third_party/chromite/cbuildbot/archive_lib_unittest.py
|
_ExtendDefaultOptions
|
FLOSSBoxIN/src
|
python
|
def _ExtendDefaultOptions(**kwargs):
options_kwargs = DEFAULT_OPTIONS.copy()
options_kwargs.update(kwargs)
return cros_test_lib.EasyAttr(**options_kwargs)
|
def _ExtendDefaultConfig(**kwargs):
'Extend DEFAULT_CONFIG with keys/values in kwargs.'
config_kwargs = DEFAULT_CONFIG.copy()
config_kwargs.update(kwargs)
return config_lib.BuildConfig(**config_kwargs)
| 1,578,621,462,235,720,400
|
Extend DEFAULT_CONFIG with keys/values in kwargs.
|
third_party/chromite/cbuildbot/archive_lib_unittest.py
|
_ExtendDefaultConfig
|
FLOSSBoxIN/src
|
python
|
def _ExtendDefaultConfig(**kwargs):
config_kwargs = DEFAULT_CONFIG.copy()
config_kwargs.update(kwargs)
return config_lib.BuildConfig(**config_kwargs)
|
def _NewBuilderRun(options=None, config=None):
'Create a BuilderRun object from options and config values.\n\n Args:\n options: Specify options or default to DEFAULT_OPTIONS.\n config: Specify build config or default to DEFAULT_CONFIG.\n\n Returns:\n BuilderRun object.\n '
manager = parallel_unittest.FakeMultiprocessManager()
options = (options or DEFAULT_OPTIONS)
config = (config or DEFAULT_CONFIG)
site_config = config_lib_unittest.MockSiteConfig()
site_config[config.name] = config
return cbuildbot_run.BuilderRun(options, site_config, config, manager)
| -4,143,234,409,975,037,400
|
Create a BuilderRun object from options and config values.
Args:
options: Specify options or default to DEFAULT_OPTIONS.
config: Specify build config or default to DEFAULT_CONFIG.
Returns:
BuilderRun object.
|
third_party/chromite/cbuildbot/archive_lib_unittest.py
|
_NewBuilderRun
|
FLOSSBoxIN/src
|
python
|
def _NewBuilderRun(options=None, config=None):
'Create a BuilderRun object from options and config values.\n\n Args:\n options: Specify options or default to DEFAULT_OPTIONS.\n config: Specify build config or default to DEFAULT_CONFIG.\n\n Returns:\n BuilderRun object.\n '
manager = parallel_unittest.FakeMultiprocessManager()
options = (options or DEFAULT_OPTIONS)
config = (config or DEFAULT_CONFIG)
site_config = config_lib_unittest.MockSiteConfig()
site_config[config.name] = config
return cbuildbot_run.BuilderRun(options, site_config, config, manager)
|
def _GetBaseUploadURI(self, *args, **kwargs):
'Test GetBaseUploadURI with archive_base and no bot_id.'
return archive_lib.GetBaseUploadURI(self.cfg, *args, **kwargs)
| -7,643,745,006,367,143,000
|
Test GetBaseUploadURI with archive_base and no bot_id.
|
third_party/chromite/cbuildbot/archive_lib_unittest.py
|
_GetBaseUploadURI
|
FLOSSBoxIN/src
|
python
|
def _GetBaseUploadURI(self, *args, **kwargs):
return archive_lib.GetBaseUploadURI(self.cfg, *args, **kwargs)
|
def testDefaultGSPath(self):
'Test GetBaseUploadURI with default gs_path value in config.'
self.cfg = _ExtendDefaultConfig(gs_path=config_lib.GS_PATH_DEFAULT)
expected_result = ('%s/%s' % (config_lib.GetConfig().params.ARCHIVE_URL, DEFAULT_BOT_NAME))
result = self._GetBaseUploadURI()
self.assertEqual(expected_result, result)
expected_result = ('%s/%s' % (config_lib.GetConfig().params.ARCHIVE_URL, self.BOT_ID))
result = self._GetBaseUploadURI(bot_id=self.BOT_ID)
self.assertEqual(expected_result, result)
| 4,054,763,538,668,536,000
|
Test GetBaseUploadURI with default gs_path value in config.
|
third_party/chromite/cbuildbot/archive_lib_unittest.py
|
testDefaultGSPath
|
FLOSSBoxIN/src
|
python
|
def testDefaultGSPath(self):
self.cfg = _ExtendDefaultConfig(gs_path=config_lib.GS_PATH_DEFAULT)
expected_result = ('%s/%s' % (config_lib.GetConfig().params.ARCHIVE_URL, DEFAULT_BOT_NAME))
result = self._GetBaseUploadURI()
self.assertEqual(expected_result, result)
expected_result = ('%s/%s' % (config_lib.GetConfig().params.ARCHIVE_URL, self.BOT_ID))
result = self._GetBaseUploadURI(bot_id=self.BOT_ID)
self.assertEqual(expected_result, result)
|
def testOverrideGSPath(self):
'Test GetBaseUploadURI with overridden gs_path value in config.'
self.cfg = _ExtendDefaultConfig(gs_path='gs://funkytown/foo/bar')
expected_result = 'gs://funkytown/foo/bar/TheCoolBot'
result = self._GetBaseUploadURI()
self.assertEqual(expected_result, result)
expected_result = 'gs://funkytown/foo/bar/TheNewBotId'
result = self._GetBaseUploadURI(bot_id=self.BOT_ID)
self.assertEqual(expected_result, result)
| -7,068,031,788,268,119,000
|
Test GetBaseUploadURI with overridden gs_path value in config.
|
third_party/chromite/cbuildbot/archive_lib_unittest.py
|
testOverrideGSPath
|
FLOSSBoxIN/src
|
python
|
def testOverrideGSPath(self):
self.cfg = _ExtendDefaultConfig(gs_path='gs://funkytown/foo/bar')
expected_result = 'gs://funkytown/foo/bar/TheCoolBot'
result = self._GetBaseUploadURI()
self.assertEqual(expected_result, result)
expected_result = 'gs://funkytown/foo/bar/TheNewBotId'
result = self._GetBaseUploadURI(bot_id=self.BOT_ID)
self.assertEqual(expected_result, result)
|
def app(argv):
'Execute in system shell\n '
if (len(argv) < 3):
print(('Usage: python %s <command> <table> [<column1>=<value1> [...]]\n <command>: print, insert, delete, or edit\n <table>: barrier, shape, or josephson\n' % argv[0]))
sys.exit(0)
db = SVJJDBInteract()
funcname = argv[1]
table = argv[2]
kwargs = args2kwargs(argv[3:])
getattr(db, funcname)(table, **kwargs)
db.close()
| 4,346,635,271,438,776,300
|
Execute in system shell
|
cryomem/cmtools/lib/old/sql_svjj_old2.py
|
app
|
bebaek/cryomem
|
python
|
def app(argv):
'\n '
if (len(argv) < 3):
print(('Usage: python %s <command> <table> [<column1>=<value1> [...]]\n <command>: print, insert, delete, or edit\n <table>: barrier, shape, or josephson\n' % argv[0]))
sys.exit(0)
db = SVJJDBInteract()
funcname = argv[1]
table = argv[2]
kwargs = args2kwargs(argv[3:])
getattr(db, funcname)(table, **kwargs)
db.close()
|
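
`app()` depends on an `args2kwargs` helper that is not part of this record. A plausible sketch, under the assumption that it splits `column=value` CLI tokens into a keyword dict (assumed behavior, not the verified original):

```python
def args2kwargs(args):
    # 'thickness=1.2' -> {'thickness': '1.2'}; split only on the first '='.
    return dict(arg.split('=', 1) for arg in args)
```
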
def _has_required_moto():
'\n Returns True or False depending on whether ``moto`` is installed and at the correct version\n for the version of Python running these tests.\n '
if (not HAS_MOTO):
return False
else:
moto_version = salt.utils.versions.LooseVersion(pkg_resources.get_distribution('moto').version)
if (moto_version < salt.utils.versions.LooseVersion(required_moto)):
return False
elif (moto_version < salt.utils.versions.LooseVersion(required_moto_py3)):
return False
return True
| 6,249,751,324,489,795,000
|
Returns True or False depending on if ``moto`` is installed and at the correct version,
depending on what version of Python is running these tests.
|
tests/unit/modules/test_boto_elb.py
|
_has_required_moto
|
Anujsahu902/salt
|
python
|
def _has_required_moto():
'\n Returns True or False depending on if ``moto`` is installed and at the correct version,\n depending on what version of Python is running these tests.\n '
if (not HAS_MOTO):
return False
else:
moto_version = salt.utils.versions.LooseVersion(pkg_resources.get_distribution('moto').version)
if (moto_version < salt.utils.versions.LooseVersion(required_moto)):
return False
elif (moto_version < salt.utils.versions.LooseVersion(required_moto_py3)):
return False
return True
|
@mock_ec2_deprecated
@mock_elb_deprecated
def test_register_instances_valid_id_result_true(self):
        '\n tests that given a valid instance id and valid ELB,\n register_instances returns True.\n '
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestRegisterInstancesValidIdResult'
conn_elb.create_load_balancer(elb_name, zones, [(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60')
register_result = boto_elb.register_instances(elb_name, reservations.instances[0].id, **conn_parameters)
self.assertEqual(True, register_result)
| 2,777,180,147,702,114,000
|
tests that given a valid instance id and valid ELB,
register_instances returns True.
|
tests/unit/modules/test_boto_elb.py
|
test_register_instances_valid_id_result_true
|
Anujsahu902/salt
|
python
|
@mock_ec2_deprecated
@mock_elb_deprecated
def test_register_instances_valid_id_result_true(self):
        '\n tests that given a valid instance id and valid ELB,\n register_instances returns True.\n '
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestRegisterInstancesValidIdResult'
conn_elb.create_load_balancer(elb_name, zones, [(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60')
register_result = boto_elb.register_instances(elb_name, reservations.instances[0].id, **conn_parameters)
self.assertEqual(True, register_result)
|
@mock_ec2_deprecated
@mock_elb_deprecated
def test_register_instances_valid_id_string(self):
        '\n tests that given a string containing an instance id and valid ELB,\n register_instances adds the given instance to an ELB\n '
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestRegisterInstancesValidIdResult'
conn_elb.create_load_balancer(elb_name, zones, [(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60')
boto_elb.register_instances(elb_name, reservations.instances[0].id, **conn_parameters)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
registered_instance_ids = [instance.id for instance in load_balancer_refreshed.instances]
log.debug(load_balancer_refreshed.instances)
self.assertEqual([reservations.instances[0].id], registered_instance_ids)
| -6,257,057,722,977,847,000
|
tests that given a string containing an instance id and valid ELB,
register_instances adds the given instance to an ELB
|
tests/unit/modules/test_boto_elb.py
|
test_register_instances_valid_id_string
|
Anujsahu902/salt
|
python
|
@mock_ec2_deprecated
@mock_elb_deprecated
def test_register_instances_valid_id_string(self):
        '\n tests that given a string containing an instance id and valid ELB,\n register_instances adds the given instance to an ELB\n '
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestRegisterInstancesValidIdResult'
conn_elb.create_load_balancer(elb_name, zones, [(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60')
boto_elb.register_instances(elb_name, reservations.instances[0].id, **conn_parameters)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
registered_instance_ids = [instance.id for instance in load_balancer_refreshed.instances]
log.debug(load_balancer_refreshed.instances)
self.assertEqual([reservations.instances[0].id], registered_instance_ids)
|
@mock_ec2_deprecated
@mock_elb_deprecated
def test_deregister_instances_valid_id_result_true(self):
        '\n tests that given a valid id the boto_elb deregister_instances method\n removes exactly one of a number of ELB registered instances\n '
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestDeregisterInstancesValidIdResult'
load_balancer = conn_elb.create_load_balancer(elb_name, zones, [(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60')
load_balancer.register_instances(reservations.instances[0].id)
deregister_result = boto_elb.deregister_instances(elb_name, reservations.instances[0].id, **conn_parameters)
self.assertEqual(True, deregister_result)
| -5,711,186,435,208,022,000
|
tests that given a valid id the boto_elb deregister_instances method
removes exactly one of a number of ELB registered instances
|
tests/unit/modules/test_boto_elb.py
|
test_deregister_instances_valid_id_result_true
|
Anujsahu902/salt
|
python
|
@mock_ec2_deprecated
@mock_elb_deprecated
def test_deregister_instances_valid_id_result_true(self):
        '\n tests that given a valid id the boto_elb deregister_instances method\n removes exactly one of a number of ELB registered instances\n '
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestDeregisterInstancesValidIdResult'
load_balancer = conn_elb.create_load_balancer(elb_name, zones, [(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60')
load_balancer.register_instances(reservations.instances[0].id)
deregister_result = boto_elb.deregister_instances(elb_name, reservations.instances[0].id, **conn_parameters)
self.assertEqual(True, deregister_result)
|
@mock_ec2_deprecated
@mock_elb_deprecated
def test_deregister_instances_valid_id_string(self):
        '\n tests that given a valid id the boto_elb deregister_instances method\n removes exactly one of a number of ELB registered instances\n '
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestDeregisterInstancesValidIdString'
load_balancer = conn_elb.create_load_balancer(elb_name, zones, [(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60', min_count=2)
all_instance_ids = [instance.id for instance in reservations.instances]
load_balancer.register_instances(all_instance_ids)
boto_elb.deregister_instances(elb_name, reservations.instances[0].id, **conn_parameters)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
expected_instances = deepcopy(all_instance_ids)
expected_instances.remove(reservations.instances[0].id)
actual_instances = [instance.id for instance in load_balancer_refreshed.instances]
self.assertEqual(actual_instances, expected_instances)
| 8,067,242,356,702,884,000
|
tests that given a valid id the boto_elb deregister_instances method
removes exactly one of a number of ELB registered instances
|
tests/unit/modules/test_boto_elb.py
|
test_deregister_instances_valid_id_string
|
Anujsahu902/salt
|
python
|
@mock_ec2_deprecated
@mock_elb_deprecated
def test_deregister_instances_valid_id_string(self):
        '\n tests that given a valid id the boto_elb deregister_instances method\n removes exactly one of a number of ELB registered instances\n '
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestDeregisterInstancesValidIdString'
load_balancer = conn_elb.create_load_balancer(elb_name, zones, [(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60', min_count=2)
all_instance_ids = [instance.id for instance in reservations.instances]
load_balancer.register_instances(all_instance_ids)
boto_elb.deregister_instances(elb_name, reservations.instances[0].id, **conn_parameters)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
expected_instances = deepcopy(all_instance_ids)
expected_instances.remove(reservations.instances[0].id)
actual_instances = [instance.id for instance in load_balancer_refreshed.instances]
self.assertEqual(actual_instances, expected_instances)
|
@mock_ec2_deprecated
@mock_elb_deprecated
def test_deregister_instances_valid_id_list(self):
        '\n tests that given valid ids in the form of a list, the boto_elb\n deregister_instances method removes all members of the given list\n '
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestDeregisterInstancesValidIdList'
load_balancer = conn_elb.create_load_balancer(elb_name, zones, [(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60', min_count=3)
all_instance_ids = [instance.id for instance in reservations.instances]
load_balancer.register_instances(all_instance_ids)
deregister_instances = [instance.id for instance in reservations.instances[:(- 1)]]
expected_instances = [reservations.instances[(- 1)].id]
boto_elb.deregister_instances(elb_name, deregister_instances, **conn_parameters)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
actual_instances = [instance.id for instance in load_balancer_refreshed.instances]
self.assertEqual(actual_instances, expected_instances)
| 5,285,540,475,398,629,000
|
tests that given valid ids in the form of a list, the boto_elb
deregister_instances method removes all members of the given list
|
tests/unit/modules/test_boto_elb.py
|
test_deregister_instances_valid_id_list
|
Anujsahu902/salt
|
python
|
@mock_ec2_deprecated
@mock_elb_deprecated
def test_deregister_instances_valid_id_list(self):
        '\n tests that given valid ids in the form of a list, the boto_elb\n deregister_instances method removes all members of the given list\n '
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region, **boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestDeregisterInstancesValidIdList'
load_balancer = conn_elb.create_load_balancer(elb_name, zones, [(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60', min_count=3)
all_instance_ids = [instance.id for instance in reservations.instances]
load_balancer.register_instances(all_instance_ids)
deregister_instances = [instance.id for instance in reservations.instances[:(- 1)]]
expected_instances = [reservations.instances[(- 1)].id]
boto_elb.deregister_instances(elb_name, deregister_instances, **conn_parameters)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
actual_instances = [instance.id for instance in load_balancer_refreshed.instances]
self.assertEqual(actual_instances, expected_instances)
|
def mock_ec2_deprecated(self):
'\n if the mock_ec2_deprecated function is not available due to import failure\n this replaces the decorated function with stub_function.\n Allows boto_elb unit tests to use the @mock_ec2_deprecated decorator\n without a "NameError: name \'mock_ec2_deprecated\' is not defined" error.\n '
def stub_function(self):
pass
return stub_function
| 1,059,726,787,215,719,600
|
if the mock_ec2_deprecated function is not available due to import failure
this replaces the decorated function with stub_function.
Allows boto_elb unit tests to use the @mock_ec2_deprecated decorator
without a "NameError: name 'mock_ec2_deprecated' is not defined" error.
|
tests/unit/modules/test_boto_elb.py
|
mock_ec2_deprecated
|
Anujsahu902/salt
|
python
|
def mock_ec2_deprecated(self):
'\n if the mock_ec2_deprecated function is not available due to import failure\n this replaces the decorated function with stub_function.\n Allows boto_elb unit tests to use the @mock_ec2_deprecated decorator\n without a "NameError: name \'mock_ec2_deprecated\' is not defined" error.\n '
def stub_function(self):
pass
return stub_function
|
def mock_elb_deprecated(self):
'\n if the mock_elb_deprecated function is not available due to import failure\n this replaces the decorated function with stub_function.\n Allows boto_elb unit tests to use the @mock_elb_deprecated decorator\n without a "NameError: name \'mock_elb_deprecated\' is not defined" error.\n '
def stub_function(self):
pass
return stub_function
| 8,916,713,692,544,773,000
|
if the mock_elb_deprecated function is not available due to import failure
this replaces the decorated function with stub_function.
Allows boto_elb unit tests to use the @mock_elb_deprecated decorator
without a "NameError: name 'mock_elb_deprecated' is not defined" error.
|
tests/unit/modules/test_boto_elb.py
|
mock_elb_deprecated
|
Anujsahu902/salt
|
python
|
def mock_elb_deprecated(self):
'\n if the mock_elb_deprecated function is not available due to import failure\n this replaces the decorated function with stub_function.\n Allows boto_elb unit tests to use the @mock_elb_deprecated decorator\n without a "NameError: name \'mock_elb_deprecated\' is not defined" error.\n '
def stub_function(self):
pass
return stub_function
|
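The two stub records above are fallbacks for a missing optional test dependency: when moto cannot be imported, the decorator names are bound to replacements that swallow the decorated test. A self-contained sketch of that guard pattern, assuming module-level definitions rather than salt's exact test scaffolding:
try:
    from moto import mock_ec2_deprecated, mock_elb_deprecated
    HAS_MOTO = True
except ImportError:
    HAS_MOTO = False

    def mock_ec2_deprecated(func):
        # Stand-in decorator: returns a do-nothing stub so decorated
        # tests still define without a NameError.
        def stub_function(self):
            pass
        return stub_function

    def mock_elb_deprecated(func):
        def stub_function(self):
            pass
        return stub_function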
def trim(self, t=None, run=None):
'\n Trims all runs of the Evaluator to the length specified by the threshold value t.\n\n @param t: Threshold parameter or number of top-k documents to be considered.\n @param run: If run is not None, only the provided run will be trimmed.\n '
if run:
run = break_ties(run)
if t:
trim(run, thresh=t)
else:
trim(run)
return
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
if t:
trim(self.run_b_orig, thresh=t)
else:
trim(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
if t:
trim(self.run_a_orig, thresh=t)
else:
trim(self.run_a_orig)
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
if t:
trim(self.run_b_rep, thresh=t)
else:
trim(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
if t:
trim(self.run_a_rep, thresh=t)
else:
trim(self.run_a_rep)
| -3,830,407,540,122,297,000
|
Trims all runs of the Evaluator to the length specified by the threshold value t.
@param t: Threshold parameter or number of top-k documents to be considered.
@param run: If run is not None, only the provided run will be trimmed.
|
repro_eval/Evaluator.py
|
trim
|
irgroup/repro_eval
|
python
|
def trim(self, t=None, run=None):
'\n Trims all runs of the Evaluator to the length specified by the threshold value t.\n\n @param t: Threshold parameter or number of top-k documents to be considered.\n @param run: If run is not None, only the provided run will be trimmed.\n '
if run:
run = break_ties(run)
if t:
trim(run, thresh=t)
else:
trim(run)
return
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
if t:
trim(self.run_b_orig, thresh=t)
else:
trim(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
if t:
trim(self.run_a_orig, thresh=t)
else:
trim(self.run_a_orig)
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
if t:
trim(self.run_b_rep, thresh=t)
else:
trim(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
if t:
trim(self.run_a_rep, thresh=t)
else:
trim(self.run_a_rep)
|
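For context on the `trim` record above: the underlying `trim` utility cuts each topic's ranking down to the top-k documents by score. A minimal sketch of that operation on a pytrec_eval-style run dict ({topic: {doc_id: score}}); the in-place mutation and the default depth of 1000 are assumptions, not necessarily repro_eval's exact behavior:
def trim_run(run, thresh=1000):
    # Keep only the `thresh` highest-scoring documents per topic.
    for topic, docs in run.items():
        ranked = sorted(docs.items(), key=lambda kv: kv[1], reverse=True)
        run[topic] = dict(ranked[:thresh])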
def evaluate(self, run=None):
'\n Evaluates the original baseline and advanced run if available.\n\n @param run: Reproduced or replicated run that will be evaluated.\n '
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)
| -2,836,894,970,880,344,000
|
Evaluates the original baseline and advanced run if available.
@param run: Reproduced or replicated run that will be evaluated.
|
repro_eval/Evaluator.py
|
evaluate
|
irgroup/repro_eval
|
python
|
def evaluate(self, run=None):
'\n Evaluates the original baseline and advanced run if available.\n\n @param run: Reproduced or replicated run that will be evaluated.\n '
if self.run_b_orig:
self.run_b_orig = break_ties(self.run_b_orig)
self.run_b_orig_score = self.rel_eval.evaluate(self.run_b_orig)
if self.run_a_orig:
self.run_a_orig = break_ties(self.run_a_orig)
self.run_a_orig_score = self.rel_eval.evaluate(self.run_a_orig)
|
def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
'\n Determines the Effect Ratio (ER) according to the following paper:\n Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.\n How to Measure the Reproducibility of System-oriented IR Experiments.\n Proceedings of SIGIR, pages 349-358, 2020.\n\n The ER value is determined by the ratio between the mean improvements\n of the original and reproduced/replicated experiments.\n\n @param run_b_score: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_score: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary containing the ER values for the specified run combination.\n '
if print_feedback:
print('Determining Effect Ratio (ER)')
if (self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path):
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = (self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep))
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = (self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep))
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score, rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
if (self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score):
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score, rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
if (self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score):
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score, rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
else:
print(ERR_MSG)
| -8,725,338,648,397,147,000
|
Determines the Effect Ratio (ER) according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
The ER value is determined by the ratio between the mean improvements
of the original and reproduced/replicated experiments.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary containing the ER values for the specified run combination.
|
repro_eval/Evaluator.py
|
er
|
irgroup/repro_eval
|
python
|
def er(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
'\n Determines the Effect Ratio (ER) according to the following paper:\n Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.\n How to Measure the Reproducibility of System-oriented IR Experiments.\n Proceedings of SIGIR, pages 349-358, 2020.\n\n The ER value is determined by the ratio between the mean improvements\n of the original and reproduced/replicated experiments.\n\n @param run_b_score: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_score: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary containing the ER values for the specified run combination.\n '
if print_feedback:
print('Determining Effect Ratio (ER)')
if (self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path):
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = (self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep))
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = (self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep))
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score, rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
if (self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score):
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score, rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
if (self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score):
return ER(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score, rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
else:
print(ERR_MSG)
|
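The Effect Ratio described in the `er` record is the mean per-topic improvement (advanced minus baseline) of the reproduced experiment divided by that of the original one. A sketch of the arithmetic over pytrec_eval-style score dicts ({topic: {measure: value}}); the function name, the single-measure view, and averaging over the shared topic set are illustrative assumptions:
def effect_ratio(orig_b, orig_a, rep_b, rep_a, measure='map'):
    # Topics present in all four score dicts (assumes a non-empty intersection).
    topics = sorted(set(orig_b) & set(orig_a) & set(rep_b) & set(rep_a))
    delta_orig = sum(orig_a[t][measure] - orig_b[t][measure] for t in topics) / len(topics)
    delta_rep = sum(rep_a[t][measure] - rep_b[t][measure] for t in topics) / len(topics)
    return delta_rep / delta_orig  # ER; assumes the original improvement is non-zero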
def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
'\n Determines the Delta Relative Improvement (DeltaRI) according to the following paper:\n Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.\n How to Measure the Reproducibility of System-oriented IR Experiments.\n Proceedings of SIGIR, pages 349-358, 2020.\n\n The DeltaRI value is determined by the difference between the relative improvements\n of the original and reproduced/replicated experiments.\n\n @param run_b_score: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_score: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary containing the DRI values for the specified run combination.\n '
if print_feedback:
print('Determining Delta Relative Improvement (DRI)')
if (self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path):
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = (self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep))
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = (self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep))
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score, rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
if (self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score):
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score, rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
if (self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score):
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score, rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
else:
print(ERR_MSG)
| -6,920,642,213,305,712,000
|
Determines the Delta Relative Improvement (DeltaRI) according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
The DeltaRI value is determined by the difference between the relative improvements
of the original and reproduced/replicated experiments.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary containing the DRI values for the specified run combination.
|
repro_eval/Evaluator.py
|
dri
|
irgroup/repro_eval
|
python
|
def dri(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
'\n Determines the Delta Relative Improvement (DeltaRI) according to the following paper:\n Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.\n How to Measure the Reproducibility of System-oriented IR Experiments.\n Proceedings of SIGIR, pages 349-358, 2020.\n\n The DeltaRI value is determined by the difference between the relative improvements\n of the original and reproduced/replicated experiments.\n\n @param run_b_score: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_score: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary containing the DRI values for the specified run combination.\n '
if print_feedback:
print('Determining Delta Relative Improvement (DRI)')
if (self.run_b_orig_score and self.run_a_orig_score and run_b_path and run_a_path):
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = (self.rel_eval_rpl.evaluate(run_b_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_b_rep))
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = (self.rel_eval_rpl.evaluate(run_a_rep) if hasattr(self, 'rel_eval_rpl') else self.rel_eval.evaluate(run_a_rep))
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score, rep_score_b=run_b_rep_score, rep_score_a=run_a_rep_score, pbar=print_feedback)
if (self.run_b_orig_score and self.run_a_orig_score and run_b_score and run_a_score):
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score, rep_score_b=run_b_score, rep_score_a=run_a_score, pbar=print_feedback)
if (self.run_b_orig_score and self.run_a_orig_score and self.run_b_rep_score and self.run_a_rep_score):
return deltaRI(orig_score_b=self.run_b_orig_score, orig_score_a=self.run_a_orig_score, rep_score_b=self.run_b_rep_score, rep_score_a=self.run_a_rep_score, pbar=print_feedback)
else:
print(ERR_MSG)
|
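Delta Relative Improvement, per the `dri` record, contrasts relative rather than absolute gains: the original pair's relative improvement minus the reproduced pair's, so values near zero indicate the replica preserves the improvement. A sketch under the same score-dict assumptions as the ER example above (function name assumed):
def delta_relative_improvement(orig_b, orig_a, rep_b, rep_a, measure='map'):
    mean = lambda scores: sum(s[measure] for s in scores.values()) / len(scores)
    ri_orig = (mean(orig_a) - mean(orig_b)) / mean(orig_b)  # relative improvement, original
    ri_rep = (mean(rep_a) - mean(rep_b)) / mean(rep_b)      # relative improvement, reproduced
    return ri_orig - ri_rep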
def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
'\n Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to the following paper:\n Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.\n How to Measure the Reproducibility of System-oriented IR Experiments.\n Proceedings of SIGIR, pages 349-358, 2020.\n\n @param rpd: Boolean indicating if the evaluated runs are reproduced.\n @param run_b_score: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_score: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.\n '
if (self.run_b_orig_score and (self.run_b_rep_score or run_b_score)):
if (run_b_score and run_a_score):
if print_feedback:
print('Determining p-values of t-test for baseline and advanced run.')
return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback), 'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
if run_b_score:
if print_feedback:
print('Determining p-values of t-test for baseline run.')
return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
if (self.run_a_orig_score and self.run_a_rep_score):
if print_feedback:
print('Determining p-values of t-test for baseline and advanced run.')
return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback), 'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
else:
if print_feedback:
print('Determining p-values of t-test for baseline run.')
return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
else:
print(ERR_MSG)
| 4,213,860,720,074,198,000
|
Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
@param rpd: Boolean indicating if the evaluated runs are reproduced.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
|
repro_eval/Evaluator.py
|
_ttest
|
irgroup/repro_eval
|
python
|
def _ttest(self, rpd=True, run_b_score=None, run_a_score=None, print_feedback=False):
'\n Conducts either a paired (reproducibility) or unpaired (replicability) two-sided t-test according to the following paper:\n Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.\n How to Measure the Reproducibility of System-oriented IR Experiments.\n Proceedings of SIGIR, pages 349-358, 2020.\n\n @param rpd: Boolean indicating if the evaluated runs are reproduced.\n @param run_b_score: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_score: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.\n '
if (self.run_b_orig_score and (self.run_b_rep_score or run_b_score)):
if (run_b_score and run_a_score):
if print_feedback:
print('Determining p-values of t-test for baseline and advanced run.')
return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback), 'advanced': ttest(self.run_a_orig_score, run_a_score, rpd=rpd, pbar=print_feedback)}
if run_b_score:
if print_feedback:
print('Determining p-values of t-test for baseline run.')
return {'baseline': ttest(self.run_b_orig_score, run_b_score, rpd=rpd, pbar=print_feedback)}
if (self.run_a_orig_score and self.run_a_rep_score):
if print_feedback:
print('Determining p-values of t-test for baseline and advanced run.')
return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback), 'advanced': ttest(self.run_a_orig_score, self.run_a_rep_score, rpd=rpd, pbar=print_feedback)}
else:
if print_feedback:
print('Determining p-values of t-test for baseline run.')
return {'baseline': ttest(self.run_b_orig_score, self.run_b_rep_score, rpd=rpd, pbar=print_feedback)}
else:
print(ERR_MSG)
|
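The `_ttest` record branches on the `rpd` flag: reproduced runs share the original topics, so a paired test applies; replicated runs come from another collection, so an unpaired test is used. A sketch of that distinction with scipy.stats; the score alignment and function name are assumed glue code:
from scipy import stats

def ttest_scores(orig_scores, rep_scores, measure='map', rpd=True):
    orig = [v[measure] for _, v in sorted(orig_scores.items())]
    rep = [v[measure] for _, v in sorted(rep_scores.items())]
    if rpd:
        # Same topics in both runs: paired two-sided t-test.
        return stats.ttest_rel(orig, rep).pvalue
    # Different collection/topics: independent two-sided t-test.
    return stats.ttest_ind(orig, rep).pvalue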
def evaluate(self, run=None):
        '\n Evaluates the scores of the original and reproduced baseline and advanced runs.\n If a (reproduced) run is provided only this one will be evaluated and a dictionary with the corresponding\n scores is returned.\n @param run: A reproduced run. If not specified, the original and reproduced runs of the RpdEvaluator will\n be used instead.\n @return: If run is specified, a dictionary with the corresponding scores is returned.\n '
if run:
return self.rel_eval.evaluate(run)
super(RpdEvaluator, self).evaluate()
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)
| 2,651,078,559,028,764,000
|
Evaluates the scores of the original and reproduced baseline and advanced runs.
If a (reproduced) run is provided only this one will be evaluated and a dictionary with the corresponding
scores is returned.
@param run: A reproduced run. If not specified, the original and reproduced runs of the RpdEvaluator will
be used instead.
@return: If run is specified, a dictionary with the corresponding scores is returned.
|
repro_eval/Evaluator.py
|
evaluate
|
irgroup/repro_eval
|
python
|
def evaluate(self, run=None):
        '\n Evaluates the scores of the original and reproduced baseline and advanced runs.\n If a (reproduced) run is provided only this one will be evaluated and a dictionary with the corresponding\n scores is returned.\n @param run: A reproduced run. If not specified, the original and reproduced runs of the RpdEvaluator will\n be used instead.\n @return: If run is specified, a dictionary with the corresponding scores is returned.\n '
if run:
return self.rel_eval.evaluate(run)
super(RpdEvaluator, self).evaluate()
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
self.run_b_rep_score = self.rel_eval.evaluate(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
self.run_a_rep_score = self.rel_eval.evaluate(self.run_a_rep)
|
def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
"\n Determines Kendall's tau Union (KTU) between the original and reproduced document orderings\n according to the following paper:\n Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.\n How to Measure the Reproducibility of System-oriented IR Experiments.\n Proceedings of SIGIR, pages 349-358, 2020.\n\n @param run_b_rep: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_rep: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_b_path: Path to another reproduced baseline run,\n if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.\n @param run_a_path: Path to another reproduced advanced run,\n if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.\n "
if (self.run_b_orig and run_b_path):
if (self.run_a_orig and run_a_path):
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback), 'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
if (self.run_b_orig and run_b_rep):
if (self.run_a_orig and run_a_rep):
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback), 'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
if (self.run_b_orig and self.run_b_rep):
if (self.run_a_orig and self.run_a_rep):
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback), 'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
else:
print(ERR_MSG)
| 7,629,346,488,053,989,000
|
Determines Kendall's tau Union (KTU) between the original and reproduced document orderings
according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
@param run_b_rep: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_rep: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.
|
repro_eval/Evaluator.py
|
ktau_union
|
irgroup/repro_eval
|
python
|
def ktau_union(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False):
"\n Determines Kendall's tau Union (KTU) between the original and reproduced document orderings\n according to the following paper:\n Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.\n How to Measure the Reproducibility of System-oriented IR Experiments.\n Proceedings of SIGIR, pages 349-358, 2020.\n\n @param run_b_rep: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_rep: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_b_path: Path to another reproduced baseline run,\n if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.\n @param run_a_path: Path to another reproduced advanced run,\n if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary with KTU values that compare the document orderings of the original and reproduced runs.\n "
if (self.run_b_orig and run_b_path):
if (self.run_a_orig and run_a_path):
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback), 'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
if (self.run_b_orig and run_b_rep):
if (self.run_a_orig and run_a_rep):
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback), 'advanced': ktu(self.run_a_orig, run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
return {'baseline': ktu(self.run_b_orig, run_b_rep, pbar=print_feedback)}
if (self.run_b_orig and self.run_b_rep):
if (self.run_a_orig and self.run_a_rep):
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline and advanced run.")
return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback), 'advanced': ktu(self.run_a_orig, self.run_a_rep, pbar=print_feedback)}
else:
if print_feedback:
print("Determining Kendall's tau Union (KTU) for baseline run.")
return {'baseline': ktu(self.run_b_orig, self.run_b_rep, pbar=print_feedback)}
else:
print(ERR_MSG)
|
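Kendall's tau Union, as described in the `ktau_union` record, compares per-topic document orderings on the union of both rankings, with documents absent from one ranking treated as tied at the bottom. A per-topic sketch with scipy; the handling of unranked documents here is an assumption about the measure, not repro_eval's exact code:
from scipy.stats import kendalltau

def ktau_union_topic(docs_orig, docs_rep):
    # docs_*: lists of doc ids, best-ranked first.
    union = sorted(set(docs_orig) | set(docs_rep))
    def ranks(docs):
        pos = {d: i for i, d in enumerate(docs)}
        missing = len(docs)  # unranked documents share the bottom rank
        return [pos.get(d, missing) for d in union]
    tau, _ = kendalltau(ranks(docs_orig), ranks(docs_rep))
    return tau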
def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
'\n Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings\n according to the following paper:\n Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.\n How to Measure the Reproducibility of System-oriented IR Experiments.\n Proceedings of SIGIR, pages 349-358, 2020.\n\n @param run_b_rep: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_rep: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_b_path: Path to another reproduced baseline run,\n if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.\n @param run_a_path: Path to another reproduced advanced run,\n if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.\n See also: https://github.com/claclark/Compatibility\n @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.\n '
if (self.run_b_orig and run_b_path):
if (self.run_a_orig and run_a_path):
if print_feedback:
print('Determining Rank-biased Overlap (RBO) for baseline and advanced run.')
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo), 'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print('Determining Rank-biased Overlap (RBO) for baseline run.')
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
if (self.run_b_orig and run_b_rep):
if (self.run_a_orig and run_a_rep):
if print_feedback:
print('Determining Rank-biased Overlap (RBO) for baseline and advanced run.')
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo), 'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print('Determining Rank-biased Overlap (RBO) for baseline run.')
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
if (self.run_b_orig and self.run_b_rep):
if (self.run_a_orig and self.run_a_rep):
if print_feedback:
print('Determining Rank-biased Overlap (RBO) for baseline and advanced run.')
return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo), 'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print('Determining Rank-biased Overlap (RBO) for baseline run.')
return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
else:
print(ERR_MSG)
| -7,116,054,795,594,959,000
|
Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings
according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
@param run_b_rep: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_rep: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.
See also: https://github.com/claclark/Compatibility
@return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.
|
repro_eval/Evaluator.py
|
rbo
|
irgroup/repro_eval
|
python
|
def rbo(self, run_b_rep=None, run_a_rep=None, run_b_path=None, run_a_path=None, print_feedback=False, misinfo=True):
'\n Determines the Rank-Biased Overlap (RBO) between the original and reproduced document orderings\n according to the following paper:\n Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.\n How to Measure the Reproducibility of System-oriented IR Experiments.\n Proceedings of SIGIR, pages 349-358, 2020.\n\n @param run_b_rep: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_rep: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_b_path: Path to another reproduced baseline run,\n if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.\n @param run_a_path: Path to another reproduced advanced run,\n if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @param misinfo: Use the RBO implementation that is also used in the TREC Health Misinformation Track.\n See also: https://github.com/claclark/Compatibility\n @return: Dictionary with RBO values that compare the document orderings of the original and reproduced runs.\n '
if (self.run_b_orig and run_b_path):
if (self.run_a_orig and run_a_path):
if print_feedback:
print('Determining Rank-biased Overlap (RBO) for baseline and advanced run.')
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo), 'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print('Determining Rank-biased Overlap (RBO) for baseline run.')
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
if (self.run_b_orig and run_b_rep):
if (self.run_a_orig and run_a_rep):
if print_feedback:
print('Determining Rank-biased Overlap (RBO) for baseline and advanced run.')
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo), 'advanced': RBO(self.run_a_orig, run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print('Determining Rank-biased Overlap (RBO) for baseline run.')
return {'baseline': RBO(self.run_b_orig, run_b_rep, pbar=print_feedback, misinfo=misinfo)}
if (self.run_b_orig and self.run_b_rep):
if (self.run_a_orig and self.run_a_rep):
if print_feedback:
print('Determining Rank-biased Overlap (RBO) for baseline and advanced run.')
return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo), 'advanced': RBO(self.run_a_orig, self.run_a_rep, pbar=print_feedback, misinfo=misinfo)}
else:
if print_feedback:
print('Determining Rank-biased Overlap (RBO) for baseline run.')
return {'baseline': RBO(self.run_b_orig, self.run_b_rep, pbar=print_feedback, misinfo=misinfo)}
else:
print(ERR_MSG)
|
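Rank-Biased Overlap, per the `rbo` record, weights the prefix overlap at each depth d by a geometric decay p^(d-1), so agreement at the top ranks dominates. A sketch of the truncated RBO sum for two finite rankings; the persistence value p=0.9 is a common default and not necessarily what repro_eval or the misinformation-track implementation uses:
def rbo_topic(docs_orig, docs_rep, p=0.9):
    # Truncated rank-biased overlap of two ranked doc-id lists.
    depth = min(len(docs_orig), len(docs_rep))
    score, seen_a, seen_b = 0.0, set(), set()
    for d in range(1, depth + 1):
        seen_a.add(docs_orig[d - 1])
        seen_b.add(docs_rep[d - 1])
        overlap = len(seen_a & seen_b) / d  # prefix agreement at depth d
        score += (p ** (d - 1)) * overlap
    return (1 - p) * score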
def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
'\n Determines the Root Mean Square Error (RMSE) according to the following paper:\n Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.\n How to Measure the Reproducibility of System-oriented IR Experiments.\n Proceedings of SIGIR, pages 349-358, 2020.\n\n @param run_b_score: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_score: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_b_path: Path to another reproduced baseline run,\n if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.\n @param run_a_path: Path to another reproduced advanced run,\n if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary with RMSE values that measure the closeness\n between the topics scores of the original and reproduced runs.\n '
if (self.run_b_orig and run_b_path):
if (self.run_a_orig and run_a_path):
if print_feedback:
print('Determining Root Mean Square Error (RMSE) for baseline and advanced run.')
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback), 'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print('Determining Root Mean Square Error (RMSE) for baseline run.')
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
if (self.run_b_orig_score and run_b_score):
if (self.run_a_orig_score and run_a_score):
if print_feedback:
print('Determining Root Mean Square Error (RMSE) for baseline and advanced run.')
return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback), 'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
else:
if print_feedback:
print('Determining Root Mean Square Error (RMSE) for baseline run.')
return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
if (self.run_b_orig_score and self.run_b_rep_score):
if (self.run_a_orig_score and self.run_a_rep_score):
if print_feedback:
print('Determining Root Mean Square Error (RMSE) for baseline and advanced run.')
return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback), 'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print('Determining Root Mean Square Error (RMSE) for baseline run.')
return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
else:
print(ERR_MSG)
| 6,958,954,102,970,656,000
|
Determines the Root Mean Square Error (RMSE) according to the following paper:
Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.
How to Measure the Reproducibility of System-oriented IR Experiments.
Proceedings of SIGIR, pages 349-358, 2020.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with RMSE values that measure the closeness
between the topics scores of the original and reproduced runs.
|
repro_eval/Evaluator.py
|
rmse
|
irgroup/repro_eval
|
python
|
def rmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
'\n Determines the Root Mean Square Error (RMSE) according to the following paper:\n Timo Breuer, Nicola Ferro, Norbert Fuhr, Maria Maistro, Tetsuya Sakai, Philipp Schaer, Ian Soboroff.\n How to Measure the Reproducibility of System-oriented IR Experiments.\n Proceedings of SIGIR, pages 349-358, 2020.\n\n @param run_b_score: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_score: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_b_path: Path to another reproduced baseline run,\n if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.\n @param run_a_path: Path to another reproduced advanced run,\n if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary with RMSE values that measure the closeness\n between the topics scores of the original and reproduced runs.\n '
if (self.run_b_orig and run_b_path):
if (self.run_a_orig and run_a_path):
if print_feedback:
print('Determining Root Mean Square Error (RMSE) for baseline and advanced run.')
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback), 'advanced': RMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print('Determining Root Mean Square Error (RMSE) for baseline run.')
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return {'baseline': RMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
if (self.run_b_orig_score and run_b_score):
if (self.run_a_orig_score and run_a_score):
if print_feedback:
print('Determining Root Mean Square Error (RMSE) for baseline and advanced run.')
return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback), 'advanced': RMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
else:
if print_feedback:
print('Determining Root Mean Square Error (RMSE) for baseline run.')
return {'baseline': RMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
if (self.run_b_orig_score and self.run_b_rep_score):
if (self.run_a_orig_score and self.run_a_rep_score):
if print_feedback:
print('Determining Root Mean Square Error (RMSE) for baseline and advanced run.')
return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback), 'advanced': RMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print('Determining Root Mean Square Error (RMSE) for baseline run.')
return {'baseline': RMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
else:
print(ERR_MSG)
|
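A minimal usage sketch for rmse follows. The RpdEvaluator constructor keywords are taken from the repro_eval documentation, but the file paths are placeholders and the exact keyword names should be checked against the installed version:

from repro_eval.Evaluator import RpdEvaluator

# Placeholder paths; constructor keyword names assumed from the repro_eval docs.
rpd = RpdEvaluator(qrel_orig_path='qrels.txt',
                   run_b_orig_path='orig_baseline.txt',
                   run_a_orig_path='orig_advanced.txt',
                   run_b_rep_path='rpd_baseline.txt',
                   run_a_rep_path='rpd_advanced.txt')
rpd.evaluate()                         # populates run_*_orig_score and run_*_rep_score
scores = rpd.rmse(print_feedback=True)
print(scores['baseline'], scores['advanced'])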
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
'\n Determines the normalized Root Mean Square Error (RMSE).\n\n @param run_b_score: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_score: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_b_path: Path to another reproduced baseline run,\n if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.\n @param run_a_path: Path to another reproduced advanced run,\n if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary with nRMSE values that measure the closeness\n between the topics scores of the original and reproduced runs.\n '
if (self.run_b_orig and run_b_path):
if (self.run_a_orig and run_a_path):
if print_feedback:
print('Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.')
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback), 'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print('Determining normalized Root Mean Square Error (RMSE) for baseline run.')
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
if (self.run_b_orig_score and run_b_score):
if (self.run_a_orig_score and run_a_score):
if print_feedback:
print('Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.')
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback), 'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
else:
if print_feedback:
print('Determining normalized Root Mean Square Error (RMSE) for baseline run.')
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
if (self.run_b_orig_score and self.run_b_rep_score):
if (self.run_a_orig_score and self.run_a_rep_score):
if print_feedback:
                    print('Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.')
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback), 'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print('Determining normalized Root Mean Square Error (RMSE) for baseline run.')
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
else:
print(ERR_MSG)
| 7,757,995,920,480,574,000
|
Determines the normalized Root Mean Square Error (RMSE).
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with nRMSE values that measure the closeness
between the topics scores of the original and reproduced runs.
|
repro_eval/Evaluator.py
|
nrmse
|
irgroup/repro_eval
|
python
|
def nrmse(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
'\n Determines the normalized Root Mean Square Error (RMSE).\n\n @param run_b_score: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_score: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_b_path: Path to another reproduced baseline run,\n if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.\n @param run_a_path: Path to another reproduced advanced run,\n if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary with nRMSE values that measure the closeness\n between the topics scores of the original and reproduced runs.\n '
if (self.run_b_orig and run_b_path):
if (self.run_a_orig and run_a_path):
if print_feedback:
print('Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.')
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback), 'advanced': nRMSE(self.run_a_orig_score, run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print('Determining normalized Root Mean Square Error (RMSE) for baseline run.')
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return {'baseline': nRMSE(self.run_b_orig_score, run_b_rep_score, pbar=print_feedback)}
if (self.run_b_orig_score and run_b_score):
if (self.run_a_orig_score and run_a_score):
if print_feedback:
print('Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.')
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback), 'advanced': nRMSE(self.run_a_orig_score, run_a_score, pbar=print_feedback)}
else:
if print_feedback:
print('Determining normalized Root Mean Square Error (RMSE) for baseline run.')
return {'baseline': nRMSE(self.run_b_orig_score, run_b_score, pbar=print_feedback)}
if (self.run_b_orig_score and self.run_b_rep_score):
if (self.run_a_orig_score and self.run_a_rep_score):
if print_feedback:
                    print('Determining normalized Root Mean Square Error (RMSE) for baseline and advanced run.')
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback), 'advanced': nRMSE(self.run_a_orig_score, self.run_a_rep_score, pbar=print_feedback)}
else:
if print_feedback:
print('Determining normalized Root Mean Square Error (RMSE) for baseline run.')
return {'baseline': nRMSE(self.run_b_orig_score, self.run_b_rep_score, pbar=print_feedback)}
else:
print(ERR_MSG)
|
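Besides run paths, nrmse (like rmse) also accepts pre-evaluated score dictionaries via run_b_score/run_a_score; a sketch reusing rpd from the example above, where other_scores is an assumed {topic: {measure: value}} dictionary as produced by pytrec_eval:

# other_scores is hypothetical, shaped like a pytrec_eval evaluation result.
nrmse_scores = rpd.nrmse(run_b_score=other_scores, print_feedback=True)
print(nrmse_scores['baseline'])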
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
'\n Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection\n as in the original experiment.\n\n @param run_b_score: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_score: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_b_path: Path to another reproduced baseline run,\n if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.\n @param run_a_path: Path to another reproduced advanced run,\n if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.\n '
if run_b_path:
if run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return self._ttest(run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
else:
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return self._ttest(run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
| -5,172,674,030,073,816,000
|
Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection
as in the original experiment.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another reproduced baseline run,
if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.
@param run_a_path: Path to another reproduced advanced run,
if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
|
repro_eval/Evaluator.py
|
ttest
|
irgroup/repro_eval
|
python
|
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
'\n Conducts a paired two-tailed t-test for reproduced runs that were derived from the same test collection\n as in the original experiment.\n\n @param run_b_score: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_score: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_b_path: Path to another reproduced baseline run,\n if not provided the reproduced baseline run of the RpdEvaluator object will be used instead.\n @param run_a_path: Path to another reproduced advanced run,\n if not provided the reproduced advanced run of the RpdEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.\n '
if run_b_path:
if run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval.evaluate(run_a_rep)
return self._ttest(run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
else:
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval.evaluate(run_b_rep)
return self._ttest(run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
return self._ttest(run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
|
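The _ttest helper is not shown in this snapshot. For the reproducibility case the docstring specifies a paired two-tailed test, so per measure it plausibly reduces to the following sketch; scipy.stats.ttest_rel is a real function, but the reshaping of the score dictionaries is an illustrative assumption, not the verbatim repro_eval code:

from scipy.stats import ttest_rel  # paired two-tailed t-test

def paired_pvalue(orig, rep, measure):
    # orig and rep are {topic: {measure: value}} dicts over the same topics.
    topics = sorted(orig)
    a = [orig[t][measure] for t in topics]
    b = [rep[t][measure] for t in topics]
    return ttest_rel(a, b).pvalue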
def evaluate(self, run=None):
        '\n        Evaluates the scores of the original and replicated baseline and advanced runs.\n        If a (replicated) run is provided, only this one will be evaluated and a dictionary with the corresponding\n        scores is returned.\n        @param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will\n        be used instead.\n        @return: If run is specified, a dictionary with the corresponding scores is returned.\n        '
if run:
return self.rel_eval_rpl.evaluate(run)
super(RplEvaluator, self).evaluate()
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
self.run_b_rep_score = self.rel_eval_rpl.evaluate(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
self.run_a_rep_score = self.rel_eval_rpl.evaluate(self.run_a_rep)
| -1,068,061,838,797,665,700
|
Evaluates the scores of the original and replicated baseline and advanced runs.
If a (replicated) run is provided, only this one will be evaluated and a dictionary with the corresponding
scores is returned.
@param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will
be used instead.
@return: If run is specified, a dictionary with the corresponding scores is returned.
|
repro_eval/Evaluator.py
|
evaluate
|
irgroup/repro_eval
|
python
|
def evaluate(self, run=None):
        '\n        Evaluates the scores of the original and replicated baseline and advanced runs.\n        If a (replicated) run is provided, only this one will be evaluated and a dictionary with the corresponding\n        scores is returned.\n        @param run: A replicated run. If not specified, the original and replicated runs of the RplEvaluator will\n        be used instead.\n        @return: If run is specified, a dictionary with the corresponding scores is returned.\n        '
if run:
return self.rel_eval_rpl.evaluate(run)
super(RplEvaluator, self).evaluate()
if self.run_b_rep:
self.run_b_rep = break_ties(self.run_b_rep)
self.run_b_rep_score = self.rel_eval_rpl.evaluate(self.run_b_rep)
if self.run_a_rep:
self.run_a_rep = break_ties(self.run_a_rep)
self.run_a_rep_score = self.rel_eval_rpl.evaluate(self.run_a_rep)
|
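Assuming an RplEvaluator constructed analogously to the RpdEvaluator example above (with the replicated collection's qrels), the typical call order is:

rpl.evaluate()                            # scores original and replicated runs
p_values = rpl.ttest(print_feedback=True)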
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
'\n Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection\n not used in the original experiment.\n\n @param run_b_score: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_score: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_b_path: Path to another replicated baseline run,\n if not provided the replicated baseline run of the RplEvaluator object will be used instead.\n @param run_a_path: Path to another replicated advanced run,\n if not provided the replicated advanced run of the RplEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.\n '
if run_b_path:
if run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep)
return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
else:
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
| 3,054,882,994,089,691,000
|
Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection
not used in the original experiment.
@param run_b_score: Scores of the baseline run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_a_score: Scores of the advanced run,
if not provided the scores of the RpdEvaluator object will be used instead.
@param run_b_path: Path to another replicated baseline run,
if not provided the replicated baseline run of the RplEvaluator object will be used instead.
@param run_a_path: Path to another replicated advanced run,
if not provided the replicated advanced run of the RplEvaluator object will be used instead.
@param print_feedback: Boolean value indicating if feedback on progress should be printed.
@return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.
|
repro_eval/Evaluator.py
|
ttest
|
irgroup/repro_eval
|
python
|
def ttest(self, run_b_score=None, run_a_score=None, run_b_path=None, run_a_path=None, print_feedback=False):
'\n Conducts an un-paired two-tailed t-test for replicated runs that were derived from a test collection\n not used in the original experiment.\n\n @param run_b_score: Scores of the baseline run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_a_score: Scores of the advanced run,\n if not provided the scores of the RpdEvaluator object will be used instead.\n @param run_b_path: Path to another replicated baseline run,\n if not provided the replicated baseline run of the RplEvaluator object will be used instead.\n @param run_a_path: Path to another replicated advanced run,\n if not provided the replicated advanced run of the RplEvaluator object will be used instead.\n @param print_feedback: Boolean value indicating if feedback on progress should be printed.\n @return: Dictionary with p-values that compare the score distributions of the baseline and advanced run.\n '
if run_b_path:
if run_a_path:
with open(run_b_path, 'r') as b_run, open(run_a_path, 'r') as a_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
run_a_rep = pytrec_eval.parse_run(a_run)
run_a_rep = {t: run_a_rep[t] for t in sorted(run_a_rep)}
run_a_rep_score = self.rel_eval_rpl.evaluate(run_a_rep)
return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=run_a_rep_score, print_feedback=print_feedback)
else:
with open(run_b_path, 'r') as b_run:
run_b_rep = pytrec_eval.parse_run(b_run)
run_b_rep = {t: run_b_rep[t] for t in sorted(run_b_rep)}
run_b_rep_score = self.rel_eval_rpl.evaluate(run_b_rep)
return self._ttest(rpd=False, run_b_score=run_b_rep_score, run_a_score=None, print_feedback=print_feedback)
return self._ttest(rpd=False, run_b_score=run_b_score, run_a_score=run_a_score, print_feedback=print_feedback)
|
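Because replicated runs come from a different test collection, topic scores cannot be paired; the unpaired analogue of the earlier sketch swaps in scipy.stats.ttest_ind (again an assumption about what _ttest(rpd=False, ...) does internally):

from scipy.stats import ttest_ind  # unpaired (independent) two-tailed t-test

def unpaired_pvalue(orig, rep, measure):
    # Topic sets may differ across collections, so no alignment is assumed.
    a = [s[measure] for s in orig.values()]
    b = [s[measure] for s in rep.values()]
    return ttest_ind(a, b).pvalue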
def _is_visible(idx_row, idx_col, lengths):
    '\n    Index -> {(idx_row, idx_col): bool}.\n    '
return ((idx_col, idx_row) in lengths)
| 2,937,382,027,686,599,000
|
Index -> {(idx_row, idx_col): bool}.
|
pandas/io/formats/style.py
|
_is_visible
|
harunpehlivan/pandas
|
python
|
def _is_visible(idx_row, idx_col, lengths):
'\n \n '
return ((idx_col, idx_row) in lengths)
|
def _get_level_lengths(index, hidden_elements=None):
    '\n    Given an index, find the level length for each element.\n\n    Optional argument is a list of index positions which\n    should not be visible.\n\n    Result is a dictionary of (level, initial_position): span\n    '
sentinel = object()
levels = index.format(sparsify=sentinel, adjoin=False, names=False)
if (hidden_elements is None):
hidden_elements = []
lengths = {}
if (index.nlevels == 1):
for (i, value) in enumerate(levels):
if (i not in hidden_elements):
lengths[(0, i)] = 1
return lengths
for (i, lvl) in enumerate(levels):
for (j, row) in enumerate(lvl):
if (not get_option('display.multi_sparse')):
lengths[(i, j)] = 1
elif ((row != sentinel) and (j not in hidden_elements)):
last_label = j
lengths[(i, last_label)] = 1
elif (row != sentinel):
last_label = j
lengths[(i, last_label)] = 0
elif (j not in hidden_elements):
lengths[(i, last_label)] += 1
non_zero_lengths = {element: length for (element, length) in lengths.items() if (length >= 1)}
return non_zero_lengths
| -7,061,920,920,471,018,000
|
Given an index, find the level length for each element.
Optional argument is a list of index positions which
should not be visible.
Result is a dictionary of (level, initial_position): span
|
pandas/io/formats/style.py
|
_get_level_lengths
|
harunpehlivan/pandas
|
python
|
def _get_level_lengths(index, hidden_elements=None):
    '\n    Given an index, find the level length for each element.\n\n    Optional argument is a list of index positions which\n    should not be visible.\n\n    Result is a dictionary of (level, initial_position): span\n    '
sentinel = object()
levels = index.format(sparsify=sentinel, adjoin=False, names=False)
if (hidden_elements is None):
hidden_elements = []
lengths = {}
if (index.nlevels == 1):
for (i, value) in enumerate(levels):
if (i not in hidden_elements):
lengths[(0, i)] = 1
return lengths
for (i, lvl) in enumerate(levels):
for (j, row) in enumerate(lvl):
if (not get_option('display.multi_sparse')):
lengths[(i, j)] = 1
elif ((row != sentinel) and (j not in hidden_elements)):
last_label = j
lengths[(i, last_label)] = 1
elif (row != sentinel):
last_label = j
lengths[(i, last_label)] = 0
elif (j not in hidden_elements):
lengths[(i, last_label)] += 1
non_zero_lengths = {element: length for (element, length) in lengths.items() if (length >= 1)}
return non_zero_lengths
|
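A worked illustration of the span dictionary on a small MultiIndex; the expected result is hand-derived from the sparsify logic above rather than executed against a specific pandas version:

import pandas as pd

idx = pd.MultiIndex.from_arrays([['a', 'a', 'b'], [0, 1, 0]])
# Sparsified, 'a' spans rows 0-1 on level 0 and 'b' spans row 2, while
# every level-1 label spans a single row, so the expected result is
#   {(0, 0): 2, (0, 2): 1, (1, 0): 1, (1, 1): 1, (1, 2): 1}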
def _repr_html_(self):
'\n Hooks into Jupyter notebook rich display system.\n '
return self.render()
| -7,018,489,556,849,788,000
|
Hooks into Jupyter notebook rich display system.
|
pandas/io/formats/style.py
|
_repr_html_
|
harunpehlivan/pandas
|
python
|
def _repr_html_(self):
'\n \n '
return self.render()
|
def _translate(self):
'\n Convert the DataFrame in `self.data` and the attrs from `_build_styles`\n into a dictionary of {head, body, uuid, cellstyle}.\n '
table_styles = (self.table_styles or [])
caption = self.caption
ctx = self.ctx
precision = self.precision
hidden_index = self.hidden_index
hidden_columns = self.hidden_columns
uuid = (self.uuid or str(uuid1()).replace('-', '_'))
ROW_HEADING_CLASS = 'row_heading'
COL_HEADING_CLASS = 'col_heading'
INDEX_NAME_CLASS = 'index_name'
DATA_CLASS = 'data'
BLANK_CLASS = 'blank'
BLANK_VALUE = ''
def format_attr(pair):
return '{key}={value}'.format(**pair)
idx_lengths = _get_level_lengths(self.index)
col_lengths = _get_level_lengths(self.columns, hidden_columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
if (n_rlvls == 1):
rlabels = [[x] for x in rlabels]
if (n_clvls == 1):
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
row_es = ([{'type': 'th', 'value': BLANK_VALUE, 'display_value': BLANK_VALUE, 'is_visible': (not hidden_index), 'class': ' '.join([BLANK_CLASS])}] * (n_rlvls - 1))
name = self.data.columns.names[r]
cs = [(BLANK_CLASS if (name is None) else INDEX_NAME_CLASS), 'level{lvl}'.format(lvl=r)]
name = (BLANK_VALUE if (name is None) else name)
row_es.append({'type': 'th', 'value': name, 'display_value': name, 'class': ' '.join(cs), 'is_visible': (not hidden_index)})
if clabels:
for (c, value) in enumerate(clabels[r]):
cs = [COL_HEADING_CLASS, 'level{lvl}'.format(lvl=r), 'col{col}'.format(col=c)]
cs.extend(cell_context.get('col_headings', {}).get(r, {}).get(c, []))
es = {'type': 'th', 'value': value, 'display_value': value, 'class': ' '.join(cs), 'is_visible': _is_visible(c, r, col_lengths)}
colspan = col_lengths.get((r, c), 0)
if (colspan > 1):
es['attributes'] = [format_attr({'key': 'colspan', 'value': colspan})]
row_es.append(es)
head.append(row_es)
if (self.data.index.names and com._any_not_none(*self.data.index.names) and (not hidden_index)):
index_header_row = []
for (c, name) in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS, 'level{lvl}'.format(lvl=c)]
name = ('' if (name is None) else name)
index_header_row.append({'type': 'th', 'value': name, 'class': ' '.join(cs)})
index_header_row.extend(([{'type': 'th', 'value': BLANK_VALUE, 'class': ' '.join([BLANK_CLASS])}] * (len(clabels[0]) - len(hidden_columns))))
head.append(index_header_row)
body = []
for (r, idx) in enumerate(self.data.index):
row_es = []
for (c, value) in enumerate(rlabels[r]):
rid = [ROW_HEADING_CLASS, 'level{lvl}'.format(lvl=c), 'row{row}'.format(row=r)]
es = {'type': 'th', 'is_visible': (_is_visible(r, c, idx_lengths) and (not hidden_index)), 'value': value, 'display_value': value, 'id': '_'.join(rid[1:]), 'class': ' '.join(rid)}
rowspan = idx_lengths.get((c, r), 0)
if (rowspan > 1):
es['attributes'] = [format_attr({'key': 'rowspan', 'value': rowspan})]
row_es.append(es)
for (c, col) in enumerate(self.data.columns):
cs = [DATA_CLASS, 'row{row}'.format(row=r), 'col{col}'.format(col=c)]
cs.extend(cell_context.get('data', {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[(r, c)]
row_dict = {'type': 'td', 'value': value, 'class': ' '.join(cs), 'display_value': formatter(value), 'is_visible': (c not in hidden_columns)}
if (self.cell_ids or (not ((len(ctx[(r, c)]) == 1) and (ctx[(r, c)][0] == '')))):
row_dict['id'] = '_'.join(cs[1:])
row_es.append(row_dict)
props = []
for x in ctx[(r, c)]:
if x.count(':'):
props.append(x.split(':'))
else:
props.append(['', ''])
cellstyle.append({'props': props, 'selector': 'row{row}_col{col}'.format(row=r, col=c)})
body.append(row_es)
table_attr = self.table_attributes
use_mathjax = get_option('display.html.use_mathjax')
if (not use_mathjax):
table_attr = (table_attr or '')
if ('class="' in table_attr):
table_attr = table_attr.replace('class="', 'class="tex2jax_ignore ')
else:
table_attr += ' class="tex2jax_ignore"'
return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid, precision=precision, table_styles=table_styles, caption=caption, table_attributes=table_attr)
| 7,732,696,741,437,161,000
|
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}.
|
pandas/io/formats/style.py
|
_translate
|
harunpehlivan/pandas
|
python
|
def _translate(self):
'\n Convert the DataFrame in `self.data` and the attrs from `_build_styles`\n into a dictionary of {head, body, uuid, cellstyle}.\n '
table_styles = (self.table_styles or [])
caption = self.caption
ctx = self.ctx
precision = self.precision
hidden_index = self.hidden_index
hidden_columns = self.hidden_columns
uuid = (self.uuid or str(uuid1()).replace('-', '_'))
ROW_HEADING_CLASS = 'row_heading'
COL_HEADING_CLASS = 'col_heading'
INDEX_NAME_CLASS = 'index_name'
DATA_CLASS = 'data'
BLANK_CLASS = 'blank'
        BLANK_VALUE = ''
def format_attr(pair):
return '{key}={value}'.format(**pair)
idx_lengths = _get_level_lengths(self.index)
col_lengths = _get_level_lengths(self.columns, hidden_columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
if (n_rlvls == 1):
rlabels = [[x] for x in rlabels]
if (n_clvls == 1):
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
row_es = ([{'type': 'th', 'value': BLANK_VALUE, 'display_value': BLANK_VALUE, 'is_visible': (not hidden_index), 'class': ' '.join([BLANK_CLASS])}] * (n_rlvls - 1))
name = self.data.columns.names[r]
cs = [(BLANK_CLASS if (name is None) else INDEX_NAME_CLASS), 'level{lvl}'.format(lvl=r)]
name = (BLANK_VALUE if (name is None) else name)
row_es.append({'type': 'th', 'value': name, 'display_value': name, 'class': ' '.join(cs), 'is_visible': (not hidden_index)})
if clabels:
for (c, value) in enumerate(clabels[r]):
cs = [COL_HEADING_CLASS, 'level{lvl}'.format(lvl=r), 'col{col}'.format(col=c)]
cs.extend(cell_context.get('col_headings', {}).get(r, {}).get(c, []))
es = {'type': 'th', 'value': value, 'display_value': value, 'class': ' '.join(cs), 'is_visible': _is_visible(c, r, col_lengths)}
colspan = col_lengths.get((r, c), 0)
if (colspan > 1):
es['attributes'] = [format_attr({'key': 'colspan', 'value': colspan})]
row_es.append(es)
head.append(row_es)
if (self.data.index.names and com._any_not_none(*self.data.index.names) and (not hidden_index)):
index_header_row = []
for (c, name) in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS, 'level{lvl}'.format(lvl=c)]
                name = ('' if (name is None) else name)
index_header_row.append({'type': 'th', 'value': name, 'class': ' '.join(cs)})
index_header_row.extend(([{'type': 'th', 'value': BLANK_VALUE, 'class': ' '.join([BLANK_CLASS])}] * (len(clabels[0]) - len(hidden_columns))))
head.append(index_header_row)
body = []
for (r, idx) in enumerate(self.data.index):
row_es = []
for (c, value) in enumerate(rlabels[r]):
rid = [ROW_HEADING_CLASS, 'level{lvl}'.format(lvl=c), 'row{row}'.format(row=r)]
es = {'type': 'th', 'is_visible': (_is_visible(r, c, idx_lengths) and (not hidden_index)), 'value': value, 'display_value': value, 'id': '_'.join(rid[1:]), 'class': ' '.join(rid)}
rowspan = idx_lengths.get((c, r), 0)
if (rowspan > 1):
es['attributes'] = [format_attr({'key': 'rowspan', 'value': rowspan})]
row_es.append(es)
for (c, col) in enumerate(self.data.columns):
cs = [DATA_CLASS, 'row{row}'.format(row=r), 'col{col}'.format(col=c)]
cs.extend(cell_context.get('data', {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[(r, c)]
row_dict = {'type': 'td', 'value': value, 'class': ' '.join(cs), 'display_value': formatter(value), 'is_visible': (c not in hidden_columns)}
                if (self.cell_ids or (not ((len(ctx[(r, c)]) == 1) and (ctx[(r, c)][0] == '')))):
row_dict['id'] = '_'.join(cs[1:])
row_es.append(row_dict)
props = []
for x in ctx[(r, c)]:
if x.count(':'):
props.append(x.split(':'))
else:
                        props.append(['', ''])
cellstyle.append({'props': props, 'selector': 'row{row}_col{col}'.format(row=r, col=c)})
body.append(row_es)
table_attr = self.table_attributes
use_mathjax = get_option('display.html.use_mathjax')
if (not use_mathjax):
            table_attr = (table_attr or '')
if ('class="' in table_attr):
table_attr = table_attr.replace('class="', 'class="tex2jax_ignore ')
else:
table_attr += ' class="tex2jax_ignore"'
return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid, precision=precision, table_styles=table_styles, caption=caption, table_attributes=table_attr)
|
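_translate is private, but its keys feed the Jinja template used by render; a hedged peek, with the key list read off the return statement above (the underscore API is only stable within this pandas snapshot):

import numpy as np
import pandas as pd

styler = pd.DataFrame(np.random.randn(3, 2)).style
d = styler._translate()   # private API
sorted(d)  # ['body', 'caption', 'cellstyle', 'head', 'precision',
           #  'table_attributes', 'table_styles', 'uuid']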
def format(self, formatter, subset=None):
'\n Format the text display value of cells.\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n formatter : str, callable, or dict\n subset : IndexSlice\n An argument to ``DataFrame.loc`` that restricts which elements\n ``formatter`` is applied to.\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n\n ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where\n ``a`` is one of\n\n - str: this will be wrapped in: ``a.format(x)``\n - callable: called with the value of an individual cell\n\n The default display value for numeric values is the "general" (``g``)\n format with ``pd.options.display.precision`` precision.\n\n Examples\n --------\n\n >>> df = pd.DataFrame(np.random.randn(4, 2), columns=[\'a\', \'b\'])\n >>> df.style.format("{:.2%}")\n >>> df[\'c\'] = [\'a\', \'b\', \'c\', \'d\']\n >>> df.style.format({\'c\': str.upper})\n '
if (subset is None):
row_locs = range(len(self.data))
col_locs = range(len(self.data.columns))
else:
subset = _non_reducing_slice(subset)
if (len(subset) == 1):
subset = (subset, self.data.columns)
sub_df = self.data.loc[subset]
row_locs = self.data.index.get_indexer_for(sub_df.index)
col_locs = self.data.columns.get_indexer_for(sub_df.columns)
if is_dict_like(formatter):
for (col, col_formatter) in formatter.items():
col_formatter = _maybe_wrap_formatter(col_formatter)
col_num = self.data.columns.get_indexer_for([col])[0]
for row_num in row_locs:
self._display_funcs[(row_num, col_num)] = col_formatter
else:
locs = product(*(row_locs, col_locs))
for (i, j) in locs:
formatter = _maybe_wrap_formatter(formatter)
self._display_funcs[(i, j)] = formatter
return self
| -4,866,826,672,809,622,000
|
Format the text display value of cells.
.. versionadded:: 0.18.0
Parameters
----------
formatter : str, callable, or dict
subset : IndexSlice
An argument to ``DataFrame.loc`` that restricts which elements
``formatter`` is applied to.
Returns
-------
self : Styler
Notes
-----
``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
``a`` is one of
- str: this will be wrapped in: ``a.format(x)``
- callable: called with the value of an individual cell
The default display value for numeric values is the "general" (``g``)
format with ``pd.options.display.precision`` precision.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
>>> df.style.format("{:.2%}")
>>> df['c'] = ['a', 'b', 'c', 'd']
>>> df.style.format({'c': str.upper})
|
pandas/io/formats/style.py
|
format
|
harunpehlivan/pandas
|
python
|
def format(self, formatter, subset=None):
'\n Format the text display value of cells.\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n formatter : str, callable, or dict\n subset : IndexSlice\n An argument to ``DataFrame.loc`` that restricts which elements\n ``formatter`` is applied to.\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n\n ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where\n ``a`` is one of\n\n - str: this will be wrapped in: ``a.format(x)``\n - callable: called with the value of an individual cell\n\n The default display value for numeric values is the "general" (``g``)\n format with ``pd.options.display.precision`` precision.\n\n Examples\n --------\n\n >>> df = pd.DataFrame(np.random.randn(4, 2), columns=[\'a\', \'b\'])\n >>> df.style.format("{:.2%}")\n >>> df[\'c\'] = [\'a\', \'b\', \'c\', \'d\']\n >>> df.style.format({\'c\': str.upper})\n '
if (subset is None):
row_locs = range(len(self.data))
col_locs = range(len(self.data.columns))
else:
subset = _non_reducing_slice(subset)
if (len(subset) == 1):
subset = (subset, self.data.columns)
sub_df = self.data.loc[subset]
row_locs = self.data.index.get_indexer_for(sub_df.index)
col_locs = self.data.columns.get_indexer_for(sub_df.columns)
if is_dict_like(formatter):
for (col, col_formatter) in formatter.items():
col_formatter = _maybe_wrap_formatter(col_formatter)
col_num = self.data.columns.get_indexer_for([col])[0]
for row_num in row_locs:
self._display_funcs[(row_num, col_num)] = col_formatter
else:
locs = product(*(row_locs, col_locs))
for (i, j) in locs:
formatter = _maybe_wrap_formatter(formatter)
self._display_funcs[(i, j)] = formatter
return self
|
def render(self, **kwargs):
"\n Render the built up styles to HTML.\n\n Parameters\n ----------\n `**kwargs` : Any additional keyword arguments are passed through\n to ``self.template.render``. This is useful when you need to provide\n additional variables for a custom template.\n\n .. versionadded:: 0.20\n\n Returns\n -------\n rendered : str\n The rendered HTML\n\n Notes\n -----\n ``Styler`` objects have defined the ``_repr_html_`` method\n which automatically calls ``self.render()`` when it's the\n last item in a Notebook cell. When calling ``Styler.render()``\n directly, wrap the result in ``IPython.display.HTML`` to view\n the rendered HTML in the notebook.\n\n Pandas uses the following keys in render. Arguments passed\n in ``**kwargs`` take precedence, so think carefully if you want\n to override them:\n\n * head\n * cellstyle\n * body\n * uuid\n * precision\n * table_styles\n * caption\n * table_attributes\n "
self._compute()
d = self._translate()
trimmed = [x for x in d['cellstyle'] if any((any(y) for y in x['props']))]
d['cellstyle'] = trimmed
d.update(kwargs)
return self.template.render(**d)
| 4,806,731,898,727,246,000
|
Render the built up styles to HTML.
Parameters
----------
`**kwargs` : Any additional keyword arguments are passed through
to ``self.template.render``. This is useful when you need to provide
additional variables for a custom template.
.. versionadded:: 0.20
Returns
-------
rendered : str
The rendered HTML
Notes
-----
``Styler`` objects have defined the ``_repr_html_`` method
which automatically calls ``self.render()`` when it's the
last item in a Notebook cell. When calling ``Styler.render()``
directly, wrap the result in ``IPython.display.HTML`` to view
the rendered HTML in the notebook.
Pandas uses the following keys in render. Arguments passed
in ``**kwargs`` take precedence, so think carefully if you want
to override them:
* head
* cellstyle
* body
* uuid
* precision
* table_styles
* caption
* table_attributes
|
pandas/io/formats/style.py
|
render
|
harunpehlivan/pandas
|
python
|
def render(self, **kwargs):
"\n Render the built up styles to HTML.\n\n Parameters\n ----------\n `**kwargs` : Any additional keyword arguments are passed through\n to ``self.template.render``. This is useful when you need to provide\n additional variables for a custom template.\n\n .. versionadded:: 0.20\n\n Returns\n -------\n rendered : str\n The rendered HTML\n\n Notes\n -----\n ``Styler`` objects have defined the ``_repr_html_`` method\n which automatically calls ``self.render()`` when it's the\n last item in a Notebook cell. When calling ``Styler.render()``\n directly, wrap the result in ``IPython.display.HTML`` to view\n the rendered HTML in the notebook.\n\n Pandas uses the following keys in render. Arguments passed\n in ``**kwargs`` take precedence, so think carefully if you want\n to override them:\n\n * head\n * cellstyle\n * body\n * uuid\n * precision\n * table_styles\n * caption\n * table_attributes\n "
self._compute()
d = self._translate()
trimmed = [x for x in d['cellstyle'] if any((any(y) for y in x['props']))]
d['cellstyle'] = trimmed
d.update(kwargs)
return self.template.render(**d)
|
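Following the Notes above, an explicit render call wrapped for notebook display (df is assumed to be a numeric DataFrame):

from IPython.display import HTML

html = df.style.applymap(lambda v: 'color: red' if v < 0 else '').render()
HTML(html)  # needed because an explicit render() bypasses _repr_html_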
def _update_ctx(self, attrs):
"\n Update the state of the Styler.\n\n Collects a mapping of {index_label: ['<property>: <value>']}.\n\n attrs : Series or DataFrame\n should contain strings of '<property>: <value>;<prop2>: <val2>'\n Whitespace shouldn't matter and the final trailing ';' shouldn't\n matter.\n "
for (row_label, v) in attrs.iterrows():
for (col_label, col) in v.iteritems():
i = self.index.get_indexer([row_label])[0]
j = self.columns.get_indexer([col_label])[0]
for pair in col.rstrip(';').split(';'):
self.ctx[(i, j)].append(pair)
| 5,688,525,126,651,154,000
|
Update the state of the Styler.
Collects a mapping of {index_label: ['<property>: <value>']}.
attrs : Series or DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
|
pandas/io/formats/style.py
|
_update_ctx
|
harunpehlivan/pandas
|
python
|
def _update_ctx(self, attrs):
"\n Update the state of the Styler.\n\n Collects a mapping of {index_label: ['<property>: <value>']}.\n\n attrs : Series or DataFrame\n should contain strings of '<property>: <value>;<prop2>: <val2>'\n Whitespace shouldn't matter and the final trailing ';' shouldn't\n matter.\n "
for (row_label, v) in attrs.iterrows():
for (col_label, col) in v.iteritems():
i = self.index.get_indexer([row_label])[0]
j = self.columns.get_indexer([col_label])[0]
for pair in col.rstrip(';').split(';'):
self.ctx[(i, j)].append(pair)
|
def __copy__(self):
        '\n        Shallow copy by default.\n        '
return self._copy(deepcopy=False)
| 8,099,102,557,575,076,000
|
Shallow copy by default.
|
pandas/io/formats/style.py
|
__copy__
|
harunpehlivan/pandas
|
python
|
def __copy__(self):
'\n \n '
return self._copy(deepcopy=False)
|
def clear(self):
'\n Reset the styler, removing any previously applied styles.\n Returns None.\n '
self.ctx.clear()
self._todo = []
| 3,962,199,865,544,457,000
|
Reset the styler, removing any previously applied styles.
Returns None.
|
pandas/io/formats/style.py
|
clear
|
harunpehlivan/pandas
|
python
|
def clear(self):
'\n Reset the styler, removing any previously applied styles.\n Returns None.\n '
self.ctx.clear()
self._todo = []
|
def _compute(self):
        '\n        Execute the style functions built up in `self._todo`.\n\n        Relies on the conventions that all style functions go through\n        .apply or .applymap. They append styles to apply as tuples of\n\n        (application method, *args, **kwargs)\n        '
r = self
for (func, args, kwargs) in self._todo:
r = func(self)(*args, **kwargs)
return r
| 3,414,097,489,805,550,000
|
Execute the style functions built up in `self._todo`.
Relies on the conventions that all style functions go through
.apply or .applymap. They append styles to apply as tuples of
(application method, *args, **kwargs)
|
pandas/io/formats/style.py
|
_compute
|
harunpehlivan/pandas
|
python
|
def _compute(self):
        '\n        Execute the style functions built up in `self._todo`.\n\n        Relies on the conventions that all style functions go through\n        .apply or .applymap. They append styles to apply as tuples of\n\n        (application method, *args, **kwargs)\n        '
r = self
for (func, args, kwargs) in self._todo:
r = func(self)(*args, **kwargs)
return r
|
def apply(self, func, axis=0, subset=None, **kwargs):
"\n Apply a function column-wise, row-wise, or table-wise,\n updating the HTML representation with the result.\n\n Parameters\n ----------\n func : function\n ``func`` should take a Series or DataFrame (depending\n on ``axis``), and return an object with the same shape.\n Must return a DataFrame with identical index and\n column labels when ``axis=None``\n axis : int, str or None\n apply to each column (``axis=0`` or ``'index'``)\n or to each row (``axis=1`` or ``'columns'``) or\n to the entire DataFrame at once with ``axis=None``\n subset : IndexSlice\n a valid indexer to limit ``data`` to *before* applying the\n function. Consider using a pandas.IndexSlice\n kwargs : dict\n pass along to ``func``\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n The output shape of ``func`` should match the input, i.e. if\n ``x`` is the input row, column, or table (depending on ``axis``),\n then ``func(x).shape == x.shape`` should be true.\n\n This is similar to ``DataFrame.apply``, except that ``axis=None``\n applies the function to the entire DataFrame at once,\n rather than column-wise or row-wise.\n\n Examples\n --------\n >>> def highlight_max(x):\n ... return ['background-color: yellow' if v == x.max() else ''\n for v in x]\n ...\n >>> df = pd.DataFrame(np.random.randn(5, 2))\n >>> df.style.apply(highlight_max)\n "
self._todo.append(((lambda instance: getattr(instance, '_apply')), (func, axis, subset), kwargs))
return self
| 2,671,970,695,599,941,000
|
Apply a function column-wise, row-wise, or table-wise,
updating the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a Series or DataFrame (depending
on ``axis``), and return an object with the same shape.
Must return a DataFrame with identical index and
column labels when ``axis=None``
axis : int, str or None
apply to each column (``axis=0`` or ``'index'``)
or to each row (``axis=1`` or ``'columns'``) or
to the entire DataFrame at once with ``axis=None``
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
Notes
-----
The output shape of ``func`` should match the input, i.e. if
``x`` is the input row, column, or table (depending on ``axis``),
then ``func(x).shape == x.shape`` should be true.
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
Examples
--------
>>> def highlight_max(x):
... return ['background-color: yellow' if v == x.max() else ''
for v in x]
...
>>> df = pd.DataFrame(np.random.randn(5, 2))
>>> df.style.apply(highlight_max)
|
pandas/io/formats/style.py
|
apply
|
harunpehlivan/pandas
|
python
|
def apply(self, func, axis=0, subset=None, **kwargs):
"\n Apply a function column-wise, row-wise, or table-wise,\n updating the HTML representation with the result.\n\n Parameters\n ----------\n func : function\n ``func`` should take a Series or DataFrame (depending\n on ``axis``), and return an object with the same shape.\n Must return a DataFrame with identical index and\n column labels when ``axis=None``\n axis : int, str or None\n apply to each column (``axis=0`` or ``'index'``)\n or to each row (``axis=1`` or ``'columns'``) or\n to the entire DataFrame at once with ``axis=None``\n subset : IndexSlice\n a valid indexer to limit ``data`` to *before* applying the\n function. Consider using a pandas.IndexSlice\n kwargs : dict\n pass along to ``func``\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n The output shape of ``func`` should match the input, i.e. if\n ``x`` is the input row, column, or table (depending on ``axis``),\n then ``func(x).shape == x.shape`` should be true.\n\n This is similar to ``DataFrame.apply``, except that ``axis=None``\n applies the function to the entire DataFrame at once,\n rather than column-wise or row-wise.\n\n Examples\n --------\n >>> def highlight_max(x):\n ... return ['background-color: yellow' if v == x.max() else \n for v in x]\n ...\n >>> df = pd.DataFrame(np.random.randn(5, 2))\n >>> df.style.apply(highlight_max)\n "
self._todo.append(((lambda instance: getattr(instance, '_apply')), (func, axis, subset), kwargs))
return self
|
def applymap(self, func, subset=None, **kwargs):
'\n Apply a function elementwise, updating the HTML\n representation with the result.\n\n Parameters\n ----------\n func : function\n ``func`` should take a scalar and return a scalar\n subset : IndexSlice\n a valid indexer to limit ``data`` to *before* applying the\n function. Consider using a pandas.IndexSlice\n kwargs : dict\n pass along to ``func``\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.where\n '
self._todo.append(((lambda instance: getattr(instance, '_applymap')), (func, subset), kwargs))
return self
| 1,650,845,506,284,373,800
|
Apply a function elementwise, updating the HTML
representation with the result.
Parameters
----------
func : function
``func`` should take a scalar and return a scalar
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
See Also
--------
Styler.where
|
pandas/io/formats/style.py
|
applymap
|
harunpehlivan/pandas
|
python
|
def applymap(self, func, subset=None, **kwargs):
'\n Apply a function elementwise, updating the HTML\n representation with the result.\n\n Parameters\n ----------\n func : function\n ``func`` should take a scalar and return a scalar\n subset : IndexSlice\n a valid indexer to limit ``data`` to *before* applying the\n function. Consider using a pandas.IndexSlice\n kwargs : dict\n pass along to ``func``\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.where\n '
self._todo.append(((lambda instance: getattr(instance, '_applymap')), (func, subset), kwargs))
return self
|
def where(self, cond, value, other=None, subset=None, **kwargs):
'\n Apply a function elementwise, updating the HTML\n representation with a style which is selected in\n accordance with the return value of a function.\n\n .. versionadded:: 0.21.0\n\n Parameters\n ----------\n cond : callable\n ``cond`` should take a scalar and return a boolean\n value : str\n applied when ``cond`` returns true\n other : str\n applied when ``cond`` returns false\n subset : IndexSlice\n a valid indexer to limit ``data`` to *before* applying the\n function. Consider using a pandas.IndexSlice\n kwargs : dict\n pass along to ``cond``\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.applymap\n '
if (other is None):
other = ''
return self.applymap((lambda val: (value if cond(val) else other)), subset=subset, **kwargs)
| 4,949,484,499,230,533,000
|
Apply a function elementwise, updating the HTML
representation with a style which is selected in
accordance with the return value of a function.
.. versionadded:: 0.21.0
Parameters
----------
cond : callable
``cond`` should take a scalar and return a boolean
value : str
applied when ``cond`` returns true
other : str
applied when ``cond`` returns false
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``cond``
Returns
-------
self : Styler
See Also
--------
Styler.applymap
|
pandas/io/formats/style.py
|
where
|
harunpehlivan/pandas
|
python
|
def where(self, cond, value, other=None, subset=None, **kwargs):
'\n Apply a function elementwise, updating the HTML\n representation with a style which is selected in\n accordance with the return value of a function.\n\n .. versionadded:: 0.21.0\n\n Parameters\n ----------\n cond : callable\n ``cond`` should take a scalar and return a boolean\n value : str\n applied when ``cond`` returns true\n other : str\n applied when ``cond`` returns false\n subset : IndexSlice\n a valid indexer to limit ``data`` to *before* applying the\n function. Consider using a pandas.IndexSlice\n kwargs : dict\n pass along to ``cond``\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.applymap\n '
if (other is None):
            other = ''
return self.applymap((lambda val: (value if cond(val) else other)), subset=subset, **kwargs)
|
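The where docstring carries no example; a small one, with df again assumed numeric:

# Equivalent to an applymap with a value-selecting lambda, as the body shows.
df.style.where(lambda v: v > 0, 'color: green', other='color: red')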
def set_precision(self, precision):
'\n Set the precision used to render.\n\n Parameters\n ----------\n precision : int\n\n Returns\n -------\n self : Styler\n '
self.precision = precision
return self
| -3,630,970,260,750,355,500
|
Set the precision used to render.
Parameters
----------
precision : int
Returns
-------
self : Styler
|
pandas/io/formats/style.py
|
set_precision
|
harunpehlivan/pandas
|
python
|
def set_precision(self, precision):
'\n Set the precision used to render.\n\n Parameters\n ----------\n precision : int\n\n Returns\n -------\n self : Styler\n '
self.precision = precision
return self
|
def set_table_attributes(self, attributes):
        '\n        Set the table attributes.\n\n        These are the items that show up in the opening ``<table>`` tag\n        in addition to the automatic (by default) id.\n\n        Parameters\n        ----------\n        attributes : string\n\n        Returns\n        -------\n        self : Styler\n\n        Examples\n        --------\n        >>> df = pd.DataFrame(np.random.randn(10, 4))\n        >>> df.style.set_table_attributes(\'class="pure-table"\')\n        # ... <table class="pure-table"> ...\n        '
self.table_attributes = attributes
return self
| 7,405,072,924,604,848,000
|
Set the table attributes.
These are the items that show up in the opening ``<table>`` tag
in addition to the automatic (by default) id.
Parameters
----------
attributes : string
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_attributes('class="pure-table"')
# ... <table class="pure-table"> ...
|
pandas/io/formats/style.py
|
set_table_attributes
|
harunpehlivan/pandas
|
python
|
def set_table_attributes(self, attributes):
        '\n        Set the table attributes.\n\n        These are the items that show up in the opening ``<table>`` tag\n        in addition to the automatic (by default) id.\n\n        Parameters\n        ----------\n        attributes : string\n\n        Returns\n        -------\n        self : Styler\n\n        Examples\n        --------\n        >>> df = pd.DataFrame(np.random.randn(10, 4))\n        >>> df.style.set_table_attributes(\'class="pure-table"\')\n        # ... <table class="pure-table"> ...\n        '
self.table_attributes = attributes
return self
|
def export(self):
        '\n        Export the styles applied to the current Styler.\n\n        Can be applied to a second style with ``Styler.use``.\n\n        Returns\n        -------\n        styles : list\n\n        See Also\n        --------\n        Styler.use\n        '
return self._todo
| -6,302,264,730,595,205,000
|
Export the styles applied to the current Styler.
Can be applied to a second style with ``Styler.use``.
Returns
-------
styles : list
See Also
--------
Styler.use
|
pandas/io/formats/style.py
|
export
|
harunpehlivan/pandas
|
python
|
def export(self):
        '\n        Export the styles applied to the current Styler.\n\n        Can be applied to a second style with ``Styler.use``.\n\n        Returns\n        -------\n        styles : list\n\n        See Also\n        --------\n        Styler.use\n        '
return self._todo
|
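export pairs with use to transfer a style pipeline between Stylers; a round-trip sketch with df1 and df2 as assumed DataFrames:

styles = df1.style.applymap(lambda v: 'font-weight: bold').export()
df2.style.use(styles)   # replays the exported style functions on df2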
def use(self, styles):
'\n Set the styles on the current Styler, possibly using styles\n from ``Styler.export``.\n\n Parameters\n ----------\n styles : list\n list of style functions\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.export\n '
self._todo.extend(styles)
return self
| 8,005,693,763,462,239,000
|
Set the styles on the current Styler, possibly using styles
from ``Styler.export``.
Parameters
----------
styles : list
list of style functions
Returns
-------
self : Styler
See Also
--------
Styler.export
|
pandas/io/formats/style.py
|
use
|
harunpehlivan/pandas
|
python
|
def use(self, styles):
'\n Set the styles on the current Styler, possibly using styles\n from ``Styler.export``.\n\n Parameters\n ----------\n styles : list\n list of style functions\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.export\n '
self._todo.extend(styles)
return self
|
def set_uuid(self, uuid):
'\n Set the uuid for a Styler.\n\n Parameters\n ----------\n uuid : str\n\n Returns\n -------\n self : Styler\n '
self.uuid = uuid
return self
| -6,428,507,068,428,712,000
|
Set the uuid for a Styler.
Parameters
----------
uuid : str
Returns
-------
self : Styler
|
pandas/io/formats/style.py
|
set_uuid
|
harunpehlivan/pandas
|
python
|
def set_uuid(self, uuid):
'\n Set the uuid for a Styler.\n\n Parameters\n ----------\n uuid : str\n\n Returns\n -------\n self : Styler\n '
self.uuid = uuid
return self
|
def set_caption(self, caption):
'\n Set the caption on a Styler\n\n Parameters\n ----------\n caption : str\n\n Returns\n -------\n self : Styler\n '
self.caption = caption
return self
| -4,207,975,177,316,797,000
|
Set the caption on a Styler
Parameters
----------
caption : str
Returns
-------
self : Styler
|
pandas/io/formats/style.py
|
set_caption
|
harunpehlivan/pandas
|
python
|
def set_caption(self, caption):
'\n Set the caption on a Styler\n\n Parameters\n ----------\n caption : str\n\n Returns\n -------\n self : Styler\n '
self.caption = caption
return self
|
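Usage sketch (assuming this Styler API): the caption string is emitted as the table's <caption> element.

import pandas as pd

df = pd.DataFrame({'a': [1, 2]})
html = df.style.set_caption('Table 1: sample data').render()
# the rendered HTML contains <caption>Table 1: sample data</caption>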
def set_table_styles(self, table_styles):
"\n Set the table styles on a Styler.\n\n These are placed in a ``<style>`` tag before the generated HTML table.\n\n Parameters\n ----------\n table_styles : list\n Each individual table_style should be a dictionary with\n ``selector`` and ``props`` keys. ``selector`` should be a CSS\n selector that the style will be applied to (automatically\n prefixed by the table's UUID) and ``props`` should be a list of\n tuples with ``(attribute, value)``.\n\n Returns\n -------\n self : Styler\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4))\n >>> df.style.set_table_styles(\n ... [{'selector': 'tr:hover',\n ... 'props': [('background-color', 'yellow')]}]\n ... )\n "
self.table_styles = table_styles
return self
| 3,939,827,332,193,556,000
|
Set the table styles on a Styler.
These are placed in a ``<style>`` tag before the generated HTML table.
Parameters
----------
table_styles : list
Each individual table_style should be a dictionary with
``selector`` and ``props`` keys. ``selector`` should be a CSS
selector that the style will be applied to (automatically
prefixed by the table's UUID) and ``props`` should be a list of
tuples with ``(attribute, value)``.
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_styles(
... [{'selector': 'tr:hover',
... 'props': [('background-color', 'yellow')]}]
... )
|
pandas/io/formats/style.py
|
set_table_styles
|
harunpehlivan/pandas
|
python
|
def set_table_styles(self, table_styles):
"\n Set the table styles on a Styler.\n\n These are placed in a ``<style>`` tag before the generated HTML table.\n\n Parameters\n ----------\n table_styles : list\n Each individual table_style should be a dictionary with\n ``selector`` and ``props`` keys. ``selector`` should be a CSS\n selector that the style will be applied to (automatically\n prefixed by the table's UUID) and ``props`` should be a list of\n tuples with ``(attribute, value)``.\n\n Returns\n -------\n self : Styler\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4))\n >>> df.style.set_table_styles(\n ... [{'selector': 'tr:hover',\n ... 'props': [('background-color', 'yellow')]}]\n ... )\n "
self.table_styles = table_styles
return self
|
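Expanding the docstring example into a sketch (assumes this Styler API): the rule below applies to this table only, because the selector is prefixed with the table's UUID in the emitted <style> block.

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(10, 4))
styler = df.style.set_table_styles(
    [{'selector': 'tr:hover',
      'props': [('background-color', 'yellow')]}]
)
# emitted roughly as: #T_<uuid> tr:hover { background-color: yellow; }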
def hide_index(self):
'\n Hide any indices from rendering.\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n self : Styler\n '
self.hidden_index = True
return self
| 4,394,477,431,902,735,400
|
Hide any indices from rendering.
.. versionadded:: 0.23.0
Returns
-------
self : Styler
|
pandas/io/formats/style.py
|
hide_index
|
harunpehlivan/pandas
|
python
|
def hide_index(self):
'\n Hide any indices from rendering.\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n self : Styler\n '
self.hidden_index = True
return self
|
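Sketch (assumes pandas >= 0.23, where this method was added): the row labels are simply omitted from the rendered HTML; the underlying data is untouched.

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
html = df.style.hide_index().render()  # no index column in the output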
def hide_columns(self, subset):
'\n Hide columns from rendering.\n\n .. versionadded:: 0.23.0\n\n Parameters\n ----------\n subset : IndexSlice\n An argument to ``DataFrame.loc`` that identifies which columns\n are hidden.\n\n Returns\n -------\n self : Styler\n '
subset = _non_reducing_slice(subset)
hidden_df = self.data.loc[subset]
self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
return self
| -89,767,141,061,159,070
|
Hide columns from rendering.
.. versionadded:: 0.23.0
Parameters
----------
subset : IndexSlice
An argument to ``DataFrame.loc`` that identifies which columns
are hidden.
Returns
-------
self : Styler
|
pandas/io/formats/style.py
|
hide_columns
|
harunpehlivan/pandas
|
python
|
def hide_columns(self, subset):
'\n Hide columns from rendering.\n\n .. versionadded:: 0.23.0\n\n Parameters\n ----------\n subset : IndexSlice\n An argument to ``DataFrame.loc`` that identifies which columns\n are hidden.\n\n Returns\n -------\n self : Styler\n '
subset = _non_reducing_slice(subset)
hidden_df = self.data.loc[subset]
self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
return self
|
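Sketch (assumes pandas >= 0.23): ``subset`` is anything ``DataFrame.loc`` accepts for selecting columns, so a plain list of labels works.

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
html = df.style.hide_columns(['b']).render()  # column 'b' is not rendered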
def highlight_null(self, null_color='red'):
'\n Shade the background ``null_color`` for missing values.\n\n Parameters\n ----------\n null_color : str\n\n Returns\n -------\n self : Styler\n '
self.applymap(self._highlight_null, null_color=null_color)
return self
| 1,991,409,830,712,989,000
|
Shade the background ``null_color`` for missing values.
Parameters
----------
null_color : str
Returns
-------
self : Styler
|
pandas/io/formats/style.py
|
highlight_null
|
harunpehlivan/pandas
|
python
|
def highlight_null(self, null_color='red'):
'\n Shade the background ``null_color`` for missing values.\n\n Parameters\n ----------\n null_color : str\n\n Returns\n -------\n self : Styler\n '
self.applymap(self._highlight_null, null_color=null_color)
return self
|
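Sketch (assumes this Styler API; 'lightgray' is an arbitrary choice): each missing value's cell gets a background-color rule.

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, np.nan, 3.0]})
styler = df.style.highlight_null(null_color='lightgray')
# the NaN cell is styled with "background-color: lightgray"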
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0, subset=None, text_color_threshold=0.408):
"\n Color the background in a gradient according to\n the data in each column (optionally row).\n\n Requires matplotlib.\n\n Parameters\n ----------\n cmap : str or colormap\n matplotlib colormap\n low, high : float\n compress the range by these values.\n axis : int or str\n 1 or 'columns' for columnwise, 0 or 'index' for rowwise\n subset : IndexSlice\n a valid slice for ``data`` to limit the style application to\n text_color_threshold : float or int\n luminance threshold for determining text color. Facilitates text\n visibility across varying background colors. From 0 to 1.\n 0 = all text is dark colored, 1 = all text is light colored.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n self : Styler\n\n Raises\n ------\n ValueError\n If ``text_color_threshold`` is not a value from 0 to 1.\n\n Notes\n -----\n Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the\n text legible by not using the entire range of the color map. The range\n of the data is extended by ``low * (x.max() - x.min())`` and ``high *\n (x.max() - x.min())`` before normalizing.\n "
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._background_gradient, cmap=cmap, subset=subset, axis=axis, low=low, high=high, text_color_threshold=text_color_threshold)
return self
| 3,402,305,775,612,038,000
|
Color the background in a gradient according to
the data in each column (optionally row).
Requires matplotlib.
Parameters
----------
cmap : str or colormap
matplotlib colormap
low, high : float
compress the range by these values.
axis : int or str
1 or 'columns' for columnwise, 0 or 'index' for rowwise
subset : IndexSlice
a valid slice for ``data`` to limit the style application to
text_color_threshold : float or int
luminance threshold for determining text color. Facilitates text
visibility across varying background colors. From 0 to 1.
0 = all text is dark colored, 1 = all text is light colored.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
Raises
------
ValueError
If ``text_color_threshold`` is not a value from 0 to 1.
Notes
-----
Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the
text legible by not using the entire range of the color map. The range
of the data is extended by ``low * (x.max() - x.min())`` and ``high *
(x.max() - x.min())`` before normalizing.
|
pandas/io/formats/style.py
|
background_gradient
|
harunpehlivan/pandas
|
python
|
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0, subset=None, text_color_threshold=0.408):
"\n Color the background in a gradient according to\n the data in each column (optionally row).\n\n Requires matplotlib.\n\n Parameters\n ----------\n cmap : str or colormap\n matplotlib colormap\n low, high : float\n compress the range by these values.\n axis : int or str\n 1 or 'columns' for columnwise, 0 or 'index' for rowwise\n subset : IndexSlice\n a valid slice for ``data`` to limit the style application to\n text_color_threshold : float or int\n luminance threshold for determining text color. Facilitates text\n visibility across varying background colors. From 0 to 1.\n 0 = all text is dark colored, 1 = all text is light colored.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n self : Styler\n\n Raises\n ------\n ValueError\n If ``text_color_threshold`` is not a value from 0 to 1.\n\n Notes\n -----\n Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the\n text legible by not using the entire range of the color map. The range\n of the data is extended by ``low * (x.max() - x.min())`` and ``high *\n (x.max() - x.min())`` before normalizing.\n "
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._background_gradient, cmap=cmap, subset=subset, axis=axis, low=low, high=high, text_color_threshold=text_color_threshold)
return self
|
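Usage sketch (requires matplotlib; the padding values are illustrative): ``low`` and ``high`` shrink the used portion of the colormap by extending the normalization range, which keeps the extreme cells away from the darkest and lightest colors.

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(10, 4))
# extend the normalization range by 20% of the data range on each side
styler = df.style.background_gradient(cmap='PuBu', low=0.2, high=0.2)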
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0, text_color_threshold=0.408):
'\n Color background in a range according to the data.\n '
if ((not isinstance(text_color_threshold, (float, int))) or (not (0 <= text_color_threshold <= 1))):
msg = '`text_color_threshold` must be a value from 0 to 1.'
raise ValueError(msg)
with _mpl(Styler.background_gradient) as (plt, colors):
smin = s.values.min()
smax = s.values.max()
rng = (smax - smin)
norm = colors.Normalize((smin - (rng * low)), (smax + (rng * high)))
rgbas = plt.cm.get_cmap(cmap)(norm(s.values))
def relative_luminance(rgba):
'\n Calculate relative luminance of a color.\n\n The calculation adheres to the W3C standards\n (https://www.w3.org/WAI/GL/wiki/Relative_luminance)\n\n Parameters\n ----------\n color : rgb or rgba tuple\n\n Returns\n -------\n float\n The relative luminance as a value from 0 to 1\n '
(r, g, b) = (((x / 12.92) if (x <= 0.03928) else (((x + 0.055) / 1.055) ** 2.4)) for x in rgba[:3])
return (((0.2126 * r) + (0.7152 * g)) + (0.0722 * b))
def css(rgba):
dark = (relative_luminance(rgba) < text_color_threshold)
text_color = ('#f1f1f1' if dark else '#000000')
return 'background-color: {b};color: {c};'.format(b=colors.rgb2hex(rgba), c=text_color)
if (s.ndim == 1):
return [css(rgba) for rgba in rgbas]
else:
return pd.DataFrame([[css(rgba) for rgba in row] for row in rgbas], index=s.index, columns=s.columns)
| -1,450,159,208,812,286,500
|
Color background in a range according to the data.
|
pandas/io/formats/style.py
|
_background_gradient
|
harunpehlivan/pandas
|
python
|
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0, text_color_threshold=0.408):
'\n \n '
if ((not isinstance(text_color_threshold, (float, int))) or (not (0 <= text_color_threshold <= 1))):
msg = '`text_color_threshold` must be a value from 0 to 1.'
raise ValueError(msg)
with _mpl(Styler.background_gradient) as (plt, colors):
smin = s.values.min()
smax = s.values.max()
rng = (smax - smin)
norm = colors.Normalize((smin - (rng * low)), (smax + (rng * high)))
rgbas = plt.cm.get_cmap(cmap)(norm(s.values))
def relative_luminance(rgba):
'\n Calculate relative luminance of a color.\n\n The calculation adheres to the W3C standards\n (https://www.w3.org/WAI/GL/wiki/Relative_luminance)\n\n Parameters\n ----------\n color : rgb or rgba tuple\n\n Returns\n -------\n float\n The relative luminance as a value from 0 to 1\n '
(r, g, b) = (((x / 12.92) if (x <= 0.03928) else (((x + 0.055) / 1.055) ** 2.4)) for x in rgba[:3])
return (((0.2126 * r) + (0.7152 * g)) + (0.0722 * b))
def css(rgba):
dark = (relative_luminance(rgba) < text_color_threshold)
text_color = ('#f1f1f1' if dark else '#000000')
return 'background-color: {b};color: {c};'.format(b=colors.rgb2hex(rgba), c=text_color)
if (s.ndim == 1):
return [css(rgba) for rgba in rgbas]
else:
return pd.DataFrame([[css(rgba) for rgba in row] for row in rgbas], index=s.index, columns=s.columns)
|
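The inner helper implements the W3C relative-luminance formula; written out standalone (with the sRGB linearization parenthesized as ((x + 0.055) / 1.055) ** 2.4), it can be sanity-checked against pure white and black:

def relative_luminance(rgba):
    # channels are floats in [0, 1]; alpha, if present, is ignored
    def linearize(x):
        return x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055) ** 2.4
    r, g, b = (linearize(x) for x in rgba[:3])
    return 0.2126 * r + 0.7152 * g + 0.0722 * b

assert abs(relative_luminance((1.0, 1.0, 1.0)) - 1.0) < 1e-9  # white -> 1
assert relative_luminance((0.0, 0.0, 0.0)) == 0.0             # black -> 0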
def set_properties(self, subset=None, **kwargs):
'\n Convenience method for setting one or more non-data dependent\n properties for each cell.\n\n Parameters\n ----------\n subset : IndexSlice\n a valid slice for ``data`` to limit the style application to\n kwargs : dict\n property: value pairs to be set for each cell\n\n Returns\n -------\n self : Styler\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4))\n >>> df.style.set_properties(color="white", align="right")\n >>> df.style.set_properties(**{\'background-color\': \'yellow\'})\n '
values = ';'.join(('{p}: {v}'.format(p=p, v=v) for (p, v) in kwargs.items()))
f = (lambda x: values)
return self.applymap(f, subset=subset)
| -614,258,365,675,136,500
|
Convenience method for setting one or more non-data dependent
properties for each cell.
Parameters
----------
subset : IndexSlice
a valid slice for ``data`` to limit the style application to
kwargs : dict
property: value pairs to be set for each cell
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_properties(color="white", align="right")
>>> df.style.set_properties(**{'background-color': 'yellow'})
|
pandas/io/formats/style.py
|
set_properties
|
harunpehlivan/pandas
|
python
|
def set_properties(self, subset=None, **kwargs):
'\n Convenience method for setting one or more non-data dependent\n properties for each cell.\n\n Parameters\n ----------\n subset : IndexSlice\n a valid slice for ``data`` to limit the style application to\n kwargs : dict\n property: value pairs to be set for each cell\n\n Returns\n -------\n self : Styler\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4))\n >>> df.style.set_properties(color="white", align="right")\n >>> df.style.set_properties(**{\'background-color\': \'yellow\'})\n '
values = ';'.join(('{p}: {v}'.format(p=p, v=v) for (p, v) in kwargs.items()))
f = (lambda x: values)
return self.applymap(f, subset=subset)
|
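Sketch (assumes this Styler API): the keyword pairs are joined into one CSS declaration string and applied unconditionally to every cell in the subset.

import pandas as pd

df = pd.DataFrame({'a': [1, 2]})
styler = df.style.set_properties(**{'background-color': 'black',
                                    'color': 'lawngreen'})
# every cell gets "background-color: black;color: lawngreen"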
@staticmethod
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
'\n Draw bar chart in dataframe cells.\n '
smin = (s.min() if (vmin is None) else vmin)
if isinstance(smin, ABCSeries):
smin = smin.min()
smax = (s.max() if (vmax is None) else vmax)
if isinstance(smax, ABCSeries):
smax = smax.max()
if (align == 'mid'):
smin = min(0, smin)
smax = max(0, smax)
elif (align == 'zero'):
smax = max(abs(smin), abs(smax))
smin = (- smax)
normed = ((width * (s.values - smin)) / ((smax - smin) + 1e-12))
zero = (((- width) * smin) / ((smax - smin) + 1e-12))
def css_bar(start, end, color):
'\n Generate CSS code to draw a bar from start to end.\n '
css = 'width: 10em; height: 80%;'
if (end > start):
css += 'background: linear-gradient(90deg,'
if (start > 0):
css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(s=start, c=color)
css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(e=min(end, width), c=color)
return css
def css(x):
if pd.isna(x):
return ''
color = (colors[1] if (x > zero) else colors[0])
if (align == 'left'):
return css_bar(0, x, color)
else:
return css_bar(min(x, zero), max(x, zero), color)
if (s.ndim == 1):
return [css(x) for x in normed]
else:
return pd.DataFrame([[css(x) for x in row] for row in normed], index=s.index, columns=s.columns)
| -2,759,675,745,142,124,500
|
Draw bar chart in dataframe cells.
|
pandas/io/formats/style.py
|
_bar
|
harunpehlivan/pandas
|
python
|
@staticmethod
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
'\n \n '
smin = (s.min() if (vmin is None) else vmin)
if isinstance(smin, ABCSeries):
smin = smin.min()
smax = (s.max() if (vmax is None) else vmax)
if isinstance(smax, ABCSeries):
smax = smax.max()
if (align == 'mid'):
smin = min(0, smin)
smax = max(0, smax)
elif (align == 'zero'):
smax = max(abs(smin), abs(smax))
smin = (- smax)
normed = ((width * (s.values - smin)) / ((smax - smin) + 1e-12))
zero = (((- width) * smin) / ((smax - smin) + 1e-12))
def css_bar(start, end, color):
'\n Generate CSS code to draw a bar from start to end.\n '
css = 'width: 10em; height: 80%;'
if (end > start):
css += 'background: linear-gradient(90deg,'
if (start > 0):
css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(s=start, c=color)
css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(e=min(end, width), c=color)
return css
def css(x):
if pd.isna(x):
return ''
color = (colors[1] if (x > zero) else colors[0])
if (align == 'left'):
return css_bar(0, x, color)
else:
return css_bar(min(x, zero), max(x, zero), color)
if (s.ndim == 1):
return [css(x) for x in normed]
else:
return pd.DataFrame([[css(x) for x in row] for row in normed], index=s.index, columns=s.columns)
|
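The two derived quantities do the geometric work: ``normed`` maps each value onto a 0 to ``width`` percentage scale, and ``zero`` is where the value 0 lands on that scale. A toy computation (hypothetical values) under ``align='mid'``:

import numpy as np

values = np.array([-2.0, 0.0, 4.0])
width = 100
smin, smax = min(0, values.min()), max(0, values.max())   # align='mid' clamps to include 0
normed = width * (values - smin) / (smax - smin + 1e-12)  # -> [0.0, 33.3, 100.0]
zero = -width * smin / (smax - smin + 1e-12)              # -> 33.3, the bar pivot point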
def bar(self, subset=None, axis=0, color='#d65f5f', width=100, align='left', vmin=None, vmax=None):
"\n Draw bar chart in the cell backgrounds.\n\n Parameters\n ----------\n subset : IndexSlice, optional\n A valid slice for `data` to limit the style application to.\n axis : int, str or None, default 0\n Apply to each column (`axis=0` or `'index'`)\n or to each row (`axis=1` or `'columns'`) or\n to the entire DataFrame at once with `axis=None`.\n color : str or 2-tuple/list\n If a str is passed, the color is the same for both\n negative and positive numbers. If 2-tuple/list is used, the\n first element is the color_negative and the second is the\n color_positive (eg: ['#d65f5f', '#5fba7d']).\n width : float, default 100\n A number between 0 or 100. The largest value will cover `width`\n percent of the cell's width.\n align : {'left', 'zero',' mid'}, default 'left'\n How to align the bars with the cells.\n\n - 'left' : the min value starts at the left of the cell.\n - 'zero' : a value of zero is located at the center of the cell.\n - 'mid' : the center of the cell is at (max-min)/2, or\n if values are all negative (positive) the zero is aligned\n at the right (left) of the cell.\n\n .. versionadded:: 0.20.0\n\n vmin : float, optional\n Minimum bar value, defining the left hand limit\n of the bar drawing range, lower values are clipped to `vmin`.\n When None (default): the minimum value of the data will be used.\n\n .. versionadded:: 0.24.0\n\n vmax : float, optional\n Maximum bar value, defining the right hand limit\n of the bar drawing range, higher values are clipped to `vmax`.\n When None (default): the maximum value of the data will be used.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n self : Styler\n "
if (align not in ('left', 'zero', 'mid')):
raise ValueError("`align` must be one of {'left', 'zero', 'mid'}")
if (not is_list_like(color)):
color = [color, color]
elif (len(color) == 1):
color = [color[0], color[0]]
elif (len(color) > 2):
raise ValueError("`color` must be string or a list-like of length 2: [`color_neg`, `color_pos`] (eg: color=['#d65f5f', '#5fba7d'])")
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._bar, subset=subset, axis=axis, align=align, colors=color, width=width, vmin=vmin, vmax=vmax)
return self
| -5,269,238,799,898,452,000
|
Draw bar chart in the cell backgrounds.
Parameters
----------
subset : IndexSlice, optional
A valid slice for `data` to limit the style application to.
axis : int, str or None, default 0
Apply to each column (`axis=0` or `'index'`)
or to each row (`axis=1` or `'columns'`) or
to the entire DataFrame at once with `axis=None`.
color : str or 2-tuple/list
If a str is passed, the color is the same for both
negative and positive numbers. If 2-tuple/list is used, the
first element is the color_negative and the second is the
color_positive (eg: ['#d65f5f', '#5fba7d']).
width : float, default 100
A number between 0 and 100. The largest value will cover `width`
percent of the cell's width.
align : {'left', 'zero', 'mid'}, default 'left'
How to align the bars with the cells.
- 'left' : the min value starts at the left of the cell.
- 'zero' : a value of zero is located at the center of the cell.
- 'mid' : the center of the cell is at (max-min)/2, or
if values are all negative (positive) the zero is aligned
at the right (left) of the cell.
.. versionadded:: 0.20.0
vmin : float, optional
Minimum bar value, defining the left hand limit
of the bar drawing range, lower values are clipped to `vmin`.
When None (default): the minimum value of the data will be used.
.. versionadded:: 0.24.0
vmax : float, optional
Maximum bar value, defining the right hand limit
of the bar drawing range, higher values are clipped to `vmax`.
When None (default): the maximum value of the data will be used.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
|
pandas/io/formats/style.py
|
bar
|
harunpehlivan/pandas
|
python
|
def bar(self, subset=None, axis=0, color='#d65f5f', width=100, align='left', vmin=None, vmax=None):
"\n Draw bar chart in the cell backgrounds.\n\n Parameters\n ----------\n subset : IndexSlice, optional\n A valid slice for `data` to limit the style application to.\n axis : int, str or None, default 0\n Apply to each column (`axis=0` or `'index'`)\n or to each row (`axis=1` or `'columns'`) or\n to the entire DataFrame at once with `axis=None`.\n color : str or 2-tuple/list\n If a str is passed, the color is the same for both\n negative and positive numbers. If 2-tuple/list is used, the\n first element is the color_negative and the second is the\n color_positive (eg: ['#d65f5f', '#5fba7d']).\n width : float, default 100\n A number between 0 or 100. The largest value will cover `width`\n percent of the cell's width.\n align : {'left', 'zero',' mid'}, default 'left'\n How to align the bars with the cells.\n\n - 'left' : the min value starts at the left of the cell.\n - 'zero' : a value of zero is located at the center of the cell.\n - 'mid' : the center of the cell is at (max-min)/2, or\n if values are all negative (positive) the zero is aligned\n at the right (left) of the cell.\n\n .. versionadded:: 0.20.0\n\n vmin : float, optional\n Minimum bar value, defining the left hand limit\n of the bar drawing range, lower values are clipped to `vmin`.\n When None (default): the minimum value of the data will be used.\n\n .. versionadded:: 0.24.0\n\n vmax : float, optional\n Maximum bar value, defining the right hand limit\n of the bar drawing range, higher values are clipped to `vmax`.\n When None (default): the maximum value of the data will be used.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n self : Styler\n "
if (align not in ('left', 'zero', 'mid')):
raise ValueError("`align` must be one of {'left', 'zero', 'mid'}")
if (not is_list_like(color)):
color = [color, color]
elif (len(color) == 1):
color = [color[0], color[0]]
elif (len(color) > 2):
raise ValueError("`color` must be string or a list-like of length 2: [`color_neg`, `color_pos`] (eg: color=['#d65f5f', '#5fba7d'])")
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._bar, subset=subset, axis=axis, align=align, colors=color, width=width, vmin=vmin, vmax=vmax)
return self
|
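Usage sketch (assumes pandas >= 0.24 for ``vmin``/``vmax``; the colors are illustrative): with ``align='mid'`` and a symmetric range, zero sits at the cell's center, negatives draw leftwards in the first color and positives rightwards in the second.

import pandas as pd

df = pd.DataFrame({'a': [-3, -1, 2, 5]})
styler = df.style.bar(align='mid', color=['#d65f5f', '#5fba7d'],
                      vmin=-5, vmax=5)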
def highlight_max(self, subset=None, color='yellow', axis=0):
"\n Highlight the maximum by shading the background.\n\n Parameters\n ----------\n subset : IndexSlice, default None\n a valid slice for ``data`` to limit the style application to\n color : str, default 'yellow'\n axis : int, str, or None; default 0\n 0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,\n or ``None`` for tablewise\n\n Returns\n -------\n self : Styler\n "
return self._highlight_handler(subset=subset, color=color, axis=axis, max_=True)
| 9,088,035,780,358,375,000
|
Highlight the maximum by shading the background.
Parameters
----------
subset : IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color : str, default 'yellow'
axis : int, str, or None; default 0
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
or ``None`` for tablewise
Returns
-------
self : Styler
|
pandas/io/formats/style.py
|
highlight_max
|
harunpehlivan/pandas
|
python
|
def highlight_max(self, subset=None, color='yellow', axis=0):
"\n Highlight the maximum by shading the background.\n\n Parameters\n ----------\n subset : IndexSlice, default None\n a valid slice for ``data`` to limit the style application to\n color : str, default 'yellow'\n axis : int, str, or None; default 0\n 0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,\n or ``None`` for tablewise\n\n Returns\n -------\n self : Styler\n "
return self._highlight_handler(subset=subset, color=color, axis=axis, max_=True)
|
def highlight_min(self, subset=None, color='yellow', axis=0):
"\n Highlight the minimum by shading the background.\n\n Parameters\n ----------\n subset : IndexSlice, default None\n a valid slice for ``data`` to limit the style application to\n color : str, default 'yellow'\n axis : int, str, or None; default 0\n 0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,\n or ``None`` for tablewise\n\n Returns\n -------\n self : Styler\n "
return self._highlight_handler(subset=subset, color=color, axis=axis, max_=False)
| -2,233,053,266,408,328,200
|
Highlight the minimum by shading the background.
Parameters
----------
subset : IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color : str, default 'yellow'
axis : int, str, or None; default 0
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
or ``None`` for tablewise
Returns
-------
self : Styler
|
pandas/io/formats/style.py
|
highlight_min
|
harunpehlivan/pandas
|
python
|
def highlight_min(self, subset=None, color='yellow', axis=0):
"\n Highlight the minimum by shading the background.\n\n Parameters\n ----------\n subset : IndexSlice, default None\n a valid slice for ``data`` to limit the style application to\n color : str, default 'yellow'\n axis : int, str, or None; default 0\n 0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,\n or ``None`` for tablewise\n\n Returns\n -------\n self : Styler\n "
return self._highlight_handler(subset=subset, color=color, axis=axis, max_=False)
|
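Since both methods return ``self``, they chain; a sketch (assumes this Styler API):

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(6, 3))
styler = (df.style
            .highlight_max(axis=0)                       # per-column max, yellow
            .highlight_min(axis=0, color='lightblue'))   # per-column min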
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
'\n Highlight the min or max in a Series or DataFrame.\n '
attr = 'background-color: {0}'.format(color)
if (data.ndim == 1):
if max_:
extrema = (data == data.max())
else:
extrema = (data == data.min())
return [(attr if v else '') for v in extrema]
else:
if max_:
extrema = (data == data.max().max())
else:
extrema = (data == data.min().min())
return pd.DataFrame(np.where(extrema, attr, ''), index=data.index, columns=data.columns)
| 5,835,042,187,748,983,000
|
Highlight the min or max in a Series or DataFrame.
|
pandas/io/formats/style.py
|
_highlight_extrema
|
harunpehlivan/pandas
|
python
|
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
'\n \n '
attr = 'background-color: {0}'.format(color)
if (data.ndim == 1):
if max_:
extrema = (data == data.max())
else:
extrema = (data == data.min())
return [(attr if v else '') for v in extrema]
else:
if max_:
extrema = (data == data.max().max())
else:
extrema = (data == data.min().min())
return pd.DataFrame(np.where(extrema, attr, ''), index=data.index, columns=data.columns)
|
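In the 2-D branch, ``np.where`` broadcasts the CSS string over the boolean mask; a standalone sketch with hypothetical data:

import numpy as np
import pandas as pd

data = pd.DataFrame({'a': [1, 9], 'b': [5, 3]})
attr = 'background-color: yellow'
extrema = data == data.max().max()          # True only at the global max (9)
css = pd.DataFrame(np.where(extrema, attr, ''),
                   index=data.index, columns=data.columns)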