body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
51743612dca4796c132349146caaf95561ade25c4158cff112be92a940d2f66c
def pathSum(self, root, sum): '\n :type root: TreeNode\n :type sum: int\n :rtype: int\n ' if (root is None): return 0 res = self.findPath(root, sum) res += self.pathSum(root.left, sum) res += self.pathSum(root.right, sum) return res
:type root: TreeNode :type sum: int :rtype: int
07-tree-and-recursive/leetcode_437.py
pathSum
xiaolinzi-xl/Algorithm-Interview-Study
1
python
def pathSum(self, root, sum): '\n :type root: TreeNode\n :type sum: int\n :rtype: int\n ' if (root is None): return 0 res = self.findPath(root, sum) res += self.pathSum(root.left, sum) res += self.pathSum(root.right, sum) return res
def pathSum(self, root, sum): '\n :type root: TreeNode\n :type sum: int\n :rtype: int\n ' if (root is None): return 0 res = self.findPath(root, sum) res += self.pathSum(root.left, sum) res += self.pathSum(root.right, sum) return res<|docstring|>:type root: TreeNode :type sum: int :rtype: int<|endoftext|>
362919f7f8bcc0c464d47e45b39e270e2bee956a5841e2dd4ff18136703d8e3a
def as_dict(self): '\n Return a python dict of self.\n ' _dict = {} _prefix = self.__dict__['_prefix'] for (key, value) in self.__dict__.items(): value = (value.as_dict() if isinstance(value, self.__class__) else value) if (_prefix in key): _key = key.replace(_prefix, '') _dict[_key] = value return _dict
Return a python dict of self.
attribdict/AttribDict.py
as_dict
YiqunChen1999/attribdict
0
python
def as_dict(self): '\n \n ' _dict = {} _prefix = self.__dict__['_prefix'] for (key, value) in self.__dict__.items(): value = (value.as_dict() if isinstance(value, self.__class__) else value) if (_prefix in key): _key = key.replace(_prefix, ) _dict[_key] = value return _dict
def as_dict(self): '\n \n ' _dict = {} _prefix = self.__dict__['_prefix'] for (key, value) in self.__dict__.items(): value = (value.as_dict() if isinstance(value, self.__class__) else value) if (_prefix in key): _key = key.replace(_prefix, ) _dict[_key] = value return _dict<|docstring|>Return a python dict of self.<|endoftext|>
7b4dbb7685b0f99e191de44c7ca4b31b6ba7c10c27bccaa5e20693738e8fe9a5
def copy(self): '\n Return a shallow copy of self.\n ' return self.__copy__()
Return a shallow copy of self.
attribdict/AttribDict.py
copy
YiqunChen1999/attribdict
0
python
def copy(self): '\n \n ' return self.__copy__()
def copy(self): '\n \n ' return self.__copy__()<|docstring|>Return a shallow copy of self.<|endoftext|>
e3f0973ba8713bc0e79c702f9bd84c5c7e2a48e2099db14c9eefa21693fbc035
def deepcopy(self): '\n Return a deep copy of self.\n ' return self.__deepcopy__({})
Return a deep copy of self.
attribdict/AttribDict.py
deepcopy
YiqunChen1999/attribdict
0
python
def deepcopy(self): '\n \n ' return self.__deepcopy__({})
def deepcopy(self): '\n \n ' return self.__deepcopy__({})<|docstring|>Return a deep copy of self.<|endoftext|>
f68612d3cf298d4380fe2a31cdc0f8c507894b935d34d2e9981b2203ede2e67c
def astromerey(red_path, bare_fits, wcs_catalog=None, wcs_match_distan=0.002, recenter=False, ver_from=None, ver_to=None, overwrite=False): ' Do astrometry, matching and regress with USNO-B1 catalog or SDSS/APASS\n argument:\n red_path: path of output science path\n bare_fits: fits file without path and extension\n wcs_catalog: reference catalog of wcs\n wcs_match_distan: distance limit for matching, default 0.002 deg, 7.2 arcsec\n recenter: use grid method to find real center, default false\n ver_from: version which data come from\n ver_to: version which data write to\n overwrite: is set, overwrite existing output files\n returns:\n n_wcs for stars count matched, 0 or -1 for error\n ' global debug if ('debug' not in globals()): debug = 0 ver_from_fix = ('' if (ver_from is None) else ('.' + ver_from)) ver_to_fix = ('' if (ver_to is None) else ('.' + ver_to))
Do astrometry, matching and regress with USNO-B1 catalog or SDSS/APASS argument: red_path: path of output science path bare_fits: fits file without path and extension wcs_catalog: reference catalog of wcs wcs_match_distan: distance limit for matching, default 0.002 deg, 7.2 arcsec recenter: use grid method to find real center, default false ver_from: version which data come from ver_to: version which data write to overwrite: is set, overwrite existing output files returns: n_wcs for stars count matched, 0 or -1 for error
bok/astrometry.py
astromerey
RapidLzj/2016M
0
python
def astromerey(red_path, bare_fits, wcs_catalog=None, wcs_match_distan=0.002, recenter=False, ver_from=None, ver_to=None, overwrite=False): ' Do astrometry, matching and regress with USNO-B1 catalog or SDSS/APASS\n argument:\n red_path: path of output science path\n bare_fits: fits file without path and extension\n wcs_catalog: reference catalog of wcs\n wcs_match_distan: distance limit for matching, default 0.002 deg, 7.2 arcsec\n recenter: use grid method to find real center, default false\n ver_from: version which data come from\n ver_to: version which data write to\n overwrite: is set, overwrite existing output files\n returns:\n n_wcs for stars count matched, 0 or -1 for error\n ' global debug if ('debug' not in globals()): debug = 0 ver_from_fix = ( if (ver_from is None) else ('.' + ver_from)) ver_to_fix = ( if (ver_to is None) else ('.' + ver_to))
def astromerey(red_path, bare_fits, wcs_catalog=None, wcs_match_distan=0.002, recenter=False, ver_from=None, ver_to=None, overwrite=False): ' Do astrometry, matching and regress with USNO-B1 catalog or SDSS/APASS\n argument:\n red_path: path of output science path\n bare_fits: fits file without path and extension\n wcs_catalog: reference catalog of wcs\n wcs_match_distan: distance limit for matching, default 0.002 deg, 7.2 arcsec\n recenter: use grid method to find real center, default false\n ver_from: version which data come from\n ver_to: version which data write to\n overwrite: is set, overwrite existing output files\n returns:\n n_wcs for stars count matched, 0 or -1 for error\n ' global debug if ('debug' not in globals()): debug = 0 ver_from_fix = ( if (ver_from is None) else ('.' + ver_from)) ver_to_fix = ( if (ver_to is None) else ('.' + ver_to))<|docstring|>Do astrometry, matching and regress with USNO-B1 catalog or SDSS/APASS argument: red_path: path of output science path bare_fits: fits file without path and extension wcs_catalog: reference catalog of wcs wcs_match_distan: distance limit for matching, default 0.002 deg, 7.2 arcsec recenter: use grid method to find real center, default false ver_from: version which data come from ver_to: version which data write to overwrite: is set, overwrite existing output files returns: n_wcs for stars count matched, 0 or -1 for error<|endoftext|>
0af964c8886a77d308a5a8fd481881c4e482498195762345ba008fee887eef66
def palindromePairs(self, words): '\n :type words: List[str]\n :rtype: List[List[int]]\n ' n = len(words) if (n < 2): return [] dic = collections.defaultdict(int) res = [] for i in range(n): dic[words[i]] = i for i in range(n): for j in range((len(words[i]) + 1)): left = words[i][:j] right = words[i][j:] if self.isPalindrome(left): rev_right = right[::(- 1)] if ((rev_right in dic) and (dic[rev_right] != i)): res.append([dic[rev_right], i]) if ((len(right) > 0) and self.isPalindrome(right)): rev_left = left[::(- 1)] if ((rev_left in dic) and (dic[rev_left] != i)): res.append([i, dic[rev_left]]) return res
:type words: List[str] :rtype: List[List[int]]
0336.Palindrome Pairs/solution.py
palindromePairs
zhlinh/leetcode
0
python
def palindromePairs(self, words): '\n :type words: List[str]\n :rtype: List[List[int]]\n ' n = len(words) if (n < 2): return [] dic = collections.defaultdict(int) res = [] for i in range(n): dic[words[i]] = i for i in range(n): for j in range((len(words[i]) + 1)): left = words[i][:j] right = words[i][j:] if self.isPalindrome(left): rev_right = right[::(- 1)] if ((rev_right in dic) and (dic[rev_right] != i)): res.append([dic[rev_right], i]) if ((len(right) > 0) and self.isPalindrome(right)): rev_left = left[::(- 1)] if ((rev_left in dic) and (dic[rev_left] != i)): res.append([i, dic[rev_left]]) return res
def palindromePairs(self, words): '\n :type words: List[str]\n :rtype: List[List[int]]\n ' n = len(words) if (n < 2): return [] dic = collections.defaultdict(int) res = [] for i in range(n): dic[words[i]] = i for i in range(n): for j in range((len(words[i]) + 1)): left = words[i][:j] right = words[i][j:] if self.isPalindrome(left): rev_right = right[::(- 1)] if ((rev_right in dic) and (dic[rev_right] != i)): res.append([dic[rev_right], i]) if ((len(right) > 0) and self.isPalindrome(right)): rev_left = left[::(- 1)] if ((rev_left in dic) and (dic[rev_left] != i)): res.append([i, dic[rev_left]]) return res<|docstring|>:type words: List[str] :rtype: List[List[int]]<|endoftext|>
b7271ba72602e59ebc08c6d30a6a446fbe33c00f788d07517d4cbdbd13a8d781
def average_displacement_error(ground_truth, predicted): 'Calculates average displacement error\n ADE(y) = (1/T) \\sum_{t=1}^T || s_t - s^*_t ||_2\n where T = num_timesteps, y = (s_1, ..., s_T)\n\n Does not perform any mode aggregation.\n\n Args:\n ground_truth (np.ndarray): array of shape (n_timestamps, 2)\n predicted (np.ndarray): array of shape (n_modes, n_timestamps, 2)\n\n Returns:\n np.ndarray: array of shape (n_modes,)\n ' return np.linalg.norm((ground_truth - predicted), axis=(- 1)).mean(axis=(- 1))
Calculates average displacement error ADE(y) = (1/T) \sum_{t=1}^T || s_t - s^*_t ||_2 where T = num_timesteps, y = (s_1, ..., s_T) Does not perform any mode aggregation. Args: ground_truth (np.ndarray): array of shape (n_timestamps, 2) predicted (np.ndarray): array of shape (n_modes, n_timestamps, 2) Returns: np.ndarray: array of shape (n_modes,)
sdc/ysdc_dataset_api/evaluation/metrics.py
average_displacement_error
lkra/shifts
156
python
def average_displacement_error(ground_truth, predicted): 'Calculates average displacement error\n ADE(y) = (1/T) \\sum_{t=1}^T || s_t - s^*_t ||_2\n where T = num_timesteps, y = (s_1, ..., s_T)\n\n Does not perform any mode aggregation.\n\n Args:\n ground_truth (np.ndarray): array of shape (n_timestamps, 2)\n predicted (np.ndarray): array of shape (n_modes, n_timestamps, 2)\n\n Returns:\n np.ndarray: array of shape (n_modes,)\n ' return np.linalg.norm((ground_truth - predicted), axis=(- 1)).mean(axis=(- 1))
def average_displacement_error(ground_truth, predicted): 'Calculates average displacement error\n ADE(y) = (1/T) \\sum_{t=1}^T || s_t - s^*_t ||_2\n where T = num_timesteps, y = (s_1, ..., s_T)\n\n Does not perform any mode aggregation.\n\n Args:\n ground_truth (np.ndarray): array of shape (n_timestamps, 2)\n predicted (np.ndarray): array of shape (n_modes, n_timestamps, 2)\n\n Returns:\n np.ndarray: array of shape (n_modes,)\n ' return np.linalg.norm((ground_truth - predicted), axis=(- 1)).mean(axis=(- 1))<|docstring|>Calculates average displacement error ADE(y) = (1/T) \sum_{t=1}^T || s_t - s^*_t ||_2 where T = num_timesteps, y = (s_1, ..., s_T) Does not perform any mode aggregation. Args: ground_truth (np.ndarray): array of shape (n_timestamps, 2) predicted (np.ndarray): array of shape (n_modes, n_timestamps, 2) Returns: np.ndarray: array of shape (n_modes,)<|endoftext|>
63564d66f75d8049695c8212c98133cffa530ca8e1393dd78c5184650f8e2034
def final_displacement_error(ground_truth, predicted): 'Calculates final displacement error\n FDE(y) = (1/T) || s_T - s^*_T ||_2\n where T = num_timesteps, y = (s_1, ..., s_T)\n\n Does not performs any mode aggregation.\n\n Args:\n ground_truth (np.ndarray): array of shape (n_timestamps, 2)\n predicted (np.ndarray): array of shape (n_modes, n_timestamps, 2)\n\n Returns:\n np.ndarray: array of shape (n_modes,)\n ' return np.linalg.norm((ground_truth - predicted), axis=(- 1))[(:, (- 1))]
Calculates final displacement error FDE(y) = (1/T) || s_T - s^*_T ||_2 where T = num_timesteps, y = (s_1, ..., s_T) Does not performs any mode aggregation. Args: ground_truth (np.ndarray): array of shape (n_timestamps, 2) predicted (np.ndarray): array of shape (n_modes, n_timestamps, 2) Returns: np.ndarray: array of shape (n_modes,)
sdc/ysdc_dataset_api/evaluation/metrics.py
final_displacement_error
lkra/shifts
156
python
def final_displacement_error(ground_truth, predicted): 'Calculates final displacement error\n FDE(y) = (1/T) || s_T - s^*_T ||_2\n where T = num_timesteps, y = (s_1, ..., s_T)\n\n Does not performs any mode aggregation.\n\n Args:\n ground_truth (np.ndarray): array of shape (n_timestamps, 2)\n predicted (np.ndarray): array of shape (n_modes, n_timestamps, 2)\n\n Returns:\n np.ndarray: array of shape (n_modes,)\n ' return np.linalg.norm((ground_truth - predicted), axis=(- 1))[(:, (- 1))]
def final_displacement_error(ground_truth, predicted): 'Calculates final displacement error\n FDE(y) = (1/T) || s_T - s^*_T ||_2\n where T = num_timesteps, y = (s_1, ..., s_T)\n\n Does not performs any mode aggregation.\n\n Args:\n ground_truth (np.ndarray): array of shape (n_timestamps, 2)\n predicted (np.ndarray): array of shape (n_modes, n_timestamps, 2)\n\n Returns:\n np.ndarray: array of shape (n_modes,)\n ' return np.linalg.norm((ground_truth - predicted), axis=(- 1))[(:, (- 1))]<|docstring|>Calculates final displacement error FDE(y) = (1/T) || s_T - s^*_T ||_2 where T = num_timesteps, y = (s_1, ..., s_T) Does not performs any mode aggregation. Args: ground_truth (np.ndarray): array of shape (n_timestamps, 2) predicted (np.ndarray): array of shape (n_modes, n_timestamps, 2) Returns: np.ndarray: array of shape (n_modes,)<|endoftext|>
a7048178766598ddd8f254b9c95e930ed776b607d3aaf1116c851151858d6046
def aggregate_prediction_request_losses(aggregator: str, per_plan_losses: np.ndarray, per_plan_weights: Optional[np.ndarray]=None) -> np.ndarray: 'Given ADE or FDE losses for each predicted mode and an aggregator,\n produce a final loss value.\n\n Args:\n aggregator (str): aggregator type, see below for valid values\n per_plan_losses (np.ndarray): ADE or FDE losses of shape (n_modes,),\n as returned by `average_displacement_error` or\n `final_displacement_error`\n per_plan_weights (np.ndarray): confidence weights of shape (n_modes,)\n associated with each mode\n\n Returns:\n np.ndarray: scalar loss value\n ' assert (aggregator in {'min', 'avg', 'top1', 'weighted'}) if (aggregator == 'min'): agg_prediction_loss = np.min(per_plan_losses, axis=(- 1)) elif (aggregator == 'avg'): agg_prediction_loss = np.mean(per_plan_losses, axis=(- 1)) elif (aggregator == 'top1'): argmax = np.argmax(per_plan_weights) agg_prediction_loss = per_plan_losses[argmax] elif (aggregator == 'weighted'): assert_weights_near_one(weights=per_plan_weights) assert_weights_non_negative(weights=per_plan_weights) agg_prediction_loss = np.sum((per_plan_weights * per_plan_losses), axis=(- 1)) else: raise NotImplementedError return agg_prediction_loss
Given ADE or FDE losses for each predicted mode and an aggregator, produce a final loss value. Args: aggregator (str): aggregator type, see below for valid values per_plan_losses (np.ndarray): ADE or FDE losses of shape (n_modes,), as returned by `average_displacement_error` or `final_displacement_error` per_plan_weights (np.ndarray): confidence weights of shape (n_modes,) associated with each mode Returns: np.ndarray: scalar loss value
sdc/ysdc_dataset_api/evaluation/metrics.py
aggregate_prediction_request_losses
lkra/shifts
156
python
def aggregate_prediction_request_losses(aggregator: str, per_plan_losses: np.ndarray, per_plan_weights: Optional[np.ndarray]=None) -> np.ndarray: 'Given ADE or FDE losses for each predicted mode and an aggregator,\n produce a final loss value.\n\n Args:\n aggregator (str): aggregator type, see below for valid values\n per_plan_losses (np.ndarray): ADE or FDE losses of shape (n_modes,),\n as returned by `average_displacement_error` or\n `final_displacement_error`\n per_plan_weights (np.ndarray): confidence weights of shape (n_modes,)\n associated with each mode\n\n Returns:\n np.ndarray: scalar loss value\n ' assert (aggregator in {'min', 'avg', 'top1', 'weighted'}) if (aggregator == 'min'): agg_prediction_loss = np.min(per_plan_losses, axis=(- 1)) elif (aggregator == 'avg'): agg_prediction_loss = np.mean(per_plan_losses, axis=(- 1)) elif (aggregator == 'top1'): argmax = np.argmax(per_plan_weights) agg_prediction_loss = per_plan_losses[argmax] elif (aggregator == 'weighted'): assert_weights_near_one(weights=per_plan_weights) assert_weights_non_negative(weights=per_plan_weights) agg_prediction_loss = np.sum((per_plan_weights * per_plan_losses), axis=(- 1)) else: raise NotImplementedError return agg_prediction_loss
def aggregate_prediction_request_losses(aggregator: str, per_plan_losses: np.ndarray, per_plan_weights: Optional[np.ndarray]=None) -> np.ndarray: 'Given ADE or FDE losses for each predicted mode and an aggregator,\n produce a final loss value.\n\n Args:\n aggregator (str): aggregator type, see below for valid values\n per_plan_losses (np.ndarray): ADE or FDE losses of shape (n_modes,),\n as returned by `average_displacement_error` or\n `final_displacement_error`\n per_plan_weights (np.ndarray): confidence weights of shape (n_modes,)\n associated with each mode\n\n Returns:\n np.ndarray: scalar loss value\n ' assert (aggregator in {'min', 'avg', 'top1', 'weighted'}) if (aggregator == 'min'): agg_prediction_loss = np.min(per_plan_losses, axis=(- 1)) elif (aggregator == 'avg'): agg_prediction_loss = np.mean(per_plan_losses, axis=(- 1)) elif (aggregator == 'top1'): argmax = np.argmax(per_plan_weights) agg_prediction_loss = per_plan_losses[argmax] elif (aggregator == 'weighted'): assert_weights_near_one(weights=per_plan_weights) assert_weights_non_negative(weights=per_plan_weights) agg_prediction_loss = np.sum((per_plan_weights * per_plan_losses), axis=(- 1)) else: raise NotImplementedError return agg_prediction_loss<|docstring|>Given ADE or FDE losses for each predicted mode and an aggregator, produce a final loss value. Args: aggregator (str): aggregator type, see below for valid values per_plan_losses (np.ndarray): ADE or FDE losses of shape (n_modes,), as returned by `average_displacement_error` or `final_displacement_error` per_plan_weights (np.ndarray): confidence weights of shape (n_modes,) associated with each mode Returns: np.ndarray: scalar loss value<|endoftext|>
bddcf1a42597358a6604d07ce015af0e2fbc8dac1ddb8b9583e03ad6c3326aab
def log_likelihood(ground_truth, predicted, weights, sigma=1.0): 'Calculates log-likelihood of the ground_truth trajectory\n under the factorized gaussian mixture parametrized by predicted trajectories, weights and sigma.\n Please follow the link below for the metric formulation:\n https://github.com/yandex-research/shifts/blob/195b3214ff41e5b6c197ea7ef3e38552361f29fb/sdc/ysdc_dataset_api/evaluation/log_likelihood_based_metrics.pdf\n\n Args:\n ground_truth (np.ndarray): ground truth trajectory, (n_timestamps, 2)\n predicted (np.ndarray): predicted trajectories, (n_modes, n_timestamps, 2)\n weights (np.ndarray): confidence weights associated with trajectories, (n_modes,)\n sigma (float, optional): distribution standart deviation. Defaults to 1.0.\n\n Returns:\n float: calculated log-likelihood\n ' assert_weights_near_one(weights) assert_weights_non_negative(weights) displacement_norms_squared = np.sum(((ground_truth - predicted) ** 2), axis=(- 1)) normalizing_const = np.log(((2 * np.pi) * (sigma ** 2))) lse_args = (np.log(weights) - np.sum((normalizing_const + ((0.5 * displacement_norms_squared) / (sigma ** 2))), axis=(- 1))) max_arg = lse_args.max() ll = (np.log(np.sum(np.exp((lse_args - max_arg)))) + max_arg) return ll
Calculates log-likelihood of the ground_truth trajectory under the factorized gaussian mixture parametrized by predicted trajectories, weights and sigma. Please follow the link below for the metric formulation: https://github.com/yandex-research/shifts/blob/195b3214ff41e5b6c197ea7ef3e38552361f29fb/sdc/ysdc_dataset_api/evaluation/log_likelihood_based_metrics.pdf Args: ground_truth (np.ndarray): ground truth trajectory, (n_timestamps, 2) predicted (np.ndarray): predicted trajectories, (n_modes, n_timestamps, 2) weights (np.ndarray): confidence weights associated with trajectories, (n_modes,) sigma (float, optional): distribution standart deviation. Defaults to 1.0. Returns: float: calculated log-likelihood
sdc/ysdc_dataset_api/evaluation/metrics.py
log_likelihood
lkra/shifts
156
python
def log_likelihood(ground_truth, predicted, weights, sigma=1.0): 'Calculates log-likelihood of the ground_truth trajectory\n under the factorized gaussian mixture parametrized by predicted trajectories, weights and sigma.\n Please follow the link below for the metric formulation:\n https://github.com/yandex-research/shifts/blob/195b3214ff41e5b6c197ea7ef3e38552361f29fb/sdc/ysdc_dataset_api/evaluation/log_likelihood_based_metrics.pdf\n\n Args:\n ground_truth (np.ndarray): ground truth trajectory, (n_timestamps, 2)\n predicted (np.ndarray): predicted trajectories, (n_modes, n_timestamps, 2)\n weights (np.ndarray): confidence weights associated with trajectories, (n_modes,)\n sigma (float, optional): distribution standart deviation. Defaults to 1.0.\n\n Returns:\n float: calculated log-likelihood\n ' assert_weights_near_one(weights) assert_weights_non_negative(weights) displacement_norms_squared = np.sum(((ground_truth - predicted) ** 2), axis=(- 1)) normalizing_const = np.log(((2 * np.pi) * (sigma ** 2))) lse_args = (np.log(weights) - np.sum((normalizing_const + ((0.5 * displacement_norms_squared) / (sigma ** 2))), axis=(- 1))) max_arg = lse_args.max() ll = (np.log(np.sum(np.exp((lse_args - max_arg)))) + max_arg) return ll
def log_likelihood(ground_truth, predicted, weights, sigma=1.0): 'Calculates log-likelihood of the ground_truth trajectory\n under the factorized gaussian mixture parametrized by predicted trajectories, weights and sigma.\n Please follow the link below for the metric formulation:\n https://github.com/yandex-research/shifts/blob/195b3214ff41e5b6c197ea7ef3e38552361f29fb/sdc/ysdc_dataset_api/evaluation/log_likelihood_based_metrics.pdf\n\n Args:\n ground_truth (np.ndarray): ground truth trajectory, (n_timestamps, 2)\n predicted (np.ndarray): predicted trajectories, (n_modes, n_timestamps, 2)\n weights (np.ndarray): confidence weights associated with trajectories, (n_modes,)\n sigma (float, optional): distribution standart deviation. Defaults to 1.0.\n\n Returns:\n float: calculated log-likelihood\n ' assert_weights_near_one(weights) assert_weights_non_negative(weights) displacement_norms_squared = np.sum(((ground_truth - predicted) ** 2), axis=(- 1)) normalizing_const = np.log(((2 * np.pi) * (sigma ** 2))) lse_args = (np.log(weights) - np.sum((normalizing_const + ((0.5 * displacement_norms_squared) / (sigma ** 2))), axis=(- 1))) max_arg = lse_args.max() ll = (np.log(np.sum(np.exp((lse_args - max_arg)))) + max_arg) return ll<|docstring|>Calculates log-likelihood of the ground_truth trajectory under the factorized gaussian mixture parametrized by predicted trajectories, weights and sigma. Please follow the link below for the metric formulation: https://github.com/yandex-research/shifts/blob/195b3214ff41e5b6c197ea7ef3e38552361f29fb/sdc/ysdc_dataset_api/evaluation/log_likelihood_based_metrics.pdf Args: ground_truth (np.ndarray): ground truth trajectory, (n_timestamps, 2) predicted (np.ndarray): predicted trajectories, (n_modes, n_timestamps, 2) weights (np.ndarray): confidence weights associated with trajectories, (n_modes,) sigma (float, optional): distribution standart deviation. Defaults to 1.0. Returns: float: calculated log-likelihood<|endoftext|>
4d8e5394b099b3628b6f2dd8ab76063b40a6428ed4d78ca3293d809a69d3ece9
def corrected_negative_log_likelihood(ground_truth, predicted, weights, sigma=1.0): 'Calculates corrected negative log-likelihood of the ground_truth trajectory\n under the factorized gaussian mixture parametrized by predicted trajectories, weights and sigma.\n Please follow the link below for the metric formulation:\n https://github.com/yandex-research/shifts/blob/195b3214ff41e5b6c197ea7ef3e38552361f29fb/sdc/ysdc_dataset_api/evaluation/log_likelihood_based_metrics.pdf\n\n Args:\n ground_truth (np.ndarray): ground truth trajectory, (n_timestamps, 2)\n predicted (np.ndarray): predicted trajectories, (n_modes, n_timestamps, 2)\n weights (np.ndarray): confidence weights associated with trajectories, (n_modes,)\n\n Returns:\n float: calculated corrected negative log-likelihood\n ' n_timestamps = ground_truth.shape[0] return ((- log_likelihood(ground_truth, predicted, weights, sigma)) - (n_timestamps * np.log(((2 * np.pi) * (sigma ** 2)))))
Calculates corrected negative log-likelihood of the ground_truth trajectory under the factorized gaussian mixture parametrized by predicted trajectories, weights and sigma. Please follow the link below for the metric formulation: https://github.com/yandex-research/shifts/blob/195b3214ff41e5b6c197ea7ef3e38552361f29fb/sdc/ysdc_dataset_api/evaluation/log_likelihood_based_metrics.pdf Args: ground_truth (np.ndarray): ground truth trajectory, (n_timestamps, 2) predicted (np.ndarray): predicted trajectories, (n_modes, n_timestamps, 2) weights (np.ndarray): confidence weights associated with trajectories, (n_modes,) Returns: float: calculated corrected negative log-likelihood
sdc/ysdc_dataset_api/evaluation/metrics.py
corrected_negative_log_likelihood
lkra/shifts
156
python
def corrected_negative_log_likelihood(ground_truth, predicted, weights, sigma=1.0): 'Calculates corrected negative log-likelihood of the ground_truth trajectory\n under the factorized gaussian mixture parametrized by predicted trajectories, weights and sigma.\n Please follow the link below for the metric formulation:\n https://github.com/yandex-research/shifts/blob/195b3214ff41e5b6c197ea7ef3e38552361f29fb/sdc/ysdc_dataset_api/evaluation/log_likelihood_based_metrics.pdf\n\n Args:\n ground_truth (np.ndarray): ground truth trajectory, (n_timestamps, 2)\n predicted (np.ndarray): predicted trajectories, (n_modes, n_timestamps, 2)\n weights (np.ndarray): confidence weights associated with trajectories, (n_modes,)\n\n Returns:\n float: calculated corrected negative log-likelihood\n ' n_timestamps = ground_truth.shape[0] return ((- log_likelihood(ground_truth, predicted, weights, sigma)) - (n_timestamps * np.log(((2 * np.pi) * (sigma ** 2)))))
def corrected_negative_log_likelihood(ground_truth, predicted, weights, sigma=1.0): 'Calculates corrected negative log-likelihood of the ground_truth trajectory\n under the factorized gaussian mixture parametrized by predicted trajectories, weights and sigma.\n Please follow the link below for the metric formulation:\n https://github.com/yandex-research/shifts/blob/195b3214ff41e5b6c197ea7ef3e38552361f29fb/sdc/ysdc_dataset_api/evaluation/log_likelihood_based_metrics.pdf\n\n Args:\n ground_truth (np.ndarray): ground truth trajectory, (n_timestamps, 2)\n predicted (np.ndarray): predicted trajectories, (n_modes, n_timestamps, 2)\n weights (np.ndarray): confidence weights associated with trajectories, (n_modes,)\n\n Returns:\n float: calculated corrected negative log-likelihood\n ' n_timestamps = ground_truth.shape[0] return ((- log_likelihood(ground_truth, predicted, weights, sigma)) - (n_timestamps * np.log(((2 * np.pi) * (sigma ** 2)))))<|docstring|>Calculates corrected negative log-likelihood of the ground_truth trajectory under the factorized gaussian mixture parametrized by predicted trajectories, weights and sigma. Please follow the link below for the metric formulation: https://github.com/yandex-research/shifts/blob/195b3214ff41e5b6c197ea7ef3e38552361f29fb/sdc/ysdc_dataset_api/evaluation/log_likelihood_based_metrics.pdf Args: ground_truth (np.ndarray): ground truth trajectory, (n_timestamps, 2) predicted (np.ndarray): predicted trajectories, (n_modes, n_timestamps, 2) weights (np.ndarray): confidence weights associated with trajectories, (n_modes,) Returns: float: calculated corrected negative log-likelihood<|endoftext|>
0f69e95b471251d49dcb1bbae7549b3e15ed00b8f3e3e7979403c951420985a4
def batch_mean_metric(base_metric: Callable[([np.ndarray, np.ndarray], np.ndarray)], predictions: np.ndarray, ground_truth: np.ndarray) -> np.ndarray: 'During training, we may wish to produce a single prediction\n for each prediction request (i.e., just sample once from the\n posterior predictive; similar to standard training of an MC\n Dropout model). Then, we simply average over the batch dimension.\n\n Args:\n base_metric: function such as `average_displacement_error`\n predictions: shape (B, T, 2) where B is the number of\n prediction requests in the batch.\n ground_truth: shape (T, 2), there is only one ground truth\n trajectory for each prediction request.\n ' return np.mean(base_metric(predicted=predictions, ground_truth=ground_truth))
During training, we may wish to produce a single prediction for each prediction request (i.e., just sample once from the posterior predictive; similar to standard training of an MC Dropout model). Then, we simply average over the batch dimension. Args: base_metric: function such as `average_displacement_error` predictions: shape (B, T, 2) where B is the number of prediction requests in the batch. ground_truth: shape (T, 2), there is only one ground truth trajectory for each prediction request.
sdc/ysdc_dataset_api/evaluation/metrics.py
batch_mean_metric
lkra/shifts
156
python
def batch_mean_metric(base_metric: Callable[([np.ndarray, np.ndarray], np.ndarray)], predictions: np.ndarray, ground_truth: np.ndarray) -> np.ndarray: 'During training, we may wish to produce a single prediction\n for each prediction request (i.e., just sample once from the\n posterior predictive; similar to standard training of an MC\n Dropout model). Then, we simply average over the batch dimension.\n\n Args:\n base_metric: function such as `average_displacement_error`\n predictions: shape (B, T, 2) where B is the number of\n prediction requests in the batch.\n ground_truth: shape (T, 2), there is only one ground truth\n trajectory for each prediction request.\n ' return np.mean(base_metric(predicted=predictions, ground_truth=ground_truth))
def batch_mean_metric(base_metric: Callable[([np.ndarray, np.ndarray], np.ndarray)], predictions: np.ndarray, ground_truth: np.ndarray) -> np.ndarray: 'During training, we may wish to produce a single prediction\n for each prediction request (i.e., just sample once from the\n posterior predictive; similar to standard training of an MC\n Dropout model). Then, we simply average over the batch dimension.\n\n Args:\n base_metric: function such as `average_displacement_error`\n predictions: shape (B, T, 2) where B is the number of\n prediction requests in the batch.\n ground_truth: shape (T, 2), there is only one ground truth\n trajectory for each prediction request.\n ' return np.mean(base_metric(predicted=predictions, ground_truth=ground_truth))<|docstring|>During training, we may wish to produce a single prediction for each prediction request (i.e., just sample once from the posterior predictive; similar to standard training of an MC Dropout model). Then, we simply average over the batch dimension. Args: base_metric: function such as `average_displacement_error` predictions: shape (B, T, 2) where B is the number of prediction requests in the batch. ground_truth: shape (T, 2), there is only one ground truth trajectory for each prediction request.<|endoftext|>
62e3a6064d7a7e056fdcccab6b519bbad0248fd04f8944d70ec5562a43c9cca3
def average_displacement_error_torch(ground_truth: torch.Tensor, predicted: torch.Tensor) -> torch.Tensor: 'Calculates average displacement error\n ADE(y) = (1/T) \\sum_{t=1}^T || s_t - s^*_t ||_2\n where T = num_timesteps, y = (s_1, ..., s_T)\n\n Does not perform any mode aggregation.\n\n Args:\n ground_truth (torch.Tensor): tensor of shape (n_timestamps, 2)\n predicted (torch.Tensor): tensor of shape (n_modes, n_timestamps, 2)\n\n Returns:\n torch.Tensor: tensor of shape (n_modes,)\n ' return torch.mean(torch.norm((predicted - ground_truth), dim=(- 1)), dim=(- 1))
Calculates average displacement error ADE(y) = (1/T) \sum_{t=1}^T || s_t - s^*_t ||_2 where T = num_timesteps, y = (s_1, ..., s_T) Does not perform any mode aggregation. Args: ground_truth (torch.Tensor): tensor of shape (n_timestamps, 2) predicted (torch.Tensor): tensor of shape (n_modes, n_timestamps, 2) Returns: torch.Tensor: tensor of shape (n_modes,)
sdc/ysdc_dataset_api/evaluation/metrics.py
average_displacement_error_torch
lkra/shifts
156
python
def average_displacement_error_torch(ground_truth: torch.Tensor, predicted: torch.Tensor) -> torch.Tensor: 'Calculates average displacement error\n ADE(y) = (1/T) \\sum_{t=1}^T || s_t - s^*_t ||_2\n where T = num_timesteps, y = (s_1, ..., s_T)\n\n Does not perform any mode aggregation.\n\n Args:\n ground_truth (torch.Tensor): tensor of shape (n_timestamps, 2)\n predicted (torch.Tensor): tensor of shape (n_modes, n_timestamps, 2)\n\n Returns:\n torch.Tensor: tensor of shape (n_modes,)\n ' return torch.mean(torch.norm((predicted - ground_truth), dim=(- 1)), dim=(- 1))
def average_displacement_error_torch(ground_truth: torch.Tensor, predicted: torch.Tensor) -> torch.Tensor: 'Calculates average displacement error\n ADE(y) = (1/T) \\sum_{t=1}^T || s_t - s^*_t ||_2\n where T = num_timesteps, y = (s_1, ..., s_T)\n\n Does not perform any mode aggregation.\n\n Args:\n ground_truth (torch.Tensor): tensor of shape (n_timestamps, 2)\n predicted (torch.Tensor): tensor of shape (n_modes, n_timestamps, 2)\n\n Returns:\n torch.Tensor: tensor of shape (n_modes,)\n ' return torch.mean(torch.norm((predicted - ground_truth), dim=(- 1)), dim=(- 1))<|docstring|>Calculates average displacement error ADE(y) = (1/T) \sum_{t=1}^T || s_t - s^*_t ||_2 where T = num_timesteps, y = (s_1, ..., s_T) Does not perform any mode aggregation. Args: ground_truth (torch.Tensor): tensor of shape (n_timestamps, 2) predicted (torch.Tensor): tensor of shape (n_modes, n_timestamps, 2) Returns: torch.Tensor: tensor of shape (n_modes,)<|endoftext|>
a0411207a7b09dd39b6433d149c9979c4f1063aeaa3fbf324d78ad10e213228e
def final_displacement_error_torch(ground_truth: torch.Tensor, predicted: torch.Tensor) -> torch.Tensor: 'Computes final displacement error\n FDE(y) = (1/T) || s_T - s^*_T ||_2\n where y = (s_1, ..., s_T)\n\n Does not perform any mode aggregation.\n\n Args:\n ground_truth (torch.Tensor): tensor of shape (n_timestamps, 2)\n predicted (torch.Tensor): tensor of shape (n_modes, n_timestamps, 2)\n\n Returns:\n torch.Tensor: tensor of shape (n_modes,)\n ' return torch.norm((ground_truth - predicted), dim=(- 1))[(:, (- 1))]
Computes final displacement error FDE(y) = (1/T) || s_T - s^*_T ||_2 where y = (s_1, ..., s_T) Does not perform any mode aggregation. Args: ground_truth (torch.Tensor): tensor of shape (n_timestamps, 2) predicted (torch.Tensor): tensor of shape (n_modes, n_timestamps, 2) Returns: torch.Tensor: tensor of shape (n_modes,)
sdc/ysdc_dataset_api/evaluation/metrics.py
final_displacement_error_torch
lkra/shifts
156
python
def final_displacement_error_torch(ground_truth: torch.Tensor, predicted: torch.Tensor) -> torch.Tensor: 'Computes final displacement error\n FDE(y) = (1/T) || s_T - s^*_T ||_2\n where y = (s_1, ..., s_T)\n\n Does not perform any mode aggregation.\n\n Args:\n ground_truth (torch.Tensor): tensor of shape (n_timestamps, 2)\n predicted (torch.Tensor): tensor of shape (n_modes, n_timestamps, 2)\n\n Returns:\n torch.Tensor: tensor of shape (n_modes,)\n ' return torch.norm((ground_truth - predicted), dim=(- 1))[(:, (- 1))]
def final_displacement_error_torch(ground_truth: torch.Tensor, predicted: torch.Tensor) -> torch.Tensor: 'Computes final displacement error\n FDE(y) = (1/T) || s_T - s^*_T ||_2\n where y = (s_1, ..., s_T)\n\n Does not perform any mode aggregation.\n\n Args:\n ground_truth (torch.Tensor): tensor of shape (n_timestamps, 2)\n predicted (torch.Tensor): tensor of shape (n_modes, n_timestamps, 2)\n\n Returns:\n torch.Tensor: tensor of shape (n_modes,)\n ' return torch.norm((ground_truth - predicted), dim=(- 1))[(:, (- 1))]<|docstring|>Computes final displacement error FDE(y) = (1/T) || s_T - s^*_T ||_2 where y = (s_1, ..., s_T) Does not perform any mode aggregation. Args: ground_truth (torch.Tensor): tensor of shape (n_timestamps, 2) predicted (torch.Tensor): tensor of shape (n_modes, n_timestamps, 2) Returns: torch.Tensor: tensor of shape (n_modes,)<|endoftext|>
55b0f972009718ad3ee61c13883f6dfac068f3b18a6b753746f4d6fcc14c5860
def batch_mean_metric_torch(base_metric: Callable[([torch.Tensor, torch.Tensor], torch.Tensor)], predictions: torch.Tensor, ground_truth: torch.Tensor) -> torch.Tensor: 'During training, we may wish to produce a single prediction\n for each prediction request (i.e., just sample once from the\n posterior predictive; similar to standard training of an MC\n Dropout model). Then, we simply average over the batch dimension.\n\n For a Torch model we would expect a Torch base metric\n (e.g., `average_displacement_error_torch`), Torch tensor inputs,\n and a torch.Tensor return type for backpropagation.\n\n Args:\n base_metric: Callable, function such as\n `average_displacement_error_torch`\n predictions: shape (B, T, 2) where B is the number of\n prediction requests in the batch.\n ground_truth: shape (T, 2), there is only one ground truth\n trajectory for each prediction request.\n ' return torch.mean(base_metric(predicted=predictions, ground_truth=ground_truth))
During training, we may wish to produce a single prediction for each prediction request (i.e., just sample once from the posterior predictive; similar to standard training of an MC Dropout model). Then, we simply average over the batch dimension. For a Torch model we would expect a Torch base metric (e.g., `average_displacement_error_torch`), Torch tensor inputs, and a torch.Tensor return type for backpropagation. Args: base_metric: Callable, function such as `average_displacement_error_torch` predictions: shape (B, T, 2) where B is the number of prediction requests in the batch. ground_truth: shape (T, 2), there is only one ground truth trajectory for each prediction request.
sdc/ysdc_dataset_api/evaluation/metrics.py
batch_mean_metric_torch
lkra/shifts
156
python
def batch_mean_metric_torch(base_metric: Callable[([torch.Tensor, torch.Tensor], torch.Tensor)], predictions: torch.Tensor, ground_truth: torch.Tensor) -> torch.Tensor: 'During training, we may wish to produce a single prediction\n for each prediction request (i.e., just sample once from the\n posterior predictive; similar to standard training of an MC\n Dropout model). Then, we simply average over the batch dimension.\n\n For a Torch model we would expect a Torch base metric\n (e.g., `average_displacement_error_torch`), Torch tensor inputs,\n and a torch.Tensor return type for backpropagation.\n\n Args:\n base_metric: Callable, function such as\n `average_displacement_error_torch`\n predictions: shape (B, T, 2) where B is the number of\n prediction requests in the batch.\n ground_truth: shape (T, 2), there is only one ground truth\n trajectory for each prediction request.\n ' return torch.mean(base_metric(predicted=predictions, ground_truth=ground_truth))
def batch_mean_metric_torch(base_metric: Callable[([torch.Tensor, torch.Tensor], torch.Tensor)], predictions: torch.Tensor, ground_truth: torch.Tensor) -> torch.Tensor: 'During training, we may wish to produce a single prediction\n for each prediction request (i.e., just sample once from the\n posterior predictive; similar to standard training of an MC\n Dropout model). Then, we simply average over the batch dimension.\n\n For a Torch model we would expect a Torch base metric\n (e.g., `average_displacement_error_torch`), Torch tensor inputs,\n and a torch.Tensor return type for backpropagation.\n\n Args:\n base_metric: Callable, function such as\n `average_displacement_error_torch`\n predictions: shape (B, T, 2) where B is the number of\n prediction requests in the batch.\n ground_truth: shape (T, 2), there is only one ground truth\n trajectory for each prediction request.\n ' return torch.mean(base_metric(predicted=predictions, ground_truth=ground_truth))<|docstring|>During training, we may wish to produce a single prediction for each prediction request (i.e., just sample once from the posterior predictive; similar to standard training of an MC Dropout model). Then, we simply average over the batch dimension. For a Torch model we would expect a Torch base metric (e.g., `average_displacement_error_torch`), Torch tensor inputs, and a torch.Tensor return type for backpropagation. Args: base_metric: Callable, function such as `average_displacement_error_torch` predictions: shape (B, T, 2) where B is the number of prediction requests in the batch. ground_truth: shape (T, 2), there is only one ground truth trajectory for each prediction request.<|endoftext|>
1eabc2ab2f054087916d7e3088f59c6920f4746669a135b9d5e962f73dc02902
def compute_all_aggregator_metrics(per_plan_confidences: np.ndarray, predictions: np.ndarray, ground_truth: np.ndarray, metric_name: Optional[str]=None): 'Batch size B, we assume consistent number of predictions D per scene.\n\n per_plan_confidences: np.ndarray, shape (B, D), we assume that all\n prediction requests have the same number of proposed plans here.\n predictions: np.ndarray, shape (B, D, T, 2)\n ground_truth: np.ndarray, shape (B, T, 2), there is only one\n ground_truth trajectory for each prediction request.\n metric_name: Optional[str], if specified, compute a particular metric only.\n ' metrics_dict = defaultdict(list) if (metric_name is None): base_metrics = VALID_BASE_METRICS else: base_metrics = [] for metric in VALID_BASE_METRICS: if (metric.upper() in metric_name): base_metrics.append(metric) if (not base_metrics): raise ValueError(f'Invalid metric name {metric_name} specified.') if (metric_name is None): aggregators = VALID_AGGREGATORS else: aggregators = [] for agg in VALID_AGGREGATORS: if (agg in metric_name): aggregators.append(agg) if (not aggregators): raise ValueError(f'Invalid metric name {metric_name} specified.') for base_metric_name in base_metrics: if (base_metric_name == 'ade'): base_metric = average_displacement_error elif (base_metric_name == 'fde'): base_metric = final_displacement_error else: raise NotImplementedError for (index, (req_preds, req_gt, req_plan_confs)) in enumerate(zip(predictions, ground_truth, per_plan_confidences)): req_plan_losses = base_metric(predicted=req_preds, ground_truth=req_gt) for aggregator in aggregators: metric_key = f'{aggregator}{base_metric_name.upper()}' metrics_dict[metric_key].append(aggregate_prediction_request_losses(aggregator=aggregator, per_plan_losses=req_plan_losses, per_plan_weights=_softmax_normalize(req_plan_confs))) metrics_dict = {key: np.stack(values) for (key, values) in metrics_dict.items()} return metrics_dict
Batch size B, we assume consistent number of predictions D per scene. per_plan_confidences: np.ndarray, shape (B, D), we assume that all prediction requests have the same number of proposed plans here. predictions: np.ndarray, shape (B, D, T, 2) ground_truth: np.ndarray, shape (B, T, 2), there is only one ground_truth trajectory for each prediction request. metric_name: Optional[str], if specified, compute a particular metric only.
sdc/ysdc_dataset_api/evaluation/metrics.py
compute_all_aggregator_metrics
lkra/shifts
156
python
def compute_all_aggregator_metrics(per_plan_confidences: np.ndarray, predictions: np.ndarray, ground_truth: np.ndarray, metric_name: Optional[str]=None): 'Batch size B, we assume consistent number of predictions D per scene.\n\n per_plan_confidences: np.ndarray, shape (B, D), we assume that all\n prediction requests have the same number of proposed plans here.\n predictions: np.ndarray, shape (B, D, T, 2)\n ground_truth: np.ndarray, shape (B, T, 2), there is only one\n ground_truth trajectory for each prediction request.\n metric_name: Optional[str], if specified, compute a particular metric only.\n ' metrics_dict = defaultdict(list) if (metric_name is None): base_metrics = VALID_BASE_METRICS else: base_metrics = [] for metric in VALID_BASE_METRICS: if (metric.upper() in metric_name): base_metrics.append(metric) if (not base_metrics): raise ValueError(f'Invalid metric name {metric_name} specified.') if (metric_name is None): aggregators = VALID_AGGREGATORS else: aggregators = [] for agg in VALID_AGGREGATORS: if (agg in metric_name): aggregators.append(agg) if (not aggregators): raise ValueError(f'Invalid metric name {metric_name} specified.') for base_metric_name in base_metrics: if (base_metric_name == 'ade'): base_metric = average_displacement_error elif (base_metric_name == 'fde'): base_metric = final_displacement_error else: raise NotImplementedError for (index, (req_preds, req_gt, req_plan_confs)) in enumerate(zip(predictions, ground_truth, per_plan_confidences)): req_plan_losses = base_metric(predicted=req_preds, ground_truth=req_gt) for aggregator in aggregators: metric_key = f'{aggregator}{base_metric_name.upper()}' metrics_dict[metric_key].append(aggregate_prediction_request_losses(aggregator=aggregator, per_plan_losses=req_plan_losses, per_plan_weights=_softmax_normalize(req_plan_confs))) metrics_dict = {key: np.stack(values) for (key, values) in metrics_dict.items()} return metrics_dict
def compute_all_aggregator_metrics(per_plan_confidences: np.ndarray, predictions: np.ndarray, ground_truth: np.ndarray, metric_name: Optional[str]=None): 'Batch size B, we assume consistent number of predictions D per scene.\n\n per_plan_confidences: np.ndarray, shape (B, D), we assume that all\n prediction requests have the same number of proposed plans here.\n predictions: np.ndarray, shape (B, D, T, 2)\n ground_truth: np.ndarray, shape (B, T, 2), there is only one\n ground_truth trajectory for each prediction request.\n metric_name: Optional[str], if specified, compute a particular metric only.\n ' metrics_dict = defaultdict(list) if (metric_name is None): base_metrics = VALID_BASE_METRICS else: base_metrics = [] for metric in VALID_BASE_METRICS: if (metric.upper() in metric_name): base_metrics.append(metric) if (not base_metrics): raise ValueError(f'Invalid metric name {metric_name} specified.') if (metric_name is None): aggregators = VALID_AGGREGATORS else: aggregators = [] for agg in VALID_AGGREGATORS: if (agg in metric_name): aggregators.append(agg) if (not aggregators): raise ValueError(f'Invalid metric name {metric_name} specified.') for base_metric_name in base_metrics: if (base_metric_name == 'ade'): base_metric = average_displacement_error elif (base_metric_name == 'fde'): base_metric = final_displacement_error else: raise NotImplementedError for (index, (req_preds, req_gt, req_plan_confs)) in enumerate(zip(predictions, ground_truth, per_plan_confidences)): req_plan_losses = base_metric(predicted=req_preds, ground_truth=req_gt) for aggregator in aggregators: metric_key = f'{aggregator}{base_metric_name.upper()}' metrics_dict[metric_key].append(aggregate_prediction_request_losses(aggregator=aggregator, per_plan_losses=req_plan_losses, per_plan_weights=_softmax_normalize(req_plan_confs))) metrics_dict = {key: np.stack(values) for (key, values) in metrics_dict.items()} return metrics_dict<|docstring|>Batch size B, we assume consistent number of predictions 
D per scene. per_plan_confidences: np.ndarray, shape (B, D), we assume that all prediction requests have the same number of proposed plans here. predictions: np.ndarray, shape (B, D, T, 2) ground_truth: np.ndarray, shape (B, T, 2), there is only one ground_truth trajectory for each prediction request. metric_name: Optional[str], if specified, compute a particular metric only.<|endoftext|>
fe310f7336a6c53112eff6d33c0e1824fc4a6dbba2473c069ec3f92c6e317c4a
def __init__(self, E_init=None, S_init=None, **kwargs): 'Initialize the Camel.\n\n\t\tArgs:\n\t\t\tE_init (Optional[float]): Starting endurance of Camel.\n\t\t\tS_init (Optional[float]): Stating supply of Camel.\n\t\t\tkwargs (Dict[str, Any]): Additional arguments.\n\n\t\tSee Also:\n\t\t\t* :func:`WeOptPy.algorithms.Individual.__init__`\n\t\t' Individual.__init__(self, **kwargs) (self.E, self.E_past) = (E_init, E_init) (self.S, self.S_past) = (S_init, S_init) (self.x_past, self.f_past) = (self.x, self.f) self.steps = 0
Initialize the Camel. Args: E_init (Optional[float]): Starting endurance of Camel. S_init (Optional[float]): Stating supply of Camel. kwargs (Dict[str, Any]): Additional arguments. See Also: * :func:`WeOptPy.algorithms.Individual.__init__`
WeOptPy/algorithms/ca.py
__init__
kb2623/WeOptPy
1
python
def __init__(self, E_init=None, S_init=None, **kwargs): 'Initialize the Camel.\n\n\t\tArgs:\n\t\t\tE_init (Optional[float]): Starting endurance of Camel.\n\t\t\tS_init (Optional[float]): Stating supply of Camel.\n\t\t\tkwargs (Dict[str, Any]): Additional arguments.\n\n\t\tSee Also:\n\t\t\t* :func:`WeOptPy.algorithms.Individual.__init__`\n\t\t' Individual.__init__(self, **kwargs) (self.E, self.E_past) = (E_init, E_init) (self.S, self.S_past) = (S_init, S_init) (self.x_past, self.f_past) = (self.x, self.f) self.steps = 0
def __init__(self, E_init=None, S_init=None, **kwargs): 'Initialize the Camel.\n\n\t\tArgs:\n\t\t\tE_init (Optional[float]): Starting endurance of Camel.\n\t\t\tS_init (Optional[float]): Stating supply of Camel.\n\t\t\tkwargs (Dict[str, Any]): Additional arguments.\n\n\t\tSee Also:\n\t\t\t* :func:`WeOptPy.algorithms.Individual.__init__`\n\t\t' Individual.__init__(self, **kwargs) (self.E, self.E_past) = (E_init, E_init) (self.S, self.S_past) = (S_init, S_init) (self.x_past, self.f_past) = (self.x, self.f) self.steps = 0<|docstring|>Initialize the Camel. Args: E_init (Optional[float]): Starting endurance of Camel. S_init (Optional[float]): Stating supply of Camel. kwargs (Dict[str, Any]): Additional arguments. See Also: * :func:`WeOptPy.algorithms.Individual.__init__`<|endoftext|>
09e9b85f7a3b4c95a08bccc9ce12d772439eb0f347554d328a88233f9baabede
def nextt(self, T_min, T_max, rnd=rand): 'Apply nextT function on Camel.\n\n\t\tArgs:\n\t\t\tT_min (float): TODO\n\t\t\tT_max (float): TODO\n\t\t\trnd (Optional[mtrand.RandomState]): Random number generator.\n\t\t' self.T = (((T_max - T_min) * rnd.rand()) + T_min)
Apply nextT function on Camel. Args: T_min (float): TODO T_max (float): TODO rnd (Optional[mtrand.RandomState]): Random number generator.
WeOptPy/algorithms/ca.py
nextt
kb2623/WeOptPy
1
python
def nextt(self, T_min, T_max, rnd=rand): 'Apply nextT function on Camel.\n\n\t\tArgs:\n\t\t\tT_min (float): TODO\n\t\t\tT_max (float): TODO\n\t\t\trnd (Optional[mtrand.RandomState]): Random number generator.\n\t\t' self.T = (((T_max - T_min) * rnd.rand()) + T_min)
def nextt(self, T_min, T_max, rnd=rand): 'Apply nextT function on Camel.\n\n\t\tArgs:\n\t\t\tT_min (float): TODO\n\t\t\tT_max (float): TODO\n\t\t\trnd (Optional[mtrand.RandomState]): Random number generator.\n\t\t' self.T = (((T_max - T_min) * rnd.rand()) + T_min)<|docstring|>Apply nextT function on Camel. Args: T_min (float): TODO T_max (float): TODO rnd (Optional[mtrand.RandomState]): Random number generator.<|endoftext|>
b7cccf3a4179772dcc2872c07c799bbe543cde4aace9c77c3555d844d6424948
def nexts(self, omega, n_gens): 'Apply nextS on Camel.\n\n\t\tArgs:\n\t\t\tomega (float): TODO.\n\t\t\tn_gens (int): Number of Camel Algorithm iterations/generations.\n\t\t' self.S = (self.S_past * (1 - ((omega * self.steps) / n_gens)))
Apply nextS on Camel. Args: omega (float): TODO. n_gens (int): Number of Camel Algorithm iterations/generations.
WeOptPy/algorithms/ca.py
nexts
kb2623/WeOptPy
1
python
def nexts(self, omega, n_gens): 'Apply nextS on Camel.\n\n\t\tArgs:\n\t\t\tomega (float): TODO.\n\t\t\tn_gens (int): Number of Camel Algorithm iterations/generations.\n\t\t' self.S = (self.S_past * (1 - ((omega * self.steps) / n_gens)))
def nexts(self, omega, n_gens): 'Apply nextS on Camel.\n\n\t\tArgs:\n\t\t\tomega (float): TODO.\n\t\t\tn_gens (int): Number of Camel Algorithm iterations/generations.\n\t\t' self.S = (self.S_past * (1 - ((omega * self.steps) / n_gens)))<|docstring|>Apply nextS on Camel. Args: omega (float): TODO. n_gens (int): Number of Camel Algorithm iterations/generations.<|endoftext|>
c72bc1ab55210221230f1f03f99a060b5ffa40344a837500ea46f9fee2512ca6
def nexte(self, n_gens, T_max): 'Apply function nextE on function on Camel.\n\n\t\tArgs:\n\t\t\tn_gens (int): Number of Camel Algorithm iterations/generations\n\t\t\tT_max (float): Maximum temperature of environment\n\t\t' self.E = ((self.E_past * (1 - (self.T / T_max))) * (1 - (self.steps / n_gens)))
Apply function nextE on function on Camel. Args: n_gens (int): Number of Camel Algorithm iterations/generations T_max (float): Maximum temperature of environment
WeOptPy/algorithms/ca.py
nexte
kb2623/WeOptPy
1
python
def nexte(self, n_gens, T_max): 'Apply function nextE on function on Camel.\n\n\t\tArgs:\n\t\t\tn_gens (int): Number of Camel Algorithm iterations/generations\n\t\t\tT_max (float): Maximum temperature of environment\n\t\t' self.E = ((self.E_past * (1 - (self.T / T_max))) * (1 - (self.steps / n_gens)))
def nexte(self, n_gens, T_max): 'Apply function nextE on function on Camel.\n\n\t\tArgs:\n\t\t\tn_gens (int): Number of Camel Algorithm iterations/generations\n\t\t\tT_max (float): Maximum temperature of environment\n\t\t' self.E = ((self.E_past * (1 - (self.T / T_max))) * (1 - (self.steps / n_gens)))<|docstring|>Apply function nextE on function on Camel. Args: n_gens (int): Number of Camel Algorithm iterations/generations T_max (float): Maximum temperature of environment<|endoftext|>
b765ba33ac0a9596b84d79f5454394fdf43977b1d11d5dcd4a69157ce51aebae
def nextx(self, cb, E_init, S_init, task, rnd=rand): 'Apply function nextX on Camel.\n\n\t\tThis method/function move this Camel to new position in search space.\n\n\t\tArgs:\n\t\t\tcb (Camel): Best Camel in population.\n\t\t\tE_init (float): Starting endurance of camel.\n\t\t\tS_init (float): Starting supply of camel.\n\t\t\ttask (Task): Optimization task.\n\t\t\trnd (Optional[mtrand.RandomState]): Random number generator.\n\t\t' delta = ((- 1) + (rnd.rand() * 2)) self.x = (self.x_past + (((delta * (1 - (self.E / E_init))) * np.exp((1 - (self.S / S_init)))) * (cb - self.x_past))) if (not task.is_feasible(self.x)): self.x = self.x_past else: self.f = task.eval(self.x)
Apply function nextX on Camel. This method/function move this Camel to new position in search space. Args: cb (Camel): Best Camel in population. E_init (float): Starting endurance of camel. S_init (float): Starting supply of camel. task (Task): Optimization task. rnd (Optional[mtrand.RandomState]): Random number generator.
WeOptPy/algorithms/ca.py
nextx
kb2623/WeOptPy
1
python
def nextx(self, cb, E_init, S_init, task, rnd=rand): 'Apply function nextX on Camel.\n\n\t\tThis method/function move this Camel to new position in search space.\n\n\t\tArgs:\n\t\t\tcb (Camel): Best Camel in population.\n\t\t\tE_init (float): Starting endurance of camel.\n\t\t\tS_init (float): Starting supply of camel.\n\t\t\ttask (Task): Optimization task.\n\t\t\trnd (Optional[mtrand.RandomState]): Random number generator.\n\t\t' delta = ((- 1) + (rnd.rand() * 2)) self.x = (self.x_past + (((delta * (1 - (self.E / E_init))) * np.exp((1 - (self.S / S_init)))) * (cb - self.x_past))) if (not task.is_feasible(self.x)): self.x = self.x_past else: self.f = task.eval(self.x)
def nextx(self, cb, E_init, S_init, task, rnd=rand): 'Apply function nextX on Camel.\n\n\t\tThis method/function move this Camel to new position in search space.\n\n\t\tArgs:\n\t\t\tcb (Camel): Best Camel in population.\n\t\t\tE_init (float): Starting endurance of camel.\n\t\t\tS_init (float): Starting supply of camel.\n\t\t\ttask (Task): Optimization task.\n\t\t\trnd (Optional[mtrand.RandomState]): Random number generator.\n\t\t' delta = ((- 1) + (rnd.rand() * 2)) self.x = (self.x_past + (((delta * (1 - (self.E / E_init))) * np.exp((1 - (self.S / S_init)))) * (cb - self.x_past))) if (not task.is_feasible(self.x)): self.x = self.x_past else: self.f = task.eval(self.x)<|docstring|>Apply function nextX on Camel. This method/function move this Camel to new position in search space. Args: cb (Camel): Best Camel in population. E_init (float): Starting endurance of camel. S_init (float): Starting supply of camel. task (Task): Optimization task. rnd (Optional[mtrand.RandomState]): Random number generator.<|endoftext|>
46db094e851eae0ec29fa70369f27c6e820e8669a591552ef4f8918d49a07438
def next(self): 'Save new position of Camel to old position.' (self.x_past, self.f_past, self.E_past, self.S_past) = (self.x.copy(), self.f, self.E, self.S) self.steps += 1 return self
Save new position of Camel to old position.
WeOptPy/algorithms/ca.py
next
kb2623/WeOptPy
1
python
def next(self): (self.x_past, self.f_past, self.E_past, self.S_past) = (self.x.copy(), self.f, self.E, self.S) self.steps += 1 return self
def next(self): (self.x_past, self.f_past, self.E_past, self.S_past) = (self.x.copy(), self.f, self.E, self.S) self.steps += 1 return self<|docstring|>Save new position of Camel to old position.<|endoftext|>
bc7fa6c45fea5c1d015fad5babc7bd6a37f9e2dbe370752589e5a864c2dc5aaa
def refill(self, S=None, E=None): 'Apply this function to Camel.\n\n\t\tArgs:\n\t\t\tS (float): New value of Camel supply.\n\t\t\tE (float): New value of Camel endurance.\n\t\t' (self.S, self.E) = (S, E)
Apply this function to Camel. Args: S (float): New value of Camel supply. E (float): New value of Camel endurance.
WeOptPy/algorithms/ca.py
refill
kb2623/WeOptPy
1
python
def refill(self, S=None, E=None): 'Apply this function to Camel.\n\n\t\tArgs:\n\t\t\tS (float): New value of Camel supply.\n\t\t\tE (float): New value of Camel endurance.\n\t\t' (self.S, self.E) = (S, E)
def refill(self, S=None, E=None): 'Apply this function to Camel.\n\n\t\tArgs:\n\t\t\tS (float): New value of Camel supply.\n\t\t\tE (float): New value of Camel endurance.\n\t\t' (self.S, self.E) = (S, E)<|docstring|>Apply this function to Camel. Args: S (float): New value of Camel supply. E (float): New value of Camel endurance.<|endoftext|>
a653079c05774a95339cb0efa037e44109860abc920530bbc828b48cc81e30a2
@staticmethod def algorithm_info(): 'Get information about algorithm.\n\n\t\tReturns:\n\t\t\tstr: Algorithm information\n\t\t' return 'Ali, Ramzy. (2016). Novel Optimization Algorithm Inspired by Camel Traveling Behavior. Iraq J. Electrical and Electronic Engineering. 12. 167-177.'
Get information about algorithm. Returns: str: Algorithm information
WeOptPy/algorithms/ca.py
algorithm_info
kb2623/WeOptPy
1
python
@staticmethod def algorithm_info(): 'Get information about algorithm.\n\n\t\tReturns:\n\t\t\tstr: Algorithm information\n\t\t' return 'Ali, Ramzy. (2016). Novel Optimization Algorithm Inspired by Camel Traveling Behavior. Iraq J. Electrical and Electronic Engineering. 12. 167-177.'
@staticmethod def algorithm_info(): 'Get information about algorithm.\n\n\t\tReturns:\n\t\t\tstr: Algorithm information\n\t\t' return 'Ali, Ramzy. (2016). Novel Optimization Algorithm Inspired by Camel Traveling Behavior. Iraq J. Electrical and Electronic Engineering. 12. 167-177.'<|docstring|>Get information about algorithm. Returns: str: Algorithm information<|endoftext|>
c8768cd46da45bba232fc70429fd4132ae3ca499cd2da1ffba11e940eab3656d
@staticmethod def type_parameters(): 'Get dictionary with functions for checking values of parameters.\n\n\t\tReturns:\n\t\t\tDict[str, Callable]:\n\t\t\t\t* omega (Callable[[Union[int, float]], bool])\n\t\t\t\t* mu (Callable[[float], bool])\n\t\t\t\t* alpha (Callable[[float], bool])\n\t\t\t\t* S_init (Callable[[Union[float, int]], bool])\n\t\t\t\t* E_init (Callable[[Union[float, int]], bool])\n\t\t\t\t* T_min (Callable[[Union[float, int], bool])\n\t\t\t\t* T_max (Callable[[Union[float, int], bool])\n\n\t\tSee Also:\n\t\t\t* :func:`WeOptPy.algorithms.Algorithm.typeParameters`\n\t\t' d = Algorithm.type_parameters() d.update({'omega': (lambda x: isinstance(x, (float, int))), 'mu': (lambda x: (isinstance(x, float) and (0 <= x <= 1))), 'alpha': (lambda x: (isinstance(x, float) and (0 <= x <= 1))), 'S_init': (lambda x: (isinstance(x, (float, int)) and (x > 0))), 'E_init': (lambda x: (isinstance(x, (float, int)) and (x > 0))), 'T_min': (lambda x: (isinstance(x, (float, int)) and (x > 0))), 'T_max': (lambda x: (isinstance(x, (float, int)) and (x > 0)))}) return d
Get dictionary with functions for checking values of parameters. Returns: Dict[str, Callable]: * omega (Callable[[Union[int, float]], bool]) * mu (Callable[[float], bool]) * alpha (Callable[[float], bool]) * S_init (Callable[[Union[float, int]], bool]) * E_init (Callable[[Union[float, int]], bool]) * T_min (Callable[[Union[float, int], bool]) * T_max (Callable[[Union[float, int], bool]) See Also: * :func:`WeOptPy.algorithms.Algorithm.typeParameters`
WeOptPy/algorithms/ca.py
type_parameters
kb2623/WeOptPy
1
python
@staticmethod def type_parameters(): 'Get dictionary with functions for checking values of parameters.\n\n\t\tReturns:\n\t\t\tDict[str, Callable]:\n\t\t\t\t* omega (Callable[[Union[int, float]], bool])\n\t\t\t\t* mu (Callable[[float], bool])\n\t\t\t\t* alpha (Callable[[float], bool])\n\t\t\t\t* S_init (Callable[[Union[float, int]], bool])\n\t\t\t\t* E_init (Callable[[Union[float, int]], bool])\n\t\t\t\t* T_min (Callable[[Union[float, int], bool])\n\t\t\t\t* T_max (Callable[[Union[float, int], bool])\n\n\t\tSee Also:\n\t\t\t* :func:`WeOptPy.algorithms.Algorithm.typeParameters`\n\t\t' d = Algorithm.type_parameters() d.update({'omega': (lambda x: isinstance(x, (float, int))), 'mu': (lambda x: (isinstance(x, float) and (0 <= x <= 1))), 'alpha': (lambda x: (isinstance(x, float) and (0 <= x <= 1))), 'S_init': (lambda x: (isinstance(x, (float, int)) and (x > 0))), 'E_init': (lambda x: (isinstance(x, (float, int)) and (x > 0))), 'T_min': (lambda x: (isinstance(x, (float, int)) and (x > 0))), 'T_max': (lambda x: (isinstance(x, (float, int)) and (x > 0)))}) return d
@staticmethod def type_parameters(): 'Get dictionary with functions for checking values of parameters.\n\n\t\tReturns:\n\t\t\tDict[str, Callable]:\n\t\t\t\t* omega (Callable[[Union[int, float]], bool])\n\t\t\t\t* mu (Callable[[float], bool])\n\t\t\t\t* alpha (Callable[[float], bool])\n\t\t\t\t* S_init (Callable[[Union[float, int]], bool])\n\t\t\t\t* E_init (Callable[[Union[float, int]], bool])\n\t\t\t\t* T_min (Callable[[Union[float, int], bool])\n\t\t\t\t* T_max (Callable[[Union[float, int], bool])\n\n\t\tSee Also:\n\t\t\t* :func:`WeOptPy.algorithms.Algorithm.typeParameters`\n\t\t' d = Algorithm.type_parameters() d.update({'omega': (lambda x: isinstance(x, (float, int))), 'mu': (lambda x: (isinstance(x, float) and (0 <= x <= 1))), 'alpha': (lambda x: (isinstance(x, float) and (0 <= x <= 1))), 'S_init': (lambda x: (isinstance(x, (float, int)) and (x > 0))), 'E_init': (lambda x: (isinstance(x, (float, int)) and (x > 0))), 'T_min': (lambda x: (isinstance(x, (float, int)) and (x > 0))), 'T_max': (lambda x: (isinstance(x, (float, int)) and (x > 0)))}) return d<|docstring|>Get dictionary with functions for checking values of parameters. Returns: Dict[str, Callable]: * omega (Callable[[Union[int, float]], bool]) * mu (Callable[[float], bool]) * alpha (Callable[[float], bool]) * S_init (Callable[[Union[float, int]], bool]) * E_init (Callable[[Union[float, int]], bool]) * T_min (Callable[[Union[float, int], bool]) * T_max (Callable[[Union[float, int], bool]) See Also: * :func:`WeOptPy.algorithms.Algorithm.typeParameters`<|endoftext|>
359828e041c868ae7b5222f9017076fa7def10fe7a399d5c86361007f21aaa4d
def set_parameters(self, n=50, omega=0.25, mu=0.5, alpha=0.5, S_init=10, E_init=10, T_min=(- 10), T_max=10, **ukwargs): 'Set the arguments of an algorithm.\n\n\t\tArguments:\n\t\t\tn (Optional[int]): Population size :math:`\\in [1, \\infty)`.\n\t\t\tT_min (Optional[float]): Minimum temperature, must be true :math:`$T_{min} < T_{max}`.\n\t\t\tT_max (Optional[float]): Maximum temperature, must be true :math:`T_{min} < T_{max}`.\n\t\t\tomega (Optional[float]): Burden factor :math:`\\in [0, 1]`.\n\t\t\tmu (Optional[float]): Dying rate :math:`\\in [0, 1]`.\n\t\t\tS_init (Optional[float]): Initial supply :math:`\\in (0, \\infty)`.\n\t\t\tE_init (Optional[float]): Initial endurance :math:`\\in (0, \\infty)`.\n\n\t\tSee Also:\n\t\t\t* :func:`WeOptPy.algorithms.Algorithm.setParameters`\n\t\t' Algorithm.set_parameters(self, n=n, itype=Camel, init_pop_func=ukwargs.pop('init_pop_func', self.init_pop), **ukwargs) (self.omega, self.mu, self.alpha, self.S_init, self.E_init, self.T_min, self.T_max) = (omega, mu, alpha, S_init, E_init, T_min, T_max)
Set the arguments of an algorithm. Arguments: n (Optional[int]): Population size :math:`\in [1, \infty)`. T_min (Optional[float]): Minimum temperature, must be true :math:`$T_{min} < T_{max}`. T_max (Optional[float]): Maximum temperature, must be true :math:`T_{min} < T_{max}`. omega (Optional[float]): Burden factor :math:`\in [0, 1]`. mu (Optional[float]): Dying rate :math:`\in [0, 1]`. S_init (Optional[float]): Initial supply :math:`\in (0, \infty)`. E_init (Optional[float]): Initial endurance :math:`\in (0, \infty)`. See Also: * :func:`WeOptPy.algorithms.Algorithm.setParameters`
WeOptPy/algorithms/ca.py
set_parameters
kb2623/WeOptPy
1
python
def set_parameters(self, n=50, omega=0.25, mu=0.5, alpha=0.5, S_init=10, E_init=10, T_min=(- 10), T_max=10, **ukwargs): 'Set the arguments of an algorithm.\n\n\t\tArguments:\n\t\t\tn (Optional[int]): Population size :math:`\\in [1, \\infty)`.\n\t\t\tT_min (Optional[float]): Minimum temperature, must be true :math:`$T_{min} < T_{max}`.\n\t\t\tT_max (Optional[float]): Maximum temperature, must be true :math:`T_{min} < T_{max}`.\n\t\t\tomega (Optional[float]): Burden factor :math:`\\in [0, 1]`.\n\t\t\tmu (Optional[float]): Dying rate :math:`\\in [0, 1]`.\n\t\t\tS_init (Optional[float]): Initial supply :math:`\\in (0, \\infty)`.\n\t\t\tE_init (Optional[float]): Initial endurance :math:`\\in (0, \\infty)`.\n\n\t\tSee Also:\n\t\t\t* :func:`WeOptPy.algorithms.Algorithm.setParameters`\n\t\t' Algorithm.set_parameters(self, n=n, itype=Camel, init_pop_func=ukwargs.pop('init_pop_func', self.init_pop), **ukwargs) (self.omega, self.mu, self.alpha, self.S_init, self.E_init, self.T_min, self.T_max) = (omega, mu, alpha, S_init, E_init, T_min, T_max)
def set_parameters(self, n=50, omega=0.25, mu=0.5, alpha=0.5, S_init=10, E_init=10, T_min=(- 10), T_max=10, **ukwargs): 'Set the arguments of an algorithm.\n\n\t\tArguments:\n\t\t\tn (Optional[int]): Population size :math:`\\in [1, \\infty)`.\n\t\t\tT_min (Optional[float]): Minimum temperature, must be true :math:`$T_{min} < T_{max}`.\n\t\t\tT_max (Optional[float]): Maximum temperature, must be true :math:`T_{min} < T_{max}`.\n\t\t\tomega (Optional[float]): Burden factor :math:`\\in [0, 1]`.\n\t\t\tmu (Optional[float]): Dying rate :math:`\\in [0, 1]`.\n\t\t\tS_init (Optional[float]): Initial supply :math:`\\in (0, \\infty)`.\n\t\t\tE_init (Optional[float]): Initial endurance :math:`\\in (0, \\infty)`.\n\n\t\tSee Also:\n\t\t\t* :func:`WeOptPy.algorithms.Algorithm.setParameters`\n\t\t' Algorithm.set_parameters(self, n=n, itype=Camel, init_pop_func=ukwargs.pop('init_pop_func', self.init_pop), **ukwargs) (self.omega, self.mu, self.alpha, self.S_init, self.E_init, self.T_min, self.T_max) = (omega, mu, alpha, S_init, E_init, T_min, T_max)<|docstring|>Set the arguments of an algorithm. Arguments: n (Optional[int]): Population size :math:`\in [1, \infty)`. T_min (Optional[float]): Minimum temperature, must be true :math:`$T_{min} < T_{max}`. T_max (Optional[float]): Maximum temperature, must be true :math:`T_{min} < T_{max}`. omega (Optional[float]): Burden factor :math:`\in [0, 1]`. mu (Optional[float]): Dying rate :math:`\in [0, 1]`. S_init (Optional[float]): Initial supply :math:`\in (0, \infty)`. E_init (Optional[float]): Initial endurance :math:`\in (0, \infty)`. See Also: * :func:`WeOptPy.algorithms.Algorithm.setParameters`<|endoftext|>
962533c0ab9b24c1ee5ae2af841b2020ccbdd5dbcc4bbb872956d00774947c34
def get_parameters(self): 'Get parameters of the algorithm.\n\n\t\tReturns:\n\t\t\tDict[str, Any]:\n\t\t' d = Algorithm.get_parameters(self) d.update({'omega': self.omega, 'mu': self.mu, 'alpha': self.alpha, 'S_init': self.S_init, 'E_init': self.E_init, 'T_min': self.T_min, 'T_max': self.T_max}) return d
Get parameters of the algorithm. Returns: Dict[str, Any]:
WeOptPy/algorithms/ca.py
get_parameters
kb2623/WeOptPy
1
python
def get_parameters(self): 'Get parameters of the algorithm.\n\n\t\tReturns:\n\t\t\tDict[str, Any]:\n\t\t' d = Algorithm.get_parameters(self) d.update({'omega': self.omega, 'mu': self.mu, 'alpha': self.alpha, 'S_init': self.S_init, 'E_init': self.E_init, 'T_min': self.T_min, 'T_max': self.T_max}) return d
def get_parameters(self): 'Get parameters of the algorithm.\n\n\t\tReturns:\n\t\t\tDict[str, Any]:\n\t\t' d = Algorithm.get_parameters(self) d.update({'omega': self.omega, 'mu': self.mu, 'alpha': self.alpha, 'S_init': self.S_init, 'E_init': self.E_init, 'T_min': self.T_min, 'T_max': self.T_max}) return d<|docstring|>Get parameters of the algorithm. Returns: Dict[str, Any]:<|endoftext|>
cf12cc8fe3048dadc1fbaec63ee8010c4a4e10e04c03a85b86e7d91dd75e9158
def init_pop(self, task, n, rnd, itype, *args, **kwargs): 'Initialize starting population.\n\n\t\tArgs:\n\t\t\ttask (Task): Optimization task.\n\t\t\tn (int): Number of camels in population.\n\t\t\trnd (mtrand.RandomState): Random number generator.\n\t\t\titype (Individual): Individual type.\n\t\t\twargs (list): Additional arguments.\n\t\t\tkwargs (dict): Additional keyword arguments.\n\n\t\tReturns:\n\t\t\tTuple[numpy.ndarray, numpy.ndarray, list, dict]:\n\t\t\t\t1. Initialize population of camels.\n\t\t\t\t2. Initialized populations function/fitness values.\n\t\t\t\t3. Additional arguments.\n\t\t\t\t4. Additional keyword arguments.\n\t\t' caravan = objects2array([itype(E_init=self.E_init, S_init=self.S_init, task=task, rnd=rnd, e=True) for _ in range(n)]) return (caravan, np.asarray([c.f for c in caravan]), args, kwargs)
Initialize starting population. Args: task (Task): Optimization task. n (int): Number of camels in population. rnd (mtrand.RandomState): Random number generator. itype (Individual): Individual type. wargs (list): Additional arguments. kwargs (dict): Additional keyword arguments. Returns: Tuple[numpy.ndarray, numpy.ndarray, list, dict]: 1. Initialize population of camels. 2. Initialized populations function/fitness values. 3. Additional arguments. 4. Additional keyword arguments.
WeOptPy/algorithms/ca.py
init_pop
kb2623/WeOptPy
1
python
def init_pop(self, task, n, rnd, itype, *args, **kwargs): 'Initialize starting population.\n\n\t\tArgs:\n\t\t\ttask (Task): Optimization task.\n\t\t\tn (int): Number of camels in population.\n\t\t\trnd (mtrand.RandomState): Random number generator.\n\t\t\titype (Individual): Individual type.\n\t\t\twargs (list): Additional arguments.\n\t\t\tkwargs (dict): Additional keyword arguments.\n\n\t\tReturns:\n\t\t\tTuple[numpy.ndarray, numpy.ndarray, list, dict]:\n\t\t\t\t1. Initialize population of camels.\n\t\t\t\t2. Initialized populations function/fitness values.\n\t\t\t\t3. Additional arguments.\n\t\t\t\t4. Additional keyword arguments.\n\t\t' caravan = objects2array([itype(E_init=self.E_init, S_init=self.S_init, task=task, rnd=rnd, e=True) for _ in range(n)]) return (caravan, np.asarray([c.f for c in caravan]), args, kwargs)
def init_pop(self, task, n, rnd, itype, *args, **kwargs): 'Initialize starting population.\n\n\t\tArgs:\n\t\t\ttask (Task): Optimization task.\n\t\t\tn (int): Number of camels in population.\n\t\t\trnd (mtrand.RandomState): Random number generator.\n\t\t\titype (Individual): Individual type.\n\t\t\twargs (list): Additional arguments.\n\t\t\tkwargs (dict): Additional keyword arguments.\n\n\t\tReturns:\n\t\t\tTuple[numpy.ndarray, numpy.ndarray, list, dict]:\n\t\t\t\t1. Initialize population of camels.\n\t\t\t\t2. Initialized populations function/fitness values.\n\t\t\t\t3. Additional arguments.\n\t\t\t\t4. Additional keyword arguments.\n\t\t' caravan = objects2array([itype(E_init=self.E_init, S_init=self.S_init, task=task, rnd=rnd, e=True) for _ in range(n)]) return (caravan, np.asarray([c.f for c in caravan]), args, kwargs)<|docstring|>Initialize starting population. Args: task (Task): Optimization task. n (int): Number of camels in population. rnd (mtrand.RandomState): Random number generator. itype (Individual): Individual type. wargs (list): Additional arguments. kwargs (dict): Additional keyword arguments. Returns: Tuple[numpy.ndarray, numpy.ndarray, list, dict]: 1. Initialize population of camels. 2. Initialized populations function/fitness values. 3. Additional arguments. 4. Additional keyword arguments.<|endoftext|>
eda859e7a49817994f170149d1afc260ad552a7caef5cbdebf51e0aa66d85501
def walk(self, c, cb, task): 'Move the camel in search space.\n\n\t\tArgs:\n\t\t\tc (Camel): Camel that we want to move.\n\t\t\tcb (Camel): Best know camel.\n\t\t\ttask (Task): Optimization task.\n\n\t\tReturns:\n\t\t\tCamel: Camel that moved in the search space.\n\t\t' c.nextt(self.T_min, self.T_max, self.Rand) c.nexts(self.omega, task.nGEN) c.nexte(task.nGEN, self.T_max) c.nextx(cb, self.E_init, self.S_init, task, self.Rand) return c
Move the camel in search space. Args: c (Camel): Camel that we want to move. cb (Camel): Best know camel. task (Task): Optimization task. Returns: Camel: Camel that moved in the search space.
WeOptPy/algorithms/ca.py
walk
kb2623/WeOptPy
1
python
def walk(self, c, cb, task): 'Move the camel in search space.\n\n\t\tArgs:\n\t\t\tc (Camel): Camel that we want to move.\n\t\t\tcb (Camel): Best know camel.\n\t\t\ttask (Task): Optimization task.\n\n\t\tReturns:\n\t\t\tCamel: Camel that moved in the search space.\n\t\t' c.nextt(self.T_min, self.T_max, self.Rand) c.nexts(self.omega, task.nGEN) c.nexte(task.nGEN, self.T_max) c.nextx(cb, self.E_init, self.S_init, task, self.Rand) return c
def walk(self, c, cb, task): 'Move the camel in search space.\n\n\t\tArgs:\n\t\t\tc (Camel): Camel that we want to move.\n\t\t\tcb (Camel): Best know camel.\n\t\t\ttask (Task): Optimization task.\n\n\t\tReturns:\n\t\t\tCamel: Camel that moved in the search space.\n\t\t' c.nextt(self.T_min, self.T_max, self.Rand) c.nexts(self.omega, task.nGEN) c.nexte(task.nGEN, self.T_max) c.nextx(cb, self.E_init, self.S_init, task, self.Rand) return c<|docstring|>Move the camel in search space. Args: c (Camel): Camel that we want to move. cb (Camel): Best know camel. task (Task): Optimization task. Returns: Camel: Camel that moved in the search space.<|endoftext|>
f65f931c27c532546f443ae562c6a25cca610638b8732844b5572b0b64728ad4
def oasis(self, c, rn, alpha): 'Apply oasis function to camel.\n\n\t\tArgs:\n\t\t\tc (Camel): Camel to apply oasis on.\n\t\t\trn (float): Random number.\n\t\t\talpha (float): View range of Camel.\n\n\t\tReturns:\n\t\t\tCamel: Camel with applied oasis on.\n\t\t' if ((rn > (1 - alpha)) and (c.f < c.f_past)): c.refill(self.S_init, self.E_init) return c
Apply oasis function to camel. Args: c (Camel): Camel to apply oasis on. rn (float): Random number. alpha (float): View range of Camel. Returns: Camel: Camel with applied oasis on.
WeOptPy/algorithms/ca.py
oasis
kb2623/WeOptPy
1
python
def oasis(self, c, rn, alpha): 'Apply oasis function to camel.\n\n\t\tArgs:\n\t\t\tc (Camel): Camel to apply oasis on.\n\t\t\trn (float): Random number.\n\t\t\talpha (float): View range of Camel.\n\n\t\tReturns:\n\t\t\tCamel: Camel with applied oasis on.\n\t\t' if ((rn > (1 - alpha)) and (c.f < c.f_past)): c.refill(self.S_init, self.E_init) return c
def oasis(self, c, rn, alpha): 'Apply oasis function to camel.\n\n\t\tArgs:\n\t\t\tc (Camel): Camel to apply oasis on.\n\t\t\trn (float): Random number.\n\t\t\talpha (float): View range of Camel.\n\n\t\tReturns:\n\t\t\tCamel: Camel with applied oasis on.\n\t\t' if ((rn > (1 - alpha)) and (c.f < c.f_past)): c.refill(self.S_init, self.E_init) return c<|docstring|>Apply oasis function to camel. Args: c (Camel): Camel to apply oasis on. rn (float): Random number. alpha (float): View range of Camel. Returns: Camel: Camel with applied oasis on.<|endoftext|>
fa492f4d90a5a50d5b35d39c1fe63f7df7919bfb66a17b3d43bd1903dcb3db35
def life_cycle(self, c, mu, task): 'Apply life cycle to Camel.\n\n\t\tArgs:\n\t\t\tc (Camel): Camel to apply life cycle.\n\t\t\tmu (float): Vision range of camel.\n\t\t\ttask (Task): Optimization task.\n\n\t\tReturns:\n\t\t\tCamel: Camel with life cycle applied to it.\n\t\t' if (c.f_past < (mu * c.f)): return Camel(self.E_init, self.S_init, rnd=self.Rand, task=task) else: return c.next()
Apply life cycle to Camel. Args: c (Camel): Camel to apply life cycle. mu (float): Vision range of camel. task (Task): Optimization task. Returns: Camel: Camel with life cycle applied to it.
WeOptPy/algorithms/ca.py
life_cycle
kb2623/WeOptPy
1
python
def life_cycle(self, c, mu, task): 'Apply life cycle to Camel.\n\n\t\tArgs:\n\t\t\tc (Camel): Camel to apply life cycle.\n\t\t\tmu (float): Vision range of camel.\n\t\t\ttask (Task): Optimization task.\n\n\t\tReturns:\n\t\t\tCamel: Camel with life cycle applied to it.\n\t\t' if (c.f_past < (mu * c.f)): return Camel(self.E_init, self.S_init, rnd=self.Rand, task=task) else: return c.next()
def life_cycle(self, c, mu, task): 'Apply life cycle to Camel.\n\n\t\tArgs:\n\t\t\tc (Camel): Camel to apply life cycle.\n\t\t\tmu (float): Vision range of camel.\n\t\t\ttask (Task): Optimization task.\n\n\t\tReturns:\n\t\t\tCamel: Camel with life cycle applied to it.\n\t\t' if (c.f_past < (mu * c.f)): return Camel(self.E_init, self.S_init, rnd=self.Rand, task=task) else: return c.next()<|docstring|>Apply life cycle to Camel. Args: c (Camel): Camel to apply life cycle. mu (float): Vision range of camel. task (Task): Optimization task. Returns: Camel: Camel with life cycle applied to it.<|endoftext|>
1331a033cbe429bd373c1779945dbd298cf7d7ead4d527c9996204d476cf0a90
def init_population(self, task): 'Initialize population.\n\n\t\tArgs:\n\t\t\ttask (Task): Optimization task.\n\n\t\tReturns:\n\t\t\tTuple[numpy.ndarray, numpy.ndarray, list, dict]:\n\t\t\t\t1. New population of Camels.\n\t\t\t\t2. New population fitness/function values.\n\t\t\t\t3. Additional arguments.\n\t\t\t\t4. Additional keyword arguments.\n\n\t\tSee Also:\n\t\t\t* :func:`WeOptPy.algorithms.Algorithm.initPopulation`\n\t\t' (caravan, fcaravan, args, kwargs) = Algorithm.init_population(self, task) return (caravan, fcaravan, args, kwargs)
Initialize population. Args: task (Task): Optimization task. Returns: Tuple[numpy.ndarray, numpy.ndarray, list, dict]: 1. New population of Camels. 2. New population fitness/function values. 3. Additional arguments. 4. Additional keyword arguments. See Also: * :func:`WeOptPy.algorithms.Algorithm.initPopulation`
WeOptPy/algorithms/ca.py
init_population
kb2623/WeOptPy
1
python
def init_population(self, task): 'Initialize population.\n\n\t\tArgs:\n\t\t\ttask (Task): Optimization task.\n\n\t\tReturns:\n\t\t\tTuple[numpy.ndarray, numpy.ndarray, list, dict]:\n\t\t\t\t1. New population of Camels.\n\t\t\t\t2. New population fitness/function values.\n\t\t\t\t3. Additional arguments.\n\t\t\t\t4. Additional keyword arguments.\n\n\t\tSee Also:\n\t\t\t* :func:`WeOptPy.algorithms.Algorithm.initPopulation`\n\t\t' (caravan, fcaravan, args, kwargs) = Algorithm.init_population(self, task) return (caravan, fcaravan, args, kwargs)
def init_population(self, task): 'Initialize population.\n\n\t\tArgs:\n\t\t\ttask (Task): Optimization task.\n\n\t\tReturns:\n\t\t\tTuple[numpy.ndarray, numpy.ndarray, list, dict]:\n\t\t\t\t1. New population of Camels.\n\t\t\t\t2. New population fitness/function values.\n\t\t\t\t3. Additional arguments.\n\t\t\t\t4. Additional keyword arguments.\n\n\t\tSee Also:\n\t\t\t* :func:`WeOptPy.algorithms.Algorithm.initPopulation`\n\t\t' (caravan, fcaravan, args, kwargs) = Algorithm.init_population(self, task) return (caravan, fcaravan, args, kwargs)<|docstring|>Initialize population. Args: task (Task): Optimization task. Returns: Tuple[numpy.ndarray, numpy.ndarray, list, dict]: 1. New population of Camels. 2. New population fitness/function values. 3. Additional arguments. 4. Additional keyword arguments. See Also: * :func:`WeOptPy.algorithms.Algorithm.initPopulation`<|endoftext|>
146284e918e03fcfd5a2472387269c2fb1fbbd14ee99c337431cb88e71f1ae31
def run_iteration(self, task, caravan, fcaravan, cb, fcb, *args, **dparams): 'Core function of Camel Algorithm.\n\n\t\tArgs:\n\t\t\ttask (Task): Optimization task.\n\t\t\tcaravan (numpy.ndarray[Camel]): Current population of Camels.\n\t\t\tfcaravan (numpy.ndarray[float]): Current population fitness/function values.\n\t\t\tcb (Camel): Current best Camel.\n\t\t\tfcb (float): Current best Camel fitness/function value.\n\t\t\targs (list): Additional arguments.\n\t\t\tdparams (dict): Additional keyword arguments.\n\n\t\tReturns:\n\t\t\tTuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, list, dict]:\n\t\t\t\t1. New population.\n\t\t\t\t2. New population function/fitness value.\n\t\t\t\t3. New global best solution.\n\t\t\t\t4. New global best fitness/objective value.\n\t\t\t\t5. Additional arguments.\n\t\t\t\t6. Additional keyword arguments.\n\t\t' ncaravan = objects2array([self.walk(c, cb, task) for c in caravan]) ncaravan = objects2array([self.oasis(c, self.rand(), self.alpha) for c in ncaravan]) ncaravan = objects2array([self.life_cycle(c, self.mu, task) for c in ncaravan]) fncaravan = np.asarray([c.f for c in ncaravan]) (cb, fcb) = self.get_best(ncaravan, fncaravan, cb, fcb) return (ncaravan, fncaravan, cb, fcb, args, dparams)
Core function of Camel Algorithm. Args: task (Task): Optimization task. caravan (numpy.ndarray[Camel]): Current population of Camels. fcaravan (numpy.ndarray[float]): Current population fitness/function values. cb (Camel): Current best Camel. fcb (float): Current best Camel fitness/function value. args (list): Additional arguments. dparams (dict): Additional keyword arguments. Returns: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, list, dict]: 1. New population. 2. New population function/fitness value. 3. New global best solution. 4. New global best fitness/objective value. 5. Additional arguments. 6. Additional keyword arguments.
WeOptPy/algorithms/ca.py
run_iteration
kb2623/WeOptPy
1
python
def run_iteration(self, task, caravan, fcaravan, cb, fcb, *args, **dparams): 'Core function of Camel Algorithm.\n\n\t\tArgs:\n\t\t\ttask (Task): Optimization task.\n\t\t\tcaravan (numpy.ndarray[Camel]): Current population of Camels.\n\t\t\tfcaravan (numpy.ndarray[float]): Current population fitness/function values.\n\t\t\tcb (Camel): Current best Camel.\n\t\t\tfcb (float): Current best Camel fitness/function value.\n\t\t\targs (list): Additional arguments.\n\t\t\tdparams (dict): Additional keyword arguments.\n\n\t\tReturns:\n\t\t\tTuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, list, dict]:\n\t\t\t\t1. New population.\n\t\t\t\t2. New population function/fitness value.\n\t\t\t\t3. New global best solution.\n\t\t\t\t4. New global best fitness/objective value.\n\t\t\t\t5. Additional arguments.\n\t\t\t\t6. Additional keyword arguments.\n\t\t' ncaravan = objects2array([self.walk(c, cb, task) for c in caravan]) ncaravan = objects2array([self.oasis(c, self.rand(), self.alpha) for c in ncaravan]) ncaravan = objects2array([self.life_cycle(c, self.mu, task) for c in ncaravan]) fncaravan = np.asarray([c.f for c in ncaravan]) (cb, fcb) = self.get_best(ncaravan, fncaravan, cb, fcb) return (ncaravan, fncaravan, cb, fcb, args, dparams)
def run_iteration(self, task, caravan, fcaravan, cb, fcb, *args, **dparams): 'Core function of Camel Algorithm.\n\n\t\tArgs:\n\t\t\ttask (Task): Optimization task.\n\t\t\tcaravan (numpy.ndarray[Camel]): Current population of Camels.\n\t\t\tfcaravan (numpy.ndarray[float]): Current population fitness/function values.\n\t\t\tcb (Camel): Current best Camel.\n\t\t\tfcb (float): Current best Camel fitness/function value.\n\t\t\targs (list): Additional arguments.\n\t\t\tdparams (dict): Additional keyword arguments.\n\n\t\tReturns:\n\t\t\tTuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, list, dict]:\n\t\t\t\t1. New population.\n\t\t\t\t2. New population function/fitness value.\n\t\t\t\t3. New global best solution.\n\t\t\t\t4. New global best fitness/objective value.\n\t\t\t\t5. Additional arguments.\n\t\t\t\t6. Additional keyword arguments.\n\t\t' ncaravan = objects2array([self.walk(c, cb, task) for c in caravan]) ncaravan = objects2array([self.oasis(c, self.rand(), self.alpha) for c in ncaravan]) ncaravan = objects2array([self.life_cycle(c, self.mu, task) for c in ncaravan]) fncaravan = np.asarray([c.f for c in ncaravan]) (cb, fcb) = self.get_best(ncaravan, fncaravan, cb, fcb) return (ncaravan, fncaravan, cb, fcb, args, dparams)<|docstring|>Core function of Camel Algorithm. Args: task (Task): Optimization task. caravan (numpy.ndarray[Camel]): Current population of Camels. fcaravan (numpy.ndarray[float]): Current population fitness/function values. cb (Camel): Current best Camel. fcb (float): Current best Camel fitness/function value. args (list): Additional arguments. dparams (dict): Additional keyword arguments. Returns: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, list, dict]: 1. New population. 2. New population function/fitness value. 3. New global best solution. 4. New global best fitness/objective value. 5. Additional arguments. 6. Additional keyword arguments.<|endoftext|>
0b74ed0384c73458f8f26ddb06ecbbd6a5b9269f9db9f0049fbdd4b6986d0f11
def swap_case(string: str) -> str: '\n >>> swap_case(\'HackerRank.com presents "Pythonist 2".\')\n \'hACKERrANK.COM PRESENTS "pYTHONIST 2".\'\n ' return string.swapcase()
>>> swap_case('HackerRank.com presents "Pythonist 2".') 'hACKERrANK.COM PRESENTS "pYTHONIST 2".'
python/easy/strings/swap_case.py
swap_case
Razor-87/hackerrank
0
python
def swap_case(string: str) -> str: '\n >>> swap_case(\'HackerRank.com presents "Pythonist 2".\')\n \'hACKERrANK.COM PRESENTS "pYTHONIST 2".\'\n ' return string.swapcase()
def swap_case(string: str) -> str: '\n >>> swap_case(\'HackerRank.com presents "Pythonist 2".\')\n \'hACKERrANK.COM PRESENTS "pYTHONIST 2".\'\n ' return string.swapcase()<|docstring|>>>> swap_case('HackerRank.com presents "Pythonist 2".') 'hACKERrANK.COM PRESENTS "pYTHONIST 2".'<|endoftext|>
fd57a070f97a0c874d6bd922760750354e1ec459b7958f2029eb0c35a002784a
async def async_get_config_entry_diagnostics(hass: HomeAssistant, entry: ConfigEntry) -> dict: 'Return diagnostics the Tradfri platform.' entry_data = hass.data[DOMAIN][entry.entry_id] coordinator_data = entry_data[COORDINATOR] device_registry = dr.async_get(hass) device = cast(dr.DeviceEntry, device_registry.async_get_device(identifiers={(DOMAIN, entry.data[CONF_GATEWAY_ID])})) device_data: list = [] for coordinator in coordinator_data[COORDINATOR_LIST]: device_data.append(coordinator.device.device_info.model_number) return {'gateway_version': device.sw_version, 'device_data': sorted(device_data)}
Return diagnostics the Tradfri platform.
homeassistant/components/tradfri/diagnostics.py
async_get_config_entry_diagnostics
a-p-z/core
30,023
python
async def async_get_config_entry_diagnostics(hass: HomeAssistant, entry: ConfigEntry) -> dict: entry_data = hass.data[DOMAIN][entry.entry_id] coordinator_data = entry_data[COORDINATOR] device_registry = dr.async_get(hass) device = cast(dr.DeviceEntry, device_registry.async_get_device(identifiers={(DOMAIN, entry.data[CONF_GATEWAY_ID])})) device_data: list = [] for coordinator in coordinator_data[COORDINATOR_LIST]: device_data.append(coordinator.device.device_info.model_number) return {'gateway_version': device.sw_version, 'device_data': sorted(device_data)}
async def async_get_config_entry_diagnostics(hass: HomeAssistant, entry: ConfigEntry) -> dict: entry_data = hass.data[DOMAIN][entry.entry_id] coordinator_data = entry_data[COORDINATOR] device_registry = dr.async_get(hass) device = cast(dr.DeviceEntry, device_registry.async_get_device(identifiers={(DOMAIN, entry.data[CONF_GATEWAY_ID])})) device_data: list = [] for coordinator in coordinator_data[COORDINATOR_LIST]: device_data.append(coordinator.device.device_info.model_number) return {'gateway_version': device.sw_version, 'device_data': sorted(device_data)}<|docstring|>Return diagnostics the Tradfri platform.<|endoftext|>
fe2c61b1efc500ee7b862baeb0c783e4affc147ee353458318c91f8617394f61
def get_parser(): 'Initialize the parser for the command line interface and bind the\n autocompletion functionality' parser = argparse.ArgumentParser(description=('Command line tool for extracting text from any document. ' % locals())) parser.add_argument('filename', help='Filename to extract text.').completer = argcomplete.completers.FilesCompleter parser.add_argument('-e', '--encoding', type=str, default=DEFAULT_ENCODING, choices=_get_available_encodings(), help='Specify the encoding of the output.') parser.add_argument('-m', '--method', default='', help='specify a method of extraction for formats that support it') parser.add_argument('-o', '--output', type=argparse.FileType('w'), default='-', help='output raw text in this file') parser.add_argument('-v', '--version', action='version', version=('%(prog)s ' + VERSION)) argcomplete.autocomplete(parser) return parser
Initialize the parser for the command line interface and bind the autocompletion functionality
textract/cli.py
get_parser
anderser/textract
2
python
def get_parser(): 'Initialize the parser for the command line interface and bind the\n autocompletion functionality' parser = argparse.ArgumentParser(description=('Command line tool for extracting text from any document. ' % locals())) parser.add_argument('filename', help='Filename to extract text.').completer = argcomplete.completers.FilesCompleter parser.add_argument('-e', '--encoding', type=str, default=DEFAULT_ENCODING, choices=_get_available_encodings(), help='Specify the encoding of the output.') parser.add_argument('-m', '--method', default=, help='specify a method of extraction for formats that support it') parser.add_argument('-o', '--output', type=argparse.FileType('w'), default='-', help='output raw text in this file') parser.add_argument('-v', '--version', action='version', version=('%(prog)s ' + VERSION)) argcomplete.autocomplete(parser) return parser
def get_parser(): 'Initialize the parser for the command line interface and bind the\n autocompletion functionality' parser = argparse.ArgumentParser(description=('Command line tool for extracting text from any document. ' % locals())) parser.add_argument('filename', help='Filename to extract text.').completer = argcomplete.completers.FilesCompleter parser.add_argument('-e', '--encoding', type=str, default=DEFAULT_ENCODING, choices=_get_available_encodings(), help='Specify the encoding of the output.') parser.add_argument('-m', '--method', default=, help='specify a method of extraction for formats that support it') parser.add_argument('-o', '--output', type=argparse.FileType('w'), default='-', help='output raw text in this file') parser.add_argument('-v', '--version', action='version', version=('%(prog)s ' + VERSION)) argcomplete.autocomplete(parser) return parser<|docstring|>Initialize the parser for the command line interface and bind the autocompletion functionality<|endoftext|>
f49eb24d9e5b54870d757d96e75a26303c4dd7ee765da504ff179f36bb80809d
def _get_available_encodings(): 'Get a list of the available encodings to make it easy to\n tab-complete the command line interface.\n\n Inspiration from http://stackoverflow.com/a/3824405/564709\n ' available_encodings = set(encodings.aliases.aliases.values()) paths = [os.path.dirname(encodings.__file__)] for (importer, modname, ispkg) in pkgutil.walk_packages(path=paths): available_encodings.add(modname) available_encodings = list(available_encodings) available_encodings.sort() return available_encodings
Get a list of the available encodings to make it easy to tab-complete the command line interface. Inspiration from http://stackoverflow.com/a/3824405/564709
textract/cli.py
_get_available_encodings
anderser/textract
2
python
def _get_available_encodings(): 'Get a list of the available encodings to make it easy to\n tab-complete the command line interface.\n\n Inspiration from http://stackoverflow.com/a/3824405/564709\n ' available_encodings = set(encodings.aliases.aliases.values()) paths = [os.path.dirname(encodings.__file__)] for (importer, modname, ispkg) in pkgutil.walk_packages(path=paths): available_encodings.add(modname) available_encodings = list(available_encodings) available_encodings.sort() return available_encodings
def _get_available_encodings(): 'Get a list of the available encodings to make it easy to\n tab-complete the command line interface.\n\n Inspiration from http://stackoverflow.com/a/3824405/564709\n ' available_encodings = set(encodings.aliases.aliases.values()) paths = [os.path.dirname(encodings.__file__)] for (importer, modname, ispkg) in pkgutil.walk_packages(path=paths): available_encodings.add(modname) available_encodings = list(available_encodings) available_encodings.sort() return available_encodings<|docstring|>Get a list of the available encodings to make it easy to tab-complete the command line interface. Inspiration from http://stackoverflow.com/a/3824405/564709<|endoftext|>
7eebfca43e7093dfc714e17b453c1df32dc00a55b4a4d53d8e6ad7fff5c8a451
def DictKeys(keys): '\n Checks if dict has all given keys\n\n :param keys:\n :type keys:\n\n >>> _dd(DictKeys([\'a\',\'b\']).check({\'a\':1,\'b\':2,}))\n "{\'a\': 1, \'b\': 2}"\n >>> extract_error(DictKeys([\'a\',\'b\']), {\'a\':1,\'b\':2,\'c\':3,})\n {\'c\': \'c is not allowed key\'}\n >>> extract_error(DictKeys([\'key\',\'key2\']), {\'key\':\'val\'})\n {\'key2\': \'is required\'}\n ' def MissingKey(val): raise DataError(('%s is not in Dict' % val)) req = [(Key(key), Any) for key in keys] return Dict(dict(req))
Checks if dict has all given keys :param keys: :type keys: >>> _dd(DictKeys(['a','b']).check({'a':1,'b':2,})) "{'a': 1, 'b': 2}" >>> extract_error(DictKeys(['a','b']), {'a':1,'b':2,'c':3,}) {'c': 'c is not allowed key'} >>> extract_error(DictKeys(['key','key2']), {'key':'val'}) {'key2': 'is required'}
trafaret/__init__.py
DictKeys
asvetlov/trafaret
0
python
def DictKeys(keys): '\n Checks if dict has all given keys\n\n :param keys:\n :type keys:\n\n >>> _dd(DictKeys([\'a\',\'b\']).check({\'a\':1,\'b\':2,}))\n "{\'a\': 1, \'b\': 2}"\n >>> extract_error(DictKeys([\'a\',\'b\']), {\'a\':1,\'b\':2,\'c\':3,})\n {\'c\': \'c is not allowed key\'}\n >>> extract_error(DictKeys([\'key\',\'key2\']), {\'key\':\'val\'})\n {\'key2\': \'is required\'}\n ' def MissingKey(val): raise DataError(('%s is not in Dict' % val)) req = [(Key(key), Any) for key in keys] return Dict(dict(req))
def DictKeys(keys): '\n Checks if dict has all given keys\n\n :param keys:\n :type keys:\n\n >>> _dd(DictKeys([\'a\',\'b\']).check({\'a\':1,\'b\':2,}))\n "{\'a\': 1, \'b\': 2}"\n >>> extract_error(DictKeys([\'a\',\'b\']), {\'a\':1,\'b\':2,\'c\':3,})\n {\'c\': \'c is not allowed key\'}\n >>> extract_error(DictKeys([\'key\',\'key2\']), {\'key\':\'val\'})\n {\'key2\': \'is required\'}\n ' def MissingKey(val): raise DataError(('%s is not in Dict' % val)) req = [(Key(key), Any) for key in keys] return Dict(dict(req))<|docstring|>Checks if dict has all given keys :param keys: :type keys: >>> _dd(DictKeys(['a','b']).check({'a':1,'b':2,})) "{'a': 1, 'b': 2}" >>> extract_error(DictKeys(['a','b']), {'a':1,'b':2,'c':3,}) {'c': 'c is not allowed key'} >>> extract_error(DictKeys(['key','key2']), {'key':'val'}) {'key2': 'is required'}<|endoftext|>
7a797297f33725f97e935c8918dd2b196f6777f732aa580659486072c66308d3
def guard(trafaret=None, **kwargs): '\n Decorator for protecting function with trafarets\n\n >>> @guard(a=String, b=Int, c=String)\n ... def fn(a, b, c="default"):\n ... \'\'\'docstring\'\'\'\n ... return (a, b, c)\n ...\n >>> fn.__module__ = None\n >>> help(fn)\n Help on function fn:\n <BLANKLINE>\n fn(*args, **kwargs)\n guarded with <Dict(a=<String>, b=<Int>, c=<String>)>\n <BLANKLINE>\n docstring\n <BLANKLINE>\n >>> fn("foo", 1)\n (\'foo\', 1, \'default\')\n >>> extract_error(fn, "foo", 1, 2)\n {\'c\': \'value is not a string\'}\n >>> extract_error(fn, "foo")\n {\'b\': \'is required\'}\n >>> g = guard(Dict())\n >>> c = Forward()\n >>> c << Dict(name=str, children=List[c])\n >>> g = guard(c)\n >>> g = guard(Int())\n Traceback (most recent call last):\n ...\n RuntimeError: trafaret should be instance of Dict or Forward\n ' if (trafaret and (not isinstance(trafaret, Dict)) and (not isinstance(trafaret, Forward))): raise RuntimeError('trafaret should be instance of Dict or Forward') elif (trafaret and kwargs): raise RuntimeError('choose one way of initialization, trafaret or kwargs') if (not trafaret): trafaret = Dict(**kwargs) def wrapper(fn): argspec = inspect.getargspec(fn) @functools.wraps(fn) def decor(*args, **kwargs): fnargs = argspec.args if (fnargs[0] in ['self', 'cls']): fnargs = fnargs[1:] checkargs = args[1:] else: checkargs = args try: call_args = dict(itertools.chain(zip(fnargs, checkargs), kwargs.items())) for (name, default) in zip(reversed(fnargs), (argspec.defaults or ())): if (name not in call_args): call_args[name] = default converted = trafaret.check(call_args) except DataError as err: raise GuardError(error=err.error) return fn(**converted) decor.__doc__ = (('guarded with %r\n\n' % trafaret) + (decor.__doc__ or '')) return decor return wrapper
Decorator for protecting function with trafarets >>> @guard(a=String, b=Int, c=String) ... def fn(a, b, c="default"): ... '''docstring''' ... return (a, b, c) ... >>> fn.__module__ = None >>> help(fn) Help on function fn: <BLANKLINE> fn(*args, **kwargs) guarded with <Dict(a=<String>, b=<Int>, c=<String>)> <BLANKLINE> docstring <BLANKLINE> >>> fn("foo", 1) ('foo', 1, 'default') >>> extract_error(fn, "foo", 1, 2) {'c': 'value is not a string'} >>> extract_error(fn, "foo") {'b': 'is required'} >>> g = guard(Dict()) >>> c = Forward() >>> c << Dict(name=str, children=List[c]) >>> g = guard(c) >>> g = guard(Int()) Traceback (most recent call last): ... RuntimeError: trafaret should be instance of Dict or Forward
trafaret/__init__.py
guard
asvetlov/trafaret
0
python
def guard(trafaret=None, **kwargs): '\n Decorator for protecting function with trafarets\n\n >>> @guard(a=String, b=Int, c=String)\n ... def fn(a, b, c="default"):\n ... \'\'\'docstring\'\'\'\n ... return (a, b, c)\n ...\n >>> fn.__module__ = None\n >>> help(fn)\n Help on function fn:\n <BLANKLINE>\n fn(*args, **kwargs)\n guarded with <Dict(a=<String>, b=<Int>, c=<String>)>\n <BLANKLINE>\n docstring\n <BLANKLINE>\n >>> fn("foo", 1)\n (\'foo\', 1, \'default\')\n >>> extract_error(fn, "foo", 1, 2)\n {\'c\': \'value is not a string\'}\n >>> extract_error(fn, "foo")\n {\'b\': \'is required\'}\n >>> g = guard(Dict())\n >>> c = Forward()\n >>> c << Dict(name=str, children=List[c])\n >>> g = guard(c)\n >>> g = guard(Int())\n Traceback (most recent call last):\n ...\n RuntimeError: trafaret should be instance of Dict or Forward\n ' if (trafaret and (not isinstance(trafaret, Dict)) and (not isinstance(trafaret, Forward))): raise RuntimeError('trafaret should be instance of Dict or Forward') elif (trafaret and kwargs): raise RuntimeError('choose one way of initialization, trafaret or kwargs') if (not trafaret): trafaret = Dict(**kwargs) def wrapper(fn): argspec = inspect.getargspec(fn) @functools.wraps(fn) def decor(*args, **kwargs): fnargs = argspec.args if (fnargs[0] in ['self', 'cls']): fnargs = fnargs[1:] checkargs = args[1:] else: checkargs = args try: call_args = dict(itertools.chain(zip(fnargs, checkargs), kwargs.items())) for (name, default) in zip(reversed(fnargs), (argspec.defaults or ())): if (name not in call_args): call_args[name] = default converted = trafaret.check(call_args) except DataError as err: raise GuardError(error=err.error) return fn(**converted) decor.__doc__ = (('guarded with %r\n\n' % trafaret) + (decor.__doc__ or )) return decor return wrapper
def guard(trafaret=None, **kwargs): '\n Decorator for protecting function with trafarets\n\n >>> @guard(a=String, b=Int, c=String)\n ... def fn(a, b, c="default"):\n ... \'\'\'docstring\'\'\'\n ... return (a, b, c)\n ...\n >>> fn.__module__ = None\n >>> help(fn)\n Help on function fn:\n <BLANKLINE>\n fn(*args, **kwargs)\n guarded with <Dict(a=<String>, b=<Int>, c=<String>)>\n <BLANKLINE>\n docstring\n <BLANKLINE>\n >>> fn("foo", 1)\n (\'foo\', 1, \'default\')\n >>> extract_error(fn, "foo", 1, 2)\n {\'c\': \'value is not a string\'}\n >>> extract_error(fn, "foo")\n {\'b\': \'is required\'}\n >>> g = guard(Dict())\n >>> c = Forward()\n >>> c << Dict(name=str, children=List[c])\n >>> g = guard(c)\n >>> g = guard(Int())\n Traceback (most recent call last):\n ...\n RuntimeError: trafaret should be instance of Dict or Forward\n ' if (trafaret and (not isinstance(trafaret, Dict)) and (not isinstance(trafaret, Forward))): raise RuntimeError('trafaret should be instance of Dict or Forward') elif (trafaret and kwargs): raise RuntimeError('choose one way of initialization, trafaret or kwargs') if (not trafaret): trafaret = Dict(**kwargs) def wrapper(fn): argspec = inspect.getargspec(fn) @functools.wraps(fn) def decor(*args, **kwargs): fnargs = argspec.args if (fnargs[0] in ['self', 'cls']): fnargs = fnargs[1:] checkargs = args[1:] else: checkargs = args try: call_args = dict(itertools.chain(zip(fnargs, checkargs), kwargs.items())) for (name, default) in zip(reversed(fnargs), (argspec.defaults or ())): if (name not in call_args): call_args[name] = default converted = trafaret.check(call_args) except DataError as err: raise GuardError(error=err.error) return fn(**converted) decor.__doc__ = (('guarded with %r\n\n' % trafaret) + (decor.__doc__ or )) return decor return wrapper<|docstring|>Decorator for protecting function with trafarets >>> @guard(a=String, b=Int, c=String) ... def fn(a, b, c="default"): ... '''docstring''' ... return (a, b, c) ... 
>>> fn.__module__ = None >>> help(fn) Help on function fn: <BLANKLINE> fn(*args, **kwargs) guarded with <Dict(a=<String>, b=<Int>, c=<String>)> <BLANKLINE> docstring <BLANKLINE> >>> fn("foo", 1) ('foo', 1, 'default') >>> extract_error(fn, "foo", 1, 2) {'c': 'value is not a string'} >>> extract_error(fn, "foo") {'b': 'is required'} >>> g = guard(Dict()) >>> c = Forward() >>> c << Dict(name=str, children=List[c]) >>> g = guard(c) >>> g = guard(Int()) Traceback (most recent call last): ... RuntimeError: trafaret should be instance of Dict or Forward<|endoftext|>
42aba558e0308276ea7698bab26b03d49ad7a38e9155772c30e40df4d447b302
def ignore(val): '\n Stub to ignore value from trafaret\n Use it like:\n\n >>> a = Int >> ignore\n >>> a.check(7)\n ' pass
Stub to ignore value from trafaret Use it like: >>> a = Int >> ignore >>> a.check(7)
trafaret/__init__.py
ignore
asvetlov/trafaret
0
python
def ignore(val): '\n Stub to ignore value from trafaret\n Use it like:\n\n >>> a = Int >> ignore\n >>> a.check(7)\n ' pass
def ignore(val): '\n Stub to ignore value from trafaret\n Use it like:\n\n >>> a = Int >> ignore\n >>> a.check(7)\n ' pass<|docstring|>Stub to ignore value from trafaret Use it like: >>> a = Int >> ignore >>> a.check(7)<|endoftext|>
4d4b63dbfd3139cf4dbe3d682ae3500a6afd4bbddba3647f9c0705927d1fb244
def catch_error(checker, *a, **kw): '\n Helper for tests - catch error and return it as dict\n ' try: if hasattr(checker, 'check'): return checker.check(*a, **kw) elif callable(checker): return checker(*a, **kw) except DataError as error: return error
Helper for tests - catch error and return it as dict
trafaret/__init__.py
catch_error
asvetlov/trafaret
0
python
def catch_error(checker, *a, **kw): '\n \n ' try: if hasattr(checker, 'check'): return checker.check(*a, **kw) elif callable(checker): return checker(*a, **kw) except DataError as error: return error
def catch_error(checker, *a, **kw): '\n \n ' try: if hasattr(checker, 'check'): return checker.check(*a, **kw) elif callable(checker): return checker(*a, **kw) except DataError as error: return error<|docstring|>Helper for tests - catch error and return it as dict<|endoftext|>
9b31e8f883abf9e1da74512ca8f6571c14965994b821d57dc5190976113e926a
def extract_error(checker, *a, **kw): '\n Helper for tests - catch error and return it as dict\n ' res = catch_error(checker, *a, **kw) if isinstance(res, DataError): return res.as_dict() return res
Helper for tests - catch error and return it as dict
trafaret/__init__.py
extract_error
asvetlov/trafaret
0
python
def extract_error(checker, *a, **kw): '\n \n ' res = catch_error(checker, *a, **kw) if isinstance(res, DataError): return res.as_dict() return res
def extract_error(checker, *a, **kw): '\n \n ' res = catch_error(checker, *a, **kw) if isinstance(res, DataError): return res.as_dict() return res<|docstring|>Helper for tests - catch error and return it as dict<|endoftext|>
7e9cc4fda779af52ca65806a5a6e4c9f417516f2658675f378507c10948a267a
def check(self, value): '\n Common logic. In subclasses you need to implement check_value or\n check_and_return.\n ' if hasattr(self, 'check_value'): self.check_value(value) return self._convert(value) if hasattr(self, 'check_and_return'): return self._convert(self.check_and_return(value)) cls = ('%s.%s' % (type(self).__module__, type(self).__name__)) raise NotImplementedError(("You must implement check_value or check_and_return methods '%s'" % cls))
Common logic. In subclasses you need to implement check_value or check_and_return.
trafaret/__init__.py
check
asvetlov/trafaret
0
python
def check(self, value): '\n Common logic. In subclasses you need to implement check_value or\n check_and_return.\n ' if hasattr(self, 'check_value'): self.check_value(value) return self._convert(value) if hasattr(self, 'check_and_return'): return self._convert(self.check_and_return(value)) cls = ('%s.%s' % (type(self).__module__, type(self).__name__)) raise NotImplementedError(("You must implement check_value or check_and_return methods '%s'" % cls))
def check(self, value): '\n Common logic. In subclasses you need to implement check_value or\n check_and_return.\n ' if hasattr(self, 'check_value'): self.check_value(value) return self._convert(value) if hasattr(self, 'check_and_return'): return self._convert(self.check_and_return(value)) cls = ('%s.%s' % (type(self).__module__, type(self).__name__)) raise NotImplementedError(("You must implement check_value or check_and_return methods '%s'" % cls))<|docstring|>Common logic. In subclasses you need to implement check_value or check_and_return.<|endoftext|>
353109eeb2bb2cfd59cb4d641436db34bb3ff27c111e701d4d9b1ca23c7674b7
def converter(self, value): '\n You can change converter with `>>` operator or append method\n ' return value
You can change converter with `>>` operator or append method
trafaret/__init__.py
converter
asvetlov/trafaret
0
python
def converter(self, value): '\n \n ' return value
def converter(self, value): '\n \n ' return value<|docstring|>You can change converter with `>>` operator or append method<|endoftext|>
3f354170276c06ae10ea70171495ca087abb443b9f7410961d2b8933ee8e2853
def _failure(self, error=None): '\n Shortcut method for raising validation error\n ' raise DataError(error=error)
Shortcut method for raising validation error
trafaret/__init__.py
_failure
asvetlov/trafaret
0
python
def _failure(self, error=None): '\n \n ' raise DataError(error=error)
def _failure(self, error=None): '\n \n ' raise DataError(error=error)<|docstring|>Shortcut method for raising validation error<|endoftext|>
5edb649caa822ad50c742af603a4e724d3f9c4196f1a0de91e15e4f2c4664282
def _trafaret(self, trafaret): '\n Helper for complex trafarets, takes trafaret instance or class\n and returns trafaret instance\n ' if (isinstance(trafaret, Trafaret) or inspect.isroutine(trafaret)): return trafaret elif issubclass(trafaret, Trafaret): return trafaret() elif isinstance(trafaret, type): return Type(trafaret) else: raise RuntimeError(('%r should be instance or subclass of Trafaret' % trafaret))
Helper for complex trafarets, takes trafaret instance or class and returns trafaret instance
trafaret/__init__.py
_trafaret
asvetlov/trafaret
0
python
def _trafaret(self, trafaret): '\n Helper for complex trafarets, takes trafaret instance or class\n and returns trafaret instance\n ' if (isinstance(trafaret, Trafaret) or inspect.isroutine(trafaret)): return trafaret elif issubclass(trafaret, Trafaret): return trafaret() elif isinstance(trafaret, type): return Type(trafaret) else: raise RuntimeError(('%r should be instance or subclass of Trafaret' % trafaret))
def _trafaret(self, trafaret): '\n Helper for complex trafarets, takes trafaret instance or class\n and returns trafaret instance\n ' if (isinstance(trafaret, Trafaret) or inspect.isroutine(trafaret)): return trafaret elif issubclass(trafaret, Trafaret): return trafaret() elif isinstance(trafaret, type): return Type(trafaret) else: raise RuntimeError(('%r should be instance or subclass of Trafaret' % trafaret))<|docstring|>Helper for complex trafarets, takes trafaret instance or class and returns trafaret instance<|endoftext|>
438d04374fc898bba2dea6285e88c20e7302d015ffb18bde71aceb43d72870c7
def append(self, converter): '\n Appends new converter to list.\n ' if hasattr(self, 'converters'): self.converters.append(converter) else: self.converters = [converter] return self
Appends new converter to list.
trafaret/__init__.py
append
asvetlov/trafaret
0
python
def append(self, converter): '\n \n ' if hasattr(self, 'converters'): self.converters.append(converter) else: self.converters = [converter] return self
def append(self, converter): '\n \n ' if hasattr(self, 'converters'): self.converters.append(converter) else: self.converters = [converter] return self<|docstring|>Appends new converter to list.<|endoftext|>
5cebf9ef1d52e3d786ba88606fac0517c6d45af09b562f9af21eadf2b6201404
def setUp(self): 'Initialise variables and mock functions.' super(CreateCloneVolumeTestCase, self).setUp() self.dest_volume = fake_volume.fake_volume_obj(self.ctx) mock.patch.object(self.requester, 'volumeClone', self.clone_request).start() mock.patch.object(self.requester, 'cloneDetail', self.clone_detail_request).start() mock.patch.object(self.requester, 'volumeDetailByName', self.volume_detail_request).start() self.volume_detail_response = {'status': 0, 'volumeInfoResult': {'volumeId': 1234567}} clone_success = copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success']) clone_pending = copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success']) clone_fail = copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success']) clone_response_fail = copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success']) clone_success['result'] = six.text_type(self.DETAIL_OPTIONS['success']) clone_pending['result'] = six.text_type(self.DETAIL_OPTIONS['pending']) clone_fail['result'] = six.text_type(self.DETAIL_OPTIONS['failure']) clone_response_fail['status'] = 1 self.FAKE_SOAP_RESPONSE['clone_detail'] = {'success': clone_success, 'fail': clone_fail, 'pending': clone_pending, 'request_fail': clone_response_fail} self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response['result'] = '1234' self.response_detail = self.FAKE_SOAP_RESPONSE['clone_detail']['success'] self.test_pending = False self.test_pending_count = 0
Initialise variables and mock functions.
cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py
setUp
bswartz/cinder
11
python
def setUp(self): super(CreateCloneVolumeTestCase, self).setUp() self.dest_volume = fake_volume.fake_volume_obj(self.ctx) mock.patch.object(self.requester, 'volumeClone', self.clone_request).start() mock.patch.object(self.requester, 'cloneDetail', self.clone_detail_request).start() mock.patch.object(self.requester, 'volumeDetailByName', self.volume_detail_request).start() self.volume_detail_response = {'status': 0, 'volumeInfoResult': {'volumeId': 1234567}} clone_success = copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success']) clone_pending = copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success']) clone_fail = copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success']) clone_response_fail = copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success']) clone_success['result'] = six.text_type(self.DETAIL_OPTIONS['success']) clone_pending['result'] = six.text_type(self.DETAIL_OPTIONS['pending']) clone_fail['result'] = six.text_type(self.DETAIL_OPTIONS['failure']) clone_response_fail['status'] = 1 self.FAKE_SOAP_RESPONSE['clone_detail'] = {'success': clone_success, 'fail': clone_fail, 'pending': clone_pending, 'request_fail': clone_response_fail} self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response['result'] = '1234' self.response_detail = self.FAKE_SOAP_RESPONSE['clone_detail']['success'] self.test_pending = False self.test_pending_count = 0
def setUp(self): super(CreateCloneVolumeTestCase, self).setUp() self.dest_volume = fake_volume.fake_volume_obj(self.ctx) mock.patch.object(self.requester, 'volumeClone', self.clone_request).start() mock.patch.object(self.requester, 'cloneDetail', self.clone_detail_request).start() mock.patch.object(self.requester, 'volumeDetailByName', self.volume_detail_request).start() self.volume_detail_response = {'status': 0, 'volumeInfoResult': {'volumeId': 1234567}} clone_success = copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success']) clone_pending = copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success']) clone_fail = copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success']) clone_response_fail = copy.deepcopy(self.FAKE_SOAP_RESPONSE['standard']['success']) clone_success['result'] = six.text_type(self.DETAIL_OPTIONS['success']) clone_pending['result'] = six.text_type(self.DETAIL_OPTIONS['pending']) clone_fail['result'] = six.text_type(self.DETAIL_OPTIONS['failure']) clone_response_fail['status'] = 1 self.FAKE_SOAP_RESPONSE['clone_detail'] = {'success': clone_success, 'fail': clone_fail, 'pending': clone_pending, 'request_fail': clone_response_fail} self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response['result'] = '1234' self.response_detail = self.FAKE_SOAP_RESPONSE['clone_detail']['success'] self.test_pending = False self.test_pending_count = 0<|docstring|>Initialise variables and mock functions.<|endoftext|>
a29401fefec5c58ff440d735f607951a906fe229328b2cf894f2f586995ecda9
def clone_request(self, *cmd, **kwargs): 'Mock function for the createVolumeFromSnapshot function.' return self.response
Mock function for the createVolumeFromSnapshot function.
cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py
clone_request
bswartz/cinder
11
python
def clone_request(self, *cmd, **kwargs): return self.response
def clone_request(self, *cmd, **kwargs): return self.response<|docstring|>Mock function for the createVolumeFromSnapshot function.<|endoftext|>
aab8698e64fa225d85826399d27b8de85736c27d3b86063b7ff69aed4351e53e
def clone_detail_request(self, *cmd, **kwargs): 'Mock function for the restoreDetail function.' if self.test_pending: if (self.test_pending_count == 0): self.test_pending_count += 1 return self.FAKE_SOAP_RESPONSE['clone_detail']['pending'] else: return self.FAKE_SOAP_RESPONSE['clone_detail']['success'] else: return self.response_detail
Mock function for the restoreDetail function.
cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py
clone_detail_request
bswartz/cinder
11
python
def clone_detail_request(self, *cmd, **kwargs): if self.test_pending: if (self.test_pending_count == 0): self.test_pending_count += 1 return self.FAKE_SOAP_RESPONSE['clone_detail']['pending'] else: return self.FAKE_SOAP_RESPONSE['clone_detail']['success'] else: return self.response_detail
def clone_detail_request(self, *cmd, **kwargs): if self.test_pending: if (self.test_pending_count == 0): self.test_pending_count += 1 return self.FAKE_SOAP_RESPONSE['clone_detail']['pending'] else: return self.FAKE_SOAP_RESPONSE['clone_detail']['success'] else: return self.response_detail<|docstring|>Mock function for the restoreDetail function.<|endoftext|>
fb41bdebf7c9261cb770fe9f9779f8d7a599818edc5cdf8e5a664267e054e92c
def volume_detail_request(self, *cmd, **kwargs): 'Mock function for the volumeDetail function.' return self.volume_detail_response
Mock function for the volumeDetail function.
cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py
volume_detail_request
bswartz/cinder
11
python
def volume_detail_request(self, *cmd, **kwargs): return self.volume_detail_response
def volume_detail_request(self, *cmd, **kwargs): return self.volume_detail_response<|docstring|>Mock function for the volumeDetail function.<|endoftext|>
8984fb4980e3ac88ee283b834c92a54b1a4b1b2f4b2e1b6aadb83a9f798cf171
def test_create_cloned_volume(self): 'Normal case.' expected = 1234567 actual = self.driver.create_cloned_volume(self.dest_volume, self.volume) self.assertEqual(expected, actual['provider_location'])
Normal case.
cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py
test_create_cloned_volume
bswartz/cinder
11
python
def test_create_cloned_volume(self): expected = 1234567 actual = self.driver.create_cloned_volume(self.dest_volume, self.volume) self.assertEqual(expected, actual['provider_location'])
def test_create_cloned_volume(self): expected = 1234567 actual = self.driver.create_cloned_volume(self.dest_volume, self.volume) self.assertEqual(expected, actual['provider_location'])<|docstring|>Normal case.<|endoftext|>
4726ebc3cfe3198edbff32a4a8846d11eb4f6de65b4b9027c2fd2fee37701905
def test_create_clone_volume_fail(self): 'Clone volume request to DISCO fails.' self.response = self.FAKE_SOAP_RESPONSE['standard']['fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)
Clone volume request to DISCO fails.
cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py
test_create_clone_volume_fail
bswartz/cinder
11
python
def test_create_clone_volume_fail(self): self.response = self.FAKE_SOAP_RESPONSE['standard']['fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)
def test_create_clone_volume_fail(self): self.response = self.FAKE_SOAP_RESPONSE['standard']['fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)<|docstring|>Clone volume request to DISCO fails.<|endoftext|>
da288db297962b4532592096be7decbf880847b8e294607f8c46fd3a9c5e80f1
def test_create_cloned_volume_fail_not_immediate(self): 'Get clone detail returns that the clone fails.' self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = self.FAKE_SOAP_RESPONSE['clone_detail']['fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)
Get clone detail returns that the clone fails.
cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py
test_create_cloned_volume_fail_not_immediate
bswartz/cinder
11
python
def test_create_cloned_volume_fail_not_immediate(self): self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = self.FAKE_SOAP_RESPONSE['clone_detail']['fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)
def test_create_cloned_volume_fail_not_immediate(self): self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = self.FAKE_SOAP_RESPONSE['clone_detail']['fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)<|docstring|>Get clone detail returns that the clone fails.<|endoftext|>
8c2543e46b29ef2c9a7852ef57edee97496e116caa1c60acc8dfe659d8305cfc
def test_create_cloned_volume_fail_not_immediate_response_fail(self): 'Get clone detail request to DISCO fails.' self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = self.FAKE_SOAP_RESPONSE['clone_detail']['request_fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)
Get clone detail request to DISCO fails.
cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py
test_create_cloned_volume_fail_not_immediate_response_fail
bswartz/cinder
11
python
def test_create_cloned_volume_fail_not_immediate_response_fail(self): self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = self.FAKE_SOAP_RESPONSE['clone_detail']['request_fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)
def test_create_cloned_volume_fail_not_immediate_response_fail(self): self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = self.FAKE_SOAP_RESPONSE['clone_detail']['request_fail'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)<|docstring|>Get clone detail request to DISCO fails.<|endoftext|>
faaaf040c57d5e6e71af22f32ab798cc6b5de085d396115ca1c52278e52358d1
def test_create_cloned_volume_fail_not_immediate_request_fail(self): 'Get clone detail returns the task is pending then complete.' self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.test_pending = True self.test_create_cloned_volume()
Get clone detail returns the task is pending then complete.
cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py
test_create_cloned_volume_fail_not_immediate_request_fail
bswartz/cinder
11
python
def test_create_cloned_volume_fail_not_immediate_request_fail(self): self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.test_pending = True self.test_create_cloned_volume()
def test_create_cloned_volume_fail_not_immediate_request_fail(self): self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.test_pending = True self.test_create_cloned_volume()<|docstring|>Get clone detail returns the task is pending then complete.<|endoftext|>
a2fe240aa0af58bf41f17d46995d10f989f7254cc9289f0f9351bf4cee2b2ea9
@mock.patch.object(time, 'time') def test_create_cloned_volume_timeout(self, mock_time): 'Clone request timeout.' timeout = 3 mock_time.side_effect = utils.generate_timeout_series(timeout) self.driver.configuration.clone_check_timeout = timeout self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = self.FAKE_SOAP_RESPONSE['clone_detail']['pending'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)
Clone request timeout.
cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py
test_create_cloned_volume_timeout
bswartz/cinder
11
python
@mock.patch.object(time, 'time') def test_create_cloned_volume_timeout(self, mock_time): timeout = 3 mock_time.side_effect = utils.generate_timeout_series(timeout) self.driver.configuration.clone_check_timeout = timeout self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = self.FAKE_SOAP_RESPONSE['clone_detail']['pending'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)
@mock.patch.object(time, 'time') def test_create_cloned_volume_timeout(self, mock_time): timeout = 3 mock_time.side_effect = utils.generate_timeout_series(timeout) self.driver.configuration.clone_check_timeout = timeout self.response = self.FAKE_SOAP_RESPONSE['standard']['success'] self.response_detail = self.FAKE_SOAP_RESPONSE['clone_detail']['pending'] self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)<|docstring|>Clone request timeout.<|endoftext|>
136a69edf12dac1616a153afc488423e8c3604771f06c15f14f5b7cac043ff77
def test_create_cloned_volume_volume_detail_fail(self): 'Get volume detail request to DISCO fails.' self.volume_detail_response['status'] = 1 self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)
Get volume detail request to DISCO fails.
cinder/tests/unit/volume/drivers/disco/test_create_cloned_volume.py
test_create_cloned_volume_volume_detail_fail
bswartz/cinder
11
python
def test_create_cloned_volume_volume_detail_fail(self): self.volume_detail_response['status'] = 1 self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)
def test_create_cloned_volume_volume_detail_fail(self): self.volume_detail_response['status'] = 1 self.assertRaises(exception.VolumeBackendAPIException, self.test_create_cloned_volume)<|docstring|>Get volume detail request to DISCO fails.<|endoftext|>
51746fa35b2076c3024d1996cba65920519adb4e9a62b6d03ca316848aa556e1
def encode(ctx: typer.Context, input: str=typer.Argument(..., help='Input data word as hex string'), n_bits: int=typer.Argument(..., help='Length of the input data word in number of bits'), parity_location: ParityLocationChoices=typer.Argument('DEFAULT', case_sensitive=False, help='Speciy how the parity bits are placed in the encoded message')): '\n Encode the provided input data word, which is interpreted\n as being word of the specified number of bits.\n ' try: input_data = int(input, 16) except ValueError: print('Input data must be a valid hexadecimal string') sys.exit(1) n_bits = int(n_bits) if (n_bits < 4): raise ValueError('Cannot encode values that are less than 4 bits in length!') input_data_binary_string = f'{bin(input_data)[2:]:0>{n_bits}}' parity_location_map = {'DEFAULT': hamming_codec.ParityLocation.DEFAULT, 'MSB': hamming_codec.ParityLocation.MSB, 'LSB': hamming_codec.ParityLocation.LSB} if (parity_location not in parity_location_map): raise ValueError(f'Invalid parity location provided: "{parity_location}"') parity_location = parity_location_map[parity_location] encoded_binary_string = hamming_codec.encode(input_data, n_bits, parity_location) encoded_int = int(encoded_binary_string, 2) if ctx.obj['VERBOSE']: print(f"Input value : 0x{input.replace('0x', '')}, size = {n_bits} bits") print(f'Input value (bin) : 0b{input_data_binary_string}') print(f'Encoded value : {hex(encoded_int)}') print(f'Encoded value (bin) : 0b{encoded_binary_string}, size = {len(encoded_binary_string)} bits') else: print(f'{hex(encoded_int)} {len(encoded_binary_string)}', file=sys.stdout)
Encode the provided input data word, which is interpreted as being word of the specified number of bits.
src/python/cli/codec_cli.py
encode
dantrim/hamming-codec
7
python
def encode(ctx: typer.Context, input: str=typer.Argument(..., help='Input data word as hex string'), n_bits: int=typer.Argument(..., help='Length of the input data word in number of bits'), parity_location: ParityLocationChoices=typer.Argument('DEFAULT', case_sensitive=False, help='Speciy how the parity bits are placed in the encoded message')): '\n Encode the provided input data word, which is interpreted\n as being word of the specified number of bits.\n ' try: input_data = int(input, 16) except ValueError: print('Input data must be a valid hexadecimal string') sys.exit(1) n_bits = int(n_bits) if (n_bits < 4): raise ValueError('Cannot encode values that are less than 4 bits in length!') input_data_binary_string = f'{bin(input_data)[2:]:0>{n_bits}}' parity_location_map = {'DEFAULT': hamming_codec.ParityLocation.DEFAULT, 'MSB': hamming_codec.ParityLocation.MSB, 'LSB': hamming_codec.ParityLocation.LSB} if (parity_location not in parity_location_map): raise ValueError(f'Invalid parity location provided: "{parity_location}"') parity_location = parity_location_map[parity_location] encoded_binary_string = hamming_codec.encode(input_data, n_bits, parity_location) encoded_int = int(encoded_binary_string, 2) if ctx.obj['VERBOSE']: print(f"Input value : 0x{input.replace('0x', )}, size = {n_bits} bits") print(f'Input value (bin) : 0b{input_data_binary_string}') print(f'Encoded value : {hex(encoded_int)}') print(f'Encoded value (bin) : 0b{encoded_binary_string}, size = {len(encoded_binary_string)} bits') else: print(f'{hex(encoded_int)} {len(encoded_binary_string)}', file=sys.stdout)
def encode(ctx: typer.Context, input: str=typer.Argument(..., help='Input data word as hex string'), n_bits: int=typer.Argument(..., help='Length of the input data word in number of bits'), parity_location: ParityLocationChoices=typer.Argument('DEFAULT', case_sensitive=False, help='Speciy how the parity bits are placed in the encoded message')): '\n Encode the provided input data word, which is interpreted\n as being word of the specified number of bits.\n ' try: input_data = int(input, 16) except ValueError: print('Input data must be a valid hexadecimal string') sys.exit(1) n_bits = int(n_bits) if (n_bits < 4): raise ValueError('Cannot encode values that are less than 4 bits in length!') input_data_binary_string = f'{bin(input_data)[2:]:0>{n_bits}}' parity_location_map = {'DEFAULT': hamming_codec.ParityLocation.DEFAULT, 'MSB': hamming_codec.ParityLocation.MSB, 'LSB': hamming_codec.ParityLocation.LSB} if (parity_location not in parity_location_map): raise ValueError(f'Invalid parity location provided: "{parity_location}"') parity_location = parity_location_map[parity_location] encoded_binary_string = hamming_codec.encode(input_data, n_bits, parity_location) encoded_int = int(encoded_binary_string, 2) if ctx.obj['VERBOSE']: print(f"Input value : 0x{input.replace('0x', )}, size = {n_bits} bits") print(f'Input value (bin) : 0b{input_data_binary_string}') print(f'Encoded value : {hex(encoded_int)}') print(f'Encoded value (bin) : 0b{encoded_binary_string}, size = {len(encoded_binary_string)} bits') else: print(f'{hex(encoded_int)} {len(encoded_binary_string)}', file=sys.stdout)<|docstring|>Encode the provided input data word, which is interpreted as being word of the specified number of bits.<|endoftext|>
de3673c1d99bca8d14020e5ec640543bdc92a1f276f5890deb00626aaac9989b
def decode(ctx: typer.Context, input: str=typer.Argument(..., help='Input message to decode as a hex string'), n_bits: int=typer.Argument(..., help='Length of the input message word in number of bits'), parity_location: ParityLocationChoices=typer.Argument('DEFAULT', case_sensitive=False, help='Specify how the parity bits are placed in the encoded message'), n_parity_bits: int=typer.Argument(0, help='Number of parity bits in the message (required for non-default parity location choice)')): '\n Decode the input message that is the specified number of bits in\n length.\n ' try: input_data = int(input, 16) except ValueError: print('Input data must be a valid hexadecimal string') sys.exit(1) n_bits_input = int(n_bits) if (n_bits_input < 4): raise ValueError('Cannot decode values that are less than 4 bits in length!') input_data_binary_string = f'{bin(input_data)[2:]:0>{n_bits_input}}' parity_location_map = {'DEFAULT': hamming_codec.ParityLocation.DEFAULT, 'MSB': hamming_codec.ParityLocation.MSB, 'LSB': hamming_codec.ParityLocation.LSB} if (parity_location not in parity_location_map): raise ValueError(f'Invalid parity location provided: "{parity_location}"') parity_location = parity_location_map[parity_location] if ((n_parity_bits == 0) and (parity_location != hamming_codec.ParityLocation.DEFAULT)): raise ValueError('For non-default parity bit locations, the number of parity bits must be specified') decoded_binary_string = hamming_codec.decode(input_data, n_bits_input, parity_location, n_parity_bits) decoded_int = int(decoded_binary_string, 2) if ctx.obj['VERBOSE']: print(f"Input value : 0x{input.replace('0x', '')}, size = {n_bits_input} bits") print(f'Input value (bin) : 0b{input_data_binary_string}') print(f'Decoded value : {hex(decoded_int)}') print(f'Decoded value (bin) : 0b{decoded_binary_string}, size = {len(decoded_binary_string)} bits') else: print(f'{hex(decoded_int)} {len(decoded_binary_string)}', file=sys.stdout)
Decode the input message that is the specified number of bits in length.
src/python/cli/codec_cli.py
decode
dantrim/hamming-codec
7
python
def decode(ctx: typer.Context, input: str=typer.Argument(..., help='Input message to decode as a hex string'), n_bits: int=typer.Argument(..., help='Length of the input message word in number of bits'), parity_location: ParityLocationChoices=typer.Argument('DEFAULT', case_sensitive=False, help='Specify how the parity bits are placed in the encoded message'), n_parity_bits: int=typer.Argument(0, help='Number of parity bits in the message (required for non-default parity location choice)')): '\n Decode the input message that is the specified number of bits in\n length.\n ' try: input_data = int(input, 16) except ValueError: print('Input data must be a valid hexadecimal string') sys.exit(1) n_bits_input = int(n_bits) if (n_bits_input < 4): raise ValueError('Cannot decode values that are less than 4 bits in length!') input_data_binary_string = f'{bin(input_data)[2:]:0>{n_bits_input}}' parity_location_map = {'DEFAULT': hamming_codec.ParityLocation.DEFAULT, 'MSB': hamming_codec.ParityLocation.MSB, 'LSB': hamming_codec.ParityLocation.LSB} if (parity_location not in parity_location_map): raise ValueError(f'Invalid parity location provided: "{parity_location}"') parity_location = parity_location_map[parity_location] if ((n_parity_bits == 0) and (parity_location != hamming_codec.ParityLocation.DEFAULT)): raise ValueError('For non-default parity bit locations, the number of parity bits must be specified') decoded_binary_string = hamming_codec.decode(input_data, n_bits_input, parity_location, n_parity_bits) decoded_int = int(decoded_binary_string, 2) if ctx.obj['VERBOSE']: print(f"Input value : 0x{input.replace('0x', )}, size = {n_bits_input} bits") print(f'Input value (bin) : 0b{input_data_binary_string}') print(f'Decoded value : {hex(decoded_int)}') print(f'Decoded value (bin) : 0b{decoded_binary_string}, size = {len(decoded_binary_string)} bits') else: print(f'{hex(decoded_int)} {len(decoded_binary_string)}', file=sys.stdout)
def decode(ctx: typer.Context, input: str=typer.Argument(..., help='Input message to decode as a hex string'), n_bits: int=typer.Argument(..., help='Length of the input message word in number of bits'), parity_location: ParityLocationChoices=typer.Argument('DEFAULT', case_sensitive=False, help='Specify how the parity bits are placed in the encoded message'), n_parity_bits: int=typer.Argument(0, help='Number of parity bits in the message (required for non-default parity location choice)')): '\n Decode the input message that is the specified number of bits in\n length.\n ' try: input_data = int(input, 16) except ValueError: print('Input data must be a valid hexadecimal string') sys.exit(1) n_bits_input = int(n_bits) if (n_bits_input < 4): raise ValueError('Cannot decode values that are less than 4 bits in length!') input_data_binary_string = f'{bin(input_data)[2:]:0>{n_bits_input}}' parity_location_map = {'DEFAULT': hamming_codec.ParityLocation.DEFAULT, 'MSB': hamming_codec.ParityLocation.MSB, 'LSB': hamming_codec.ParityLocation.LSB} if (parity_location not in parity_location_map): raise ValueError(f'Invalid parity location provided: "{parity_location}"') parity_location = parity_location_map[parity_location] if ((n_parity_bits == 0) and (parity_location != hamming_codec.ParityLocation.DEFAULT)): raise ValueError('For non-default parity bit locations, the number of parity bits must be specified') decoded_binary_string = hamming_codec.decode(input_data, n_bits_input, parity_location, n_parity_bits) decoded_int = int(decoded_binary_string, 2) if ctx.obj['VERBOSE']: print(f"Input value : 0x{input.replace('0x', )}, size = {n_bits_input} bits") print(f'Input value (bin) : 0b{input_data_binary_string}') print(f'Decoded value : {hex(decoded_int)}') print(f'Decoded value (bin) : 0b{decoded_binary_string}, size = {len(decoded_binary_string)} bits') else: print(f'{hex(decoded_int)} {len(decoded_binary_string)}', file=sys.stdout)<|docstring|>Decode the input message that is the 
specified number of bits in length.<|endoftext|>
619449e6758f6c7dea71e81273c91e942d916eaeb8825271d57bacea7a2c4e90
def create_sql_connection_admintable() -> SQLConnectionTableAdmin: 'Create the table structure with the SQL connections for Admin.\n\n :return: SQL Connection Table Admin object.\n ' op_column = OperationsColumn(verbose_name=_('Operations'), template_file='connection/includes/partial_adminop.html', template_context=(lambda record: {'id': record['id'], 'edit_url': reverse('connection:sqlconn_edit', kwargs={'pk': record['id']}), 'view_url': reverse('connection:sqlconn_view', kwargs={'pk': record['id']}), 'clone_url': reverse('connection:sqlconn_clone', kwargs={'pk': record['id']}), 'delete_url': reverse('connection:sqlconn_delete', kwargs={'pk': record['id']})})) return SQLConnectionTableAdmin(models.SQLConnection.objects.values('id', 'name', 'description_text', 'enabled'), orderable=False, extra_columns=[('operations', op_column)])
Create the table structure with the SQL connections for Admin. :return: SQL Connection Table Admin object.
ontask/connection/services/sql.py
create_sql_connection_admintable
ubc/ontask_b
33
python
def create_sql_connection_admintable() -> SQLConnectionTableAdmin: 'Create the table structure with the SQL connections for Admin.\n\n :return: SQL Connection Table Admin object.\n ' op_column = OperationsColumn(verbose_name=_('Operations'), template_file='connection/includes/partial_adminop.html', template_context=(lambda record: {'id': record['id'], 'edit_url': reverse('connection:sqlconn_edit', kwargs={'pk': record['id']}), 'view_url': reverse('connection:sqlconn_view', kwargs={'pk': record['id']}), 'clone_url': reverse('connection:sqlconn_clone', kwargs={'pk': record['id']}), 'delete_url': reverse('connection:sqlconn_delete', kwargs={'pk': record['id']})})) return SQLConnectionTableAdmin(models.SQLConnection.objects.values('id', 'name', 'description_text', 'enabled'), orderable=False, extra_columns=[('operations', op_column)])
def create_sql_connection_admintable() -> SQLConnectionTableAdmin: 'Create the table structure with the SQL connections for Admin.\n\n :return: SQL Connection Table Admin object.\n ' op_column = OperationsColumn(verbose_name=_('Operations'), template_file='connection/includes/partial_adminop.html', template_context=(lambda record: {'id': record['id'], 'edit_url': reverse('connection:sqlconn_edit', kwargs={'pk': record['id']}), 'view_url': reverse('connection:sqlconn_view', kwargs={'pk': record['id']}), 'clone_url': reverse('connection:sqlconn_clone', kwargs={'pk': record['id']}), 'delete_url': reverse('connection:sqlconn_delete', kwargs={'pk': record['id']})})) return SQLConnectionTableAdmin(models.SQLConnection.objects.values('id', 'name', 'description_text', 'enabled'), orderable=False, extra_columns=[('operations', op_column)])<|docstring|>Create the table structure with the SQL connections for Admin. :return: SQL Connection Table Admin object.<|endoftext|>
de009895470c20e4e58a7b5dbb03218094732a9538882f9e69526e9979514af7
def sql_connection_select_table(select_url: str) -> SQLConnectionTableSelect: 'Create the table structure with the SQL connections for Running.\n\n :param select_url: URL to use for the select link in every row\n :return: SQL Connection Table Run object.\n ' operation_column = OperationsColumn(verbose_name='', template_file='connection/includes/partial_select.html', template_context=(lambda record: {'id': record['id'], 'view_url': reverse('connection:sqlconn_view', kwargs={'pk': record['id']})})) return SQLConnectionTableSelect(models.SQLConnection.objects.filter(enabled=True).values('id', 'name', 'description_text'), select_url=select_url, orderable=False, extra_columns=[('operations', operation_column)])
Create the table structure with the SQL connections for Running. :param select_url: URL to use for the select link in every row :return: SQL Connection Table Run object.
ontask/connection/services/sql.py
sql_connection_select_table
ubc/ontask_b
33
python
def sql_connection_select_table(select_url: str) -> SQLConnectionTableSelect: 'Create the table structure with the SQL connections for Running.\n\n :param select_url: URL to use for the select link in every row\n :return: SQL Connection Table Run object.\n ' operation_column = OperationsColumn(verbose_name=, template_file='connection/includes/partial_select.html', template_context=(lambda record: {'id': record['id'], 'view_url': reverse('connection:sqlconn_view', kwargs={'pk': record['id']})})) return SQLConnectionTableSelect(models.SQLConnection.objects.filter(enabled=True).values('id', 'name', 'description_text'), select_url=select_url, orderable=False, extra_columns=[('operations', operation_column)])
def sql_connection_select_table(select_url: str) -> SQLConnectionTableSelect: 'Create the table structure with the SQL connections for Running.\n\n :param select_url: URL to use for the select link in every row\n :return: SQL Connection Table Run object.\n ' operation_column = OperationsColumn(verbose_name=, template_file='connection/includes/partial_select.html', template_context=(lambda record: {'id': record['id'], 'view_url': reverse('connection:sqlconn_view', kwargs={'pk': record['id']})})) return SQLConnectionTableSelect(models.SQLConnection.objects.filter(enabled=True).values('id', 'name', 'description_text'), select_url=select_url, orderable=False, extra_columns=[('operations', operation_column)])<|docstring|>Create the table structure with the SQL connections for Running. :param select_url: URL to use for the select link in every row :return: SQL Connection Table Run object.<|endoftext|>
4b8bf3f13fec9fbb93bfa67a0b226946eff782b626a28c69aafa0347da00ac03
@staticmethod def render_enabled(record): 'Render the boolean to allow changes.' return render_to_string('connection/includes/partial_enable.html', {'id': record['id'], 'enabled': record['enabled'], 'toggle_url': reverse('connection:sqlconn_toggle', kwargs={'pk': record['id']})})
Render the boolean to allow changes.
ontask/connection/services/sql.py
render_enabled
ubc/ontask_b
33
python
@staticmethod def render_enabled(record): return render_to_string('connection/includes/partial_enable.html', {'id': record['id'], 'enabled': record['enabled'], 'toggle_url': reverse('connection:sqlconn_toggle', kwargs={'pk': record['id']})})
@staticmethod def render_enabled(record): return render_to_string('connection/includes/partial_enable.html', {'id': record['id'], 'enabled': record['enabled'], 'toggle_url': reverse('connection:sqlconn_toggle', kwargs={'pk': record['id']})})<|docstring|>Render the boolean to allow changes.<|endoftext|>
de572005d2f5382e62eaa34201bb2511082cfe5065eefa5e5bf76a1bc3077830
def __init__(self, *args, **kwargs): 'Store the select url string to use when rendering name.' self.select_url = kwargs.pop('select_url') super().__init__(*args, **kwargs)
Store the select url string to use when rendering name.
ontask/connection/services/sql.py
__init__
ubc/ontask_b
33
python
def __init__(self, *args, **kwargs): self.select_url = kwargs.pop('select_url') super().__init__(*args, **kwargs)
def __init__(self, *args, **kwargs): self.select_url = kwargs.pop('select_url') super().__init__(*args, **kwargs)<|docstring|>Store the select url string to use when rendering name.<|endoftext|>
96574426e0d17187319cca39c47698dc14869541813e1f8b4bd167d04cc8b73b
def render_name(self, record): 'Render the name as a link.' return format_html('<a href="{0}">{1}</a>', reverse(self.select_url, kwargs={'pk': record['id']}), record['name'])
Render the name as a link.
ontask/connection/services/sql.py
render_name
ubc/ontask_b
33
python
def render_name(self, record): return format_html('<a href="{0}">{1}</a>', reverse(self.select_url, kwargs={'pk': record['id']}), record['name'])
def render_name(self, record): return format_html('<a href="{0}">{1}</a>', reverse(self.select_url, kwargs={'pk': record['id']}), record['name'])<|docstring|>Render the name as a link.<|endoftext|>
257f6a3829dc52ce994814efacf0d63b223af585e53de18fe809c5c21e749db6
def redirect_pagetype(request, typeofpage): '\n Used to redirect to a page for a different language\n e.g. from English language about us page to French version\n Request: redirect/aboutus\n Response fr/about-us\n ' ret = '/' cur_language = translation.get_language() try: sid = settings.LANGUAGE_SITE_MAP[cur_language] setattr(request, 'site_id', sid) request.session['site_id'] = sid except KeyError: msg = ('Please add language %s to settings.LANGUAGE_SITE_MAP' % cur_language) sys.stderr.write((msg + '\n')) sys.stderr.flush() try: ptype = Pagetype.objects.get(title=typeofpage) pid = Mlpage.objects.get(pagetype=ptype.id) thispage = Page.objects.get(id=pid.page_ptr_id, status=2) if (thispage.slug != '/'): ret = ('/' + thispage.slug) except ObjectDoesNotExist: pass except: sys.stderr.write((('redirect_pagetype: ' + typeofpage) + '\n')) return HttpResponseRedirect(ret)
Used to redirect to a page for a different language e.g. from English language about us page to French version Request: redirect/aboutus Response fr/about-us
mldemo/views.py
redirect_pagetype
alan-hicks/mldemo
4
python
def redirect_pagetype(request, typeofpage): '\n Used to redirect to a page for a different language\n e.g. from English language about us page to French version\n Request: redirect/aboutus\n Response fr/about-us\n ' ret = '/' cur_language = translation.get_language() try: sid = settings.LANGUAGE_SITE_MAP[cur_language] setattr(request, 'site_id', sid) request.session['site_id'] = sid except KeyError: msg = ('Please add language %s to settings.LANGUAGE_SITE_MAP' % cur_language) sys.stderr.write((msg + '\n')) sys.stderr.flush() try: ptype = Pagetype.objects.get(title=typeofpage) pid = Mlpage.objects.get(pagetype=ptype.id) thispage = Page.objects.get(id=pid.page_ptr_id, status=2) if (thispage.slug != '/'): ret = ('/' + thispage.slug) except ObjectDoesNotExist: pass except: sys.stderr.write((('redirect_pagetype: ' + typeofpage) + '\n')) return HttpResponseRedirect(ret)
def redirect_pagetype(request, typeofpage): '\n Used to redirect to a page for a different language\n e.g. from English language about us page to French version\n Request: redirect/aboutus\n Response fr/about-us\n ' ret = '/' cur_language = translation.get_language() try: sid = settings.LANGUAGE_SITE_MAP[cur_language] setattr(request, 'site_id', sid) request.session['site_id'] = sid except KeyError: msg = ('Please add language %s to settings.LANGUAGE_SITE_MAP' % cur_language) sys.stderr.write((msg + '\n')) sys.stderr.flush() try: ptype = Pagetype.objects.get(title=typeofpage) pid = Mlpage.objects.get(pagetype=ptype.id) thispage = Page.objects.get(id=pid.page_ptr_id, status=2) if (thispage.slug != '/'): ret = ('/' + thispage.slug) except ObjectDoesNotExist: pass except: sys.stderr.write((('redirect_pagetype: ' + typeofpage) + '\n')) return HttpResponseRedirect(ret)<|docstring|>Used to redirect to a page for a different language e.g. from English language about us page to French version Request: redirect/aboutus Response fr/about-us<|endoftext|>
c8e2ad89ee0e2f773e8eabc9e2a51fe8f8c3f3539253cde35ee90a13b277284c
def home(request): '\n Home page request\n ' filter_page = ('audience', 'you', 'marketing') fp_pages = Page.objects.filter(content_model='mlpage', mlpage__pagetype__title__in=filter_page).order_by('_order') thispage = Page.objects.get(slug='/') sql = "SELECT s.domain, pp.slug,\n substr(s.domain, 1 + position('/' IN s.domain)) as language_code\n FROM mldemo_mlpage AS p\n INNER JOIN mldemo_pagetype AS t ON p.pagetype_id = t.id\n INNER JOIN pages_page AS pp ON pp.id = p.page_ptr_id\n INNER JOIN django_site AS s ON pp.site_id = s.id\n WHERE t.title = %s" if thispage.mlpage.pagetype: cursor = connection.cursor() cursor.execute(sql, [thispage.mlpage.pagetype.title]) hreflang_list = cursor.fetchall() else: hreflang_list = {} context = {'languages': settings.LANGUAGES, 'hreflang_list': hreflang_list, 'fp_pages': fp_pages, 'page': thispage} return render(request, 'mldemo/home.html', context)
Home page request
mldemo/views.py
home
alan-hicks/mldemo
4
python
def home(request): '\n \n ' filter_page = ('audience', 'you', 'marketing') fp_pages = Page.objects.filter(content_model='mlpage', mlpage__pagetype__title__in=filter_page).order_by('_order') thispage = Page.objects.get(slug='/') sql = "SELECT s.domain, pp.slug,\n substr(s.domain, 1 + position('/' IN s.domain)) as language_code\n FROM mldemo_mlpage AS p\n INNER JOIN mldemo_pagetype AS t ON p.pagetype_id = t.id\n INNER JOIN pages_page AS pp ON pp.id = p.page_ptr_id\n INNER JOIN django_site AS s ON pp.site_id = s.id\n WHERE t.title = %s" if thispage.mlpage.pagetype: cursor = connection.cursor() cursor.execute(sql, [thispage.mlpage.pagetype.title]) hreflang_list = cursor.fetchall() else: hreflang_list = {} context = {'languages': settings.LANGUAGES, 'hreflang_list': hreflang_list, 'fp_pages': fp_pages, 'page': thispage} return render(request, 'mldemo/home.html', context)
def home(request): '\n \n ' filter_page = ('audience', 'you', 'marketing') fp_pages = Page.objects.filter(content_model='mlpage', mlpage__pagetype__title__in=filter_page).order_by('_order') thispage = Page.objects.get(slug='/') sql = "SELECT s.domain, pp.slug,\n substr(s.domain, 1 + position('/' IN s.domain)) as language_code\n FROM mldemo_mlpage AS p\n INNER JOIN mldemo_pagetype AS t ON p.pagetype_id = t.id\n INNER JOIN pages_page AS pp ON pp.id = p.page_ptr_id\n INNER JOIN django_site AS s ON pp.site_id = s.id\n WHERE t.title = %s" if thispage.mlpage.pagetype: cursor = connection.cursor() cursor.execute(sql, [thispage.mlpage.pagetype.title]) hreflang_list = cursor.fetchall() else: hreflang_list = {} context = {'languages': settings.LANGUAGES, 'hreflang_list': hreflang_list, 'fp_pages': fp_pages, 'page': thispage} return render(request, 'mldemo/home.html', context)<|docstring|>Home page request<|endoftext|>
7871d0c6f8c8510d8b4fed53a1146b3520363f1a29bc0c13db592fa1d82c3545
@property def rewards_steps_sessions(self): 'Returns n_best results sorted from the highest to the smallest.' return reversed([heapq.heappop(self._heap) for i in range(len(self._heap))])
Returns n_best results sorted from the highest to the smallest.
e2end/training.py
rewards_steps_sessions
oplatek/e2end
14
python
@property def rewards_steps_sessions(self): return reversed([heapq.heappop(self._heap) for i in range(len(self._heap))])
@property def rewards_steps_sessions(self): return reversed([heapq.heappop(self._heap) for i in range(len(self._heap))])<|docstring|>Returns n_best results sorted from the highest to the smallest.<|endoftext|>
47f06d674e7061c517b07cacd99252bc4301cf01e588cafb6272724af14269d7
def highest_reward(self): ' -666 is dummy value if there is no model logged' if (not self._heap): return (- 666) else: reward = heapq.nlargest(1, self._heap)[0][0] return reward
-666 is dummy value if there is no model logged
e2end/training.py
highest_reward
oplatek/e2end
14
python
def highest_reward(self): ' ' if (not self._heap): return (- 666) else: reward = heapq.nlargest(1, self._heap)[0][0] return reward
def highest_reward(self): ' ' if (not self._heap): return (- 666) else: reward = heapq.nlargest(1, self._heap)[0][0] return reward<|docstring|>-666 is dummy value if there is no model logged<|endoftext|>
2fc844ab4604027dd9cdd94b4e5733569278f96fba46e389bfc88b54c5e47a1b
@staticmethod def chunks_with_size(li, chunk_size=0, remove_incomplete_item=True): '\n list를 각 chunk가 chunk_size크기가 되도록 나눈다.\n 모두 chunk_size 크기를 갖고, 마지막에만 가장 작은 배열이 남는다.\n :param li:\n :param chunk_size: 0=나누지 않음.\n :param remove_incomplete_item:\n :return:\n ' if ((len(li) < 1) or (chunk_size < 1)): return [li] chunk_size = int(chunk_size) li2 = [] for i in range(0, len(li), chunk_size): item = li[i:(i + chunk_size)] if (remove_incomplete_item and (len(item) < chunk_size)): continue li2.append(item) return li2
list를 각 chunk가 chunk_size크기가 되도록 나눈다. 모두 chunk_size 크기를 갖고, 마지막에만 가장 작은 배열이 남는다. :param li: :param chunk_size: 0=나누지 않음. :param remove_incomplete_item: :return:
bage_utils/list_util.py
chunks_with_size
bage79/nlp4kor
60
python
@staticmethod def chunks_with_size(li, chunk_size=0, remove_incomplete_item=True): '\n list를 각 chunk가 chunk_size크기가 되도록 나눈다.\n 모두 chunk_size 크기를 갖고, 마지막에만 가장 작은 배열이 남는다.\n :param li:\n :param chunk_size: 0=나누지 않음.\n :param remove_incomplete_item:\n :return:\n ' if ((len(li) < 1) or (chunk_size < 1)): return [li] chunk_size = int(chunk_size) li2 = [] for i in range(0, len(li), chunk_size): item = li[i:(i + chunk_size)] if (remove_incomplete_item and (len(item) < chunk_size)): continue li2.append(item) return li2
@staticmethod def chunks_with_size(li, chunk_size=0, remove_incomplete_item=True): '\n list를 각 chunk가 chunk_size크기가 되도록 나눈다.\n 모두 chunk_size 크기를 갖고, 마지막에만 가장 작은 배열이 남는다.\n :param li:\n :param chunk_size: 0=나누지 않음.\n :param remove_incomplete_item:\n :return:\n ' if ((len(li) < 1) or (chunk_size < 1)): return [li] chunk_size = int(chunk_size) li2 = [] for i in range(0, len(li), chunk_size): item = li[i:(i + chunk_size)] if (remove_incomplete_item and (len(item) < chunk_size)): continue li2.append(item) return li2<|docstring|>list를 각 chunk가 chunk_size크기가 되도록 나눈다. 모두 chunk_size 크기를 갖고, 마지막에만 가장 작은 배열이 남는다. :param li: :param chunk_size: 0=나누지 않음. :param remove_incomplete_item: :return:<|endoftext|>
780abfa75441852d68c2c60bd8328c80de20710a1962a21d5cff6afdbb1061b6
@staticmethod def chunks_with_splits(li, max_split=1, remove_incomplete_item=True): '\n list를 chunk의 총 개수가 max_split 개수가 되도록 나눈다.\n 모두 똑같은 길이를 갖고, 마지막에만 가장 작은 배열이 남는다.\n :param li:\n :param max_split: 1=나누지 않음.\n :param remove_incomplete_item:\n :return:\n ' if (max_split <= 1): return [li] min_chunk_size = (len(li) // max_split) return ListUtil.chunks_with_size(li, min_chunk_size, remove_incomplete_item=remove_incomplete_item)
list를 chunk의 총 개수가 max_split 개수가 되도록 나눈다. 모두 똑같은 길이를 갖고, 마지막에만 가장 작은 배열이 남는다. :param li: :param max_split: 1=나누지 않음. :param remove_incomplete_item: :return:
bage_utils/list_util.py
chunks_with_splits
bage79/nlp4kor
60
python
@staticmethod def chunks_with_splits(li, max_split=1, remove_incomplete_item=True): '\n list를 chunk의 총 개수가 max_split 개수가 되도록 나눈다.\n 모두 똑같은 길이를 갖고, 마지막에만 가장 작은 배열이 남는다.\n :param li:\n :param max_split: 1=나누지 않음.\n :param remove_incomplete_item:\n :return:\n ' if (max_split <= 1): return [li] min_chunk_size = (len(li) // max_split) return ListUtil.chunks_with_size(li, min_chunk_size, remove_incomplete_item=remove_incomplete_item)
@staticmethod def chunks_with_splits(li, max_split=1, remove_incomplete_item=True): '\n list를 chunk의 총 개수가 max_split 개수가 되도록 나눈다.\n 모두 똑같은 길이를 갖고, 마지막에만 가장 작은 배열이 남는다.\n :param li:\n :param max_split: 1=나누지 않음.\n :param remove_incomplete_item:\n :return:\n ' if (max_split <= 1): return [li] min_chunk_size = (len(li) // max_split) return ListUtil.chunks_with_size(li, min_chunk_size, remove_incomplete_item=remove_incomplete_item)<|docstring|>list를 chunk의 총 개수가 max_split 개수가 되도록 나눈다. 모두 똑같은 길이를 갖고, 마지막에만 가장 작은 배열이 남는다. :param li: :param max_split: 1=나누지 않음. :param remove_incomplete_item: :return:<|endoftext|>
421458d64662c488d3b70ef6030934528ef7130f87f4268159e63ad6d803cc47
@staticmethod def chunks_banlanced(li, max_split=2): '\n list를 max_split 수 대로 균형있게 자른다. (각각 최대한 비슷한 길이가 됨.)\n 프로세스에 job을 분배할 때 사용.\n :param li: Split 대상 List\n :param max_split: Split 수\n :return Lists in list\n ' if (max_split < 2): return li min_chunk_size = (len(li) // max_split) max_chunk_size = (min_chunk_size + 1) if (min_chunk_size == 0): return [li] max_chunk_split = (len(li) % max_split) min_chunk_split = (max_split - max_chunk_split) li2 = [] li2.extend(list(ListUtil.chunks_with_size(li[:(min_chunk_size * min_chunk_split)], min_chunk_size))) li2.extend(list(ListUtil.chunks_with_size(li[(min_chunk_size * min_chunk_split):], max_chunk_size))) for a in li2: (yield a)
list를 max_split 수 대로 균형있게 자른다. (각각 최대한 비슷한 길이가 됨.) 프로세스에 job을 분배할 때 사용. :param li: Split 대상 List :param max_split: Split 수 :return Lists in list
bage_utils/list_util.py
chunks_banlanced
bage79/nlp4kor
60
python
@staticmethod def chunks_banlanced(li, max_split=2): '\n list를 max_split 수 대로 균형있게 자른다. (각각 최대한 비슷한 길이가 됨.)\n 프로세스에 job을 분배할 때 사용.\n :param li: Split 대상 List\n :param max_split: Split 수\n :return Lists in list\n ' if (max_split < 2): return li min_chunk_size = (len(li) // max_split) max_chunk_size = (min_chunk_size + 1) if (min_chunk_size == 0): return [li] max_chunk_split = (len(li) % max_split) min_chunk_split = (max_split - max_chunk_split) li2 = [] li2.extend(list(ListUtil.chunks_with_size(li[:(min_chunk_size * min_chunk_split)], min_chunk_size))) li2.extend(list(ListUtil.chunks_with_size(li[(min_chunk_size * min_chunk_split):], max_chunk_size))) for a in li2: (yield a)
@staticmethod def chunks_banlanced(li, max_split=2): '\n list를 max_split 수 대로 균형있게 자른다. (각각 최대한 비슷한 길이가 됨.)\n 프로세스에 job을 분배할 때 사용.\n :param li: Split 대상 List\n :param max_split: Split 수\n :return Lists in list\n ' if (max_split < 2): return li min_chunk_size = (len(li) // max_split) max_chunk_size = (min_chunk_size + 1) if (min_chunk_size == 0): return [li] max_chunk_split = (len(li) % max_split) min_chunk_split = (max_split - max_chunk_split) li2 = [] li2.extend(list(ListUtil.chunks_with_size(li[:(min_chunk_size * min_chunk_split)], min_chunk_size))) li2.extend(list(ListUtil.chunks_with_size(li[(min_chunk_size * min_chunk_split):], max_chunk_size))) for a in li2: (yield a)<|docstring|>list를 max_split 수 대로 균형있게 자른다. (각각 최대한 비슷한 길이가 됨.) 프로세스에 job을 분배할 때 사용. :param li: Split 대상 List :param max_split: Split 수 :return Lists in list<|endoftext|>
97c54e6327f30ed6fc5b81410ec91cf4dd9a1670ab4278ec9f6cc390d83b3b4c
def GetTagKeyFromNamespacedName(namespaced_name): 'Gets the tag key from the namespaced name.\n\n Args:\n namespaced_name: Could be the resource name or namespaced name\n\n Returns:\n TagKey resource\n\n Raises:\n InvalidInputError: bad input\n ' parts = namespaced_name.split('/') if (len(parts) != 2): raise InvalidInputError('TagKey namespaced name [{}] invalid'.format(namespaced_name)) with endpoints.CrmEndpointOverrides('global'): name = '/'.join(['organizations', parts[0]]) req = ListResourceFns['tagKeys'](parent=name, pageSize=MAX_TAG_KEYS) service = ServiceFns['tagKeys']() try: response = service.List(req) except HttpForbiddenError: print("TagKey [{}] does not exist or user does not have permissions to resolve namespaced name. Retry using tagKey's resource name, such as tagKeys/123.".format(namespaced_name)) raise for key in response.tagKeys: if (key.namespacedName == namespaced_name): return key raise InvalidInputError('TagKey [{}] not found'.format(namespaced_name))
Gets the tag key from the namespaced name. Args: namespaced_name: Could be the resource name or namespaced name Returns: TagKey resource Raises: InvalidInputError: bad input
lib/googlecloudsdk/command_lib/resource_manager/tag_utils.py
GetTagKeyFromNamespacedName
google-cloud-sdk-unofficial/google-cloud-sdk
2
python
def GetTagKeyFromNamespacedName(namespaced_name): 'Gets the tag key from the namespaced name.\n\n Args:\n namespaced_name: Could be the resource name or namespaced name\n\n Returns:\n TagKey resource\n\n Raises:\n InvalidInputError: bad input\n ' parts = namespaced_name.split('/') if (len(parts) != 2): raise InvalidInputError('TagKey namespaced name [{}] invalid'.format(namespaced_name)) with endpoints.CrmEndpointOverrides('global'): name = '/'.join(['organizations', parts[0]]) req = ListResourceFns['tagKeys'](parent=name, pageSize=MAX_TAG_KEYS) service = ServiceFns['tagKeys']() try: response = service.List(req) except HttpForbiddenError: print("TagKey [{}] does not exist or user does not have permissions to resolve namespaced name. Retry using tagKey's resource name, such as tagKeys/123.".format(namespaced_name)) raise for key in response.tagKeys: if (key.namespacedName == namespaced_name): return key raise InvalidInputError('TagKey [{}] not found'.format(namespaced_name))
def GetTagKeyFromNamespacedName(namespaced_name): 'Gets the tag key from the namespaced name.\n\n Args:\n namespaced_name: Could be the resource name or namespaced name\n\n Returns:\n TagKey resource\n\n Raises:\n InvalidInputError: bad input\n ' parts = namespaced_name.split('/') if (len(parts) != 2): raise InvalidInputError('TagKey namespaced name [{}] invalid'.format(namespaced_name)) with endpoints.CrmEndpointOverrides('global'): name = '/'.join(['organizations', parts[0]]) req = ListResourceFns['tagKeys'](parent=name, pageSize=MAX_TAG_KEYS) service = ServiceFns['tagKeys']() try: response = service.List(req) except HttpForbiddenError: print("TagKey [{}] does not exist or user does not have permissions to resolve namespaced name. Retry using tagKey's resource name, such as tagKeys/123.".format(namespaced_name)) raise for key in response.tagKeys: if (key.namespacedName == namespaced_name): return key raise InvalidInputError('TagKey [{}] not found'.format(namespaced_name))<|docstring|>Gets the tag key from the namespaced name. Args: namespaced_name: Could be the resource name or namespaced name Returns: TagKey resource Raises: InvalidInputError: bad input<|endoftext|>
426be0557b8d1a56922211dba2bf7687e55b9f713cf7e31847f02f8cbd9cf7c7
def GetTagValueFromNamespacedName(namespaced_name): 'Gets the tag value from the namespaced name.\n\n Args:\n namespaced_name: Could be the resource name or namespaced name\n\n Returns:\n TagValue resource\n\n Raises:\n InvalidInputError: bad input\n ' parts = namespaced_name.split('/') if (len(parts) != 3): raise InvalidInputError('TagValue namespaced name [{}] invalid'.format(namespaced_name)) name = GetTagKeyFromNamespacedName('/'.join(parts[:2])).name with endpoints.CrmEndpointOverrides('global'): req = ListResourceFns['tagValues'](parent=name) service = ServiceFns['tagValues']() response = service.List(req) for value in response.tagValues: if (value.namespacedName == namespaced_name): return value raise InvalidInputError('TagValue [{}] not found'.format(namespaced_name))
Gets the tag value from the namespaced name. Args: namespaced_name: Could be the resource name or namespaced name Returns: TagValue resource Raises: InvalidInputError: bad input
lib/googlecloudsdk/command_lib/resource_manager/tag_utils.py
GetTagValueFromNamespacedName
google-cloud-sdk-unofficial/google-cloud-sdk
2
python
def GetTagValueFromNamespacedName(namespaced_name): 'Gets the tag value from the namespaced name.\n\n Args:\n namespaced_name: Could be the resource name or namespaced name\n\n Returns:\n TagValue resource\n\n Raises:\n InvalidInputError: bad input\n ' parts = namespaced_name.split('/') if (len(parts) != 3): raise InvalidInputError('TagValue namespaced name [{}] invalid'.format(namespaced_name)) name = GetTagKeyFromNamespacedName('/'.join(parts[:2])).name with endpoints.CrmEndpointOverrides('global'): req = ListResourceFns['tagValues'](parent=name) service = ServiceFns['tagValues']() response = service.List(req) for value in response.tagValues: if (value.namespacedName == namespaced_name): return value raise InvalidInputError('TagValue [{}] not found'.format(namespaced_name))
def GetTagValueFromNamespacedName(namespaced_name): 'Gets the tag value from the namespaced name.\n\n Args:\n namespaced_name: Could be the resource name or namespaced name\n\n Returns:\n TagValue resource\n\n Raises:\n InvalidInputError: bad input\n ' parts = namespaced_name.split('/') if (len(parts) != 3): raise InvalidInputError('TagValue namespaced name [{}] invalid'.format(namespaced_name)) name = GetTagKeyFromNamespacedName('/'.join(parts[:2])).name with endpoints.CrmEndpointOverrides('global'): req = ListResourceFns['tagValues'](parent=name) service = ServiceFns['tagValues']() response = service.List(req) for value in response.tagValues: if (value.namespacedName == namespaced_name): return value raise InvalidInputError('TagValue [{}] not found'.format(namespaced_name))<|docstring|>Gets the tag value from the namespaced name. Args: namespaced_name: Could be the resource name or namespaced name Returns: TagValue resource Raises: InvalidInputError: bad input<|endoftext|>
06220d4c0904d81c4a43233b6095841dad362b6f9b13fc2874d87d5e6e0d3191
def GetResourceFromNamespacedName(namespaced_name, resource_type): "Gets the resource from the namespaced name.\n\n Args:\n namespaced_name: Could be the resource name or namespaced name\n resource_type: the type of the resource ie: 'tagKeys', 'tagValues'. Used to\n determine which GET function to call\n\n Returns:\n resource\n " with endpoints.CrmEndpointOverrides('global'): service = ServiceFns[resource_type]() req = GetResourceFns[resource_type](name=namespaced_name) response = service.Get(req) return response
Gets the resource from the namespaced name. Args: namespaced_name: Could be the resource name or namespaced name resource_type: the type of the resource ie: 'tagKeys', 'tagValues'. Used to determine which GET function to call Returns: resource
lib/googlecloudsdk/command_lib/resource_manager/tag_utils.py
GetResourceFromNamespacedName
google-cloud-sdk-unofficial/google-cloud-sdk
2
python
def GetResourceFromNamespacedName(namespaced_name, resource_type): "Gets the resource from the namespaced name.\n\n Args:\n namespaced_name: Could be the resource name or namespaced name\n resource_type: the type of the resource ie: 'tagKeys', 'tagValues'. Used to\n determine which GET function to call\n\n Returns:\n resource\n " with endpoints.CrmEndpointOverrides('global'): service = ServiceFns[resource_type]() req = GetResourceFns[resource_type](name=namespaced_name) response = service.Get(req) return response
def GetResourceFromNamespacedName(namespaced_name, resource_type): "Gets the resource from the namespaced name.\n\n Args:\n namespaced_name: Could be the resource name or namespaced name\n resource_type: the type of the resource ie: 'tagKeys', 'tagValues'. Used to\n determine which GET function to call\n\n Returns:\n resource\n " with endpoints.CrmEndpointOverrides('global'): service = ServiceFns[resource_type]() req = GetResourceFns[resource_type](name=namespaced_name) response = service.Get(req) return response<|docstring|>Gets the resource from the namespaced name. Args: namespaced_name: Could be the resource name or namespaced name resource_type: the type of the resource ie: 'tagKeys', 'tagValues'. Used to determine which GET function to call Returns: resource<|endoftext|>
df4edc28d4eee6837e5eef0b230036f39a873ef459b6e79e32f07fb86940a105
def ProjectNameToBinding(project_name, tag_value, location=None): 'Returns the binding name given a project name and tag value.\n\n Requires binding list permission.\n\n Args:\n project_name: project name provided, fully qualified resource name\n tag_value: tag value to match the binding name to\n location: region or zone\n\n Returns:\n binding_name\n\n Raises:\n InvalidInputError: project not found\n ' service = ServiceFns['tagBindings']() with endpoints.CrmEndpointOverrides(location): req = ListResourceFns['tagBindings'](parent=project_name) response = service.List(req) for bn in response.tagBindings: if (bn.tagValue == tag_value): return bn.name raise InvalidInputError('Binding not found for parent [{}], tagValue [{}]'.format(project_name, tag_value))
Returns the binding name given a project name and tag value. Requires binding list permission. Args: project_name: project name provided, fully qualified resource name tag_value: tag value to match the binding name to location: region or zone Returns: binding_name Raises: InvalidInputError: project not found
lib/googlecloudsdk/command_lib/resource_manager/tag_utils.py
ProjectNameToBinding
google-cloud-sdk-unofficial/google-cloud-sdk
2
python
def ProjectNameToBinding(project_name, tag_value, location=None): 'Returns the binding name given a project name and tag value.\n\n Requires binding list permission.\n\n Args:\n project_name: project name provided, fully qualified resource name\n tag_value: tag value to match the binding name to\n location: region or zone\n\n Returns:\n binding_name\n\n Raises:\n InvalidInputError: project not found\n ' service = ServiceFns['tagBindings']() with endpoints.CrmEndpointOverrides(location): req = ListResourceFns['tagBindings'](parent=project_name) response = service.List(req) for bn in response.tagBindings: if (bn.tagValue == tag_value): return bn.name raise InvalidInputError('Binding not found for parent [{}], tagValue [{}]'.format(project_name, tag_value))
def ProjectNameToBinding(project_name, tag_value, location=None): 'Returns the binding name given a project name and tag value.\n\n Requires binding list permission.\n\n Args:\n project_name: project name provided, fully qualified resource name\n tag_value: tag value to match the binding name to\n location: region or zone\n\n Returns:\n binding_name\n\n Raises:\n InvalidInputError: project not found\n ' service = ServiceFns['tagBindings']() with endpoints.CrmEndpointOverrides(location): req = ListResourceFns['tagBindings'](parent=project_name) response = service.List(req) for bn in response.tagBindings: if (bn.tagValue == tag_value): return bn.name raise InvalidInputError('Binding not found for parent [{}], tagValue [{}]'.format(project_name, tag_value))<|docstring|>Returns the binding name given a project name and tag value. Requires binding list permission. Args: project_name: project name provided, fully qualified resource name tag_value: tag value to match the binding name to location: region or zone Returns: binding_name Raises: InvalidInputError: project not found<|endoftext|>
ca2fcc1e725fe805dbe64212795f7ed3699514527f6e13cfc69d1561de49d9d3
def GetCanonicalResourceName(resource_name, location, release_track): 'Returns the correct canonical name for the given resource.\n\n Args:\n resource_name: name of the resource\n location: location in which the resource lives\n release_track: release stage of current endpoint\n\n Returns:\n resource_name: either the original resource name, or correct canonical name\n ' gce_compute_instance_name_pattern = 'compute.googleapis.com/projects/([^/]+)/.*instances/([^/]+)' gce_search = re.search(gce_compute_instance_name_pattern, resource_name) if gce_search: if (not location): raise exceptions.InvalidArgumentException('--location', 'Please specify an appropriate cloud location with the --location flag.') (project_identifier, instance_identifier) = (gce_search.group(1), gce_search.group(2)) if re.search('([a-z]([-a-z0-9]*[a-z0-9])?)', instance_identifier): resource_name = resource_name.replace(('instances/%s' % instance_identifier), ('instances/%s' % _GetGceInstanceCanonicalName(project_identifier, instance_identifier, location, release_track))) return resource_name
Returns the correct canonical name for the given resource. Args: resource_name: name of the resource location: location in which the resource lives release_track: release stage of current endpoint Returns: resource_name: either the original resource name, or correct canonical name
lib/googlecloudsdk/command_lib/resource_manager/tag_utils.py
GetCanonicalResourceName
google-cloud-sdk-unofficial/google-cloud-sdk
2
python
def GetCanonicalResourceName(resource_name, location, release_track): 'Returns the correct canonical name for the given resource.\n\n Args:\n resource_name: name of the resource\n location: location in which the resource lives\n release_track: release stage of current endpoint\n\n Returns:\n resource_name: either the original resource name, or correct canonical name\n ' gce_compute_instance_name_pattern = 'compute.googleapis.com/projects/([^/]+)/.*instances/([^/]+)' gce_search = re.search(gce_compute_instance_name_pattern, resource_name) if gce_search: if (not location): raise exceptions.InvalidArgumentException('--location', 'Please specify an appropriate cloud location with the --location flag.') (project_identifier, instance_identifier) = (gce_search.group(1), gce_search.group(2)) if re.search('([a-z]([-a-z0-9]*[a-z0-9])?)', instance_identifier): resource_name = resource_name.replace(('instances/%s' % instance_identifier), ('instances/%s' % _GetGceInstanceCanonicalName(project_identifier, instance_identifier, location, release_track))) return resource_name
def GetCanonicalResourceName(resource_name, location, release_track): 'Returns the correct canonical name for the given resource.\n\n Args:\n resource_name: name of the resource\n location: location in which the resource lives\n release_track: release stage of current endpoint\n\n Returns:\n resource_name: either the original resource name, or correct canonical name\n ' gce_compute_instance_name_pattern = 'compute.googleapis.com/projects/([^/]+)/.*instances/([^/]+)' gce_search = re.search(gce_compute_instance_name_pattern, resource_name) if gce_search: if (not location): raise exceptions.InvalidArgumentException('--location', 'Please specify an appropriate cloud location with the --location flag.') (project_identifier, instance_identifier) = (gce_search.group(1), gce_search.group(2)) if re.search('([a-z]([-a-z0-9]*[a-z0-9])?)', instance_identifier): resource_name = resource_name.replace(('instances/%s' % instance_identifier), ('instances/%s' % _GetGceInstanceCanonicalName(project_identifier, instance_identifier, location, release_track))) return resource_name<|docstring|>Returns the correct canonical name for the given resource. Args: resource_name: name of the resource location: location in which the resource lives release_track: release stage of current endpoint Returns: resource_name: either the original resource name, or correct canonical name<|endoftext|>
b941e33e4f6e9f36a9c16eeb8310386eb3b265cca6930a2fca4869bb11cb1780
def _GetGceInstanceCanonicalName(project_identifier, instance_identifier, location, release_track): 'Returns the correct canonical name for the given gce compute instance.\n\n Args:\n project_identifier: project number of the compute instance\n instance_identifier: name of the instance\n location: location in which the resource lives\n release_track: release stage of current endpoint\n\n Returns:\n instance_id: returns the canonical instance id\n ' compute_holder = base_classes.ComputeApiHolder(release_track) client = compute_holder.client request = (client.apitools_client.instances, 'Get', client.messages.ComputeInstancesGetRequest(instance=instance_identifier, project=project_identifier, zone=location)) errors_to_collect = [] instances = client.MakeRequests([request], errors_to_collect=errors_to_collect) if errors_to_collect: raise core_exceptions.MultiError(errors_to_collect) return str(instances[0].id)
Returns the correct canonical name for the given gce compute instance. Args: project_identifier: project number of the compute instance instance_identifier: name of the instance location: location in which the resource lives release_track: release stage of current endpoint Returns: instance_id: returns the canonical instance id
lib/googlecloudsdk/command_lib/resource_manager/tag_utils.py
_GetGceInstanceCanonicalName
google-cloud-sdk-unofficial/google-cloud-sdk
2
python
def _GetGceInstanceCanonicalName(project_identifier, instance_identifier, location, release_track): 'Returns the correct canonical name for the given gce compute instance.\n\n Args:\n project_identifier: project number of the compute instance\n instance_identifier: name of the instance\n location: location in which the resource lives\n release_track: release stage of current endpoint\n\n Returns:\n instance_id: returns the canonical instance id\n ' compute_holder = base_classes.ComputeApiHolder(release_track) client = compute_holder.client request = (client.apitools_client.instances, 'Get', client.messages.ComputeInstancesGetRequest(instance=instance_identifier, project=project_identifier, zone=location)) errors_to_collect = [] instances = client.MakeRequests([request], errors_to_collect=errors_to_collect) if errors_to_collect: raise core_exceptions.MultiError(errors_to_collect) return str(instances[0].id)
def _GetGceInstanceCanonicalName(project_identifier, instance_identifier, location, release_track): 'Returns the correct canonical name for the given gce compute instance.\n\n Args:\n project_identifier: project number of the compute instance\n instance_identifier: name of the instance\n location: location in which the resource lives\n release_track: release stage of current endpoint\n\n Returns:\n instance_id: returns the canonical instance id\n ' compute_holder = base_classes.ComputeApiHolder(release_track) client = compute_holder.client request = (client.apitools_client.instances, 'Get', client.messages.ComputeInstancesGetRequest(instance=instance_identifier, project=project_identifier, zone=location)) errors_to_collect = [] instances = client.MakeRequests([request], errors_to_collect=errors_to_collect) if errors_to_collect: raise core_exceptions.MultiError(errors_to_collect) return str(instances[0].id)<|docstring|>Returns the correct canonical name for the given gce compute instance. Args: project_identifier: project number of the compute instance instance_identifier: name of the instance location: location in which the resource lives release_track: release stage of current endpoint Returns: instance_id: returns the canonical instance id<|endoftext|>
92295d7c8deb04b900954c7a8fe6968f523c6c0ba7cf1dee1e88b5a0566f57ac
def define_options(self) -> Optional[Any]: '\n Declare the options for the RabbitMQ builder.\n\n :return: The supported options\n ' return {'basename': OptionDef(required=True, default_value='rabbitmq', allowed_types=[str]), 'namespace': OptionDef(required=True, default_value='rabbitmq', allowed_types=[str]), 'config': {'enabled_plugins': OptionDef(default_value=['rabbitmq_peer_discovery_k8s'], allowed_types=[Sequence]), 'rabbitmq_conf': OptionDef(allowed_types=[str, ConfigFile]), 'erlang_cookie': OptionDef(required=True, default_value=str(uuid.uuid4()), format=OptionDefFormat.KDATA_VOLUME, allowed_types=[str, dict, KData_Secret]), 'loglevel': OptionDef(required=True, default_value='info', allowed_types=[str]), 'enable_prometheus': OptionDef(required=True, default_value=True, allowed_types=[bool]), 'prometheus_annotation': OptionDef(required=True, default_value=False, allowed_types=[bool]), 'load_definitions': OptionDef(format=OptionDefFormat.KDATA_VOLUME, allowed_types=[str, KData_Secret]), 'authorization': {'serviceaccount_create': OptionDef(required=True, default_value=True, allowed_types=[bool]), 'serviceaccount_use': OptionDef(allowed_types=[str]), 'roles_create': OptionDef(required=True, default_value=True, allowed_types=[bool]), 'roles_bind': OptionDef(required=True, default_value=True, allowed_types=[bool])}}, 'container': {'busybox': OptionDef(required=True, default_value='busybox:1.32.0', allowed_types=[str]), 'rabbitmq': OptionDef(required=True, default_value='rabbitmq:3.8.9-alpine', allowed_types=[str])}, 'kubernetes': {'volumes': {'data': OptionDef(required=True, format=OptionDefFormat.KDATA_VOLUME, allowed_types=[dict, *KDataHelper_Volume.allowed_kdata()])}, 'resources': {'statefulset': OptionDef(allowed_types=[dict])}}}
Declare the options for the RabbitMQ builder. :return: The supported options
kg_rabbitmq/option.py
define_options
RangelReale/kg_rabbitmq
0
python
def define_options(self) -> Optional[Any]: '\n Declare the options for the RabbitMQ builder.\n\n :return: The supported options\n ' return {'basename': OptionDef(required=True, default_value='rabbitmq', allowed_types=[str]), 'namespace': OptionDef(required=True, default_value='rabbitmq', allowed_types=[str]), 'config': {'enabled_plugins': OptionDef(default_value=['rabbitmq_peer_discovery_k8s'], allowed_types=[Sequence]), 'rabbitmq_conf': OptionDef(allowed_types=[str, ConfigFile]), 'erlang_cookie': OptionDef(required=True, default_value=str(uuid.uuid4()), format=OptionDefFormat.KDATA_VOLUME, allowed_types=[str, dict, KData_Secret]), 'loglevel': OptionDef(required=True, default_value='info', allowed_types=[str]), 'enable_prometheus': OptionDef(required=True, default_value=True, allowed_types=[bool]), 'prometheus_annotation': OptionDef(required=True, default_value=False, allowed_types=[bool]), 'load_definitions': OptionDef(format=OptionDefFormat.KDATA_VOLUME, allowed_types=[str, KData_Secret]), 'authorization': {'serviceaccount_create': OptionDef(required=True, default_value=True, allowed_types=[bool]), 'serviceaccount_use': OptionDef(allowed_types=[str]), 'roles_create': OptionDef(required=True, default_value=True, allowed_types=[bool]), 'roles_bind': OptionDef(required=True, default_value=True, allowed_types=[bool])}}, 'container': {'busybox': OptionDef(required=True, default_value='busybox:1.32.0', allowed_types=[str]), 'rabbitmq': OptionDef(required=True, default_value='rabbitmq:3.8.9-alpine', allowed_types=[str])}, 'kubernetes': {'volumes': {'data': OptionDef(required=True, format=OptionDefFormat.KDATA_VOLUME, allowed_types=[dict, *KDataHelper_Volume.allowed_kdata()])}, 'resources': {'statefulset': OptionDef(allowed_types=[dict])}}}
def define_options(self) -> Optional[Any]: '\n Declare the options for the RabbitMQ builder.\n\n :return: The supported options\n ' return {'basename': OptionDef(required=True, default_value='rabbitmq', allowed_types=[str]), 'namespace': OptionDef(required=True, default_value='rabbitmq', allowed_types=[str]), 'config': {'enabled_plugins': OptionDef(default_value=['rabbitmq_peer_discovery_k8s'], allowed_types=[Sequence]), 'rabbitmq_conf': OptionDef(allowed_types=[str, ConfigFile]), 'erlang_cookie': OptionDef(required=True, default_value=str(uuid.uuid4()), format=OptionDefFormat.KDATA_VOLUME, allowed_types=[str, dict, KData_Secret]), 'loglevel': OptionDef(required=True, default_value='info', allowed_types=[str]), 'enable_prometheus': OptionDef(required=True, default_value=True, allowed_types=[bool]), 'prometheus_annotation': OptionDef(required=True, default_value=False, allowed_types=[bool]), 'load_definitions': OptionDef(format=OptionDefFormat.KDATA_VOLUME, allowed_types=[str, KData_Secret]), 'authorization': {'serviceaccount_create': OptionDef(required=True, default_value=True, allowed_types=[bool]), 'serviceaccount_use': OptionDef(allowed_types=[str]), 'roles_create': OptionDef(required=True, default_value=True, allowed_types=[bool]), 'roles_bind': OptionDef(required=True, default_value=True, allowed_types=[bool])}}, 'container': {'busybox': OptionDef(required=True, default_value='busybox:1.32.0', allowed_types=[str]), 'rabbitmq': OptionDef(required=True, default_value='rabbitmq:3.8.9-alpine', allowed_types=[str])}, 'kubernetes': {'volumes': {'data': OptionDef(required=True, format=OptionDefFormat.KDATA_VOLUME, allowed_types=[dict, *KDataHelper_Volume.allowed_kdata()])}, 'resources': {'statefulset': OptionDef(allowed_types=[dict])}}}<|docstring|>Declare the options for the RabbitMQ builder. :return: The supported options<|endoftext|>
68d6d8a7d1a1adb2110925467bc987381f509ee004913283d38f98ab19c13337
def get_version(filename): 'Extract the package version' with open(filename, encoding='utf8') as in_fh: for line in in_fh: if line.startswith('__version__'): return line.split('=')[1].strip()[1:(- 1)] raise ValueError(('Cannot extract version from %s' % filename))
Extract the package version
setup.py
get_version
goerz-testing/pypkg_rtd_02
0
python
def get_version(filename): with open(filename, encoding='utf8') as in_fh: for line in in_fh: if line.startswith('__version__'): return line.split('=')[1].strip()[1:(- 1)] raise ValueError(('Cannot extract version from %s' % filename))
def get_version(filename): with open(filename, encoding='utf8') as in_fh: for line in in_fh: if line.startswith('__version__'): return line.split('=')[1].strip()[1:(- 1)] raise ValueError(('Cannot extract version from %s' % filename))<|docstring|>Extract the package version<|endoftext|>
035d7a546a3646df66e91e00c80ffb771c7d5a30f8e1d0f4b9d24aebc202941d
@staticmethod def new_notion_row(aggregate_root: SmartList) -> 'NotionSmartList': 'Construct a new Notion row from a given aggregate root.' return NotionSmartList(notion_id=BAD_NOTION_ID, ref_id=aggregate_root.ref_id, name=str(aggregate_root.name))
Construct a new Notion row from a given aggregate root.
jupiter/domain/smart_lists/notion_smart_list.py
new_notion_row
horia141/jupiter
15
python
@staticmethod def new_notion_row(aggregate_root: SmartList) -> 'NotionSmartList': return NotionSmartList(notion_id=BAD_NOTION_ID, ref_id=aggregate_root.ref_id, name=str(aggregate_root.name))
@staticmethod def new_notion_row(aggregate_root: SmartList) -> 'NotionSmartList': return NotionSmartList(notion_id=BAD_NOTION_ID, ref_id=aggregate_root.ref_id, name=str(aggregate_root.name))<|docstring|>Construct a new Notion row from a given aggregate root.<|endoftext|>
f24f58bf40572114e8c93f5fd1ec283e6469b2338207fd32c871d1e2e84e2953
def apply_to_aggregate_root(self, aggregate_root: SmartList, modification_time: Timestamp) -> SmartList: 'Obtain the aggregate root form of this, with a possible error.' workspace_name = EntityName.from_raw(self.name) aggregate_root.change_name(workspace_name, modification_time) return aggregate_root
Obtain the aggregate root form of this, with a possible error.
jupiter/domain/smart_lists/notion_smart_list.py
apply_to_aggregate_root
horia141/jupiter
15
python
def apply_to_aggregate_root(self, aggregate_root: SmartList, modification_time: Timestamp) -> SmartList: workspace_name = EntityName.from_raw(self.name) aggregate_root.change_name(workspace_name, modification_time) return aggregate_root
def apply_to_aggregate_root(self, aggregate_root: SmartList, modification_time: Timestamp) -> SmartList: workspace_name = EntityName.from_raw(self.name) aggregate_root.change_name(workspace_name, modification_time) return aggregate_root<|docstring|>Obtain the aggregate root form of this, with a possible error.<|endoftext|>
d5f220f834813dc869b8d917464f33b436bf1299fa8525bb3e13f060f482b6d1
def get_api_url(job_id): ' Return the WDL PersecData API endpoint\n\n The endpoint returned by this module is:\n\n https://api.welldatalabs.com/persecdata/<job_id>\n\n Parameters\n ----------\n job_id: str\n The job_id to search the PerSec API for\n\n Returns\n -------\n url: str\n The PersecData API endpoing\n ' protocol = 'https' base_url = 'api.welldatalabs.com' endpoint = 'persecdata' url = f'{protocol}://{base_url}/{endpoint}/{job_id}' return url
Return the WDL PersecData API endpoint The endpoint returned by this module is: https://api.welldatalabs.com/persecdata/<job_id> Parameters ---------- job_id: str The job_id to search the PerSec API for Returns ------- url: str The PersecData API endpoing
persec_data_api.py
get_api_url
welldatalabs/wdl-python
1
python
def get_api_url(job_id): ' Return the WDL PersecData API endpoint\n\n The endpoint returned by this module is:\n\n https://api.welldatalabs.com/persecdata/<job_id>\n\n Parameters\n ----------\n job_id: str\n The job_id to search the PerSec API for\n\n Returns\n -------\n url: str\n The PersecData API endpoing\n ' protocol = 'https' base_url = 'api.welldatalabs.com' endpoint = 'persecdata' url = f'{protocol}://{base_url}/{endpoint}/{job_id}' return url
def get_api_url(job_id): ' Return the WDL PersecData API endpoint\n\n The endpoint returned by this module is:\n\n https://api.welldatalabs.com/persecdata/<job_id>\n\n Parameters\n ----------\n job_id: str\n The job_id to search the PerSec API for\n\n Returns\n -------\n url: str\n The PersecData API endpoing\n ' protocol = 'https' base_url = 'api.welldatalabs.com' endpoint = 'persecdata' url = f'{protocol}://{base_url}/{endpoint}/{job_id}' return url<|docstring|>Return the WDL PersecData API endpoint The endpoint returned by this module is: https://api.welldatalabs.com/persecdata/<job_id> Parameters ---------- job_id: str The job_id to search the PerSec API for Returns ------- url: str The PersecData API endpoing<|endoftext|>
ebc4f4b7c9ace04903207cb8822d2ee6da743f4a326ecb097b2da8f0074a85c3
def get_api_auth_headers(api_key): ' Return HTTP Authorization header using WDL API key\n\n WDL follows the Authorization: <type> <credentials> pattern\n that was introduced by the W3C in HTTP 1.0. That means the\n value of your Authorization header must be set to:\n\n "Bearer <API Key>".\n\n The API Key provided by api_key is a secret token that Well\n Data Labs issues to your company or authenticated user. If\n you are an existing Well Data Labs customer, you can obtain\n an API Key for your data from example@example.com.\n\n API Keys allow access to customer data just like a username and\n password. They should be protected and should not be shared.\n\n Parameters\n ----------\n api_key: str\n The WDL API key to use for request authentication\n\n Returns\n -------\n headers: dict\n A dictionary containing the HTTP Authorization header\n information to be consumed by the request GET call\n ' assert isinstance(api_key, str) headers = {'Authorization': f'Bearer {api_key}'} return headers
Return HTTP Authorization header using WDL API key WDL follows the Authorization: <type> <credentials> pattern that was introduced by the W3C in HTTP 1.0. That means the value of your Authorization header must be set to: "Bearer <API Key>". The API Key provided by api_key is a secret token that Well Data Labs issues to your company or authenticated user. If you are an existing Well Data Labs customer, you can obtain an API Key for your data from example@example.com. API Keys allow access to customer data just like a username and password. They should be protected and should not be shared. Parameters ---------- api_key: str The WDL API key to use for request authentication Returns ------- headers: dict A dictionary containing the HTTP Authorization header information to be consumed by the request GET call
persec_data_api.py
get_api_auth_headers
welldatalabs/wdl-python
1
python
def get_api_auth_headers(api_key): ' Return HTTP Authorization header using WDL API key\n\n WDL follows the Authorization: <type> <credentials> pattern\n that was introduced by the W3C in HTTP 1.0. That means the\n value of your Authorization header must be set to:\n\n "Bearer <API Key>".\n\n The API Key provided by api_key is a secret token that Well\n Data Labs issues to your company or authenticated user. If\n you are an existing Well Data Labs customer, you can obtain\n an API Key for your data from example@example.com.\n\n API Keys allow access to customer data just like a username and\n password. They should be protected and should not be shared.\n\n Parameters\n ----------\n api_key: str\n The WDL API key to use for request authentication\n\n Returns\n -------\n headers: dict\n A dictionary containing the HTTP Authorization header\n information to be consumed by the request GET call\n ' assert isinstance(api_key, str) headers = {'Authorization': f'Bearer {api_key}'} return headers
def get_api_auth_headers(api_key): ' Return HTTP Authorization header using WDL API key\n\n WDL follows the Authorization: <type> <credentials> pattern\n that was introduced by the W3C in HTTP 1.0. That means the\n value of your Authorization header must be set to:\n\n "Bearer <API Key>".\n\n The API Key provided by api_key is a secret token that Well\n Data Labs issues to your company or authenticated user. If\n you are an existing Well Data Labs customer, you can obtain\n an API Key for your data from example@example.com.\n\n API Keys allow access to customer data just like a username and\n password. They should be protected and should not be shared.\n\n Parameters\n ----------\n api_key: str\n The WDL API key to use for request authentication\n\n Returns\n -------\n headers: dict\n A dictionary containing the HTTP Authorization header\n information to be consumed by the request GET call\n ' assert isinstance(api_key, str) headers = {'Authorization': f'Bearer {api_key}'} return headers<|docstring|>Return HTTP Authorization header using WDL API key WDL follows the Authorization: <type> <credentials> pattern that was introduced by the W3C in HTTP 1.0. That means the value of your Authorization header must be set to: "Bearer <API Key>". The API Key provided by api_key is a secret token that Well Data Labs issues to your company or authenticated user. If you are an existing Well Data Labs customer, you can obtain an API Key for your data from example@example.com. API Keys allow access to customer data just like a username and password. They should be protected and should not be shared. Parameters ---------- api_key: str The WDL API key to use for request authentication Returns ------- headers: dict A dictionary containing the HTTP Authorization header information to be consumed by the request GET call<|endoftext|>
995c979e323976a188a3afbd537decf880ca628ece2d65d560fce21c07cefee9
def save_raw_persec_data(csv_data, filename): ' Save the raw CSV to filename\n\n The raw CSV has the following structure:\n\n JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC\n (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal)\n 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000\n 06/17/18 04:15:09,0.050000,0.016667,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000\n ...\n\n This function saves the raw CSV to filename.\n\n Note: not every PerSecData CSV files has the same columns.\n\n Parameters\n ----------\n csv_data: str\n The CSV data as a string\n\n filename: str or pathlib.Path\n The target filename for storing the raw CSV data\n ' assert isinstance(csv_data, str) assert (isinstance(filename, str) or isinstance(filename, Path)) with open(filename, 'w') as csv_file: csv_file.write(csv_data)
Save the raw CSV to filename The raw CSV has the following structure: JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal) 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000 06/17/18 04:15:09,0.050000,0.016667,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000 ... This function saves the raw CSV to filename. Note: not every PerSecData CSV files has the same columns. Parameters ---------- csv_data: str The CSV data as a string filename: str or pathlib.Path The target filename for storing the raw CSV data
persec_data_api.py
save_raw_persec_data
welldatalabs/wdl-python
1
python
def save_raw_persec_data(csv_data, filename): ' Save the raw CSV to filename\n\n The raw CSV has the following structure:\n\n JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC\n (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal)\n 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000\n 06/17/18 04:15:09,0.050000,0.016667,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000\n ...\n\n This function saves the raw CSV to filename.\n\n Note: not every PerSecData CSV files has the same columns.\n\n Parameters\n ----------\n csv_data: str\n The CSV data as a string\n\n filename: str or pathlib.Path\n The target filename for storing the raw CSV data\n ' assert isinstance(csv_data, str) assert (isinstance(filename, str) or isinstance(filename, Path)) with open(filename, 'w') as csv_file: csv_file.write(csv_data)
def save_raw_persec_data(csv_data, filename): ' Save the raw CSV to filename\n\n The raw CSV has the following structure:\n\n JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC\n (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal)\n 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000\n 06/17/18 04:15:09,0.050000,0.016667,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000\n ...\n\n This function saves the raw CSV to filename.\n\n Note: not every PerSecData CSV files has the same columns.\n\n Parameters\n ----------\n csv_data: str\n The CSV data as a string\n\n filename: str or pathlib.Path\n The target filename for storing the raw CSV data\n ' assert isinstance(csv_data, str) assert (isinstance(filename, str) or isinstance(filename, Path)) with open(filename, 'w') as csv_file: csv_file.write(csv_data)<|docstring|>Save the raw CSV to filename The raw CSV has the following structure: JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal) 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000 06/17/18 04:15:09,0.050000,0.016667,,Sample Ball-and-Sleeve, 
05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000 ... This function saves the raw CSV to filename. Note: not every PerSecData CSV files has the same columns. Parameters ---------- csv_data: str The CSV data as a string filename: str or pathlib.Path The target filename for storing the raw CSV data<|endoftext|>
34ed9dd00115cf38e51da52684d25fd50d7f40e3c665df3358d6a64b1484c07a
def format_persec_column_label(label): ' Return a re-formatted PerSecData column label\n\n This function converts column labels to snake case (all lower-case\n and spaces converted to underscores).\n\n Parameters\n ----------\n label: str\n The original column label to be formatted\n\n Returns\n -------\n formatted_label: str\n The formatted label\n ' assert isinstance(label, str) formatted_label = label.lower().replace(' ', '_') assert (formatted_label.islower() and (' ' not in formatted_label)) return formatted_label
Return a re-formatted PerSecData column label This function converts column labels to snake case (all lower-case and spaces converted to underscores). Parameters ---------- label: str The original column label to be formatted Returns ------- formatted_label: str The formatted label
persec_data_api.py
format_persec_column_label
welldatalabs/wdl-python
1
python
def format_persec_column_label(label): ' Return a re-formatted PerSecData column label\n\n This function converts column labels to snake case (all lower-case\n and spaces converted to underscores).\n\n Parameters\n ----------\n label: str\n The original column label to be formatted\n\n Returns\n -------\n formatted_label: str\n The formatted label\n ' assert isinstance(label, str) formatted_label = label.lower().replace(' ', '_') assert (formatted_label.islower() and (' ' not in formatted_label)) return formatted_label
def format_persec_column_label(label): ' Return a re-formatted PerSecData column label\n\n This function converts column labels to snake case (all lower-case\n and spaces converted to underscores).\n\n Parameters\n ----------\n label: str\n The original column label to be formatted\n\n Returns\n -------\n formatted_label: str\n The formatted label\n ' assert isinstance(label, str) formatted_label = label.lower().replace(' ', '_') assert (formatted_label.islower() and (' ' not in formatted_label)) return formatted_label<|docstring|>Return a re-formatted PerSecData column label This function converts column labels to snake case (all lower-case and spaces converted to underscores). Parameters ---------- label: str The original column label to be formatted Returns ------- formatted_label: str The formatted label<|endoftext|>
bfbae161ac1514c6bd3e7ddd23471d90b293adb9949fc6eba57998c6833ba217
def save_formatted_persec_data(csv_data, filename): ' Save a mildly formatted version of csv_data to filename\n\n The raw CSV has the following structure:\n\n JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC\n (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal)\n 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000\n 06/17/18 04:15:09,0.050000,0.016667,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000\n ...\n\n This function converts the header to snake case (all lower-case and\n spaces converted to underscores) using format_persec_column_label().\n It also casts the JobTime column to a Pandas datatime column prior to\n saving the CSV forcing the datatime format to adhere to "%m/%d/%y %H:%M:%S".\n\n The function also skips the units row. 
The reason is that the units\n row prevents Python/R from correctly inferring the data type of the\n columns when they are read in.\n\n Note: not every PerSecData CSV files has the same columns.\n\n Parameters\n ----------\n csv_data: str\n The CSV data as a string\n\n filename: str or pathlib.Path\n The target filename for storing the formatted CSV data\n ' assert isinstance(csv_data, str) assert (isinstance(filename, str) or isinstance(filename, Path)) persec_df = pd.read_csv(StringIO(csv_data), skiprows=[1]) persec_df.columns = [format_persec_column_label(column) for column in persec_df.columns] assert ('job_time' in persec_df.columns) persec_df = persec_df.assign(job_time=(lambda x: pd.to_datetime(x.job_time, format='%m/%d/%y %H:%M:%S'))) persec_df.to_csv(filename, index=False)
Save a mildly formatted version of csv_data to filename The raw CSV has the following structure: JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal) 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000 06/17/18 04:15:09,0.050000,0.016667,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000 ... This function converts the header to snake case (all lower-case and spaces converted to underscores) using format_persec_column_label(). It also casts the JobTime column to a Pandas datatime column prior to saving the CSV forcing the datatime format to adhere to "%m/%d/%y %H:%M:%S". The function also skips the units row. The reason is that the units row prevents Python/R from correctly inferring the data type of the columns when they are read in. Note: not every PerSecData CSV files has the same columns. Parameters ---------- csv_data: str The CSV data as a string filename: str or pathlib.Path The target filename for storing the formatted CSV data
persec_data_api.py
save_formatted_persec_data
welldatalabs/wdl-python
1
python
def save_formatted_persec_data(csv_data, filename): ' Save a mildly formatted version of csv_data to filename\n\n The raw CSV has the following structure:\n\n JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC\n (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal)\n 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000\n 06/17/18 04:15:09,0.050000,0.016667,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000\n ...\n\n This function converts the header to snake case (all lower-case and\n spaces converted to underscores) using format_persec_column_label().\n It also casts the JobTime column to a Pandas datatime column prior to\n saving the CSV forcing the datatime format to adhere to "%m/%d/%y %H:%M:%S".\n\n The function also skips the units row. 
The reason is that the units\n row prevents Python/R from correctly inferring the data type of the\n columns when they are read in.\n\n Note: not every PerSecData CSV files has the same columns.\n\n Parameters\n ----------\n csv_data: str\n The CSV data as a string\n\n filename: str or pathlib.Path\n The target filename for storing the formatted CSV data\n ' assert isinstance(csv_data, str) assert (isinstance(filename, str) or isinstance(filename, Path)) persec_df = pd.read_csv(StringIO(csv_data), skiprows=[1]) persec_df.columns = [format_persec_column_label(column) for column in persec_df.columns] assert ('job_time' in persec_df.columns) persec_df = persec_df.assign(job_time=(lambda x: pd.to_datetime(x.job_time, format='%m/%d/%y %H:%M:%S'))) persec_df.to_csv(filename, index=False)
def save_formatted_persec_data(csv_data, filename): ' Save a mildly formatted version of csv_data to filename\n\n The raw CSV has the following structure:\n\n JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC\n (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal)\n 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000\n 06/17/18 04:15:09,0.050000,0.016667,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000\n ...\n\n This function converts the header to snake case (all lower-case and\n spaces converted to underscores) using format_persec_column_label().\n It also casts the JobTime column to a Pandas datatime column prior to\n saving the CSV forcing the datatime format to adhere to "%m/%d/%y %H:%M:%S".\n\n The function also skips the units row. 
The reason is that the units\n row prevents Python/R from correctly inferring the data type of the\n columns when they are read in.\n\n Note: not every PerSecData CSV files has the same columns.\n\n Parameters\n ----------\n csv_data: str\n The CSV data as a string\n\n filename: str or pathlib.Path\n The target filename for storing the formatted CSV data\n ' assert isinstance(csv_data, str) assert (isinstance(filename, str) or isinstance(filename, Path)) persec_df = pd.read_csv(StringIO(csv_data), skiprows=[1]) persec_df.columns = [format_persec_column_label(column) for column in persec_df.columns] assert ('job_time' in persec_df.columns) persec_df = persec_df.assign(job_time=(lambda x: pd.to_datetime(x.job_time, format='%m/%d/%y %H:%M:%S'))) persec_df.to_csv(filename, index=False)<|docstring|>Save a mildly formatted version of csv_data to filename The raw CSV has the following structure: JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal) 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000 06/17/18 04:15:09,0.050000,0.016667,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000 ... This function converts the header to snake case (all lower-case and spaces converted to underscores) using format_persec_column_label(). It also casts the JobTime column to a Pandas datatime column prior to saving the CSV forcing the datatime format to adhere to "%m/%d/%y %H:%M:%S". The function also skips the units row. 
The reason is that the units row prevents Python/R from correctly inferring the data type of the columns when they are read in. Note: not every PerSecData CSV files has the same columns. Parameters ---------- csv_data: str The CSV data as a string filename: str or pathlib.Path The target filename for storing the formatted CSV data<|endoftext|>
9fe2448ed5156b5cf250cec4de199e0f3c1e9bc52faf4e3539f339b4a9540d95
def save_persec_units_data(csv_data, filename): ' Save the units data in csv_data to filename\n\n The raw CSV has the following structure:\n\n JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC\n (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal)\n 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000\n 06/17/18 04:15:09,0.050000,0.016667,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000\n ...\n\n The function reads in the first two rows of csv_data, i.e., the\n header row and the units row. It then converts the header to snake\n case (all lower-case and spaces converted to underscores) using\n format_persec_column_label(). 
It also removes the parentheses\n surrounding the units in the second row of the CSV data.\n Finally, the updated header and units row are written as a CSV\n filename specified by filename.\n\n The units CSV file looks like:\n\n job_time,job_time0,stage_time0,time_to_isip,well_name, api_number,stage_number,treating_pressure,bottomhole_pressure, annulus_pressure,surface_pressure,slurry_rate,clean_volume, slurry_volume,proppant_total,proppant_conc,bottomhole_proppant_conc\n dattime,min,min,min,none,none,none,psi,psi,psi, psi,bpm,bbl,bbl,lbs,lbs/gal,lbs/gal\n\n Note: not every PerSecData CSV files has the same columns.\n\n Parameters\n ----------\n csv_data: str\n The CSV data as a string\n\n filename: str or pathlib.Path\n The target filename for storing the units data\n ' def strip_parentheses(unit): ' Remove parentheses from unit str ' return unit.translate({ord(i): None for i in '()'}) assert isinstance(csv_data, str) assert (isinstance(filename, str) or isinstance(filename, Path)) units_df = pd.read_csv(StringIO(csv_data), nrows=1) updated_columns = [format_persec_column_label(column) for column in units_df.columns] updated_units = [strip_parentheses(unit) for unit in units_df.iloc[0]] units_df = pd.DataFrame(data=[updated_units], columns=updated_columns) units_df.to_csv(filename, index=False)
Save the units data in csv_data to filename The raw CSV has the following structure: JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal) 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000 06/17/18 04:15:09,0.050000,0.016667,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000 ... The function reads in the first two rows of csv_data, i.e., the header row and the units row. It then converts the header to snake case (all lower-case and spaces converted to underscores) using format_persec_column_label(). It also removes the parentheses surrounding the units in the second row of the CSV data. Finally, the updated header and units row are written as a CSV filename specified by filename. The units CSV file looks like: job_time,job_time0,stage_time0,time_to_isip,well_name, api_number,stage_number,treating_pressure,bottomhole_pressure, annulus_pressure,surface_pressure,slurry_rate,clean_volume, slurry_volume,proppant_total,proppant_conc,bottomhole_proppant_conc dattime,min,min,min,none,none,none,psi,psi,psi, psi,bpm,bbl,bbl,lbs,lbs/gal,lbs/gal Note: not every PerSecData CSV files has the same columns. Parameters ---------- csv_data: str The CSV data as a string filename: str or pathlib.Path The target filename for storing the units data
persec_data_api.py
save_persec_units_data
welldatalabs/wdl-python
1
python
def save_persec_units_data(csv_data, filename): ' Save the units data in csv_data to filename\n\n The raw CSV has the following structure:\n\n JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC\n (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal)\n 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000\n 06/17/18 04:15:09,0.050000,0.016667,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000\n ...\n\n The function reads in the first two rows of csv_data, i.e., the\n header row and the units row. It then converts the header to snake\n case (all lower-case and spaces converted to underscores) using\n format_persec_column_label(). 
It also removes the parentheses\n surrounding the units in the second row of the CSV data.\n Finally, the updated header and units row are written as a CSV\n filename specified by filename.\n\n The units CSV file looks like:\n\n job_time,job_time0,stage_time0,time_to_isip,well_name, api_number,stage_number,treating_pressure,bottomhole_pressure, annulus_pressure,surface_pressure,slurry_rate,clean_volume, slurry_volume,proppant_total,proppant_conc,bottomhole_proppant_conc\n dattime,min,min,min,none,none,none,psi,psi,psi, psi,bpm,bbl,bbl,lbs,lbs/gal,lbs/gal\n\n Note: not every PerSecData CSV files has the same columns.\n\n Parameters\n ----------\n csv_data: str\n The CSV data as a string\n\n filename: str or pathlib.Path\n The target filename for storing the units data\n ' def strip_parentheses(unit): ' Remove parentheses from unit str ' return unit.translate({ord(i): None for i in '()'}) assert isinstance(csv_data, str) assert (isinstance(filename, str) or isinstance(filename, Path)) units_df = pd.read_csv(StringIO(csv_data), nrows=1) updated_columns = [format_persec_column_label(column) for column in units_df.columns] updated_units = [strip_parentheses(unit) for unit in units_df.iloc[0]] units_df = pd.DataFrame(data=[updated_units], columns=updated_columns) units_df.to_csv(filename, index=False)
def save_persec_units_data(csv_data, filename): ' Save the units data in csv_data to filename\n\n The raw CSV has the following structure:\n\n JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC\n (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal)\n 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000\n 06/17/18 04:15:09,0.050000,0.016667,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000\n ...\n\n The function reads in the first two rows of csv_data, i.e., the\n header row and the units row. It then converts the header to snake\n case (all lower-case and spaces converted to underscores) using\n format_persec_column_label(). 
It also removes the parentheses\n surrounding the units in the second row of the CSV data.\n Finally, the updated header and units row are written as a CSV\n filename specified by filename.\n\n The units CSV file looks like:\n\n job_time,job_time0,stage_time0,time_to_isip,well_name, api_number,stage_number,treating_pressure,bottomhole_pressure, annulus_pressure,surface_pressure,slurry_rate,clean_volume, slurry_volume,proppant_total,proppant_conc,bottomhole_proppant_conc\n dattime,min,min,min,none,none,none,psi,psi,psi, psi,bpm,bbl,bbl,lbs,lbs/gal,lbs/gal\n\n Note: not every PerSecData CSV files has the same columns.\n\n Parameters\n ----------\n csv_data: str\n The CSV data as a string\n\n filename: str or pathlib.Path\n The target filename for storing the units data\n ' def strip_parentheses(unit): ' Remove parentheses from unit str ' return unit.translate({ord(i): None for i in '()'}) assert isinstance(csv_data, str) assert (isinstance(filename, str) or isinstance(filename, Path)) units_df = pd.read_csv(StringIO(csv_data), nrows=1) updated_columns = [format_persec_column_label(column) for column in units_df.columns] updated_units = [strip_parentheses(unit) for unit in units_df.iloc[0]] units_df = pd.DataFrame(data=[updated_units], columns=updated_columns) units_df.to_csv(filename, index=False)<|docstring|>Save the units data in csv_data to filename The raw CSV has the following structure: JOB TIME,JOB TIME0,STAGE TIME0,TIME TO ISIP,WELL NAME, API NUMBER,STAGE NUMBER,TREATING PRESSURE,BOTTOMHOLE PRESSURE, ANNULUS PRESSURE,SURFACE PRESSURE,SLURRY RATE,CLEAN VOLUME, SLURRY VOLUME,PROPPANT TOTAL,PROPPANT CONC,BOTTOMHOLE PROPPANT CONC (datetime),(min),(min),(min),(none),(none),(none),(psi),(psi),(psi), (psi),(bpm),(bbl),(bbl),(lbs),(lbs/gal),(lbs/gal) 06/17/18 04:15:08,0.033333,0.000000,,Sample Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,-0.900000, 0.480000,0.000000,7.300000,0.000000,0.000000,0.000000 06/17/18 04:15:09,0.050000,0.016667,,Sample 
Ball-and-Sleeve, 05-123-00000-00-00,1,-16.310000,0.000000,-8.130000,2.710000, 0.490000,0.000000,7.310000,0.000000,0.000000,0.000000 ... The function reads in the first two rows of csv_data, i.e., the header row and the units row. It then converts the header to snake case (all lower-case and spaces converted to underscores) using format_persec_column_label(). It also removes the parentheses surrounding the units in the second row of the CSV data. Finally, the updated header and units row are written as a CSV filename specified by filename. The units CSV file looks like: job_time,job_time0,stage_time0,time_to_isip,well_name, api_number,stage_number,treating_pressure,bottomhole_pressure, annulus_pressure,surface_pressure,slurry_rate,clean_volume, slurry_volume,proppant_total,proppant_conc,bottomhole_proppant_conc dattime,min,min,min,none,none,none,psi,psi,psi, psi,bpm,bbl,bbl,lbs,lbs/gal,lbs/gal Note: not every PerSecData CSV files has the same columns. Parameters ---------- csv_data: str The CSV data as a string filename: str or pathlib.Path The target filename for storing the units data<|endoftext|>
a7b621c3bfbf1d72788bf14c65b4ed721de35244bc0a21b3279eb4b12be125f0
def handle_200(response, persec_filenames): ' Handle 200: return a Pandas dataframe from JSON object\n\n When the JobHeaders API returns success (200 status_code)\n the response text should be CSV file.\n\n Three different CSVs can be written:\n\n 1) The raw CSV: complete original CSV\n 2) The formatted CSV: snake case header and no units row\n 3) The units CSV: snake case header and units row\n\n An empty string or None key in persec_filenames will cause\n that file type to be skipped.\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n\n persec_filenames: PerSecFilenames\n The target filenames for the raw CSV, formatted CSV, and\n units CSV. An empty or None entry skips writting that\n file type.\n ' assert isinstance(response, requests.Response) assert isinstance(persec_filenames, PerSecFilenames) assert (response.status_code == 200) csv_data = response.text if persec_filenames.raw_filename: save_raw_persec_data(csv_data=csv_data, filename=persec_filenames.raw_filename) if persec_filenames.formatted_filename: save_formatted_persec_data(csv_data=csv_data, filename=persec_filenames.formatted_filename) if persec_filenames.units_filename: save_persec_units_data(csv_data=csv_data, filename=persec_filenames.units_filename)
Handle 200: return a Pandas dataframe from JSON object When the JobHeaders API returns success (200 status_code) the response text should be CSV file. Three different CSVs can be written: 1) The raw CSV: complete original CSV 2) The formatted CSV: snake case header and no units row 3) The units CSV: snake case header and units row An empty string or None key in persec_filenames will cause that file type to be skipped. Parameters ---------- response: requests.Response The response from the HTTP request persec_filenames: PerSecFilenames The target filenames for the raw CSV, formatted CSV, and units CSV. An empty or None entry skips writting that file type.
persec_data_api.py
handle_200
welldatalabs/wdl-python
1
python
def handle_200(response, persec_filenames): ' Handle 200: return a Pandas dataframe from JSON object\n\n When the JobHeaders API returns success (200 status_code)\n the response text should be CSV file.\n\n Three different CSVs can be written:\n\n 1) The raw CSV: complete original CSV\n 2) The formatted CSV: snake case header and no units row\n 3) The units CSV: snake case header and units row\n\n An empty string or None key in persec_filenames will cause\n that file type to be skipped.\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n\n persec_filenames: PerSecFilenames\n The target filenames for the raw CSV, formatted CSV, and\n units CSV. An empty or None entry skips writting that\n file type.\n ' assert isinstance(response, requests.Response) assert isinstance(persec_filenames, PerSecFilenames) assert (response.status_code == 200) csv_data = response.text if persec_filenames.raw_filename: save_raw_persec_data(csv_data=csv_data, filename=persec_filenames.raw_filename) if persec_filenames.formatted_filename: save_formatted_persec_data(csv_data=csv_data, filename=persec_filenames.formatted_filename) if persec_filenames.units_filename: save_persec_units_data(csv_data=csv_data, filename=persec_filenames.units_filename)
def handle_200(response, persec_filenames): ' Handle 200: return a Pandas dataframe from JSON object\n\n When the JobHeaders API returns success (200 status_code)\n the response text should be CSV file.\n\n Three different CSVs can be written:\n\n 1) The raw CSV: complete original CSV\n 2) The formatted CSV: snake case header and no units row\n 3) The units CSV: snake case header and units row\n\n An empty string or None key in persec_filenames will cause\n that file type to be skipped.\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n\n persec_filenames: PerSecFilenames\n The target filenames for the raw CSV, formatted CSV, and\n units CSV. An empty or None entry skips writting that\n file type.\n ' assert isinstance(response, requests.Response) assert isinstance(persec_filenames, PerSecFilenames) assert (response.status_code == 200) csv_data = response.text if persec_filenames.raw_filename: save_raw_persec_data(csv_data=csv_data, filename=persec_filenames.raw_filename) if persec_filenames.formatted_filename: save_formatted_persec_data(csv_data=csv_data, filename=persec_filenames.formatted_filename) if persec_filenames.units_filename: save_persec_units_data(csv_data=csv_data, filename=persec_filenames.units_filename)<|docstring|>Handle 200: return a Pandas dataframe from JSON object When the JobHeaders API returns success (200 status_code) the response text should be CSV file. Three different CSVs can be written: 1) The raw CSV: complete original CSV 2) The formatted CSV: snake case header and no units row 3) The units CSV: snake case header and units row An empty string or None key in persec_filenames will cause that file type to be skipped. Parameters ---------- response: requests.Response The response from the HTTP request persec_filenames: PerSecFilenames The target filenames for the raw CSV, formatted CSV, and units CSV. An empty or None entry skips writting that file type.<|endoftext|>
a89ff892d2b930ff94c470643312adb551194590dbf6419bf3ebae4ea359470a
def handle_400(response): ' Handle 400: output warning and suuggested next steps\n\n There was a change during the week of 8/29/2019 where status code\n 417 would be returned for jobs that had issues being parsed, e.g.,\n the job loader could not detect a job start time. The API now just\n returns 400.\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n ' assert isinstance(response, requests.Response) assert (response.status_code == 400) print('HTTP 400 Bad Request!')
Handle 400: output warning and suuggested next steps There was a change during the week of 8/29/2019 where status code 417 would be returned for jobs that had issues being parsed, e.g., the job loader could not detect a job start time. The API now just returns 400. Parameters ---------- response: requests.Response The response from the HTTP request
persec_data_api.py
handle_400
welldatalabs/wdl-python
1
python
def handle_400(response): ' Handle 400: output warning and suuggested next steps\n\n There was a change during the week of 8/29/2019 where status code\n 417 would be returned for jobs that had issues being parsed, e.g.,\n the job loader could not detect a job start time. The API now just\n returns 400.\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n ' assert isinstance(response, requests.Response) assert (response.status_code == 400) print('HTTP 400 Bad Request!')
def handle_400(response): ' Handle 400: output warning and suuggested next steps\n\n There was a change during the week of 8/29/2019 where status code\n 417 would be returned for jobs that had issues being parsed, e.g.,\n the job loader could not detect a job start time. The API now just\n returns 400.\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n ' assert isinstance(response, requests.Response) assert (response.status_code == 400) print('HTTP 400 Bad Request!')<|docstring|>Handle 400: output warning and suuggested next steps There was a change during the week of 8/29/2019 where status code 417 would be returned for jobs that had issues being parsed, e.g., the job loader could not detect a job start time. The API now just returns 400. Parameters ---------- response: requests.Response The response from the HTTP request<|endoftext|>
a601ee50006c920f8000246b0e2382d4a268e6ee597b40ea5a3923640540ffe1
def handle_401(response): ' Handle 401: output warning and suuggested next steps\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n ' assert isinstance(response, requests.Response) assert (response.status_code == 401) print('HTTP 401 Authentication token is invalid!') print('Verify you are correctly setting the HTTP Authorization Header') print('and are using the correct WDL API key')
Handle 401: output warning and suuggested next steps Parameters ---------- response: requests.Response The response from the HTTP request
persec_data_api.py
handle_401
welldatalabs/wdl-python
1
python
def handle_401(response): ' Handle 401: output warning and suuggested next steps\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n ' assert isinstance(response, requests.Response) assert (response.status_code == 401) print('HTTP 401 Authentication token is invalid!') print('Verify you are correctly setting the HTTP Authorization Header') print('and are using the correct WDL API key')
def handle_401(response): ' Handle 401: output warning and suuggested next steps\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n ' assert isinstance(response, requests.Response) assert (response.status_code == 401) print('HTTP 401 Authentication token is invalid!') print('Verify you are correctly setting the HTTP Authorization Header') print('and are using the correct WDL API key')<|docstring|>Handle 401: output warning and suuggested next steps Parameters ---------- response: requests.Response The response from the HTTP request<|endoftext|>
eca6d0004dae8eae916c79ecd13fc0cf2ddcb55bd93083f95ec74a33b38b16a1
def handle_403(response): ' Handle 403: output warning and suuggested next steps\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n ' assert isinstance(response, requests.Response) assert (response.status_code == 403) print('HTTP 403 Forbidden!') print("A valid token was received, but it doesn't have permissions to PerSecData") print('Contact example@example.com for assistance')
Handle 403: output warning and suuggested next steps Parameters ---------- response: requests.Response The response from the HTTP request
persec_data_api.py
handle_403
welldatalabs/wdl-python
1
python
def handle_403(response): ' Handle 403: output warning and suuggested next steps\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n ' assert isinstance(response, requests.Response) assert (response.status_code == 403) print('HTTP 403 Forbidden!') print("A valid token was received, but it doesn't have permissions to PerSecData") print('Contact example@example.com for assistance')
def handle_403(response): ' Handle 403: output warning and suuggested next steps\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n ' assert isinstance(response, requests.Response) assert (response.status_code == 403) print('HTTP 403 Forbidden!') print("A valid token was received, but it doesn't have permissions to PerSecData") print('Contact example@example.com for assistance')<|docstring|>Handle 403: output warning and suuggested next steps Parameters ---------- response: requests.Response The response from the HTTP request<|endoftext|>
29b1b4b73b26175323e6525d9f96421010a3391e95e8d146f87b07e4e2de6279
def handle_404(response): ' Handle 404: output warning and suuggested next steps\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n ' assert isinstance(response, requests.Response) assert (response.status_code == 404) url = response.request.url print('HTTP 404 Not Found!') print(f'No data found matching the criteria: {url}')
Handle 404: output warning and suuggested next steps Parameters ---------- response: requests.Response The response from the HTTP request
persec_data_api.py
handle_404
welldatalabs/wdl-python
1
python
def handle_404(response): ' Handle 404: output warning and suuggested next steps\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n ' assert isinstance(response, requests.Response) assert (response.status_code == 404) url = response.request.url print('HTTP 404 Not Found!') print(f'No data found matching the criteria: {url}')
def handle_404(response): ' Handle 404: output warning and suuggested next steps\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n ' assert isinstance(response, requests.Response) assert (response.status_code == 404) url = response.request.url print('HTTP 404 Not Found!') print(f'No data found matching the criteria: {url}')<|docstring|>Handle 404: output warning and suuggested next steps Parameters ---------- response: requests.Response The response from the HTTP request<|endoftext|>
769e8fe85be96b4e118fb6f8abdf1d6d2ecdd14a430b7ec28fae9b54d9f2673a
def handle_429(response, default_delay=70): ' Handle 429: output warning and suuggested next steps\n\n The user has exceeded their rate limit. This method will return the\n number of seconds the caller should wait based on the dictated\n rate-limiting logic specified by the API.\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n\n default_delay: int\n Default number of seconds to wait between requests. This\n integer should be non-negative.\n\n Returns\n -------\n delay: int\n The number of seconds the caller should wait until the\n next PerSecData API call. This number will be non-negative.\n ' assert isinstance(response, requests.Response) assert (response.status_code == 429) assert (default_delay >= 0) url = response.request.url delay = default_delay response_retry_delay = response.headers.get('retry-after') print('HTTP 429 API throttled') if response_retry_delay: print(f'Will retry after: {response_retry_delay} seconds...') delay = int(response_retry_delay) else: print(f'Do not know wait time to retry for request: {url}') print(f'Continuing with next request after {default_delay} sec throttle window time...') delay = default_delay assert (delay >= 0) return delay
Handle 429: output warning and suuggested next steps The user has exceeded their rate limit. This method will return the number of seconds the caller should wait based on the dictated rate-limiting logic specified by the API. Parameters ---------- response: requests.Response The response from the HTTP request default_delay: int Default number of seconds to wait between requests. This integer should be non-negative. Returns ------- delay: int The number of seconds the caller should wait until the next PerSecData API call. This number will be non-negative.
persec_data_api.py
handle_429
welldatalabs/wdl-python
1
python
def handle_429(response, default_delay=70): ' Handle 429: output warning and suuggested next steps\n\n The user has exceeded their rate limit. This method will return the\n number of seconds the caller should wait based on the dictated\n rate-limiting logic specified by the API.\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n\n default_delay: int\n Default number of seconds to wait between requests. This\n integer should be non-negative.\n\n Returns\n -------\n delay: int\n The number of seconds the caller should wait until the\n next PerSecData API call. This number will be non-negative.\n ' assert isinstance(response, requests.Response) assert (response.status_code == 429) assert (default_delay >= 0) url = response.request.url delay = default_delay response_retry_delay = response.headers.get('retry-after') print('HTTP 429 API throttled') if response_retry_delay: print(f'Will retry after: {response_retry_delay} seconds...') delay = int(response_retry_delay) else: print(f'Do not know wait time to retry for request: {url}') print(f'Continuing with next request after {default_delay} sec throttle window time...') delay = default_delay assert (delay >= 0) return delay
def handle_429(response, default_delay=70): ' Handle 429: output warning and suuggested next steps\n\n The user has exceeded their rate limit. This method will return the\n number of seconds the caller should wait based on the dictated\n rate-limiting logic specified by the API.\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n\n default_delay: int\n Default number of seconds to wait between requests. This\n integer should be non-negative.\n\n Returns\n -------\n delay: int\n The number of seconds the caller should wait until the\n next PerSecData API call. This number will be non-negative.\n ' assert isinstance(response, requests.Response) assert (response.status_code == 429) assert (default_delay >= 0) url = response.request.url delay = default_delay response_retry_delay = response.headers.get('retry-after') print('HTTP 429 API throttled') if response_retry_delay: print(f'Will retry after: {response_retry_delay} seconds...') delay = int(response_retry_delay) else: print(f'Do not know wait time to retry for request: {url}') print(f'Continuing with next request after {default_delay} sec throttle window time...') delay = default_delay assert (delay >= 0) return delay<|docstring|>Handle 429: output warning and suuggested next steps The user has exceeded their rate limit. This method will return the number of seconds the caller should wait based on the dictated rate-limiting logic specified by the API. Parameters ---------- response: requests.Response The response from the HTTP request default_delay: int Default number of seconds to wait between requests. This integer should be non-negative. Returns ------- delay: int The number of seconds the caller should wait until the next PerSecData API call. This number will be non-negative.<|endoftext|>
c0202f6923ecbddcfa05a7bdb2f9e054dd7bee6adb014c55fc35024b663ab597
def handle_generic_response(response): ' Handle generic response: output warning and suuggested next steps\n\n This is reserved for unhandled status codes. Output a snippet of the\n response text to aid the user with debugging.\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n\n default_delay: int\n Default number of seconds to wait between requests\n ' assert isinstance(response, requests.Response) url = response.request.url print(f'Unhandled HTTP status code: {response.status_code} for request {url}') print(response.headers) print(response.text[:2000])
Handle generic response: output warning and suuggested next steps This is reserved for unhandled status codes. Output a snippet of the response text to aid the user with debugging. Parameters ---------- response: requests.Response The response from the HTTP request default_delay: int Default number of seconds to wait between requests
persec_data_api.py
handle_generic_response
welldatalabs/wdl-python
1
python
def handle_generic_response(response): ' Handle generic response: output warning and suuggested next steps\n\n This is reserved for unhandled status codes. Output a snippet of the\n response text to aid the user with debugging.\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n\n default_delay: int\n Default number of seconds to wait between requests\n ' assert isinstance(response, requests.Response) url = response.request.url print(f'Unhandled HTTP status code: {response.status_code} for request {url}') print(response.headers) print(response.text[:2000])
def handle_generic_response(response): ' Handle generic response: output warning and suuggested next steps\n\n This is reserved for unhandled status codes. Output a snippet of the\n response text to aid the user with debugging.\n\n Parameters\n ----------\n response: requests.Response\n The response from the HTTP request\n\n default_delay: int\n Default number of seconds to wait between requests\n ' assert isinstance(response, requests.Response) url = response.request.url print(f'Unhandled HTTP status code: {response.status_code} for request {url}') print(response.headers) print(response.text[:2000])<|docstring|>Handle generic response: output warning and suuggested next steps This is reserved for unhandled status codes. Output a snippet of the response text to aid the user with debugging. Parameters ---------- response: requests.Response The response from the HTTP request default_delay: int Default number of seconds to wait between requests<|endoftext|>
e25b760abb2c7728e3dd95283e5750770974b06e8f2fd4edcfa86eed38b80cde
def download_job_persec(job_id, api_key, persec_filenames, default_delay=70, max_attempts=3): ' Download PerSecData for job_id and save CSVs given by persec_filenames\n\n Repeatedly try to download the PerSecData data for the job indexed\n by job_id from the WDL API. On success, the PerSecData is save in\n files according to the\n entires of persec_filenames.\n\n Parameters\n ----------\n job_id: str\n The JobId used to search the PerSecAPI\n\n api_key: str\n The WDL API key to use for request authentication\n\n persec_filenames: PerSecFilenames\n The target filenames for the raw CSV, formatted CSV, and\n units CSV. An empty or None entry skips writting that\n file type.\n\n default_delay: int\n Default number of seconds to wait between requests. This\n number should be non-negative.\n\n max_attempts: int\n The maximum number of times to attempt the download the\n PerSecData for job_id. This number should be positive.\n\n Returns\n -------\n download_successful: bool\n Was the API call and CSV save successful\n ' assert (isinstance(max_attempts, int) and (max_attempts > 0)) assert (isinstance(default_delay, int) and (default_delay >= 0)) url = get_api_url(job_id) headers = get_api_auth_headers(api_key) status_code = None delay_before_next_api_call = default_delay download_successful = False num_attempts = 0 while (num_attempts < max_attempts): print(job_id) response = requests.get(url, headers=headers) status_code = response.status_code num_attempts = (num_attempts + 1) if (status_code == 200): handle_200(response, persec_filenames) download_successful = True elif (status_code == 400): handle_400(response) elif (status_code == 401): handle_401(response) elif (status_code == 403): handle_403(response) elif (status_code == 404): handle_404(response) elif (status_code == 429): delay_before_next_api_call = handle_429(response, default_delay) else: handle_generic_response(response) delay_before_next_api_call = default_delay if (status_code in frozenset((200, 400, 401, 
403, 404))): break elif (num_attempts < max_attempts): sleep(delay_before_next_api_call) return download_successful
Download PerSecData for job_id and save CSVs given by persec_filenames Repeatedly try to download the PerSecData data for the job indexed by job_id from the WDL API. On success, the PerSecData is save in files according to the entires of persec_filenames. Parameters ---------- job_id: str The JobId used to search the PerSecAPI api_key: str The WDL API key to use for request authentication persec_filenames: PerSecFilenames The target filenames for the raw CSV, formatted CSV, and units CSV. An empty or None entry skips writting that file type. default_delay: int Default number of seconds to wait between requests. This number should be non-negative. max_attempts: int The maximum number of times to attempt the download the PerSecData for job_id. This number should be positive. Returns ------- download_successful: bool Was the API call and CSV save successful
persec_data_api.py
download_job_persec
welldatalabs/wdl-python
1
python
def download_job_persec(job_id, api_key, persec_filenames, default_delay=70, max_attempts=3): ' Download PerSecData for job_id and save CSVs given by persec_filenames\n\n Repeatedly try to download the PerSecData data for the job indexed\n by job_id from the WDL API. On success, the PerSecData is save in\n files according to the\n entires of persec_filenames.\n\n Parameters\n ----------\n job_id: str\n The JobId used to search the PerSecAPI\n\n api_key: str\n The WDL API key to use for request authentication\n\n persec_filenames: PerSecFilenames\n The target filenames for the raw CSV, formatted CSV, and\n units CSV. An empty or None entry skips writting that\n file type.\n\n default_delay: int\n Default number of seconds to wait between requests. This\n number should be non-negative.\n\n max_attempts: int\n The maximum number of times to attempt the download the\n PerSecData for job_id. This number should be positive.\n\n Returns\n -------\n download_successful: bool\n Was the API call and CSV save successful\n ' assert (isinstance(max_attempts, int) and (max_attempts > 0)) assert (isinstance(default_delay, int) and (default_delay >= 0)) url = get_api_url(job_id) headers = get_api_auth_headers(api_key) status_code = None delay_before_next_api_call = default_delay download_successful = False num_attempts = 0 while (num_attempts < max_attempts): print(job_id) response = requests.get(url, headers=headers) status_code = response.status_code num_attempts = (num_attempts + 1) if (status_code == 200): handle_200(response, persec_filenames) download_successful = True elif (status_code == 400): handle_400(response) elif (status_code == 401): handle_401(response) elif (status_code == 403): handle_403(response) elif (status_code == 404): handle_404(response) elif (status_code == 429): delay_before_next_api_call = handle_429(response, default_delay) else: handle_generic_response(response) delay_before_next_api_call = default_delay if (status_code in frozenset((200, 400, 401, 
403, 404))): break elif (num_attempts < max_attempts): sleep(delay_before_next_api_call) return download_successful
def download_job_persec(job_id, api_key, persec_filenames, default_delay=70, max_attempts=3): ' Download PerSecData for job_id and save CSVs given by persec_filenames\n\n Repeatedly try to download the PerSecData data for the job indexed\n by job_id from the WDL API. On success, the PerSecData is save in\n files according to the\n entires of persec_filenames.\n\n Parameters\n ----------\n job_id: str\n The JobId used to search the PerSecAPI\n\n api_key: str\n The WDL API key to use for request authentication\n\n persec_filenames: PerSecFilenames\n The target filenames for the raw CSV, formatted CSV, and\n units CSV. An empty or None entry skips writting that\n file type.\n\n default_delay: int\n Default number of seconds to wait between requests. This\n number should be non-negative.\n\n max_attempts: int\n The maximum number of times to attempt the download the\n PerSecData for job_id. This number should be positive.\n\n Returns\n -------\n download_successful: bool\n Was the API call and CSV save successful\n ' assert (isinstance(max_attempts, int) and (max_attempts > 0)) assert (isinstance(default_delay, int) and (default_delay >= 0)) url = get_api_url(job_id) headers = get_api_auth_headers(api_key) status_code = None delay_before_next_api_call = default_delay download_successful = False num_attempts = 0 while (num_attempts < max_attempts): print(job_id) response = requests.get(url, headers=headers) status_code = response.status_code num_attempts = (num_attempts + 1) if (status_code == 200): handle_200(response, persec_filenames) download_successful = True elif (status_code == 400): handle_400(response) elif (status_code == 401): handle_401(response) elif (status_code == 403): handle_403(response) elif (status_code == 404): handle_404(response) elif (status_code == 429): delay_before_next_api_call = handle_429(response, default_delay) else: handle_generic_response(response) delay_before_next_api_call = default_delay if (status_code in frozenset((200, 400, 401, 
403, 404))): break elif (num_attempts < max_attempts): sleep(delay_before_next_api_call) return download_successful<|docstring|>Download PerSecData for job_id and save CSVs given by persec_filenames Repeatedly try to download the PerSecData data for the job indexed by job_id from the WDL API. On success, the PerSecData is save in files according to the entires of persec_filenames. Parameters ---------- job_id: str The JobId used to search the PerSecAPI api_key: str The WDL API key to use for request authentication persec_filenames: PerSecFilenames The target filenames for the raw CSV, formatted CSV, and units CSV. An empty or None entry skips writting that file type. default_delay: int Default number of seconds to wait between requests. This number should be non-negative. max_attempts: int The maximum number of times to attempt the download the PerSecData for job_id. This number should be positive. Returns ------- download_successful: bool Was the API call and CSV save successful<|endoftext|>
7f7cdd438c638533fec272b665b6535f606089c480a89f12754d86ed10fb8395
def default_raw_csv_filename(job_id):
    """Build the raw-CSV filename associated with job_id.

    Parameters
    ----------
    job_id: str
        The job id to build the filename for

    Returns
    -------
    filename: pathlib.Path
        Relative filename for job_id; the caller prepends its own
        base directory.
    """
    assert isinstance(job_id, str)
    return Path('original_' + job_id + '.csv')
Returns the filename for the raw CSV associated with job_id Parameters ---------- job_id: str The job_id to generate the filename for Returns ------- filename: pathlib.Path The filename associated with job_id which will be appended to some base path by the caller.
persec_data_api.py
default_raw_csv_filename
welldatalabs/wdl-python
1
python
def default_raw_csv_filename(job_id): ' Returns the filename for the raw CSV associated with job_id\n\n Parameters\n ----------\n job_id: str\n The job_id to generate the filename for\n\n Returns\n -------\n filename: pathlib.Path\n The filename associated with job_id which will be appended\n to some base path by the caller.\n ' assert isinstance(job_id, str) return Path(f'original_{job_id}.csv')
def default_raw_csv_filename(job_id): ' Returns the filename for the raw CSV associated with job_id\n\n Parameters\n ----------\n job_id: str\n The job_id to generate the filename for\n\n Returns\n -------\n filename: pathlib.Path\n The filename associated with job_id which will be appended\n to some base path by the caller.\n ' assert isinstance(job_id, str) return Path(f'original_{job_id}.csv')<|docstring|>Returns the filename for the raw CSV associated with job_id Parameters ---------- job_id: str The job_id to generate the filename for Returns ------- filename: pathlib.Path The filename associated with job_id which will be appended to some base path by the caller.<|endoftext|>
f0b8f29a2116060971b7669b9bcde90e5b67ada0e30f4d65c90aa8b02409acbe
def default_formatted_csv_filename(job_id):
    """Build the formatted-CSV filename associated with job_id.

    Parameters
    ----------
    job_id: str
        The job id to build the filename for

    Returns
    -------
    filename: pathlib.Path
        Relative filename for job_id; the caller prepends its own
        base directory.
    """
    assert isinstance(job_id, str)
    return Path('formatted_{}.csv'.format(job_id))
Returns the filename for the formatted CSV associated with job_id Parameters ---------- job_id: str The job_id to generate the filename for Returns ------- filename: pathlib.Path The filename associated with job_id which will be appended to some base path by the caller.
persec_data_api.py
default_formatted_csv_filename
welldatalabs/wdl-python
1
python
def default_formatted_csv_filename(job_id): ' Returns the filename for the formatted CSV associated with job_id\n\n Parameters\n ----------\n job_id: str\n The job_id to generate the filename for\n\n Returns\n -------\n filename: pathlib.Path\n The filename associated with job_id which will be appended\n to some base path by the caller.\n ' assert isinstance(job_id, str) return Path(f'formatted_{job_id}.csv')
def default_formatted_csv_filename(job_id): ' Returns the filename for the formatted CSV associated with job_id\n\n Parameters\n ----------\n job_id: str\n The job_id to generate the filename for\n\n Returns\n -------\n filename: pathlib.Path\n The filename associated with job_id which will be appended\n to some base path by the caller.\n ' assert isinstance(job_id, str) return Path(f'formatted_{job_id}.csv')<|docstring|>Returns the filename for the formatted CSV associated with job_id Parameters ---------- job_id: str The job_id to generate the filename for Returns ------- filename: pathlib.Path The filename associated with job_id which will be appended to some base path by the caller.<|endoftext|>
0afd32b1232e2b637e7694ba7f02d080f5054cb4f4cab859629f4fd3fe4a8064
def default_units_csv_filename(job_id):
    """Build the units-CSV filename associated with job_id.

    Parameters
    ----------
    job_id: str
        The job id to build the filename for

    Returns
    -------
    filename: pathlib.Path
        Relative filename for job_id; the caller prepends its own
        base directory.
    """
    assert isinstance(job_id, str)
    stem = 'units_' + job_id
    return Path(stem + '.csv')
Returns the filename for the units CSV associated with job_id Parameters ---------- job_id: str The job_id to generate the filename for Returns ------- filename: pathlib.Path The filename associated with job_id which will be appended to some base path by the caller.
persec_data_api.py
default_units_csv_filename
welldatalabs/wdl-python
1
python
def default_units_csv_filename(job_id): ' Returns the filename for the units CSV associated with job_id\n\n Parameters\n ----------\n job_id: str\n The job_id to generate the filename for\n\n Returns\n -------\n filename: pathlib.Path\n The filename associated with job_id which will be appended\n to some base path by the caller.\n ' assert isinstance(job_id, str) return Path(f'units_{job_id}.csv')
def default_units_csv_filename(job_id): ' Returns the filename for the units CSV associated with job_id\n\n Parameters\n ----------\n job_id: str\n The job_id to generate the filename for\n\n Returns\n -------\n filename: pathlib.Path\n The filename associated with job_id which will be appended\n to some base path by the caller.\n ' assert isinstance(job_id, str) return Path(f'units_{job_id}.csv')<|docstring|>Returns the filename for the units CSV associated with job_id Parameters ---------- job_id: str The job_id to generate the filename for Returns ------- filename: pathlib.Path The filename associated with job_id which will be appended to some base path by the caller.<|endoftext|>
2165c30fe9f417ee37711bbb0954b03da10f433db5c4afe280dfe5f50daa48c7
def nosave_filename(job_id):
    """Signal that no CSV file should be written for this job.

    Drop-in alternative to the ``default_*_csv_filename`` builders: a
    None filename tells the caller to skip saving that file type.

    Parameters
    ----------
    job_id: str
        The job id being processed (accepted for interface
        compatibility; unused)

    Returns
    -------
    None
        Always None, meaning "do not save".
    """
    return None
Returns None to indicate not to save a CSV file Parameters ---------- job_id: str The job_id associated with the job being processed
persec_data_api.py
nosave_filename
welldatalabs/wdl-python
1
python
def nosave_filename(job_id): ' Returns None to indicate not to save a CSV file\n\n Parameters\n ----------\n job_id: str\n The job_id associated with the job being processed\n ' return None
def nosave_filename(job_id): ' Returns None to indicate not to save a CSV file\n\n Parameters\n ----------\n job_id: str\n The job_id associated with the job being processed\n ' return None<|docstring|>Returns None to indicate not to save a CSV file Parameters ---------- job_id: str The job_id associated with the job being processed<|endoftext|>