| body (string, 26 to 98.2k chars) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, 1 to 16.8k chars) | path (string, 5 to 230 chars) | name (string, 1 to 96 chars) | repository_name (string, 7 to 89 chars) | lang (string, 1 class) | body_without_docstring (string, 20 to 98.2k chars) |
|---|---|---|---|---|---|---|---|
def main():
' Driver '
| 4,307,824,353,820,192,300
|
Driver
|
common.py
|
main
|
ajyl/KEMP
|
python
|
def main():
' '
|
def __init__(self, dataset_spec: DatasetSpec=None, dev_strs: List[str]=None, v_keychains=None, keep_v_keychains=False, build_mode='explicit', **kwargs) -> None:
'\n base class for storing general specifications of the neural network\n '
kw = locals_to_kwargs(locals())
super().__init__(dataset_spec=dataset_spec, dev_strs=dev_strs, v_keychains=v_keychains, keep_v_keychains=keep_v_keychains, build_mode=build_mode, **kwargs)
if ('subnets' in self):
for (k, subet_spec) in self.subnets.items():
if ('network_spec_class' in subet_spec):
if isinstance(subet_spec.network_spec_class, str):
spec_class = load_class_from_str(subet_spec.network_spec_class)
else:
spec_class = subet_spec.network_spec_class
if isinstance(kwargs['subnets'][k], spec_class):
subet_spec = kwargs['subnets'][k]
else:
subet_spec = spec_class(**{**kwargs['subnets'][k], **dict(dataset_spec=dataset_spec, dev_strs=dev_strs)})
self.subnets[k] = subet_spec
if isinstance(subet_spec.network_class, str):
self.subnets[k].network_class = load_class_from_str(subet_spec.network_class)
else:
self.subnets[k].network_class = subet_spec.network_class
self.subnets[k].store_vars = ivy.default(self.subnets[k].if_exists('store_vars'), True)
self.subnets[k].build_mode = ivy.default(self.subnets[k].if_exists('build_mode'), self.build_mode)
self.subnets[k].dataset_spec = dataset_spec
self.subnets[k].dev_strs = dev_strs
self._kwargs = kw
| 6,158,320,943,500,329,000
|
base class for storing general specifications of the neural network
|
ivy_builder/specs/network_spec.py
|
__init__
|
ivy-dl/builder
|
python
|
def __init__(self, dataset_spec: DatasetSpec=None, dev_strs: List[str]=None, v_keychains=None, keep_v_keychains=False, build_mode='explicit', **kwargs) -> None:
'\n \n '
kw = locals_to_kwargs(locals())
super().__init__(dataset_spec=dataset_spec, dev_strs=dev_strs, v_keychains=v_keychains, keep_v_keychains=keep_v_keychains, build_mode=build_mode, **kwargs)
if ('subnets' in self):
for (k, subet_spec) in self.subnets.items():
if ('network_spec_class' in subet_spec):
if isinstance(subet_spec.network_spec_class, str):
spec_class = load_class_from_str(subet_spec.network_spec_class)
else:
spec_class = subet_spec.network_spec_class
if isinstance(kwargs['subnets'][k], spec_class):
subet_spec = kwargs['subnets'][k]
else:
subet_spec = spec_class(**{**kwargs['subnets'][k], **dict(dataset_spec=dataset_spec, dev_strs=dev_strs)})
self.subnets[k] = subet_spec
if isinstance(subet_spec.network_class, str):
self.subnets[k].network_class = load_class_from_str(subet_spec.network_class)
else:
self.subnets[k].network_class = subet_spec.network_class
self.subnets[k].store_vars = ivy.default(self.subnets[k].if_exists('store_vars'), True)
self.subnets[k].build_mode = ivy.default(self.subnets[k].if_exists('build_mode'), self.build_mode)
self.subnets[k].dataset_spec = dataset_spec
self.subnets[k].dev_strs = dev_strs
self._kwargs = kw
|
def __init__(self, backup_policy=None):
'SetBackupPolicyRequestBody - a model defined in huaweicloud sdk'
self._backup_policy = None
self.discriminator = None
self.backup_policy = backup_policy
| -1,608,957,040,764,722,000
|
SetBackupPolicyRequestBody - a model defined in huaweicloud sdk
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
__init__
|
JeffreyDin/huaweicloud-sdk-python-v3
|
python
|
def __init__(self, backup_policy=None):
self._backup_policy = None
self.discriminator = None
self.backup_policy = backup_policy
|
@property
def backup_policy(self):
'Gets the backup_policy of this SetBackupPolicyRequestBody.\n\n\n :return: The backup_policy of this SetBackupPolicyRequestBody.\n :rtype: BackupPolicy\n '
return self._backup_policy
| 38,454,043,653,194,840
|
Gets the backup_policy of this SetBackupPolicyRequestBody.
:return: The backup_policy of this SetBackupPolicyRequestBody.
:rtype: BackupPolicy
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
backup_policy
|
JeffreyDin/huaweicloud-sdk-python-v3
|
python
|
@property
def backup_policy(self):
'Gets the backup_policy of this SetBackupPolicyRequestBody.\n\n\n :return: The backup_policy of this SetBackupPolicyRequestBody.\n :rtype: BackupPolicy\n '
return self._backup_policy
|
@backup_policy.setter
def backup_policy(self, backup_policy):
'Sets the backup_policy of this SetBackupPolicyRequestBody.\n\n\n :param backup_policy: The backup_policy of this SetBackupPolicyRequestBody.\n :type: BackupPolicy\n '
self._backup_policy = backup_policy
| -1,252,242,191,143,817,700
|
Sets the backup_policy of this SetBackupPolicyRequestBody.
:param backup_policy: The backup_policy of this SetBackupPolicyRequestBody.
:type: BackupPolicy
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
backup_policy
|
JeffreyDin/huaweicloud-sdk-python-v3
|
python
|
@backup_policy.setter
def backup_policy(self, backup_policy):
'Sets the backup_policy of this SetBackupPolicyRequestBody.\n\n\n :param backup_policy: The backup_policy of this SetBackupPolicyRequestBody.\n :type: BackupPolicy\n '
self._backup_policy = backup_policy
|
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result
| 2,594,216,033,120,720,000
|
Returns the model properties as a dict
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
to_dict
|
JeffreyDin/huaweicloud-sdk-python-v3
|
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result
|
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
| 5,849,158,643,760,736,000
|
Returns the string representation of the model
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
to_str
|
JeffreyDin/huaweicloud-sdk-python-v3
|
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
| -8,960,031,694,814,905,000
|
For `print` and `pprint`
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
__repr__
|
JeffreyDin/huaweicloud-sdk-python-v3
|
python
|
def __repr__(self):
return self.to_str()
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, SetBackupPolicyRequestBody)):
return False
return (self.__dict__ == other.__dict__)
| -4,800,433,257,394,585,000
|
Returns true if both objects are equal
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
__eq__
|
JeffreyDin/huaweicloud-sdk-python-v3
|
python
|
def __eq__(self, other):
if (not isinstance(other, SetBackupPolicyRequestBody)):
return False
return (self.__dict__ == other.__dict__)
|
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other))
| 7,764,124,047,908,058,000
|
Returns true if both objects are not equal
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
__ne__
|
JeffreyDin/huaweicloud-sdk-python-v3
|
python
|
def __ne__(self, other):
return (not (self == other))
|
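The SetBackupPolicyRequestBody rows above follow the usual generated-SDK model pattern: attributes declared in openapi_types, a property/setter pair per field, and to_dict / to_str / __repr__ / __eq__ / __ne__ boilerplate. A minimal, self-contained sketch of that pattern (the class and field names here are illustrative, not part of the huaweicloud SDK):

import pprint

class TinyModel:
    # Declared attributes drive to_dict(), mirroring the generated models above.
    openapi_types = {'backup_policy': 'object'}
    sensitive_list = []

    def __init__(self, backup_policy=None):
        self._backup_policy = backup_policy

    @property
    def backup_policy(self):
        return self._backup_policy

    @backup_policy.setter
    def backup_policy(self, value):
        self._backup_policy = value

    def to_dict(self):
        # Recurse into nested models; mask attributes listed in sensitive_list.
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if attr in self.sensitive_list:
                result[attr] = '****'
            else:
                result[attr] = value.to_dict() if hasattr(value, 'to_dict') else value
        return result

    def __repr__(self):
        return pprint.pformat(self.to_dict())

    def __eq__(self, other):
        return isinstance(other, TinyModel) and self.__dict__ == other.__dict__

print(TinyModel(backup_policy={'keep_days': 7}))  # {'backup_policy': {'keep_days': 7}}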
def sinkhorn(a, b, C, reg=0.1, method='sinkhorn', maxIter=1000, tau=1000.0, stopThr=1e-09, verbose=True, log=True, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
"\n Solve the entropic regularization optimal transport\n The input should be PyTorch tensors\n The function solves the following optimization problem:\n\n .. math::\n \\gamma = arg\\min_\\gamma <\\gamma,C>_F + reg\\cdot\\Omega(\\gamma)\n s.t. \\gamma 1 = a\n \\gamma^T 1= b\n \\gamma\\geq 0\n where :\n - C is the (ns,nt) metric cost matrix\n - :math:`\\Omega` is the entropic regularization term :math:`\\Omega(\\gamma)=\\sum_{i,j} \\gamma_{i,j}\\log(\\gamma_{i,j})`\n - a and b are target and source measures (sum to 1)\n The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1].\n\n Parameters\n ----------\n a : torch.tensor (na,)\n samples measure in the target domain\n b : torch.tensor (nb,)\n samples in the source domain\n C : torch.tensor (na,nb)\n loss matrix\n reg : float\n Regularization term > 0\n method : str\n method used for the solver either 'sinkhorn', 'greenkhorn', 'sinkhorn_stabilized' or\n 'sinkhorn_epsilon_scaling', see those function for specific parameters\n maxIter : int, optional\n Max number of iterations\n stopThr : float, optional\n Stop threshol on error ( > 0 )\n verbose : bool, optional\n Print information along iterations\n log : bool, optional\n record log if True\n\n Returns\n -------\n gamma : (na x nb) torch.tensor\n Optimal transportation matrix for the given parameters\n log : dict\n log dictionary return only if log==True in parameters\n\n References\n ----------\n [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013\n See Also\n --------\n\n "
if (method.lower() == 'sinkhorn'):
return sinkhorn_knopp(a, b, C, reg, maxIter=maxIter, stopThr=stopThr, verbose=verbose, log=log, warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq, **kwargs)
elif (method.lower() == 'sinkhorn_stabilized'):
return sinkhorn_stabilized(a, b, C, reg, maxIter=maxIter, tau=tau, stopThr=stopThr, verbose=verbose, log=log, warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq, **kwargs)
elif (method.lower() == 'sinkhorn_epsilon_scaling'):
return sinkhorn_epsilon_scaling(a, b, C, reg, maxIter=maxIter, maxInnerIter=100, tau=tau, scaling_base=0.75, scaling_coef=None, stopThr=stopThr, verbose=False, log=log, warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq, **kwargs)
else:
raise ValueError(("Unknown method '%s'." % method))
| -8,494,778,803,771,725,000
|
Solve the entropic regularization optimal transport
The input should be PyTorch tensors
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- C is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are target and source measures (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1].
Parameters
----------
a : torch.tensor (na,)
samples measure in the target domain
b : torch.tensor (nb,)
samples in the source domain
C : torch.tensor (na,nb)
loss matrix
reg : float
Regularization term > 0
method : str
method used for the solver either 'sinkhorn', 'greenkhorn', 'sinkhorn_stabilized' or
'sinkhorn_epsilon_scaling', see those function for specific parameters
maxIter : int, optional
Max number of iterations
stopThr : float, optional
Stop threshold on error ( > 0 )
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (na x nb) torch.tensor
Optimal transportation matrix for the given parameters
log : dict
log dictionary, returned only if log==True in parameters
References
----------
[1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
See Also
--------
|
losses/bregman_pytorch.py
|
sinkhorn
|
SelmanOzleyen/DRDM-Count
|
python
|
def sinkhorn(a, b, C, reg=0.1, method='sinkhorn', maxIter=1000, tau=1000.0, stopThr=1e-09, verbose=True, log=True, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
"\n Solve the entropic regularization optimal transport\n The input should be PyTorch tensors\n The function solves the following optimization problem:\n\n .. math::\n \\gamma = arg\\min_\\gamma <\\gamma,C>_F + reg\\cdot\\Omega(\\gamma)\n s.t. \\gamma 1 = a\n \\gamma^T 1= b\n \\gamma\\geq 0\n where :\n - C is the (ns,nt) metric cost matrix\n - :math:`\\Omega` is the entropic regularization term :math:`\\Omega(\\gamma)=\\sum_{i,j} \\gamma_{i,j}\\log(\\gamma_{i,j})`\n - a and b are target and source measures (sum to 1)\n The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1].\n\n Parameters\n ----------\n a : torch.tensor (na,)\n samples measure in the target domain\n b : torch.tensor (nb,)\n samples in the source domain\n C : torch.tensor (na,nb)\n loss matrix\n reg : float\n Regularization term > 0\n method : str\n method used for the solver either 'sinkhorn', 'greenkhorn', 'sinkhorn_stabilized' or\n 'sinkhorn_epsilon_scaling', see those function for specific parameters\n maxIter : int, optional\n Max number of iterations\n stopThr : float, optional\n Stop threshol on error ( > 0 )\n verbose : bool, optional\n Print information along iterations\n log : bool, optional\n record log if True\n\n Returns\n -------\n gamma : (na x nb) torch.tensor\n Optimal transportation matrix for the given parameters\n log : dict\n log dictionary return only if log==True in parameters\n\n References\n ----------\n [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013\n See Also\n --------\n\n "
if (method.lower() == 'sinkhorn'):
return sinkhorn_knopp(a, b, C, reg, maxIter=maxIter, stopThr=stopThr, verbose=verbose, log=log, warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq, **kwargs)
elif (method.lower() == 'sinkhorn_stabilized'):
return sinkhorn_stabilized(a, b, C, reg, maxIter=maxIter, tau=tau, stopThr=stopThr, verbose=verbose, log=log, warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq, **kwargs)
elif (method.lower() == 'sinkhorn_epsilon_scaling'):
return sinkhorn_epsilon_scaling(a, b, C, reg, maxIter=maxIter, maxInnerIter=100, tau=tau, scaling_base=0.75, scaling_coef=None, stopThr=stopThr, verbose=False, log=log, warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq, **kwargs)
else:
raise ValueError(("Unknown method '%s'." % method))
|
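A hedged usage sketch for the sinkhorn dispatcher above (it assumes losses/bregman_pytorch.py from the repository is importable and that PyTorch is installed; it is not part of the dataset row):

import torch
from losses.bregman_pytorch import sinkhorn  # module path taken from the row above

na, nb = 5, 7
a = torch.full((na,), 1.0 / na)   # target marginal, sums to 1
b = torch.full((nb,), 1.0 / nb)   # source marginal, sums to 1
C = torch.rand(na, nb)            # cost matrix
P, log = sinkhorn(a, b, C, reg=0.1, method='sinkhorn', verbose=False)  # log=True by default
print(P.shape, float(P.sum()))    # torch.Size([5, 7]), total mass close to 1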
def sinkhorn_knopp(a, b, C, reg=0.1, maxIter=1000, stopThr=1e-09, verbose=True, log=True, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
'\n Solve the entropic regularization optimal transport\n The input should be PyTorch tensors\n The function solves the following optimization problem:\n\n .. math::\n \\gamma = arg\\min_\\gamma <\\gamma,C>_F + reg\\cdot\\Omega(\\gamma)\n s.t. \\gamma 1 = a\n \\gamma^T 1= b\n \\gamma\\geq 0\n where :\n - C is the (ns,nt) metric cost matrix\n - :math:`\\Omega` is the entropic regularization term :math:`\\Omega(\\gamma)=\\sum_{i,j} \\gamma_{i,j}\\log(\\gamma_{i,j})`\n - a and b are target and source measures (sum to 1)\n The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1].\n\n Parameters\n ----------\n a : torch.tensor (na,)\n samples measure in the target domain\n b : torch.tensor (nb,)\n samples in the source domain\n C : torch.tensor (na,nb)\n loss matrix\n reg : float\n Regularization term > 0\n maxIter : int, optional\n Max number of iterations\n stopThr : float, optional\n Stop threshol on error ( > 0 )\n verbose : bool, optional\n Print information along iterations\n log : bool, optional\n record log if True\n\n Returns\n -------\n gamma : (na x nb) torch.tensor\n Optimal transportation matrix for the given parameters\n log : dict\n log dictionary return only if log==True in parameters\n\n References\n ----------\n [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013\n See Also\n --------\n\n '
device = a.device
(na, nb) = C.shape
assert ((na >= 1) and (nb >= 1)), 'C needs to be 2d'
assert ((na == a.shape[0]) and (nb == b.shape[0])), "Shape of a or b does't match that of C"
assert (reg > 0), 'reg should be greater than 0'
assert ((a.min() >= 0.0) and (b.min() >= 0.0)), 'Elements in a or b less than 0'
if log:
log = {'err': []}
if (warm_start is not None):
u = warm_start['u']
v = warm_start['v']
else:
u = (torch.ones(na, dtype=a.dtype).to(device) / na)
v = (torch.ones(nb, dtype=b.dtype).to(device) / nb)
K = torch.empty(C.shape, dtype=C.dtype).to(device)
torch.div(C, (- reg), out=K)
torch.exp(K, out=K)
b_hat = torch.empty(b.shape, dtype=C.dtype).to(device)
it = 1
err = 1
KTu = torch.empty(v.shape, dtype=v.dtype).to(device)
Kv = torch.empty(u.shape, dtype=u.dtype).to(device)
while ((err > stopThr) and (it <= maxIter)):
(upre, vpre) = (u, v)
torch.matmul(u, K, out=KTu)
v = torch.div(b, (KTu + M_EPS))
torch.matmul(K, v, out=Kv)
u = torch.div(a, (Kv + M_EPS))
if (torch.any(torch.isnan(u)) or torch.any(torch.isnan(v)) or torch.any(torch.isinf(u)) or torch.any(torch.isinf(v))):
print('Warning: numerical errors at iteration', it)
(u, v) = (upre, vpre)
break
if (log and ((it % eval_freq) == 0)):
b_hat = (torch.matmul(u, K) * v)
err = (b - b_hat).pow(2).sum().item()
log['err'].append(err)
if (verbose and ((it % print_freq) == 0)):
print('iteration {:5d}, constraint error {:5e}'.format(it, err))
it += 1
if log:
log['u'] = u
log['v'] = v
log['alpha'] = (reg * torch.log((u + M_EPS)))
log['beta'] = (reg * torch.log((v + M_EPS)))
P = ((u.reshape((- 1), 1) * K) * v.reshape(1, (- 1)))
if log:
return (P, log)
else:
return P
| -2,522,005,486,510,639,600
|
Solve the entropic regularization optimal transport
The input should be PyTorch tensors
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- C is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are target and source measures (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1].
Parameters
----------
a : torch.tensor (na,)
samples measure in the target domain
b : torch.tensor (nb,)
samples in the source domain
C : torch.tensor (na,nb)
loss matrix
reg : float
Regularization term > 0
maxIter : int, optional
Max number of iterations
stopThr : float, optional
Stop threshold on error ( > 0 )
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (na x nb) torch.tensor
Optimal transportation matrix for the given parameters
log : dict
log dictionary, returned only if log==True in parameters
References
----------
[1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
See Also
--------
|
losses/bregman_pytorch.py
|
sinkhorn_knopp
|
SelmanOzleyen/DRDM-Count
|
python
|
def sinkhorn_knopp(a, b, C, reg=0.1, maxIter=1000, stopThr=1e-09, verbose=True, log=True, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
'\n Solve the entropic regularization optimal transport\n The input should be PyTorch tensors\n The function solves the following optimization problem:\n\n .. math::\n \\gamma = arg\\min_\\gamma <\\gamma,C>_F + reg\\cdot\\Omega(\\gamma)\n s.t. \\gamma 1 = a\n \\gamma^T 1= b\n \\gamma\\geq 0\n where :\n - C is the (ns,nt) metric cost matrix\n - :math:`\\Omega` is the entropic regularization term :math:`\\Omega(\\gamma)=\\sum_{i,j} \\gamma_{i,j}\\log(\\gamma_{i,j})`\n - a and b are target and source measures (sum to 1)\n The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1].\n\n Parameters\n ----------\n a : torch.tensor (na,)\n samples measure in the target domain\n b : torch.tensor (nb,)\n samples in the source domain\n C : torch.tensor (na,nb)\n loss matrix\n reg : float\n Regularization term > 0\n maxIter : int, optional\n Max number of iterations\n stopThr : float, optional\n Stop threshol on error ( > 0 )\n verbose : bool, optional\n Print information along iterations\n log : bool, optional\n record log if True\n\n Returns\n -------\n gamma : (na x nb) torch.tensor\n Optimal transportation matrix for the given parameters\n log : dict\n log dictionary return only if log==True in parameters\n\n References\n ----------\n [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013\n See Also\n --------\n\n '
device = a.device
(na, nb) = C.shape
assert ((na >= 1) and (nb >= 1)), 'C needs to be 2d'
assert ((na == a.shape[0]) and (nb == b.shape[0])), "Shape of a or b does't match that of C"
assert (reg > 0), 'reg should be greater than 0'
assert ((a.min() >= 0.0) and (b.min() >= 0.0)), 'Elements in a or b less than 0'
if log:
log = {'err': []}
if (warm_start is not None):
u = warm_start['u']
v = warm_start['v']
else:
u = (torch.ones(na, dtype=a.dtype).to(device) / na)
v = (torch.ones(nb, dtype=b.dtype).to(device) / nb)
K = torch.empty(C.shape, dtype=C.dtype).to(device)
torch.div(C, (- reg), out=K)
torch.exp(K, out=K)
b_hat = torch.empty(b.shape, dtype=C.dtype).to(device)
it = 1
err = 1
KTu = torch.empty(v.shape, dtype=v.dtype).to(device)
Kv = torch.empty(u.shape, dtype=u.dtype).to(device)
while ((err > stopThr) and (it <= maxIter)):
(upre, vpre) = (u, v)
torch.matmul(u, K, out=KTu)
v = torch.div(b, (KTu + M_EPS))
torch.matmul(K, v, out=Kv)
u = torch.div(a, (Kv + M_EPS))
if (torch.any(torch.isnan(u)) or torch.any(torch.isnan(v)) or torch.any(torch.isinf(u)) or torch.any(torch.isinf(v))):
print('Warning: numerical errors at iteration', it)
(u, v) = (upre, vpre)
break
if (log and ((it % eval_freq) == 0)):
b_hat = (torch.matmul(u, K) * v)
err = (b - b_hat).pow(2).sum().item()
log['err'].append(err)
if (verbose and ((it % print_freq) == 0)):
print('iteration {:5d}, constraint error {:5e}'.format(it, err))
it += 1
if log:
log['u'] = u
log['v'] = v
log['alpha'] = (reg * torch.log((u + M_EPS)))
log['beta'] = (reg * torch.log((v + M_EPS)))
P = ((u.reshape((- 1), 1) * K) * v.reshape(1, (- 1)))
if log:
return (P, log)
else:
return P
|
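For readers skimming the sinkhorn_knopp row, here is a minimal NumPy restatement of the same u/v scaling loop; this is an illustrative sketch, not the repository's implementation:

import numpy as np

def sinkhorn_np(a, b, C, reg=0.1, max_iter=1000, tol=1e-9, eps=1e-16):
    K = np.exp(-C / reg)                         # Gibbs kernel
    u = np.full(a.shape[0], 1.0 / a.shape[0])
    v = np.full(b.shape[0], 1.0 / b.shape[0])
    for _ in range(max_iter):
        v = b / (K.T @ u + eps)                  # match column marginals to b
        u = a / (K @ v + eps)                    # match row marginals to a
        P = u[:, None] * K * v[None, :]
        if np.sum((P.sum(axis=0) - b) ** 2) < tol:
            break
    return u[:, None] * K * v[None, :]           # P = diag(u) K diag(v)

a, b = np.ones(4) / 4, np.ones(6) / 6
P = sinkhorn_np(a, b, np.random.rand(4, 6))
print(P.sum(axis=1))                             # close to a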
def sinkhorn_stabilized(a, b, C, reg=0.1, maxIter=1000, tau=1000.0, stopThr=1e-09, verbose=False, log=False, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
'\n Solve the entropic regularization OT problem with log stabilization\n The function solves the following optimization problem:\n\n .. math::\n \\gamma = arg\\min_\\gamma <\\gamma,C>_F + reg\\cdot\\Omega(\\gamma)\n s.t. \\gamma 1 = a\n \\gamma^T 1= b\n \\gamma\\geq 0\n where :\n - C is the (ns,nt) metric cost matrix\n - :math:`\\Omega` is the entropic regularization term :math:`\\Omega(\\gamma)=\\sum_{i,j} \\gamma_{i,j}\\log(\\gamma_{i,j})`\n - a and b are target and source measures (sum to 1)\n\n The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1]\n but with the log stabilization proposed in [3] an defined in [2] (Algo 3.1)\n\n Parameters\n ----------\n a : torch.tensor (na,)\n samples measure in the target domain\n b : torch.tensor (nb,)\n samples in the source domain\n C : torch.tensor (na,nb)\n loss matrix\n reg : float\n Regularization term > 0\n tau : float\n thershold for max value in u or v for log scaling\n maxIter : int, optional\n Max number of iterations\n stopThr : float, optional\n Stop threshol on error ( > 0 )\n verbose : bool, optional\n Print information along iterations\n log : bool, optional\n record log if True\n\n Returns\n -------\n gamma : (na x nb) torch.tensor\n Optimal transportation matrix for the given parameters\n log : dict\n log dictionary return only if log==True in parameters\n\n References\n ----------\n [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013\n [2] Bernhard Schmitzer. Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. SIAM Journal on Scientific Computing, 2019\n [3] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.\n\n See Also\n --------\n\n '
device = a.device
(na, nb) = C.shape
assert ((na >= 1) and (nb >= 1)), 'C needs to be 2d'
assert ((na == a.shape[0]) and (nb == b.shape[0])), "Shape of a or b does't match that of C"
assert (reg > 0), 'reg should be greater than 0'
assert ((a.min() >= 0.0) and (b.min() >= 0.0)), 'Elements in a or b less than 0'
if log:
log = {'err': []}
if (warm_start is not None):
alpha = warm_start['alpha']
beta = warm_start['beta']
else:
alpha = torch.zeros(na, dtype=a.dtype).to(device)
beta = torch.zeros(nb, dtype=b.dtype).to(device)
u = (torch.ones(na, dtype=a.dtype).to(device) / na)
v = (torch.ones(nb, dtype=b.dtype).to(device) / nb)
def update_K(alpha, beta):
'log space computation'
'memory efficient'
torch.add(alpha.reshape((- 1), 1), beta.reshape(1, (- 1)), out=K)
torch.add(K, (- C), out=K)
torch.div(K, reg, out=K)
torch.exp(K, out=K)
def update_P(alpha, beta, u, v, ab_updated=False):
'log space P (gamma) computation'
torch.add(alpha.reshape((- 1), 1), beta.reshape(1, (- 1)), out=P)
torch.add(P, (- C), out=P)
torch.div(P, reg, out=P)
if (not ab_updated):
torch.add(P, torch.log((u + M_EPS)).reshape((- 1), 1), out=P)
torch.add(P, torch.log((v + M_EPS)).reshape(1, (- 1)), out=P)
torch.exp(P, out=P)
K = torch.empty(C.shape, dtype=C.dtype).to(device)
update_K(alpha, beta)
b_hat = torch.empty(b.shape, dtype=C.dtype).to(device)
it = 1
err = 1
ab_updated = False
KTu = torch.empty(v.shape, dtype=v.dtype).to(device)
Kv = torch.empty(u.shape, dtype=u.dtype).to(device)
P = torch.empty(C.shape, dtype=C.dtype).to(device)
while ((err > stopThr) and (it <= maxIter)):
(upre, vpre) = (u, v)
torch.matmul(u, K, out=KTu)
v = torch.div(b, (KTu + M_EPS))
torch.matmul(K, v, out=Kv)
u = torch.div(a, (Kv + M_EPS))
ab_updated = False
if ((u.abs().sum() > tau) or (v.abs().sum() > tau)):
alpha += (reg * torch.log((u + M_EPS)))
beta += (reg * torch.log((v + M_EPS)))
u.fill_((1.0 / na))
v.fill_((1.0 / nb))
update_K(alpha, beta)
ab_updated = True
if (log and ((it % eval_freq) == 0)):
update_P(alpha, beta, u, v, ab_updated)
b_hat = torch.sum(P, 0)
err = (b - b_hat).pow(2).sum().item()
log['err'].append(err)
if (verbose and ((it % print_freq) == 0)):
print('iteration {:5d}, constraint error {:5e}'.format(it, err))
it += 1
if log:
log['u'] = u
log['v'] = v
log['alpha'] = (alpha + (reg * torch.log((u + M_EPS))))
log['beta'] = (beta + (reg * torch.log((v + M_EPS))))
update_P(alpha, beta, u, v, False)
if log:
return (P, log)
else:
return P
| 664,508,433,491,841,800
|
Solve the entropic regularization OT problem with log stabilization
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- C is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are target and source measures (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1]
but with the log stabilization proposed in [3] and defined in [2] (Algo 3.1)
Parameters
----------
a : torch.tensor (na,)
samples measure in the target domain
b : torch.tensor (nb,)
samples in the source domain
C : torch.tensor (na,nb)
loss matrix
reg : float
Regularization term > 0
tau : float
threshold for max value in u or v for log scaling
maxIter : int, optional
Max number of iterations
stopThr : float, optional
Stop threshold on error ( > 0 )
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (na x nb) torch.tensor
Optimal transportation matrix for the given parameters
log : dict
log dictionary, returned only if log==True in parameters
References
----------
[1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
[2] Bernhard Schmitzer. Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. SIAM Journal on Scientific Computing, 2019
[3] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.
See Also
--------
|
losses/bregman_pytorch.py
|
sinkhorn_stabilized
|
SelmanOzleyen/DRDM-Count
|
python
|
def sinkhorn_stabilized(a, b, C, reg=0.1, maxIter=1000, tau=1000.0, stopThr=1e-09, verbose=False, log=False, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
'\n Solve the entropic regularization OT problem with log stabilization\n The function solves the following optimization problem:\n\n .. math::\n \\gamma = arg\\min_\\gamma <\\gamma,C>_F + reg\\cdot\\Omega(\\gamma)\n s.t. \\gamma 1 = a\n \\gamma^T 1= b\n \\gamma\\geq 0\n where :\n - C is the (ns,nt) metric cost matrix\n - :math:`\\Omega` is the entropic regularization term :math:`\\Omega(\\gamma)=\\sum_{i,j} \\gamma_{i,j}\\log(\\gamma_{i,j})`\n - a and b are target and source measures (sum to 1)\n\n The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1]\n but with the log stabilization proposed in [3] an defined in [2] (Algo 3.1)\n\n Parameters\n ----------\n a : torch.tensor (na,)\n samples measure in the target domain\n b : torch.tensor (nb,)\n samples in the source domain\n C : torch.tensor (na,nb)\n loss matrix\n reg : float\n Regularization term > 0\n tau : float\n thershold for max value in u or v for log scaling\n maxIter : int, optional\n Max number of iterations\n stopThr : float, optional\n Stop threshol on error ( > 0 )\n verbose : bool, optional\n Print information along iterations\n log : bool, optional\n record log if True\n\n Returns\n -------\n gamma : (na x nb) torch.tensor\n Optimal transportation matrix for the given parameters\n log : dict\n log dictionary return only if log==True in parameters\n\n References\n ----------\n [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013\n [2] Bernhard Schmitzer. Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. SIAM Journal on Scientific Computing, 2019\n [3] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.\n\n See Also\n --------\n\n '
device = a.device
(na, nb) = C.shape
assert ((na >= 1) and (nb >= 1)), 'C needs to be 2d'
assert ((na == a.shape[0]) and (nb == b.shape[0])), "Shape of a or b does't match that of C"
assert (reg > 0), 'reg should be greater than 0'
assert ((a.min() >= 0.0) and (b.min() >= 0.0)), 'Elements in a or b less than 0'
if log:
log = {'err': []}
if (warm_start is not None):
alpha = warm_start['alpha']
beta = warm_start['beta']
else:
alpha = torch.zeros(na, dtype=a.dtype).to(device)
beta = torch.zeros(nb, dtype=b.dtype).to(device)
u = (torch.ones(na, dtype=a.dtype).to(device) / na)
v = (torch.ones(nb, dtype=b.dtype).to(device) / nb)
def update_K(alpha, beta):
'log space computation'
'memory efficient'
torch.add(alpha.reshape((- 1), 1), beta.reshape(1, (- 1)), out=K)
torch.add(K, (- C), out=K)
torch.div(K, reg, out=K)
torch.exp(K, out=K)
def update_P(alpha, beta, u, v, ab_updated=False):
'log space P (gamma) computation'
torch.add(alpha.reshape((- 1), 1), beta.reshape(1, (- 1)), out=P)
torch.add(P, (- C), out=P)
torch.div(P, reg, out=P)
if (not ab_updated):
torch.add(P, torch.log((u + M_EPS)).reshape((- 1), 1), out=P)
torch.add(P, torch.log((v + M_EPS)).reshape(1, (- 1)), out=P)
torch.exp(P, out=P)
K = torch.empty(C.shape, dtype=C.dtype).to(device)
update_K(alpha, beta)
b_hat = torch.empty(b.shape, dtype=C.dtype).to(device)
it = 1
err = 1
ab_updated = False
KTu = torch.empty(v.shape, dtype=v.dtype).to(device)
Kv = torch.empty(u.shape, dtype=u.dtype).to(device)
P = torch.empty(C.shape, dtype=C.dtype).to(device)
while ((err > stopThr) and (it <= maxIter)):
(upre, vpre) = (u, v)
torch.matmul(u, K, out=KTu)
v = torch.div(b, (KTu + M_EPS))
torch.matmul(K, v, out=Kv)
u = torch.div(a, (Kv + M_EPS))
ab_updated = False
if ((u.abs().sum() > tau) or (v.abs().sum() > tau)):
alpha += (reg * torch.log((u + M_EPS)))
beta += (reg * torch.log((v + M_EPS)))
u.fill_((1.0 / na))
v.fill_((1.0 / nb))
update_K(alpha, beta)
ab_updated = True
if (log and ((it % eval_freq) == 0)):
update_P(alpha, beta, u, v, ab_updated)
b_hat = torch.sum(P, 0)
err = (b - b_hat).pow(2).sum().item()
log['err'].append(err)
if (verbose and ((it % print_freq) == 0)):
print('iteration {:5d}, constraint error {:5e}'.format(it, err))
it += 1
if log:
log['u'] = u
log['v'] = v
log['alpha'] = (alpha + (reg * torch.log((u + M_EPS))))
log['beta'] = (beta + (reg * torch.log((v + M_EPS))))
update_P(alpha, beta, u, v, False)
if log:
return (P, log)
else:
return P
|
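The stabilized variant above differs from plain Sinkhorn-Knopp mainly in the absorption step: when u or v grows large, it is folded into the dual potentials and the scalings are reset. A small standalone sketch of that step (illustrative names, NumPy instead of the row's PyTorch):

import numpy as np

def absorb(alpha, beta, u, v, reg, eps=1e-16):
    # Mirror of the branch guarded by u.abs().sum() > tau in the row above.
    alpha = alpha + reg * np.log(u + eps)
    beta = beta + reg * np.log(v + eps)
    u = np.full_like(u, 1.0 / u.shape[0])   # reset scalings to uniform
    v = np.full_like(v, 1.0 / v.shape[0])
    return alpha, beta, u, v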
def sinkhorn_epsilon_scaling(a, b, C, reg=0.1, maxIter=100, maxInnerIter=100, tau=1000.0, scaling_base=0.75, scaling_coef=None, stopThr=1e-09, verbose=False, log=False, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
'\n Solve the entropic regularization OT problem with log stabilization\n The function solves the following optimization problem:\n\n .. math::\n \\gamma = arg\\min_\\gamma <\\gamma,C>_F + reg\\cdot\\Omega(\\gamma)\n s.t. \\gamma 1 = a\n \\gamma^T 1= b\n \\gamma\\geq 0\n where :\n - C is the (ns,nt) metric cost matrix\n - :math:`\\Omega` is the entropic regularization term :math:`\\Omega(\\gamma)=\\sum_{i,j} \\gamma_{i,j}\\log(\\gamma_{i,j})`\n - a and b are target and source measures (sum to 1)\n\n The algorithm used for solving the problem is the Sinkhorn-Knopp matrix\n scaling algorithm as proposed in [1] but with the log stabilization\n proposed in [3] and the log scaling proposed in [2] algorithm 3.2\n\n Parameters\n ----------\n a : torch.tensor (na,)\n samples measure in the target domain\n b : torch.tensor (nb,)\n samples in the source domain\n C : torch.tensor (na,nb)\n loss matrix\n reg : float\n Regularization term > 0\n tau : float\n thershold for max value in u or v for log scaling\n maxIter : int, optional\n Max number of iterations\n stopThr : float, optional\n Stop threshol on error ( > 0 )\n verbose : bool, optional\n Print information along iterations\n log : bool, optional\n record log if True\n\n Returns\n -------\n gamma : (na x nb) torch.tensor\n Optimal transportation matrix for the given parameters\n log : dict\n log dictionary return only if log==True in parameters\n\n References\n ----------\n [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013\n [2] Bernhard Schmitzer. Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. SIAM Journal on Scientific Computing, 2019\n [3] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.\n\n See Also\n --------\n\n '
(na, nb) = C.shape
assert ((na >= 1) and (nb >= 1)), 'C needs to be 2d'
assert ((na == a.shape[0]) and (nb == b.shape[0])), "Shape of a or b does't match that of C"
assert (reg > 0), 'reg should be greater than 0'
assert ((a.min() >= 0.0) and (b.min() >= 0.0)), 'Elements in a or b less than 0'
def get_reg(it, reg, pre_reg):
if (it == 1):
return scaling_coef
elif (((pre_reg - reg) * scaling_base) < M_EPS):
return reg
else:
return (((pre_reg - reg) * scaling_base) + reg)
if (scaling_coef is None):
scaling_coef = (C.max() + reg)
it = 1
err = 1
running_reg = scaling_coef
if log:
log = {'err': []}
warm_start = None
while ((err > stopThr) and (it <= maxIter)):
running_reg = get_reg(it, reg, running_reg)
(P, _log) = sinkhorn_stabilized(a, b, C, running_reg, maxIter=maxInnerIter, tau=tau, stopThr=stopThr, verbose=False, log=True, warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq, **kwargs)
warm_start = {}
warm_start['alpha'] = _log['alpha']
warm_start['beta'] = _log['beta']
primal_val = (((C * P).sum() + (reg * (P * torch.log(P)).sum())) - (reg * P.sum()))
dual_val = (((_log['alpha'] * a).sum() + (_log['beta'] * b).sum()) - (reg * P.sum()))
err = (primal_val - dual_val)
log['err'].append(err)
if (verbose and ((it % print_freq) == 0)):
print('iteration {:5d}, constraint error {:5e}'.format(it, err))
it += 1
if log:
log['alpha'] = _log['alpha']
log['beta'] = _log['beta']
return (P, log)
else:
return P
| 3,222,460,278,742,383,600
|
Solve the entropic regularization OT problem with log stabilization
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,C>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- C is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are target and source measures (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix
scaling algorithm as proposed in [1] but with the log stabilization
proposed in [3] and the log scaling proposed in [2] algorithm 3.2
Parameters
----------
a : torch.tensor (na,)
samples measure in the target domain
b : torch.tensor (nb,)
samples in the source domain
C : torch.tensor (na,nb)
loss matrix
reg : float
Regularization term > 0
tau : float
threshold for max value in u or v for log scaling
maxIter : int, optional
Max number of iterations
stopThr : float, optional
Stop threshold on error ( > 0 )
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (na x nb) torch.tensor
Optimal transportation matrix for the given parameters
log : dict
log dictionary, returned only if log==True in parameters
References
----------
[1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
[2] Bernhard Schmitzer. Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. SIAM Journal on Scientific Computing, 2019
[3] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.
See Also
--------
|
losses/bregman_pytorch.py
|
sinkhorn_epsilon_scaling
|
SelmanOzleyen/DRDM-Count
|
python
|
def sinkhorn_epsilon_scaling(a, b, C, reg=0.1, maxIter=100, maxInnerIter=100, tau=1000.0, scaling_base=0.75, scaling_coef=None, stopThr=1e-09, verbose=False, log=False, warm_start=None, eval_freq=10, print_freq=200, **kwargs):
'\n Solve the entropic regularization OT problem with log stabilization\n The function solves the following optimization problem:\n\n .. math::\n \\gamma = arg\\min_\\gamma <\\gamma,C>_F + reg\\cdot\\Omega(\\gamma)\n s.t. \\gamma 1 = a\n \\gamma^T 1= b\n \\gamma\\geq 0\n where :\n - C is the (ns,nt) metric cost matrix\n - :math:`\\Omega` is the entropic regularization term :math:`\\Omega(\\gamma)=\\sum_{i,j} \\gamma_{i,j}\\log(\\gamma_{i,j})`\n - a and b are target and source measures (sum to 1)\n\n The algorithm used for solving the problem is the Sinkhorn-Knopp matrix\n scaling algorithm as proposed in [1] but with the log stabilization\n proposed in [3] and the log scaling proposed in [2] algorithm 3.2\n\n Parameters\n ----------\n a : torch.tensor (na,)\n samples measure in the target domain\n b : torch.tensor (nb,)\n samples in the source domain\n C : torch.tensor (na,nb)\n loss matrix\n reg : float\n Regularization term > 0\n tau : float\n thershold for max value in u or v for log scaling\n maxIter : int, optional\n Max number of iterations\n stopThr : float, optional\n Stop threshol on error ( > 0 )\n verbose : bool, optional\n Print information along iterations\n log : bool, optional\n record log if True\n\n Returns\n -------\n gamma : (na x nb) torch.tensor\n Optimal transportation matrix for the given parameters\n log : dict\n log dictionary return only if log==True in parameters\n\n References\n ----------\n [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013\n [2] Bernhard Schmitzer. Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. SIAM Journal on Scientific Computing, 2019\n [3] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.\n\n See Also\n --------\n\n '
(na, nb) = C.shape
assert ((na >= 1) and (nb >= 1)), 'C needs to be 2d'
assert ((na == a.shape[0]) and (nb == b.shape[0])), "Shape of a or b does't match that of C"
assert (reg > 0), 'reg should be greater than 0'
assert ((a.min() >= 0.0) and (b.min() >= 0.0)), 'Elements in a or b less than 0'
def get_reg(it, reg, pre_reg):
if (it == 1):
return scaling_coef
elif (((pre_reg - reg) * scaling_base) < M_EPS):
return reg
else:
return (((pre_reg - reg) * scaling_base) + reg)
if (scaling_coef is None):
scaling_coef = (C.max() + reg)
it = 1
err = 1
running_reg = scaling_coef
if log:
log = {'err': []}
warm_start = None
while ((err > stopThr) and (it <= maxIter)):
running_reg = get_reg(it, reg, running_reg)
(P, _log) = sinkhorn_stabilized(a, b, C, running_reg, maxIter=maxInnerIter, tau=tau, stopThr=stopThr, verbose=False, log=True, warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq, **kwargs)
warm_start = {}
warm_start['alpha'] = _log['alpha']
warm_start['beta'] = _log['beta']
primal_val = (((C * P).sum() + (reg * (P * torch.log(P)).sum())) - (reg * P.sum()))
dual_val = (((_log['alpha'] * a).sum() + (_log['beta'] * b).sum()) - (reg * P.sum()))
err = (primal_val - dual_val)
log['err'].append(err)
if (verbose and ((it % print_freq) == 0)):
print('iteration {:5d}, constraint error {:5e}'.format(it, err))
it += 1
if log:
log['alpha'] = _log['alpha']
log['beta'] = _log['beta']
return (P, log)
else:
return P
|
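The epsilon-scaling driver above anneals the regularization from scaling_coef down to the target reg through get_reg. A tiny standalone sketch of that schedule (the numbers are illustrative):

reg, scaling_base, scaling_coef = 0.1, 0.75, 5.0
running = scaling_coef
for it in range(1, 8):
    if it > 1:
        running = reg + (running - reg) * scaling_base  # geometric decay toward reg
    print(it, round(running, 4))   # 5.0, 3.775, 2.8563, ... approaching 0.1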
def update_K(alpha, beta):
'log space computation'
'memory efficient'
torch.add(alpha.reshape((- 1), 1), beta.reshape(1, (- 1)), out=K)
torch.add(K, (- C), out=K)
torch.div(K, reg, out=K)
torch.exp(K, out=K)
| 7,934,780,117,655,571,000
|
log space computation
|
losses/bregman_pytorch.py
|
update_K
|
SelmanOzleyen/DRDM-Count
|
python
|
def update_K(alpha, beta):
'memory efficient'
torch.add(alpha.reshape((- 1), 1), beta.reshape(1, (- 1)), out=K)
torch.add(K, (- C), out=K)
torch.div(K, reg, out=K)
torch.exp(K, out=K)
|
def update_P(alpha, beta, u, v, ab_updated=False):
'log space P (gamma) computation'
torch.add(alpha.reshape((- 1), 1), beta.reshape(1, (- 1)), out=P)
torch.add(P, (- C), out=P)
torch.div(P, reg, out=P)
if (not ab_updated):
torch.add(P, torch.log((u + M_EPS)).reshape((- 1), 1), out=P)
torch.add(P, torch.log((v + M_EPS)).reshape(1, (- 1)), out=P)
torch.exp(P, out=P)
| 6,091,197,225,457,190,000
|
log space P (gamma) computation
|
losses/bregman_pytorch.py
|
update_P
|
SelmanOzleyen/DRDM-Count
|
python
|
def update_P(alpha, beta, u, v, ab_updated=False):
torch.add(alpha.reshape((- 1), 1), beta.reshape(1, (- 1)), out=P)
torch.add(P, (- C), out=P)
torch.div(P, reg, out=P)
if (not ab_updated):
torch.add(P, torch.log((u + M_EPS)).reshape((- 1), 1), out=P)
torch.add(P, torch.log((v + M_EPS)).reshape(1, (- 1)), out=P)
torch.exp(P, out=P)
|
def bubblesort(nums: List[int]):
' sort list '
for i in range(0, len(nums)):
for j in range(0, ((len(nums) - i) - 1)):
if (nums[j] > nums[(j + 1)]):
tmp = nums[j]
nums[j] = nums[(j + 1)]
nums[(j + 1)] = tmp
return nums
| -7,130,790,351,319,383,000
|
sort list
|
bubblesort/bubblesort_logic.py
|
bubblesort
|
vscode-debug-specs/python
|
python
|
def bubblesort(nums: List[int]):
' '
for i in range(0, len(nums)):
for j in range(0, ((len(nums) - i) - 1)):
if (nums[j] > nums[(j + 1)]):
tmp = nums[j]
nums[j] = nums[(j + 1)]
nums[(j + 1)] = tmp
return nums
|
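A quick usage sketch for the bubblesort row (it assumes the function from bubblesort/bubblesort_logic.py is importable; the import path below is a guess based on the row's path column). Note the three-line swap in the body could equally be written with tuple unpacking, nums[j], nums[j + 1] = nums[j + 1], nums[j]:

from bubblesort.bubblesort_logic import bubblesort  # hypothetical import path

print(bubblesort([5, 1, 4, 2, 8]))   # [1, 2, 4, 5, 8]; the input list is sorted in place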
def make_sqlx(conn, schema, tables):
'Make sqlx lookup function for given tables'
table_func_map = {}
for table in tables:
ntRec = namedtuple(table, tables[table].columns.keys())
table_func_map[table] = SqlX(conn, table, schema, ntRec)
def sqlx(expr) -> SqlX:
obj = jmespath.search(expr, table_func_map)
if (not obj):
raise Exception('sqlx: Cannot find "{}"'.format(expr))
return obj
return sqlx
| 8,371,756,681,237,449,000
|
Make sqlx lookup function for given tables
|
xutil/database/base.py
|
make_sqlx
|
flarco/n1slutil
|
python
|
def make_sqlx(conn, schema, tables):
table_func_map = {}
for table in tables:
ntRec = namedtuple(table, tables[table].columns.keys())
table_func_map[table] = SqlX(conn, table, schema, ntRec)
def sqlx(expr) -> SqlX:
obj = jmespath.search(expr, table_func_map)
if (not obj):
raise Exception('sqlx: Cannot find "{}"'.format(expr))
return obj
return sqlx
|
def get_sql_sources(sql_text, echo=False):
'Obtain the source tables of a query\n '
import sqlparse
sql_text = re.sub('as\\(', 'as (', sql_text, 0, (re.MULTILINE | re.IGNORECASE))
statements = sqlparse.parse(sql_text)
cte_aliases = set()
sql_sources = {}
def get_sources(statement):
sources_dict = {}
last_kw_from = False
last_kw_join = False
cte_mode = False
last_tok = None
done = False
while (not done):
for tok in statement.tokens:
if tok.is_group:
if (cte_mode and isinstance(tok, sqlparse.sql.IdentifierList)):
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Identifier):
for tok3 in tok2.tokens:
if isinstance(tok3, sqlparse.sql.Parenthesis):
cte_aliases.add(tok3.parent.normalized.lower())
sources_dict2 = get_sources(tok3)
sources_dict = {**sources_dict, **sources_dict2}
elif isinstance(tok, sqlparse.sql.Parenthesis):
sources_dict2 = get_sources(tok)
sources_dict = {**sources_dict, **sources_dict2}
else:
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Parenthesis):
cte_aliases.add(tok2.parent.normalized.lower())
sources_dict2 = get_sources(tok2)
sources_dict = {**sources_dict, **sources_dict2}
if ((last_kw_from or last_kw_join) and last_tok.is_whitespace):
if isinstance(tok, sqlparse.sql.IdentifierList):
for tok2 in tok.tokens:
if (isinstance(tok2, sqlparse.sql.Identifier) and ('(' in tok2.value)):
sources_dict2 = get_sources(tok2)
sources_dict = {**sources_dict, **sources_dict2}
elif (isinstance(tok2, sqlparse.sql.Identifier) and (tok2.normalized.lower() not in cte_aliases)):
if echo:
log(('+Table = ' + tok2.normalized.lower()))
sources_dict[tok2.normalized.lower()] = tok.parent
elif (isinstance(tok, sqlparse.sql.Identifier) and (tok.normalized.lower() not in cte_aliases)):
if echo:
log(('+Table = ' + tok.normalized.lower()))
sources_dict[tok.normalized.lower()] = tok.parent
last_kw_join = False
if (tok.is_keyword and (tok.normalized == 'WITH')):
cte_mode = True
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'GROUP')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'WHERE')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'ORDER')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'CREATE')):
cte_mode = True
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'SELECT')):
cte_mode = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'FROM')):
last_kw_from = True
elif (tok.is_keyword and ('JOIN' in tok.normalized)):
last_kw_join = True
last_tok = tok
done = True
return sources_dict
for (s, statement) in enumerate(statements):
has_from = False
last_kw_create = False
last_kw_create_table = False
create_table = None
for tok in statement.tokens:
if (isinstance(tok, sqlparse.sql.Identifier) and last_kw_create_table):
create_table = tok.normalized
last_kw_create_table = False
last_kw_create = False
if echo:
log(('-CREATE TABLE ' + create_table))
if (tok.is_keyword and (tok.normalized == 'TABLE') and last_kw_create):
last_kw_create_table = True
if (tok.is_keyword and (tok.normalized == 'CREATE')):
last_kw_create = True
if (tok.is_keyword and (tok.normalized == 'FROM')):
has_from = True
last_tok = tok
if has_from:
sources_dict = get_sources(statement)
if create_table:
sql_sources[create_table] = sorted(sources_dict)
else:
sql_sources[s] = sorted(sources_dict)
return sql_sources
| 3,964,499,382,857,007,600
|
Obtain the source tables of a query
|
xutil/database/base.py
|
get_sql_sources
|
flarco/n1slutil
|
python
|
def get_sql_sources(sql_text, echo=False):
'\n '
import sqlparse
sql_text = re.sub('as\\(', 'as (', sql_text, 0, (re.MULTILINE | re.IGNORECASE))
statements = sqlparse.parse(sql_text)
cte_aliases = set()
sql_sources = {}
def get_sources(statement):
sources_dict = {}
last_kw_from = False
last_kw_join = False
cte_mode = False
last_tok = None
done = False
while (not done):
for tok in statement.tokens:
if tok.is_group:
if (cte_mode and isinstance(tok, sqlparse.sql.IdentifierList)):
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Identifier):
for tok3 in tok2.tokens:
if isinstance(tok3, sqlparse.sql.Parenthesis):
cte_aliases.add(tok3.parent.normalized.lower())
sources_dict2 = get_sources(tok3)
sources_dict = {**sources_dict, **sources_dict2}
elif isinstance(tok, sqlparse.sql.Parenthesis):
sources_dict2 = get_sources(tok)
sources_dict = {**sources_dict, **sources_dict2}
else:
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Parenthesis):
cte_aliases.add(tok2.parent.normalized.lower())
sources_dict2 = get_sources(tok2)
sources_dict = {**sources_dict, **sources_dict2}
if ((last_kw_from or last_kw_join) and last_tok.is_whitespace):
if isinstance(tok, sqlparse.sql.IdentifierList):
for tok2 in tok.tokens:
if (isinstance(tok2, sqlparse.sql.Identifier) and ('(' in tok2.value)):
sources_dict2 = get_sources(tok2)
sources_dict = {**sources_dict, **sources_dict2}
elif (isinstance(tok2, sqlparse.sql.Identifier) and (tok2.normalized.lower() not in cte_aliases)):
if echo:
log(('+Table = ' + tok2.normalized.lower()))
sources_dict[tok2.normalized.lower()] = tok.parent
elif (isinstance(tok, sqlparse.sql.Identifier) and (tok.normalized.lower() not in cte_aliases)):
if echo:
log(('+Table = ' + tok.normalized.lower()))
sources_dict[tok.normalized.lower()] = tok.parent
last_kw_join = False
if (tok.is_keyword and (tok.normalized == 'WITH')):
cte_mode = True
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'GROUP')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'WHERE')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'ORDER')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'CREATE')):
cte_mode = True
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'SELECT')):
cte_mode = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'FROM')):
last_kw_from = True
elif (tok.is_keyword and ('JOIN' in tok.normalized)):
last_kw_join = True
last_tok = tok
done = True
return sources_dict
for (s, statement) in enumerate(statements):
has_from = False
last_kw_create = False
last_kw_create_table = False
create_table = None
for tok in statement.tokens:
if (isinstance(tok, sqlparse.sql.Identifier) and last_kw_create_table):
create_table = tok.normalized
last_kw_create_table = False
last_kw_create = False
if echo:
log(('-CREATE TABLE ' + create_table))
if (tok.is_keyword and (tok.normalized == 'TABLE') and last_kw_create):
last_kw_create_table = True
if (tok.is_keyword and (tok.normalized == 'CREATE')):
last_kw_create = True
if (tok.is_keyword and (tok.normalized == 'FROM')):
has_from = True
last_tok = tok
if has_from:
sources_dict = get_sources(statement)
if create_table:
sql_sources[create_table] = sorted(sources_dict)
else:
sql_sources[s] = sorted(sources_dict)
return sql_sources
|
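get_sql_sources above works by walking sqlparse tokens and remembering whether the previous keyword was FROM or a JOIN. A small self-contained sqlparse sketch of that core idea (illustrative and much simpler than the full function):

import sqlparse

sql = "select a.x from schema1.table1 a join schema2.table2 b on a.id = b.id"
stmt = sqlparse.parse(sql)[0]
after_from_or_join = False
for tok in stmt.tokens:
    if after_from_or_join and isinstance(tok, sqlparse.sql.Identifier):
        print(tok.get_real_name())       # table1, then table2
        after_from_or_join = False
    if tok.is_keyword and (tok.normalized == 'FROM' or 'JOIN' in tok.normalized):
        after_from_or_join = True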
def __init__(self, conn_dict, profile=None, echo=False):
'Inititate connection'
self._cred = struct(conn_dict)
self._cred.kwargs = conn_dict.get('kwargs', {})
self.name = self._cred.get('name', None)
self.username = self._cred.get('username', None)
self.type = self._cred.type
self.engine = None
self._cursor_description = None
self.profile = profile
self.batch_size = 10000
self.fetch_size = 20000
self.echo = echo
self.connect()
self.last_connect = now()
template_base_path = '{}/database/templates/base.yaml'.format(get_dir_path())
self.template_dict = read_yaml(template_base_path)
template_path = '{}/database/templates/{}.yaml'.format(get_dir_path(), self.type)
temp_dict = read_yaml(template_path)
for key1 in temp_dict:
if isinstance(temp_dict[key1], dict):
if (key1 not in self.template_dict):
self.template_dict[key1] = temp_dict[key1]
for key2 in temp_dict[key1]:
self.template_dict[key1][key2] = temp_dict[key1][key2]
else:
self.template_dict[key1] = temp_dict[key1]
self.variables = self._template('variables')
if os.getenv('PROFILE_YAML'):
other_vars = get_variables()
for key in other_vars:
self.variables[key] = other_vars[key]
self.tmp_folder = self.variables['tmp_folder']
self.set_variables()
if echo:
log('Connected to {} as {}'.format(self._cred.name, self._cred.user))
| -3,225,673,821,873,554,000
|
Initiate connection
|
xutil/database/base.py
|
__init__
|
flarco/n1slutil
|
python
|
def __init__(self, conn_dict, profile=None, echo=False):
self._cred = struct(conn_dict)
self._cred.kwargs = conn_dict.get('kwargs', {})
self.name = self._cred.get('name', None)
self.username = self._cred.get('username', None)
self.type = self._cred.type
self.engine = None
self._cursor_description = None
self.profile = profile
self.batch_size = 10000
self.fetch_size = 20000
self.echo = echo
self.connect()
self.last_connect = now()
template_base_path = '{}/database/templates/base.yaml'.format(get_dir_path())
self.template_dict = read_yaml(template_base_path)
template_path = '{}/database/templates/{}.yaml'.format(get_dir_path(), self.type)
temp_dict = read_yaml(template_path)
for key1 in temp_dict:
if isinstance(temp_dict[key1], dict):
if (key1 not in self.template_dict):
self.template_dict[key1] = temp_dict[key1]
for key2 in temp_dict[key1]:
self.template_dict[key1][key2] = temp_dict[key1][key2]
else:
self.template_dict[key1] = temp_dict[key1]
self.variables = self._template('variables')
if os.getenv('PROFILE_YAML'):
other_vars = get_variables()
for key in other_vars:
self.variables[key] = other_vars[key]
self.tmp_folder = self.variables['tmp_folder']
self.set_variables()
if echo:
log('Connected to {} as {}'.format(self._cred.name, self._cred.user))
|
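The connection __init__ above layers an engine-specific template YAML over a base YAML with a one-level-deep merge. A standalone sketch of just that merge (the keys here are hypothetical, not the actual template schema):

base = {'variables': {'tmp_folder': '/tmp'}, 'core': {'drop_table': 'drop table {table}'}}
override = {'core': {'drop_table': 'drop table if exists {table}'}, 'limit': 'select * from {table} limit {n}'}
for key1, val in override.items():
    if isinstance(val, dict):
        base.setdefault(key1, {}).update(val)   # merge nested dicts key by key
    else:
        base[key1] = val                        # scalars replace outright
print(base['core']['drop_table'])               # drop table if exists {table}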
def connect(self):
'Connect to Database'
self.engine = self.get_engine()
self.connection = self.engine.connect()
| -1,046,290,792,239,417,200
|
Connect to Database
|
xutil/database/base.py
|
connect
|
flarco/n1slutil
|
python
|
def connect(self):
self.engine = self.get_engine()
self.connection = self.engine.connect()
|
def close(self):
'Close database connection'
self.conn.connection.close()
| -5,488,695,872,408,102,000
|
Close database connection
|
xutil/database/base.py
|
close
|
flarco/n1slutil
|
python
|
def close(self):
self.conn.connection.close()
|
def reconnect(self, min_tresh=0):
'Re-Connect to Database if minute threshold reached'
if ((now() - self.last_connect).total_seconds() > (min_tresh * 60)):
log('Reconnecting to {}...'.format(self.name))
self.connect()
self.last_connect = now()
| -6,871,993,079,269,828,000
|
Re-Connect to Database if minute threshold reached
|
xutil/database/base.py
|
reconnect
|
flarco/n1slutil
|
python
|
def reconnect(self, min_tresh=0):
if ((now() - self.last_connect).total_seconds() > (min_tresh * 60)):
log('Reconnecting to {}...'.format(self.name))
self.connect()
self.last_connect = now()
|
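A minimal usage sketch (assuming `conn` is an instance of a concrete subclass of this connection class); the threshold is in minutes, so a stale connection is only refreshed when it is older than that:

conn.reconnect(min_tresh=30)  # reconnects only if the last connect was more than 30 minutes ago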
def set_variables(self):
'Set custom variables'
raise Exception("Method 'set_variables' is not implemented!")
| 2,225,049,539,593,413,400
|
Set custom variables
|
xutil/database/base.py
|
set_variables
|
flarco/n1slutil
|
python
|
def set_variables(self):
raise Exception("Method 'set_variables' is not implemented!")
|
def get_dialect(self, echo=False):
'SQLAlchemy dialect'
raise Exception("Method 'get_dialect' is not implemented!")
| -1,235,546,418,178,482,200
|
SQLAlchemy dialect
|
xutil/database/base.py
|
get_dialect
|
flarco/n1slutil
|
python
|
def get_dialect(self, echo=False):
raise Exception("Method 'get_dialect' is not implemented!")
|
def check_pk(self, table, fields):
'Check Primary key to ensure there are not duplicates'
if ('where' in fields.lower()):
(fields, where_clause) = fields.lower().split('where')
where_clause = ('where ' + where_clause)
else:
where_clause = ''
sql = "\n select\n '{table}' as table,\n case when count(1) = count({fields}) then 'PASS' else 'FAIL' end as pk_result\n from {table}\n {where_clause}\n ".format(table=table, fields=fields, where_clause=where_clause)
data = self.query(sql, echo=False)
headers = self._fields
print(ptable(headers, data))
if (data[0].pk_result == 'FAIL'):
        raise Exception('PK Test failed for table "{}" with fields "{}"'.format(table, fields))
| -4,513,796,390,382,553,000
|
Check Primary key to ensure there are not duplicates
|
xutil/database/base.py
|
check_pk
|
flarco/n1slutil
|
python
|
def check_pk(self, table, fields):
if ('where' in fields.lower()):
(fields, where_clause) = fields.lower().split('where')
where_clause = ('where ' + where_clause)
else:
        where_clause = ''
sql = "\n select\n '{table}' as table,\n case when count(1) = count({fields}) then 'PASS' else 'FAIL' end as pk_result\n from {table}\n {where_clause}\n ".format(table=table, fields=fields, where_clause=where_clause)
data = self.query(sql, echo=False)
headers = self._fields
print(ptable(headers, data))
if (data[0].pk_result == 'FAIL'):
        raise Exception('PK Test failed for table "{}" with fields "{}"'.format(table, fields))
|
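A minimal usage sketch (table and field names are hypothetical; `conn` is assumed to be a connected instance). The check raises an exception when the listed fields contain duplicates:

conn.check_pk('analytics.orders', 'order_id')
# an optional where clause restricts the rows being checked
conn.check_pk('analytics.orders', 'order_id where status = 1')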
def execute_multi(self, sql, dtype='namedtuple', limit=None, echo=True, query_name='Record', log=log):
"\n Execute multiple SQL statements separtated by ';'. Returns a generator.\n Example:\n for fields, rows in conn.execute(sql):\n print(fields)\n print(len(rows))\n "
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {'drop ': 'Dropping {}.', 'truncate ': 'Truncating {}.', 'select ': 'Selecting {}.', 'create ': 'Creating {}.', 'insert ': 'Inserting {}.', 'alter ': 'Altering {}.', 'update ': 'Updating {}.', 'delete ': 'Deleting {}.', 'exec ': 'Calling Procedure {}.', 'grant ': 'Granting {}.'}
sqls = sql.split(';')
for sql in sqls:
if (not sql.strip()):
continue
sql_ = sql.strip().lower()
for (word, message) in message_mapping.items():
if sql_.startswith(word):
if echo:
log(message.format(' '.join(sql_.splitlines()[0].split()[1:3]).upper()))
break
if sql_.startswith('exec '):
procedure = sql_[5:].split('(')[0]
args = sql_[5:].split('(')[1][:(- 1)].replace("'", '').split(',')
args = [a.strip() for a in args]
            connection = self.engine.raw_connection()
            try:
                cursor = connection.cursor()
                cursor.callproc(procedure, args)
                connection.commit()
            finally:
                connection.close()
            continue
try:
self._fields = []
rows = self.query(sql, rec_name=query_name, dtype=dtype, limit=limit, echo=echo, log=log)
fields = self._fields
if (('-- pk_test:' in sql.lower()) and sql_.startswith('create')):
sql_lines = sql_.splitlines()
regexp = 'create\\s+table\\s+(\\S*)[\\sa-zA-Z\\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')][0]
fields = line.split(':')[(- 1)]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if (sql_.startswith('drop ') and (self.error_msg['table_not_exist'] in message)):
log('WARNING: Table already dropped.')
else:
raise E
if (not fields):
fields = []
(yield (fields, rows))
| -3,723,596,037,332,706,300
|
Execute multiple SQL statements separated by ';'. Returns a generator.
Example:
for fields, rows in conn.execute(sql):
print(fields)
print(len(rows))
|
xutil/database/base.py
|
execute_multi
|
flarco/n1slutil
|
python
|
def execute_multi(self, sql, dtype='namedtuple', limit=None, echo=True, query_name='Record', log=log):
"\n Execute multiple SQL statements separtated by ';'. Returns a generator.\n Example:\n for fields, rows in conn.execute(sql):\n print(fields)\n print(len(rows))\n "
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {'drop ': 'Dropping {}.', 'truncate ': 'Truncating {}.', 'select ': 'Selecting {}.', 'create ': 'Creating {}.', 'insert ': 'Inserting {}.', 'alter ': 'Altering {}.', 'update ': 'Updating {}.', 'delete ': 'Deleting {}.', 'exec ': 'Calling Procedure {}.', 'grant ': 'Granting {}.'}
sqls = sql.split(';')
for sql in sqls:
if (not sql.strip()):
continue
sql_ = sql.strip().lower()
for (word, message) in message_mapping.items():
if sql_.startswith(word):
if echo:
log(message.format(' '.join(sql_.splitlines()[0].split()[1:3]).upper()))
break
if sql_.startswith('exec '):
procedure = sql_[5:].split('(')[0]
            args = sql_[5:].split('(')[1][:(- 1)].replace("'", '').split(',')
args = [a.strip() for a in args]
            connection = self.engine.raw_connection()
            try:
                cursor = connection.cursor()
                cursor.callproc(procedure, args)
                connection.commit()
            finally:
                connection.close()
            continue
try:
self._fields = []
rows = self.query(sql, rec_name=query_name, dtype=dtype, limit=limit, echo=echo, log=log)
fields = self._fields
if (('-- pk_test:' in sql.lower()) and sql_.startswith('create')):
sql_lines = sql_.splitlines()
regexp = 'create\\s+table\\s+(\\S*)[\\sa-zA-Z\\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')][0]
fields = line.split(':')[(- 1)]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if (sql_.startswith('drop ') and (self.error_msg['table_not_exist'] in message)):
log('WARNING: Table already dropped.')
else:
raise E
if (not fields):
fields = []
(yield (fields, rows))
|
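A minimal usage sketch (hypothetical table names; `conn` assumed connected). Each statement's fields and rows are yielded as soon as that statement finishes:

sql = '''
drop table tmp_orders;
create table tmp_orders as select * from analytics.orders;
select count(1) as cnt from tmp_orders
'''
for fields, rows in conn.execute_multi(sql):
    print(fields, len(rows))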
def execute(self, sql, dtype='tuple', limit=None, echo=True, query_name='Record', log=log):
'Execute SQL, return last result'
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {'drop ': 'Dropping {}.', 'truncate ': 'Truncating {}.', 'select ': 'Selecting {}.', 'create ': 'Creating {}.', 'insert ': 'Inserting {}.', 'alter ': 'Altering {}.', 'update ': 'Updating {}.', 'delete ': 'Deleting {}.', 'exec ': 'Calling Procedure {}.', 'grant ': 'Granting {}.'}
sql_ = sql.strip().lower()
for (word, message) in message_mapping.items():
if sql_.startswith(word):
if echo:
log(message.format(' '.join(sql_.splitlines()[0].split()[1:3]).upper()))
break
if sql_.startswith('exec '):
procedure = sql_[5:].split('(')[0]
args = sql_[5:].split('(')[1][:(- 1)].replace("'", '').split(',')
args = [a.strip() for a in args]
connection = self.engine.raw_connection()
try:
cursor = connection.cursor()
cursor.callproc(procedure, args)
self._fields = self._get_cursor_fields(cursor_desc=cursor.description)
rows = list(cursor.fetchall())
cursor.close()
connection.commit()
            return (self._fields, rows)
finally:
connection.close()
try:
self._fields = []
rows = self.query(sql, rec_name=query_name, dtype=dtype, limit=limit, echo=echo, log=log)
fields = self._fields
if (('-- pk_test:' in sql.lower()) and sql_.startswith('create')):
sql_lines = sql_.splitlines()
regexp = 'create\\s+table\\s+(\\S*)[\\sa-zA-Z\\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')][0]
fields = line.split(':')[(- 1)]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if (sql_.startswith('drop ') and (self.error_msg['table_not_exist'] in message)):
log('WARNING: Table already dropped.')
else:
raise E
if (not fields):
fields = []
return (fields, rows)
| -6,452,716,125,294,067,000
|
Execute SQL, return last result
|
xutil/database/base.py
|
execute
|
flarco/n1slutil
|
python
|
def execute(self, sql, dtype='tuple', limit=None, echo=True, query_name='Record', log=log):
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {'drop ': 'Dropping {}.', 'truncate ': 'Truncating {}.', 'select ': 'Selecting {}.', 'create ': 'Creating {}.', 'insert ': 'Inserting {}.', 'alter ': 'Altering {}.', 'update ': 'Updating {}.', 'delete ': 'Deleting {}.', 'exec ': 'Calling Procedure {}.', 'grant ': 'Granting {}.'}
sql_ = sql.strip().lower()
for (word, message) in message_mapping.items():
if sql_.startswith(word):
if echo:
log(message.format(' '.join(sql_.splitlines()[0].split()[1:3]).upper()))
break
if sql_.startswith('exec '):
procedure = sql_[5:].split('(')[0]
        args = sql_[5:].split('(')[1][:(- 1)].replace("'", '').split(',')
args = [a.strip() for a in args]
connection = self.engine.raw_connection()
try:
cursor = connection.cursor()
cursor.callproc(procedure, args)
self._fields = self._get_cursor_fields(cursor_desc=cursor.description)
rows = list(cursor.fetchall())
cursor.close()
connection.commit()
            return (self._fields, rows)
finally:
connection.close()
try:
self._fields = []
rows = self.query(sql, rec_name=query_name, dtype=dtype, limit=limit, echo=echo, log=log)
fields = self._fields
if (('-- pk_test:' in sql.lower()) and sql_.startswith('create')):
sql_lines = sql_.splitlines()
regexp = 'create\\s+table\\s+(\\S*)[\\sa-zA-Z\\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')][0]
fields = line.split(':')[(- 1)]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if (sql_.startswith('drop ') and (self.error_msg['table_not_exist'] in message)):
log('WARNING: Table already dropped.')
else:
raise E
if (not fields):
fields = []
return (fields, rows)
|
def insert(self, table, data, echo=False):
'Insert records of namedtuple or dicts'
raise Exception('insert not implemented')
| -4,606,335,496,427,840,000
|
Insert records of namedtuple or dicts
|
xutil/database/base.py
|
insert
|
flarco/n1slutil
|
python
|
def insert(self, table, data, echo=False):
raise Exception('insert not implemented')
|
def drop_table(self, table, log=log):
'Drop table'
try:
sql = self._template('core.drop_table').format(table)
self._do_execute(sql)
except Exception as E:
message = get_exception_message().lower()
if (self._template('error_filter.table_not_exist') in message):
if self.echo:
log('Table "{}" already dropped.'.format(table))
else:
raise E
| 315,493,088,537,622,700
|
Drop table
|
xutil/database/base.py
|
drop_table
|
flarco/n1slutil
|
python
|
def drop_table(self, table, log=log):
try:
sql = self._template('core.drop_table').format(table)
self._do_execute(sql)
except Exception as E:
message = get_exception_message().lower()
if (self._template('error_filter.table_not_exist') in message):
if self.echo:
log('Table "{}" already dropped.'.format(table))
else:
raise E
|
def create_table(self, table, field_types, drop=False, log=log):
'Create table'
if drop:
self.drop_table(table, log=log)
new_ftypes = OrderedDict()
for f in field_types:
(ftype, max_len, dec_len) = field_types[f]
if dec_len:
suff = '({},{})'.format(max_len, dec_len)
elif max_len:
suff = '({})'.format(max_len)
else:
suff = ''
new_ftypes[f] = self._template('general_type_map')[ftype].replace('()', suff)
field_types_str = ', \n'.join([((self._fix_f_name(field) + ' ') + new_ftypes[field]) for field in new_ftypes])
sql = self._template('core.create_table').format(table=table, col_types=field_types_str)
try:
self._do_execute(sql)
except Exception as e:
raise e
log('Created table "{}"'.format(table))
| -7,000,479,734,006,737,000
|
Create table
|
xutil/database/base.py
|
create_table
|
flarco/n1slutil
|
python
|
def create_table(self, table, field_types, drop=False, log=log):
if drop:
self.drop_table(table, log=log)
new_ftypes = OrderedDict()
for f in field_types:
(ftype, max_len, dec_len) = field_types[f]
if dec_len:
suff = '({},{})'.format(max_len, dec_len)
elif max_len:
suff = '({})'.format(max_len)
else:
            suff = ''
new_ftypes[f] = self._template('general_type_map')[ftype].replace('()', suff)
field_types_str = ', \n'.join([((self._fix_f_name(field) + ' ') + new_ftypes[field]) for field in new_ftypes])
sql = self._template('core.create_table').format(table=table, col_types=field_types_str)
try:
self._do_execute(sql)
except Exception as e:
raise e
log('Created table "{}"'.format(table))
|
def _get_cursor_fields(self, as_dict=False, native_type=True, cursor_desc=None):
'Get fields of active Select cursor'
fields = OrderedDict()
cursor_desc = (cursor_desc if cursor_desc else self._cursor_description)
if (cursor_desc == None):
return []
for f in cursor_desc:
f_name = f[0].lower()
if as_dict:
if native_type:
f_type = f[1]
else:
f_type = self.reverse_data_map[f[1]]
if ('cx_Oracle.NUMBER' in str(f[1])):
if (f[4] and (f[4] > 11)):
f_type = 'long'
if (f[5] and (f[5] > 0)):
f_type = 'double'
fields[f_name] = f_type
else:
fields[f_name] = None
if as_dict:
return fields
else:
return list(fields.keys())
| 1,978,626,377,709,983,000
|
Get fields of active Select cursor
|
xutil/database/base.py
|
_get_cursor_fields
|
flarco/n1slutil
|
python
|
def _get_cursor_fields(self, as_dict=False, native_type=True, cursor_desc=None):
fields = OrderedDict()
cursor_desc = (cursor_desc if cursor_desc else self._cursor_description)
if (cursor_desc == None):
return []
for f in cursor_desc:
f_name = f[0].lower()
if as_dict:
if native_type:
f_type = f[1]
else:
f_type = self.reverse_data_map[f[1]]
if ('cx_Oracle.NUMBER' in str(f[1])):
if (f[4] and (f[4] > 11)):
f_type = 'long'
if (f[5] and (f[5] > 0)):
f_type = 'double'
fields[f_name] = f_type
else:
fields[f_name] = None
if as_dict:
return fields
else:
return list(fields.keys())
|
def stream(self, sql, rec_name='Record', dtype='namedtuple', yield_chuncks=False, chunk_size=None, limit=None, echo=True):
'Stream Select from SQL, yield records as they come in'
self.reconnect(min_tresh=10)
if echo:
log("Streaming SQL for '{}'.".format(rec_name))
fetch_size = (limit if limit else self.fetch_size)
fetch_size = (chunk_size if chunk_size else fetch_size)
try:
self._do_execute(sql)
except Exception as e:
raise e
if (dtype == 'tuple'):
make_rec = (lambda row: row)
make_batch = (lambda rows: rows)
elif (dtype == 'dataframe'):
yield_chuncks = True
make_batch = (lambda rows: pandas.DataFrame(rows, columns=self._fields))
else:
Record = namedtuple(rec_name.replace(' ', '_').replace('.', '_'), self._fields)
make_rec = (lambda row: Record(*row))
make_batch = (lambda rows: [make_rec(r) for r in rows])
self._stream_counter = 0
while True:
if (not self._fields):
break
rows = self.result.fetchmany(fetch_size)
if rows:
if yield_chuncks:
batch = make_batch(rows)
self._stream_counter += len(batch)
if len(batch):
(yield batch)
else:
for row in rows:
self._stream_counter += 1
(yield make_rec(row))
else:
break
if limit:
break
| -7,889,304,083,964,760,000
|
Stream Select from SQL, yield records as they come in
|
xutil/database/base.py
|
stream
|
flarco/n1slutil
|
python
|
def stream(self, sql, rec_name='Record', dtype='namedtuple', yield_chuncks=False, chunk_size=None, limit=None, echo=True):
self.reconnect(min_tresh=10)
if echo:
log("Streaming SQL for '{}'.".format(rec_name))
fetch_size = (limit if limit else self.fetch_size)
fetch_size = (chunk_size if chunk_size else fetch_size)
try:
self._do_execute(sql)
except Exception as e:
raise e
if (dtype == 'tuple'):
make_rec = (lambda row: row)
make_batch = (lambda rows: rows)
elif (dtype == 'dataframe'):
yield_chuncks = True
make_batch = (lambda rows: pandas.DataFrame(rows, columns=self._fields))
else:
Record = namedtuple(rec_name.replace(' ', '_').replace('.', '_'), self._fields)
make_rec = (lambda row: Record(*row))
make_batch = (lambda rows: [make_rec(r) for r in rows])
self._stream_counter = 0
while True:
if (not self._fields):
break
rows = self.result.fetchmany(fetch_size)
if rows:
if yield_chuncks:
batch = make_batch(rows)
self._stream_counter += len(batch)
if len(batch):
(yield batch)
else:
for row in rows:
self._stream_counter += 1
(yield make_rec(row))
else:
break
if limit:
break
|
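A minimal usage sketch (hypothetical table name). With dtype='dataframe' the generator yields pandas DataFrames of at most chunk_size rows, which keeps memory flat on large result sets:

for df in conn.stream('select * from analytics.events', dtype='dataframe', chunk_size=50000):
    print(len(df))  # replace with any per-chunk processing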
def query(self, sql, rec_name='Record', dtype='namedtuple', limit=None, echo=True, retrying=False, log=log):
'Select from SQL, return list of namedtuples'
self.reconnect(min_tresh=10)
s_t = datetime.datetime.now()
_data = list(self.stream(sql, dtype=dtype, echo=False, limit=limit))
if (not self.result.closed):
self.result.close()
fields = self._fields
if (not fields):
return []
if (dtype == 'namedtuple'):
Record = namedtuple(rec_name.replace(' ', '_').replace('.', '_'), fields)
if limit:
data = [Record(*row) for row in _data]
else:
data = [Record(*row) for row in _data]
elif (dtype == 'tuple'):
if limit:
data = [tuple(row) for row in _data]
else:
data = [tuple(row) for row in _data]
elif (dtype == 'dataframe'):
if limit:
data = pandas.DataFrame([row for row in _data], columns=fields)
else:
data = pandas.DataFrame([row for row in _data], columns=fields)
else:
        raise Exception('{} is not recognized.'.format(dtype))
secs = (datetime.datetime.now() - s_t).total_seconds()
rate = round((len(data) / secs), 1)
if echo:
log(' >>> Got {} rows in {} secs [{} r/s].'.format(len(data), secs, rate))
return data
| -2,469,832,208,219,459,000
|
Select from SQL, return list of namedtuples
|
xutil/database/base.py
|
query
|
flarco/n1slutil
|
python
|
def query(self, sql, rec_name='Record', dtype='namedtuple', limit=None, echo=True, retrying=False, log=log):
self.reconnect(min_tresh=10)
s_t = datetime.datetime.now()
_data = list(self.stream(sql, dtype=dtype, echo=False, limit=limit))
if (not self.result.closed):
self.result.close()
fields = self._fields
if (not fields):
return []
if (dtype == 'namedtuple'):
Record = namedtuple(rec_name.replace(' ', '_').replace('.', '_'), fields)
if limit:
data = [Record(*row) for row in _data]
else:
data = [Record(*row) for row in _data]
elif (dtype == 'tuple'):
if limit:
data = [tuple(row) for row in _data]
else:
data = [tuple(row) for row in _data]
elif (dtype == 'dataframe'):
if limit:
data = pandas.DataFrame([row for row in _data], columns=fields)
else:
data = pandas.DataFrame([row for row in _data], columns=fields)
else:
        raise Exception('{} is not recognized.'.format(dtype))
secs = (datetime.datetime.now() - s_t).total_seconds()
rate = round((len(data) / secs), 1)
if echo:
log(' >>> Got {} rows in {} secs [{} r/s].'.format(len(data), secs, rate))
return data
|
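A minimal usage sketch (hypothetical table name). The dtype argument controls the container the rows come back in:

recs = conn.query('select * from analytics.orders', limit=10)          # list of namedtuples
tups = conn.query('select * from analytics.orders', dtype='tuple')     # list of plain tuples
df = conn.query('select * from analytics.orders', dtype='dataframe')   # pandas DataFrame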
def get_schemas(self, echo=True):
'Get list of schemas.'
Rec = namedtuple('Schemas', 'schema')
self._fields = Rec._fields
sql_tmpl = self._template('metadata.schemas')
if sql_tmpl:
schemas = [r[0] for r in self.query(sql_tmpl)]
else:
self.get_engine(echo=echo)
schemas = self.engine_inspect.get_schema_names()
rows = [Rec(s) for s in schemas]
return rows
| -6,565,010,715,667,272,000
|
Get list of schemas.
|
xutil/database/base.py
|
get_schemas
|
flarco/n1slutil
|
python
|
def get_schemas(self, echo=True):
Rec = namedtuple('Schemas', 'schema')
self._fields = Rec._fields
sql_tmpl = self._template('metadata.schemas')
if sql_tmpl:
schemas = [r[0] for r in self.query(sql_tmpl)]
else:
self.get_engine(echo=echo)
schemas = self.engine_inspect.get_schema_names()
rows = [Rec(s) for s in schemas]
return rows
|
def get_objects(self, schema, object_type='all', echo=True):
"Get metadata for objects. object_type in 'all', 'table', 'view'"
Rec = namedtuple('Table', 'schema object_name object_type')
self._fields = Rec._fields
def get_rec(object_name, object_type):
r_dict = dict(schema=schema, object_name=object_name, object_type=object_type)
return Rec(**r_dict)
if (object_type == 'all'):
table_rows = self.get_tables(schema)
rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]
view_rows = self.get_views(schema)
rows += [get_rec(r.view, 'view') for r in sorted(view_rows)]
elif (object_type == 'table'):
table_rows = self.get_tables(schema)
rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]
elif (object_type == 'view'):
view_rows = self.get_views(schema)
        rows = [get_rec(r.view, 'view') for r in sorted(view_rows)]
else:
raise Exception('Object type "{}" not supported!'.format(object_type))
return rows
| 6,508,891,224,793,525,000
|
Get metadata for objects. object_type in 'all', 'table', 'view'
|
xutil/database/base.py
|
get_objects
|
flarco/n1slutil
|
python
|
def get_objects(self, schema, object_type='all', echo=True):
Rec = namedtuple('Table', 'schema object_name object_type')
self._fields = Rec._fields
def get_rec(object_name, object_type):
r_dict = dict(schema=schema, object_name=object_name, object_type=object_type)
return Rec(**r_dict)
if (object_type == 'all'):
table_rows = self.get_tables(schema)
rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]
view_rows = self.get_views(schema)
rows += [get_rec(r.view, 'view') for r in sorted(view_rows)]
elif (object_type == 'table'):
table_rows = self.get_tables(schema)
rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]
elif (object_type == 'view'):
view_rows = self.get_views(schema)
        rows = [get_rec(r.view, 'view') for r in sorted(view_rows)]
else:
raise Exception('Object type "{}" not supported!'.format(object_type))
return rows
|
def get_tables(self, schema, echo=True):
'Get metadata for tables.'
schemas = (schema if isinstance(schema, list) else [schema])
def get_tables_for(schema):
def get_rec(table):
self._fields = ['schema', 'table']
return tuple([schema, table])
Rec = namedtuple('Table', 'schema table')
self._fields = Rec._fields
r_dict = dict(schema=schema, table=table)
return Rec(**r_dict)
sql_tmpl = self._template('metadata.tables')
if sql_tmpl:
tables = self.query(sql_tmpl.format(schema=schema))
if hasattr(self, '_std_get_tables'):
tables = self._std_get_tables(schema, tables)
else:
self.get_engine(echo=echo)
tables = self.engine_inspect.get_table_names(schema)
return [get_rec(v) for v in sorted(tables)]
rows = []
for schema in schemas:
for row in get_tables_for(schema):
rows.append(row)
return rows
| 4,581,136,877,876,844,500
|
Get metadata for tables.
|
xutil/database/base.py
|
get_tables
|
flarco/n1slutil
|
python
|
def get_tables(self, schema, echo=True):
schemas = (schema if isinstance(schema, list) else [schema])
def get_tables_for(schema):
def get_rec(table):
self._fields = ['schema', 'table']
return tuple([schema, table])
Rec = namedtuple('Table', 'schema table')
self._fields = Rec._fields
r_dict = dict(schema=schema, table=table)
return Rec(**r_dict)
sql_tmpl = self._template('metadata.tables')
if sql_tmpl:
tables = self.query(sql_tmpl.format(schema=schema))
if hasattr(self, '_std_get_tables'):
tables = self._std_get_tables(schema, tables)
else:
self.get_engine(echo=echo)
tables = self.engine_inspect.get_table_names(schema)
return [get_rec(v) for v in sorted(tables)]
rows = []
for schema in schemas:
for row in get_tables_for(schema):
rows.append(row)
return rows
|
def get_views(self, schema, echo=True):
'Get metadata for views.'
schemas = (schema if isinstance(schema, list) else [schema])
def get_views_for(schema):
def get_rec(view):
self._fields = ['schema', 'view']
return tuple([schema, view])
Rec = namedtuple('View', 'schema view')
self._fields = Rec._fields
r_dict = dict(schema=schema, view=view)
return Rec(**r_dict)
sql_tmpl = self._template('metadata.views')
if sql_tmpl:
views = [r[0] for r in self.query(sql_tmpl.format(schema=schema))]
else:
self.get_engine(echo=echo)
views = self.engine_inspect.get_view_names(schema)
return [get_rec(v) for v in sorted(views)]
rows = []
for schema in schemas:
for row in get_views_for(schema):
rows.append(row)
return rows
| -4,287,179,280,659,660,300
|
Get metadata for views.
|
xutil/database/base.py
|
get_views
|
flarco/n1slutil
|
python
|
def get_views(self, schema, echo=True):
schemas = (schema if isinstance(schema, list) else [schema])
def get_views_for(schema):
def get_rec(view):
self._fields = ['schema', 'view']
return tuple([schema, view])
Rec = namedtuple('View', 'schema view')
self._fields = Rec._fields
r_dict = dict(schema=schema, view=view)
return Rec(**r_dict)
sql_tmpl = self._template('metadata.views')
if sql_tmpl:
views = [r[0] for r in self.query(sql_tmpl.format(schema=schema))]
else:
self.get_engine(echo=echo)
views = self.engine_inspect.get_view_names(schema)
return [get_rec(v) for v in sorted(views)]
rows = []
for schema in schemas:
for row in get_views_for(schema):
rows.append(row)
return rows
|
def get_columns(self, table_name, object_type=None, echo=False, include_schema_table=True, native_type=True):
'Get column metadata for table'
if include_schema_table:
headers = 'schema table id column_name type nullable default autoincrement'
else:
headers = 'id column_name type nullable default autoincrement'
Rec = namedtuple('Columns', headers)
self._fields = Rec._fields
all_rows = []
table_names = (table_name if isinstance(table_name, list) else [table_name])
for table_name in table_names:
(schema, table) = self._split_schema_table(table_name)
def get_rec(r_dict, column_order):
if include_schema_table:
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['column_name'] = r_dict['name']
r_dict['type'] = str(r_dict['type'])
if (not native_type):
r_dict['type'] = r_dict['type'].lower()
r_dict['type'] = (r_dict['type'].split('(')[0] if ('(' in r_dict['type']) else r_dict['type'])
native_type_map = self._template('native_type_map')
if (not (r_dict['type'] in native_type_map)):
raise Exception('Field type "{}" not in native_type_map for {}'.format(r_dict['type'], self.type))
r_dict['type'] = native_type_map[r_dict['type']]
r_dict['id'] = column_order
for k in list(r_dict):
if (k not in headers.split()):
del r_dict[k]
if ('(' in r_dict['type']):
r_dict['type'] = r_dict['type'].split('(')[0]
return Rec(**r_dict)
sql_tmpl = self._template('metadata.columns')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
if hasattr(self, '_std_get_columns'):
rows = self._std_get_columns(schema, table, rows)
else:
self.get_engine(echo=echo)
rows = self.engine_inspect.get_columns(table, schema=schema)
all_rows += [get_rec(r_dict, (i + 1)) for (i, r_dict) in enumerate(rows)]
self._fields = Rec._fields
return all_rows
| 8,247,970,183,440,504,000
|
Get column metadata for table
|
xutil/database/base.py
|
get_columns
|
flarco/n1slutil
|
python
|
def get_columns(self, table_name, object_type=None, echo=False, include_schema_table=True, native_type=True):
if include_schema_table:
headers = 'schema table id column_name type nullable default autoincrement'
else:
headers = 'id column_name type nullable default autoincrement'
Rec = namedtuple('Columns', headers)
self._fields = Rec._fields
all_rows = []
table_names = (table_name if isinstance(table_name, list) else [table_name])
for table_name in table_names:
(schema, table) = self._split_schema_table(table_name)
def get_rec(r_dict, column_order):
if include_schema_table:
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['column_name'] = r_dict['name']
r_dict['type'] = str(r_dict['type'])
if (not native_type):
r_dict['type'] = r_dict['type'].lower()
r_dict['type'] = (r_dict['type'].split('(')[0] if ('(' in r_dict['type']) else r_dict['type'])
native_type_map = self._template('native_type_map')
if (not (r_dict['type'] in native_type_map)):
raise Exception('Field type "{}" not in native_type_map for {}'.format(r_dict['type'], self.type))
r_dict['type'] = native_type_map[r_dict['type']]
r_dict['id'] = column_order
for k in list(r_dict):
if (k not in headers.split()):
del r_dict[k]
if ('(' in r_dict['type']):
r_dict['type'] = r_dict['type'].split('(')[0]
return Rec(**r_dict)
sql_tmpl = self._template('metadata.columns')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
if hasattr(self, '_std_get_columns'):
rows = self._std_get_columns(schema, table, rows)
else:
self.get_engine(echo=echo)
rows = self.engine_inspect.get_columns(table, schema=schema)
all_rows += [get_rec(r_dict, (i + 1)) for (i, r_dict) in enumerate(rows)]
self._fields = Rec._fields
return all_rows
|
def get_primary_keys(self, table_name, echo=False):
'Get PK metadata for table'
Rec = namedtuple('PKs', 'schema table pk_name column_name column_order')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
def get_rec(col, pk_name, column_order):
r_dict = {}
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['pk_name'] = pk_name
r_dict['column_name'] = col
r_dict['column_order'] = column_order
return Rec(**r_dict)
sql_tmpl = self._template('metadata.primary_keys')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
else:
self.get_engine(echo=echo)
r_dict = self.engine_inspect.get_pk_constraint(table, schema=schema)
rows = [get_rec(col, r_dict['name'], (i + 1)) for (i, col) in enumerate(r_dict['constrained_columns'])]
return rows
| 2,235,318,896,555,382,800
|
Get PK metadata for table
|
xutil/database/base.py
|
get_primary_keys
|
flarco/n1slutil
|
python
|
def get_primary_keys(self, table_name, echo=False):
Rec = namedtuple('PKs', 'schema table pk_name column_name column_order')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
def get_rec(col, pk_name, column_order):
r_dict = {}
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['pk_name'] = pk_name
r_dict['column_name'] = col
r_dict['column_order'] = column_order
return Rec(**r_dict)
sql_tmpl = self._template('metadata.primary_keys')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
else:
self.get_engine(echo=echo)
r_dict = self.engine_inspect.get_pk_constraint(table, schema=schema)
rows = [get_rec(col, r_dict['name'], (i + 1)) for (i, col) in enumerate(r_dict['constrained_columns'])]
return rows
|
def get_indexes(self, table_name, echo=False):
'Get indexes metadata for table'
Rec = namedtuple('Indexes', 'schema table index_name column_name column_order unique')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
def get_rec(r_dict):
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['index_name'] = r_dict['name']
r_dict['unique'] = str(r_dict['unique'])
del r_dict['name']
for (i, col) in enumerate(r_dict['column_names']):
r_dict['column_name'] = col
r_dict['column_order'] = (i + 1)
(yield Rec(**r_dict))
sql_tmpl = self._template('metadata.indexes')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
else:
self.get_engine(echo=echo)
rows = self.engine_inspect.get_indexes(table, schema=schema)
rows = [get_rec(r_dict) for r_dict in rows]
return rows
| -815,508,692,130,674,400
|
Get indexes metadata for table
|
xutil/database/base.py
|
get_indexes
|
flarco/n1slutil
|
python
|
def get_indexes(self, table_name, echo=False):
Rec = namedtuple('Indexes', 'schema table index_name column_name column_order unique')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
def get_rec(r_dict):
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['index_name'] = r_dict['name']
r_dict['unique'] = str(r_dict['unique'])
del r_dict['name']
for (i, col) in enumerate(r_dict['column_names']):
r_dict['column_name'] = col
r_dict['column_order'] = (i + 1)
(yield Rec(**r_dict))
sql_tmpl = self._template('metadata.indexes')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
else:
self.get_engine(echo=echo)
rows = self.engine_inspect.get_indexes(table, schema=schema)
rows = [get_rec(r_dict) for r_dict in rows]
return rows
|
def get_ddl(self, table_name, object_type=None, echo=True):
'Get ddl for table'
Rec = namedtuple('DDL', 'ddl')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
sql_tmpl = self._template('metadata.ddl')
if sql_tmpl:
rows = self.query(sql_tmpl.format(schema=schema, table=table, obj_type=object_type))
else:
self.get_engine(echo=echo)
ddl = self.engine_inspect.get_view_definition(table, schema=schema)
rows = ([Rec(ddl)] if ddl else [])
self._fields = Rec._fields
return rows
| 7,846,279,401,217,097,000
|
Get ddl for table
|
xutil/database/base.py
|
get_ddl
|
flarco/n1slutil
|
python
|
def get_ddl(self, table_name, object_type=None, echo=True):
Rec = namedtuple('DDL', 'ddl')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
sql_tmpl = self._template('metadata.ddl')
if sql_tmpl:
rows = self.query(sql_tmpl.format(schema=schema, table=table, obj_type=object_type))
else:
self.get_engine(echo=echo)
ddl = self.engine_inspect.get_view_definition(table, schema=schema)
rows = ([Rec(ddl)] if ddl else [])
self._fields = Rec._fields
return rows
|
def get_all_columns(self):
'Get all columns for all tables / views'
sql_tmpl = self._template('metadata.all_columns')
if (not sql_tmpl):
raise Exception('get_all_columns not implemented for {}'.format(self.type))
rows = self.query(sql_tmpl)
return rows
| -4,695,411,077,918,565,000
|
Get all columns for all tables / views
|
xutil/database/base.py
|
get_all_columns
|
flarco/n1slutil
|
python
|
def get_all_columns(self):
sql_tmpl = self._template('metadata.all_columns')
if (not sql_tmpl):
raise Exception('get_all_columns not implemented for {}'.format(self.type))
rows = self.query(sql_tmpl)
return rows
|
def get_all_tables(self, filter, as_sql=False):
'Get all tables / views'
sql_tmpl = self._template('metadata.all_tables')
if (not sql_tmpl):
raise Exception('get_all_tables not implemented for {}'.format(self.type))
sql = sql_tmpl.format(filter=filter)
return (sql if as_sql else self.query(sql, echo=False))
| -5,292,261,201,790,711,000
|
Get all tables / views
|
xutil/database/base.py
|
get_all_tables
|
flarco/n1slutil
|
python
|
def get_all_tables(self, filter, as_sql=False):
sql_tmpl = self._template('metadata.all_tables')
if (not sql_tmpl):
raise Exception('get_all_tables not implemented for {}'.format(self.type))
sql = sql_tmpl.format(filter=filter)
return (sql if as_sql else self.query(sql, echo=False))
|
def analyze_fields(self, analysis, table_name, fields=[], as_sql=False, union=True, expr_func_map={}, **kwargs):
'Base function for field level analysis\n expr_func_map: contains mapping for expression to SQL function to all fields\n '
if ('.' not in table_name):
raise Exception("table_name must have schema and name in it with a '.'")
if (analysis not in self.template_dict['analysis']):
raise Exception("'{}' not found in template for '{}'.".format(analysis, self.type))
(schema, table) = self._split_schema_table(table_name)
field_rows = self.get_columns(table_name)
field_type = {r.column_name.lower(): r.type for r in field_rows}
if (not fields):
fields = [r.column_name for r in field_rows]
for expr in list(expr_func_map):
tmpl_path = ('function.' + expr_func_map[expr])
expr_func_map[expr] = ',\n'.join([self._template(tmpl_path).format(field=field) for field in [r.column_name for r in field_rows]])
sep = (' \nunion all\n' if union else ' \n ;\n')
sql = sep.join([self._template(('analysis.' + analysis)).format(schema=schema, field=field, table=table, type=(field_type[field.lower()] if field else ''), **expr_func_map, **kwargs) for field in fields])
return (sql if as_sql else self.query(sql, analysis, echo=False))
| 8,955,735,028,112,068,000
|
Base function for field level analysis
expr_func_map: contains mapping for expression to SQL function to all fields
|
xutil/database/base.py
|
analyze_fields
|
flarco/n1slutil
|
python
|
def analyze_fields(self, analysis, table_name, fields=[], as_sql=False, union=True, expr_func_map={}, **kwargs):
'Base function for field level analysis\n expr_func_map: contains mapping for expression to SQL function to all fields\n '
if ('.' not in table_name):
raise Exception("table_name must have schema and name in it with a '.'")
if (analysis not in self.template_dict['analysis']):
raise Exception("'{}' not found in template for '{}'.".format(analysis, self.type))
(schema, table) = self._split_schema_table(table_name)
field_rows = self.get_columns(table_name)
field_type = {r.column_name.lower(): r.type for r in field_rows}
if (not fields):
fields = [r.column_name for r in field_rows]
for expr in list(expr_func_map):
tmpl_path = ('function.' + expr_func_map[expr])
expr_func_map[expr] = ',\n'.join([self._template(tmpl_path).format(field=field) for field in [r.column_name for r in field_rows]])
sep = (' \nunion all\n' if union else ' \n ;\n')
    sql = sep.join([self._template(('analysis.' + analysis)).format(schema=schema, field=field, table=table, type=(field_type[field.lower()] if field else ''), **expr_func_map, **kwargs) for field in fields])
return (sql if as_sql else self.query(sql, analysis, echo=False))
|
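A minimal usage sketch; 'distro' stands in for any analysis key defined under the 'analysis' section of the YAML template for this database type, and the table/field names are hypothetical:

sql = conn.analyze_fields('distro', 'analytics.orders', fields=['status'], as_sql=True)
print(sql)  # inspect the generated union-all statement
rows = conn.analyze_fields('distro', 'analytics.orders', fields=['status'])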
def analyze_tables(self, analysis, tables=[], as_sql=False, **kwargs):
'Base function for table level analysis'
if (analysis not in self.template_dict['analysis']):
raise Exception("'{}' not found in template for '{}'.".format(analysis, self.type))
if ((not tables) and ('schema' in kwargs)):
rows = self.get_schemas(kwargs['schema'])
crt_obj = (lambda r: struct(dict(schema=r.schema, table=r.object_name)))
objs = [crt_obj(r) for r in rows]
else:
crt_obj = (lambda schema, table: struct(dict(schema=schema, table=table)))
objs = [crt_obj(*self._split_schema_table(t)) for t in tables]
sql = ' \nunion all\n'.join([self._template(('analysis.' + analysis)).format(schema=obj.schema, table=obj.table, **kwargs) for obj in objs])
return (sql if as_sql else self.query(sql, analysis, echo=False))
| 4,400,742,215,244,150,000
|
Base function for table level analysis
|
xutil/database/base.py
|
analyze_tables
|
flarco/n1slutil
|
python
|
def analyze_tables(self, analysis, tables=[], as_sql=False, **kwargs):
if (analysis not in self.template_dict['analysis']):
raise Exception("'{}' not found in template for '{}'.".format(analysis, self.type))
if ((not tables) and ('schema' in kwargs)):
rows = self.get_schemas(kwargs['schema'])
crt_obj = (lambda r: struct(dict(schema=r.schema, table=r.object_name)))
objs = [crt_obj(r) for r in rows]
else:
crt_obj = (lambda schema, table: struct(dict(schema=schema, table=table)))
objs = [crt_obj(*self._split_schema_table(t)) for t in tables]
sql = ' \nunion all\n'.join([self._template(('analysis.' + analysis)).format(schema=obj.schema, table=obj.table, **kwargs) for obj in objs])
return (sql if as_sql else self.query(sql, analysis, echo=False))
|
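A minimal usage sketch; 'counts' stands in for any table-level analysis key defined in the template, and the table names are hypothetical:

rows = conn.analyze_tables('counts', tables=['analytics.orders', 'analytics.events'])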
def begin_delete(self, resource_group_name, route_table_name, route_name, **kwargs):
'Deletes the specified route from a route table.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param route_table_name: The name of the route table.\n :type route_table_name: str\n :param route_name: The name of the route.\n :type route_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._delete_initial(resource_group_name=resource_group_name, route_table_name=route_table_name, route_name=route_name, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'routeTableName': self._serialize.url('route_table_name', route_table_name, 'str'), 'routeName': self._serialize.url('route_name', route_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
| 2,944,561,345,238,298,600
|
Deletes the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/operations/_routes_operations.py
|
begin_delete
|
4thel00z/microsoft-crap-that-doesnt-work
|
python
|
def begin_delete(self, resource_group_name, route_table_name, route_name, **kwargs):
'Deletes the specified route from a route table.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param route_table_name: The name of the route table.\n :type route_table_name: str\n :param route_name: The name of the route.\n :type route_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._delete_initial(resource_group_name=resource_group_name, route_table_name=route_table_name, route_name=route_name, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'routeTableName': self._serialize.url('route_table_name', route_table_name, 'str'), 'routeName': self._serialize.url('route_name', route_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
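A minimal usage sketch, assuming the operation is reached through a NetworkManagementClient from azure-mgmt-network; the resource names and subscription id are placeholders:

from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

client = NetworkManagementClient(DefaultAzureCredential(), '<subscription-id>')
poller = client.routes.begin_delete('my-rg', 'my-route-table', 'my-route')
poller.result()  # block until the long-running delete completes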
def get(self, resource_group_name, route_table_name, route_name, **kwargs):
'Gets the specified route from a route table.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param route_table_name: The name of the route table.\n :type route_table_name: str\n :param route_name: The name of the route.\n :type route_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: Route, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2020_08_01.models.Route\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2020-08-01'
accept = 'application/json'
url = self.get.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'routeTableName': self._serialize.url('route_table_name', route_table_name, 'str'), 'routeName': self._serialize.url('route_name', route_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
| -1,434,715,254,335,339,000
|
Gets the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Route, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.Route
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/operations/_routes_operations.py
|
get
|
4thel00z/microsoft-crap-that-doesnt-work
|
python
|
def get(self, resource_group_name, route_table_name, route_name, **kwargs):
'Gets the specified route from a route table.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param route_table_name: The name of the route table.\n :type route_table_name: str\n :param route_name: The name of the route.\n :type route_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: Route, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2020_08_01.models.Route\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2020-08-01'
accept = 'application/json'
url = self.get.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'routeTableName': self._serialize.url('route_table_name', route_table_name, 'str'), 'routeName': self._serialize.url('route_name', route_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
|
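A minimal usage sketch, reusing the hypothetical `client` from the sketch above (names are placeholders); the call returns a deserialized Route model:

route = client.routes.get('my-rg', 'my-route-table', 'my-route')
print(route.next_hop_type, route.address_prefix)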
def begin_create_or_update(self, resource_group_name, route_table_name, route_name, route_parameters, **kwargs):
'Creates or updates a route in the specified route table.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param route_table_name: The name of the route table.\n :type route_table_name: str\n :param route_name: The name of the route.\n :type route_name: str\n :param route_parameters: Parameters supplied to the create or update route operation.\n :type route_parameters: ~azure.mgmt.network.v2020_08_01.models.Route\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either Route or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_08_01.models.Route]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._create_or_update_initial(resource_group_name=resource_group_name, route_table_name=route_table_name, route_name=route_name, route_parameters=route_parameters, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'routeTableName': self._serialize.url('route_table_name', route_table_name, 'str'), 'routeName': self._serialize.url('route_name', route_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
| -6,914,312,637,091,370,000
|
Creates or updates a route in the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param route_parameters: Parameters supplied to the create or update route operation.
:type route_parameters: ~azure.mgmt.network.v2020_08_01.models.Route
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Route or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_08_01.models.Route]
:raises ~azure.core.exceptions.HttpResponseError:
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/operations/_routes_operations.py
|
begin_create_or_update
|
4thel00z/microsoft-crap-that-doesnt-work
|
python
|
def begin_create_or_update(self, resource_group_name, route_table_name, route_name, route_parameters, **kwargs):
'Creates or updates a route in the specified route table.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param route_table_name: The name of the route table.\n :type route_table_name: str\n :param route_name: The name of the route.\n :type route_name: str\n :param route_parameters: Parameters supplied to the create or update route operation.\n :type route_parameters: ~azure.mgmt.network.v2020_08_01.models.Route\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either Route or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_08_01.models.Route]\n :raises ~azure.core.exceptions.HttpResponseError:\n '
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
cont_token = kwargs.pop('continuation_token', None)
if (cont_token is None):
raw_result = self._create_or_update_initial(resource_group_name=resource_group_name, route_table_name=route_table_name, route_name=route_name, route_parameters=route_parameters, cls=(lambda x, y, z: x), **kwargs)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'routeTableName': self._serialize.url('route_table_name', route_table_name, 'str'), 'routeName': self._serialize.url('route_name', route_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
if (polling is True):
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif (polling is False):
polling_method = NoPolling()
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
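A minimal usage sketch (resource names are placeholders); the Route model is the one referenced by the docstring's type annotations:

from azure.mgmt.network.v2020_08_01.models import Route

poller = client.routes.begin_create_or_update(
    'my-rg', 'my-route-table', 'my-route',
    Route(address_prefix='10.1.0.0/16', next_hop_type='VnetLocal'))
route = poller.result()
print(route.provisioning_state)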
def list(self, resource_group_name, route_table_name, **kwargs):
'Gets all routes in a route table.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param route_table_name: The name of the route table.\n :type route_table_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either RouteListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_08_01.models.RouteListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2020-08-01'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'routeTableName': self._serialize.url('route_table_name', route_table_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), iter(list_of_elem))
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
| 621,652,106,427,642,600
|
Gets all routes in a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_08_01.models.RouteListResult]
:raises: ~azure.core.exceptions.HttpResponseError
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/operations/_routes_operations.py
|
list
|
4thel00z/microsoft-crap-that-doesnt-work
|
python
|
def list(self, resource_group_name, route_table_name, **kwargs):
'Gets all routes in a route table.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param route_table_name: The name of the route table.\n :type route_table_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either RouteListResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_08_01.models.RouteListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n '
cls = kwargs.pop('cls', None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = '2020-08-01'
accept = 'application/json'
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header('accept', accept, 'str')
if (not next_link):
url = self.list.metadata['url']
path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'routeTableName': self._serialize.url('route_table_name', route_table_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return ((deserialized.next_link or None), iter(list_of_elem))
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if (response.status_code not in [200]):
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
|
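Because list returns an ItemPaged, consuming the results is plain iteration; the pager follows next_link across pages internally. A short sketch reusing the hypothetical client and resource names from the previous example:

# Each iteration yields a Route model; paging requests happen lazily behind the scenes.
for route in client.routes.list(resource_group_name="example-rg", route_table_name="example-rt"):
    print(route.name, route.address_prefix, route.next_hop_type)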
def describe(self, onlyActive=True):
'Return a description of the current state of all active light sources.\n\n If onlyActive is False, then information for all sources will be returned, whether or not they are active.\n '
if onlyActive:
return OrderedDict([(n, s) for (n, s) in self._sources.items() if s['active']])
else:
return self._sources.copy()
| -3,250,613,416,447,644,700
|
Return a description of the current state of all active light sources.
If onlyActive is False, then information for all sources will be returned, whether or not they are active.
|
acq4/devices/LightSource/LightSource.py
|
describe
|
RonnyBergmann/acq4
|
python
|
def describe(self, onlyActive=True):
'Return a description of the current state of all active light sources.\n\n If onlyActive is False, then information for all sources will be returned, whether or not they are active.\n '
if onlyActive:
return OrderedDict([(n, s) for (n, s) in self._sources.items() if s['active']])
else:
return self._sources.copy()
|
def activeSources(self):
'Return the names of all active light sources.\n '
return [s['name'] for s in self._sources if s['active']]
| -5,335,197,598,235,236,000
|
Return the names of all active light sources.
|
acq4/devices/LightSource/LightSource.py
|
activeSources
|
RonnyBergmann/acq4
|
python
|
def activeSources(self):
'\n '
return [s['name'] for s in self._sources if s['active']]
|
def sourceActive(self, name):
'Return True if the named light source is currently active.\n '
return self._sources[name]['active']
| -8,691,415,396,041,450,000
|
Return True if the named light source is currently active.
|
acq4/devices/LightSource/LightSource.py
|
sourceActive
|
RonnyBergmann/acq4
|
python
|
def sourceActive(self, name):
'\n '
return self._sources[name]['active']
|
def setSourceActive(self, name, active):
'Activate / deactivate a light source.\n '
raise NotImplementedError()
| -3,596,929,846,804,946,000
|
Activate / deactivate a light source.
|
acq4/devices/LightSource/LightSource.py
|
setSourceActive
|
RonnyBergmann/acq4
|
python
|
def setSourceActive(self, name, active):
'\n '
raise NotImplementedError()
|
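The four LightSource accessors above (describe, activeSources, sourceActive, setSourceActive) all read a per-source state dict keyed by source name. A toy sketch, not part of acq4, with the dict layout inferred from those accessors and a trivial setSourceActive standing in for the real hardware call:

from collections import OrderedDict

class ToyLightSource:
    # Illustrative only: mirrors the accessor pattern above over a plain dict.
    def __init__(self):
        self._sources = OrderedDict()

    def addSource(self, name):
        self._sources[name] = {'name': name, 'active': False}

    def sourceActive(self, name):
        return self._sources[name]['active']

    def setSourceActive(self, name, active):
        # The real base class raises NotImplementedError; a toy backend just flips the flag.
        self._sources[name]['active'] = bool(active)

    def describe(self, onlyActive=True):
        if onlyActive:
            return OrderedDict([(n, s) for (n, s) in self._sources.items() if s['active']])
        return self._sources.copy()

lamp = ToyLightSource()
lamp.addSource('blue LED')
lamp.setSourceActive('blue LED', True)
print(lamp.describe())  # only the active source is reported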
@staticmethod
async def create(coin_store: CoinStore, block_store: BlockStore, consensus_constants: ConsensusConstants, hint_store: HintStore):
'\n Initializes a blockchain with the BlockRecords from disk, assuming they have all been\n validated. Uses the genesis block given in override_constants, or as a fallback,\n in the consensus constants config.\n '
self = Blockchain()
self.lock = asyncio.Lock()
self.compact_proof_lock = asyncio.Lock()
cpu_count = multiprocessing.cpu_count()
if (cpu_count > 61):
cpu_count = 61
num_workers = max((cpu_count - 2), 1)
self.pool = ProcessPoolExecutor(max_workers=num_workers)
log.info(f'Started {num_workers} processes for block validation')
self.constants = consensus_constants
self.coin_store = coin_store
self.block_store = block_store
self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
self._shut_down = False
(await self._load_chain_from_store())
self._seen_compact_proofs = set()
self.hint_store = hint_store
return self
| -5,395,361,207,020,217,000
|
Initializes a blockchain with the BlockRecords from disk, assuming they have all been
validated. Uses the genesis block given in override_constants, or as a fallback,
in the consensus constants config.
|
kujenga/consensus/blockchain.py
|
create
|
Kujenga-Network/kujenga-blockchain
|
python
|
@staticmethod
async def create(coin_store: CoinStore, block_store: BlockStore, consensus_constants: ConsensusConstants, hint_store: HintStore):
'\n Initializes a blockchain with the BlockRecords from disk, assuming they have all been\n validated. Uses the genesis block given in override_constants, or as a fallback,\n in the consensus constants config.\n '
self = Blockchain()
self.lock = asyncio.Lock()
self.compact_proof_lock = asyncio.Lock()
cpu_count = multiprocessing.cpu_count()
if (cpu_count > 61):
cpu_count = 61
num_workers = max((cpu_count - 2), 1)
self.pool = ProcessPoolExecutor(max_workers=num_workers)
log.info(f'Started {num_workers} processes for block validation')
self.constants = consensus_constants
self.coin_store = coin_store
self.block_store = block_store
self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
self._shut_down = False
(await self._load_chain_from_store())
self._seen_compact_proofs = set()
self.hint_store = hint_store
return self
|
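Since create is an async factory that loads BlockRecords from disk and starts a validation process pool, a Blockchain is constructed inside an event loop. A hedged sketch in which the store objects and consensus constants are taken as given (their construction is not shown in this excerpt):

import asyncio

async def make_blockchain(coin_store, block_store, consensus_constants, hint_store):
    # The factory awaits _load_chain_from_store() before returning, so the
    # peak (if any) is already available on the returned object.
    blockchain = await Blockchain.create(coin_store, block_store, consensus_constants, hint_store)
    peak = blockchain.get_peak()
    print("peak height:", None if peak is None else peak.height)
    return blockchain

# asyncio.run(make_blockchain(coin_store, block_store, consensus_constants, hint_store))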
async def _load_chain_from_store(self) -> None:
'\n Initializes the state of the Blockchain class from the database.\n '
(height_to_hash, sub_epoch_summaries) = (await self.block_store.get_peak_height_dicts())
self.__height_to_hash = height_to_hash
self.__sub_epoch_summaries = sub_epoch_summaries
self.__block_records = {}
self.__heights_in_cache = {}
(block_records, peak) = (await self.block_store.get_block_records_close_to_peak(self.constants.BLOCKS_CACHE_SIZE))
for block in block_records.values():
self.add_block_record(block)
if (len(block_records) == 0):
assert (peak is None)
self._peak_height = None
return None
assert (peak is not None)
self._peak_height = self.block_record(peak).height
assert (len(self.__height_to_hash) == (self._peak_height + 1))
| -4,490,703,506,348,418,000
|
Initializes the state of the Blockchain class from the database.
|
kujenga/consensus/blockchain.py
|
_load_chain_from_store
|
Kujenga-Network/kujenga-blockchain
|
python
|
async def _load_chain_from_store(self) -> None:
'\n \n '
(height_to_hash, sub_epoch_summaries) = (await self.block_store.get_peak_height_dicts())
self.__height_to_hash = height_to_hash
self.__sub_epoch_summaries = sub_epoch_summaries
self.__block_records = {}
self.__heights_in_cache = {}
(block_records, peak) = (await self.block_store.get_block_records_close_to_peak(self.constants.BLOCKS_CACHE_SIZE))
for block in block_records.values():
self.add_block_record(block)
if (len(block_records) == 0):
assert (peak is None)
self._peak_height = None
return None
assert (peak is not None)
self._peak_height = self.block_record(peak).height
assert (len(self.__height_to_hash) == (self._peak_height + 1))
|
def get_peak(self) -> Optional[BlockRecord]:
'\n Return the peak of the blockchain\n '
if (self._peak_height is None):
return None
return self.height_to_block_record(self._peak_height)
| 8,711,432,366,473,672,000
|
Return the peak of the blockchain
|
kujenga/consensus/blockchain.py
|
get_peak
|
Kujenga-Network/kujenga-blockchain
|
python
|
def get_peak(self) -> Optional[BlockRecord]:
'\n \n '
if (self._peak_height is None):
return None
return self.height_to_block_record(self._peak_height)
|
async def receive_block(self, block: FullBlock, pre_validation_result: Optional[PreValidationResult]=None, fork_point_with_peak: Optional[uint32]=None) -> Tuple[(ReceiveBlockResult, Optional[Err], Optional[uint32], Tuple[(List[CoinRecord], Dict[(bytes, Dict[(bytes32, CoinRecord)])])])]:
"\n This method must be called under the blockchain lock\n Adds a new block into the blockchain, if it's valid and connected to the current\n blockchain, regardless of whether it is the child of a head, or another block.\n Returns a header if block is added to head. Returns an error if the block is\n invalid. Also returns the fork height, in the case of a new peak.\n "
genesis: bool = (block.height == 0)
if self.contains_block(block.header_hash):
return (ReceiveBlockResult.ALREADY_HAVE_BLOCK, None, None, ([], {}))
if ((not self.contains_block(block.prev_header_hash)) and (not genesis)):
return (ReceiveBlockResult.DISCONNECTED_BLOCK, Err.INVALID_PREV_BLOCK_HASH, None, ([], {}))
if ((not genesis) and ((self.block_record(block.prev_header_hash).height + 1) != block.height)):
return (ReceiveBlockResult.INVALID_BLOCK, Err.INVALID_HEIGHT, None, ([], {}))
npc_result: Optional[NPCResult] = None
if (pre_validation_result is None):
if (block.height == 0):
prev_b: Optional[BlockRecord] = None
else:
prev_b = self.block_record(block.prev_header_hash)
(sub_slot_iters, difficulty) = get_next_sub_slot_iters_and_difficulty(self.constants, (len(block.finished_sub_slots) > 0), prev_b, self)
if block.is_transaction_block():
if (block.transactions_generator is not None):
try:
block_generator: Optional[BlockGenerator] = (await self.get_block_generator(block))
except ValueError:
return (ReceiveBlockResult.INVALID_BLOCK, Err.GENERATOR_REF_HAS_NO_GENERATOR, None, ([], {}))
assert ((block_generator is not None) and (block.transactions_info is not None))
npc_result = get_name_puzzle_conditions(block_generator, min(self.constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost), cost_per_byte=self.constants.COST_PER_BYTE, safe_mode=False)
(removals, tx_additions) = tx_removals_and_additions(npc_result.npc_list)
else:
(removals, tx_additions) = ([], [])
header_block = get_block_header(block, tx_additions, removals)
else:
npc_result = None
header_block = get_block_header(block, [], [])
(required_iters, error) = validate_finished_header_block(self.constants, self, header_block, False, difficulty, sub_slot_iters)
if (error is not None):
return (ReceiveBlockResult.INVALID_BLOCK, error.code, None, ([], {}))
else:
npc_result = pre_validation_result.npc_result
required_iters = pre_validation_result.required_iters
assert (pre_validation_result.error is None)
assert (required_iters is not None)
(error_code, _) = (await validate_block_body(self.constants, self, self.block_store, self.coin_store, self.get_peak(), block, block.height, npc_result, fork_point_with_peak, self.get_block_generator))
if (error_code is not None):
return (ReceiveBlockResult.INVALID_BLOCK, error_code, None, ([], {}))
block_record = block_to_block_record(self.constants, self, required_iters, block, None)
async with self.block_store.db_wrapper.lock:
try:
header_hash: bytes32 = block.header_hash
(await self.block_store.db_wrapper.begin_transaction())
(await self.block_store.add_full_block(header_hash, block, block_record))
(fork_height, peak_height, records, (coin_record_change, hint_changes)) = (await self._reconsider_peak(block_record, genesis, fork_point_with_peak, npc_result))
(await self.block_store.db_wrapper.commit_transaction())
self.add_block_record(block_record)
for fetched_block_record in records:
self.__height_to_hash[fetched_block_record.height] = fetched_block_record.header_hash
if (fetched_block_record.sub_epoch_summary_included is not None):
self.__sub_epoch_summaries[fetched_block_record.height] = fetched_block_record.sub_epoch_summary_included
if (peak_height is not None):
self._peak_height = peak_height
except BaseException:
self.block_store.rollback_cache_block(header_hash)
(await self.block_store.db_wrapper.rollback_transaction())
raise
if (fork_height is not None):
assert (coin_record_change is not None)
return (ReceiveBlockResult.NEW_PEAK, None, fork_height, (coin_record_change, hint_changes))
else:
return (ReceiveBlockResult.ADDED_AS_ORPHAN, None, None, ([], {}))
| 5,664,638,515,299,313,000
|
This method must be called under the blockchain lock
Adds a new block into the blockchain, if it's valid and connected to the current
blockchain, regardless of whether it is the child of a head, or another block.
Returns a header if block is added to head. Returns an error if the block is
invalid. Also returns the fork height, in the case of a new peak.
|
kujenga/consensus/blockchain.py
|
receive_block
|
Kujenga-Network/kujenga-blockchain
|
python
|
async def receive_block(self, block: FullBlock, pre_validation_result: Optional[PreValidationResult]=None, fork_point_with_peak: Optional[uint32]=None) -> Tuple[(ReceiveBlockResult, Optional[Err], Optional[uint32], Tuple[(List[CoinRecord], Dict[(bytes, Dict[(bytes32, CoinRecord)])])])]:
"\n This method must be called under the blockchain lock\n Adds a new block into the blockchain, if it's valid and connected to the current\n blockchain, regardless of whether it is the child of a head, or another block.\n Returns a header if block is added to head. Returns an error if the block is\n invalid. Also returns the fork height, in the case of a new peak.\n "
genesis: bool = (block.height == 0)
if self.contains_block(block.header_hash):
return (ReceiveBlockResult.ALREADY_HAVE_BLOCK, None, None, ([], {}))
if ((not self.contains_block(block.prev_header_hash)) and (not genesis)):
return (ReceiveBlockResult.DISCONNECTED_BLOCK, Err.INVALID_PREV_BLOCK_HASH, None, ([], {}))
if ((not genesis) and ((self.block_record(block.prev_header_hash).height + 1) != block.height)):
return (ReceiveBlockResult.INVALID_BLOCK, Err.INVALID_HEIGHT, None, ([], {}))
npc_result: Optional[NPCResult] = None
if (pre_validation_result is None):
if (block.height == 0):
prev_b: Optional[BlockRecord] = None
else:
prev_b = self.block_record(block.prev_header_hash)
(sub_slot_iters, difficulty) = get_next_sub_slot_iters_and_difficulty(self.constants, (len(block.finished_sub_slots) > 0), prev_b, self)
if block.is_transaction_block():
if (block.transactions_generator is not None):
try:
block_generator: Optional[BlockGenerator] = (await self.get_block_generator(block))
except ValueError:
return (ReceiveBlockResult.INVALID_BLOCK, Err.GENERATOR_REF_HAS_NO_GENERATOR, None, ([], {}))
assert ((block_generator is not None) and (block.transactions_info is not None))
npc_result = get_name_puzzle_conditions(block_generator, min(self.constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost), cost_per_byte=self.constants.COST_PER_BYTE, safe_mode=False)
(removals, tx_additions) = tx_removals_and_additions(npc_result.npc_list)
else:
(removals, tx_additions) = ([], [])
header_block = get_block_header(block, tx_additions, removals)
else:
npc_result = None
header_block = get_block_header(block, [], [])
(required_iters, error) = validate_finished_header_block(self.constants, self, header_block, False, difficulty, sub_slot_iters)
if (error is not None):
return (ReceiveBlockResult.INVALID_BLOCK, error.code, None, ([], {}))
else:
npc_result = pre_validation_result.npc_result
required_iters = pre_validation_result.required_iters
assert (pre_validation_result.error is None)
assert (required_iters is not None)
(error_code, _) = (await validate_block_body(self.constants, self, self.block_store, self.coin_store, self.get_peak(), block, block.height, npc_result, fork_point_with_peak, self.get_block_generator))
if (error_code is not None):
return (ReceiveBlockResult.INVALID_BLOCK, error_code, None, ([], {}))
block_record = block_to_block_record(self.constants, self, required_iters, block, None)
async with self.block_store.db_wrapper.lock:
try:
header_hash: bytes32 = block.header_hash
(await self.block_store.db_wrapper.begin_transaction())
(await self.block_store.add_full_block(header_hash, block, block_record))
(fork_height, peak_height, records, (coin_record_change, hint_changes)) = (await self._reconsider_peak(block_record, genesis, fork_point_with_peak, npc_result))
(await self.block_store.db_wrapper.commit_transaction())
self.add_block_record(block_record)
for fetched_block_record in records:
self.__height_to_hash[fetched_block_record.height] = fetched_block_record.header_hash
if (fetched_block_record.sub_epoch_summary_included is not None):
self.__sub_epoch_summaries[fetched_block_record.height] = fetched_block_record.sub_epoch_summary_included
if (peak_height is not None):
self._peak_height = peak_height
except BaseException:
self.block_store.rollback_cache_block(header_hash)
(await self.block_store.db_wrapper.rollback_transaction())
raise
if (fork_height is not None):
assert (coin_record_change is not None)
return (ReceiveBlockResult.NEW_PEAK, None, fork_height, (coin_record_change, hint_changes))
else:
return (ReceiveBlockResult.ADDED_AS_ORPHAN, None, None, ([], {}))
|
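Callers of receive_block hold the blockchain lock and branch on the ReceiveBlockResult; a simplified sketch with pre-validation and error handling trimmed, assuming ReceiveBlockResult is imported from the same consensus package as Blockchain:

async def add_block(blockchain, full_block):
    async with blockchain.lock:
        result, error, fork_height, _ = await blockchain.receive_block(full_block)
    if result == ReceiveBlockResult.NEW_PEAK:
        print(f"new peak at height {full_block.height}, fork point {fork_height}")
    elif result == ReceiveBlockResult.ADDED_AS_ORPHAN:
        print("block stored, but not on the heaviest chain")
    elif result == ReceiveBlockResult.ALREADY_HAVE_BLOCK:
        print("duplicate block, nothing to do")
    elif result == ReceiveBlockResult.DISCONNECTED_BLOCK:
        print("previous block missing; cannot connect yet")
    else:  # ReceiveBlockResult.INVALID_BLOCK
        print(f"rejected: {error}")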
async def _reconsider_peak(self, block_record: BlockRecord, genesis: bool, fork_point_with_peak: Optional[uint32], npc_result: Optional[NPCResult]) -> Tuple[(Optional[uint32], Optional[uint32], List[BlockRecord], Tuple[(List[CoinRecord], Dict[(bytes, Dict[(bytes32, CoinRecord)])])])]:
'\n When a new block is added, this is called, to check if the new block is the new peak of the chain.\n This also handles reorgs by reverting blocks which are not in the heaviest chain.\n It returns the height of the fork between the previous chain and the new chain, or returns\n None if there was no update to the heaviest chain.\n '
peak = self.get_peak()
lastest_coin_state: Dict[(bytes32, CoinRecord)] = {}
hint_coin_state: Dict[(bytes32, Dict[(bytes32, CoinRecord)])] = {}
if genesis:
if (peak is None):
block: Optional[FullBlock] = (await self.block_store.get_full_block(block_record.header_hash))
assert (block is not None)
if (npc_result is not None):
(tx_removals, tx_additions) = tx_removals_and_additions(npc_result.npc_list)
else:
(tx_removals, tx_additions) = ([], [])
if block.is_transaction_block():
assert (block.foliage_transaction_block is not None)
added = (await self.coin_store.new_block(block.height, block.foliage_transaction_block.timestamp, block.get_included_reward_coins(), tx_additions, tx_removals))
else:
(added, _) = ([], [])
(await self.block_store.set_peak(block_record.header_hash))
return (uint32(0), uint32(0), [block_record], (added, {}))
return (None, None, [], ([], {}))
assert (peak is not None)
if (block_record.weight > peak.weight):
if (block_record.prev_hash == peak.header_hash):
fork_height: int = peak.height
elif (fork_point_with_peak is not None):
fork_height = fork_point_with_peak
else:
fork_height = find_fork_point_in_chain(self, block_record, peak)
if (block_record.prev_hash != peak.header_hash):
roll_changes: List[CoinRecord] = (await self.coin_store.rollback_to_block(fork_height))
for coin_record in roll_changes:
lastest_coin_state[coin_record.name] = coin_record
heights_to_delete = []
for ses_included_height in self.__sub_epoch_summaries.keys():
if (ses_included_height > fork_height):
heights_to_delete.append(ses_included_height)
for height in heights_to_delete:
log.info(f'delete ses at height {height}')
del self.__sub_epoch_summaries[height]
blocks_to_add: List[Tuple[(FullBlock, BlockRecord)]] = []
curr = block_record.header_hash
while ((fork_height < 0) or (curr != self.height_to_hash(uint32(fork_height)))):
fetched_full_block: Optional[FullBlock] = (await self.block_store.get_full_block(curr))
fetched_block_record: Optional[BlockRecord] = (await self.block_store.get_block_record(curr))
assert (fetched_full_block is not None)
assert (fetched_block_record is not None)
blocks_to_add.append((fetched_full_block, fetched_block_record))
if (fetched_full_block.height == 0):
break
curr = fetched_block_record.prev_hash
records_to_add = []
for (fetched_full_block, fetched_block_record) in reversed(blocks_to_add):
records_to_add.append(fetched_block_record)
if fetched_full_block.is_transaction_block():
if (fetched_block_record.header_hash == block_record.header_hash):
(tx_removals, tx_additions, npc_res) = (await self.get_tx_removals_and_additions(fetched_full_block, npc_result))
else:
(tx_removals, tx_additions, npc_res) = (await self.get_tx_removals_and_additions(fetched_full_block, None))
assert (fetched_full_block.foliage_transaction_block is not None)
added_rec = (await self.coin_store.new_block(fetched_full_block.height, fetched_full_block.foliage_transaction_block.timestamp, fetched_full_block.get_included_reward_coins(), tx_additions, tx_removals))
removed_rec: List[Optional[CoinRecord]] = [(await self.coin_store.get_coin_record(name)) for name in tx_removals]
record: Optional[CoinRecord]
for record in added_rec:
assert record
lastest_coin_state[record.name] = record
for record in removed_rec:
assert record
lastest_coin_state[record.name] = record
if (npc_res is not None):
hint_list: List[Tuple[(bytes32, bytes)]] = self.get_hint_list(npc_res)
(await self.hint_store.add_hints(hint_list))
for (coin_id, hint) in hint_list:
key = hint
if (key not in hint_coin_state):
hint_coin_state[key] = {}
hint_coin_state[key][coin_id] = lastest_coin_state[coin_id]
(await self.block_store.set_peak(block_record.header_hash))
return (uint32(max(fork_height, 0)), block_record.height, records_to_add, (list(lastest_coin_state.values()), hint_coin_state))
return (None, None, [], ([], {}))
| 1,807,132,108,882,397,400
|
When a new block is added, this is called, to check if the new block is the new peak of the chain.
This also handles reorgs by reverting blocks which are not in the heaviest chain.
It returns the height of the fork between the previous chain and the new chain, or returns
None if there was no update to the heaviest chain.
|
kujenga/consensus/blockchain.py
|
_reconsider_peak
|
Kujenga-Network/kujenga-blockchain
|
python
|
async def _reconsider_peak(self, block_record: BlockRecord, genesis: bool, fork_point_with_peak: Optional[uint32], npc_result: Optional[NPCResult]) -> Tuple[(Optional[uint32], Optional[uint32], List[BlockRecord], Tuple[(List[CoinRecord], Dict[(bytes, Dict[(bytes32, CoinRecord)])])])]:
'\n When a new block is added, this is called, to check if the new block is the new peak of the chain.\n This also handles reorgs by reverting blocks which are not in the heaviest chain.\n It returns the height of the fork between the previous chain and the new chain, or returns\n None if there was no update to the heaviest chain.\n '
peak = self.get_peak()
lastest_coin_state: Dict[(bytes32, CoinRecord)] = {}
hint_coin_state: Dict[(bytes32, Dict[(bytes32, CoinRecord)])] = {}
if genesis:
if (peak is None):
block: Optional[FullBlock] = (await self.block_store.get_full_block(block_record.header_hash))
assert (block is not None)
if (npc_result is not None):
(tx_removals, tx_additions) = tx_removals_and_additions(npc_result.npc_list)
else:
(tx_removals, tx_additions) = ([], [])
if block.is_transaction_block():
assert (block.foliage_transaction_block is not None)
added = (await self.coin_store.new_block(block.height, block.foliage_transaction_block.timestamp, block.get_included_reward_coins(), tx_additions, tx_removals))
else:
(added, _) = ([], [])
(await self.block_store.set_peak(block_record.header_hash))
return (uint32(0), uint32(0), [block_record], (added, {}))
return (None, None, [], ([], {}))
assert (peak is not None)
if (block_record.weight > peak.weight):
if (block_record.prev_hash == peak.header_hash):
fork_height: int = peak.height
elif (fork_point_with_peak is not None):
fork_height = fork_point_with_peak
else:
fork_height = find_fork_point_in_chain(self, block_record, peak)
if (block_record.prev_hash != peak.header_hash):
roll_changes: List[CoinRecord] = (await self.coin_store.rollback_to_block(fork_height))
for coin_record in roll_changes:
lastest_coin_state[coin_record.name] = coin_record
heights_to_delete = []
for ses_included_height in self.__sub_epoch_summaries.keys():
if (ses_included_height > fork_height):
heights_to_delete.append(ses_included_height)
for height in heights_to_delete:
log.info(f'delete ses at height {height}')
del self.__sub_epoch_summaries[height]
blocks_to_add: List[Tuple[(FullBlock, BlockRecord)]] = []
curr = block_record.header_hash
while ((fork_height < 0) or (curr != self.height_to_hash(uint32(fork_height)))):
fetched_full_block: Optional[FullBlock] = (await self.block_store.get_full_block(curr))
fetched_block_record: Optional[BlockRecord] = (await self.block_store.get_block_record(curr))
assert (fetched_full_block is not None)
assert (fetched_block_record is not None)
blocks_to_add.append((fetched_full_block, fetched_block_record))
if (fetched_full_block.height == 0):
break
curr = fetched_block_record.prev_hash
records_to_add = []
for (fetched_full_block, fetched_block_record) in reversed(blocks_to_add):
records_to_add.append(fetched_block_record)
if fetched_full_block.is_transaction_block():
if (fetched_block_record.header_hash == block_record.header_hash):
(tx_removals, tx_additions, npc_res) = (await self.get_tx_removals_and_additions(fetched_full_block, npc_result))
else:
(tx_removals, tx_additions, npc_res) = (await self.get_tx_removals_and_additions(fetched_full_block, None))
assert (fetched_full_block.foliage_transaction_block is not None)
added_rec = (await self.coin_store.new_block(fetched_full_block.height, fetched_full_block.foliage_transaction_block.timestamp, fetched_full_block.get_included_reward_coins(), tx_additions, tx_removals))
removed_rec: List[Optional[CoinRecord]] = [(await self.coin_store.get_coin_record(name)) for name in tx_removals]
record: Optional[CoinRecord]
for record in added_rec:
assert record
lastest_coin_state[record.name] = record
for record in removed_rec:
assert record
lastest_coin_state[record.name] = record
if (npc_res is not None):
hint_list: List[Tuple[(bytes32, bytes)]] = self.get_hint_list(npc_res)
(await self.hint_store.add_hints(hint_list))
for (coin_id, hint) in hint_list:
key = hint
if (key not in hint_coin_state):
hint_coin_state[key] = {}
hint_coin_state[key][coin_id] = lastest_coin_state[coin_id]
(await self.block_store.set_peak(block_record.header_hash))
return (uint32(max(fork_height, 0)), block_record.height, records_to_add, (list(lastest_coin_state.values()), hint_coin_state))
return (None, None, [], ([], {}))
|
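_reconsider_peak decides whether the new block outweighs the current peak and, on a reorg, rolls coin state back to the fork height before replaying blocks. The fork-height idea can be illustrated with a standalone toy function (this is not the real find_fork_point_in_chain, which walks BlockRecords by prev_hash rather than comparing lists):

def toy_fork_point(chain_a, chain_b):
    # Given two chains as lists of block hashes indexed by height, return the
    # greatest height at which they still agree, or -1 if they share nothing.
    fork = -1
    for height in range(min(len(chain_a), len(chain_b))):
        if chain_a[height] != chain_b[height]:
            break
        fork = height
    return fork

assert toy_fork_point(['g', 'a1', 'a2'], ['g', 'a1', 'b2', 'b3']) == 1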
def contains_block(self, header_hash: bytes32) -> bool:
'\n True if we have already added this block to the chain. This may return false for orphan blocks\n that we have added but no longer keep in memory.\n '
return (header_hash in self.__block_records)
| -9,152,929,373,722,569,000
|
True if we have already added this block to the chain. This may return false for orphan blocks
that we have added but no longer keep in memory.
|
kujenga/consensus/blockchain.py
|
contains_block
|
Kujenga-Network/kujenga-blockchain
|
python
|
def contains_block(self, header_hash: bytes32) -> bool:
'\n True if we have already added this block to the chain. This may return false for orphan blocks\n that we have added but no longer keep in memory.\n '
return (header_hash in self.__block_records)
|
async def warmup(self, fork_point: uint32):
'\n Loads blocks into the cache. The blocks loaded include all blocks from\n fork point - BLOCKS_CACHE_SIZE up to and including the fork_point.\n\n Args:\n fork_point: the last block height to load in the cache\n\n '
if (self._peak_height is None):
return None
block_records = (await self.block_store.get_block_records_in_range(max((fork_point - self.constants.BLOCKS_CACHE_SIZE), uint32(0)), fork_point))
for block_record in block_records.values():
self.add_block_record(block_record)
| 6,246,685,826,707,292,000
|
Loads blocks into the cache. The blocks loaded include all blocks from
fork point - BLOCKS_CACHE_SIZE up to and including the fork_point.
Args:
fork_point: the last block height to load in the cache
|
kujenga/consensus/blockchain.py
|
warmup
|
Kujenga-Network/kujenga-blockchain
|
python
|
async def warmup(self, fork_point: uint32):
'\n Loads blocks into the cache. The blocks loaded include all blocks from\n fork point - BLOCKS_CACHE_SIZE up to and including the fork_point.\n\n Args:\n fork_point: the last block height to load in the cache\n\n '
if (self._peak_height is None):
return None
block_records = (await self.block_store.get_block_records_in_range(max((fork_point - self.constants.BLOCKS_CACHE_SIZE), uint32(0)), fork_point))
for block_record in block_records.values():
self.add_block_record(block_record)
|
def clean_block_record(self, height: int):
'\n Clears all block records in the cache which have block_record < height.\n Args:\n height: Minimum height that we need to keep in the cache\n '
if (height < 0):
return None
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
while ((blocks_to_remove is not None) and (height >= 0)):
for header_hash in blocks_to_remove:
del self.__block_records[header_hash]
del self.__heights_in_cache[uint32(height)]
if (height == 0):
break
height = (height - 1)
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
| 8,406,551,519,050,297,000
|
Clears all block records in the cache which have block_record < height.
Args:
height: Minimum height that we need to keep in the cache
|
kujenga/consensus/blockchain.py
|
clean_block_record
|
Kujenga-Network/kujenga-blockchain
|
python
|
def clean_block_record(self, height: int):
'\n Clears all block records in the cache which have block_record < height.\n Args:\n height: Minimum height that we need to keep in the cache\n '
if (height < 0):
return None
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
while ((blocks_to_remove is not None) and (height >= 0)):
for header_hash in blocks_to_remove:
del self.__block_records[header_hash]
del self.__heights_in_cache[uint32(height)]
if (height == 0):
break
height = (height - 1)
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
|
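clean_block_record walks downward from the requested height, dropping every cached header hash level by level until it reaches an uncached height. The same eviction pattern, reduced to plain dicts and ints purely for illustration:

def toy_clean_below(block_records, heights_in_cache, height):
    # Toy version of the loop above: evict everything at `height` and below,
    # stopping as soon as a height has no cache entry.
    while height >= 0 and height in heights_in_cache:
        for header_hash in heights_in_cache.pop(height):
            del block_records[header_hash]
        height -= 1

records = {'h0': 'rec0', 'h1': 'rec1', 'h2': 'rec2'}
by_height = {0: {'h0'}, 1: {'h1'}, 2: {'h2'}}
toy_clean_below(records, by_height, 1)
assert records == {'h2': 'rec2'} and by_height == {2: {'h2'}}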
def clean_block_records(self):
'\n Cleans the cache so that we only maintain relevant blocks. This removes\n block records that have height < peak - BLOCKS_CACHE_SIZE.\n The blocks that remain in the cache are necessary for calculating future difficulty adjustments.\n '
if (len(self.__block_records) < self.constants.BLOCKS_CACHE_SIZE):
return None
peak = self.get_peak()
assert (peak is not None)
if ((peak.height - self.constants.BLOCKS_CACHE_SIZE) < 0):
return None
self.clean_block_record((peak.height - self.constants.BLOCKS_CACHE_SIZE))
| 4,200,059,749,752,214,500
|
Cleans the cache so that we only maintain relevant blocks. This removes
block records that have height < peak - BLOCKS_CACHE_SIZE.
The blocks that remain in the cache are necessary for calculating future difficulty adjustments.
|
kujenga/consensus/blockchain.py
|
clean_block_records
|
Kujenga-Network/kujenga-blockchain
|
python
|
def clean_block_records(self):
'\n Cleans the cache so that we only maintain relevant blocks. This removes\n block records that have height < peak - BLOCKS_CACHE_SIZE.\n The blocks that remain in the cache are necessary for calculating future difficulty adjustments.\n '
if (len(self.__block_records) < self.constants.BLOCKS_CACHE_SIZE):
return None
peak = self.get_peak()
assert (peak is not None)
if ((peak.height - self.constants.BLOCKS_CACHE_SIZE) < 0):
return None
self.clean_block_record((peak.height - self.constants.BLOCKS_CACHE_SIZE))
|
async def get_block_records_at(self, heights: List[uint32], batch_size=900) -> List[BlockRecord]:
'\n gets block records by height (only blocks that are part of the chain)\n '
records: List[BlockRecord] = []
hashes = []
assert (batch_size < 999)
for height in heights:
hashes.append(self.height_to_hash(height))
if (len(hashes) > batch_size):
res = (await self.block_store.get_block_records_by_hash(hashes))
records.extend(res)
hashes = []
if (len(hashes) > 0):
res = (await self.block_store.get_block_records_by_hash(hashes))
records.extend(res)
return records
| 2,921,475,375,414,308,000
|
gets block records by height (only blocks that are part of the chain)
|
kujenga/consensus/blockchain.py
|
get_block_records_at
|
Kujenga-Network/kujenga-blockchain
|
python
|
async def get_block_records_at(self, heights: List[uint32], batch_size=900) -> List[BlockRecord]:
'\n \n '
records: List[BlockRecord] = []
hashes = []
assert (batch_size < 999)
for height in heights:
hashes.append(self.height_to_hash(height))
if (len(hashes) > batch_size):
res = (await self.block_store.get_block_records_by_hash(hashes))
records.extend(res)
hashes = []
if (len(hashes) > 0):
res = (await self.block_store.get_block_records_by_hash(hashes))
records.extend(res)
return records
|
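get_block_records_at buffers header hashes and flushes them to the block store in chunks; the assert batch_size < 999 suggests SQLite's bound-parameter limit as the motivation. The chunking pattern on its own, with a stand-in fetch function instead of the real store query:

def toy_batched_fetch(keys, fetch_many, batch_size=900):
    # Accumulate keys, flush through fetch_many whenever the buffer exceeds
    # batch_size, then flush whatever remains at the end.
    results, pending = [], []
    for key in keys:
        pending.append(key)
        if len(pending) > batch_size:
            results.extend(fetch_many(pending))
            pending = []
    if pending:
        results.extend(fetch_many(pending))
    return results

assert toy_batched_fetch(range(5), lambda chunk: [k * 10 for k in chunk], batch_size=2) == [0, 10, 20, 30, 40]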
def add_block_record(self, block_record: BlockRecord):
'\n Adds a block record to the cache.\n '
self.__block_records[block_record.header_hash] = block_record
if (block_record.height not in self.__heights_in_cache.keys()):
self.__heights_in_cache[block_record.height] = set()
self.__heights_in_cache[block_record.height].add(block_record.header_hash)
| 1,117,064,767,505,630,700
|
Adds a block record to the cache.
|
kujenga/consensus/blockchain.py
|
add_block_record
|
Kujenga-Network/kujenga-blockchain
|
python
|
def add_block_record(self, block_record: BlockRecord):
'\n \n '
self.__block_records[block_record.header_hash] = block_record
if (block_record.height not in self.__heights_in_cache.keys()):
self.__heights_in_cache[block_record.height] = set()
self.__heights_in_cache[block_record.height].add(block_record.header_hash)
|
def headers(instance):
'\n Returns the first row of the instance.dataset\n\n Returns:\n List\n\n '
return instance.dataset[0]
| -5,050,454,796,651,471,000
|
Returns the first row of the instance.dataset
Returns:
List
|
preprocessor/ListData.py
|
headers
|
clokman/KFIR
|
python
|
def headers(instance):
'\n Returns the first row of the instance.dataset\n\n Returns:\n List\n\n '
return instance.dataset[0]
|
def data_rows(instance):
'\n Returns the rows of the instance.dataset except the first row.\n\n Returns:\n List\n '
return instance.dataset[1:len(instance.dataset)]
| -8,125,771,205,555,917,000
|
Returns the rows of the instance.dataset except the first row.
Returns:
List
|
preprocessor/ListData.py
|
data_rows
|
clokman/KFIR
|
python
|
def data_rows(instance):
'\n Returns the rows of the instance.dataset except the first row.\n\n Returns:\n List\n '
return instance.dataset[1:len(instance.dataset)]
|
def import_csv_file(instance, input_file_path, column_delimiter_pattern_in_input_file, line_head_pattern_to_remove='', line_tail_pattern_to_remove='', cell_head_and_tail_characters_to_remove=''):
'\n Returns:\n nothing\n\n Examples:\n >>> # Import a CSV file (yasgui.org formatting)\n >>> my_list_data = ListData()\n >>> my_list_data.import_csv_file(\'test_data//yasgui_output_100.csv\',\n ... column_delimiter_pattern_in_input_file=\' , \',\n ... line_tail_pattern_to_remove=\' ,\',\n ... cell_head_and_tail_characters_to_remove=\'"\')\n Cleaning parameters are set. Output resulting from a demo parsing operation is as following:\n ----------------------------------LINE 0----------------------------------\n <BLANKLINE>\n ----------------------------------LINE 1----------------------------------\n [\'publication_type\', \'journal_article\', \'title\', \'publication_year\', \'author_name\', \'journal_name\', \'journal_issue_number\', \'journal_volume_number\', \'startEndPages\', \'publisher_name\', \'doi\']\n ----------------------------------LINE 2----------------------------------\n [\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893\']\n <BLANKLINE>\n CSV file "test_data//yasgui_output_100.csv" is imported as ListData object.\n\n\n >>> # Parse a one-column CSV file\n >>> my_list_data = ListData()\n >>> my_list_data.import_csv_file(\'test_data//one_column_data.csv\',\n ... column_delimiter_pattern_in_input_file=\',\')\n Cleaning parameters are set. Output resulting from a demo parsing operation is as following:\n ----------------------------------LINE 0----------------------------------\n <BLANKLINE>\n ----------------------------------LINE 1----------------------------------\n [\'doi\', \'\']\n ----------------------------------LINE 2----------------------------------\n [\'10.1163/187607508X384689\', \'\']\n <BLANKLINE>\n CSV file "test_data//one_column_data.csv" is imported as ListData object.\n >>> my_list_data.get_column_at_index(0)\n [\'doi\', \'10.1163/187607508X384689\', \'10.1017/S0954579416000572\', \'10.1007/s11562-016-0353-7\', \'10.1016/j.adolescence.2016.09.008\', \'10.1186/s13561-016-0122-6\', \'10.1007/s00799-016-0182-6\', \'10.5194/gmd-2016-266\', \'10.1007/s00737-015-0531-2\', \'10.1103/RevModPhys.88.021003\', \'https://doi.org/10.1101/167171\', \'https://doi.org/10.1016/j.chb.2017.04.047\', \'10.1016/j.trb.2016.09.005\', \'10.1016/j.ancene.2016.01.001\', \'10.1111/adb.12322\', \'10.1017/njg.2016.45\', \'10.1080/1359432X.2016.1209489\', \'10.1117/1.JBO.21.6.066008\', \'10.5194/gmd-10-3329-2017\', \'10.1016/j.rser.2017.01.103\', \'10.1177/2050157916664559\', \'10.1007/978-3-319-45931-8_17\', \'10.1007/s11136-015-1171-8\', \'10.1145/2991079.2991121\', \'10.1093/cz/zow089\', \'10.1126/science.aac8167\', \'10.1007/s00586-016-4606-1\', \'10.1186/s12937-017-0229-6\', \'10.1007/s11357-016-9894-1\', \'10.1080/00130095.2015.1094371\', \'10.1016/j.epsl.2016.02.028\', \'10.1371/journal.pone.0168636\', \'10.1016/j.atmosres.2016.03.016\', \'10.1111/deci.12206\', \'10.1126/science.aad9634\', \'10.1103/PhysRevA.94.012506\', \'10.4103/0019-5545.196846\', \'10.1016/j.cedpsych.2017.01.006\', \'10.3324/haematol.2015.133470\', \'10.1057/978-1-137-50956-7\', \'10.1016/j.scico.2016.04.001\', \'https://doi.org/10.1016/j.scico.2016.04.001\', \'10.1080/03081087.2015.1053425\', \'10.3758/s13423-017-1270-3\', \'10.1681/ASN.2015030287\', \'10.1016/j.avb.2016.05.006\', \'10.1177/0971333616689191\', \'10.1002/sej.1243\', 
\'10.1016/j.foreco.2017.06.023\', \'10.1103/PhysRevLett.118.071801\', \'https://doi.org/10.1093/geront/gnv127\', \'10.1007/978-3-319-42324-1_16\', \'10.1109/JBHI.2015.2412656\', \'10.1016/j.jeem.2016.04.002\', \'10.1080/00207543.2015.1058982\', \'10.1038/mp.2016.100\', \'10.1080/03003930.2016.1194267\', \'10.1016/j.envint.2017.01.018\', \'10.1038/pr.2015.179\', \'10.1177/1753193416669263\', \'10.1016/j.tre.2016.11.003\', \'10.1021/acs.jpcc.5b12016\', \'10.1002/anie.201603510\', \'10.1073/pnas.1607005113\', \'(DOI) - 10.1111/cch.12521\', \'10.1017/S0016756815000886\', \'10.1080/1350293X.2015.1073507\', \'10.1152/jn.00701.2015\', \'10.1371/journal.pone.0170791\', \'10.1016/j.seares.2016.07.005\', \'10.1016/j.reseneeco.2016.03.003\', \'10.1007/s00531-017-1499-0\', \'10.1007/s41669-017-0014-7\', \'10.1093/acrefore/9780190228613.013.439\', \'10.14814/phy2.13201\', \'10.1016/j.jtrangeo.2016.10.013\', \'10.1523/JNEUROSCI.3658-16.2017\', \'10.1192/bjpo.bp.115.000166\', \'10.1136/bmjgh-2016-000109\', \'10.7554/eLife.20320.001\', \'10.1037/pas0000332\', \'10.1177/1474704916673841\', \'10.1057/978-1-137-58179-2\', \'10.1002/ejp.963\', \'10.1017/thg.2016.78\', \'10.1038/tpj.2016.32\', \'10.1016/j.jesp.2017.03.008\', \'10.1287/trsc.2015.0647\', \'10.1186/s13015-016-0087-3\', \'10.1016/j.neuroimage.2016.10.030\', \'10.1371/journal.pone.0169109\', \'10.1007/s11367-017-1358-z\', \'10.1080/1369183X.2015.1061425\', \'10.2196/mental.4614\', \'10.1002/arp.1564\', \'10.1021/acs.orglett.6b01023\', \'10.3847/1538-4357/aa6c47\', \'http://www.socialevraagstukken.nl/veiligheid-creeer-je-met-geborgenheid/\', \'10.1186/s12888-016-0790-0\', \'10.1371/journal.pone.0155755\']\n\n\n #>>> Enter parsing paramaters that do not match the contents of the CSV file\n #>>> Error is not invoked anymore as another from CSV_File takes over. Kept for possible future use\n #>>> my_list_data = ListData()\n #>>> try:\n #... my_list_data.import_csv_file(\'test_data//one_column_data.txt\',\n #... column_delimiter_pattern_in_input_file=\'\\n\',\n #... line_head_pattern_to_remove=\'\',\n #... line_tail_pattern_to_remove=\'\')\n #... except Exception as error_message:\n #... print(\'Exception caught: \' + str(error_message))\n Cleaning parameters are set. Output resulting from a demo parsing operation is as following:\n ----------------------------------LINE 0----------------------------------\n <BLANKLINE>\n Exception caught: No data imported from CSV file "test_data//one_column_data.csv". Parsing parameters provided does not seem to match formatting of the inputted CSV file.\n '
from preprocessor.csv_tools import CSV_File
csv_file = CSV_File(input_file_path, column_delimiter_pattern_in_input_file=column_delimiter_pattern_in_input_file)
csv_file.set_parsing_and_cleaning_parameters(line_head_pattern_to_remove=line_head_pattern_to_remove, line_tail_pattern_to_remove=line_tail_pattern_to_remove, cell_head_and_tail_characters_to_remove=cell_head_and_tail_characters_to_remove)
with open(csv_file.input_file_path, encoding='utf8') as input_file:
for (i, each_line) in enumerate(input_file):
csv_line = csv_file.get_line_at_position_from_file((i + 1))
csv_row = csv_file.clean_and_parse_line_to_CSV_Row_using_cleaning_parameters(csv_line)
instance.append_row(csv_row)
if instance.dataset:
print(('\nCSV file "%s" is imported as ListData object.' % csv_file.input_file_path))
else:
raise ValueError(('No data imported from CSV file "%s". Parsing parameters provided does not seem to match formatting of the inputted CSV file.' % csv_file.input_file_path))
| 4,266,730,992,492,970,500
|
Returns:
nothing
Examples:
>>> # Import a CSV file (yasgui.org formatting)
>>> my_list_data = ListData()
>>> my_list_data.import_csv_file('test_data//yasgui_output_100.csv',
... column_delimiter_pattern_in_input_file=' , ',
... line_tail_pattern_to_remove=' ,',
... cell_head_and_tail_characters_to_remove='"')
Cleaning parameters are set. Output resulting from a demo parsing operation is as following:
----------------------------------LINE 0----------------------------------
<BLANKLINE>
----------------------------------LINE 1----------------------------------
['publication_type', 'journal_article', 'title', 'publication_year', 'author_name', 'journal_name', 'journal_issue_number', 'journal_volume_number', 'startEndPages', 'publisher_name', 'doi']
----------------------------------LINE 2----------------------------------
['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893']
<BLANKLINE>
CSV file "test_data//yasgui_output_100.csv" is imported as ListData object.
>>> # Parse a one-column CSV file
>>> my_list_data = ListData()
>>> my_list_data.import_csv_file('test_data//one_column_data.csv',
... column_delimiter_pattern_in_input_file=',')
Cleaning parameters are set. Output resulting from a demo parsing operation is as following:
----------------------------------LINE 0----------------------------------
<BLANKLINE>
----------------------------------LINE 1----------------------------------
['doi', '']
----------------------------------LINE 2----------------------------------
['10.1163/187607508X384689', '']
<BLANKLINE>
CSV file "test_data//one_column_data.csv" is imported as ListData object.
>>> my_list_data.get_column_at_index(0)
['doi', '10.1163/187607508X384689', '10.1017/S0954579416000572', '10.1007/s11562-016-0353-7', '10.1016/j.adolescence.2016.09.008', '10.1186/s13561-016-0122-6', '10.1007/s00799-016-0182-6', '10.5194/gmd-2016-266', '10.1007/s00737-015-0531-2', '10.1103/RevModPhys.88.021003', 'https://doi.org/10.1101/167171', 'https://doi.org/10.1016/j.chb.2017.04.047', '10.1016/j.trb.2016.09.005', '10.1016/j.ancene.2016.01.001', '10.1111/adb.12322', '10.1017/njg.2016.45', '10.1080/1359432X.2016.1209489', '10.1117/1.JBO.21.6.066008', '10.5194/gmd-10-3329-2017', '10.1016/j.rser.2017.01.103', '10.1177/2050157916664559', '10.1007/978-3-319-45931-8_17', '10.1007/s11136-015-1171-8', '10.1145/2991079.2991121', '10.1093/cz/zow089', '10.1126/science.aac8167', '10.1007/s00586-016-4606-1', '10.1186/s12937-017-0229-6', '10.1007/s11357-016-9894-1', '10.1080/00130095.2015.1094371', '10.1016/j.epsl.2016.02.028', '10.1371/journal.pone.0168636', '10.1016/j.atmosres.2016.03.016', '10.1111/deci.12206', '10.1126/science.aad9634', '10.1103/PhysRevA.94.012506', '10.4103/0019-5545.196846', '10.1016/j.cedpsych.2017.01.006', '10.3324/haematol.2015.133470', '10.1057/978-1-137-50956-7', '10.1016/j.scico.2016.04.001', 'https://doi.org/10.1016/j.scico.2016.04.001', '10.1080/03081087.2015.1053425', '10.3758/s13423-017-1270-3', '10.1681/ASN.2015030287', '10.1016/j.avb.2016.05.006', '10.1177/0971333616689191', '10.1002/sej.1243', '10.1016/j.foreco.2017.06.023', '10.1103/PhysRevLett.118.071801', 'https://doi.org/10.1093/geront/gnv127', '10.1007/978-3-319-42324-1_16', '10.1109/JBHI.2015.2412656', '10.1016/j.jeem.2016.04.002', '10.1080/00207543.2015.1058982', '10.1038/mp.2016.100', '10.1080/03003930.2016.1194267', '10.1016/j.envint.2017.01.018', '10.1038/pr.2015.179', '10.1177/1753193416669263', '10.1016/j.tre.2016.11.003', '10.1021/acs.jpcc.5b12016', '10.1002/anie.201603510', '10.1073/pnas.1607005113', '(DOI) - 10.1111/cch.12521', '10.1017/S0016756815000886', '10.1080/1350293X.2015.1073507', '10.1152/jn.00701.2015', '10.1371/journal.pone.0170791', '10.1016/j.seares.2016.07.005', '10.1016/j.reseneeco.2016.03.003', '10.1007/s00531-017-1499-0', '10.1007/s41669-017-0014-7', '10.1093/acrefore/9780190228613.013.439', '10.14814/phy2.13201', '10.1016/j.jtrangeo.2016.10.013', '10.1523/JNEUROSCI.3658-16.2017', '10.1192/bjpo.bp.115.000166', '10.1136/bmjgh-2016-000109', '10.7554/eLife.20320.001', '10.1037/pas0000332', '10.1177/1474704916673841', '10.1057/978-1-137-58179-2', '10.1002/ejp.963', '10.1017/thg.2016.78', '10.1038/tpj.2016.32', '10.1016/j.jesp.2017.03.008', '10.1287/trsc.2015.0647', '10.1186/s13015-016-0087-3', '10.1016/j.neuroimage.2016.10.030', '10.1371/journal.pone.0169109', '10.1007/s11367-017-1358-z', '10.1080/1369183X.2015.1061425', '10.2196/mental.4614', '10.1002/arp.1564', '10.1021/acs.orglett.6b01023', '10.3847/1538-4357/aa6c47', 'http://www.socialevraagstukken.nl/veiligheid-creeer-je-met-geborgenheid/', '10.1186/s12888-016-0790-0', '10.1371/journal.pone.0155755']
#>>> Enter parsing paramaters that do not match the contents of the CSV file
#>>> Error is not invoked anymore as another from CSV_File takes over. Kept for possible future use
#>>> my_list_data = ListData()
#>>> try:
#... my_list_data.import_csv_file('test_data//one_column_data.txt',
#... column_delimiter_pattern_in_input_file='\n',
#... line_head_pattern_to_remove='',
#... line_tail_pattern_to_remove='')
#... except Exception as error_message:
#... print('Exception caught: ' + str(error_message))
Cleaning parameters are set. Output resulting from a demo parsing operation is as following:
----------------------------------LINE 0----------------------------------
<BLANKLINE>
Exception caught: No data imported from CSV file "test_data//one_column_data.csv". Parsing parameters provided does not seem to match formatting of the inputted CSV file.
|
preprocessor/ListData.py
|
import_csv_file
|
clokman/KFIR
|
python
|
def import_csv_file(instance, input_file_path, column_delimiter_pattern_in_input_file, line_head_pattern_to_remove='', line_tail_pattern_to_remove='', cell_head_and_tail_characters_to_remove=''):
'\n Returns:\n nothing\n\n Examples:\n >>> # Import a CSV file (yasgui.org formatting)\n >>> my_list_data = ListData()\n >>> my_list_data.import_csv_file(\'test_data//yasgui_output_100.csv\',\n ... column_delimiter_pattern_in_input_file=\' , \',\n ... line_tail_pattern_to_remove=\' ,\',\n ... cell_head_and_tail_characters_to_remove=\'"\')\n Cleaning parameters are set. Output resulting from a demo parsing operation is as following:\n ----------------------------------LINE 0----------------------------------\n <BLANKLINE>\n ----------------------------------LINE 1----------------------------------\n [\'publication_type\', \'journal_article\', \'title\', \'publication_year\', \'author_name\', \'journal_name\', \'journal_issue_number\', \'journal_volume_number\', \'startEndPages\', \'publisher_name\', \'doi\']\n ----------------------------------LINE 2----------------------------------\n [\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893\']\n <BLANKLINE>\n CSV file "test_data//yasgui_output_100.csv" is imported as ListData object.\n\n\n >>> # Parse a one-column CSV file\n >>> my_list_data = ListData()\n >>> my_list_data.import_csv_file(\'test_data//one_column_data.csv\',\n ... column_delimiter_pattern_in_input_file=\',\')\n Cleaning parameters are set. Output resulting from a demo parsing operation is as following:\n ----------------------------------LINE 0----------------------------------\n <BLANKLINE>\n ----------------------------------LINE 1----------------------------------\n [\'doi\', \'\']\n ----------------------------------LINE 2----------------------------------\n [\'10.1163/187607508X384689\', \'\']\n <BLANKLINE>\n CSV file "test_data//one_column_data.csv" is imported as ListData object.\n >>> my_list_data.get_column_at_index(0)\n [\'doi\', \'10.1163/187607508X384689\', \'10.1017/S0954579416000572\', \'10.1007/s11562-016-0353-7\', \'10.1016/j.adolescence.2016.09.008\', \'10.1186/s13561-016-0122-6\', \'10.1007/s00799-016-0182-6\', \'10.5194/gmd-2016-266\', \'10.1007/s00737-015-0531-2\', \'10.1103/RevModPhys.88.021003\', \'https://doi.org/10.1101/167171\', \'https://doi.org/10.1016/j.chb.2017.04.047\', \'10.1016/j.trb.2016.09.005\', \'10.1016/j.ancene.2016.01.001\', \'10.1111/adb.12322\', \'10.1017/njg.2016.45\', \'10.1080/1359432X.2016.1209489\', \'10.1117/1.JBO.21.6.066008\', \'10.5194/gmd-10-3329-2017\', \'10.1016/j.rser.2017.01.103\', \'10.1177/2050157916664559\', \'10.1007/978-3-319-45931-8_17\', \'10.1007/s11136-015-1171-8\', \'10.1145/2991079.2991121\', \'10.1093/cz/zow089\', \'10.1126/science.aac8167\', \'10.1007/s00586-016-4606-1\', \'10.1186/s12937-017-0229-6\', \'10.1007/s11357-016-9894-1\', \'10.1080/00130095.2015.1094371\', \'10.1016/j.epsl.2016.02.028\', \'10.1371/journal.pone.0168636\', \'10.1016/j.atmosres.2016.03.016\', \'10.1111/deci.12206\', \'10.1126/science.aad9634\', \'10.1103/PhysRevA.94.012506\', \'10.4103/0019-5545.196846\', \'10.1016/j.cedpsych.2017.01.006\', \'10.3324/haematol.2015.133470\', \'10.1057/978-1-137-50956-7\', \'10.1016/j.scico.2016.04.001\', \'https://doi.org/10.1016/j.scico.2016.04.001\', \'10.1080/03081087.2015.1053425\', \'10.3758/s13423-017-1270-3\', \'10.1681/ASN.2015030287\', \'10.1016/j.avb.2016.05.006\', \'10.1177/0971333616689191\', \'10.1002/sej.1243\', 
\'10.1016/j.foreco.2017.06.023\', \'10.1103/PhysRevLett.118.071801\', \'https://doi.org/10.1093/geront/gnv127\', \'10.1007/978-3-319-42324-1_16\', \'10.1109/JBHI.2015.2412656\', \'10.1016/j.jeem.2016.04.002\', \'10.1080/00207543.2015.1058982\', \'10.1038/mp.2016.100\', \'10.1080/03003930.2016.1194267\', \'10.1016/j.envint.2017.01.018\', \'10.1038/pr.2015.179\', \'10.1177/1753193416669263\', \'10.1016/j.tre.2016.11.003\', \'10.1021/acs.jpcc.5b12016\', \'10.1002/anie.201603510\', \'10.1073/pnas.1607005113\', \'(DOI) - 10.1111/cch.12521\', \'10.1017/S0016756815000886\', \'10.1080/1350293X.2015.1073507\', \'10.1152/jn.00701.2015\', \'10.1371/journal.pone.0170791\', \'10.1016/j.seares.2016.07.005\', \'10.1016/j.reseneeco.2016.03.003\', \'10.1007/s00531-017-1499-0\', \'10.1007/s41669-017-0014-7\', \'10.1093/acrefore/9780190228613.013.439\', \'10.14814/phy2.13201\', \'10.1016/j.jtrangeo.2016.10.013\', \'10.1523/JNEUROSCI.3658-16.2017\', \'10.1192/bjpo.bp.115.000166\', \'10.1136/bmjgh-2016-000109\', \'10.7554/eLife.20320.001\', \'10.1037/pas0000332\', \'10.1177/1474704916673841\', \'10.1057/978-1-137-58179-2\', \'10.1002/ejp.963\', \'10.1017/thg.2016.78\', \'10.1038/tpj.2016.32\', \'10.1016/j.jesp.2017.03.008\', \'10.1287/trsc.2015.0647\', \'10.1186/s13015-016-0087-3\', \'10.1016/j.neuroimage.2016.10.030\', \'10.1371/journal.pone.0169109\', \'10.1007/s11367-017-1358-z\', \'10.1080/1369183X.2015.1061425\', \'10.2196/mental.4614\', \'10.1002/arp.1564\', \'10.1021/acs.orglett.6b01023\', \'10.3847/1538-4357/aa6c47\', \'http://www.socialevraagstukken.nl/veiligheid-creeer-je-met-geborgenheid/\', \'10.1186/s12888-016-0790-0\', \'10.1371/journal.pone.0155755\']\n\n\n #>>> Enter parsing paramaters that do not match the contents of the CSV file\n #>>> Error is not invoked anymore as another from CSV_File takes over. Kept for possible future use\n #>>> my_list_data = ListData()\n #>>> try:\n #... my_list_data.import_csv_file(\'test_data//one_column_data.txt\',\n #... column_delimiter_pattern_in_input_file=\'\\n\',\n #... line_head_pattern_to_remove=\'\',\n #... line_tail_pattern_to_remove=\'\')\n #... except Exception as error_message:\n #... print(\'Exception caught: \' + str(error_message))\n Cleaning parameters are set. Output resulting from a demo parsing operation is as following:\n ----------------------------------LINE 0----------------------------------\n <BLANKLINE>\n Exception caught: No data imported from CSV file "test_data//one_column_data.csv". Parsing parameters provided does not seem to match formatting of the inputted CSV file.\n '
from preprocessor.csv_tools import CSV_File
csv_file = CSV_File(input_file_path, column_delimiter_pattern_in_input_file=column_delimiter_pattern_in_input_file)
csv_file.set_parsing_and_cleaning_parameters(line_head_pattern_to_remove=line_head_pattern_to_remove, line_tail_pattern_to_remove=line_tail_pattern_to_remove, cell_head_and_tail_characters_to_remove=cell_head_and_tail_characters_to_remove)
with open(csv_file.input_file_path, encoding='utf8') as input_file:
for (i, each_line) in enumerate(input_file):
csv_line = csv_file.get_line_at_position_from_file((i + 1))
csv_row = csv_file.clean_and_parse_line_to_CSV_Row_using_cleaning_parameters(csv_line)
instance.append_row(csv_row)
if instance.dataset:
print(('\nCSV file "%s" is imported as ListData object.' % csv_file.input_file_path))
else:
raise ValueError(('No data imported from CSV file "%s". Parsing parameters provided does not seem to match formatting of the inputted CSV file.' % csv_file.input_file_path))
|
def import_json_object(instance, json_object):
"\n Converts a JSON formatted object to a ListData object.\n\n Args:\n json_dictionary(dict): a dictionary that is formatted as JSON\n\n Returns:\n \n Examples:\n >>> my_json_object = {\n ... 1: {'label': 'Example', 'value': 3},\n ... 2: {'label': 'Test', 'value': 1},\n ... 3: {'label': 'Tryout'}\n ... }\n >>> print(my_json_object)\n {1: {'label': 'Example', 'value': 3}, 2: {'label': 'Test', 'value': 1}, 3: {'label': 'Tryout'}}\n\n >>> my_list_data = ListData()\n >>> my_list_data.import_json_object(my_json_object)\n >>> print(my_list_data.dataset)\n [['label', 'value'], ['Example', 3], ['Test', 1], ['Tryout', ' ']]\n "
from preprocessor.legacy_functions.get_header_index import get_header_index
try:
if instance.headers():
raise Exception('Instance.headers not empty prior to append operation. This method is not compatible with adding new headers/columns.')
except IndexError:
headers_list = []
for (each_entry_id, each_entry_data) in json_object.items():
for each_field_name in each_entry_data.keys():
if (each_field_name not in headers_list):
headers_list.append(each_field_name)
instance.dataset.append(headers_list)
for (each_entry_id, each_entry_data) in json_object.items():
instance.dataset.append([])
current_row = instance.dataset[(- 1)]
while (len(current_row) < len(instance.headers())):
current_row.append(instance.missing_data_character)
for (each_field_name, each_field_value) in each_entry_data.items():
current_field_name_header_index = get_header_index(each_field_name, instance.dataset)
current_row[current_field_name_header_index] = each_field_value
| -6,396,754,683,567,436,000
|
Converts a JSON formatted object to a ListData object.
Args:
json_object(dict): a dictionary that is formatted as JSON
Returns:
Examples:
>>> my_json_object = {
... 1: {'label': 'Example', 'value': 3},
... 2: {'label': 'Test', 'value': 1},
... 3: {'label': 'Tryout'}
... }
>>> print(my_json_object)
{1: {'label': 'Example', 'value': 3}, 2: {'label': 'Test', 'value': 1}, 3: {'label': 'Tryout'}}
>>> my_list_data = ListData()
>>> my_list_data.import_json_object(my_json_object)
>>> print(my_list_data.dataset)
[['label', 'value'], ['Example', 3], ['Test', 1], ['Tryout', ' ']]
|
preprocessor/ListData.py
|
import_json_object
|
clokman/KFIR
|
python
|
def import_json_object(instance, json_object):
"\n Converts a JSON formatted object to a ListData object.\n\n Args:\n json_dictionary(dict): a dictionary that is formatted as JSON\n\n Returns:\n \n Examples:\n >>> my_json_object = {\n ... 1: {'label': 'Example', 'value': 3},\n ... 2: {'label': 'Test', 'value': 1},\n ... 3: {'label': 'Tryout'}\n ... }\n >>> print(my_json_object)\n {1: {'label': 'Example', 'value': 3}, 2: {'label': 'Test', 'value': 1}, 3: {'label': 'Tryout'}}\n\n >>> my_list_data = ListData()\n >>> my_list_data.import_json_object(my_json_object)\n >>> print(my_list_data.dataset)\n [['label', 'value'], ['Example', 3], ['Test', 1], ['Tryout', ' ']]\n "
from preprocessor.legacy_functions.get_header_index import get_header_index
try:
if instance.headers():
raise Exception('Instance.headers not empty prior to append operation. This method is not compatible with adding new headers/columns.')
except IndexError:
headers_list = []
for (each_entry_id, each_entry_data) in json_object.items():
for each_field_name in each_entry_data.keys():
if (each_field_name not in headers_list):
headers_list.append(each_field_name)
instance.dataset.append(headers_list)
for (each_entry_id, each_entry_data) in json_object.items():
instance.dataset.append([])
current_row = instance.dataset[(- 1)]
while (len(current_row) < len(instance.headers())):
current_row.append(instance.missing_data_character)
for (each_field_name, each_field_value) in each_entry_data.items():
current_field_name_header_index = get_header_index(each_field_name, instance.dataset)
current_row[current_field_name_header_index] = each_field_value
|
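
A minimal usage sketch for import_json_object above. The import path is an assumption inferred from the path field (preprocessor/ListData.py) and is not stated in the record itself; entries that lack a field are padded with the instance's missing_data_character, which the doctest above shows as a single space.

# Sketch: convert a JSON-like dict of records to a ListData table.
# Assumption: ListData is importable from preprocessor/ListData.py as shown.
from preprocessor.ListData import ListData

records = {
    1: {'label': 'Example', 'value': 3},
    2: {'label': 'Test', 'value': 1},
    3: {'label': 'Tryout'},               # 'value' is missing here
}

table = ListData()
table.import_json_object(records)
# Headers are collected from all entries; the missing 'value' for entry 3
# is filled with table.missing_data_character (a space in the doctest above).
print(table.dataset)   # [['label', 'value'], ['Example', 3], ['Test', 1], ['Tryout', ' ']]
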
def import_bibliography_object(instance, bibliography_object):
"\n Converts a Bibliography class object to a ListData object.\n\n Returns:\n ListData class object\n\n Examples:\n >>> from triplicator.bibTools import Bibliography\n >>> my_bibliography = Bibliography()\n >>> my_bibliography.setEntry('01', 'author', 'John Doe')\n >>> my_bibliography.setEntry('02', 'author', 'Jane Doe')\n >>> #my_bibliography.import_data('..//triplicator//example_data//test.bib')\n >>> print(my_bibliography.entries)\n {'01': {'author': 'John Doe'}, '02': {'author': 'Jane Doe'}}\n >>> my_list_data = ListData()\n >>> my_list_data.import_bibliography_object(my_bibliography)\n >>> print(my_list_data.dataset)\n [['author'], ['John Doe'], ['Jane Doe']]\n "
instance.import_json_object(bibliography_object.entries)
| -4,453,412,358,065,190,000
|
Converts a Bibliography class object to a ListData object.
Returns:
ListData class object
Examples:
>>> from triplicator.bibTools import Bibliography
>>> my_bibliography = Bibliography()
>>> my_bibliography.setEntry('01', 'author', 'John Doe')
>>> my_bibliography.setEntry('02', 'author', 'Jane Doe')
>>> #my_bibliography.import_data('..//triplicator//example_data//test.bib')
>>> print(my_bibliography.entries)
{'01': {'author': 'John Doe'}, '02': {'author': 'Jane Doe'}}
>>> my_list_data = ListData()
>>> my_list_data.import_bibliography_object(my_bibliography)
>>> print(my_list_data.dataset)
[['author'], ['John Doe'], ['Jane Doe']]
|
preprocessor/ListData.py
|
import_bibliography_object
|
clokman/KFIR
|
python
|
def import_bibliography_object(instance, bibliography_object):
"\n Converts a Bibliography class object to a ListData object.\n\n Returns:\n ListData class object\n\n Examples:\n >>> from triplicator.bibTools import Bibliography\n >>> my_bibliography = Bibliography()\n >>> my_bibliography.setEntry('01', 'author', 'John Doe')\n >>> my_bibliography.setEntry('02', 'author', 'Jane Doe')\n >>> #my_bibliography.import_data('..//triplicator//example_data//test.bib')\n >>> print(my_bibliography.entries)\n {'01': {'author': 'John Doe'}, '02': {'author': 'Jane Doe'}}\n >>> my_list_data = ListData()\n >>> my_list_data.import_bibliography_object(my_bibliography)\n >>> print(my_list_data.dataset)\n [['author'], ['John Doe'], ['Jane Doe']]\n "
instance.import_json_object(bibliography_object.entries)
|
def get_column_at_index(instance, index):
"\n Allows columns to be selected (i.e., returned) by entering their index position.\n \n :return: A list vector that contains values from the queried column\n \n :example:\n >>> my_listdata = ListData()\n >>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]\n >>> my_listdata.get_column_at_index(1)\n ['birth_date', 2084, 2054]\n "
column = [each_row[index] for each_row in instance.dataset]
return column
| 7,221,061,146,976,088,000
|
Allows columns to be selected (i.e., returned) by entering their index position.
:return: A list vector that contains values from the queried column
:example:
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.get_column_at_index(1)
['birth_date', 2084, 2054]
|
preprocessor/ListData.py
|
get_column_at_index
|
clokman/KFIR
|
python
|
def get_column_at_index(instance, index):
"\n Allows columns to be selected (i.e., returned) by entering their index position.\n \n :return: A list vector that contains values from the queried column\n \n :example:\n >>> my_listdata = ListData()\n >>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]\n >>> my_listdata.get_column_at_index(1)\n ['birth_date', 2084, 2054]\n "
column = [each_row[index] for each_row in instance.dataset]
return column
|
def get_row_length(instance):
"\n Gets the length of a sample row from the dataset.\n\n Returns:\n Integer\n\n Examples:\n >>> my_listdata = ListData()\n >>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]\n >>> my_listdata.get_row_length()\n 2\n "
probe_index = 0
row_length = 0
try:
row_length = len(instance.dataset[probe_index])
except IndexError:
        raise IndexError('Not possible to probe row at index %s. Nothing found at this index position.' % probe_index)
return row_length
| 5,714,986,125,114,063,000
|
Gets the length of a sample row from the dataset.
Returns:
Integer
Examples:
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.get_row_length()
2
|
preprocessor/ListData.py
|
get_row_length
|
clokman/KFIR
|
python
|
def get_row_length(instance):
"\n Gets the length of a sample row from the dataset.\n\n Returns:\n Integer\n\n Examples:\n >>> my_listdata = ListData()\n >>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]\n >>> my_listdata.get_row_length()\n 2\n "
probe_index = 0
row_length = 0
try:
row_length = len(instance.dataset[probe_index])
except IndexError:
        raise IndexError('Not possible to probe row at index %s. Nothing found at this index position.' % probe_index)
return row_length
|
def transpose_dataset(instance):
"\n\n >>> my_listdata = ListData()\n >>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]\n >>> my_listdata.transpose_dataset().dataset\n [['name', 'john', 'jane'], ['birth_date', 2084, 2054]]\n >>> my_listdata.transpose_dataset().dataset\n [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]\n\n >>> my_listdata.transpose_dataset().dataset == my_listdata.transpose_dataset().transpose_dataset().dataset\n True\n "
row_length = instance.get_row_length()
columns = [instance.get_column_at_index(i) for i in range(0, row_length)]
instance.dataset = columns
return instance
| -2,419,339,717,802,129,400
|
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.transpose_dataset().dataset
[['name', 'john', 'jane'], ['birth_date', 2084, 2054]]
>>> my_listdata.transpose_dataset().dataset
[['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
>>> my_listdata.transpose_dataset().dataset == my_listdata.transpose_dataset().transpose_dataset().dataset
True
|
preprocessor/ListData.py
|
transpose_dataset
|
clokman/KFIR
|
python
|
def transpose_dataset(instance):
"\n\n >>> my_listdata = ListData()\n >>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]\n >>> my_listdata.transpose_dataset().dataset\n [['name', 'john', 'jane'], ['birth_date', 2084, 2054]]\n >>> my_listdata.transpose_dataset().dataset\n [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]\n\n >>> my_listdata.transpose_dataset().dataset == my_listdata.transpose_dataset().transpose_dataset().dataset\n True\n "
row_length = instance.get_row_length()
columns = [instance.get_column_at_index(i) for i in range(0, row_length)]
instance.dataset = columns
return instance
|
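
A short sketch for transpose_dataset above, under the same import assumption as earlier: the method rebuilds the dataset from its columns in place, so applying it twice restores the original row/column layout.

# Sketch: transpose rows and columns, then transpose back.
from preprocessor.ListData import ListData  # assumed import path

table = ListData()
table.dataset = [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]

print(table.transpose_dataset().dataset)  # [['name', 'john', 'jane'], ['birth_date', 2084, 2054]]
print(table.transpose_dataset().dataset)  # back to [['name', 'birth_date'], ['john', 2084], ['jane', 2054]]
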
def merge_all_rows_to_one(instance, value_separator_pattern=' | '):
'\n >>> my_listdata = ListData().append_row([\'john\', 2054]).append_row([\'john\', 3254])\n >>> my_listdata.merge_all_rows_to_one().dataset\n [\'john\', \'2054 | 3254\']\n\n >>> my_listdata = ListData()\n >>> my_listdata.dataset = [[\'john\', 2054], [\'john\', 3254], [\'john\', 2672]]\n >>> my_listdata.merge_all_rows_to_one().dataset\n [\'john\', \'2054 | 3254 | 2672\']\n\n # method does not deal with headers\n >>> my_listdata.dataset = [[\'name\', \'birth_date\'], [\'john\', 2084], [\'john\', 2054]]\n >>> my_listdata.merge_all_rows_to_one().dataset\n [\'name | john\', \'birth_date | 2084 | 2054\']\n\n # but headers can be easily managed\n >>> my_listdata.dataset = [[\'name\', \'birth_date\'], [\'john\', 2084], [\'john\', 2054]]\n >>> my_listdata.dataset = my_listdata.dataset[1:]\n >>> my_listdata.merge_all_rows_to_one().dataset\n [\'john\', \'2084 | 2054\']\n\n # different separator pattern (and a transpose-like operation)\n >>> my_listdata.dataset = [[\'name\', \'birth_date\'], [\'john\', 2084], [\'john\', 2054], [\'jane\', 2054]]\n >>> my_listdata.merge_all_rows_to_one(\'; \').dataset\n [\'name; john; jane\', \'birth_date; 2084; 2054\']\n\n >>> type(my_listdata.dataset)\n <class \'list\'>\n\n >>> from preprocessor.csv_tools import CSV_Line, CSV_Row, Row_Merge_Buffer\n >>> line_1 = CSV_Line(\' "Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "Steer - Robert A." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "10.1037//0022-006x.56.6.893" ,\')\n >>> line_2 = CSV_Line(\' "Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "John - Doe B." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "https://doi.org/10.1037//0022-006x.56.6.893" ,\')\n >>> line_1.clean_head_and_tail_from_patterns(\' ,\', location=\'tail\').clean_head_and_tail_from_patterns(\' \', location=\'head\')\n \'"Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "Steer - Robert A." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "10.1037//0022-006x.56.6.893"\'\n >>> line_2.clean_head_and_tail_from_patterns(\' ,\', location=\'tail\').clean_head_and_tail_from_patterns(\' \', location=\'head\')\n \'"Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "John - Doe B." 
, "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "https://doi.org/10.1037//0022-006x.56.6.893"\'\n >>> row_1 = line_1.parse_line_and_CONVERT_to_CSV_Row(\' , \').clean_cell_heads_and_tails_from_characters(\'"\')\n >>> row_2 = line_2.parse_line_and_CONVERT_to_CSV_Row(\' , \').clean_cell_heads_and_tails_from_characters(\'"\')\n >>> buffer = Row_Merge_Buffer(1)\n >>> buffer.append_as_first_row_and_reset_buffer(row_1)\n "https://w3id.org/oc/corpus/br/45174: [[\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893\']]"\n >>> buffer.append_row_if_ids_match(row_2)\n "https://w3id.org/oc/corpus/br/45174: [[\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893\'], [\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'John - Doe B.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'https://doi.org/10.1037//0022-006x.56.6.893\']]"\n >>> buffer.merge_all_rows_to_one(\' | \')\n "https://w3id.org/oc/corpus/br/45174: [\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A. | John - Doe B.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893\']"\n\n # List conversion with actual rows\n >>> a = ListData()\n >>> a.dataset = [[\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893\'], [\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'John - Doe B.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'https://doi.org/10.1037//0022-006x.56.6.893\']]\n >>> a.merge_all_rows_to_one(\' | \').dataset\n [\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A. 
| John - Doe B.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893\']\n\n # Row_Merge_Buffer class conversion with actual rows\n >>> a = Row_Merge_Buffer(1)\n >>> a.dataset = [[\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893\'], [\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'John - Doe B.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'https://doi.org/10.1037//0022-006x.56.6.893\']]\n >>> a.merge_all_rows_to_one(\' | \').dataset\n [\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A. | John - Doe B.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893\']\n\n # Error from empty dataset\n >>> a = ListData()\n >>>\n >>> try:\n ... a.merge_all_rows_to_one(\' | \') # no item to index in empty dataset\n ... except Exception as error_message:\n ... print(\'Exception: \' + str(error_message))\n Exception: Dataset to be merged is either empty or not indexable (no item at index [0]).\n The input dataset is:\n []\n '
try:
instance.dataset[0]
except IndexError:
raise IndexError(('Dataset to be merged is either empty or not indexable (no item at index [0]).\nThe input dataset is:\n%s' % str(instance.dataset)))
dataset = instance.dataset
merged_row = dataset[0]
for each_row in dataset:
current_row = each_row
current_cell_position = 0
for (each_current_cell, each_merged_cell) in zip(current_row, merged_row):
if (str(each_current_cell) not in str(each_merged_cell)):
merged_cell = ((str(each_merged_cell) + value_separator_pattern) + str(each_current_cell))
merged_row[current_cell_position] = merged_cell
current_cell_position += 1
instance.dataset = merged_row
return instance
| -8,251,441,568,987,171,000
|
>>> my_listdata = ListData().append_row(['john', 2054]).append_row(['john', 3254])
>>> my_listdata.merge_all_rows_to_one().dataset
['john', '2054 | 3254']
>>> my_listdata = ListData()
>>> my_listdata.dataset = [['john', 2054], ['john', 3254], ['john', 2672]]
>>> my_listdata.merge_all_rows_to_one().dataset
['john', '2054 | 3254 | 2672']
# method does not deal with headers
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['john', 2054]]
>>> my_listdata.merge_all_rows_to_one().dataset
['name | john', 'birth_date | 2084 | 2054']
# but headers can be easily managed
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['john', 2054]]
>>> my_listdata.dataset = my_listdata.dataset[1:]
>>> my_listdata.merge_all_rows_to_one().dataset
['john', '2084 | 2054']
# different separator pattern (and a transpose-like operation)
>>> my_listdata.dataset = [['name', 'birth_date'], ['john', 2084], ['john', 2054], ['jane', 2054]]
>>> my_listdata.merge_all_rows_to_one('; ').dataset
['name; john; jane', 'birth_date; 2084; 2054']
>>> type(my_listdata.dataset)
<class 'list'>
>>> from preprocessor.csv_tools import CSV_Line, CSV_Row, Row_Merge_Buffer
>>> line_1 = CSV_Line(' "Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "Steer - Robert A." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "10.1037//0022-006x.56.6.893" ,')
>>> line_2 = CSV_Line(' "Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "John - Doe B." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "https://doi.org/10.1037//0022-006x.56.6.893" ,')
>>> line_1.clean_head_and_tail_from_patterns(' ,', location='tail').clean_head_and_tail_from_patterns(' ', location='head')
'"Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "Steer - Robert A." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "10.1037//0022-006x.56.6.893"'
>>> line_2.clean_head_and_tail_from_patterns(' ,', location='tail').clean_head_and_tail_from_patterns(' ', location='head')
'"Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "John - Doe B." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "https://doi.org/10.1037//0022-006x.56.6.893"'
>>> row_1 = line_1.parse_line_and_CONVERT_to_CSV_Row(' , ').clean_cell_heads_and_tails_from_characters('"')
>>> row_2 = line_2.parse_line_and_CONVERT_to_CSV_Row(' , ').clean_cell_heads_and_tails_from_characters('"')
>>> buffer = Row_Merge_Buffer(1)
>>> buffer.append_as_first_row_and_reset_buffer(row_1)
"https://w3id.org/oc/corpus/br/45174: [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893']]"
>>> buffer.append_row_if_ids_match(row_2)
"https://w3id.org/oc/corpus/br/45174: [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893'], ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', 'https://doi.org/10.1037//0022-006x.56.6.893']]"
>>> buffer.merge_all_rows_to_one(' | ')
"https://w3id.org/oc/corpus/br/45174: ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A. | John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893']"
# List conversion with actual rows
>>> a = ListData()
>>> a.dataset = [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893'], ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', 'https://doi.org/10.1037//0022-006x.56.6.893']]
>>> a.merge_all_rows_to_one(' | ').dataset
['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A. | John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893']
# Row_Merge_Buffer class conversion with actual rows
>>> a = Row_Merge_Buffer(1)
>>> a.dataset = [['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893'], ['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', 'https://doi.org/10.1037//0022-006x.56.6.893']]
>>> a.merge_all_rows_to_one(' | ').dataset
['Journal Article', 'https://w3id.org/oc/corpus/br/45174', 'An inventory for measuring clinical anxiety: Psychometric properties.', '1988', 'Steer - Robert A. | John - Doe B.', 'Journal of Consulting and Clinical Psychology', '6', '56', '893--897', 'American Psychological Association (APA)', '10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893']
# Error from empty dataset
>>> a = ListData()
>>>
>>> try:
... a.merge_all_rows_to_one(' | ') # no item to index in empty dataset
... except Exception as error_message:
... print('Exception: ' + str(error_message))
Exception: Dataset to be merged is either empty or not indexable (no item at index [0]).
The input dataset is:
[]
|
preprocessor/ListData.py
|
merge_all_rows_to_one
|
clokman/KFIR
|
python
|
def merge_all_rows_to_one(instance, value_separator_pattern=' | '):
'\n >>> my_listdata = ListData().append_row([\'john\', 2054]).append_row([\'john\', 3254])\n >>> my_listdata.merge_all_rows_to_one().dataset\n [\'john\', \'2054 | 3254\']\n\n >>> my_listdata = ListData()\n >>> my_listdata.dataset = [[\'john\', 2054], [\'john\', 3254], [\'john\', 2672]]\n >>> my_listdata.merge_all_rows_to_one().dataset\n [\'john\', \'2054 | 3254 | 2672\']\n\n # method does not deal with headers\n >>> my_listdata.dataset = [[\'name\', \'birth_date\'], [\'john\', 2084], [\'john\', 2054]]\n >>> my_listdata.merge_all_rows_to_one().dataset\n [\'name | john\', \'birth_date | 2084 | 2054\']\n\n # but headers can be easily managed\n >>> my_listdata.dataset = [[\'name\', \'birth_date\'], [\'john\', 2084], [\'john\', 2054]]\n >>> my_listdata.dataset = my_listdata.dataset[1:]\n >>> my_listdata.merge_all_rows_to_one().dataset\n [\'john\', \'2084 | 2054\']\n\n # different separator pattern (and a transpose-like operation)\n >>> my_listdata.dataset = [[\'name\', \'birth_date\'], [\'john\', 2084], [\'john\', 2054], [\'jane\', 2054]]\n >>> my_listdata.merge_all_rows_to_one(\'; \').dataset\n [\'name; john; jane\', \'birth_date; 2084; 2054\']\n\n >>> type(my_listdata.dataset)\n <class \'list\'>\n\n >>> from preprocessor.csv_tools import CSV_Line, CSV_Row, Row_Merge_Buffer\n >>> line_1 = CSV_Line(\' "Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "Steer - Robert A." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "10.1037//0022-006x.56.6.893" ,\')\n >>> line_2 = CSV_Line(\' "Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "John - Doe B." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "https://doi.org/10.1037//0022-006x.56.6.893" ,\')\n >>> line_1.clean_head_and_tail_from_patterns(\' ,\', location=\'tail\').clean_head_and_tail_from_patterns(\' \', location=\'head\')\n \'"Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "Steer - Robert A." , "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "10.1037//0022-006x.56.6.893"\'\n >>> line_2.clean_head_and_tail_from_patterns(\' ,\', location=\'tail\').clean_head_and_tail_from_patterns(\' \', location=\'head\')\n \'"Journal Article" , "https://w3id.org/oc/corpus/br/45174" , "An inventory for measuring clinical anxiety: Psychometric properties." , "1988" , "John - Doe B." 
, "Journal of Consulting and Clinical Psychology" , "6" , "56" , "893--897" , "American Psychological Association (APA)" , "https://doi.org/10.1037//0022-006x.56.6.893"\'\n >>> row_1 = line_1.parse_line_and_CONVERT_to_CSV_Row(\' , \').clean_cell_heads_and_tails_from_characters(\'"\')\n >>> row_2 = line_2.parse_line_and_CONVERT_to_CSV_Row(\' , \').clean_cell_heads_and_tails_from_characters(\'"\')\n >>> buffer = Row_Merge_Buffer(1)\n >>> buffer.append_as_first_row_and_reset_buffer(row_1)\n "https://w3id.org/oc/corpus/br/45174: [[\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893\']]"\n >>> buffer.append_row_if_ids_match(row_2)\n "https://w3id.org/oc/corpus/br/45174: [[\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893\'], [\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'John - Doe B.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'https://doi.org/10.1037//0022-006x.56.6.893\']]"\n >>> buffer.merge_all_rows_to_one(\' | \')\n "https://w3id.org/oc/corpus/br/45174: [\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A. | John - Doe B.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893\']"\n\n # List conversion with actual rows\n >>> a = ListData()\n >>> a.dataset = [[\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893\'], [\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'John - Doe B.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'https://doi.org/10.1037//0022-006x.56.6.893\']]\n >>> a.merge_all_rows_to_one(\' | \').dataset\n [\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A. 
| John - Doe B.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893\']\n\n # Row_Merge_Buffer class conversion with actual rows\n >>> a = Row_Merge_Buffer(1)\n >>> a.dataset = [[\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893\'], [\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'John - Doe B.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'https://doi.org/10.1037//0022-006x.56.6.893\']]\n >>> a.merge_all_rows_to_one(\' | \').dataset\n [\'Journal Article\', \'https://w3id.org/oc/corpus/br/45174\', \'An inventory for measuring clinical anxiety: Psychometric properties.\', \'1988\', \'Steer - Robert A. | John - Doe B.\', \'Journal of Consulting and Clinical Psychology\', \'6\', \'56\', \'893--897\', \'American Psychological Association (APA)\', \'10.1037//0022-006x.56.6.893 | https://doi.org/10.1037//0022-006x.56.6.893\']\n\n # Error from empty dataset\n >>> a = ListData()\n >>>\n >>> try:\n ... a.merge_all_rows_to_one(\' | \') # no item to index in empty dataset\n ... except Exception as error_message:\n ... print(\'Exception: \' + str(error_message))\n Exception: Dataset to be merged is either empty or not indexable (no item at index [0]).\n The input dataset is:\n []\n '
try:
instance.dataset[0]
except IndexError:
raise IndexError(('Dataset to be merged is either empty or not indexable (no item at index [0]).\nThe input dataset is:\n%s' % str(instance.dataset)))
dataset = instance.dataset
merged_row = dataset[0]
for each_row in dataset:
current_row = each_row
current_cell_position = 0
for (each_current_cell, each_merged_cell) in zip(current_row, merged_row):
if (str(each_current_cell) not in str(each_merged_cell)):
merged_cell = ((str(each_merged_cell) + value_separator_pattern) + str(each_current_cell))
merged_row[current_cell_position] = merged_cell
current_cell_position += 1
instance.dataset = merged_row
return instance
|
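
A sketch of the de-duplication pattern the doctests for merge_all_rows_to_one demonstrate: rows describing the same entity are collapsed into a single row, and a cell value is only appended (with the separator pattern) when it is not already contained in the merged cell. Header rows are not treated specially, so they are sliced off first here. Same import assumption as above.

# Sketch: collapse duplicate rows into one, joining differing cells with ' | '.
from preprocessor.ListData import ListData  # assumed import path

table = ListData()
table.dataset = [['name', 'birth_date'], ['john', 2084], ['john', 2054]]

table.dataset = table.dataset[1:]                    # drop the header row; the method does not handle headers
print(table.merge_all_rows_to_one(' | ').dataset)    # ['john', '2084 | 2054']
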
def append_row(instance, new_row):
"\n Appends a row the ListData object's dataset variable.\n\n Returns:\n ListData object (instance)\n\n Examples:\n >>> my_listdata = ListData()\n >>> my_listdata.append_row([1,2,3]).dataset\n [[1, 2, 3]]\n\n >>> my_listdata.dataset\n [[1, 2, 3]]\n >>> my_listdata.append_row(['a','b','c']).dataset\n [[1, 2, 3], ['a', 'b', 'c']]\n\n >>> my_listdata.dataset\n [[1, 2, 3], ['a', 'b', 'c']]\n\n >>> my_listdata.append_row(['x', 'y']).append_row(['z', 't']).append_row(['m', 'n']).dataset\n [[1, 2, 3], ['a', 'b', 'c'], ['x', 'y'], ['z', 't'], ['m', 'n']]\n\n "
instance.dataset.append(new_row)
return instance
| 9,212,340,499,343,603,000
|
Appends a row to the ListData object's dataset variable.
Returns:
ListData object (instance)
Examples:
>>> my_listdata = ListData()
>>> my_listdata.append_row([1,2,3]).dataset
[[1, 2, 3]]
>>> my_listdata.dataset
[[1, 2, 3]]
>>> my_listdata.append_row(['a','b','c']).dataset
[[1, 2, 3], ['a', 'b', 'c']]
>>> my_listdata.dataset
[[1, 2, 3], ['a', 'b', 'c']]
>>> my_listdata.append_row(['x', 'y']).append_row(['z', 't']).append_row(['m', 'n']).dataset
[[1, 2, 3], ['a', 'b', 'c'], ['x', 'y'], ['z', 't'], ['m', 'n']]
|
preprocessor/ListData.py
|
append_row
|
clokman/KFIR
|
python
|
def append_row(instance, new_row):
"\n Appends a row the ListData object's dataset variable.\n\n Returns:\n ListData object (instance)\n\n Examples:\n >>> my_listdata = ListData()\n >>> my_listdata.append_row([1,2,3]).dataset\n [[1, 2, 3]]\n\n >>> my_listdata.dataset\n [[1, 2, 3]]\n >>> my_listdata.append_row(['a','b','c']).dataset\n [[1, 2, 3], ['a', 'b', 'c']]\n\n >>> my_listdata.dataset\n [[1, 2, 3], ['a', 'b', 'c']]\n\n >>> my_listdata.append_row(['x', 'y']).append_row(['z', 't']).append_row(['m', 'n']).dataset\n [[1, 2, 3], ['a', 'b', 'c'], ['x', 'y'], ['z', 't'], ['m', 'n']]\n\n "
instance.dataset.append(new_row)
return instance
|
def clear_all(instance):
"\n Resets ListData object's dataset variable to its empty state.\n\n Returns:\n ListData object\n\n Examples:\n >>> my_listdata = ListData()\n >>> my_listdata.append_row([1,2,3]).dataset\n [[1, 2, 3]]\n >>> my_listdata.dataset\n [[1, 2, 3]]\n >>> my_listdata.clear_all().dataset\n []\n >>> my_listdata.dataset\n []\n "
instance.dataset = []
return instance
| 4,490,231,985,143,969,300
|
Resets ListData object's dataset variable to its empty state.
Returns:
ListData object
Examples:
>>> my_listdata = ListData()
>>> my_listdata.append_row([1,2,3]).dataset
[[1, 2, 3]]
>>> my_listdata.dataset
[[1, 2, 3]]
>>> my_listdata.clear_all().dataset
[]
>>> my_listdata.dataset
[]
|
preprocessor/ListData.py
|
clear_all
|
clokman/KFIR
|
python
|
def clear_all(instance):
"\n Resets ListData object's dataset variable to its empty state.\n\n Returns:\n ListData object\n\n Examples:\n >>> my_listdata = ListData()\n >>> my_listdata.append_row([1,2,3]).dataset\n [[1, 2, 3]]\n >>> my_listdata.dataset\n [[1, 2, 3]]\n >>> my_listdata.clear_all().dataset\n []\n >>> my_listdata.dataset\n []\n "
instance.dataset = []
return instance
|
def append_column(instance, new_column_values, new_column_name):
'\n\n :param new_column_values:\n :param new_column_name:\n :param dataset:\n :return: Changes the inputted dataset when ran (no need for assigning the output to a variable).\n :usage: append_column(NEW_COLUMN_VARIABLES_LIST, NEW_COLUMN_NAME_STRING, DATASET)\n\n :example:\n >>> my_list_data = ListData()\n >>> my_list_data.dataset = [[\'day\', \'month\'], [1, \'June\'], [3, \'May\'], [4, \'Jun\']]\n >>> years_column = [2149, 2150, 2151]\n >>> my_list_data.append_column(years_column, "year")\n >>> print(my_list_data.dataset) # changes the original data set without a need to assign the output to a new variable, etc.\n [[\'day\', \'month\', \'year\'], [1, \'June\', 2149], [3, \'May\', 2150], [4, \'Jun\', 2151]]\n '
if (new_column_name in instance.headers()):
        print(('ERROR: Header name already in dataset. Re-run all code up to this point or change header name.\nError occurred while processing new_column_name: ' + str(new_column_name)))
raise ValueError('Header name already in dataset. Please choose a different name. If name is correct, try re-running all code up to this point. (See console output for last header name processed.)')
if (len(new_column_values) != len(instance.data_rows())):
raise Exception(((((('Inputted column length must be equal to instance.dataset column length.\n' + 'new_column_values length: ') + str(len(new_column_values))) + '\n') + 'instance.data_rows() length: ') + str(len(instance.data_rows()))))
new_column = new_column_values
new_column.insert(0, new_column_name)
for (i, row) in enumerate(instance.dataset):
instance.dataset[i].append(new_column[i])
| -4,321,684,749,884,232,700
|
:param new_column_values:
:param new_column_name:
:param dataset:
:return: Changes the inputted dataset when run (no need to assign the output to a variable).
:usage: append_column(NEW_COLUMN_VARIABLES_LIST, NEW_COLUMN_NAME_STRING, DATASET)
:example:
>>> my_list_data = ListData()
>>> my_list_data.dataset = [['day', 'month'], [1, 'June'], [3, 'May'], [4, 'Jun']]
>>> years_column = [2149, 2150, 2151]
>>> my_list_data.append_column(years_column, "year")
>>> print(my_list_data.dataset) # changes the original data set without a need to assign the output to a new variable, etc.
[['day', 'month', 'year'], [1, 'June', 2149], [3, 'May', 2150], [4, 'Jun', 2151]]
|
preprocessor/ListData.py
|
append_column
|
clokman/KFIR
|
python
|
def append_column(instance, new_column_values, new_column_name):
'\n\n :param new_column_values:\n :param new_column_name:\n :param dataset:\n :return: Changes the inputted dataset when ran (no need for assigning the output to a variable).\n :usage: append_column(NEW_COLUMN_VARIABLES_LIST, NEW_COLUMN_NAME_STRING, DATASET)\n\n :example:\n >>> my_list_data = ListData()\n >>> my_list_data.dataset = [[\'day\', \'month\'], [1, \'June\'], [3, \'May\'], [4, \'Jun\']]\n >>> years_column = [2149, 2150, 2151]\n >>> my_list_data.append_column(years_column, "year")\n >>> print(my_list_data.dataset) # changes the original data set without a need to assign the output to a new variable, etc.\n [[\'day\', \'month\', \'year\'], [1, \'June\', 2149], [3, \'May\', 2150], [4, \'Jun\', 2151]]\n '
if (new_column_name in instance.headers()):
        print(('ERROR: Header name already in dataset. Re-run all code up to this point or change header name.\nError occurred while processing new_column_name: ' + str(new_column_name)))
raise ValueError('Header name already in dataset. Please choose a different name. If name is correct, try re-running all code up to this point. (See console output for last header name processed.)')
if (len(new_column_values) != len(instance.data_rows())):
raise Exception(((((('Inputted column length must be equal to instance.dataset column length.\n' + 'new_column_values length: ') + str(len(new_column_values))) + '\n') + 'instance.data_rows() length: ') + str(len(instance.data_rows()))))
new_column = new_column_values
new_column.insert(0, new_column_name)
for (i, row) in enumerate(instance.dataset):
instance.dataset[i].append(new_column[i])
|
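
A sketch of append_column above, same import assumption. The new column must contain exactly one value per data row; the header name is then prepended and each row is extended in place. Note that the header name is inserted into the very list passed as new_column_values, so the caller's list is modified as a side effect.

# Sketch: add a 'year' column to an existing dataset in place.
from preprocessor.ListData import ListData  # assumed import path

table = ListData()
table.dataset = [['day', 'month'], [1, 'June'], [3, 'May'], [4, 'Jun']]

years = [2149, 2150, 2151]                 # three values for three data rows
table.append_column(years, 'year')         # mutates both table.dataset and the 'years' list
print(table.dataset)  # [['day', 'month', 'year'], [1, 'June', 2149], [3, 'May', 2150], [4, 'Jun', 2151]]
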
def remove_column(instance, target_column_header):
"\n Removes a column from dataset.\n\n Args:\n target_column_header(str): Name of the column to be removed.\n\n Returns:\n Nothing; modifies dataset.\n\n Examples:\n >>> example_data = [['day', 'month', 'hour'], ['1', 'June', '12.00'], ['3', 'May', '11.00'],\n ... ['4', 'Jun', '15.00']]\n >>> my_list_data = ListData()\n >>> my_list_data.dataset = example_data\n >>> print(my_list_data.dataset)\n [['day', 'month', 'hour'], ['1', 'June', '12.00'], ['3', 'May', '11.00'], ['4', 'Jun', '15.00']]\n >>> my_list_data.remove_column('hour')\n >>> print(my_list_data.dataset)\n [['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]\n\n "
from preprocessor.legacy_functions.get_header_index import get_header_index
target_index = get_header_index(target_column_header, instance.dataset)
for (i, row) in enumerate(instance.dataset):
del instance.dataset[i][target_index]
| 7,980,447,242,423,446,000
|
Removes a column from dataset.
Args:
target_column_header(str): Name of the column to be removed.
Returns:
Nothing; modifies dataset.
Examples:
>>> example_data = [['day', 'month', 'hour'], ['1', 'June', '12.00'], ['3', 'May', '11.00'],
... ['4', 'Jun', '15.00']]
>>> my_list_data = ListData()
>>> my_list_data.dataset = example_data
>>> print(my_list_data.dataset)
[['day', 'month', 'hour'], ['1', 'June', '12.00'], ['3', 'May', '11.00'], ['4', 'Jun', '15.00']]
>>> my_list_data.remove_column('hour')
>>> print(my_list_data.dataset)
[['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
|
preprocessor/ListData.py
|
remove_column
|
clokman/KFIR
|
python
|
def remove_column(instance, target_column_header):
"\n Removes a column from dataset.\n\n Args:\n target_column_header(str): Name of the column to be removed.\n\n Returns:\n Nothing; modifies dataset.\n\n Examples:\n >>> example_data = [['day', 'month', 'hour'], ['1', 'June', '12.00'], ['3', 'May', '11.00'],\n ... ['4', 'Jun', '15.00']]\n >>> my_list_data = ListData()\n >>> my_list_data.dataset = example_data\n >>> print(my_list_data.dataset)\n [['day', 'month', 'hour'], ['1', 'June', '12.00'], ['3', 'May', '11.00'], ['4', 'Jun', '15.00']]\n >>> my_list_data.remove_column('hour')\n >>> print(my_list_data.dataset)\n [['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]\n\n "
from preprocessor.legacy_functions.get_header_index import get_header_index
target_index = get_header_index(target_column_header, instance.dataset)
for (i, row) in enumerate(instance.dataset):
del instance.dataset[i][target_index]
|
def remove_columns(instance, target_column_headers_list):
'\n Removes multiple columns from dataset. Is a variation of .remove_column() method to support efficient removal\n of multiple columns.\n\n Args:\n target_column_headers_list(list): A list of strings whose items are the header names of columns to\n be removed\n\n Returns:\n Nothing; modifies dataset.\n '
if (type(target_column_headers_list) == list):
pass
else:
raise Exception('The argument "target_column_headers_list" must be of "list" type.')
for each_column_header in target_column_headers_list:
instance.remove_column(each_column_header)
| 1,848,056,667,119,853,600
|
Removes multiple columns from the dataset. A variation of the .remove_column() method that supports efficient
removal of multiple columns.
Args:
target_column_headers_list(list): A list of strings whose items are the header names of columns to
be removed
Returns:
Nothing; modifies dataset.
|
preprocessor/ListData.py
|
remove_columns
|
clokman/KFIR
|
python
|
def remove_columns(instance, target_column_headers_list):
'\n Removes multiple columns from dataset. Is a variation of .remove_column() method to support efficient removal\n of multiple columns.\n\n Args:\n target_column_headers_list(list): A list of strings whose items are the header names of columns to\n be removed\n\n Returns:\n Nothing; modifies dataset.\n '
if (type(target_column_headers_list) == list):
pass
else:
raise Exception('The argument "target_column_headers_list" must be of "list" type.')
for each_column_header in target_column_headers_list:
instance.remove_column(each_column_header)
|
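
The remove_columns record above carries no doctest of its own; the sketch below (same import assumption) shows its expected use, with behaviour inferred from remove_column: each named header is looked up and its column is deleted from every row.

# Sketch: drop several columns by header name in one call.
from preprocessor.ListData import ListData  # assumed import path

table = ListData()
table.dataset = [['day', 'month', 'hour'], ['1', 'June', '12.00'], ['3', 'May', '11.00'], ['4', 'Jun', '15.00']]

table.remove_columns(['month', 'hour'])    # argument must be a list, otherwise an exception is raised
print(table.dataset)                       # [['day'], ['1'], ['3'], ['4']]
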
def replace_headers(instance, header_replacements_list):
"\n Replaces headers of a dataset.\n\n Args:\n header_replacements_list(list): A list of strings to replace headers\n\n Returns:\n Nothing; modifies the provided dataset.\n\n Examples:\n >>> example_data = [['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]\n >>> my_list_data = ListData()\n >>> my_list_data.dataset = example_data\n >>> print(my_list_data.dataset)\n [['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]\n >>> my_list_data.replace_headers(['d', 'm'])\n >>> print(my_list_data.dataset)\n [['d', 'm'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]\n "
if (len(header_replacements_list) == len(instance.headers())):
pass
else:
        raise Exception((((((('header_replacements_list should be the same length as instance.headers()' + '\n') + 'header_replacements_list length: ') + str(len(header_replacements_list))) + '\n') + 'instance.headers() length: ') + str(len(instance.headers()))))
for (i, each_header) in enumerate(header_replacements_list):
instance.dataset[0][i] = each_header
| 8,936,132,330,508,127,000
|
Replaces headers of a dataset.
Args:
header_replacements_list(list): A list of strings to replace headers
Returns:
Nothing; modifies the provided dataset.
Examples:
>>> example_data = [['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
>>> my_list_data = ListData()
>>> my_list_data.dataset = example_data
>>> print(my_list_data.dataset)
[['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
>>> my_list_data.replace_headers(['d', 'm'])
>>> print(my_list_data.dataset)
[['d', 'm'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]
|
preprocessor/ListData.py
|
replace_headers
|
clokman/KFIR
|
python
|
def replace_headers(instance, header_replacements_list):
"\n Replaces headers of a dataset.\n\n Args:\n header_replacements_list(list): A list of strings to replace headers\n\n Returns:\n Nothing; modifies the provided dataset.\n\n Examples:\n >>> example_data = [['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]\n >>> my_list_data = ListData()\n >>> my_list_data.dataset = example_data\n >>> print(my_list_data.dataset)\n [['day', 'month'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]\n >>> my_list_data.replace_headers(['d', 'm'])\n >>> print(my_list_data.dataset)\n [['d', 'm'], ['1', 'June'], ['3', 'May'], ['4', 'Jun']]\n "
if (len(header_replacements_list) == len(instance.headers())):
pass
else:
        raise Exception((((((('header_replacements_list should be the same length as instance.headers()' + '\n') + 'header_replacements_list length: ') + str(len(header_replacements_list))) + '\n') + 'instance.headers() length: ') + str(len(instance.headers()))))
for (i, each_header) in enumerate(header_replacements_list):
instance.dataset[0][i] = each_header
|
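
A small sketch for replace_headers above, same import assumption, also showing the length check: the replacement list must have exactly as many names as there are existing headers, otherwise an exception is raised.

# Sketch: rename headers in place; a wrong-length list is rejected.
from preprocessor.ListData import ListData  # assumed import path

table = ListData()
table.dataset = [['day', 'month'], ['1', 'June'], ['3', 'May']]

table.replace_headers(['d', 'm'])          # OK: two names replace two headers
print(table.dataset)                       # [['d', 'm'], ['1', 'June'], ['3', 'May']]

try:
    table.replace_headers(['only_one'])    # wrong length -> exception
except Exception as error:
    print('Rejected: %s' % error)
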
def append_row(self, new_row):
"\n Overrides the ListData method of the same name to change buffer state to 'not empty' after adding something to\n the buffer\n\n Args:\n new_row(list, bool, str, int): The object to be added as a new row to buffer\n\n Returns:\n ListData object (self)\n\n Examples:\n # initiate\n >>> my_buffer = ListBuffer()\n\n # empty?\n >>> my_buffer.is_empty\n True\n\n # simple add\n >>> a = my_buffer.append_row(['item 1', 'item 2', 'item 3']) # variable assignment is to suppress output\n\n # fluent interface\n >>> my_buffer.append_row(['item 4', 'item 5', 'item 6']). append_row(['item 7', 'item 8', 'item 9']).dataset\n [['item 1', 'item 2', 'item 3'], ['item 4', 'item 5', 'item 6'], ['item 7', 'item 8', 'item 9']]\n\n # empty now?\n >>> my_buffer.is_empty\n False\n\n "
ListData.append_row(self, new_row)
self.is_empty = False
return self
| -741,385,359,228,105,700
|
Overrides the ListData method of the same name to change buffer state to 'not empty' after adding something to
the buffer
Args:
new_row(list, bool, str, int): The object to be added as a new row to buffer
Returns:
ListData object (self)
Examples:
# initiate
>>> my_buffer = ListBuffer()
# empty?
>>> my_buffer.is_empty
True
# simple add
>>> a = my_buffer.append_row(['item 1', 'item 2', 'item 3']) # variable assignment is to suppress output
# fluent interface
>>> my_buffer.append_row(['item 4', 'item 5', 'item 6']). append_row(['item 7', 'item 8', 'item 9']).dataset
[['item 1', 'item 2', 'item 3'], ['item 4', 'item 5', 'item 6'], ['item 7', 'item 8', 'item 9']]
# empty now?
>>> my_buffer.is_empty
False
|
preprocessor/ListData.py
|
append_row
|
clokman/KFIR
|
python
|
def append_row(self, new_row):
"\n Overrides the ListData method of the same name to change buffer state to 'not empty' after adding something to\n the buffer\n\n Args:\n new_row(list, bool, str, int): The object to be added as a new row to buffer\n\n Returns:\n ListData object (self)\n\n Examples:\n # initiate\n >>> my_buffer = ListBuffer()\n\n # empty?\n >>> my_buffer.is_empty\n True\n\n # simple add\n >>> a = my_buffer.append_row(['item 1', 'item 2', 'item 3']) # variable assignment is to suppress output\n\n # fluent interface\n >>> my_buffer.append_row(['item 4', 'item 5', 'item 6']). append_row(['item 7', 'item 8', 'item 9']).dataset\n [['item 1', 'item 2', 'item 3'], ['item 4', 'item 5', 'item 6'], ['item 7', 'item 8', 'item 9']]\n\n # empty now?\n >>> my_buffer.is_empty\n False\n\n "
ListData.append_row(self, new_row)
self.is_empty = False
return self
|
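
A sketch of the buffer behaviour described above, assuming ListBuffer is importable from the same module as ListData (an assumption based on the shared path field): appending flips is_empty to False, and the overridden append_row returns the buffer itself, so calls can be chained.

# Sketch: ListBuffer tracks an is_empty flag and supports a fluent interface.
from preprocessor.ListData import ListBuffer  # assumed import path

buffer = ListBuffer()
print(buffer.is_empty)                          # True

buffer.append_row(['item 1', 'item 2']).append_row(['item 3', 'item 4'])
print(buffer.is_empty)                          # False
print(buffer.dataset)                           # [['item 1', 'item 2'], ['item 3', 'item 4']]
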
def is_each_row_balanced(self, exclude_special_rows_of_syntax=None):
'\n Checks whether each row in buffer is balanced (i.e., does not have unmatched parantheses, brackets, etc). Can\n exclude special row types (e.g., comment) from evaluation.\n\n Args:\n exclude_special_rows_of_syntax(str): specifies what type of rows to exclude from evaluation\n (e.g., comment rows). Uses predefined syntax settings per specified syntax (e.g., \'bibtex\').\n\n Keyword Args:\n - bibtex (exclude_special_rows_of_syntax): sets evaluation exclusion criteria for bibtex syntax\n\n Returns:\n boolean\n\n Examples:\n >>> # an unbalanced row is present\n >>> my_buffer = ListBuffer()\n >>> my_buffer.append_row([\'a\', \'b\', \'c\']).append_row([\'d\', \'e\', \'f\']).dataset\n [[\'a\', \'b\', \'c\'], [\'d\', \'e\', \'f\']]\n >>> my_buffer.append_row([\'g\', \'h\' , \'>\']) .is_each_row_balanced()\n False\n\n >>> # single row from a bib file\n >>> my_buffer = ListBuffer()\n >>> my_buffer.append_row(\' year = "2017",\') .is_each_row_balanced()\n True\n\n >>> # bibtex entry start (no exception vs. exception)\n >>> my_buffer.append_row(\'@article{96d9add3e2f44e8abbf030170689bc30,\') .is_each_row_balanced()\n False\n >>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax=\'bibtex\')\n True\n\n >>> # bibtex comment (no exception vs. exception)\n >>> my_buffer = ListBuffer()\n >>> my_buffer.append_row(\'% This is a comment with an unbalanced characters }]>\') .is_each_row_balanced()\n False\n >>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax=\'bibtex\')\n True\n\n >>> # a full bibtex entry with an unbalanced curly bracket at title field\n >>> my_buffer = ListBuffer()\n >>> my_buffer.dataset = [\'@book{a82caf00e1a143759c7f5543b6c84ea5,\', \'title = "{Knowledge Representation for Health Care (AIME 2015 International Joint Workshop, KR4HC/ProHealth 2015)",\', \'author = "D Riano and R. Lenz and S Miksch and M Peleg and M. Reichert and {ten Teije}, A.C.M.",\', \'year = "2015",\', \'doi = "10.1007/978-3-319-26585-8",\', \'isbn = "9783319265841",\', \'series = "LNAI",\', \'publisher = "Springer",\', \'number = "9485",\', \'}\', \'\']\n >>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax=\'bibtex\') # error\n False\n >>> # the same entry with unbalanced curly bracket removed\n >>> my_buffer.dataset = [\'@book{a82caf00e1a143759c7f5543b6c84ea5,\', \'title = "Knowledge Representation for Health Care (AIME 2015 International Joint Workshop, KR4HC/ProHealth 2015)",\', \'author = "D Riano and R. Lenz and S Miksch and M Peleg and M. Reichert and {ten Teije}, A.C.M.",\', \'year = "2015",\', \'doi = "10.1007/978-3-319-26585-8",\', \'isbn = "9783319265841",\', \'series = "LNAI",\', \'publisher = "Springer",\', \'number = "9485",\', \'}\', \'\']\n >>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax=\'bibtex\')\n True\n\n '
from preprocessor.string_tools import String
buffer = self.dataset
is_balanced_log = []
for each_row in buffer:
each_row = String(str(each_row))
if (not each_row.is_balanced()):
if (exclude_special_rows_of_syntax == 'bibtex'):
if (each_row.is_line_type('bibtex', 'start of entry') or each_row.is_line_type('bibtex', 'end of entry') or each_row.is_line_type('bibtex', 'comment')):
is_balanced_log.append(True)
else:
is_balanced_log.append(False)
else:
is_balanced_log.append(False)
else:
is_balanced_log.append(True)
if (False in is_balanced_log):
return False
else:
return True
| -7,911,168,897,088,828,000
|
Checks whether each row in the buffer is balanced (i.e., does not have unmatched parentheses, brackets, etc.). Can
exclude special row types (e.g., comment) from evaluation.
Args:
exclude_special_rows_of_syntax(str): specifies what type of rows to exclude from evaluation
(e.g., comment rows). Uses predefined syntax settings per specified syntax (e.g., 'bibtex').
Keyword Args:
- bibtex (exclude_special_rows_of_syntax): sets evaluation exclusion criteria for bibtex syntax
Returns:
boolean
Examples:
>>> # an unbalanced row is present
>>> my_buffer = ListBuffer()
>>> my_buffer.append_row(['a', 'b', 'c']).append_row(['d', 'e', 'f']).dataset
[['a', 'b', 'c'], ['d', 'e', 'f']]
>>> my_buffer.append_row(['g', 'h' , '>']) .is_each_row_balanced()
False
>>> # single row from a bib file
>>> my_buffer = ListBuffer()
>>> my_buffer.append_row(' year = "2017",') .is_each_row_balanced()
True
>>> # bibtex entry start (no exception vs. exception)
>>> my_buffer.append_row('@article{96d9add3e2f44e8abbf030170689bc30,') .is_each_row_balanced()
False
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex')
True
>>> # bibtex comment (no exception vs. exception)
>>> my_buffer = ListBuffer()
>>> my_buffer.append_row('% This is a comment with an unbalanced characters }]>') .is_each_row_balanced()
False
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex')
True
>>> # a full bibtex entry with an unbalanced curly bracket at title field
>>> my_buffer = ListBuffer()
>>> my_buffer.dataset = ['@book{a82caf00e1a143759c7f5543b6c84ea5,', 'title = "{Knowledge Representation for Health Care (AIME 2015 International Joint Workshop, KR4HC/ProHealth 2015)",', 'author = "D Riano and R. Lenz and S Miksch and M Peleg and M. Reichert and {ten Teije}, A.C.M.",', 'year = "2015",', 'doi = "10.1007/978-3-319-26585-8",', 'isbn = "9783319265841",', 'series = "LNAI",', 'publisher = "Springer",', 'number = "9485",', '}', '']
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex') # error
False
>>> # the same entry with unbalanced curly bracket removed
>>> my_buffer.dataset = ['@book{a82caf00e1a143759c7f5543b6c84ea5,', 'title = "Knowledge Representation for Health Care (AIME 2015 International Joint Workshop, KR4HC/ProHealth 2015)",', 'author = "D Riano and R. Lenz and S Miksch and M Peleg and M. Reichert and {ten Teije}, A.C.M.",', 'year = "2015",', 'doi = "10.1007/978-3-319-26585-8",', 'isbn = "9783319265841",', 'series = "LNAI",', 'publisher = "Springer",', 'number = "9485",', '}', '']
>>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax='bibtex')
True
|
preprocessor/ListData.py
|
is_each_row_balanced
|
clokman/KFIR
|
python
|
def is_each_row_balanced(self, exclude_special_rows_of_syntax=None):
'\n Checks whether each row in buffer is balanced (i.e., does not have unmatched parantheses, brackets, etc). Can\n exclude special row types (e.g., comment) from evaluation.\n\n Args:\n exclude_special_rows_of_syntax(str): specifies what type of rows to exclude from evaluation\n (e.g., comment rows). Uses predefined syntax settings per specified syntax (e.g., \'bibtex\').\n\n Keyword Args:\n - bibtex (exclude_special_rows_of_syntax): sets evaluation exclusion criteria for bibtex syntax\n\n Returns:\n boolean\n\n Examples:\n >>> # an unbalanced row is present\n >>> my_buffer = ListBuffer()\n >>> my_buffer.append_row([\'a\', \'b\', \'c\']).append_row([\'d\', \'e\', \'f\']).dataset\n [[\'a\', \'b\', \'c\'], [\'d\', \'e\', \'f\']]\n >>> my_buffer.append_row([\'g\', \'h\' , \'>\']) .is_each_row_balanced()\n False\n\n >>> # single row from a bib file\n >>> my_buffer = ListBuffer()\n >>> my_buffer.append_row(\' year = "2017",\') .is_each_row_balanced()\n True\n\n >>> # bibtex entry start (no exception vs. exception)\n >>> my_buffer.append_row(\'@article{96d9add3e2f44e8abbf030170689bc30,\') .is_each_row_balanced()\n False\n >>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax=\'bibtex\')\n True\n\n >>> # bibtex comment (no exception vs. exception)\n >>> my_buffer = ListBuffer()\n >>> my_buffer.append_row(\'% This is a comment with an unbalanced characters }]>\') .is_each_row_balanced()\n False\n >>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax=\'bibtex\')\n True\n\n >>> # a full bibtex entry with an unbalanced curly bracket at title field\n >>> my_buffer = ListBuffer()\n >>> my_buffer.dataset = [\'@book{a82caf00e1a143759c7f5543b6c84ea5,\', \'title = "{Knowledge Representation for Health Care (AIME 2015 International Joint Workshop, KR4HC/ProHealth 2015)",\', \'author = "D Riano and R. Lenz and S Miksch and M Peleg and M. Reichert and {ten Teije}, A.C.M.",\', \'year = "2015",\', \'doi = "10.1007/978-3-319-26585-8",\', \'isbn = "9783319265841",\', \'series = "LNAI",\', \'publisher = "Springer",\', \'number = "9485",\', \'}\', \'\']\n >>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax=\'bibtex\') # error\n False\n >>> # the same entry with unbalanced curly bracket removed\n >>> my_buffer.dataset = [\'@book{a82caf00e1a143759c7f5543b6c84ea5,\', \'title = "Knowledge Representation for Health Care (AIME 2015 International Joint Workshop, KR4HC/ProHealth 2015)",\', \'author = "D Riano and R. Lenz and S Miksch and M Peleg and M. Reichert and {ten Teije}, A.C.M.",\', \'year = "2015",\', \'doi = "10.1007/978-3-319-26585-8",\', \'isbn = "9783319265841",\', \'series = "LNAI",\', \'publisher = "Springer",\', \'number = "9485",\', \'}\', \'\']\n >>> my_buffer.is_each_row_balanced(exclude_special_rows_of_syntax=\'bibtex\')\n True\n\n '
from preprocessor.string_tools import String
buffer = self.dataset
is_balanced_log = []
for each_row in buffer:
each_row = String(str(each_row))
if (not each_row.is_balanced()):
if (exclude_special_rows_of_syntax == 'bibtex'):
if (each_row.is_line_type('bibtex', 'start of entry') or each_row.is_line_type('bibtex', 'end of entry') or each_row.is_line_type('bibtex', 'comment')):
is_balanced_log.append(True)
else:
is_balanced_log.append(False)
else:
is_balanced_log.append(False)
else:
is_balanced_log.append(True)
if (False in is_balanced_log):
return False
else:
return True
|
def is_parsable(self, syntax_to_parse_by='bibtex'):
'\n\n Args:\n syntax_to_parse_by:\n\n Returns:\n boolean\n\n Examples:\n # bibtex entry with no issues\n >>> my_buffer = ListBuffer()\n >>> my_buffer.dataset = [\'@article{5f3ed8a5037f4837be0c7e8e5a1f0948,\',\n ... \'title = "New Horizons biedt eindelijk goede blik op Pluto",\',\n ... \'author = "B. Andeweg",\',\n ... \'year = "2015",\',\n ... \'month = "7",\',\n ... \'journal = "Volkskrant",\',\n ... \'}\']\n >>> my_buffer.is_parsable()\n True\n\n # unmatched " in author field\n >>> my_buffer = ListBuffer()\n >>> my_buffer.dataset = [\'@article{5f3ed8a5037f4837be0c7e8e5a1f0948,\',\n ... \'title = "New Horizons biedt eindelijk goede blik op Pluto",\',\n ... \'author = "B. "Andeweg",\',\n ... \'year = "2015",\',\n ... \'month = "7",\',\n ... \'journal = "Volkskrant",\',\n ... \'}\']\n >>> my_buffer.is_parsable()\n False\n '
if (syntax_to_parse_by == 'bibtex'):
from pybtex.database.input import bibtex
parser = bibtex.Parser()
with open('temp_buffer_dump.bib', 'w', encoding='utf8') as temp_buffer_dump_file:
for each_buffer_row in self.dataset:
print(each_buffer_row, file=temp_buffer_dump_file)
with open('temp_buffer_dump.bib', encoding='utf8') as temp_buffer_dump_file:
try:
parsed_file = parser.parse_file(temp_buffer_dump_file)
return True
except:
return False
| -4,502,546,403,636,217,300
|
Args:
syntax_to_parse_by:
Returns:
boolean
Examples:
# bibtex entry with no issues
>>> my_buffer = ListBuffer()
>>> my_buffer.dataset = ['@article{5f3ed8a5037f4837be0c7e8e5a1f0948,',
... 'title = "New Horizons biedt eindelijk goede blik op Pluto",',
... 'author = "B. Andeweg",',
... 'year = "2015",',
... 'month = "7",',
... 'journal = "Volkskrant",',
... '}']
>>> my_buffer.is_parsable()
True
# unmatched " in author field
>>> my_buffer = ListBuffer()
>>> my_buffer.dataset = ['@article{5f3ed8a5037f4837be0c7e8e5a1f0948,',
... 'title = "New Horizons biedt eindelijk goede blik op Pluto",',
... 'author = "B. "Andeweg",',
... 'year = "2015",',
... 'month = "7",',
... 'journal = "Volkskrant",',
... '}']
>>> my_buffer.is_parsable()
False
|
preprocessor/ListData.py
|
is_parsable
|
clokman/KFIR
|
python
|
def is_parsable(self, syntax_to_parse_by='bibtex'):
'\n\n Args:\n syntax_to_parse_by:\n\n Returns:\n boolean\n\n Examples:\n # bibtex entry with no issues\n >>> my_buffer = ListBuffer()\n >>> my_buffer.dataset = [\'@article{5f3ed8a5037f4837be0c7e8e5a1f0948,\',\n ... \'title = "New Horizons biedt eindelijk goede blik op Pluto",\',\n ... \'author = "B. Andeweg",\',\n ... \'year = "2015",\',\n ... \'month = "7",\',\n ... \'journal = "Volkskrant",\',\n ... \'}\']\n >>> my_buffer.is_parsable()\n True\n\n # unmatched " in author field\n >>> my_buffer = ListBuffer()\n >>> my_buffer.dataset = [\'@article{5f3ed8a5037f4837be0c7e8e5a1f0948,\',\n ... \'title = "New Horizons biedt eindelijk goede blik op Pluto",\',\n ... \'author = "B. "Andeweg",\',\n ... \'year = "2015",\',\n ... \'month = "7",\',\n ... \'journal = "Volkskrant",\',\n ... \'}\']\n >>> my_buffer.is_parsable()\n False\n '
if (syntax_to_parse_by == 'bibtex'):
from pybtex.database.input import bibtex
parser = bibtex.Parser()
with open('temp_buffer_dump.bib', 'w', encoding='utf8') as temp_buffer_dump_file:
for each_buffer_row in self.dataset:
print(each_buffer_row, file=temp_buffer_dump_file)
with open('temp_buffer_dump.bib', encoding='utf8') as temp_buffer_dump_file:
try:
parsed_file = parser.parse_file(temp_buffer_dump_file)
return True
except:
return False
|
def _id(thing):
'Quote string if needed for it to be a valid identifier.'
if isinstance(thing, AspObject):
return thing
elif isinstance(thing, bool):
return ('"%s"' % str(thing))
elif isinstance(thing, int):
return str(thing)
else:
return ('"%s"' % str(thing))
| -9,163,242,725,028,129,000
|
Quote string if needed for it to be a valid identifier.
|
lib/spack/spack/solver/asp.py
|
_id
|
AaltoSciComp/spack
|
python
|
def _id(thing):
if isinstance(thing, AspObject):
return thing
elif isinstance(thing, bool):
return ('"%s"' % str(thing))
elif isinstance(thing, int):
return str(thing)
else:
return ('"%s"' % str(thing))
|
def extend_flag_list(flag_list, new_flags):
'Extend a list of flags, preserving order and precedence.\n\n Add new_flags at the end of flag_list. If any flags in new_flags are\n already in flag_list, they are moved to the end so that they take\n higher precedence on the compile line.\n\n '
for flag in new_flags:
if (flag in flag_list):
flag_list.remove(flag)
flag_list.append(flag)
| 5,304,618,090,113,952,000
|
Extend a list of flags, preserving order and precedence.
Add new_flags at the end of flag_list. If any flags in new_flags are
already in flag_list, they are moved to the end so that they take
higher precedence on the compile line.
|
lib/spack/spack/solver/asp.py
|
extend_flag_list
|
AaltoSciComp/spack
|
python
|
def extend_flag_list(flag_list, new_flags):
'Extend a list of flags, preserving order and precedence.\n\n Add new_flags at the end of flag_list. If any flags in new_flags are\n already in flag_list, they are moved to the end so that they take\n higher precedence on the compile line.\n\n '
for flag in new_flags:
if (flag in flag_list):
flag_list.remove(flag)
flag_list.append(flag)
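A short runnable demonstration of the precedence behavior described in the docstring; the function body is copied from above and the flag values are made up.

def extend_flag_list(flag_list, new_flags):
    for flag in new_flags:
        if flag in flag_list:
            flag_list.remove(flag)
        flag_list.append(flag)

flags = ['-O2', '-g']
extend_flag_list(flags, ['-g', '-fPIC'])
print(flags)  # ['-O2', '-g', '-fPIC'] -- '-g' moved to the end, so it takes precedence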
|
def check_same_flags(flag_dict_1, flag_dict_2):
'Return True if flag dicts contain the same flags regardless of order.'
types = set(flag_dict_1.keys()).union(set(flag_dict_2.keys()))
for t in types:
values1 = set(flag_dict_1.get(t, []))
values2 = set(flag_dict_2.get(t, []))
assert (values1 == values2)
| 1,949,529,931,564,383,700
|
Return True if flag dicts contain the same flags regardless of order.
|
lib/spack/spack/solver/asp.py
|
check_same_flags
|
AaltoSciComp/spack
|
python
|
def check_same_flags(flag_dict_1, flag_dict_2):
types = set(flag_dict_1.keys()).union(set(flag_dict_2.keys()))
for t in types:
values1 = set(flag_dict_1.get(t, []))
values2 = set(flag_dict_2.get(t, []))
assert (values1 == values2)
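A tiny self-contained usage example of the order-insensitive comparison; the flag dicts are illustrative.

def check_same_flags(flag_dict_1, flag_dict_2):
    types = set(flag_dict_1.keys()).union(set(flag_dict_2.keys()))
    for t in types:
        assert set(flag_dict_1.get(t, [])) == set(flag_dict_2.get(t, []))

check_same_flags({'cflags': ['-O2', '-g']}, {'cflags': ['-g', '-O2']})  # order ignored: no error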
|
def check_packages_exist(specs):
'Ensure all packages mentioned in specs exist.'
repo = spack.repo.path
for spec in specs:
for s in spec.traverse():
try:
check_passed = (repo.exists(s.name) or repo.is_virtual(s.name))
except Exception as e:
msg = 'Cannot find package: {0}'.format(str(e))
check_passed = False
tty.debug(msg)
if (not check_passed):
raise spack.repo.UnknownPackageError(str(s.fullname))
| 5,778,717,062,947,235,000
|
Ensure all packages mentioned in specs exist.
|
lib/spack/spack/solver/asp.py
|
check_packages_exist
|
AaltoSciComp/spack
|
python
|
def check_packages_exist(specs):
repo = spack.repo.path
for spec in specs:
for s in spec.traverse():
try:
check_passed = (repo.exists(s.name) or repo.is_virtual(s.name))
except Exception as e:
msg = 'Cannot find package: {0}'.format(str(e))
check_passed = False
tty.debug(msg)
if (not check_passed):
raise spack.repo.UnknownPackageError(str(s.fullname))
|
def solve(specs, dump=(), models=0, timers=False, stats=False, tests=False):
'Solve for a stable model of specs.\n\n Arguments:\n specs (list): list of Specs to solve.\n dump (tuple): what to dump\n models (int): number of models to search (default: 0)\n '
driver = PyclingoDriver()
if ('asp' in dump):
driver.out = sys.stdout
for root in specs:
for s in root.traverse():
if s.virtual:
continue
spack.spec.Spec.ensure_valid_variants(s)
setup = SpackSolverSetup()
return driver.solve(setup, specs, dump, models, timers, stats, tests)
| 3,040,711,511,748,255,000
|
Solve for a stable model of specs.
Arguments:
specs (list): list of Specs to solve.
dump (tuple): what to dump
models (int): number of models to search (default: 0)
|
lib/spack/spack/solver/asp.py
|
solve
|
AaltoSciComp/spack
|
python
|
def solve(specs, dump=(), models=0, timers=False, stats=False, tests=False):
'Solve for a stable model of specs.\n\n Arguments:\n specs (list): list of Specs to solve.\n dump (tuple): what to dump\n models (int): number of models to search (default: 0)\n '
driver = PyclingoDriver()
if ('asp' in dump):
driver.out = sys.stdout
for root in specs:
for s in root.traverse():
if s.virtual:
continue
spack.spec.Spec.ensure_valid_variants(s)
setup = SpackSolverSetup()
return driver.solve(setup, specs, dump, models, timers, stats, tests)
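A hedged usage sketch, assuming a working spack checkout on sys.path; the spec name is illustrative, and result handling relies on the Result.specs property shown below.

import spack.spec
from spack.solver.asp import solve

# Ask for an optimal model and echo the generated ASP program to stdout.
result = solve([spack.spec.Spec('zlib')], dump=('asp',), models=0)
if result.satisfiable:
    for concrete in result.specs:
        print(concrete)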
|
@property
def specs(self):
'List of concretized specs satisfying the initial\n abstract request.\n '
if self._concrete_specs:
return self._concrete_specs
msg = 'cannot compute specs ["satisfiable" is not True ]'
assert self.satisfiable, msg
self._concrete_specs = []
best = min(self.answers)
(opt, _, answer) = best
for input_spec in self.abstract_specs:
key = input_spec.name
if input_spec.virtual:
providers = [spec.name for spec in answer.values() if spec.package.provides(key)]
key = providers[0]
self._concrete_specs.append(answer[key])
return self._concrete_specs
| 6,606,093,366,351,177,000
|
List of concretized specs satisfying the initial
abstract request.
|
lib/spack/spack/solver/asp.py
|
specs
|
AaltoSciComp/spack
|
python
|
@property
def specs(self):
'List of concretized specs satisfying the initial\n abstract request.\n '
if self._concrete_specs:
return self._concrete_specs
msg = 'cannot compute specs ["satisfiable" is not True ]'
assert self.satisfiable, msg
self._concrete_specs = []
best = min(self.answers)
(opt, _, answer) = best
for input_spec in self.abstract_specs:
key = input_spec.name
if input_spec.virtual:
providers = [spec.name for spec in answer.values() if spec.package.provides(key)]
key = providers[0]
self._concrete_specs.append(answer[key])
return self._concrete_specs
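A standalone illustration of how min(self.answers) selects the best model: each answer is a (optimization criteria, model index, answer) tuple (the middle element is not unpacked by name in the source), and tuple comparison is lexicographic, so the lowest penalty vector wins. The values below are made up.

answers = [
    ([3, 1], 0, {'zlib': 'zlib@1.2.11'}),
    ([1, 5], 1, {'zlib': 'zlib@1.2.13'}),
]
opt, _, answer = min(answers)
print(opt, answer)  # [1, 5] {'zlib': 'zlib@1.2.13'}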
|
def __init__(self, cores=True, asp=None):
'Driver for the Python clingo interface.\n\n Arguments:\n cores (bool): whether to generate unsatisfiable cores for better\n error reporting.\n asp (file-like): optional stream to write a text-based ASP program\n for debugging or verification.\n '
global clingo
if (not clingo):
with spack.bootstrap.ensure_bootstrap_configuration():
spack.bootstrap.ensure_clingo_importable_or_raise()
import clingo
self.out = (asp or llnl.util.lang.Devnull())
self.cores = cores
| -6,134,784,348,510,137,000
|
Driver for the Python clingo interface.
Arguments:
cores (bool): whether to generate unsatisfiable cores for better
error reporting.
asp (file-like): optional stream to write a text-based ASP program
for debugging or verification.
|
lib/spack/spack/solver/asp.py
|
__init__
|
AaltoSciComp/spack
|
python
|
def __init__(self, cores=True, asp=None):
'Driver for the Python clingo interface.\n\n Arguments:\n cores (bool): whether to generate unsatisfiable cores for better\n error reporting.\n asp (file-like): optional stream to write a text-based ASP program\n for debugging or verification.\n '
global clingo
if (not clingo):
with spack.bootstrap.ensure_bootstrap_configuration():
spack.bootstrap.ensure_clingo_importable_or_raise()
import clingo
self.out = (asp or llnl.util.lang.Devnull())
self.cores = cores
|
def fact(self, head):
'ASP fact (a rule without a body).'
symbol = (head.symbol() if hasattr(head, 'symbol') else head)
self.out.write(('%s.\n' % str(symbol)))
atom = self.backend.add_atom(symbol)
self.backend.add_rule([atom], [], choice=self.cores)
if self.cores:
self.assumptions.append(atom)
| 7,479,646,998,659,304,000
|
ASP fact (a rule without a body).
|
lib/spack/spack/solver/asp.py
|
fact
|
AaltoSciComp/spack
|
python
|
def fact(self, head):
symbol = (head.symbol() if hasattr(head, 'symbol') else head)
self.out.write(('%s.\n' % str(symbol)))
atom = self.backend.add_atom(symbol)
self.backend.add_rule([atom], [], choice=self.cores)
if self.cores:
self.assumptions.append(atom)
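A minimal sketch of the textual side of fact() only: each head is rendered as '<symbol>.' on its own line. The clingo backend calls (add_atom / add_rule) and the assumption tracking are omitted, and the stand-in symbol class is invented for the example.

import sys

class FakeFunction:
    def __init__(self, name, *args):
        self.name, self.args = name, args
    def symbol(self):
        return '%s(%s)' % (self.name, ', '.join('"%s"' % a for a in self.args))

def fact_text(head, out=sys.stdout):
    # Mirror the text-emission step: use .symbol() when available, else the head itself.
    symbol = head.symbol() if hasattr(head, 'symbol') else head
    out.write('%s.\n' % str(symbol))

fact_text(FakeFunction('version_declared', 'zlib', '1.2.13'))
# prints: version_declared("zlib", "1.2.13").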
|
def pkg_version_rules(self, pkg):
'Output declared versions of a package.\n\n This uses self.possible_versions so that we include any versions\n that arise from a spec.\n '
def key_fn(version):
return (version.origin, version.idx)
pkg = packagize(pkg)
declared_versions = self.declared_versions[pkg.name]
most_to_least_preferred = sorted(declared_versions, key=key_fn)
for (weight, declared_version) in enumerate(most_to_least_preferred):
self.gen.fact(fn.version_declared(pkg.name, declared_version.version, weight, version_origin_str[declared_version.origin]))
deprecated = self.deprecated_versions[pkg.name]
for v in sorted(deprecated):
self.gen.fact(fn.deprecated_version(pkg.name, v))
| 1,797,884,597,056,406,300
|
Output declared versions of a package.
This uses self.possible_versions so that we include any versions
that arise from a spec.
|
lib/spack/spack/solver/asp.py
|
pkg_version_rules
|
AaltoSciComp/spack
|
python
|
def pkg_version_rules(self, pkg):
'Output declared versions of a package.\n\n This uses self.possible_versions so that we include any versions\n that arise from a spec.\n '
def key_fn(version):
return (version.origin, version.idx)
pkg = packagize(pkg)
declared_versions = self.declared_versions[pkg.name]
most_to_least_preferred = sorted(declared_versions, key=key_fn)
for (weight, declared_version) in enumerate(most_to_least_preferred):
self.gen.fact(fn.version_declared(pkg.name, declared_version.version, weight, version_origin_str[declared_version.origin]))
deprecated = self.deprecated_versions[pkg.name]
for v in sorted(deprecated):
self.gen.fact(fn.deprecated_version(pkg.name, v))
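A standalone sketch of the weighting step: declared versions are sorted by (origin, idx) and their position in that order becomes the solver weight, so a lower weight means a more preferred version. The namedtuple and the origin codes are illustrative stand-ins for spack's DeclaredVersion and version_provenance.

from collections import namedtuple

DeclaredVersion = namedtuple('DeclaredVersion', ['version', 'idx', 'origin'])

declared = [
    DeclaredVersion('1.2.13', 0, 1),  # origin 1: e.g. the package recipe
    DeclaredVersion('1.2.11', 1, 1),
    DeclaredVersion('1.2.12', 0, 0),  # origin 0: e.g. a preference, ranked ahead of the recipe
]
for weight, dv in enumerate(sorted(declared, key=lambda v: (v.origin, v.idx))):
    print('version_declared("zlib", "%s", %d).' % (dv.version, weight))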
|
def spec_versions(self, spec):
"Return list of clauses expressing spec's version constraints."
spec = specify(spec)
assert spec.name
if spec.concrete:
return [fn.version(spec.name, spec.version)]
if (spec.versions == spack.version.ver(':')):
return []
self.version_constraints.add((spec.name, spec.versions))
return [fn.version_satisfies(spec.name, spec.versions)]
| 5,945,572,827,840,501,000
|
Return list of clauses expressing spec's version constraints.
|
lib/spack/spack/solver/asp.py
|
spec_versions
|
AaltoSciComp/spack
|
python
|
def spec_versions(self, spec):
spec = specify(spec)
assert spec.name
if spec.concrete:
return [fn.version(spec.name, spec.version)]
if (spec.versions == spack.version.ver(':')):
return []
self.version_constraints.add((spec.name, spec.versions))
return [fn.version_satisfies(spec.name, spec.versions)]
|
def available_compilers(self):
'Facts about available compilers.'
self.gen.h2('Available compilers')
compilers = self.possible_compilers
compiler_versions = collections.defaultdict((lambda : set()))
for compiler in compilers:
compiler_versions[compiler.name].add(compiler.version)
for compiler in sorted(compiler_versions):
for v in sorted(compiler_versions[compiler]):
self.gen.fact(fn.compiler_version(compiler, v))
self.gen.newline()
| -4,816,124,531,095,553,000
|
Facts about available compilers.
|
lib/spack/spack/solver/asp.py
|
available_compilers
|
AaltoSciComp/spack
|
python
|
def available_compilers(self):
self.gen.h2('Available compilers')
compilers = self.possible_compilers
compiler_versions = collections.defaultdict((lambda : set()))
for compiler in compilers:
compiler_versions[compiler.name].add(compiler.version)
for compiler in sorted(compiler_versions):
for v in sorted(compiler_versions[compiler]):
self.gen.fact(fn.compiler_version(compiler, v))
self.gen.newline()
|
def compiler_defaults(self):
'Set compiler defaults, given a list of possible compilers.'
self.gen.h2('Default compiler preferences')
compiler_list = self.possible_compilers.copy()
compiler_list = sorted(compiler_list, key=(lambda x: (x.name, x.version)), reverse=True)
ppk = spack.package_prefs.PackagePrefs('all', 'compiler', all=False)
matches = sorted(compiler_list, key=ppk)
for (i, cspec) in enumerate(matches):
f = fn.default_compiler_preference(cspec.name, cspec.version, i)
self.gen.fact(f)
for entry in spack.compilers.all_compilers_config():
compiler_entry = entry['compiler']
cspec = spack.spec.CompilerSpec(compiler_entry['spec'])
if (not compiler_entry.get('target', None)):
continue
self.gen.fact(fn.compiler_supports_target(cspec.name, cspec.version, compiler_entry['target']))
| 2,857,875,852,841,491,500
|
Set compiler defaults, given a list of possible compilers.
|
lib/spack/spack/solver/asp.py
|
compiler_defaults
|
AaltoSciComp/spack
|
python
|
def compiler_defaults(self):
self.gen.h2('Default compiler preferences')
compiler_list = self.possible_compilers.copy()
compiler_list = sorted(compiler_list, key=(lambda x: (x.name, x.version)), reverse=True)
ppk = spack.package_prefs.PackagePrefs('all', 'compiler', all=False)
matches = sorted(compiler_list, key=ppk)
for (i, cspec) in enumerate(matches):
f = fn.default_compiler_preference(cspec.name, cspec.version, i)
self.gen.fact(f)
for entry in spack.compilers.all_compilers_config():
compiler_entry = entry['compiler']
cspec = spack.spec.CompilerSpec(compiler_entry['spec'])
if (not compiler_entry.get('target', None)):
continue
self.gen.fact(fn.compiler_supports_target(cspec.name, cspec.version, compiler_entry['target']))
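A sketch of the two-stage ordering with plain tuples in place of CompilerSpec objects: compilers are first sorted newest-first, then stably re-sorted by a preference key, so configured preferences win and ties fall back to the newest version. The preference dict is an invented stand-in for PackagePrefs.

compilers = [('gcc', (9, 4)), ('clang', (14, 0)), ('gcc', (12, 2))]
newest_first = sorted(compilers, reverse=True)
preference = {'clang': 0, 'gcc': 1}  # illustrative: clang preferred over gcc
for i, (name, version) in enumerate(sorted(newest_first, key=lambda c: preference[c[0]])):
    print('default_compiler_preference("%s", "%s", %d).' % (name, '.'.join(map(str, version)), i))
# default_compiler_preference("clang", "14.0", 0).
# default_compiler_preference("gcc", "12.2", 1).
# default_compiler_preference("gcc", "9.4", 2).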
|
def package_compiler_defaults(self, pkg):
"Facts about packages' compiler prefs."
packages = spack.config.get('packages')
pkg_prefs = packages.get(pkg.name)
if ((not pkg_prefs) or ('compiler' not in pkg_prefs)):
return
compiler_list = self.possible_compilers.copy()
compiler_list = sorted(compiler_list, key=(lambda x: (x.name, x.version)), reverse=True)
ppk = spack.package_prefs.PackagePrefs(pkg.name, 'compiler', all=False)
matches = sorted(compiler_list, key=ppk)
for (i, cspec) in enumerate(reversed(matches)):
self.gen.fact(fn.node_compiler_preference(pkg.name, cspec.name, cspec.version, ((- i) * 100)))
| -6,119,435,165,651,928,000
|
Facts about packages' compiler prefs.
|
lib/spack/spack/solver/asp.py
|
package_compiler_defaults
|
AaltoSciComp/spack
|
python
|
def package_compiler_defaults(self, pkg):
packages = spack.config.get('packages')
pkg_prefs = packages.get(pkg.name)
if ((not pkg_prefs) or ('compiler' not in pkg_prefs)):
return
compiler_list = self.possible_compilers.copy()
compiler_list = sorted(compiler_list, key=(lambda x: (x.name, x.version)), reverse=True)
ppk = spack.package_prefs.PackagePrefs(pkg.name, 'compiler', all=False)
matches = sorted(compiler_list, key=ppk)
for (i, cspec) in enumerate(reversed(matches)):
self.gen.fact(fn.node_compiler_preference(pkg.name, cspec.name, cspec.version, ((- i) * 100)))
|
def condition(self, required_spec, imposed_spec=None, name=None):
'Generate facts for a dependency or virtual provider condition.\n\n Arguments:\n required_spec (spack.spec.Spec): the spec that triggers this condition\n imposed_spec (spack.spec.Spec or None): the sepc with constraints that\n are imposed when this condition is triggered\n name (str or None): name for `required_spec` (required if\n required_spec is anonymous, ignored if not)\n\n Returns:\n int: id of the condition created by this function\n '
named_cond = required_spec.copy()
named_cond.name = (named_cond.name or name)
assert named_cond.name, 'must provide name for anonymous condtions!'
condition_id = next(self._condition_id_counter)
self.gen.fact(fn.condition(condition_id))
requirements = self.checked_spec_clauses(named_cond, body=True, required_from=name)
for pred in requirements:
self.gen.fact(fn.condition_requirement(condition_id, pred.name, *pred.args))
if imposed_spec:
imposed_constraints = self.checked_spec_clauses(imposed_spec, body=False, required_from=name)
for pred in imposed_constraints:
if (pred.name in ('node', 'virtual_node')):
continue
self.gen.fact(fn.imposed_constraint(condition_id, pred.name, *pred.args))
return condition_id
| -6,618,896,958,326,488,000
|
Generate facts for a dependency or virtual provider condition.
Arguments:
required_spec (spack.spec.Spec): the spec that triggers this condition
imposed_spec (spack.spec.Spec or None): the spec with constraints that
are imposed when this condition is triggered
name (str or None): name for `required_spec` (required if
required_spec is anonymous, ignored if not)
Returns:
int: id of the condition created by this function
|
lib/spack/spack/solver/asp.py
|
condition
|
AaltoSciComp/spack
|
python
|
def condition(self, required_spec, imposed_spec=None, name=None):
'Generate facts for a dependency or virtual provider condition.\n\n Arguments:\n required_spec (spack.spec.Spec): the spec that triggers this condition\n imposed_spec (spack.spec.Spec or None): the sepc with constraints that\n are imposed when this condition is triggered\n name (str or None): name for `required_spec` (required if\n required_spec is anonymous, ignored if not)\n\n Returns:\n int: id of the condition created by this function\n '
named_cond = required_spec.copy()
named_cond.name = (named_cond.name or name)
assert named_cond.name, 'must provide name for anonymous condtions!'
condition_id = next(self._condition_id_counter)
self.gen.fact(fn.condition(condition_id))
requirements = self.checked_spec_clauses(named_cond, body=True, required_from=name)
for pred in requirements:
self.gen.fact(fn.condition_requirement(condition_id, pred.name, *pred.args))
if imposed_spec:
imposed_constraints = self.checked_spec_clauses(imposed_spec, body=False, required_from=name)
for pred in imposed_constraints:
if (pred.name in ('node', 'virtual_node')):
continue
self.gen.fact(fn.imposed_constraint(condition_id, pred.name, *pred.args))
return condition_id
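An illustration of the kind of facts condition() emits for a trigger/impose pair such as a depends_on('cmake@3.18:', when='@2:') directive; the condition id and argument values are made up, and only predicate names that appear in this file are used.

facts = [
    'condition(42).',
    'condition_requirement(42, "node", "hypre").',
    'condition_requirement(42, "version_satisfies", "hypre", "2:").',
    'imposed_constraint(42, "version_satisfies", "cmake", "3.18:").',
]
print('\n'.join(facts))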
|
def package_dependencies_rules(self, pkg, tests):
"Translate 'depends_on' directives into ASP logic."
for (_, conditions) in sorted(pkg.dependencies.items()):
for (cond, dep) in sorted(conditions.items()):
deptypes = dep.type.copy()
if (not tests):
deptypes.discard('test')
if ((not isinstance(tests, bool)) and (pkg.name not in tests)):
deptypes.discard('test')
if (not deptypes):
continue
condition_id = self.condition(cond, dep.spec, pkg.name)
self.gen.fact(fn.dependency_condition(condition_id, pkg.name, dep.spec.name))
for t in sorted(deptypes):
self.gen.fact(fn.dependency_type(condition_id, t))
self.gen.newline()
| 6,123,517,379,699,171,000
|
Translate 'depends_on' directives into ASP logic.
|
lib/spack/spack/solver/asp.py
|
package_dependencies_rules
|
AaltoSciComp/spack
|
python
|
def package_dependencies_rules(self, pkg, tests):
for (_, conditions) in sorted(pkg.dependencies.items()):
for (cond, dep) in sorted(conditions.items()):
deptypes = dep.type.copy()
if (not tests):
deptypes.discard('test')
if ((not isinstance(tests, bool)) and (pkg.name not in tests)):
deptypes.discard('test')
if (not deptypes):
continue
condition_id = self.condition(cond, dep.spec, pkg.name)
self.gen.fact(fn.dependency_condition(condition_id, pkg.name, dep.spec.name))
for t in sorted(deptypes):
self.gen.fact(fn.dependency_type(condition_id, t))
self.gen.newline()
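A standalone sketch of the test-deptype filtering above: 'test' dependencies are kept only when tests are requested globally (tests=True) or when the package is named in the tests list. The helper name and values are illustrative.

def effective_deptypes(deptypes, pkg_name, tests):
    deptypes = set(deptypes)
    if not tests:
        deptypes.discard('test')
    if not isinstance(tests, bool) and pkg_name not in tests:
        deptypes.discard('test')
    return deptypes

print(sorted(effective_deptypes({'build', 'test'}, 'hypre', tests=False)))        # ['build']
print(sorted(effective_deptypes({'build', 'test'}, 'hypre', tests=True)))         # ['build', 'test']
print(sorted(effective_deptypes({'build', 'test'}, 'hypre', tests=['openmpi'])))  # ['build']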
|
def virtual_preferences(self, pkg_name, func):
"Call func(vspec, provider, i) for each of pkg's provider prefs."
config = spack.config.get('packages')
pkg_prefs = config.get(pkg_name, {}).get('providers', {})
for (vspec, providers) in pkg_prefs.items():
if (vspec not in self.possible_virtuals):
continue
for (i, provider) in enumerate(providers):
provider_name = spack.spec.Spec(provider).name
func(vspec, provider_name, i)
| -4,192,218,378,398,953,000
|
Call func(vspec, provider, i) for each of pkg's provider prefs.
|
lib/spack/spack/solver/asp.py
|
virtual_preferences
|
AaltoSciComp/spack
|
python
|
def virtual_preferences(self, pkg_name, func):
config = spack.config.get('packages')
pkg_prefs = config.get(pkg_name, {}).get('providers', {})
for (vspec, providers) in pkg_prefs.items():
if (vspec not in self.possible_virtuals):
continue
for (i, provider) in enumerate(providers):
provider_name = spack.spec.Spec(provider).name
func(vspec, provider_name, i)
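A standalone sketch of the callback pattern, with an inline dict standing in for the parsed packages.yaml providers section; package names and the emitted fact name are illustrative.

pkg_prefs = {'mpi': ['openmpi', 'mvapich2'], 'blas': ['openblas']}
possible_virtuals = {'mpi'}  # 'blas' is skipped: not a possible virtual in this example

def emit_preference(vspec, provider, i):
    print('provider_preference("%s", "%s", %d).' % (vspec, provider, i))

for vspec, providers in pkg_prefs.items():
    if vspec not in possible_virtuals:
        continue
    for i, provider in enumerate(providers):
        emit_preference(vspec, provider, i)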
|
def external_packages(self):
'Facts on external packages, as read from packages.yaml'
packages_yaml = spack.config.get('packages')
packages_yaml = _normalize_packages_yaml(packages_yaml)
self.gen.h1('External packages')
for (pkg_name, data) in packages_yaml.items():
if (pkg_name == 'all'):
continue
if (pkg_name not in spack.repo.path):
continue
self.gen.h2('External package: {0}'.format(pkg_name))
external_buildable = data.get('buildable', True)
if (not external_buildable):
self.gen.fact(fn.external_only(pkg_name))
externals = data.get('externals', [])
external_specs = [spack.spec.Spec(x['spec']) for x in externals]
external_versions = [(x.version, external_id) for (external_id, x) in enumerate(external_specs)]
external_versions = [(v, idx, external_id) for (idx, (v, external_id)) in enumerate(sorted(external_versions, reverse=True))]
for (version, idx, external_id) in external_versions:
self.declared_versions[pkg_name].append(DeclaredVersion(version=version, idx=idx, origin=version_provenance.external))
for (local_idx, spec) in enumerate(external_specs):
condition_id = self.condition(spec)
self.gen.fact(fn.possible_external(condition_id, pkg_name, local_idx))
self.possible_versions[spec.name].add(spec.version)
self.gen.newline()
| 3,756,903,581,268,894,700
|
Facts on external packages, as read from packages.yaml
|
lib/spack/spack/solver/asp.py
|
external_packages
|
AaltoSciComp/spack
|
python
|
def external_packages(self):
packages_yaml = spack.config.get('packages')
packages_yaml = _normalize_packages_yaml(packages_yaml)
self.gen.h1('External packages')
for (pkg_name, data) in packages_yaml.items():
if (pkg_name == 'all'):
continue
if (pkg_name not in spack.repo.path):
continue
self.gen.h2('External package: {0}'.format(pkg_name))
external_buildable = data.get('buildable', True)
if (not external_buildable):
self.gen.fact(fn.external_only(pkg_name))
externals = data.get('externals', [])
external_specs = [spack.spec.Spec(x['spec']) for x in externals]
external_versions = [(x.version, external_id) for (external_id, x) in enumerate(external_specs)]
external_versions = [(v, idx, external_id) for (idx, (v, external_id)) in enumerate(sorted(external_versions, reverse=True))]
for (version, idx, external_id) in external_versions:
self.declared_versions[pkg_name].append(DeclaredVersion(version=version, idx=idx, origin=version_provenance.external))
for (local_idx, spec) in enumerate(external_specs):
condition_id = self.condition(spec)
self.gen.fact(fn.possible_external(condition_id, pkg_name, local_idx))
self.possible_versions[spec.name].add(spec.version)
self.gen.newline()
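A sketch of the input shape and the version-ranking step, using a plain dict in place of the parsed packages.yaml; the package, versions, and prefixes are illustrative, and newer external versions end up with a lower (more preferred) idx.

packages_yaml = {
    'openssl': {
        'buildable': False,
        'externals': [
            {'spec': 'openssl@1.1.1k', 'prefix': '/usr'},
            {'spec': 'openssl@3.0.2', 'prefix': '/opt/openssl'},
        ],
    },
}
externals = packages_yaml['openssl']['externals']
versions = [(e['spec'].split('@')[1], eid) for eid, e in enumerate(externals)]
for idx, (v, eid) in enumerate(sorted(versions, reverse=True)):
    print('external %d declares version %s with preference idx %d' % (eid, v, idx))
# external 1 declares version 3.0.2 with preference idx 0
# external 0 declares version 1.1.1k with preference idx 1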
|