| body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring |
|---|---|---|---|---|---|---|---|
@callback
def record_call(service):
'Add recorded event to set.'
calls.append(service)
| 7,588,650,191,437,550,000
|
Add recorded event to set.
|
tests/components/test_script.py
|
record_call
|
27tech/home-assistant
|
python
|
@callback
def record_call(service):
calls.append(service)
|
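The `record_call` snippet above closes over a `calls` list defined by the surrounding test, which the row does not show. A hedged sketch of that context (the registration call follows Home Assistant's test conventions and is an assumption here, not code from this row):

```python
from homeassistant.core import callback

calls = []  # test-level list the callback closes over

@callback
def record_call(service):
    'Add recorded event to set.'
    calls.append(service)

# Typical registration inside a test (assumed usage):
# hass.services.async_register('test', 'script', record_call)
```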
def tokenize(text: str) -> Iterator[str]:
'return iterable of uppercased words'
for match in RE_WORD.finditer(text):
(yield match.group().upper())
| 8,251,342,383,710,335,000
|
return iterable of uppercased words
|
08-def-type-hints/charindex.py
|
tokenize
|
eumiro/example-code-2e
|
python
|
def tokenize(text: str) -> Iterator[str]:
for match in RE_WORD.finditer(text):
(yield match.group().upper())
|
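The `tokenize` row above relies on a module-level `RE_WORD` pattern that the snippet does not include. A minimal, self-contained sketch of how it is typically wired up (the regex below is an assumption for illustration, not necessarily the repository's exact definition):

```python
import re
from typing import Iterator

# Assumed pattern: the row only shows that RE_WORD is a compiled regex.
RE_WORD = re.compile(r'\w+')

def tokenize(text: str) -> Iterator[str]:
    'return iterable of uppercased words'
    for match in RE_WORD.finditer(text):
        yield match.group().upper()

print(list(tokenize('the quick brown fox')))  # ['THE', 'QUICK', 'BROWN', 'FOX']
```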
def transform(self, payload: Dict[(str, Any)], metadata: Optional[Dict[(str, Any)]]=None):
'\n The mapping is done in 4 major steps:\n\n 1. Flattens the data.\n 2. Metadata Replacers:\n Some key mapping parameters are specified in the metadata. Keys that have placeholders like\n @{metadata_key} will be substituted with values from the specified metadata key.\n 3. Map Data.\n At this point the keys of the mapping inside config match the keys of the flat payload. That is, the\n payload and self._config.mapping have matching keys. Not all keys in the payload are necessarily in\n self._config.mapping; the config self._config.preserve_unmapped chooses what to do with those extra\n keys. If the opposite happens and self._config.mapping has keys not present in the payload, the\n configuration self._config.ignore_missing_data chooses what should be done.\n 4. Unflattens the data.\n :return: transformed and restructured data.\n '
flat_data = self.__flatter.transform(payload)
translated_dict: Dict = {}
map_keys_set = set(self._config.mapping.keys())
for map_key in map_keys_set.intersection(flat_data.keys()):
map_value = self._config.mapping[map_key]
if (metadata is not None):
for (meta_key, meta_value) in metadata.items():
map_key = map_key.replace((('@{' + meta_key) + '}'), str(meta_value))
map_value = map_value.replace((('@{' + meta_key) + '}'), str(meta_value))
translated_dict[map_value] = flat_data[map_key]
if (not self._config.ignore_missing_data):
missing_keys = (map_keys_set - flat_data.keys())
if missing_keys:
raise ReportMissingData(missing_keys)
if self._config.preserve_unmapped:
for unmapped_key in (flat_data.keys() - self._config.mapping.keys()):
translated_dict[unmapped_key] = flat_data[unmapped_key]
if self._config.return_plain:
return (translated_dict, metadata)
if (metadata is None):
return self.__unflatter.transform(translated_dict)
return self.__unflatter.transform(translated_dict, metadata)
| -5,254,253,032,800,944,000
|
The mapping is done in 4 major steps:
1. Flattens the data.
2. Metadata Replacers:
Some key mapping parameters are specified in the metadata. Keys that have placeholders like
@{metadata_key} will be substituted with values from the specified metadata key.
3. Map Data.
At this point the keys of the mapping inside config match the keys of the flat payload. That is, the
payload and self._config.mapping have matching keys. Not all keys in the payload are necessarily in
self._config.mapping; the config self._config.preserve_unmapped chooses what to do with those extra
keys. If the opposite happens and self._config.mapping has keys not present in the payload, the
configuration self._config.ignore_missing_data chooses what should be done.
4. Unflattens the data.
:return: transformed and restructured data.
|
transformer/transformers/map_keys.py
|
transform
|
santunioni/Transformer
|
python
|
def transform(self, payload: Dict[(str, Any)], metadata: Optional[Dict[(str, Any)]]=None):
'\n The mapping is done in 4 major steps:\n\n 1. Flattens the data.\n 2. Metadata Replacers:\n Some key mapping parameters are specified in the metadata. Keys that have placeholders like\n @{metadata_key} will be substituted with values from the specified metadata key.\n 3. Map Data.\n At this point the keys of the mapping inside config match the keys of the flat payload. That is, the\n payload and self._config.mapping have matching keys. Not all keys in the payload are necessarily in\n self._config.mapping; the config self._config.preserve_unmapped chooses what to do with those extra\n keys. If the opposite happens and self._config.mapping has keys not present in the payload, the\n configuration self._config.ignore_missing_data chooses what should be done.\n 4. Unflattens the data.\n :return: transformed and restructured data.\n '
flat_data = self.__flatter.transform(payload)
translated_dict: Dict = {}
map_keys_set = set(self._config.mapping.keys())
for map_key in map_keys_set.intersection(flat_data.keys()):
map_value = self._config.mapping[map_key]
if (metadata is not None):
for (meta_key, meta_value) in metadata.items():
map_key = map_key.replace((('@{' + meta_key) + '}'), str(meta_value))
map_value = map_value.replace((('@{' + meta_key) + '}'), str(meta_value))
translated_dict[map_value] = flat_data[map_key]
if (not self._config.ignore_missing_data):
missing_keys = (map_keys_set - flat_data.keys())
if missing_keys:
raise ReportMissingData(missing_keys)
if self._config.preserve_unmapped:
for unmapped_key in (flat_data.keys() - self._config.mapping.keys()):
translated_dict[unmapped_key] = flat_data[unmapped_key]
if self._config.return_plain:
return (translated_dict, metadata)
if (metadata is None):
return self.__unflatter.transform(translated_dict)
return self.__unflatter.transform(translated_dict, metadata)
|
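The four steps in the `transform` docstring (flatten, substitute metadata placeholders, map keys, unflatten) can be illustrated with plain dicts. This is a hypothetical, self-contained sketch of the idea, not the Transformer library's actual flatter/unflatter implementation:

```python
def flatten(data, prefix=''):
    # Step 1: collapse nested dicts into dot-separated keys.
    flat = {}
    for key, value in data.items():
        full_key = f'{prefix}.{key}' if prefix else key
        if isinstance(value, dict):
            flat.update(flatten(value, full_key))
        else:
            flat[full_key] = value
    return flat

def unflatten(flat):
    # Step 4: rebuild nested dicts from dot-separated keys.
    nested = {}
    for key, value in flat.items():
        parts = key.split('.')
        node = nested
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = value
    return nested

payload = {'user': {'first': 'Ada', 'last': 'Lovelace'}}
mapping = {'user.first': 'name.given', 'user.last': 'name.family'}

flat = flatten(payload)                                            # step 1
mapped = {mapping[k]: v for k, v in flat.items() if k in mapping}  # step 3
print(unflatten(mapped))  # {'name': {'given': 'Ada', 'family': 'Lovelace'}}
```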
def nmf(Y, A, S, W=None, prox_A=operators.prox_plus, prox_S=operators.prox_plus, proxs_g=None, steps_g=None, Ls=None, slack=0.9, update_order=None, steps_g_update='steps_f', max_iter=1000, e_rel=0.001, e_abs=0, traceback=None):
'Non-negative matrix factorization.\n\n This method solves the NMF problem\n minimize || Y - AS ||_2^2\n under an arbitrary number of constraints on A and/or S.\n\n Args:\n Y: target matrix MxN\n A: initial amplitude matrix MxK, will be updated\n S: initial source matrix KxN, will be updated\n W: (optional weight matrix MxN)\n prox_A: direct projection constraint of A\n prox_S: direct projection constraint of S\n proxs_g: list of constraints for A or S for ADMM-type optimization\n [[prox_A_0, prox_A_1...],[prox_S_0, prox_S_1,...]]\n steps_g: specific value of step size for proxs_g (experts only!)\n Ls: list of linear operators for the constraint functions proxs_g\n If set, needs to have same format as proxs_g.\n Matrices can be numpy.array, scipy.sparse, or None (for identity).\n slack: tolerance for (re)evaluation of Lipschitz constants\n See Steps_AS() for details.\n update_order: list of factor indices in update order\n j=0 -> A, j=1 -> S\n max_iter: maximum iteration number, irrespective of current residuals\n e_rel: relative error threshold for primal and dual residuals\n e_abs: absolute error threshold for primal and dual residuals\n traceback: utils.Traceback to hold variable histories\n\n Returns:\n converged: convergence test for A,S\n errors: difference between latest and previous iterations for A,S\n\n See also:\n algorithms.bsdmm for update_order and steps_g_update\n utils.AcceleratedProxF for Nesterov acceleration\n\n Reference:\n Moolekamp & Melchior, 2017 (arXiv:1708.09066)\n\n '
if (W is not None):
WA = normalizeMatrix(W, 1)
WS = normalizeMatrix(W, 0)
else:
WA = WS = 1
steps_f = Steps_AS(WA=WA, WS=WS, slack=slack)
from functools import partial
f = partial(prox_likelihood, Y=Y, WA=WA, WS=WS, prox_S=prox_S, prox_A=prox_A)
X = [A, S]
if ((proxs_g is None) or (not utils.hasNotNone(proxs_g))):
return algorithms.bpgm(X, f, steps_f, accelerated=True, update_order=update_order, max_iter=max_iter, e_rel=e_rel, traceback=traceback)
else:
return algorithms.bsdmm(X, f, steps_f, proxs_g, steps_g=steps_g, Ls=Ls, update_order=update_order, steps_g_update=steps_g_update, max_iter=max_iter, e_rel=e_rel, e_abs=e_abs, traceback=traceback)
| -1,810,764,077,884,436,500
|
Non-negative matrix factorization.
This method solves the NMF problem
minimize || Y - AS ||_2^2
under an arbitrary number of constraints on A and/or S.
Args:
Y: target matrix MxN
A: initial amplitude matrix MxK, will be updated
S: initial source matrix KxN, will be updated
W: (optional weight matrix MxN)
prox_A: direct projection constraint of A
prox_S: direct projection constraint of S
proxs_g: list of constraints for A or S for ADMM-type optimization
[[prox_A_0, prox_A_1...],[prox_S_0, prox_S_1,...]]
steps_g: specific value of step size for proxs_g (experts only!)
Ls: list of linear operators for the constraint functions proxs_g
If set, needs to have same format as proxs_g.
Matrices can be numpy.array, scipy.sparse, or None (for identity).
slack: tolerance for (re)evaluation of Lipschitz constants
See Steps_AS() for details.
update_order: list of factor indices in update order
j=0 -> A, j=1 -> S
max_iter: maximum iteration number, irrespective of current residuals
e_rel: relative error threshold for primal and dual residuals
e_abs: absolute error threshold for primal and dual residuals
traceback: utils.Traceback to hold variable histories
Returns:
converged: convergence test for A,S
errors: difference between latest and previous iterations for A,S
See also:
algorithms.bsdmm for update_order and steps_g_update
utils.AcceleratedProxF for Nesterov acceleration
Reference:
Moolekamp & Melchior, 2017 (arXiv:1708.09066)
|
proxmin/nmf.py
|
nmf
|
herjy/proxmin
|
python
|
def nmf(Y, A, S, W=None, prox_A=operators.prox_plus, prox_S=operators.prox_plus, proxs_g=None, steps_g=None, Ls=None, slack=0.9, update_order=None, steps_g_update='steps_f', max_iter=1000, e_rel=0.001, e_abs=0, traceback=None):
'Non-negative matrix factorization.\n\n This method solves the NMF problem\n minimize || Y - AS ||_2^2\n under an arbitrary number of constraints on A and/or S.\n\n Args:\n Y: target matrix MxN\n A: initial amplitude matrix MxK, will be updated\n S: initial source matrix KxN, will be updated\n W: (optional weight matrix MxN)\n prox_A: direct projection constraint of A\n prox_S: direct projection constraint of S\n proxs_g: list of constraints for A or S for ADMM-type optimization\n [[prox_A_0, prox_A_1...],[prox_S_0, prox_S_1,...]]\n steps_g: specific value of step size for proxs_g (experts only!)\n Ls: list of linear operators for the constraint functions proxs_g\n If set, needs to have same format as proxs_g.\n Matrices can be numpy.array, scipy.sparse, or None (for identity).\n slack: tolerance for (re)evaluation of Lipschitz constants\n See Steps_AS() for details.\n update_order: list of factor indices in update order\n j=0 -> A, j=1 -> S\n max_iter: maximum iteration number, irrespective of current residuals\n e_rel: relative error threshold for primal and dual residuals\n e_abs: absolute error threshold for primal and dual residuals\n traceback: utils.Traceback to hold variable histories\n\n Returns:\n converged: convergence test for A,S\n errors: difference between latest and previous iterations for A,S\n\n See also:\n algorithms.bsdmm for update_order and steps_g_update\n utils.AcceleratedProxF for Nesterov acceleration\n\n Reference:\n Moolekamp & Melchior, 2017 (arXiv:1708.09066)\n\n '
if (W is not None):
WA = normalizeMatrix(W, 1)
WS = normalizeMatrix(W, 0)
else:
WA = WS = 1
steps_f = Steps_AS(WA=WA, WS=WS, slack=slack)
from functools import partial
f = partial(prox_likelihood, Y=Y, WA=WA, WS=WS, prox_S=prox_S, prox_A=prox_A)
X = [A, S]
if ((proxs_g is None) or (not utils.hasNotNone(proxs_g))):
return algorithms.bpgm(X, f, steps_f, accelerated=True, update_order=update_order, max_iter=max_iter, e_rel=e_rel, traceback=traceback)
else:
return algorithms.bsdmm(X, f, steps_f, proxs_g, steps_g=steps_g, Ls=Ls, update_order=update_order, steps_g_update=steps_g_update, max_iter=max_iter, e_rel=e_rel, e_abs=e_abs, traceback=traceback)
|
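Going by the signature and docstring alone, `nmf` updates non-negative `A` and `S` in place and returns a convergence flag plus per-factor errors. A usage sketch under those assumptions (the import path is inferred from the `path` column and may differ):

```python
import numpy as np
from proxmin.nmf import nmf  # assumed import path, inferred from proxmin/nmf.py

rng = np.random.default_rng(0)
M, K, N = 50, 3, 40
Y = rng.random((M, K)) @ rng.random((K, N))  # target with an exact rank-K factorization

# Random non-negative starting points; the docstring says both are updated in place.
A = rng.random((M, K))
S = rng.random((K, N))

converged, errors = nmf(Y, A, S, max_iter=500, e_rel=1e-4)
print(np.linalg.norm(Y - A @ S) / np.linalg.norm(Y))  # relative residual
```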
def __init__(self, WA=1, WS=1, slack=0.1, max_stride=100):
'Helper class to compute the Lipschitz constants of grad f.\n\n The __call__ function computes the spectral norms of A or S, which\n determine the Lipschitz constant of the respective update steps.\n\n If a weight matrix is used, the stepsize will be upper bounded by\n assuming the maximum value of the weights. In the case of varying\n weights, it is generally advised to normalize the weight matrix\n differently for the A and S updates, therefore two maximum numbers\n (WAMax, WSmax) can be set.\n\n Because the spectral norm is expensive to compute, it will only update\n the step_size if relative changes of L exceed slack/2.\n If not, which is usually the case after only a few iterations, it will\n report a previous value for the next several iterations. The stride\n between updates is set by\n stride -> stride * (slack/2 / rel_error)\n i.e. it increases more strongly if the rel_error is much below the\n slack budget.\n '
import scipy.sparse
if (WA == 1):
self.WA = WA
else:
self.WA = scipy.sparse.diags(WA.reshape((- 1)))
if (WS == 1):
self.WS = WS
else:
self.WS = scipy.sparse.diags(WS.reshape((- 1)))
self._cb = [utils.ApproximateCache(self._one_over_lipschitzA, slack=slack, max_stride=max_stride), utils.ApproximateCache(self._one_over_lipschitzS, slack=slack, max_stride=max_stride)]
| 6,426,688,991,894,909,000
|
Helper class to compute the Lipschitz constants of grad f.
The __call__ function computes the spectral norms of A or S, which
determine the Lipschitz constant of the respective update steps.
If a weight matrix is used, the stepsize will be upper bounded by
assuming the maximum value of the weights. In the case of varying
weights, it is generally advised to normalize the weight matrix
differently for the A and S updates, therefore two maximum numbers
(WAMax, WSmax) can be set.
Because the spectral norm is expensive to compute, it will only update
the step_size if relative changes of L exceed slack/2.
If not, which is usually the case after only a few iterations, it will
report a previous value for the next several iterations. The stride
between updates is set by
stride -> stride * (slack/2 / rel_error)
i.e. it increases more strongly if the rel_error is much below the
slack budget.
|
proxmin/nmf.py
|
__init__
|
herjy/proxmin
|
python
|
def __init__(self, WA=1, WS=1, slack=0.1, max_stride=100):
'Helper class to compute the Lipschitz constants of grad f.\n\n The __call__ function computes the spectral norms of A or S, which\n determine the Lipschitz constant of the respective update steps.\n\n If a weight matrix is used, the stepsize will be upper bounded by\n assuming the maximum value of the weights. In the case of varying\n weights, it is generally advised to normalize the weight matrix\n differently for the A and S updates, therefore two maximum numbers\n (WAMax, WSmax) can be set.\n\n Because the spectral norm is expensive to compute, it will only update\n the step_size if relative changes of L exceed slack/2.\n If not, which is usually the case after only a few iterations, it will\n report a previous value for the next several iterations. The stride\n between updates is set by\n stride -> stride * (slack/2 / rel_error)\n i.e. it increases more strongly if the rel_error is much below the\n slack budget.\n '
import scipy.sparse
if (WA == 1):
self.WA = WA
else:
self.WA = scipy.sparse.diags(WA.reshape((- 1)))
if (WS == 1):
self.WS = WS
else:
self.WS = scipy.sparse.diags(WS.reshape((- 1)))
self._cb = [utils.ApproximateCache(self._one_over_lipschitzA, slack=slack, max_stride=max_stride), utils.ApproximateCache(self._one_over_lipschitzS, slack=slack, max_stride=max_stride)]
|
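The caching behaviour this docstring describes, recompute an expensive quantity only when it has drifted, and stretch the interval between recomputations by `stride * (slack/2 / rel_error)`, can be sketched in isolation. The class below is an illustrative stand-in for `utils.ApproximateCache`, written from the docstring rather than from the library's source:

```python
class ApproximateCacheSketch:
    'Illustrative stand-in for utils.ApproximateCache (behavior assumed from the docstring).'

    def __init__(self, fn, slack=0.1, max_stride=100):
        self.fn = fn                  # expensive function, e.g. a spectral norm
        self.slack = slack
        self.max_stride = max_stride
        self.stride = 1               # calls between recomputations
        self.counter = 0
        self.value = None

    def __call__(self, *args):
        if self.value is None or self.counter % self.stride == 0:
            new_value = self.fn(*args)
            if self.value is not None:
                rel_error = abs(new_value - self.value) / max(abs(new_value), 1e-12)
                if rel_error < self.slack / 2:
                    # Stable value: stretch the stride, capped at max_stride.
                    grow = (self.slack / 2) / max(rel_error, 1e-12)
                    self.stride = min(self.max_stride, max(1, int(self.stride * grow)))
                else:
                    self.stride = 1   # value moved too much; recompute often again
            self.value = new_value
        self.counter += 1
        return self.value
```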
def __init__(self, encoder_type=None, encoder_name=None, decoder_name=None, encoder_decoder_type=None, encoder_decoder_name=None, config=None, args=None, use_cuda=True, cuda_device=(- 1), **kwargs):
'\n Initializes a Seq2SeqModel.\n\n Args:\n encoder_type (optional): The type of model to use as the encoder.\n encoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.\n decoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.\n Must be the same "size" as the encoder model (base/base, large/large, etc.)\n encoder_decoder_type (optional): The type of encoder-decoder model. (E.g. bart)\n encoder_decoder_name (optional): The path to a directory containing the saved encoder and decoder of a Seq2SeqModel. (E.g. "outputs/") OR a valid BART or MarianMT model.\n config (optional): A configuration file to build an EncoderDecoderModel.\n args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.\n use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.\n cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.\n **kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the \'from_pretrained\' implementation where this will be supplied.\n '
if (not config):
if (not ((encoder_name and decoder_name) or encoder_decoder_name)):
raise ValueError('You must specify a Seq2Seq config \t OR \tencoder_type, encoder_name, and decoder_name OR \t \tencoder_type and encoder_decoder_name')
elif (not (encoder_type or encoder_decoder_type)):
raise ValueError('You must specify a Seq2Seq config \t OR \tencoder_type, encoder_name, and decoder_name \t OR \tencoder_type and encoder_decoder_name')
self.args = self._load_model_args(encoder_decoder_name)
if isinstance(args, dict):
self.args.update_from_dict(args)
elif isinstance(args, Seq2SeqArgs):
self.args = args
if ('sweep_config' in kwargs):
sweep_config = kwargs.pop('sweep_config')
sweep_values = {key: value['value'] for (key, value) in sweep_config.as_dict().items() if (key != '_wandb')}
self.args.update_from_dict(sweep_values)
if self.args.manual_seed:
random.seed(self.args.manual_seed)
np.random.seed(self.args.manual_seed)
torch.manual_seed(self.args.manual_seed)
if (self.args.n_gpu > 0):
torch.cuda.manual_seed_all(self.args.manual_seed)
if use_cuda:
if torch.cuda.is_available():
if (cuda_device == (- 1)):
self.device = torch.device('cuda')
else:
self.device = torch.device(f'cuda:{cuda_device}')
else:
raise ValueError("'use_cuda' set to True when cuda is unavailable. Make sure CUDA is available or set `use_cuda=False`.")
else:
self.device = 'cpu'
self.results = {}
if (not use_cuda):
self.args.fp16 = False
if encoder_decoder_type:
(config_class, model_class, tokenizer_class) = MODEL_CLASSES[encoder_decoder_type]
else:
(config_class, model_class, tokenizer_class) = MODEL_CLASSES[encoder_type]
if (encoder_decoder_type in ['bart', 'marian']):
self.model = model_class.from_pretrained(encoder_decoder_name)
if (encoder_decoder_type == 'bart'):
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
elif (encoder_decoder_type == 'marian'):
if self.args.base_marian_model_name:
self.encoder_tokenizer = tokenizer_class.from_pretrained(self.args.base_marian_model_name)
else:
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
self.decoder_tokenizer = self.encoder_tokenizer
self.config = self.model.config
else:
if encoder_decoder_name:
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(os.path.join(encoder_decoder_name, 'encoder'), os.path.join(encoder_decoder_name, 'decoder'))
self.model.encoder = model_class.from_pretrained(os.path.join(encoder_decoder_name, 'encoder'))
self.model.decoder = BertForMaskedLM.from_pretrained(os.path.join(encoder_decoder_name, 'decoder'))
self.encoder_tokenizer = tokenizer_class.from_pretrained(os.path.join(encoder_decoder_name, 'encoder'))
self.decoder_tokenizer = BertTokenizer.from_pretrained(os.path.join(encoder_decoder_name, 'decoder'))
else:
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(encoder_name, decoder_name, config=config)
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_name)
self.decoder_tokenizer = BertTokenizer.from_pretrained(decoder_name)
self.encoder_config = self.model.config.encoder
self.decoder_config = self.model.config.decoder
if (self.args.wandb_project and (not wandb_available)):
warnings.warn('wandb_project specified but wandb is not available. Wandb disabled.')
self.args.wandb_project = None
if encoder_decoder_name:
self.args.model_name = encoder_decoder_name
self.args.base_marian_model_name = encoder_decoder_name
elif (encoder_name and decoder_name):
self.args.model_name = ((encoder_name + '-') + decoder_name)
else:
self.args.model_name = 'encoder-decoder'
if encoder_decoder_type:
self.args.model_type = encoder_decoder_type
elif encoder_type:
self.args.model_type = (encoder_type + '-bert')
else:
self.args.model_type = 'encoder-decoder'
| 5,702,111,848,489,322,000
|
Initializes a Seq2SeqModel.
Args:
encoder_type (optional): The type of model to use as the encoder.
encoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
decoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
Must be the same "size" as the encoder model (base/base, large/large, etc.)
encoder_decoder_type (optional): The type of encoder-decoder model. (E.g. bart)
encoder_decoder_name (optional): The path to a directory containing the saved encoder and decoder of a Seq2SeqModel. (E.g. "outputs/") OR a valid BART or MarianMT model.
config (optional): A configuration file to build an EncoderDecoderModel.
args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
**kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
|
simpletransformers/seq2seq/seq2seq_model.py
|
__init__
|
AliOsm/simpletransformers
|
python
|
def __init__(self, encoder_type=None, encoder_name=None, decoder_name=None, encoder_decoder_type=None, encoder_decoder_name=None, config=None, args=None, use_cuda=True, cuda_device=(- 1), **kwargs):
'\n Initializes a Seq2SeqModel.\n\n Args:\n encoder_type (optional): The type of model to use as the encoder.\n encoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.\n decoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.\n Must be the same "size" as the encoder model (base/base, large/large, etc.)\n encoder_decoder_type (optional): The type of encoder-decoder model. (E.g. bart)\n encoder_decoder_name (optional): The path to a directory containing the saved encoder and decoder of a Seq2SeqModel. (E.g. "outputs/") OR a valid BART or MarianMT model.\n config (optional): A configuration file to build an EncoderDecoderModel.\n args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.\n use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.\n cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.\n **kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the \'from_pretrained\' implementation where this will be supplied.\n '
if (not config):
if (not ((encoder_name and decoder_name) or encoder_decoder_name)):
raise ValueError('You must specify a Seq2Seq config \t OR \tencoder_type, encoder_name, and decoder_name OR \t \tencoder_type and encoder_decoder_name')
elif (not (encoder_type or encoder_decoder_type)):
raise ValueError('You must specify a Seq2Seq config \t OR \tencoder_type, encoder_name, and decoder_name \t OR \tencoder_type and encoder_decoder_name')
self.args = self._load_model_args(encoder_decoder_name)
if isinstance(args, dict):
self.args.update_from_dict(args)
elif isinstance(args, Seq2SeqArgs):
self.args = args
if ('sweep_config' in kwargs):
sweep_config = kwargs.pop('sweep_config')
sweep_values = {key: value['value'] for (key, value) in sweep_config.as_dict().items() if (key != '_wandb')}
self.args.update_from_dict(sweep_values)
if self.args.manual_seed:
random.seed(self.args.manual_seed)
np.random.seed(self.args.manual_seed)
torch.manual_seed(self.args.manual_seed)
if (self.args.n_gpu > 0):
torch.cuda.manual_seed_all(self.args.manual_seed)
if use_cuda:
if torch.cuda.is_available():
if (cuda_device == (- 1)):
self.device = torch.device('cuda')
else:
self.device = torch.device(f'cuda:{cuda_device}')
else:
raise ValueError("'use_cuda' set to True when cuda is unavailable. Make sure CUDA is available or set `use_cuda=False`.")
else:
self.device = 'cpu'
self.results = {}
if (not use_cuda):
self.args.fp16 = False
if encoder_decoder_type:
(config_class, model_class, tokenizer_class) = MODEL_CLASSES[encoder_decoder_type]
else:
(config_class, model_class, tokenizer_class) = MODEL_CLASSES[encoder_type]
if (encoder_decoder_type in ['bart', 'marian']):
self.model = model_class.from_pretrained(encoder_decoder_name)
if (encoder_decoder_type == 'bart'):
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
elif (encoder_decoder_type == 'marian'):
if self.args.base_marian_model_name:
self.encoder_tokenizer = tokenizer_class.from_pretrained(self.args.base_marian_model_name)
else:
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
self.decoder_tokenizer = self.encoder_tokenizer
self.config = self.model.config
else:
if encoder_decoder_name:
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(os.path.join(encoder_decoder_name, 'encoder'), os.path.join(encoder_decoder_name, 'decoder'))
self.model.encoder = model_class.from_pretrained(os.path.join(encoder_decoder_name, 'encoder'))
self.model.decoder = BertForMaskedLM.from_pretrained(os.path.join(encoder_decoder_name, 'decoder'))
self.encoder_tokenizer = tokenizer_class.from_pretrained(os.path.join(encoder_decoder_name, 'encoder'))
self.decoder_tokenizer = BertTokenizer.from_pretrained(os.path.join(encoder_decoder_name, 'decoder'))
else:
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(encoder_name, decoder_name, config=config)
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_name)
self.decoder_tokenizer = BertTokenizer.from_pretrained(decoder_name)
self.encoder_config = self.model.config.encoder
self.decoder_config = self.model.config.decoder
if (self.args.wandb_project and (not wandb_available)):
warnings.warn('wandb_project specified but wandb is not available. Wandb disabled.')
self.args.wandb_project = None
if encoder_decoder_name:
self.args.model_name = encoder_decoder_name
self.args.base_marian_model_name = encoder_decoder_name
elif (encoder_name and decoder_name):
self.args.model_name = ((encoder_name + '-') + decoder_name)
else:
self.args.model_name = 'encoder-decoder'
if encoder_decoder_type:
self.args.model_type = encoder_decoder_type
elif encoder_type:
self.args.model_type = (encoder_type + '-bert')
else:
self.args.model_type = 'encoder-decoder'
|
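Following the argument descriptions above, the two common ways to construct a `Seq2SeqModel` are a bundled encoder-decoder (BART/Marian) or a separate encoder/decoder pair. A typical BART instantiation (model names here are illustrative):

```python
from simpletransformers.seq2seq import Seq2SeqModel, Seq2SeqArgs

model_args = Seq2SeqArgs()
model_args.num_train_epochs = 3
model_args.overwrite_output_dir = True

# BART bundles encoder and decoder, so encoder_decoder_type/name suffice.
model = Seq2SeqModel(
    encoder_decoder_type="bart",
    encoder_decoder_name="facebook/bart-base",
    args=model_args,
    use_cuda=False,  # force CPU; leave True (the default) when a GPU is available
)
```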
def train_model(self, train_data, output_dir=None, show_running_loss=True, args=None, eval_data=None, verbose=True, **kwargs):
"\n Trains the model using 'train_data'\n\n Args:\n train_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.\n - `input_text`: The input text sequence.\n - `target_text`: The target text sequence\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.\n args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.\n eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs\n will be lists of strings. Note that this will slow down training significantly as the predicted sequences need to be generated.\n\n Returns:\n None\n "
if args:
self.args.update_from_dict(args)
if (self.args.evaluate_during_training and (eval_data is None)):
raise ValueError('evaluate_during_training is enabled but eval_data is not specified. Pass eval_data to model.train_model() if using evaluate_during_training.')
if (not output_dir):
output_dir = self.args.output_dir
if (os.path.exists(output_dir) and os.listdir(output_dir) and (not self.args.overwrite_output_dir)):
raise ValueError('Output directory ({}) already exists and is not empty. Set args.overwrite_output_dir = True to overcome.'.format(output_dir))
self._move_model_to_device()
train_dataset = self.load_and_cache_examples(train_data, verbose=verbose)
os.makedirs(output_dir, exist_ok=True)
(global_step, tr_loss) = self.train(train_dataset, output_dir, show_running_loss=show_running_loss, eval_data=eval_data, verbose=verbose, **kwargs)
self._save_model(self.args.output_dir, model=self.model)
if verbose:
logger.info(' Training of {} model complete. Saved to {}.'.format(self.args.model_name, output_dir))
| -3,020,603,917,038,356,000
|
Trains the model using 'train_data'
Args:
train_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.
- `input_text`: The input text sequence.
- `target_text`: The target text sequence
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down training significantly as the predicted sequences need to be generated.
Returns:
None
|
simpletransformers/seq2seq/seq2seq_model.py
|
train_model
|
AliOsm/simpletransformers
|
python
|
def train_model(self, train_data, output_dir=None, show_running_loss=True, args=None, eval_data=None, verbose=True, **kwargs):
"\n Trains the model using 'train_data'\n\n Args:\n train_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.\n - `input_text`: The input text sequence.\n - `target_text`: The target text sequence\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.\n args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.\n eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs\n will be lists of strings. Note that this will slow down training significantly as the predicted sequences need to be generated.\n\n Returns:\n None\n "
if args:
self.args.update_from_dict(args)
if (self.args.evaluate_during_training and (eval_data is None)):
raise ValueError('evaluate_during_training is enabled but eval_data is not specified. Pass eval_data to model.train_model() if using evaluate_during_training.')
if (not output_dir):
output_dir = self.args.output_dir
if (os.path.exists(output_dir) and os.listdir(output_dir) and (not self.args.overwrite_output_dir)):
raise ValueError('Output directory ({}) already exists and is not empty. Set args.overwrite_output_dir = True to overcome.'.format(output_dir))
self._move_model_to_device()
train_dataset = self.load_and_cache_examples(train_data, verbose=verbose)
os.makedirs(output_dir, exist_ok=True)
(global_step, tr_loss) = self.train(train_dataset, output_dir, show_running_loss=show_running_loss, eval_data=eval_data, verbose=verbose, **kwargs)
self._save_model(self.args.output_dir, model=self.model)
if verbose:
logger.info(' Training of {} model complete. Saved to {}.'.format(self.args.model_name, output_dir))
|
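Continuing the instantiation sketch above, `train_model` expects a two-column DataFrame, and extra keyword arguments become custom evaluation metrics over lists of label/prediction strings. A minimal sketch:

```python
import pandas as pd

train_df = pd.DataFrame(
    [["translate: one", "eins"], ["translate: two", "zwei"]],
    columns=["input_text", "target_text"],
)
eval_df = pd.DataFrame(
    [["translate: three", "drei"]],
    columns=["input_text", "target_text"],
)

def count_matches(labels, preds):
    # Custom metric: both arguments are lists of strings.
    return sum(int(label == pred) for label, pred in zip(labels, preds))

model.train_model(train_df, eval_data=eval_df, matches=count_matches)
```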
def train(self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs):
'\n Trains the model on train_dataset.\n\n Utility function to be used by the train_model() method. Not intended to be used directly.\n '
model = self.model
args = self.args
tb_writer = SummaryWriter(logdir=args.tensorboard_dir)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=self.args.dataloader_num_workers)
if (args.max_steps > 0):
t_total = args.max_steps
args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
else:
t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = []
custom_parameter_names = set()
for group in self.args.custom_parameter_groups:
params = group.pop('params')
custom_parameter_names.update(params)
param_group = {**group}
param_group['params'] = [p for (n, p) in model.named_parameters() if (n in params)]
optimizer_grouped_parameters.append(param_group)
for group in self.args.custom_layer_parameters:
layer_number = group.pop('layer')
layer = f'layer.{layer_number}.'
group_d = {**group}
group_nd = {**group}
group_nd['weight_decay'] = 0.0
params_d = []
params_nd = []
for (n, p) in model.named_parameters():
if ((n not in custom_parameter_names) and (layer in n)):
if any(((nd in n) for nd in no_decay)):
params_nd.append(p)
else:
params_d.append(p)
custom_parameter_names.add(n)
group_d['params'] = params_d
group_nd['params'] = params_nd
optimizer_grouped_parameters.append(group_d)
optimizer_grouped_parameters.append(group_nd)
if (not self.args.train_custom_parameters_only):
optimizer_grouped_parameters.extend([{'params': [p for (n, p) in model.named_parameters() if ((n not in custom_parameter_names) and (not any(((nd in n) for nd in no_decay))))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if ((n not in custom_parameter_names) and any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.0}])
warmup_steps = math.ceil((t_total * args.warmup_ratio))
args.warmup_steps = (warmup_steps if (args.warmup_steps == 0) else args.warmup_steps)
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if (args.model_name and os.path.isfile(os.path.join(args.model_name, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name, 'scheduler.pt'))):
optimizer.load_state_dict(torch.load(os.path.join(args.model_name, 'optimizer.pt')))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name, 'scheduler.pt')))
if (args.n_gpu > 1):
model = torch.nn.DataParallel(model)
logger.info(' Training started')
global_step = 0
(tr_loss, logging_loss) = (0.0, 0.0)
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc='Epoch', disable=args.silent, mininterval=0)
epoch_number = 0
best_eval_metric = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
if (args.model_name and os.path.exists(args.model_name)):
try:
checkpoint_suffix = args.model_name.split('/')[(- 1)].split('-')
if (len(checkpoint_suffix) > 2):
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[(- 1)]
global_step = int(checkpoint_suffix)
epochs_trained = (global_step // (len(train_dataloader) // args.gradient_accumulation_steps))
steps_trained_in_current_epoch = (global_step % (len(train_dataloader) // args.gradient_accumulation_steps))
logger.info(' Continuing training from checkpoint, will skip to saved global_step')
logger.info(' Continuing training from epoch %d', epochs_trained)
logger.info(' Continuing training from global step %d', global_step)
logger.info(' Will skip the first %d steps in the current epoch', steps_trained_in_current_epoch)
except ValueError:
logger.info(' Starting fine-tuning.')
if args.evaluate_during_training:
training_progress_scores = self._create_training_progress_scores(**kwargs)
if args.wandb_project:
wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
wandb.watch(self.model)
if args.fp16:
from torch.cuda import amp
scaler = amp.GradScaler()
model.train()
for current_epoch in train_iterator:
if (epochs_trained > 0):
epochs_trained -= 1
continue
train_iterator.set_description(f'Epoch {(epoch_number + 1)} of {args.num_train_epochs}')
batch_iterator = tqdm(train_dataloader, desc=f'Running Epoch {epoch_number} of {args.num_train_epochs}', disable=args.silent, mininterval=0)
for (step, batch) in enumerate(batch_iterator):
if (steps_trained_in_current_epoch > 0):
steps_trained_in_current_epoch -= 1
continue
inputs = self._get_inputs_dict(batch)
if args.fp16:
with amp.autocast():
outputs = model(**inputs)
loss = outputs[0]
else:
outputs = model(**inputs)
loss = outputs[0]
if (args.n_gpu > 1):
loss = loss.mean()
current_loss = loss.item()
if show_running_loss:
batch_iterator.set_description(f'Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}')
if (args.gradient_accumulation_steps > 1):
loss = (loss / args.gradient_accumulation_steps)
if args.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
tr_loss += loss.item()
if (((step + 1) % args.gradient_accumulation_steps) == 0):
if args.fp16:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if args.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
if ((args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', ((tr_loss - logging_loss) / args.logging_steps), global_step)
logging_loss = tr_loss
if args.wandb_project:
wandb.log({'Training loss': current_loss, 'lr': scheduler.get_lr()[0], 'global_step': global_step})
if ((args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
output_dir_current = os.path.join(output_dir, 'checkpoint-{}'.format(global_step))
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if (args.evaluate_during_training and ((args.evaluate_during_training_steps > 0) and ((global_step % args.evaluate_during_training_steps) == 0))):
results = self.eval_model(eval_data, verbose=(verbose and args.evaluate_during_training_verbose), silent=args.evaluate_during_training_silent, **kwargs)
for (key, value) in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
output_dir_current = os.path.join(output_dir, 'checkpoint-{}'.format(global_step))
if args.save_eval_checkpoints:
self._save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
training_progress_scores['global_step'].append(global_step)
training_progress_scores['train_loss'].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(os.path.join(args.output_dir, 'training_progress_scores.csv'), index=False)
if args.wandb_project:
wandb.log(self._get_last_metrics(training_progress_scores))
if (not best_eval_metric):
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
if (best_eval_metric and args.early_stopping_metric_minimize):
if ((results[args.early_stopping_metric] - best_eval_metric) < args.early_stopping_delta):
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
elif args.use_early_stopping:
if (early_stopping_counter < args.early_stopping_patience):
early_stopping_counter += 1
if verbose:
logger.info(f' No improvement in {args.early_stopping_metric}')
logger.info(f' Current step: {early_stopping_counter}')
logger.info(f' Early stopping patience: {args.early_stopping_patience}')
else:
if verbose:
logger.info(f' Patience of {args.early_stopping_patience} steps reached')
logger.info(' Training terminated.')
train_iterator.close()
return (global_step, (tr_loss / global_step))
elif ((results[args.early_stopping_metric] - best_eval_metric) > args.early_stopping_delta):
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
elif args.use_early_stopping:
if (early_stopping_counter < args.early_stopping_patience):
early_stopping_counter += 1
if verbose:
logger.info(f' No improvement in {args.early_stopping_metric}')
logger.info(f' Current step: {early_stopping_counter}')
logger.info(f' Early stopping patience: {args.early_stopping_patience}')
else:
if verbose:
logger.info(f' Patience of {args.early_stopping_patience} steps reached')
logger.info(' Training terminated.')
train_iterator.close()
return (global_step, (tr_loss / global_step))
epoch_number += 1
output_dir_current = os.path.join(output_dir, 'checkpoint-{}-epoch-{}'.format(global_step, epoch_number))
if (args.save_model_every_epoch or args.evaluate_during_training):
os.makedirs(output_dir_current, exist_ok=True)
if args.save_model_every_epoch:
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training:
results = self.eval_model(eval_data, verbose=(verbose and args.evaluate_during_training_verbose), silent=args.evaluate_during_training_silent, **kwargs)
if args.save_eval_checkpoints:
self._save_model(output_dir_current, optimizer, scheduler, results=results)
training_progress_scores['global_step'].append(global_step)
training_progress_scores['train_loss'].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(os.path.join(args.output_dir, 'training_progress_scores.csv'), index=False)
if args.wandb_project:
wandb.log(self._get_last_metrics(training_progress_scores))
if (not best_eval_metric):
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
if (best_eval_metric and args.early_stopping_metric_minimize):
if ((results[args.early_stopping_metric] - best_eval_metric) < args.early_stopping_delta):
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
elif (args.use_early_stopping and args.early_stopping_consider_epochs):
if (early_stopping_counter < args.early_stopping_patience):
early_stopping_counter += 1
if verbose:
logger.info(f' No improvement in {args.early_stopping_metric}')
logger.info(f' Current step: {early_stopping_counter}')
logger.info(f' Early stopping patience: {args.early_stopping_patience}')
else:
if verbose:
logger.info(f' Patience of {args.early_stopping_patience} steps reached')
logger.info(' Training terminated.')
train_iterator.close()
return (global_step, (tr_loss / global_step))
elif ((results[args.early_stopping_metric] - best_eval_metric) > args.early_stopping_delta):
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
elif (args.use_early_stopping and args.early_stopping_consider_epochs):
if (early_stopping_counter < args.early_stopping_patience):
early_stopping_counter += 1
if verbose:
logger.info(f' No improvement in {args.early_stopping_metric}')
logger.info(f' Current step: {early_stopping_counter}')
logger.info(f' Early stopping patience: {args.early_stopping_patience}')
else:
if verbose:
logger.info(f' Patience of {args.early_stopping_patience} steps reached')
logger.info(' Training terminated.')
train_iterator.close()
return (global_step, (tr_loss / global_step))
return (global_step, (tr_loss / global_step))
| -7,693,660,778,794,015,000
|
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
|
simpletransformers/seq2seq/seq2seq_model.py
|
train
|
AliOsm/simpletransformers
|
python
|
def train(self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs):
'\n Trains the model on train_dataset.\n\n Utility function to be used by the train_model() method. Not intended to be used directly.\n '
model = self.model
args = self.args
tb_writer = SummaryWriter(logdir=args.tensorboard_dir)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=self.args.dataloader_num_workers)
if (args.max_steps > 0):
t_total = args.max_steps
args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
else:
t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = []
custom_parameter_names = set()
for group in self.args.custom_parameter_groups:
params = group.pop('params')
custom_parameter_names.update(params)
param_group = {**group}
param_group['params'] = [p for (n, p) in model.named_parameters() if (n in params)]
optimizer_grouped_parameters.append(param_group)
for group in self.args.custom_layer_parameters:
layer_number = group.pop('layer')
layer = f'layer.{layer_number}.'
group_d = {**group}
group_nd = {**group}
group_nd['weight_decay'] = 0.0
params_d = []
params_nd = []
for (n, p) in model.named_parameters():
if ((n not in custom_parameter_names) and (layer in n)):
if any(((nd in n) for nd in no_decay)):
params_nd.append(p)
else:
params_d.append(p)
custom_parameter_names.add(n)
group_d['params'] = params_d
group_nd['params'] = params_nd
optimizer_grouped_parameters.append(group_d)
optimizer_grouped_parameters.append(group_nd)
if (not self.args.train_custom_parameters_only):
optimizer_grouped_parameters.extend([{'params': [p for (n, p) in model.named_parameters() if ((n not in custom_parameter_names) and (not any(((nd in n) for nd in no_decay))))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if ((n not in custom_parameter_names) and any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.0}])
warmup_steps = math.ceil((t_total * args.warmup_ratio))
args.warmup_steps = (warmup_steps if (args.warmup_steps == 0) else args.warmup_steps)
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if (args.model_name and os.path.isfile(os.path.join(args.model_name, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name, 'scheduler.pt'))):
optimizer.load_state_dict(torch.load(os.path.join(args.model_name, 'optimizer.pt')))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name, 'scheduler.pt')))
if (args.n_gpu > 1):
model = torch.nn.DataParallel(model)
logger.info(' Training started')
global_step = 0
(tr_loss, logging_loss) = (0.0, 0.0)
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc='Epoch', disable=args.silent, mininterval=0)
epoch_number = 0
best_eval_metric = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
if (args.model_name and os.path.exists(args.model_name)):
try:
checkpoint_suffix = args.model_name.split('/')[(- 1)].split('-')
if (len(checkpoint_suffix) > 2):
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[(- 1)]
global_step = int(checkpoint_suffix)
epochs_trained = (global_step // (len(train_dataloader) // args.gradient_accumulation_steps))
steps_trained_in_current_epoch = (global_step % (len(train_dataloader) // args.gradient_accumulation_steps))
logger.info(' Continuing training from checkpoint, will skip to saved global_step')
logger.info(' Continuing training from epoch %d', epochs_trained)
logger.info(' Continuing training from global step %d', global_step)
logger.info(' Will skip the first %d steps in the current epoch', steps_trained_in_current_epoch)
except ValueError:
logger.info(' Starting fine-tuning.')
if args.evaluate_during_training:
training_progress_scores = self._create_training_progress_scores(**kwargs)
if args.wandb_project:
wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
wandb.watch(self.model)
if args.fp16:
from torch.cuda import amp
scaler = amp.GradScaler()
model.train()
for current_epoch in train_iterator:
if (epochs_trained > 0):
epochs_trained -= 1
continue
train_iterator.set_description(f'Epoch {(epoch_number + 1)} of {args.num_train_epochs}')
batch_iterator = tqdm(train_dataloader, desc=f'Running Epoch {epoch_number} of {args.num_train_epochs}', disable=args.silent, mininterval=0)
for (step, batch) in enumerate(batch_iterator):
if (steps_trained_in_current_epoch > 0):
steps_trained_in_current_epoch -= 1
continue
inputs = self._get_inputs_dict(batch)
if args.fp16:
with amp.autocast():
outputs = model(**inputs)
loss = outputs[0]
else:
outputs = model(**inputs)
loss = outputs[0]
if (args.n_gpu > 1):
loss = loss.mean()
current_loss = loss.item()
if show_running_loss:
batch_iterator.set_description(f'Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}')
if (args.gradient_accumulation_steps > 1):
loss = (loss / args.gradient_accumulation_steps)
if args.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
tr_loss += loss.item()
if (((step + 1) % args.gradient_accumulation_steps) == 0):
if args.fp16:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if args.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
if ((args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', ((tr_loss - logging_loss) / args.logging_steps), global_step)
logging_loss = tr_loss
if args.wandb_project:
wandb.log({'Training loss': current_loss, 'lr': scheduler.get_lr()[0], 'global_step': global_step})
if ((args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
output_dir_current = os.path.join(output_dir, 'checkpoint-{}'.format(global_step))
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if (args.evaluate_during_training and ((args.evaluate_during_training_steps > 0) and ((global_step % args.evaluate_during_training_steps) == 0))):
results = self.eval_model(eval_data, verbose=(verbose and args.evaluate_during_training_verbose), silent=args.evaluate_during_training_silent, **kwargs)
for (key, value) in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
output_dir_current = os.path.join(output_dir, 'checkpoint-{}'.format(global_step))
if args.save_eval_checkpoints:
self._save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
training_progress_scores['global_step'].append(global_step)
training_progress_scores['train_loss'].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(os.path.join(args.output_dir, 'training_progress_scores.csv'), index=False)
if args.wandb_project:
wandb.log(self._get_last_metrics(training_progress_scores))
if (not best_eval_metric):
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
if (best_eval_metric and args.early_stopping_metric_minimize):
if ((results[args.early_stopping_metric] - best_eval_metric) < args.early_stopping_delta):
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
elif args.use_early_stopping:
if (early_stopping_counter < args.early_stopping_patience):
early_stopping_counter += 1
if verbose:
logger.info(f' No improvement in {args.early_stopping_metric}')
logger.info(f' Current step: {early_stopping_counter}')
logger.info(f' Early stopping patience: {args.early_stopping_patience}')
else:
if verbose:
logger.info(f' Patience of {args.early_stopping_patience} steps reached')
logger.info(' Training terminated.')
train_iterator.close()
return (global_step, (tr_loss / global_step))
elif ((results[args.early_stopping_metric] - best_eval_metric) > args.early_stopping_delta):
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
elif args.use_early_stopping:
if (early_stopping_counter < args.early_stopping_patience):
early_stopping_counter += 1
if verbose:
logger.info(f' No improvement in {args.early_stopping_metric}')
logger.info(f' Current step: {early_stopping_counter}')
logger.info(f' Early stopping patience: {args.early_stopping_patience}')
else:
if verbose:
logger.info(f' Patience of {args.early_stopping_patience} steps reached')
logger.info(' Training terminated.')
train_iterator.close()
return (global_step, (tr_loss / global_step))
epoch_number += 1
output_dir_current = os.path.join(output_dir, 'checkpoint-{}-epoch-{}'.format(global_step, epoch_number))
if (args.save_model_every_epoch or args.evaluate_during_training):
os.makedirs(output_dir_current, exist_ok=True)
if args.save_model_every_epoch:
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training:
results = self.eval_model(eval_data, verbose=(verbose and args.evaluate_during_training_verbose), silent=args.evaluate_during_training_silent, **kwargs)
if args.save_eval_checkpoints:
self._save_model(output_dir_current, optimizer, scheduler, results=results)
training_progress_scores['global_step'].append(global_step)
training_progress_scores['train_loss'].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(os.path.join(args.output_dir, 'training_progress_scores.csv'), index=False)
if args.wandb_project:
wandb.log(self._get_last_metrics(training_progress_scores))
if (not best_eval_metric):
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
if (best_eval_metric and args.early_stopping_metric_minimize):
if ((results[args.early_stopping_metric] - best_eval_metric) < args.early_stopping_delta):
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
elif (args.use_early_stopping and args.early_stopping_consider_epochs):
if (early_stopping_counter < args.early_stopping_patience):
early_stopping_counter += 1
if verbose:
logger.info(f' No improvement in {args.early_stopping_metric}')
logger.info(f' Current step: {early_stopping_counter}')
logger.info(f' Early stopping patience: {args.early_stopping_patience}')
else:
if verbose:
logger.info(f' Patience of {args.early_stopping_patience} steps reached')
logger.info(' Training terminated.')
train_iterator.close()
return (global_step, (tr_loss / global_step))
elif ((results[args.early_stopping_metric] - best_eval_metric) > args.early_stopping_delta):
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
elif (args.use_early_stopping and args.early_stopping_consider_epochs):
if (early_stopping_counter < args.early_stopping_patience):
early_stopping_counter += 1
if verbose:
logger.info(f' No improvement in {args.early_stopping_metric}')
logger.info(f' Current step: {early_stopping_counter}')
logger.info(f' Early stopping patience: {args.early_stopping_patience}')
else:
if verbose:
logger.info(f' Patience of {args.early_stopping_patience} steps reached')
logger.info(' Training terminated.')
train_iterator.close()
return (global_step, (tr_loss / global_step))
return (global_step, (tr_loss / global_step))
|
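The early-stopping branches in the training loop above all apply one rule: reset a counter whenever the monitored metric improves on the best value seen so far relative to early_stopping_delta, otherwise increment the counter and stop once the patience is exhausted. A minimal standalone sketch of that rule, mirroring the comparison used above (the function name and sample histories are illustrative, not part of the library):

def should_stop(metric_history, minimize=True, delta=0.0, patience=3):
    # Return True once `patience` consecutive evaluations fail to improve
    # on the best value seen so far (improvement test mirrors the loop above).
    best = None
    counter = 0
    for value in metric_history:
        if best is None:
            improved = True
        elif minimize:
            improved = (value - best) < delta
        else:
            improved = (value - best) > delta
        if improved:
            best = value
            counter = 0
        else:
            counter += 1
            if counter >= patience:
                return True
    return False

assert should_stop([0.9, 0.8, 0.81, 0.82, 0.83], minimize=True, patience=3)
assert not should_stop([0.9, 0.8, 0.7, 0.6], minimize=True, patience=3)
|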
def eval_model(self, eval_data, output_dir=None, verbose=True, silent=False, **kwargs):
'\n Evaluates the model on eval_data. Saves results to output_dir.\n\n Args:\n eval_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.\n - `input_text`: The input text sequence.\n - `target_text`: The target text sequence.\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n verbose: If verbose, results will be printed to the console on completion of evaluation.\n silent: If silent, tqdm progress bars will be hidden.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs\n will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.\n Returns:\n results: Dictionary containing evaluation results.\n '
if (not output_dir):
output_dir = self.args.output_dir
self._move_model_to_device()
eval_dataset = self.load_and_cache_examples(eval_data, evaluate=True, verbose=verbose, silent=silent)
os.makedirs(output_dir, exist_ok=True)
result = self.evaluate(eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs)
self.results.update(result)
if self.args.evaluate_generated_text:
to_predict = eval_data['input_text'].tolist()
preds = self.predict(to_predict)
result = self.compute_metrics(eval_data['target_text'].tolist(), preds, **kwargs)
self.results.update(result)
if verbose:
logger.info(self.results)
return self.results
| -2,470,111,290,407,275,000
|
Evaluates the model on eval_data. Saves results to output_dir.
Args:
eval_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.
- `input_text`: The input text sequence.
- `target_text`: The target text sequence.
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
verbose: If verbose, results will be printed to the console on completion of evaluation.
silent: If silent, tqdm progress bars will be hidden.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
results: Dictionary containing evaluation results.
|
simpletransformers/seq2seq/seq2seq_model.py
|
eval_model
|
AliOsm/simpletransformers
|
python
|
def eval_model(self, eval_data, output_dir=None, verbose=True, silent=False, **kwargs):
'\n Evaluates the model on eval_data. Saves results to output_dir.\n\n Args:\n eval_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.\n - `input_text`: The input text sequence.\n - `target_text`: The target text sequence.\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n verbose: If verbose, results will be printed to the console on completion of evaluation.\n silent: If silent, tqdm progress bars will be hidden.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs\n will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.\n Returns:\n results: Dictionary containing evaluation results.\n '
if (not output_dir):
output_dir = self.args.output_dir
self._move_model_to_device()
eval_dataset = self.load_and_cache_examples(eval_data, evaluate=True, verbose=verbose, silent=silent)
os.makedirs(output_dir, exist_ok=True)
result = self.evaluate(eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs)
self.results.update(result)
if self.args.evaluate_generated_text:
to_predict = eval_data['input_text'].tolist()
preds = self.predict(to_predict)
result = self.compute_metrics(eval_data['target_text'].tolist(), preds, **kwargs)
self.results.update(result)
if verbose:
logger.info(self.results)
return self.results
|
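A usage sketch for eval_model with an extra metric passed as a keyword argument (the DataFrame contents and count_matches are illustrative; `model` is assumed to be an already-initialised Seq2SeqModel, and custom metrics only run when evaluate_generated_text is enabled in the model args):

import pandas as pd

def count_matches(labels, preds):
    # Both arguments are lists of strings; count exact matches.
    return sum(1 for label, pred in zip(labels, preds) if label == pred)

eval_df = pd.DataFrame({
    'input_text': ['translate: one sentence', 'translate: another sentence'],
    'target_text': ['une phrase', 'une autre phrase'],
})

results = model.eval_model(eval_df, matches=count_matches)
print(results['eval_loss'], results.get('matches'))
|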
def evaluate(self, eval_dataset, output_dir, verbose=True, silent=False, **kwargs):
'\n Evaluates the model on eval_dataset.\n\n Utility function to be used by the eval_model() method. Not intended to be used directly.\n '
model = self.model
args = self.args
eval_output_dir = output_dir
results = {}
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
if (args.n_gpu > 1):
model = torch.nn.DataParallel(model)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(eval_dataloader, disable=(args.silent or silent), desc='Running Evaluation'):
inputs = self._get_inputs_dict(batch)
with torch.no_grad():
outputs = model(**inputs)
loss = outputs[0]
eval_loss += loss.mean().item()
nb_eval_steps += 1
eval_loss = (eval_loss / nb_eval_steps)
results['eval_loss'] = eval_loss
output_eval_file = os.path.join(eval_output_dir, 'eval_results.txt')
with open(output_eval_file, 'w') as writer:
for key in sorted(results.keys()):
writer.write('{} = {}\n'.format(key, str(results[key])))
return results
| -3,573,907,752,735,053,000
|
Evaluates the model on eval_dataset.
Utility function to be used by the eval_model() method. Not intended to be used directly.
|
simpletransformers/seq2seq/seq2seq_model.py
|
evaluate
|
AliOsm/simpletransformers
|
python
|
def evaluate(self, eval_dataset, output_dir, verbose=True, silent=False, **kwargs):
'\n Evaluates the model on eval_dataset.\n\n Utility function to be used by the eval_model() method. Not intended to be used directly.\n '
model = self.model
args = self.args
eval_output_dir = output_dir
results = {}
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
if (args.n_gpu > 1):
model = torch.nn.DataParallel(model)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(eval_dataloader, disable=(args.silent or silent), desc='Running Evaluation'):
inputs = self._get_inputs_dict(batch)
with torch.no_grad():
outputs = model(**inputs)
loss = outputs[0]
eval_loss += loss.mean().item()
nb_eval_steps += 1
eval_loss = (eval_loss / nb_eval_steps)
results['eval_loss'] = eval_loss
output_eval_file = os.path.join(eval_output_dir, 'eval_results.txt')
with open(output_eval_file, 'w') as writer:
for key in sorted(results.keys()):
writer.write('{} = {}\n'.format(key, str(results[key])))
return results
|
def predict(self, to_predict):
'\n Performs predictions on a list of text.\n\n Args:\n to_predict: A python list of text (str) to be sent to the model for prediction. Note that the prefix should be prepended to the text.\n\n Returns:\n preds: A python list of the generated sequences.\n '
self._move_model_to_device()
all_outputs = []
for batch in [to_predict[i:(i + self.args.eval_batch_size)] for i in range(0, len(to_predict), self.args.eval_batch_size)]:
if (self.args.model_type == 'marian'):
input_ids = self.encoder_tokenizer.prepare_translation_batch(batch, max_length=self.args.max_seq_length, pad_to_max_length=True, return_tensors='pt')['input_ids']
else:
input_ids = self.encoder_tokenizer.batch_encode_plus(batch, max_length=self.args.max_seq_length, pad_to_max_length=True, return_tensors='pt')['input_ids']
input_ids = input_ids.to(self.device)
if (self.args.model_type in ['bart', 'marian']):
outputs = self.model.generate(input_ids=input_ids, num_beams=self.args.num_beams, max_length=self.args.max_length, length_penalty=self.args.length_penalty, early_stopping=self.args.early_stopping, repetition_penalty=self.args.repetition_penalty, do_sample=self.args.do_sample, top_k=self.args.top_k, top_p=self.args.top_p, num_return_sequences=self.args.num_return_sequences)
else:
outputs = self.model.generate(input_ids=input_ids, decoder_start_token_id=self.model.config.decoder.pad_token_id, num_beams=self.args.num_beams, max_length=self.args.max_length, length_penalty=self.args.length_penalty, early_stopping=self.args.early_stopping, repetition_penalty=self.args.repetition_penalty, do_sample=self.args.do_sample, top_k=self.args.top_k, top_p=self.args.top_p, num_return_sequences=self.args.num_return_sequences)
all_outputs.extend(outputs.cpu().numpy())
if self.args.use_multiprocessed_decoding:
self.model.to('cpu')
with Pool(self.args.process_count) as p:
outputs = list(tqdm(p.imap(self._decode, all_outputs, chunksize=self.args.multiprocessing_chunksize), total=len(all_outputs), desc='Decoding outputs', disable=self.args.silent))
self._move_model_to_device()
else:
outputs = [self.decoder_tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True) for output_id in all_outputs]
if (self.args.num_return_sequences > 1):
return [outputs[i:(i + self.args.num_return_sequences)] for i in range(0, len(outputs), self.args.num_return_sequences)]
else:
return outputs
| 7,405,487,662,115,485,000
|
Performs predictions on a list of text.
Args:
to_predict: A python list of text (str) to be sent to the model for prediction. Note that the prefix should be prepended to the text.
Returns:
preds: A python list of the generated sequences.
|
simpletransformers/seq2seq/seq2seq_model.py
|
predict
|
AliOsm/simpletransformers
|
python
|
def predict(self, to_predict):
'\n Performs predictions on a list of text.\n\n Args:\n to_predict: A python list of text (str) to be sent to the model for prediction. Note that the prefix should be prepended to the text.\n\n Returns:\n preds: A python list of the generated sequences.\n '
self._move_model_to_device()
all_outputs = []
for batch in [to_predict[i:(i + self.args.eval_batch_size)] for i in range(0, len(to_predict), self.args.eval_batch_size)]:
if (self.args.model_type == 'marian'):
input_ids = self.encoder_tokenizer.prepare_translation_batch(batch, max_length=self.args.max_seq_length, pad_to_max_length=True, return_tensors='pt')['input_ids']
else:
input_ids = self.encoder_tokenizer.batch_encode_plus(batch, max_length=self.args.max_seq_length, pad_to_max_length=True, return_tensors='pt')['input_ids']
input_ids = input_ids.to(self.device)
if (self.args.model_type in ['bart', 'marian']):
outputs = self.model.generate(input_ids=input_ids, num_beams=self.args.num_beams, max_length=self.args.max_length, length_penalty=self.args.length_penalty, early_stopping=self.args.early_stopping, repetition_penalty=self.args.repetition_penalty, do_sample=self.args.do_sample, top_k=self.args.top_k, top_p=self.args.top_p, num_return_sequences=self.args.num_return_sequences)
else:
outputs = self.model.generate(input_ids=input_ids, decoder_start_token_id=self.model.config.decoder.pad_token_id, num_beams=self.args.num_beams, max_length=self.args.max_length, length_penalty=self.args.length_penalty, early_stopping=self.args.early_stopping, repetition_penalty=self.args.repetition_penalty, do_sample=self.args.do_sample, top_k=self.args.top_k, top_p=self.args.top_p, num_return_sequences=self.args.num_return_sequences)
all_outputs.extend(outputs.cpu().numpy())
if self.args.use_multiprocessed_decoding:
self.model.to('cpu')
with Pool(self.args.process_count) as p:
outputs = list(tqdm(p.imap(self._decode, all_outputs, chunksize=self.args.multiprocessing_chunksize), total=len(all_outputs), desc='Decoding outputs', disable=self.args.silent))
self._move_model_to_device()
else:
outputs = [self.decoder_tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True) for output_id in all_outputs]
if (self.args.num_return_sequences > 1):
return [outputs[i:(i + self.args.num_return_sequences)] for i in range(0, len(outputs), self.args.num_return_sequences)]
else:
return outputs
|
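A usage sketch for predict (inputs are illustrative; `model` is assumed to be an already-initialised Seq2SeqModel). Note the return shape: with num_return_sequences > 1 the method returns one list of candidate sequences per input, otherwise one string per input:

to_predict = ['summarize: the first document', 'summarize: the second document']
preds = model.predict(to_predict)

if model.args.num_return_sequences > 1:
    for text, candidates in zip(to_predict, preds):
        print(text, '->', candidates)  # list of generated sequences per input
else:
    for text, pred in zip(to_predict, preds):
        print(text, '->', pred)        # single generated sequence per input
|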
def compute_metrics(self, labels, preds, **kwargs):
'\n Computes the evaluation metrics for the model predictions.\n\n Args:\n labels: List of target sequences\n preds: List of model generated outputs\n **kwargs: Custom metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs\n will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.\n\n Returns:\n result: Dictionary containing evaluation results.\n '
results = {}
for (metric, func) in kwargs.items():
results[metric] = func(labels, preds)
return results
| 5,236,419,145,034,337,000
|
Computes the evaluation metrics for the model predictions.
Args:
labels: List of target sequences
preds: List of model generated outputs
**kwargs: Custom metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
result: Dictionary containing evaluation results.
|
simpletransformers/seq2seq/seq2seq_model.py
|
compute_metrics
|
AliOsm/simpletransformers
|
python
|
def compute_metrics(self, labels, preds, **kwargs):
'\n Computes the evaluation metrics for the model predictions.\n\n Args:\n labels: List of target sequences\n preds: List of model generated outputs\n **kwargs: Custom metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs\n will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.\n\n Returns:\n result: Dictionary containing evaluation results.\n '
results = {}
for (metric, func) in kwargs.items():
results[metric] = func(labels, preds)
return results
|
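compute_metrics simply applies each keyword callable to (labels, preds), so any function with that signature works; a sketch with an illustrative metric (`model` is again assumed to be a Seq2SeqModel instance):

def exact_match(labels, preds):
    # Fraction of predictions identical to their targets.
    return sum(l == p for l, p in zip(labels, preds)) / len(labels)

labels = ['a b c', 'd e']
preds = ['a b c', 'd e f']
result = model.compute_metrics(labels, preds, exact_match=exact_match)
assert result == {'exact_match': 0.5}
|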
def load_and_cache_examples(self, data, evaluate=False, no_cache=False, verbose=True, silent=False):
'\n Creates a T5Dataset from data.\n\n Utility function for train() and eval() methods. Not intended to be used directly.\n '
encoder_tokenizer = self.encoder_tokenizer
decoder_tokenizer = self.decoder_tokenizer
args = self.args
if (not no_cache):
no_cache = args.no_cache
if (not no_cache):
os.makedirs(self.args.cache_dir, exist_ok=True)
mode = ('dev' if evaluate else 'train')
if args.dataset_class:
CustomDataset = args.dataset_class
return CustomDataset(encoder_tokenizer, decoder_tokenizer, args, data, mode)
elif (args.model_type in ['bart', 'marian']):
return SimpleSummarizationDataset(encoder_tokenizer, self.args, data, mode)
else:
return Seq2SeqDataset(encoder_tokenizer, decoder_tokenizer, self.args, data, mode)
| -7,127,475,467,697,222,000
|
Creates a T5Dataset from data.
Utility function for train() and eval() methods. Not intended to be used directly.
|
simpletransformers/seq2seq/seq2seq_model.py
|
load_and_cache_examples
|
AliOsm/simpletransformers
|
python
|
def load_and_cache_examples(self, data, evaluate=False, no_cache=False, verbose=True, silent=False):
'\n Creates a T5Dataset from data.\n\n Utility function for train() and eval() methods. Not intended to be used directly.\n '
encoder_tokenizer = self.encoder_tokenizer
decoder_tokenizer = self.decoder_tokenizer
args = self.args
if (not no_cache):
no_cache = args.no_cache
if (not no_cache):
os.makedirs(self.args.cache_dir, exist_ok=True)
mode = ('dev' if evaluate else 'train')
if args.dataset_class:
CustomDataset = args.dataset_class
return CustomDataset(encoder_tokenizer, decoder_tokenizer, args, data, mode)
elif (args.model_type in ['bart', 'marian']):
return SimpleSummarizationDataset(encoder_tokenizer, self.args, data, mode)
else:
return Seq2SeqDataset(encoder_tokenizer, decoder_tokenizer, self.args, data, mode)
|
def test_create_user_with_email_successful(self):
'Test that creating a user with an email succeeds'
email = 'example@example.com'
password = 'testpassword'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
| -1,282,299,646,816,919,600
|
Test that creating a user with an email succeeds
|
shoppingmall/core/tests/test_models.py
|
test_create_user_with_email_successful
|
jacobjlee/simple-shopping
|
python
|
def test_create_user_with_email_successful(self):
email = 'example@example.com'
password = 'testpassword'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
|
def test_new_user_email_normalized(self):
'Test that a new user email is normalized'
email = 'example@example.com'
user = get_user_model().objects.create_user(email, 'testpw123')
self.assertEqual(user.email, email.lower())
| 5,622,624,492,197,440,000
|
Test that a new user email is normalized
|
shoppingmall/core/tests/test_models.py
|
test_new_user_email_normalized
|
jacobjlee/simple-shopping
|
python
|
def test_new_user_email_normalized(self):
email = 'example@example.com'
user = get_user_model().objects.create_user(email, 'testpw123')
self.assertEqual(user.email, email.lower())
|
def test_new_user_missing_email(self):
'Test that an error is raised when no email is provided'
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'testpw123')
| -4,798,733,096,387,014,000
|
Test that an error is raised when no email is provided
|
shoppingmall/core/tests/test_models.py
|
test_new_user_missing_email
|
jacobjlee/simple-shopping
|
python
|
def test_new_user_missing_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'testpw123')
|
def test_create_new_superuser(self):
'Test creating a new superuser'
user = get_user_model().objects.create_superuser('example@example.com', 'testpw123')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| 1,265,609,921,803,198,200
|
Test creating a new superuser
|
shoppingmall/core/tests/test_models.py
|
test_create_new_superuser
|
jacobjlee/simple-shopping
|
python
|
def test_create_new_superuser(self):
user = get_user_model().objects.create_superuser('example@example.com', 'testpw123')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
|
def xor_string(hash1, hash2, hash_size):
'Encrypt/Decrypt function used for password encryption in\n authentication, using a simple XOR.\n\n Args:\n hash1 (str): The first hash.\n hash2 (str): The second hash.\n\n Returns:\n str: A string with the xor applied.\n '
xored = [(h1 ^ h2) for (h1, h2) in zip(hash1, hash2)]
return struct.pack('{0}B'.format(hash_size), *xored)
| -3,380,580,171,674,236,400
|
Encrypt/Decrypt function used for password encryption in
authentication, using a simple XOR.
Args:
hash1 (str): The first hash.
hash2 (str): The second hash.
Returns:
str: A string with the xor applied.
|
backend/env/Lib/site-packages/mysqlx/authentication.py
|
xor_string
|
Abdullah9340/Geese-Migration
|
python
|
def xor_string(hash1, hash2, hash_size):
'Encrypt/Decrypt function used for password encryption in\n authentication, using a simple XOR.\n\n Args:\n hash1 (str): The first hash.\n hash2 (str): The second hash.\n\n Returns:\n str: A string with the xor applied.\n '
xored = [(h1 ^ h2) for (h1, h2) in zip(hash1, hash2)]
return struct.pack('{0}B'.format(hash_size), *xored)
|
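Because XOR is its own inverse, applying xor_string twice with the same mask recovers the original bytes; a quick round-trip check (with xor_string from the snippet above in scope; the inputs are stand-in SHA-1 digests):

import hashlib

h1 = hashlib.sha1(b'secret').digest()  # 20 bytes
h2 = hashlib.sha1(b'nonce').digest()   # 20 bytes

masked = xor_string(h1, h2, 20)
assert xor_string(masked, h2, 20) == h1  # XOR with the same mask undoes it
|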
def name(self):
'Returns the plugin name.\n\n Returns:\n str: The plugin name.\n '
raise NotImplementedError
| 6,467,344,744,560,710,000
|
Returns the plugin name.
Returns:
str: The plugin name.
|
backend/env/Lib/site-packages/mysqlx/authentication.py
|
name
|
Abdullah9340/Geese-Migration
|
python
|
def name(self):
'Returns the plugin name.\n\n Returns:\n str: The plugin name.\n '
raise NotImplementedError
|
def auth_name(self):
'Returns the authentication name.\n\n Returns:\n str: The authentication name.\n '
raise NotImplementedError
| 6,014,413,375,730,915,000
|
Returns the authentication name.
Returns:
str: The authentication name.
|
backend/env/Lib/site-packages/mysqlx/authentication.py
|
auth_name
|
Abdullah9340/Geese-Migration
|
python
|
def auth_name(self):
'Returns the authentication name.\n\n Returns:\n str: The authentication name.\n '
raise NotImplementedError
|
def name(self):
'Returns the plugin name.\n\n Returns:\n str: The plugin name.\n '
return 'MySQL 4.1 Authentication Plugin'
| -5,534,950,544,939,674,000
|
Returns the plugin name.
Returns:
str: The plugin name.
|
backend/env/Lib/site-packages/mysqlx/authentication.py
|
name
|
Abdullah9340/Geese-Migration
|
python
|
def name(self):
'Returns the plugin name.\n\n Returns:\n str: The plugin name.\n '
return 'MySQL 4.1 Authentication Plugin'
|
def auth_name(self):
'Returns the authentication name.\n\n Returns:\n str: The authentication name.\n '
return 'MYSQL41'
| 5,984,777,660,505,297,000
|
Returns the authentication name.
Returns:
str: The authentication name.
|
backend/env/Lib/site-packages/mysqlx/authentication.py
|
auth_name
|
Abdullah9340/Geese-Migration
|
python
|
def auth_name(self):
'Returns the authentication name.\n\n Returns:\n str: The authentication name.\n '
return 'MYSQL41'
|
def auth_data(self, data):
'Hashing for MySQL 4.1 authentication.\n\n Args:\n data (str): The authentication data.\n\n Returns:\n str: The authentication response.\n '
if self._password:
password = (self._password.encode('utf-8') if isinstance(self._password, str) else self._password)
hash1 = hashlib.sha1(password).digest()
hash2 = hashlib.sha1(hash1).digest()
xored = xor_string(hash1, hashlib.sha1((data + hash2)).digest(), 20)
return '{0}\x00{1}\x00*{2}\x00'.format('', self._username, hexlify(xored))
return '{0}\x00{1}\x00'.format('', self._username)
| -2,681,088,055,857,822,000
|
Hashing for MySQL 4.1 authentication.
Args:
data (str): The authentication data.
Returns:
str: The authentication response.
|
backend/env/Lib/site-packages/mysqlx/authentication.py
|
auth_data
|
Abdullah9340/Geese-Migration
|
python
|
def auth_data(self, data):
'Hashing for MySQL 4.1 authentication.\n\n Args:\n data (str): The authentication data.\n\n Returns:\n str: The authentication response.\n '
if self._password:
password = (self._password.encode('utf-8') if isinstance(self._password, str) else self._password)
hash1 = hashlib.sha1(password).digest()
hash2 = hashlib.sha1(hash1).digest()
xored = xor_string(hash1, hashlib.sha1((data + hash2)).digest(), 20)
return '{0}\x00{1}\x00*{2}\x00'.format('', self._username, hexlify(xored))
return '{0}\x00{1}\x00'.format('', self._username)
|
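The MYSQL41 response above is the classic MySQL 4.1 scramble, SHA1(password) XOR SHA1(nonce + SHA1(SHA1(password))); a self-contained sketch of just the scramble step (the function name and nonce are illustrative, not part of the connector):

import hashlib
from binascii import hexlify

def mysql41_scramble(password: bytes, nonce: bytes) -> bytes:
    hash1 = hashlib.sha1(password).digest()      # SHA1(password)
    hash2 = hashlib.sha1(hash1).digest()         # SHA1(SHA1(password))
    mask = hashlib.sha1(nonce + hash2).digest()  # SHA1(nonce + hash2)
    return bytes(a ^ b for a, b in zip(hash1, mask))

scramble = mysql41_scramble(b'secret', b'12345678901234567890')
print(hexlify(scramble))  # 20-byte scramble, hex-encoded as in auth_data
|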
def name(self):
'Returns the plugin name.\n\n Returns:\n str: The plugin name.\n '
return 'Plain Authentication Plugin'
| 4,109,888,586,528,399,000
|
Returns the plugin name.
Returns:
str: The plugin name.
|
backend/env/Lib/site-packages/mysqlx/authentication.py
|
name
|
Abdullah9340/Geese-Migration
|
python
|
def name(self):
'Returns the plugin name.\n\n Returns:\n str: The plugin name.\n '
return 'Plain Authentication Plugin'
|
def auth_name(self):
'Returns the authentication name.\n\n Returns:\n str: The authentication name.\n '
return 'PLAIN'
| 3,704,259,228,832,687,000
|
Returns the authentication name.
Returns:
str: The authentication name.
|
backend/env/Lib/site-packages/mysqlx/authentication.py
|
auth_name
|
Abdullah9340/Geese-Migration
|
python
|
def auth_name(self):
'Returns the authentication name.\n\n Returns:\n str: The authentication name.\n '
return 'PLAIN'
|
def auth_data(self):
'Returns the authentication data.\n\n Returns:\n str: The authentication data.\n '
return '\x00{0}\x00{1}'.format(self._username, self._password)
| 3,974,220,015,677,046,000
|
Returns the authentication data.
Returns:
str: The authentication data.
|
backend/env/Lib/site-packages/mysqlx/authentication.py
|
auth_data
|
Abdullah9340/Geese-Migration
|
python
|
def auth_data(self):
'Returns the authentication data.\n\n Returns:\n str: The authentication data.\n '
return '\x00{0}\x00{1}'.format(self._username, self._password)
|
def name(self):
'Returns the plugin name.\n\n Returns:\n str: The plugin name.\n '
return 'SHA256_MEMORY Authentication Plugin'
| 7,930,071,930,540,710,000
|
Returns the plugin name.
Returns:
str: The plugin name.
|
backend/env/Lib/site-packages/mysqlx/authentication.py
|
name
|
Abdullah9340/Geese-Migration
|
python
|
def name(self):
'Returns the plugin name.\n\n Returns:\n str: The plugin name.\n '
return 'SHA256_MEMORY Authentication Plugin'
|
def auth_name(self):
'Returns the authentication name.\n\n Returns:\n str: The authentication name.\n '
return 'SHA256_MEMORY'
| 4,464,576,182,657,441,000
|
Returns the authentication name.
Returns:
str: The authentication name.
|
backend/env/Lib/site-packages/mysqlx/authentication.py
|
auth_name
|
Abdullah9340/Geese-Migration
|
python
|
def auth_name(self):
'Returns the authentication name.\n\n Returns:\n str: The authentication name.\n '
return 'SHA256_MEMORY'
|
def auth_data(self, data):
'Hashing for SHA256_MEMORY authentication.\n\n The scramble is of the form:\n SHA256(SHA256(SHA256(PASSWORD)),NONCE) XOR SHA256(PASSWORD)\n\n Args:\n data (str): The authentication data.\n\n Returns:\n str: The authentication response.\n '
password = (self._password.encode('utf-8') if isinstance(self._password, str) else self._password)
hash1 = hashlib.sha256(password).digest()
hash2 = hashlib.sha256((hashlib.sha256(hash1).digest() + data)).digest()
xored = xor_string(hash2, hash1, 32)
return '\x00{0}\x00{1}'.format(self._username, hexlify(xored))
| -8,982,060,605,021,540,000
|
Hashing for SHA256_MEMORY authentication.
The scramble is of the form:
SHA256(SHA256(SHA256(PASSWORD)),NONCE) XOR SHA256(PASSWORD)
Args:
data (str): The authentication data.
Returns:
str: The authentication response.
|
backend/env/Lib/site-packages/mysqlx/authentication.py
|
auth_data
|
Abdullah9340/Geese-Migration
|
python
|
def auth_data(self, data):
'Hashing for SHA256_MEMORY authentication.\n\n The scramble is of the form:\n SHA256(SHA256(SHA256(PASSWORD)),NONCE) XOR SHA256(PASSWORD)\n\n Args:\n data (str): The authentication data.\n\n Returns:\n str: The authentication response.\n '
password = (self._password.encode('utf-8') if isinstance(self._password, str) else self._password)
hash1 = hashlib.sha256(password).digest()
hash2 = hashlib.sha256((hashlib.sha256(hash1).digest() + data)).digest()
xored = xor_string(hash2, hash1, 32)
return '\x00{0}\x00{1}'.format(self._username, hexlify(xored))
|
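The same pattern with SHA-256: the docstring's scramble SHA256(SHA256(SHA256(password)) + nonce) XOR SHA256(password) as a standalone sketch (names and inputs are illustrative):

import hashlib

def sha256_memory_scramble(password: bytes, nonce: bytes) -> bytes:
    hash1 = hashlib.sha256(password).digest()  # SHA256(password)
    hash2 = hashlib.sha256(hashlib.sha256(hash1).digest() + nonce).digest()
    return bytes(a ^ b for a, b in zip(hash2, hash1))  # 32-byte scramble

assert len(sha256_memory_scramble(b'secret', b'n' * 32)) == 32
|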
def p_expression_1(self, p):
' expression : binary_expression '
p[0] = p[1]
| 7,685,516,735,086,991,000
|
expression : binary_expression
|
analyzer/apisan/parse/sparser.py
|
p_expression_1
|
oslab-swrc/apisan
|
python
|
def p_expression_1(self, p):
' '
p[0] = p[1]
|
def p_binary_expression_1(self, p):
' binary_expression : cast_expression '
p[0] = p[1]
| -9,182,160,903,065,062,000
|
binary_expression : cast_expression
|
analyzer/apisan/parse/sparser.py
|
p_binary_expression_1
|
oslab-swrc/apisan
|
python
|
def p_binary_expression_1(self, p):
' '
p[0] = p[1]
|
def p_binary_expression_2(self, p):
' binary_expression : binary_expression TIMES binary_expression\n | binary_expression DIVIDE binary_expression\n | binary_expression MOD binary_expression\n | binary_expression PLUS binary_expression\n | binary_expression MINUS binary_expression\n | binary_expression RSHIFT binary_expression\n | binary_expression LSHIFT binary_expression\n | binary_expression LT binary_expression\n | binary_expression LE binary_expression\n | binary_expression GE binary_expression\n | binary_expression GT binary_expression\n | binary_expression EQ binary_expression\n | binary_expression NE binary_expression\n | binary_expression AND binary_expression\n | binary_expression OR binary_expression\n | binary_expression XOR binary_expression\n | binary_expression LAND binary_expression\n | binary_expression LOR binary_expression\n '
p[0] = BinaryOperatorSymbol(p[1], p[2], p[3])
| 6,915,403,403,737,476,000
|
binary_expression : binary_expression TIMES binary_expression
| binary_expression DIVIDE binary_expression
| binary_expression MOD binary_expression
| binary_expression PLUS binary_expression
| binary_expression MINUS binary_expression
| binary_expression RSHIFT binary_expression
| binary_expression LSHIFT binary_expression
| binary_expression LT binary_expression
| binary_expression LE binary_expression
| binary_expression GE binary_expression
| binary_expression GT binary_expression
| binary_expression EQ binary_expression
| binary_expression NE binary_expression
| binary_expression AND binary_expression
| binary_expression OR binary_expression
| binary_expression XOR binary_expression
| binary_expression LAND binary_expression
| binary_expression LOR binary_expression
|
analyzer/apisan/parse/sparser.py
|
p_binary_expression_2
|
oslab-swrc/apisan
|
python
|
def p_binary_expression_2(self, p):
' binary_expression : binary_expression TIMES binary_expression\n | binary_expression DIVIDE binary_expression\n | binary_expression MOD binary_expression\n | binary_expression PLUS binary_expression\n | binary_expression MINUS binary_expression\n | binary_expression RSHIFT binary_expression\n | binary_expression LSHIFT binary_expression\n | binary_expression LT binary_expression\n | binary_expression LE binary_expression\n | binary_expression GE binary_expression\n | binary_expression GT binary_expression\n | binary_expression EQ binary_expression\n | binary_expression NE binary_expression\n | binary_expression AND binary_expression\n | binary_expression OR binary_expression\n | binary_expression XOR binary_expression\n | binary_expression LAND binary_expression\n | binary_expression LOR binary_expression\n '
p[0] = BinaryOperatorSymbol(p[1], p[2], p[3])
|
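In PLY, each p_* docstring is the grammar rule itself, so the flat binary_expression rule above is ambiguous unless the parser also declares operator precedence; a sketch of the kind of precedence table such a grammar is normally paired with (the ordering below mirrors C and is illustrative, not taken from sparser.py):

# Lowest precedence first, as PLY expects.
precedence = (
    ('left', 'LOR'),
    ('left', 'LAND'),
    ('left', 'OR'),
    ('left', 'XOR'),
    ('left', 'AND'),
    ('left', 'EQ', 'NE'),
    ('left', 'LT', 'LE', 'GT', 'GE'),
    ('left', 'LSHIFT', 'RSHIFT'),
    ('left', 'PLUS', 'MINUS'),
    ('left', 'TIMES', 'DIVIDE', 'MOD'),
)
|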
def p_binary_expression_3(self, p):
' expression : expression CONSTRAINT_OP LBRACE constraint_list RBRACE '
p[0] = ConstraintSymbol(p[1], p[4])
| -1,963,574,743,673,760,800
|
expression : expression CONSTRAINT_OP LBRACE constraint_list RBRACE
|
analyzer/apisan/parse/sparser.py
|
p_binary_expression_3
|
oslab-swrc/apisan
|
python
|
def p_binary_expression_3(self, p):
' '
p[0] = ConstraintSymbol(p[1], p[4])
|
def p_constraint(self, p):
' constraint : LBRACKET concrete_integer_expression COMMA concrete_integer_expression RBRACKET '
p[0] = (p[2], p[4])
| -8,889,768,589,170,384,000
|
constraint : LBRACKET concrete_integer_expression COMMA concrete_integer_expression RBRACKET
|
analyzer/apisan/parse/sparser.py
|
p_constraint
|
oslab-swrc/apisan
|
python
|
def p_constraint(self, p):
' '
p[0] = (p[2], p[4])
|
def p_constraint_list(self, p):
' constraint_list : constraint_list COMMA constraint\n | constraint '
if (len(p) == 2):
p[0] = [p[1]]
else:
p[0] = p[1]
p[1].append(p[3])
| -5,220,464,784,130,532,000
|
constraint_list : constraint_list COMMA constraint
| constraint
|
analyzer/apisan/parse/sparser.py
|
p_constraint_list
|
oslab-swrc/apisan
|
python
|
def p_constraint_list(self, p):
' constraint_list : constraint_list COMMA constraint\n | constraint '
if (len(p) == 2):
p[0] = [p[1]]
else:
p[0] = p[1]
p[1].append(p[3])
|
def p_cast_expression_1(self, p):
' cast_expression : unary_expression '
p[0] = p[1]
| 2,346,770,209,569,342,000
|
cast_expression : unary_expression
|
analyzer/apisan/parse/sparser.py
|
p_cast_expression_1
|
oslab-swrc/apisan
|
python
|
def p_cast_expression_1(self, p):
' '
p[0] = p[1]
|
def p_unary_expression_1(self, p):
' unary_expression : postfix_expression '
p[0] = p[1]
| 4,318,103,696,975,526,000
|
unary_expression : postfix_expression
|
analyzer/apisan/parse/sparser.py
|
p_unary_expression_1
|
oslab-swrc/apisan
|
python
|
def p_unary_expression_1(self, p):
' '
p[0] = p[1]
|
def p_unary_expression_2(self, p):
' unary_expression : AND postfix_expression '
p[0] = p[2]
| -4,938,042,286,868,855,000
|
unary_expression : AND postfix_expression
|
analyzer/apisan/parse/sparser.py
|
p_unary_expression_2
|
oslab-swrc/apisan
|
python
|
def p_unary_expression_2(self, p):
' '
p[0] = p[2]
|
def p_postfix_expression_1(self, p):
' postfix_expression : primary_expression '
p[0] = p[1]
| -6,792,552,474,756,700,000
|
postfix_expression : primary_expression
|
analyzer/apisan/parse/sparser.py
|
p_postfix_expression_1
|
oslab-swrc/apisan
|
python
|
def p_postfix_expression_1(self, p):
' '
p[0] = p[1]
|
def p_postfix_expression_2(self, p):
' postfix_expression : postfix_expression ARROW ID'
p[0] = FieldSymbol(p[1], p[3])
| 4,118,578,218,121,580,000
|
postfix_expression : postfix_expression ARROW ID
|
analyzer/apisan/parse/sparser.py
|
p_postfix_expression_2
|
oslab-swrc/apisan
|
python
|
def p_postfix_expression_2(self, p):
' '
p[0] = FieldSymbol(p[1], p[3])
|
def p_postfix_expression3(self, p):
' postfix_expression : postfix_expression LBRACKET expression RBRACKET '
p[0] = ArraySymbol(p[1], p[3])
| -7,503,193,322,489,411,000
|
postfix_expression : postfix_expression LBRACKET expression RBRACKET
|
analyzer/apisan/parse/sparser.py
|
p_postfix_expression3
|
oslab-swrc/apisan
|
python
|
def p_postfix_expression3(self, p):
' '
p[0] = ArraySymbol(p[1], p[3])
|
def p_postfix_expression4(self, p):
' postfix_expression : postfix_expression LPAREN argument_list RPAREN '
p[0] = CallSymbol(p[1], p[3])
| 3,751,622,720,951,135,000
|
postfix_expression : postfix_expression LPAREN argument_list RPAREN
|
analyzer/apisan/parse/sparser.py
|
p_postfix_expression4
|
oslab-swrc/apisan
|
python
|
def p_postfix_expression4(self, p):
' '
p[0] = CallSymbol(p[1], p[3])
|
def p_primary_expression_1(self, p):
' primary_expression : ID '
p[0] = IDSymbol(p[1])
| 6,044,687,616,587,051,000
|
primary_expression : ID
|
analyzer/apisan/parse/sparser.py
|
p_primary_expression_1
|
oslab-swrc/apisan
|
python
|
def p_primary_expression_1(self, p):
' '
p[0] = IDSymbol(p[1])
|
def p_primary_expression_2(self, p):
' primary_expression : concrete_integer_expression '
p[0] = ConcreteIntSymbol(p[1])
| -328,179,987,849,410,370
|
primary_expression : concrete_integer_expression
|
analyzer/apisan/parse/sparser.py
|
p_primary_expression_2
|
oslab-swrc/apisan
|
python
|
def p_primary_expression_2(self, p):
' '
p[0] = ConcreteIntSymbol(p[1])
|
def p_primary_expression_3(self, p):
'primary_expression : LPAREN expression RPAREN'
p[0] = p[2]
| 7,522,107,969,994,399,000
|
primary_expression : LPAREN expression RPAREN
|
analyzer/apisan/parse/sparser.py
|
p_primary_expression_3
|
oslab-swrc/apisan
|
python
|
def p_primary_expression_3(self, p):
p[0] = p[2]
|
def p_primary_expression_4(self, p):
' primary_expression : STRING_LITERAL '
p[0] = StringLiteralSymbol(p[1])
| 8,210,178,987,876,999,000
|
primary_expression : STRING_LITERAL
|
analyzer/apisan/parse/sparser.py
|
p_primary_expression_4
|
oslab-swrc/apisan
|
python
|
def p_primary_expression_4(self, p):
' '
p[0] = StringLiteralSymbol(p[1])
|
def p_concrete_integer(self, p):
' concrete_integer_expression : INT_CONST_DEC\n | MINUS INT_CONST_DEC '
if (len(p) == 3):
p[0] = (- int(p[2]))
else:
p[0] = int(p[1])
| 4,772,510,855,737,581,000
|
concrete_integer_expression : INT_CONST_DEC
| MINUS INT_CONST_DEC
|
analyzer/apisan/parse/sparser.py
|
p_concrete_integer
|
oslab-swrc/apisan
|
python
|
def p_concrete_integer(self, p):
' concrete_integer_expression : INT_CONST_DEC\n | MINUS INT_CONST_DEC '
if (len(p) == 3):
p[0] = (- int(p[2]))
else:
p[0] = int(p[1])
|
def p_argument_list(self, p):
' argument_list :\n | expression\n | argument_list COMMA expression '
if (len(p) == 1):
p[0] = []
elif (len(p) == 2):
p[0] = [p[1]]
else:
p[0] = p[1]
p[1].append(p[3])
| -1,090,489,905,007,694,100
|
argument_list :
| expression
| argument_list COMMA expression
|
analyzer/apisan/parse/sparser.py
|
p_argument_list
|
oslab-swrc/apisan
|
python
|
def p_argument_list(self, p):
' argument_list :\n | expression\n | argument_list COMMA expression '
if (len(p) == 1):
p[0] = []
elif (len(p) == 2):
p[0] = [p[1]]
else:
p[0] = p[1]
p[1].append(p[3])
|
def date_time(timestr):
'Parse a timestamp string; return (struct_time, fractional seconds).'
(t_a, t_b) = timestr.split('.')
return (time.strptime(t_a, '%Y/%b/%d %H:%M:%S'), float(('0.' + t_b)))
| -6,460,670,341,405,587,000
|
Parse a timestamp string; return (struct_time, fractional seconds).
|
yamtbx/dataproc/XIO/plugins/minicbf_interpreter.py
|
date_time
|
harumome/kamo
|
python
|
def date_time(timestr):
(t_a, t_b) = timestr.split('.')
return (time.strptime(t_a, '%Y/%b/%d %H:%M:%S'), float(('0.' + t_b)))
|
def date_seconds(timestr):
'Convert a timestamp string to epoch seconds, including the fraction.'
(t_a, msec) = date_time(timestr)
return (time.mktime(t_a) + msec)
| 8,607,692,014,939,393,000
|
Convert a timestamp string to epoch seconds, including the fraction.
|
yamtbx/dataproc/XIO/plugins/minicbf_interpreter.py
|
date_seconds
|
harumome/kamo
|
python
|
def date_seconds(timestr):
(t_a, msec) = date_time(timestr)
return (time.mktime(t_a) + msec)
|
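A quick usage sketch of the two helpers above (the timestamp is illustrative but matches the '%Y/%b/%d %H:%M:%S' format they expect; date_time and date_seconds from the snippets above are assumed in scope):

import time

ts = '2015/Mar/12 10:41:22.234'
struct_time, msec = date_time(ts)
assert msec == 0.234
print(time.strftime('%Y-%m-%d %H:%M:%S', struct_time))  # 2015-03-12 10:41:22

seconds = date_seconds(ts)  # local-time epoch seconds plus the fraction
print(seconds)
|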
def get_edge_resolution(pixel_x, width, distance, wavelength):
'Calculate EdgeResolution'
from math import sin, atan
if (abs(DISTANCE(distance)) > 0.0):
rad = ((0.5 * float(FLOAT2(pixel_x))) * int(width))
return (FLOAT1(wavelength) / (2 * sin((0.5 * atan((rad / DISTANCE(distance)))))))
else:
return 0.0
| -276,537,080,251,477,400
|
Calculate EdgeResolution
|
yamtbx/dataproc/XIO/plugins/minicbf_interpreter.py
|
get_edge_resolution
|
harumome/kamo
|
python
|
def get_edge_resolution(pixel_x, width, distance, wavelength):
from math import sin, atan
if (abs(DISTANCE(distance)) > 0.0):
rad = ((0.5 * float(FLOAT2(pixel_x))) * int(width))
return (FLOAT1(wavelength) / (2 * sin((0.5 * atan((rad / DISTANCE(distance)))))))
else:
return 0.0
|
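The geometry above is d_min = wavelength / (2*sin(0.5*atan(r/D))) with r half the detector width in mm; a plain-float sketch without the module's DISTANCE/FLOAT1/FLOAT2 converters (the detector numbers are illustrative):

from math import atan, sin

def edge_resolution(pixel_mm, width_px, distance_mm, wavelength_A):
    r = 0.5 * pixel_mm * width_px  # radius to the detector edge, in mm
    return wavelength_A / (2 * sin(0.5 * atan(r / distance_mm)))

# 0.172 mm pixels, 2463 px wide, 200 mm away, 1.0 A beam -> about 1.26 A
print(edge_resolution(0.172, 2463, 200.0, 1.0))
|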
def getRawHeadDict(self, raw_head):
'Interpret the ASCII structure of the minicbf image header.'
i_1 = (28 + raw_head.find('_array_data.header_contents'))
i_2 = raw_head.find('_array_data.data', i_1)
i_3 = (raw_head.find('--CIF-BINARY-FORMAT-SECTION--', i_2) + 29)
i_4 = (i_3 + 500)
lis = [line[2:].strip().split(' ', 1) for line in raw_head[i_1:i_2].splitlines() if (line and (line[0] == '#'))]
lis2 = [line[2:].strip().split(': ', 1) for line in raw_head[i_3:i_4].splitlines() if (line and (line[0:2] == 'X-'))]
self.raw_head_dict = {'Detector_2theta': '0.', 'MESSAGE': ''}
for val in lis:
if (val[0] in HEADER_KEYS):
if (len(val) == 2):
self.raw_head_dict[val[0]] = val[1]
else:
self.raw_head_dict[val[0]] = None
self.raw_head_dict.update(dict([val for val in lis2 if ('Binary-' in val[0])]))
self.raw_head_dict.update({'HEADER_SIZE': i_3})
self.raw_head_dict.update({'DATE': ' '.join(lis[1])})
return self.raw_head_dict
| -7,886,909,478,486,367,000
|
Interpret the ASCII structure of the minicbf image header.
|
yamtbx/dataproc/XIO/plugins/minicbf_interpreter.py
|
getRawHeadDict
|
harumome/kamo
|
python
|
def getRawHeadDict(self, raw_head):
i_1 = (28 + raw_head.find('_array_data.header_contents'))
i_2 = raw_head.find('_array_data.data', i_1)
i_3 = (raw_head.find('--CIF-BINARY-FORMAT-SECTION--', i_2) + 29)
i_4 = (i_3 + 500)
lis = [line[2:].strip().split(' ', 1) for line in raw_head[i_1:i_2].splitlines() if (line and (line[0] == '#'))]
lis2 = [line[2:].strip().split(': ', 1) for line in raw_head[i_3:i_4].splitlines() if (line and (line[0:2] == 'X-'))]
self.raw_head_dict = {'Detector_2theta': '0.', 'MESSAGE': ''}
for val in lis:
if (val[0] in HEADER_KEYS):
if (len(val) == 2):
self.raw_head_dict[val[0]] = val[1]
else:
self.raw_head_dict[val[0]] = None
self.raw_head_dict.update(dict([val for val in lis2 if ('Binary-' in val[0])]))
self.raw_head_dict.update({'HEADER_SIZE': i_3})
self.raw_head_dict.update({'DATE': ' '.join(lis[1])})
return self.raw_head_dict
|
def iteritems(obj, **kwargs):
"replacement for six's iteritems for Python2/3 compat\n uses 'iteritems' if available and otherwise uses 'items'.\n\n Passes kwargs to method.\n "
func = getattr(obj, 'iteritems', None)
if (not func):
func = obj.items
return func(**kwargs)
| 3,271,272,364,481,752,600
|
replacement for six's iteritems for Python2/3 compat
uses 'iteritems' if available and otherwise uses 'items'.
Passes kwargs to method.
|
statsmodels/compat/python.py
|
iteritems
|
Aziiz1989/statsmodels
|
python
|
def iteritems(obj, **kwargs):
"replacement for six's iteritems for Python2/3 compat\n uses 'iteritems' if available and otherwise uses 'items'.\n\n Passes kwargs to method.\n "
func = getattr(obj, 'iteritems', None)
if (not func):
func = obj.items
return func(**kwargs)
|
def getargspec(func):
'\n    Simple workaround for getargspec deprecation that returns\n    an ArgSpec-like object\n    '
sig = inspect.signature(func)
parameters = sig.parameters
(args, defaults) = ([], [])
(varargs, keywords) = (None, None)
for key in parameters:
parameter = parameters[key]
if (parameter.kind == inspect.Parameter.VAR_POSITIONAL):
varargs = key
elif (parameter.kind == inspect.Parameter.VAR_KEYWORD):
keywords = key
else:
args.append(key)
if (parameter.default is not parameter.empty):
defaults.append(parameter.default)
defaults = (None if (len(defaults) == 0) else defaults)
return ArgSpec(args, varargs, keywords, defaults)
| 4,329,741,620,168,690,700
|
Simple workaround for getargspec deprecation that returns
an ArgSpec-like object
|
statsmodels/compat/python.py
|
getargspec
|
Aziiz1989/statsmodels
|
python
|
def getargspec(func):
'\n    Simple workaround for getargspec deprecation that returns\n    an ArgSpec-like object\n    '
sig = inspect.signature(func)
parameters = sig.parameters
(args, defaults) = ([], [])
(varargs, keywords) = (None, None)
for key in parameters:
parameter = parameters[key]
if (parameter.kind == inspect.Parameter.VAR_POSITIONAL):
varargs = key
elif (parameter.kind == inspect.Parameter.VAR_KEYWORD):
keywords = key
else:
args.append(key)
if (parameter.default is not parameter.empty):
defaults.append(parameter.default)
defaults = (None if (len(defaults) == 0) else defaults)
return ArgSpec(args, varargs, keywords, defaults)
|
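A usage sketch of the getargspec shim above (the sample function is illustrative; ArgSpec is assumed to be the namedtuple with fields args, varargs, keywords, defaults used elsewhere in this module):

def sample(a, b, c=3, *args, **kwargs):
    return a

spec = getargspec(sample)
assert spec.args == ['a', 'b', 'c']
assert spec.varargs == 'args'
assert spec.keywords == 'kwargs'
assert spec.defaults == [3]
|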
def train(self, X, y):
'\n Train the classifier. For k-nearest neighbors this is just\n memorizing the training data.\n\n Inputs:\n - X: A numpy array of shape (num_train, D) containing the training data\n consisting of num_train samples each of dimension D.\n - y: A numpy array of shape (N,) containing the training labels, where\n y[i] is the label for X[i].\n '
self.X_train = X
self.y_train = y
| 1,106,634,005,181,075,800
|
Train the classifier. For k-nearest neighbors this is just
memorizing the training data.
Inputs:
- X: A numpy array of shape (num_train, D) containing the training data
consisting of num_train samples each of dimension D.
- y: A numpy array of shape (N,) containing the training labels, where
y[i] is the label for X[i].
|
assignments/2021/assignment1/cs231n/classifiers/k_nearest_neighbor.py
|
train
|
Michellemingxuan/stanford_cs231n
|
python
|
def train(self, X, y):
'\n Train the classifier. For k-nearest neighbors this is just\n memorizing the training data.\n\n Inputs:\n - X: A numpy array of shape (num_train, D) containing the training data\n consisting of num_train samples each of dimension D.\n - y: A numpy array of shape (N,) containing the training labels, where\n y[i] is the label for X[i].\n '
self.X_train = X
self.y_train = y
|
def predict(self, X, k=1, num_loops=0):
'\n Predict labels for test data using this classifier.\n\n Inputs:\n - X: A numpy array of shape (num_test, D) containing test data consisting\n of num_test samples each of dimension D.\n - k: The number of nearest neighbors that vote for the predicted labels.\n - num_loops: Determines which implementation to use to compute distances\n between training points and testing points.\n\n Returns:\n - y: A numpy array of shape (num_test,) containing predicted labels for the\n test data, where y[i] is the predicted label for the test point X[i].\n '
if (num_loops == 0):
dists = self.compute_distances_no_loops(X)
elif (num_loops == 1):
dists = self.compute_distances_one_loop(X)
elif (num_loops == 2):
dists = self.compute_distances_two_loops(X)
else:
raise ValueError(('Invalid value %d for num_loops' % num_loops))
return self.predict_labels(dists, k=k)
| -2,996,105,026,029,196,000
|
Predict labels for test data using this classifier.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data consisting
of num_test samples each of dimension D.
- k: The number of nearest neighbors that vote for the predicted labels.
- num_loops: Determines which implementation to use to compute distances
between training points and testing points.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
|
assignments/2021/assignment1/cs231n/classifiers/k_nearest_neighbor.py
|
predict
|
Michellemingxuan/stanford_cs231n
|
python
|
def predict(self, X, k=1, num_loops=0):
'\n Predict labels for test data using this classifier.\n\n Inputs:\n - X: A numpy array of shape (num_test, D) containing test data consisting\n of num_test samples each of dimension D.\n - k: The number of nearest neighbors that vote for the predicted labels.\n - num_loops: Determines which implementation to use to compute distances\n between training points and testing points.\n\n Returns:\n - y: A numpy array of shape (num_test,) containing predicted labels for the\n test data, where y[i] is the predicted label for the test point X[i].\n '
if (num_loops == 0):
dists = self.compute_distances_no_loops(X)
elif (num_loops == 1):
dists = self.compute_distances_one_loop(X)
elif (num_loops == 2):
dists = self.compute_distances_two_loops(X)
else:
raise ValueError(('Invalid value %d for num_loops' % num_loops))
return self.predict_labels(dists, k=k)
|
def compute_distances_two_loops(self, X):
'\n Compute the distance between each test point in X and each training point\n in self.X_train using a nested loop over both the training data and the\n test data.\n\n Inputs:\n - X: A numpy array of shape (num_test, D) containing test data.\n\n Returns:\n - dists: A numpy array of shape (num_test, num_train) where dists[i, j]\n is the Euclidean distance between the ith test point and the jth training\n point.\n '
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
for j in range(num_train):
dists[(i, j)] = np.sqrt(sum(((X[(i,)] - self.X_train[(j,)]) ** 2)))
pass
return dists
| 8,778,991,418,094,518,000
|
Compute the distance between each test point in X and each training point
in self.X_train using a nested loop over both the training data and the
test data.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data.
Returns:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
is the Euclidean distance between the ith test point and the jth training
point.
|
assignments/2021/assignment1/cs231n/classifiers/k_nearest_neighbor.py
|
compute_distances_two_loops
|
Michellemingxuan/stanford_cs231n
|
python
|
def compute_distances_two_loops(self, X):
'\n Compute the distance between each test point in X and each training point\n in self.X_train using a nested loop over both the training data and the\n test data.\n\n Inputs:\n - X: A numpy array of shape (num_test, D) containing test data.\n\n Returns:\n - dists: A numpy array of shape (num_test, num_train) where dists[i, j]\n is the Euclidean distance between the ith test point and the jth training\n point.\n '
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
for j in range(num_train):
dists[(i, j)] = np.sqrt(sum(((X[(i,)] - self.X_train[(j,)]) ** 2)))
pass
return dists
|
def compute_distances_one_loop(self, X):
'\n Compute the distance between each test point in X and each training point\n in self.X_train using a single loop over the test data.\n\n Input / Output: Same as compute_distances_two_loops\n '
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
dists[i, :] = np.sqrt(np.sum(((self.X_train - X[i, :]) ** 2), 1))
pass
return dists
| 5,453,297,031,028,455,000
|
Compute the distance between each test point in X and each training point
in self.X_train using a single loop over the test data.
Input / Output: Same as compute_distances_two_loops
|
assignments/2021/assignment1/cs231n/classifiers/k_nearest_neighbor.py
|
compute_distances_one_loop
|
Michellemingxuan/stanford_cs231n
|
python
|
def compute_distances_one_loop(self, X):
'\n Compute the distance between each test point in X and each training point\n in self.X_train using a single loop over the test data.\n\n Input / Output: Same as compute_distances_two_loops\n '
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
dists[i, :] = np.sqrt(np.sum(((self.X_train - X[i, :]) ** 2), 1))
pass
return dists
|
def compute_distances_no_loops(self, X):
'\n Compute the distance between each test point in X and each training point\n in self.X_train using no explicit loops.\n\n Input / Output: Same as compute_distances_two_loops\n '
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
dists = np.sqrt(np.sum(((self.X_train[np.newaxis, :] - X[np.newaxis, :].reshape((num_test, 1, X.shape[1]))) ** 2), 2))
pass
return dists
| -7,016,626,351,587,641,000
|
Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops
|
assignments/2021/assignment1/cs231n/classifiers/k_nearest_neighbor.py
|
compute_distances_no_loops
|
Michellemingxuan/stanford_cs231n
|
python
|
def compute_distances_no_loops(self, X):
'\n Compute the distance between each test point in X and each training point\n in self.X_train using no explicit loops.\n\n Input / Output: Same as compute_distances_two_loops\n '
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
dists = np.sqrt(np.sum(((self.X_train[np.newaxis, :] - X[np.newaxis, :].reshape((num_test, 1, X.shape[1]))) ** 2), 2))
pass
return dists
|
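The broadcast above materialises a (num_test, num_train, D) intermediate; a common memory-lighter alternative expands ||x - y||^2 = ||x||^2 - 2*x.y + ||y||^2 and needs only one matrix product. A sketch that should agree with compute_distances_no_loops up to floating-point error:

import numpy as np

def pairwise_dists(X, X_train):
    test_sq = np.sum(X ** 2, axis=1, keepdims=True)  # (num_test, 1)
    train_sq = np.sum(X_train ** 2, axis=1)          # (num_train,)
    cross = X @ X_train.T                            # (num_test, num_train)
    # Clamp tiny negatives from rounding before the square root.
    return np.sqrt(np.maximum(test_sq - 2 * cross + train_sq, 0))
|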
def predict_labels(self, dists, k=1):
'\n        Given a matrix of distances between test points and training points,\n        predict a label for each test point.\n\n        Inputs:\n        - dists: A numpy array of shape (num_test, num_train) where dists[i, j]\n          gives the distance between the ith test point and the jth training point.\n\n        Returns:\n        - y: A numpy array of shape (num_test,) containing predicted labels for the\n          test data, where y[i] is the predicted label for the test point X[i].\n        '
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
for i in range(num_test):
closest_y = []
closest_y = self.y_train[dists[(i,)].argsort()[:k]]
pass
(unique, counts) = np.unique(closest_y, return_counts=True)
y_pred[i] = unique[np.argmax(counts)]
pass
return y_pred
| -7,229,769,627,711,926,000
|
Given a matrix of distances between test points and training points,
predict a label for each test point.
Inputs:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
gives the distance between the ith test point and the jth training point.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
|
assignments/2021/assignment1/cs231n/classifiers/k_nearest_neighbor.py
|
predict_labels
|
Michellemingxuan/stanford_cs231n
|
python
|
def predict_labels(self, dists, k=1):
'\n        Given a matrix of distances between test points and training points,\n        predict a label for each test point.\n\n        Inputs:\n        - dists: A numpy array of shape (num_test, num_train) where dists[i, j]\n          gives the distance between the ith test point and the jth training point.\n\n        Returns:\n        - y: A numpy array of shape (num_test,) containing predicted labels for the\n          test data, where y[i] is the predicted label for the test point X[i].\n        '
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
for i in range(num_test):
closest_y = []
closest_y = self.y_train[dists[(i,)].argsort()[:k]]
pass
(unique, counts) = np.unique(closest_y, return_counts=True)
y_pred[i] = unique[np.argmax(counts)]
pass
return y_pred
|
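Note that np.unique returns values in sorted order, so unique[np.argmax(counts)] breaks ties toward the smaller label, matching the usual kNN assignment convention; a tiny check with illustrative votes:

import numpy as np

closest_y = np.array([2, 1, 1, 2])  # tie between labels 1 and 2
unique, counts = np.unique(closest_y, return_counts=True)
assert unique[np.argmax(counts)] == 1  # the smaller label wins the tie
|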
@pytest.fixture(scope='module')
def containerized_rses(rucio_client):
'\n Detects if containerized rses for xrootd & ssh are available in the testing environment.\n :return: A list of (rse_name, rse_id) tuples.\n '
from rucio.common.exception import InvalidRSEExpression
rses = []
try:
xrd_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_xrd=True')]
xrd_rses = [rucio_client.get_rse(rse) for rse in xrd_rses]
xrd_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in xrd_rses if ('xrd' in rse_obj['rse'].lower())]
xrd_containerized_rses.sort()
rses.extend(xrd_containerized_rses)
ssh_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_ssh=True')]
ssh_rses = [rucio_client.get_rse(rse) for rse in ssh_rses]
ssh_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in ssh_rses if ('ssh' in rse_obj['rse'].lower())]
ssh_containerized_rses.sort()
rses.extend(ssh_containerized_rses)
except InvalidRSEExpression as invalid_rse_expression:
print('{ex}. Note that containerized RSEs will not be available in non-containerized test environments'.format(ex=invalid_rse_expression))
traceback.print_exc()
return rses
| 2,804,995,855,742,133,000
|
Detects if containerized rses for xrootd & ssh are available in the testing environment.
:return: A list of (rse_name, rse_id) tuples.
|
lib/rucio/tests/conftest.py
|
containerized_rses
|
R-16Bob/rucio
|
python
|
@pytest.fixture(scope='module')
def containerized_rses(rucio_client):
'\n Detects if containerized rses for xrootd & ssh are available in the testing environment.\n :return: A list of (rse_name, rse_id) tuples.\n '
from rucio.common.exception import InvalidRSEExpression
rses = []
try:
xrd_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_xrd=True')]
xrd_rses = [rucio_client.get_rse(rse) for rse in xrd_rses]
xrd_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in xrd_rses if ('xrd' in rse_obj['rse'].lower())]
xrd_containerized_rses.sort()
rses.extend(xrd_containerized_rses)
ssh_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_ssh=True')]
ssh_rses = [rucio_client.get_rse(rse) for rse in ssh_rses]
ssh_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in ssh_rses if ('ssh' in rse_obj['rse'].lower())]
ssh_containerized_rses.sort()
rses.extend(ssh_containerized_rses)
except InvalidRSEExpression as invalid_rse_expression:
print('{ex}. Note that containerized RSEs will not be available in non-containerized test environments'.format(ex=invalid_rse_expression))
traceback.print_exc()
return rses
|
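
A self-contained sketch of the same defensive-detection pattern, with a hypothetical inventory and LookupError standing in for rucio's InvalidRSEExpression (all names below are illustrative, not rucio APIs):

import pytest

AVAILABLE = {'xrd': ['XRD1', 'XRD2']}            # hypothetical environment inventory

def discover(kind):
    if kind not in AVAILABLE:
        raise LookupError('no resources match %r' % kind)
    return AVAILABLE[kind]

@pytest.fixture(scope='module')
def optional_resources():
    resources = []
    try:
        resources.extend(sorted(discover('xrd')))
        resources.extend(sorted(discover('ssh')))    # raises here in this toy inventory
    except LookupError as exc:
        print('{ex}. Note that these resources are unavailable in this environment.'.format(ex=exc))
    return resources                                 # tests receive whatever was found

def test_degrades_gracefully(optional_resources):
    assert optional_resources == ['XRD1', 'XRD2']
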
@pytest.fixture(scope='class')
def rse_factory_unittest(request, vo):
'\n unittest classes can get access to rse_factory fixture via this fixture\n '
from rucio.tests.temp_factories import TemporaryRSEFactory
with TemporaryRSEFactory(vo=vo) as factory:
request.cls.rse_factory = factory
(yield factory)
factory.cleanup()
| -5,738,361,266,967,748,000
|
unittest classes can get access to rse_factory fixture via this fixture
|
lib/rucio/tests/conftest.py
|
rse_factory_unittest
|
R-16Bob/rucio
|
python
|
@pytest.fixture(scope='class')
def rse_factory_unittest(request, vo):
'\n \n '
from rucio.tests.temp_factories import TemporaryRSEFactory
with TemporaryRSEFactory(vo=vo) as factory:
request.cls.rse_factory = factory
(yield factory)
factory.cleanup()
|
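
The general pattern behind this fixture, as a runnable sketch: a class-scoped fixture stores the factory on request.cls so unittest-style test classes can reach it as an instance attribute (the factory object and fixture name below are stand-ins, not the real TemporaryRSEFactory):

import unittest
import pytest

@pytest.fixture(scope='class')
def factory_fixture(request):
    factory = object()                    # stand-in for TemporaryRSEFactory(vo=vo)
    request.cls.rse_factory = factory     # expose to the unittest class
    yield factory                         # teardown/cleanup would follow the yield

@pytest.mark.usefixtures('factory_fixture')
class TestUsingFactory(unittest.TestCase):
    def test_factory_is_injected(self):
        self.assertIsNotNone(self.rse_factory)
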
@pytest.fixture
def core_config_mock(request):
'\n Fixture to allow having per-test core.config tables without affecting the other parallel tests.\n\n This override works only in tests which use core function calls directly, not in the ones working\n via the API, because the normal config table is not touched and the rucio instance answering API\n calls is not aware of this mock.\n\n This fixture acts by creating a new copy of the "config" sql table using the :memory: sqlite engine.\n Accesses to the "models.Config" table are then redirected to this temporary table via mock.patch().\n '
from unittest import mock
from rucio.common.utils import generate_uuid
from sqlalchemy.pool import StaticPool
from rucio.db.sqla.models import ModelBase, BASE, Column, String, PrimaryKeyConstraint
from rucio.db.sqla.session import get_session, get_maker, get_engine, create_engine, declarative_base
table_content = []
params = __get_fixture_param(request)
if params:
table_content = params.get('table_content', table_content)
engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, poolclass=StaticPool)
InMemoryBase = declarative_base(bind=engine)
class InMemoryConfig(InMemoryBase, ModelBase):
__tablename__ = ('configs_' + generate_uuid())
section = Column(String(128))
opt = Column(String(128))
value = Column(String(4000))
        __table_args__ = (PrimaryKeyConstraint('section', 'opt', name='CONFIGS_PK'),)
InMemoryBase.metadata.create_all()
current_engine = get_engine()
get_maker().configure(binds={BASE: current_engine, InMemoryBase: engine})
session = get_session()()
for (section, option, value) in (table_content or []):
InMemoryConfig(section=section, opt=option, value=value).save(flush=True, session=session)
session.commit()
with mock.patch('rucio.core.config.models.Config', new=InMemoryConfig):
(yield)
| -8,479,526,265,431,728,000
|
Fixture to allow having per-test core.config tables without affecting the other parallel tests.
This override works only in tests which use core function calls directly, not in the ones working
via the API, because the normal config table is not touched and the rucio instance answering API
calls is not aware of this mock.
This fixture acts by creating a new copy of the "config" sql table using the :memory: sqlite engine.
Accesses to the "models.Config" table are then redirected to this temporary table via mock.patch().
|
lib/rucio/tests/conftest.py
|
core_config_mock
|
R-16Bob/rucio
|
python
|
@pytest.fixture
def core_config_mock(request):
'\n Fixture to allow having per-test core.config tables without affecting the other parallel tests.\n\n This override works only in tests which use core function calls directly, not in the ones working\n via the API, because the normal config table is not touched and the rucio instance answering API\n calls is not aware of this mock.\n\n This fixture acts by creating a new copy of the "config" sql table using the :memory: sqlite engine.\n Accesses to the "models.Config" table are then redirected to this temporary table via mock.patch().\n '
from unittest import mock
from rucio.common.utils import generate_uuid
from sqlalchemy.pool import StaticPool
from rucio.db.sqla.models import ModelBase, BASE, Column, String, PrimaryKeyConstraint
from rucio.db.sqla.session import get_session, get_maker, get_engine, create_engine, declarative_base
table_content = []
params = __get_fixture_param(request)
if params:
table_content = params.get('table_content', table_content)
engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, poolclass=StaticPool)
InMemoryBase = declarative_base(bind=engine)
class InMemoryConfig(InMemoryBase, ModelBase):
__tablename__ = ('configs_' + generate_uuid())
section = Column(String(128))
opt = Column(String(128))
value = Column(String(4000))
        __table_args__ = (PrimaryKeyConstraint('section', 'opt', name='CONFIGS_PK'),)
InMemoryBase.metadata.create_all()
current_engine = get_engine()
get_maker().configure(binds={BASE: current_engine, InMemoryBase: engine})
session = get_session()()
for (section, option, value) in (table_content or []):
InMemoryConfig(section=section, opt=option, value=value).save(flush=True, session=session)
session.commit()
with mock.patch('rucio.core.config.models.Config', new=InMemoryConfig):
(yield)
|
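
A stripped-down, standalone sketch of the technique (SQLAlchemy 1.4+ style; the "real" model is a stand-in and the patch target is local to the script, so none of this is rucio's actual code):

from unittest import mock
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker
from sqlalchemy.pool import StaticPool

class RealConfig:                        # stand-in for the production model
    pass

# StaticPool keeps a single shared connection to the :memory: database
engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, poolclass=StaticPool)
Base = declarative_base()

class InMemoryConfig(Base):
    __tablename__ = 'configs_test'
    section = Column(String(128), primary_key=True)
    opt = Column(String(128), primary_key=True)
    value = Column(String(4000))

Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(InMemoryConfig(section='s', opt='o', value='v'))
session.commit()

with mock.patch(__name__ + '.RealConfig', new=InMemoryConfig):
    assert RealConfig.__tablename__ == 'configs_test'   # callers now hit the temp table
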
@pytest.fixture
def file_config_mock(request):
'\n Fixture which allows to have an isolated in-memory configuration file instance which\n is not persisted after exiting the fixture.\n\n This override works only in tests which use config calls directly, not in the ones working\n via the API, as the server config is not changed.\n '
from unittest import mock
from rucio.common.config import Config, config_set, config_has_section, config_add_section
overrides = []
params = __get_fixture_param(request)
if params:
overrides = params.get('overrides', overrides)
parser = Config().parser
with mock.patch('rucio.common.config.get_config', side_effect=(lambda : parser)):
for (section, option, value) in (overrides or []):
if (not config_has_section(section)):
config_add_section(section)
config_set(section, option, value)
(yield)
| -2,383,599,826,401,361,400
|
Fixture which allows to have an isolated in-memory configuration file instance which
is not persisted after exiting the fixture.
This override works only in tests which use config calls directly, not in the ones working
via the API, as the server config is not changed.
|
lib/rucio/tests/conftest.py
|
file_config_mock
|
R-16Bob/rucio
|
python
|
@pytest.fixture
def file_config_mock(request):
'\n Fixture which allows to have an isolated in-memory configuration file instance which\n is not persisted after exiting the fixture.\n\n This override works only in tests which use config calls directly, not in the ones working\n via the API, as the server config is not changed.\n '
from unittest import mock
from rucio.common.config import Config, config_set, config_has_section, config_add_section
overrides = []
params = __get_fixture_param(request)
if params:
overrides = params.get('overrides', overrides)
parser = Config().parser
with mock.patch('rucio.common.config.get_config', side_effect=(lambda : parser)):
for (section, option, value) in (overrides or []):
if (not config_has_section(section)):
config_add_section(section)
config_set(section, option, value)
(yield)
|
@pytest.fixture
def caches_mock(request):
'\n Fixture which overrides the different internal caches with in-memory ones for the duration\n of a particular test.\n\n This override works only in tests which use core function calls directly, not in the ones\n working via API.\n\n The fixture acts by by mock.patch the REGION object in the provided list of modules to mock.\n '
from unittest import mock
from contextlib import ExitStack
from dogpile.cache import make_region
caches_to_mock = []
params = __get_fixture_param(request)
if params:
caches_to_mock = params.get('caches_to_mock', caches_to_mock)
with ExitStack() as stack:
mocked_caches = []
for module in caches_to_mock:
region = make_region().configure('dogpile.cache.memory', expiration_time=600)
stack.enter_context(mock.patch(module, new=region))
mocked_caches.append(region)
(yield mocked_caches)
| 4,544,694,118,791,536,000
|
Fixture which overrides the different internal caches with in-memory ones for the duration
of a particular test.
This override works only in tests which use core function calls directly, not in the ones
working via API.
The fixture acts by mock.patching the REGION object in the provided list of modules.
|
lib/rucio/tests/conftest.py
|
caches_mock
|
R-16Bob/rucio
|
python
|
@pytest.fixture
def caches_mock(request):
'\n Fixture which overrides the different internal caches with in-memory ones for the duration\n of a particular test.\n\n This override works only in tests which use core function calls directly, not in the ones\n working via API.\n\n The fixture acts by by mock.patch the REGION object in the provided list of modules to mock.\n '
from unittest import mock
from contextlib import ExitStack
from dogpile.cache import make_region
caches_to_mock = []
params = __get_fixture_param(request)
if params:
caches_to_mock = params.get('caches_to_mock', caches_to_mock)
with ExitStack() as stack:
mocked_caches = []
for module in caches_to_mock:
region = make_region().configure('dogpile.cache.memory', expiration_time=600)
stack.enter_context(mock.patch(module, new=region))
mocked_caches.append(region)
(yield mocked_caches)
|
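
The ExitStack idiom in isolation, with SimpleNamespace objects standing in for the modules whose REGION attribute gets patched (a plain dict stands in for a dogpile region):

import types
from contextlib import ExitStack
from unittest import mock

mod_a = types.SimpleNamespace(REGION='real-region-a')   # hypothetical modules
mod_b = types.SimpleNamespace(REGION='real-region-b')

with ExitStack() as stack:
    mocked = []
    for module in (mod_a, mod_b):
        region = {}                                      # in-memory stand-in for a dogpile region
        stack.enter_context(mock.patch.object(module, 'REGION', new=region))
        mocked.append(region)
    assert mod_a.REGION is mocked[0]                     # patched while the stack is open

assert mod_a.REGION == 'real-region-a'                   # all patches undone on exit
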
@pytest.fixture
def metrics_mock():
'\n Overrides the prometheus metric registry and allows to verify if the desired\n prometheus metrics were correctly recorded.\n '
from unittest import mock
from prometheus_client import CollectorRegistry
with mock.patch('rucio.core.monitor.REGISTRY', new=CollectorRegistry()) as registry, mock.patch('rucio.core.monitor.COUNTERS', new={}):
(yield registry)
| 3,437,373,712,124,519,000
|
Overrides the prometheus metric registry and allows to verify if the desired
prometheus metrics were correctly recorded.
|
lib/rucio/tests/conftest.py
|
metrics_mock
|
R-16Bob/rucio
|
python
|
@pytest.fixture
def metrics_mock():
'\n Overrides the prometheus metric registry and allows to verify if the desired\n prometheus metrics were correctly recorded.\n '
from unittest import mock
from prometheus_client import CollectorRegistry
with mock.patch('rucio.core.monitor.REGISTRY', new=CollectorRegistry()) as registry, mock.patch('rucio.core.monitor.COUNTERS', new={}):
(yield registry)
|
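
What a test can do with the fresh registry yielded above, assuming prometheus_client is installed (the metric name is illustrative):

from prometheus_client import CollectorRegistry, Counter

registry = CollectorRegistry()
requests = Counter('demo_requests', 'hypothetical request counter', registry=registry)
requests.inc()

# Counters expose a '_total' sample; a fresh registry makes the assertion deterministic
assert registry.get_sample_value('demo_requests_total') == 1.0
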
def s_GROUPPASSWORD(self, value):
'if set USERPASSWORD of group GROUPPASSWORD same as it\n if not any value set, key should not exists\n '
if (value in (None, DEFAULT_NO_KEY)):
user_pwd = self.data.get(USERPASSWORD, None)
if (user_pwd is not None):
return user_pwd
else:
return DEFAULT_NO_KEY
| 2,257,729,412,670,996,700
|
If USERPASSWORD of the group is set, GROUPPASSWORD is the same as it;
if no value is set at all, the key should not exist.
|
antilles-core/openHPC_web_project/tests/user/mock_libuser.py
|
s_GROUPPASSWORD
|
CarrotXin/Antilles
|
python
|
def s_GROUPPASSWORD(self, value):
'if set USERPASSWORD of group GROUPPASSWORD same as it\n if not any value set, key should not exists\n '
if (value in (None, DEFAULT_NO_KEY)):
user_pwd = self.data.get(USERPASSWORD, None)
if (user_pwd is not None):
return user_pwd
else:
return DEFAULT_NO_KEY
|
def ensure_no_empty_passwords(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
'With CVE-2019-18933, it was possible for certain users created\n using social login (e.g. Google/GitHub auth) to have the empty\n string as their password in the Zulip database, rather than\n Django\'s "unusable password" (i.e. no password at all). This was a\n serious security issue for organizations with both password and\n Google/GitHub authentication enabled.\n\n Combined with the code changes to prevent new users from entering\n this buggy state, this migration sets the intended "no password"\n state for any users who are in this buggy state, as had been\n intended.\n\n While this bug was discovered by our own development team and we\n believe it hasn\'t been exploited in the wild, out of an abundance\n of caution, this migration also resets the personal API keys for\n all users where Zulip\'s database-level logging cannot **prove**\n that user\'s current personal API key was never accessed using this\n bug.\n\n There are a few ways this can be proven: (1) the user\'s password\n has never been changed and is not the empty string,\n or (2) the user\'s personal API key has changed since that user last\n changed their password (which is not \'\'). Both constitute proof\n because this bug cannot be used to gain the access required to change\n or reset a user\'s password.\n\n Resetting those API keys has the effect of logging many users out\n of the Zulip mobile and terminal apps unnecessarily (e.g. because\n the user changed their password at any point in the past, even\n though the user never was affected by the bug), but we\'re\n comfortable with that cost for ensuring that this bug is\n completely fixed.\n\n To avoid this inconvenience for self-hosted servers which don\'t\n even have EmailAuthBackend enabled, we skip resetting any API keys\n if the server doesn\'t have EmailAuthBackend configured.\n '
UserProfile = apps.get_model('zerver', 'UserProfile')
RealmAuditLog = apps.get_model('zerver', 'RealmAuditLog')
event_type_class = RealmAuditLog._meta.get_field('event_type').get_internal_type()
if (event_type_class == 'CharField'):
USER_PASSWORD_CHANGED: Union[(int, str)] = 'user_password_changed'
USER_API_KEY_CHANGED: Union[(int, str)] = 'user_api_key_changed'
else:
USER_PASSWORD_CHANGED = 122
USER_API_KEY_CHANGED = 127
password_change_user_ids = set(RealmAuditLog.objects.filter(event_type=USER_PASSWORD_CHANGED).values_list('modified_user_id', flat=True))
password_change_user_ids_api_key_reset_needed: Set[int] = set()
password_change_user_ids_no_reset_needed: Set[int] = set()
for user_id in password_change_user_ids:
query = RealmAuditLog.objects.filter(modified_user=user_id, event_type__in=[USER_PASSWORD_CHANGED, USER_API_KEY_CHANGED]).order_by('event_time')
earliest_password_change = query.filter(event_type=USER_PASSWORD_CHANGED).first()
assert (earliest_password_change is not None)
latest_api_key_change = query.filter(event_type=USER_API_KEY_CHANGED).last()
if (latest_api_key_change is None):
password_change_user_ids_api_key_reset_needed.add(user_id)
elif (earliest_password_change.event_time <= latest_api_key_change.event_time):
password_change_user_ids_no_reset_needed.add(user_id)
else:
password_change_user_ids_api_key_reset_needed.add(user_id)
if (password_change_user_ids_no_reset_needed and settings.PRODUCTION):
with open('/var/log/zulip/0209_password_migration.log', 'w') as log_file:
line = 'No reset needed, but changed password: {}\n'
log_file.write(line.format(password_change_user_ids_no_reset_needed))
AFFECTED_USER_TYPE_EMPTY_PASSWORD = 'empty_password'
AFFECTED_USER_TYPE_CHANGED_PASSWORD = 'changed_password'
MIGRATION_ID = '0209_user_profile_no_empty_password'
def write_realm_audit_log_entry(user_profile: Any, event_time: Any, event_type: Any, affected_user_type: str) -> None:
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile, event_type=event_type, event_time=event_time, extra_data=ujson.dumps({'migration_id': MIGRATION_ID, 'affected_user_type': affected_user_type}))
email_auth_enabled = ('zproject.backends.EmailAuthBackend' in settings.AUTHENTICATION_BACKENDS)
for user_profile in UserProfile.objects.all():
event_time = timezone_now()
if check_password('', user_profile.password):
user_profile.password = make_password(None)
update_fields = ['password']
write_realm_audit_log_entry(user_profile, event_time, USER_PASSWORD_CHANGED, AFFECTED_USER_TYPE_EMPTY_PASSWORD)
if (email_auth_enabled and (not user_profile.is_bot)):
reset_user_api_key(user_profile)
update_fields.append('api_key')
event_time = timezone_now()
write_realm_audit_log_entry(user_profile, event_time, USER_API_KEY_CHANGED, AFFECTED_USER_TYPE_EMPTY_PASSWORD)
user_profile.save(update_fields=update_fields)
continue
elif (email_auth_enabled and (user_profile.id in password_change_user_ids_api_key_reset_needed)):
reset_user_api_key(user_profile)
user_profile.save(update_fields=['api_key'])
write_realm_audit_log_entry(user_profile, event_time, USER_API_KEY_CHANGED, AFFECTED_USER_TYPE_CHANGED_PASSWORD)
| -8,432,326,075,367,990,000
|
With CVE-2019-18933, it was possible for certain users created
using social login (e.g. Google/GitHub auth) to have the empty
string as their password in the Zulip database, rather than
Django's "unusable password" (i.e. no password at all). This was a
serious security issue for organizations with both password and
Google/GitHub authentication enabled.
Combined with the code changes to prevent new users from entering
this buggy state, this migration sets the intended "no password"
state for any users who are in this buggy state, as had been
intended.
While this bug was discovered by our own development team and we
believe it hasn't been exploited in the wild, out of an abundance
of caution, this migration also resets the personal API keys for
all users where Zulip's database-level logging cannot **prove**
that user's current personal API key was never accessed using this
bug.
There are a few ways this can be proven: (1) the user's password
has never been changed and is not the empty string,
or (2) the user's personal API key has changed since that user last
changed their password (which is not ''). Both constitute proof
because this bug cannot be used to gain the access required to change
or reset a user's password.
Resetting those API keys has the effect of logging many users out
of the Zulip mobile and terminal apps unnecessarily (e.g. because
the user changed their password at any point in the past, even
though the user never was affected by the bug), but we're
comfortable with that cost for ensuring that this bug is
completely fixed.
To avoid this inconvenience for self-hosted servers which don't
even have EmailAuthBackend enabled, we skip resetting any API keys
if the server doesn't have EmailAuthBackend configured.
|
zerver/migrations/0209_user_profile_no_empty_password.py
|
ensure_no_empty_passwords
|
Bpapman/zulip
|
python
|
def ensure_no_empty_passwords(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
'With CVE-2019-18933, it was possible for certain users created\n using social login (e.g. Google/GitHub auth) to have the empty\n string as their password in the Zulip database, rather than\n Django\'s "unusable password" (i.e. no password at all). This was a\n serious security issue for organizations with both password and\n Google/GitHub authentication enabled.\n\n Combined with the code changes to prevent new users from entering\n this buggy state, this migration sets the intended "no password"\n state for any users who are in this buggy state, as had been\n intended.\n\n While this bug was discovered by our own development team and we\n believe it hasn\'t been exploited in the wild, out of an abundance\n of caution, this migration also resets the personal API keys for\n all users where Zulip\'s database-level logging cannot **prove**\n that user\'s current personal API key was never accessed using this\n bug.\n\n There are a few ways this can be proven: (1) the user\'s password\n has never been changed and is not the empty string,\n or (2) the user\'s personal API key has changed since that user last\n changed their password (which is not \'\'). Both constitute proof\n because this bug cannot be used to gain the access required to change\n or reset a user\'s password.\n\n Resetting those API keys has the effect of logging many users out\n of the Zulip mobile and terminal apps unnecessarily (e.g. because\n the user changed their password at any point in the past, even\n though the user never was affected by the bug), but we\'re\n comfortable with that cost for ensuring that this bug is\n completely fixed.\n\n To avoid this inconvenience for self-hosted servers which don\'t\n even have EmailAuthBackend enabled, we skip resetting any API keys\n if the server doesn\'t have EmailAuthBackend configured.\n '
UserProfile = apps.get_model('zerver', 'UserProfile')
RealmAuditLog = apps.get_model('zerver', 'RealmAuditLog')
event_type_class = RealmAuditLog._meta.get_field('event_type').get_internal_type()
if (event_type_class == 'CharField'):
USER_PASSWORD_CHANGED: Union[(int, str)] = 'user_password_changed'
USER_API_KEY_CHANGED: Union[(int, str)] = 'user_api_key_changed'
else:
USER_PASSWORD_CHANGED = 122
USER_API_KEY_CHANGED = 127
password_change_user_ids = set(RealmAuditLog.objects.filter(event_type=USER_PASSWORD_CHANGED).values_list('modified_user_id', flat=True))
password_change_user_ids_api_key_reset_needed: Set[int] = set()
password_change_user_ids_no_reset_needed: Set[int] = set()
for user_id in password_change_user_ids:
query = RealmAuditLog.objects.filter(modified_user=user_id, event_type__in=[USER_PASSWORD_CHANGED, USER_API_KEY_CHANGED]).order_by('event_time')
earliest_password_change = query.filter(event_type=USER_PASSWORD_CHANGED).first()
assert (earliest_password_change is not None)
latest_api_key_change = query.filter(event_type=USER_API_KEY_CHANGED).last()
if (latest_api_key_change is None):
password_change_user_ids_api_key_reset_needed.add(user_id)
elif (earliest_password_change.event_time <= latest_api_key_change.event_time):
password_change_user_ids_no_reset_needed.add(user_id)
else:
password_change_user_ids_api_key_reset_needed.add(user_id)
if (password_change_user_ids_no_reset_needed and settings.PRODUCTION):
with open('/var/log/zulip/0209_password_migration.log', 'w') as log_file:
line = 'No reset needed, but changed password: {}\n'
log_file.write(line.format(password_change_user_ids_no_reset_needed))
AFFECTED_USER_TYPE_EMPTY_PASSWORD = 'empty_password'
AFFECTED_USER_TYPE_CHANGED_PASSWORD = 'changed_password'
MIGRATION_ID = '0209_user_profile_no_empty_password'
def write_realm_audit_log_entry(user_profile: Any, event_time: Any, event_type: Any, affected_user_type: str) -> None:
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile, event_type=event_type, event_time=event_time, extra_data=ujson.dumps({'migration_id': MIGRATION_ID, 'affected_user_type': affected_user_type}))
email_auth_enabled = ('zproject.backends.EmailAuthBackend' in settings.AUTHENTICATION_BACKENDS)
for user_profile in UserProfile.objects.all():
event_time = timezone_now()
        if check_password('', user_profile.password):
user_profile.password = make_password(None)
update_fields = ['password']
write_realm_audit_log_entry(user_profile, event_time, USER_PASSWORD_CHANGED, AFFECTED_USER_TYPE_EMPTY_PASSWORD)
if (email_auth_enabled and (not user_profile.is_bot)):
reset_user_api_key(user_profile)
update_fields.append('api_key')
event_time = timezone_now()
write_realm_audit_log_entry(user_profile, event_time, USER_API_KEY_CHANGED, AFFECTED_USER_TYPE_EMPTY_PASSWORD)
user_profile.save(update_fields=update_fields)
continue
elif (email_auth_enabled and (user_profile.id in password_change_user_ids_api_key_reset_needed)):
reset_user_api_key(user_profile)
user_profile.save(update_fields=['api_key'])
write_realm_audit_log_entry(user_profile, event_time, USER_API_KEY_CHANGED, AFFECTED_USER_TYPE_CHANGED_PASSWORD)
|
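
The reset decision distilled into a pure function (timestamps reduced to comparable values; this mirrors the branch structure above, not Zulip's actual API):

def api_key_reset_needed(earliest_password_change, latest_api_key_change):
    # No API key change on record: the key may predate the bug window -> reset.
    if latest_api_key_change is None:
        return True
    # Key changed at or after the first password change: provably safe -> no reset.
    return latest_api_key_change < earliest_password_change

assert api_key_reset_needed(5, None) is True
assert api_key_reset_needed(5, 7) is False
assert api_key_reset_needed(5, 3) is True
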
def __init__(self, artworks=None, genres=None, id=None, people=None, release_dates=None, remoteids=None, runtime=None, trailers=None, translations=None, url=None):
'Movie - a model defined in Swagger'
self._artworks = None
self._genres = None
self._id = None
self._people = None
self._release_dates = None
self._remoteids = None
self._runtime = None
self._trailers = None
self._translations = None
self._url = None
self.discriminator = None
if (artworks is not None):
self.artworks = artworks
if (genres is not None):
self.genres = genres
if (id is not None):
self.id = id
if (people is not None):
self.people = people
if (release_dates is not None):
self.release_dates = release_dates
if (remoteids is not None):
self.remoteids = remoteids
if (runtime is not None):
self.runtime = runtime
if (trailers is not None):
self.trailers = trailers
if (translations is not None):
self.translations = translations
if (url is not None):
self.url = url
| 1,450,112,740,000,856,000
|
Movie - a model defined in Swagger
|
tvdb_api/models/movie.py
|
__init__
|
h3llrais3r/tvdb_api
|
python
|
def __init__(self, artworks=None, genres=None, id=None, people=None, release_dates=None, remoteids=None, runtime=None, trailers=None, translations=None, url=None):
self._artworks = None
self._genres = None
self._id = None
self._people = None
self._release_dates = None
self._remoteids = None
self._runtime = None
self._trailers = None
self._translations = None
self._url = None
self.discriminator = None
if (artworks is not None):
self.artworks = artworks
if (genres is not None):
self.genres = genres
if (id is not None):
self.id = id
if (people is not None):
self.people = people
if (release_dates is not None):
self.release_dates = release_dates
if (remoteids is not None):
self.remoteids = remoteids
if (runtime is not None):
self.runtime = runtime
if (trailers is not None):
self.trailers = trailers
if (translations is not None):
self.translations = translations
if (url is not None):
self.url = url
|
@property
def artworks(self):
'Gets the artworks of this Movie. # noqa: E501\n\n\n :return: The artworks of this Movie. # noqa: E501\n :rtype: list[MovieArtwork]\n '
return self._artworks
| -2,393,834,981,830,680,600
|
Gets the artworks of this Movie. # noqa: E501
:return: The artworks of this Movie. # noqa: E501
:rtype: list[MovieArtwork]
|
tvdb_api/models/movie.py
|
artworks
|
h3llrais3r/tvdb_api
|
python
|
@property
def artworks(self):
'Gets the artworks of this Movie. # noqa: E501\n\n\n :return: The artworks of this Movie. # noqa: E501\n :rtype: list[MovieArtwork]\n '
return self._artworks
|
@artworks.setter
def artworks(self, artworks):
'Sets the artworks of this Movie.\n\n\n :param artworks: The artworks of this Movie. # noqa: E501\n :type: list[MovieArtwork]\n '
self._artworks = artworks
| -1,086,038,072,461,534,200
|
Sets the artworks of this Movie.
:param artworks: The artworks of this Movie. # noqa: E501
:type: list[MovieArtwork]
|
tvdb_api/models/movie.py
|
artworks
|
h3llrais3r/tvdb_api
|
python
|
@artworks.setter
def artworks(self, artworks):
'Sets the artworks of this Movie.\n\n\n :param artworks: The artworks of this Movie. # noqa: E501\n :type: list[MovieArtwork]\n '
self._artworks = artworks
|
@property
def genres(self):
'Gets the genres of this Movie. # noqa: E501\n\n\n :return: The genres of this Movie. # noqa: E501\n :rtype: list[MovieGenre]\n '
return self._genres
| 7,144,432,880,067,460,000
|
Gets the genres of this Movie. # noqa: E501
:return: The genres of this Movie. # noqa: E501
:rtype: list[MovieGenre]
|
tvdb_api/models/movie.py
|
genres
|
h3llrais3r/tvdb_api
|
python
|
@property
def genres(self):
'Gets the genres of this Movie. # noqa: E501\n\n\n :return: The genres of this Movie. # noqa: E501\n :rtype: list[MovieGenre]\n '
return self._genres
|
@genres.setter
def genres(self, genres):
'Sets the genres of this Movie.\n\n\n :param genres: The genres of this Movie. # noqa: E501\n :type: list[MovieGenre]\n '
self._genres = genres
| -8,035,082,629,329,302,000
|
Sets the genres of this Movie.
:param genres: The genres of this Movie. # noqa: E501
:type: list[MovieGenre]
|
tvdb_api/models/movie.py
|
genres
|
h3llrais3r/tvdb_api
|
python
|
@genres.setter
def genres(self, genres):
'Sets the genres of this Movie.\n\n\n :param genres: The genres of this Movie. # noqa: E501\n :type: list[MovieGenre]\n '
self._genres = genres
|
@property
def id(self):
'Gets the id of this Movie. # noqa: E501\n\n\n :return: The id of this Movie. # noqa: E501\n :rtype: int\n '
return self._id
| 133,836,784,827,236,960
|
Gets the id of this Movie. # noqa: E501
:return: The id of this Movie. # noqa: E501
:rtype: int
|
tvdb_api/models/movie.py
|
id
|
h3llrais3r/tvdb_api
|
python
|
@property
def id(self):
'Gets the id of this Movie. # noqa: E501\n\n\n :return: The id of this Movie. # noqa: E501\n :rtype: int\n '
return self._id
|
@id.setter
def id(self, id):
'Sets the id of this Movie.\n\n\n :param id: The id of this Movie. # noqa: E501\n :type: int\n '
self._id = id
| -400,809,097,172,074,600
|
Sets the id of this Movie.
:param id: The id of this Movie. # noqa: E501
:type: int
|
tvdb_api/models/movie.py
|
id
|
h3llrais3r/tvdb_api
|
python
|
@id.setter
def id(self, id):
'Sets the id of this Movie.\n\n\n :param id: The id of this Movie. # noqa: E501\n :type: int\n '
self._id = id
|
@property
def people(self):
'Gets the people of this Movie. # noqa: E501\n\n\n :return: The people of this Movie. # noqa: E501\n :rtype: MoviePeople\n '
return self._people
| -1,147,309,872,900,875,500
|
Gets the people of this Movie. # noqa: E501
:return: The people of this Movie. # noqa: E501
:rtype: MoviePeople
|
tvdb_api/models/movie.py
|
people
|
h3llrais3r/tvdb_api
|
python
|
@property
def people(self):
'Gets the people of this Movie. # noqa: E501\n\n\n :return: The people of this Movie. # noqa: E501\n :rtype: MoviePeople\n '
return self._people
|
@people.setter
def people(self, people):
'Sets the people of this Movie.\n\n\n :param people: The people of this Movie. # noqa: E501\n :type: MoviePeople\n '
self._people = people
| 8,841,761,709,071,807,000
|
Sets the people of this Movie.
:param people: The people of this Movie. # noqa: E501
:type: MoviePeople
|
tvdb_api/models/movie.py
|
people
|
h3llrais3r/tvdb_api
|
python
|
@people.setter
def people(self, people):
'Sets the people of this Movie.\n\n\n :param people: The people of this Movie. # noqa: E501\n :type: MoviePeople\n '
self._people = people
|
@property
def release_dates(self):
'Gets the release_dates of this Movie. # noqa: E501\n\n\n :return: The release_dates of this Movie. # noqa: E501\n :rtype: list[MovieReleaseDate]\n '
return self._release_dates
| 4,026,720,840,994,479,600
|
Gets the release_dates of this Movie. # noqa: E501
:return: The release_dates of this Movie. # noqa: E501
:rtype: list[MovieReleaseDate]
|
tvdb_api/models/movie.py
|
release_dates
|
h3llrais3r/tvdb_api
|
python
|
@property
def release_dates(self):
'Gets the release_dates of this Movie. # noqa: E501\n\n\n :return: The release_dates of this Movie. # noqa: E501\n :rtype: list[MovieReleaseDate]\n '
return self._release_dates
|
@release_dates.setter
def release_dates(self, release_dates):
'Sets the release_dates of this Movie.\n\n\n :param release_dates: The release_dates of this Movie. # noqa: E501\n :type: list[MovieReleaseDate]\n '
self._release_dates = release_dates
| -6,582,223,386,136,288,000
|
Sets the release_dates of this Movie.
:param release_dates: The release_dates of this Movie. # noqa: E501
:type: list[MovieReleaseDate]
|
tvdb_api/models/movie.py
|
release_dates
|
h3llrais3r/tvdb_api
|
python
|
@release_dates.setter
def release_dates(self, release_dates):
'Sets the release_dates of this Movie.\n\n\n :param release_dates: The release_dates of this Movie. # noqa: E501\n :type: list[MovieReleaseDate]\n '
self._release_dates = release_dates
|
@property
def remoteids(self):
'Gets the remoteids of this Movie. # noqa: E501\n\n\n :return: The remoteids of this Movie. # noqa: E501\n :rtype: list[MovieRemoteId]\n '
return self._remoteids
| 82,586,091,699,628,220
|
Gets the remoteids of this Movie. # noqa: E501
:return: The remoteids of this Movie. # noqa: E501
:rtype: list[MovieRemoteId]
|
tvdb_api/models/movie.py
|
remoteids
|
h3llrais3r/tvdb_api
|
python
|
@property
def remoteids(self):
'Gets the remoteids of this Movie. # noqa: E501\n\n\n :return: The remoteids of this Movie. # noqa: E501\n :rtype: list[MovieRemoteId]\n '
return self._remoteids
|
@remoteids.setter
def remoteids(self, remoteids):
'Sets the remoteids of this Movie.\n\n\n :param remoteids: The remoteids of this Movie. # noqa: E501\n :type: list[MovieRemoteId]\n '
self._remoteids = remoteids
| 6,932,675,821,644,166,000
|
Sets the remoteids of this Movie.
:param remoteids: The remoteids of this Movie. # noqa: E501
:type: list[MovieRemoteId]
|
tvdb_api/models/movie.py
|
remoteids
|
h3llrais3r/tvdb_api
|
python
|
@remoteids.setter
def remoteids(self, remoteids):
'Sets the remoteids of this Movie.\n\n\n :param remoteids: The remoteids of this Movie. # noqa: E501\n :type: list[MovieRemoteId]\n '
self._remoteids = remoteids
|
@property
def runtime(self):
'Gets the runtime of this Movie. # noqa: E501\n\n\n :return: The runtime of this Movie. # noqa: E501\n :rtype: int\n '
return self._runtime
| -5,657,135,229,381,579,000
|
Gets the runtime of this Movie. # noqa: E501
:return: The runtime of this Movie. # noqa: E501
:rtype: int
|
tvdb_api/models/movie.py
|
runtime
|
h3llrais3r/tvdb_api
|
python
|
@property
def runtime(self):
'Gets the runtime of this Movie. # noqa: E501\n\n\n :return: The runtime of this Movie. # noqa: E501\n :rtype: int\n '
return self._runtime
|
@runtime.setter
def runtime(self, runtime):
'Sets the runtime of this Movie.\n\n\n :param runtime: The runtime of this Movie. # noqa: E501\n :type: int\n '
self._runtime = runtime
| -8,879,695,535,615,070,000
|
Sets the runtime of this Movie.
:param runtime: The runtime of this Movie. # noqa: E501
:type: int
|
tvdb_api/models/movie.py
|
runtime
|
h3llrais3r/tvdb_api
|
python
|
@runtime.setter
def runtime(self, runtime):
'Sets the runtime of this Movie.\n\n\n :param runtime: The runtime of this Movie. # noqa: E501\n :type: int\n '
self._runtime = runtime
|
@property
def trailers(self):
'Gets the trailers of this Movie. # noqa: E501\n\n\n :return: The trailers of this Movie. # noqa: E501\n :rtype: list[MovieTrailer]\n '
return self._trailers
| -4,756,530,408,680,252,000
|
Gets the trailers of this Movie. # noqa: E501
:return: The trailers of this Movie. # noqa: E501
:rtype: list[MovieTrailer]
|
tvdb_api/models/movie.py
|
trailers
|
h3llrais3r/tvdb_api
|
python
|
@property
def trailers(self):
'Gets the trailers of this Movie. # noqa: E501\n\n\n :return: The trailers of this Movie. # noqa: E501\n :rtype: list[MovieTrailer]\n '
return self._trailers
|
@trailers.setter
def trailers(self, trailers):
'Sets the trailers of this Movie.\n\n\n :param trailers: The trailers of this Movie. # noqa: E501\n :type: list[MovieTrailer]\n '
self._trailers = trailers
| 7,242,678,631,285,110,000
|
Sets the trailers of this Movie.
:param trailers: The trailers of this Movie. # noqa: E501
:type: list[MovieTrailer]
|
tvdb_api/models/movie.py
|
trailers
|
h3llrais3r/tvdb_api
|
python
|
@trailers.setter
def trailers(self, trailers):
'Sets the trailers of this Movie.\n\n\n :param trailers: The trailers of this Movie. # noqa: E501\n :type: list[MovieTrailer]\n '
self._trailers = trailers
|
@property
def translations(self):
'Gets the translations of this Movie. # noqa: E501\n\n\n :return: The translations of this Movie. # noqa: E501\n :rtype: list[MovieTranslation]\n '
return self._translations
| 6,026,753,750,882,946,000
|
Gets the translations of this Movie. # noqa: E501
:return: The translations of this Movie. # noqa: E501
:rtype: list[MovieTranslation]
|
tvdb_api/models/movie.py
|
translations
|
h3llrais3r/tvdb_api
|
python
|
@property
def translations(self):
'Gets the translations of this Movie. # noqa: E501\n\n\n :return: The translations of this Movie. # noqa: E501\n :rtype: list[MovieTranslation]\n '
return self._translations
|
@translations.setter
def translations(self, translations):
'Sets the translations of this Movie.\n\n\n :param translations: The translations of this Movie. # noqa: E501\n :type: list[MovieTranslation]\n '
self._translations = translations
| 4,669,909,626,875,010,000
|
Sets the translations of this Movie.
:param translations: The translations of this Movie. # noqa: E501
:type: list[MovieTranslation]
|
tvdb_api/models/movie.py
|
translations
|
h3llrais3r/tvdb_api
|
python
|
@translations.setter
def translations(self, translations):
'Sets the translations of this Movie.\n\n\n :param translations: The translations of this Movie. # noqa: E501\n :type: list[MovieTranslation]\n '
self._translations = translations
|
@property
def url(self):
'Gets the url of this Movie. # noqa: E501\n\n\n :return: The url of this Movie. # noqa: E501\n :rtype: str\n '
return self._url
| 1,514,740,167,924,753,700
|
Gets the url of this Movie. # noqa: E501
:return: The url of this Movie. # noqa: E501
:rtype: str
|
tvdb_api/models/movie.py
|
url
|
h3llrais3r/tvdb_api
|
python
|
@property
def url(self):
'Gets the url of this Movie. # noqa: E501\n\n\n :return: The url of this Movie. # noqa: E501\n :rtype: str\n '
return self._url
|
@url.setter
def url(self, url):
'Sets the url of this Movie.\n\n\n :param url: The url of this Movie. # noqa: E501\n :type: str\n '
self._url = url
| 5,967,116,398,014,488,000
|
Sets the url of this Movie.
:param url: The url of this Movie. # noqa: E501
:type: str
|
tvdb_api/models/movie.py
|
url
|
h3llrais3r/tvdb_api
|
python
|
@url.setter
def url(self, url):
'Sets the url of this Movie.\n\n\n :param url: The url of this Movie. # noqa: E501\n :type: str\n '
self._url = url
|
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(Movie, dict):
for (key, value) in self.items():
result[key] = value
return result
| -2,365,698,491,032,322,600
|
Returns the model properties as a dict
|
tvdb_api/models/movie.py
|
to_dict
|
h3llrais3r/tvdb_api
|
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(Movie, dict):
for (key, value) in self.items():
result[key] = value
return result
|
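
The serialization pattern in miniature: recurse into anything exposing to_dict() and map over lists, driven by the swagger_types attribute table (the classes below are illustrative, not the generated Movie model):

import pprint

class Inner:
    def __init__(self, x):
        self.x = x
    def to_dict(self):
        return {'x': self.x}

class Outer:
    swagger_types = {'items': 'list[Inner]', 'name': 'str'}   # attr -> declared type
    def __init__(self):
        self.items = [Inner(1), Inner(2)]
        self.name = 'demo'
    def to_dict(self):
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [v.to_dict() if hasattr(v, 'to_dict') else v for v in value]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result

pprint.pprint(Outer().to_dict())   # {'items': [{'x': 1}, {'x': 2}], 'name': 'demo'}
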
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
| 5,849,158,643,760,736,000
|
Returns the string representation of the model
|
tvdb_api/models/movie.py
|
to_str
|
h3llrais3r/tvdb_api
|
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
| -8,960,031,694,814,905,000
|
For `print` and `pprint`
|
tvdb_api/models/movie.py
|
__repr__
|
h3llrais3r/tvdb_api
|
python
|
def __repr__(self):
return self.to_str()
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, Movie)):
return False
return (self.__dict__ == other.__dict__)
| 5,689,336,831,722,514,000
|
Returns true if both objects are equal
|
tvdb_api/models/movie.py
|
__eq__
|
h3llrais3r/tvdb_api
|
python
|
def __eq__(self, other):
if (not isinstance(other, Movie)):
return False
return (self.__dict__ == other.__dict__)
|
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other))
| 7,764,124,047,908,058,000
|
Returns true if both objects are not equal
|
tvdb_api/models/movie.py
|
__ne__
|
h3llrais3r/tvdb_api
|
python
|
def __ne__(self, other):
return (not (self == other))
|
def mol_sim_matrix(fingerprints1, fingerprints2, method='cosine', filename=None, max_size=1000, print_progress=True):
"Create Matrix of all molecular similarities (based on molecular fingerprints).\n\n If filename is not None, the result will be saved as npy.\n To create molecular fingerprints see mol_fingerprints() function from MS_functions.\n\n Args:\n ----\n fingerprints1: list\n List of molecular fingerprints (numpy arrays).\n fingerprints2: list\n List of molecular fingerprints (numpy arrays).\n method: str\n Method to compare molecular fingerprints. Can be 'cosine', 'dice' etc.\n (see scipy.spatial.distance.cdist).\n filename: str\n Filename to save results to. OR: If file already exists it will be\n loaded instead.\n max_size: int\n Maximum size of (sub) all-vs-all matrix to handle in one go. Will split\n up larger matrices into\n max_size x max_size matrices.\n print_progress: bool, optional\n If True, print phase of the run to indicate progress. Default = True.\n "
if (filename is not None):
try:
molecular_similarities = np.load(filename)
print('Molecular similarity scores found and loaded.')
collect_new_data = False
except FileNotFoundError:
print('Could not find file ', filename)
print('Molecular scores will be calculated from scratch.')
collect_new_data = True
else:
collect_new_data = True
if collect_new_data:
fingerprints_arr1 = np.array(fingerprints1)
fingerprints_arr2 = np.array(fingerprints2)
matrix_size = (fingerprints_arr1.shape[0], fingerprints_arr2.shape[0])
molecular_similarities = np.zeros(matrix_size)
splits = int((np.ceil((matrix_size[0] / max_size)) * np.ceil((matrix_size[1] / max_size))))
count_splits = 0
for i in range(int(np.ceil((matrix_size[0] / max_size)))):
low1 = (i * max_size)
high1 = min(((i + 1) * max_size), matrix_size[0])
for j in range(int(np.ceil((matrix_size[1] / max_size)))):
low2 = (j * max_size)
high2 = min(((j + 1) * max_size), matrix_size[1])
molecular_similarities[low1:high1, low2:high2] = (1 - spatial.distance.cdist(fingerprints_arr1[low1:high1], fingerprints_arr2[low2:high2], method))
count_splits += 1
if print_progress:
print('\r', 'Calculated submatrix {} out of {}'.format(count_splits, splits), end='')
if print_progress:
print((20 * '--'))
            print('Successfully calculated matrix with all-vs-all molecular similarity values.')
if (filename is not None):
np.save(filename, molecular_similarities)
print('Matrix was saved under:', filename)
return molecular_similarities
| -4,397,187,001,534,330,000
|
Create Matrix of all molecular similarities (based on molecular fingerprints).
If filename is not None, the result will be saved as npy.
To create molecular fingerprints see mol_fingerprints() function from MS_functions.
Args:
----
fingerprints1: list
List of molecular fingerprints (numpy arrays).
fingerprints2: list
List of molecular fingerprints (numpy arrays).
method: str
Method to compare molecular fingerprints. Can be 'cosine', 'dice' etc.
(see scipy.spatial.distance.cdist).
filename: str
Filename to save results to. OR: If file already exists it will be
loaded instead.
max_size: int
Maximum size of (sub) all-vs-all matrix to handle in one go. Will split
up larger matrices into
max_size x max_size matrices.
print_progress: bool, optional
If True, print phase of the run to indicate progress. Default = True.
|
matchms/old/ms_similarity_classical.py
|
mol_sim_matrix
|
matchms/old-iomega-spec2vec
|
python
|
def mol_sim_matrix(fingerprints1, fingerprints2, method='cosine', filename=None, max_size=1000, print_progress=True):
"Create Matrix of all molecular similarities (based on molecular fingerprints).\n\n If filename is not None, the result will be saved as npy.\n To create molecular fingerprints see mol_fingerprints() function from MS_functions.\n\n Args:\n ----\n fingerprints1: list\n List of molecular fingerprints (numpy arrays).\n fingerprints2: list\n List of molecular fingerprints (numpy arrays).\n method: str\n Method to compare molecular fingerprints. Can be 'cosine', 'dice' etc.\n (see scipy.spatial.distance.cdist).\n filename: str\n Filename to save results to. OR: If file already exists it will be\n loaded instead.\n max_size: int\n Maximum size of (sub) all-vs-all matrix to handle in one go. Will split\n up larger matrices into\n max_size x max_size matrices.\n print_progress: bool, optional\n If True, print phase of the run to indicate progress. Default = True.\n "
if (filename is not None):
try:
molecular_similarities = np.load(filename)
print('Molecular similarity scores found and loaded.')
collect_new_data = False
except FileNotFoundError:
print('Could not find file ', filename)
print('Molecular scores will be calculated from scratch.')
collect_new_data = True
else:
collect_new_data = True
if collect_new_data:
fingerprints_arr1 = np.array(fingerprints1)
fingerprints_arr2 = np.array(fingerprints2)
matrix_size = (fingerprints_arr1.shape[0], fingerprints_arr2.shape[0])
molecular_similarities = np.zeros(matrix_size)
splits = int((np.ceil((matrix_size[0] / max_size)) * np.ceil((matrix_size[1] / max_size))))
count_splits = 0
for i in range(int(np.ceil((matrix_size[0] / max_size)))):
low1 = (i * max_size)
high1 = min(((i + 1) * max_size), matrix_size[0])
for j in range(int(np.ceil((matrix_size[1] / max_size)))):
low2 = (j * max_size)
high2 = min(((j + 1) * max_size), matrix_size[1])
molecular_similarities[low1:high1, low2:high2] = (1 - spatial.distance.cdist(fingerprints_arr1[low1:high1], fingerprints_arr2[low2:high2], method))
count_splits += 1
if print_progress:
                    print('\r', 'Calculated submatrix {} out of {}'.format(count_splits, splits), end='')
if print_progress:
print((20 * '--'))
            print('Successfully calculated matrix with all-vs-all molecular similarity values.')
if (filename is not None):
np.save(filename, molecular_similarities)
print('Matrix was saved under:', filename)
return molecular_similarities
|
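
The tiling idea on its own, checked against a direct computation (max_size shrunk to 2 here just to force several tiles; this is a sketch of the memory-bounding trick, not the full function):

import numpy as np
from scipy import spatial

def blockwise_similarity(a, b, max_size=2, metric='cosine'):
    # Fill the result in max_size x max_size tiles to bound peak memory use
    out = np.zeros((a.shape[0], b.shape[0]))
    for i0 in range(0, a.shape[0], max_size):
        for j0 in range(0, b.shape[0], max_size):
            out[i0:i0 + max_size, j0:j0 + max_size] = 1 - spatial.distance.cdist(
                a[i0:i0 + max_size], b[j0:j0 + max_size], metric)
    return out

rng = np.random.default_rng(0)
a, b = rng.random((5, 8)), rng.random((3, 8))
assert np.allclose(blockwise_similarity(a, b), 1 - spatial.distance.cdist(a, b, 'cosine'))
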
def cosine_score_greedy(spec1, spec2, mass_shift, tol, min_intens=0, use_numba=True):
'Calculate cosine score between spectrum1 and spectrum2.\n\n If mass_shifted = True it will shift the spectra with respect to each other\n by difference in their parentmasses.\n\n Args:\n ----\n spec1: Spectrum peaks and intensities as numpy array.\n spec2: Spectrum peaks and intensities as numpy array.\n tol: float\n Tolerance value to define how far two peaks can be apart to still count as match.\n min_intens: float\n Minimum intensity (relative to max.intensity peak in spectrum). Peaks with lower\n intensity will be ignored --> higher min_intens is faster, but less precise.\n '
if ((spec1.shape[0] == 0) or (spec2.shape[0] == 0)):
return (0.0, [])
spec1[:, 1] = (spec1[:, 1] / max(spec1[:, 1]))
spec2[:, 1] = (spec2[:, 1] / max(spec2[:, 1]))
spec1 = spec1[(spec1[:, 1] > min_intens), :]
spec2 = spec2[(spec2[:, 1] > min_intens), :]
if use_numba:
zero_pairs = find_pairs_numba(spec1, spec2, tol, shift=0.0)
else:
zero_pairs = find_pairs(spec1, spec2, tol, shift=0.0)
if ((mass_shift is not None) and (mass_shift != 0.0)):
if use_numba:
nonzero_pairs = find_pairs_numba(spec1, spec2, tol, shift=mass_shift)
else:
nonzero_pairs = find_pairs(spec1, spec2, tol, shift=mass_shift)
matching_pairs = (zero_pairs + nonzero_pairs)
else:
matching_pairs = zero_pairs
matching_pairs = sorted(matching_pairs, key=(lambda x: x[2]), reverse=True)
used1 = set()
used2 = set()
score = 0.0
used_matches = []
for m in matching_pairs:
if ((not (m[0] in used1)) and (not (m[1] in used2))):
score += m[2]
used1.add(m[0])
used2.add(m[1])
used_matches.append(m)
score = (score / max(np.sum((spec1[:, 1] ** 2)), np.sum((spec2[:, 1] ** 2))))
return (score, used_matches)
| -1,856,239,111,906,763,300
|
Calculate cosine score between spectrum1 and spectrum2.
If mass_shift is set (non-zero), the spectra will be shifted with respect to each other
by the difference in their parent masses.
Args:
----
spec1: Spectrum peaks and intensities as numpy array.
spec2: Spectrum peaks and intensities as numpy array.
tol: float
Tolerance value to define how far two peaks can be apart to still count as match.
min_intens: float
Minimum intensity (relative to max.intensity peak in spectrum). Peaks with lower
intensity will be ignored --> higher min_intens is faster, but less precise.
|
matchms/old/ms_similarity_classical.py
|
cosine_score_greedy
|
matchms/old-iomega-spec2vec
|
python
|
def cosine_score_greedy(spec1, spec2, mass_shift, tol, min_intens=0, use_numba=True):
'Calculate cosine score between spectrum1 and spectrum2.\n\n If mass_shifted = True it will shift the spectra with respect to each other\n by difference in their parentmasses.\n\n Args:\n ----\n spec1: Spectrum peaks and intensities as numpy array.\n spec2: Spectrum peaks and intensities as numpy array.\n tol: float\n Tolerance value to define how far two peaks can be apart to still count as match.\n min_intens: float\n Minimum intensity (relative to max.intensity peak in spectrum). Peaks with lower\n intensity will be ignored --> higher min_intens is faster, but less precise.\n '
if ((spec1.shape[0] == 0) or (spec2.shape[0] == 0)):
return (0.0, [])
spec1[:, 1] = (spec1[:, 1] / max(spec1[:, 1]))
spec2[:, 1] = (spec2[:, 1] / max(spec2[:, 1]))
spec1 = spec1[(spec1[:, 1] > min_intens), :]
spec2 = spec2[(spec2[:, 1] > min_intens), :]
if use_numba:
zero_pairs = find_pairs_numba(spec1, spec2, tol, shift=0.0)
else:
zero_pairs = find_pairs(spec1, spec2, tol, shift=0.0)
if ((mass_shift is not None) and (mass_shift != 0.0)):
if use_numba:
nonzero_pairs = find_pairs_numba(spec1, spec2, tol, shift=mass_shift)
else:
nonzero_pairs = find_pairs(spec1, spec2, tol, shift=mass_shift)
matching_pairs = (zero_pairs + nonzero_pairs)
else:
matching_pairs = zero_pairs
matching_pairs = sorted(matching_pairs, key=(lambda x: x[2]), reverse=True)
used1 = set()
used2 = set()
score = 0.0
used_matches = []
for m in matching_pairs:
if ((not (m[0] in used1)) and (not (m[1] in used2))):
score += m[2]
used1.add(m[0])
used2.add(m[1])
used_matches.append(m)
score = (score / max(np.sum((spec1[:, 1] ** 2)), np.sum((spec2[:, 1] ** 2))))
return (score, used_matches)
|
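
The greedy matching core, extracted into a standalone function with a tiny check (the pair format follows the (idx1, idx2, score) triples used above):

def greedy_match(pairs):
    # Take pairs best-score-first; each peak index may be used at most once
    used1, used2, kept, total = set(), set(), [], 0.0
    for i, j, s in sorted(pairs, key=lambda p: p[2], reverse=True):
        if i not in used1 and j not in used2:
            total += s
            used1.add(i)
            used2.add(j)
            kept.append((i, j, s))
    return total, kept

total, kept = greedy_match([(0, 0, 0.9), (0, 1, 0.8), (1, 1, 0.7)])
assert kept == [(0, 0, 0.9), (1, 1, 0.7)]   # (0, 1) is blocked: peak 0 already matched
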
def cosine_score_hungarian(spec1, spec2, mass_shift, tol, min_intens=0):
"Taking full care of weighted bipartite matching problem.\n\n Use Hungarian algorithm (slow...)\n\n Args:\n --------\n spec1: Spectrum peaks and intensities as numpy array.\n spec2: Spectrum peaks and intensities as numpy array.\n mass_shift: float\n Difference in parent mass of both spectra to account for. Set to 'None'\n when no shifting is desired --> back to normal cosine score.\n tol: float\n Tolerance value to define how far two peaks can be apart to still count as match.\n min_intens: float\n Minimum intensity (relative to max.intensity peak in spectrum). Peaks with lower\n intensity will be ignored --> higher min_intens is faster, but less precise.\n "
if ((spec1.shape[0] == 0) or (spec2.shape[0] == 0)):
return (0.0, [])
spec1[:, 1] = (spec1[:, 1] / max(spec1[:, 1]))
spec2[:, 1] = (spec2[:, 1] / max(spec2[:, 1]))
spec1 = spec1[(spec1[:, 1] > min_intens), :]
spec2 = spec2[(spec2[:, 1] > min_intens), :]
zero_pairs = find_pairs_numba(spec1, spec2, tol, shift=0.0)
if ((mass_shift is not None) and (mass_shift != 0.0)):
nonzero_pairs = find_pairs_numba(spec1, spec2, tol, shift=mass_shift)
matching_pairs = (zero_pairs + nonzero_pairs)
else:
matching_pairs = zero_pairs
matching_pairs = sorted(matching_pairs, key=(lambda x: x[2]), reverse=True)
used_matches = []
list1 = list(set([x[0] for x in matching_pairs]))
list2 = list(set([x[1] for x in matching_pairs]))
matrix_size = (len(list1), len(list2))
matrix = np.ones(matrix_size)
if (len(matching_pairs) > 0):
for m in matching_pairs:
matrix[(list1.index(m[0]), list2.index(m[1]))] = (1 - m[2])
(row_ind, col_ind) = linear_sum_assignment(matrix)
score = (len(row_ind) - matrix[(row_ind, col_ind)].sum())
used_matches = [(list1[x], list2[y]) for (x, y) in zip(row_ind, col_ind)]
score = (score / max(np.sum((spec1[:, 1] ** 2)), np.sum((spec2[:, 1] ** 2))))
else:
score = 0.0
return (score, used_matches)
| 7,721,985,818,695,637,000
|
Taking full care of weighted bipartite matching problem.
Use Hungarian algorithm (slow...)
Args:
--------
spec1: Spectrum peaks and intensities as numpy array.
spec2: Spectrum peaks and intensities as numpy array.
mass_shift: float
Difference in parent mass of both spectra to account for. Set to 'None'
when no shifting is desired --> back to normal cosine score.
tol: float
Tolerance value to define how far two peaks can be apart to still count as match.
min_intens: float
Minimum intensity (relative to max.intensity peak in spectrum). Peaks with lower
intensity will be ignored --> higher min_intens is faster, but less precise.
|
matchms/old/ms_similarity_classical.py
|
cosine_score_hungarian
|
matchms/old-iomega-spec2vec
|
python
|
def cosine_score_hungarian(spec1, spec2, mass_shift, tol, min_intens=0):
"Taking full care of weighted bipartite matching problem.\n\n Use Hungarian algorithm (slow...)\n\n Args:\n --------\n spec1: Spectrum peaks and intensities as numpy array.\n spec2: Spectrum peaks and intensities as numpy array.\n mass_shift: float\n Difference in parent mass of both spectra to account for. Set to 'None'\n when no shifting is desired --> back to normal cosine score.\n tol: float\n Tolerance value to define how far two peaks can be apart to still count as match.\n min_intens: float\n Minimum intensity (relative to max.intensity peak in spectrum). Peaks with lower\n intensity will be ignored --> higher min_intens is faster, but less precise.\n "
if ((spec1.shape[0] == 0) or (spec2.shape[0] == 0)):
return (0.0, [])
spec1[:, 1] = (spec1[:, 1] / max(spec1[:, 1]))
spec2[:, 1] = (spec2[:, 1] / max(spec2[:, 1]))
spec1 = spec1[(spec1[:, 1] > min_intens), :]
spec2 = spec2[(spec2[:, 1] > min_intens), :]
zero_pairs = find_pairs_numba(spec1, spec2, tol, shift=0.0)
if ((mass_shift is not None) and (mass_shift != 0.0)):
nonzero_pairs = find_pairs_numba(spec1, spec2, tol, shift=mass_shift)
matching_pairs = (zero_pairs + nonzero_pairs)
else:
matching_pairs = zero_pairs
matching_pairs = sorted(matching_pairs, key=(lambda x: x[2]), reverse=True)
used_matches = []
list1 = list(set([x[0] for x in matching_pairs]))
list2 = list(set([x[1] for x in matching_pairs]))
matrix_size = (len(list1), len(list2))
matrix = np.ones(matrix_size)
if (len(matching_pairs) > 0):
for m in matching_pairs:
matrix[(list1.index(m[0]), list2.index(m[1]))] = (1 - m[2])
(row_ind, col_ind) = linear_sum_assignment(matrix)
score = (len(row_ind) - matrix[(row_ind, col_ind)].sum())
used_matches = [(list1[x], list2[y]) for (x, y) in zip(row_ind, col_ind)]
score = (score / max(np.sum((spec1[:, 1] ** 2)), np.sum((spec2[:, 1] ** 2))))
else:
score = 0.0
return (score, used_matches)
|
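
linear_sum_assignment in isolation, on a toy 2x2 similarity matrix, mirroring the cost construction above:

import numpy as np
from scipy.optimize import linear_sum_assignment

sim = np.array([[0.9, 0.1],
                [0.2, 0.8]])              # similarities between peaks of two spectra
cost = 1 - sim                            # the solver minimizes cost
row_ind, col_ind = linear_sum_assignment(cost)
score = len(row_ind) - cost[row_ind, col_ind].sum()

assert list(zip(row_ind, col_ind)) == [(0, 0), (1, 1)]
assert np.isclose(score, 0.9 + 0.8)
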
def cosine_matrix_fast(spectra, tol, max_mz, min_mz=0):
'Calculates cosine similarity matrix.\n\n Be careful! Binning is here done by creating one-hot vectors.\n It is hence really actual "bining" and different from the tolerance-based\n approach used for the cosine_matrix or molnet_matrix!\n\n Also: tol here is about tol/2 when compared to cosine_matrix or molnet_matrix...\n '
for (i, spectrum) in enumerate(spectra):
spec = np.array(spectrum.peaks.copy(), dtype=float)
spec[:, 1] = (spec[:, 1] / np.max(spec[:, 1]))
if (i == 0):
vector = one_hot_spectrum(spec, tol, max_mz, shift=0, min_mz=min_mz, method='max')
spec_vectors = np.zeros((len(spectra), vector.shape[0]))
spec_vectors[0, :] = vector
else:
spec_vectors[i, :] = one_hot_spectrum(spec, tol, max_mz, shift=0, min_mz=min_mz, method='max')
Cdist = spatial.distance.cdist(spec_vectors, spec_vectors, 'cosine')
return (1 - Cdist)
| -5,577,614,660,094,574,000
|
Calculates cosine similarity matrix.
Be careful! Binning is here done by creating one-hot vectors.
It is hence actual "binning" and different from the tolerance-based
approach used for the cosine_matrix or molnet_matrix!
Also: tol here is about tol/2 when compared to cosine_matrix or molnet_matrix...
|
matchms/old/ms_similarity_classical.py
|
cosine_matrix_fast
|
matchms/old-iomega-spec2vec
|
python
|
def cosine_matrix_fast(spectra, tol, max_mz, min_mz=0):
'Calculates cosine similarity matrix.\n\n Be careful! Binning is here done by creating one-hot vectors.\n It is hence really actual "bining" and different from the tolerance-based\n approach used for the cosine_matrix or molnet_matrix!\n\n Also: tol here is about tol/2 when compared to cosine_matrix or molnet_matrix...\n '
for (i, spectrum) in enumerate(spectra):
spec = np.array(spectrum.peaks.copy(), dtype=float)
spec[:, 1] = (spec[:, 1] / np.max(spec[:, 1]))
if (i == 0):
vector = one_hot_spectrum(spec, tol, max_mz, shift=0, min_mz=min_mz, method='max')
spec_vectors = np.zeros((len(spectra), vector.shape[0]))
spec_vectors[0, :] = vector
else:
spec_vectors[i, :] = one_hot_spectrum(spec, tol, max_mz, shift=0, min_mz=min_mz, method='max')
Cdist = spatial.distance.cdist(spec_vectors, spec_vectors, 'cosine')
return (1 - Cdist)
|
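
The binning step sketched standalone (a simplified stand-in for one_hot_spectrum with the 'max' aggregation; the bin width plays the role that tol plays above):

import numpy as np

def one_hot_spectrum_sketch(mz, intensity, bin_width, max_mz):
    vec = np.zeros(int(np.ceil(max_mz / bin_width)))
    idx = (np.asarray(mz, dtype=float) / bin_width).astype(int)
    np.maximum.at(vec, idx, intensity)    # keep the max intensity per bin
    return vec

v = one_hot_spectrum_sketch([100.2, 100.4, 250.0], [0.5, 1.0, 0.3], bin_width=1.0, max_mz=1000)
assert v[100] == 1.0 and v[250] == 0.3    # two peaks fell into bin 100; max kept
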
def cosine_score_matrix(spectra, tol, max_mz=1000.0, min_intens=0, mass_shifting=False, method='hungarian', num_workers=4, filename=None, safety_points=None):
'Create Matrix of all modified cosine similarities.\n\n Takes some time to calculate, so better only do it once and save as npy.\n\n Now implemented: parallelization of code using concurrent.futures and numba options.\n\n spectra: list\n List of spectra (of Spectrum class)\n tol: float\n Tolerance to still count peaks a match (mz +- tolerance).\n max_mz: float\n Maxium m-z mass to take into account\n #min_mz: float\n # Minimum m-z mass to take into account\n min_intens: float\n Sets the minimum relative intensity peaks must have to be looked at for\n potential matches.\n mass_shifting: bool\n Set to \'True\' if mass difference between spectra should be accounted for\n --> "modified cosine" score\n Set to \'False\' for --> "normal cosine" score\n method: \'greedy\', \'greedy-numba\', \'hungarian\'\n "greedy" will use Simon\'s molnet scoring which is faster than hungarian,\n but not 100% accurate\n regarding the weighted bipartite matching problem.\n "hungarian" will use the Hungarian algorithm, which is more accurate.\n Since its slower, numba is used here to compile in time.\n "greedy-numba" will use a (partly) numba compiled version of greedy.\n Much faster, but needs numba.\n num_workers: int\n Number of threads to use for calculation.\n filename: str/ None\n Filename to look for existing npy-file with molent matrix. Or, if not\n found, to use to save the newly calculated matrix.\n safety_points: int\n Number of safety points, i.e. number of times the modcos-matrix is saved\n during process. Set to \'None\' to avoid saving matrix on the way.\n '
if (filename is not None):
if (filename[(- 4):] != '.npy'):
filename = (filename + '.npy')
try:
print('Loading similarity scores from', filename)
modcos_sim = np.load(filename)
print('Loading min_match values from', (filename[:(- 4)] + '_matches.npy'))
modcos_matches = np.load((filename[:(- 4)] + '_matches.npy'))
diagonal = modcos_sim.diagonal()
if (np.min(diagonal) == 0):
print('Incomplete cosine similarity scores found and loaded.')
missing_scores = np.where((diagonal == 0))[0].astype(int)
print('Missing cosine scores will be calculated.')
counter_total = int(((len(spectra) ** 2) / 2))
counter_init = (counter_total - np.sum((len(spectra) - missing_scores)))
print('About ', (100 * (counter_init / counter_total)), '% of the values already completed.')
collect_new_data = True
else:
print('Complete cosine similarity scores found and loaded.')
missing_scores = []
counter_init = 0
collect_new_data = False
except FileNotFoundError:
print('Could not find file ', filename, 'or file', (filename[:(- 4)] + '_matches.npy'))
if mass_shifting:
print('Modified cosine scores will be calculated from scratch.')
else:
print('Cosine scores will be calculated from scratch.')
collect_new_data = True
missing_scores = np.arange(0, len(spectra))
counter_init = 0
else:
collect_new_data = True
missing_scores = np.arange(0, len(spectra))
counter_init = 0
if collect_new_data:
if (counter_init == 0):
modcos_sim = np.zeros((len(spectra), len(spectra)))
modcos_matches = np.zeros((len(spectra), len(spectra)))
counter = counter_init
if (safety_points is not None):
safety_save = int((((len(spectra) ** 2) / 2) / safety_points))
print('Calculating pairwise scores using', num_workers, 'workers.')
for i in missing_scores:
spec1 = np.array(spectra[i].peaks, dtype=float)
spec1 = spec1[(spec1[:, 0] < max_mz), :]
parameter_collection = []
for j in range(i, len(spectra)):
spec2 = np.array(spectra[j].peaks, dtype=float)
spec2 = spec2[(spec2[:, 0] < max_mz), :]
if mass_shifting:
mass_shift = (spectra[i].parent_mz - spectra[j].parent_mz)
else:
mass_shift = None
parameter_collection.append([spec1, spec2, i, j, mass_shift, tol, min_intens, method, counter])
counter += 1
modcos_pairs = []
with ThreadPoolExecutor(max_workers=num_workers) as executor:
futures = [executor.submit(modcos_pair, X, len(spectra)) for X in parameter_collection]
modcos_pairs.append(futures)
for (m, future) in enumerate(modcos_pairs[0]):
(_, _, ind_i, ind_j, _, _, _, _, counting) = parameter_collection[m]
modcos_sim[(ind_i, ind_j)] = future.result()[0]
modcos_matches[(ind_i, ind_j)] = future.result()[1]
if ((filename is not None) and (safety_points is not None)):
if (((counting + 1) % safety_save) == 0):
np.save(filename, modcos_sim)
np.save((filename[:(- 4)] + '_matches.npy'), modcos_matches)
for i in range(1, len(spectra)):
for j in range(i):
modcos_sim[(i, j)] = modcos_sim[(j, i)]
modcos_matches[(i, j)] = modcos_matches[(j, i)]
if (filename is not None):
np.save(filename, modcos_sim)
np.save((filename[:(- 4)] + '_matches.npy'), modcos_matches)
return (modcos_sim, modcos_matches)
| -7,387,584,936,006,276,000
|
Create Matrix of all modified cosine similarities.
Takes some time to calculate, so better only do it once and save as npy.
Now implemented: parallelization of code using concurrent.futures and numba options.
spectra: list
List of spectra (of Spectrum class)
tol: float
Tolerance to still count peaks as a match (mz +- tolerance).
max_mz: float
Maximum m/z mass to take into account
#min_mz: float
# Minimum m/z mass to take into account
min_intens: float
Sets the minimum relative intensity peaks must have to be looked at for
potential matches.
mass_shifting: bool
Set to 'True' if mass difference between spectra should be accounted for
--> "modified cosine" score
Set to 'False' for --> "normal cosine" score
method: 'greedy', 'greedy-numba', 'hungarian'
"greedy" will use Simon's molnet scoring which is faster than hungarian,
but not 100% accurate
regarding the weighted bipartite matching problem.
"hungarian" will use the Hungarian algorithm, which is more accurate.
Since it is slower, numba is used here to compile just in time.
"greedy-numba" will use a (partly) numba-compiled version of greedy.
Much faster, but needs numba.
num_workers: int
Number of threads to use for calculation.
filename: str / None
Filename to look for an existing npy-file with the molnet matrix. Or, if not
found, to use to save the newly calculated matrix.
safety_points: int
Number of safety points, i.e. number of times the modcos-matrix is saved
during the process. Set to 'None' to avoid saving the matrix along the way.
|
matchms/old/ms_similarity_classical.py
|
cosine_score_matrix
|
matchms/old-iomega-spec2vec
|
python
|
def cosine_score_matrix(spectra, tol, max_mz=1000.0, min_intens=0, mass_shifting=False, method='hungarian', num_workers=4, filename=None, safety_points=None):
if (filename is not None):
if (filename[(- 4):] != '.npy'):
filename = (filename + '.npy')
try:
print('Loading similarity scores from', filename)
modcos_sim = np.load(filename)
print('Loading min_match values from', (filename[:(- 4)] + '_matches.npy'))
modcos_matches = np.load((filename[:(- 4)] + '_matches.npy'))
diagonal = modcos_sim.diagonal()
if (np.min(diagonal) == 0):
print('Incomplete cosine similarity scores found and loaded.')
missing_scores = np.where((diagonal == 0))[0].astype(int)
print('Missing cosine scores will be calculated.')
counter_total = int(((len(spectra) ** 2) / 2))
counter_init = (counter_total - np.sum((len(spectra) - missing_scores)))
print('About ', (100 * (counter_init / counter_total)), '% of the values already completed.')
collect_new_data = True
else:
print('Complete cosine similarity scores found and loaded.')
missing_scores = []
counter_init = 0
collect_new_data = False
except FileNotFoundError:
print('Could not find file ', filename, 'or file', (filename[:(- 4)] + '_matches.npy'))
if mass_shifting:
print('Modified cosine scores will be calculated from scratch.')
else:
print('Cosine scores will be calculated from scratch.')
collect_new_data = True
missing_scores = np.arange(0, len(spectra))
counter_init = 0
else:
collect_new_data = True
missing_scores = np.arange(0, len(spectra))
counter_init = 0
if collect_new_data:
if (counter_init == 0):
modcos_sim = np.zeros((len(spectra), len(spectra)))
modcos_matches = np.zeros((len(spectra), len(spectra)))
counter = counter_init
if (safety_points is not None):
safety_save = int((((len(spectra) ** 2) / 2) / safety_points))
print('Calculating pairwise scores using', num_workers, 'workers.')
for i in missing_scores:
spec1 = np.array(spectra[i].peaks, dtype=float)
spec1 = spec1[(spec1[:, 0] < max_mz), :]
parameter_collection = []
for j in range(i, len(spectra)):
spec2 = np.array(spectra[j].peaks, dtype=float)
spec2 = spec2[(spec2[:, 0] < max_mz), :]
if mass_shifting:
mass_shift = (spectra[i].parent_mz - spectra[j].parent_mz)
else:
mass_shift = None
parameter_collection.append([spec1, spec2, i, j, mass_shift, tol, min_intens, method, counter])
counter += 1
modcos_pairs = []
with ThreadPoolExecutor(max_workers=num_workers) as executor:
futures = [executor.submit(modcos_pair, X, len(spectra)) for X in parameter_collection]
modcos_pairs.append(futures)
for (m, future) in enumerate(modcos_pairs[0]):
(_, _, ind_i, ind_j, _, _, _, _, counting) = parameter_collection[m]
modcos_sim[(ind_i, ind_j)] = future.result()[0]
modcos_matches[(ind_i, ind_j)] = future.result()[1]
if ((filename is not None) and (safety_points is not None)):
if (((counting + 1) % safety_save) == 0):
np.save(filename, modcos_sim)
np.save((filename[:(- 4)] + '_matches.npy'), modcos_matches)
for i in range(1, len(spectra)):
for j in range(i):
modcos_sim[(i, j)] = modcos_sim[(j, i)]
modcos_matches[(i, j)] = modcos_matches[(j, i)]
if (filename is not None):
np.save(filename, modcos_sim)
np.save((filename[:(- 4)] + '_matches.npy'), modcos_matches)
return (modcos_sim, modcos_matches)
|
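Two mechanics in cosine_score_matrix are worth isolating: the resume-from-npy checkpointing (a diagonal entry of 0 marks a row that was never computed, since a finished row would hold a self-similarity of 1) and the final mirroring of the upper triangle into the lower one. The following is a minimal sketch under those assumptions; load_or_init and mirror_upper_triangle are illustrative names, and the vectorized mirroring is an equivalent alternative to the double loop used in the original.

import numpy as np

def load_or_init(filename, n):
    # Resume pattern: rows with a zero self-similarity on the diagonal
    # are treated as missing and get recomputed.
    try:
        sim = np.load(filename)
        missing = np.where(sim.diagonal() == 0)[0].astype(int)
    except FileNotFoundError:
        sim = np.zeros((n, n))
        missing = np.arange(n)
    return sim, missing

def mirror_upper_triangle(sim):
    # The workers fill only j >= i; copy the upper triangle down.
    lower = np.tril_indices(sim.shape[0], -1)
    sim[lower] = sim.T[lower]
    return sim

Computing only the upper triangle halves the work; the mirror step restores the full symmetric matrix before saving.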
def modcos_pair(X, len_spectra):
'Single molnet pair calculation\n '
(spectra_i, spectra_j, i, j, mass_shift, tol, min_intens, method, counter) = X
if (method == 'greedy'):
(molnet_pair, used_matches) = cosine_score_greedy(spectra_i, spectra_j, mass_shift, tol, min_intens=min_intens, use_numba=False)
elif (method == 'greedy-numba'):
(molnet_pair, used_matches) = cosine_score_greedy(spectra_i, spectra_j, mass_shift, tol, min_intens=min_intens, use_numba=True)
elif (method == 'hungarian'):
(molnet_pair, used_matches) = cosine_score_hungarian(spectra_i, spectra_j, mass_shift, tol, min_intens=min_intens)
    else:
        raise ValueError('Unknown method: {}'.format(method))
if ((((counter + 1) % 1000) == 0) or (counter == (len_spectra - 1))):
print('\r', ' Calculated MolNet for pair {} -- {}'.format(i, j), '. ( ', np.round(((200 * (counter + 1)) / (len_spectra ** 2)), 2), ' % done).', end='')
return (molnet_pair, len(used_matches))
| 2,678,553,399,383,915,000
|
Single molnet pair calculation
|
matchms/old/ms_similarity_classical.py
|
modcos_pair
|
matchms/old-iomega-spec2vec
|
python
|
def modcos_pair(X, len_spectra):
(spectra_i, spectra_j, i, j, mass_shift, tol, min_intens, method, counter) = X
if (method == 'greedy'):
(molnet_pair, used_matches) = cosine_score_greedy(spectra_i, spectra_j, mass_shift, tol, min_intens=min_intens, use_numba=False)
elif (method == 'greedy-numba'):
(molnet_pair, used_matches) = cosine_score_greedy(spectra_i, spectra_j, mass_shift, tol, min_intens=min_intens, use_numba=True)
elif (method == 'hungarian'):
(molnet_pair, used_matches) = cosine_score_hungarian(spectra_i, spectra_j, mass_shift, tol, min_intens=min_intens)
    else:
        raise ValueError('Unknown method: {}'.format(method))
if ((((counter + 1) % 1000) == 0) or (counter == (len_spectra - 1))):
    print('\r', ' Calculated MolNet for pair {} -- {}'.format(i, j), '. ( ', np.round(((200 * (counter + 1)) / (len_spectra ** 2)), 2), ' % done).', end='')
return (molnet_pair, len(used_matches))
|
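cosine_score_hungarian itself is not included in this record, so the sketch below is an assumed reconstruction of what the 'hungarian' branch of modcos_pair computes: an optimal weighted bipartite matching over peak pairs within tol (optionally also after shifting by the precursor mass difference), solved here with scipy.optimize.linear_sum_assignment. hungarian_pair_sketch is an illustrative name; treat this as a sketch of the technique, not the module's implementation.

import numpy as np
from scipy.optimize import linear_sum_assignment

def hungarian_pair_sketch(spec1, spec2, tol, mass_shift=None):
    mz1, int1 = spec1[:, 0], spec1[:, 1]
    mz2, int2 = spec2[:, 0], spec2[:, 1]
    # Peaks may match directly, or (modified cosine) after shifting spec2
    # by the precursor mass difference.
    allowed = np.abs(mz1[:, None] - mz2[None, :]) <= tol
    if mass_shift is not None:
        allowed |= np.abs(mz1[:, None] - (mz2[None, :] + mass_shift)) <= tol
    weights = np.where(allowed, int1[:, None] * int2[None, :], 0.0)
    # linear_sum_assignment minimizes cost, so negate to maximize weight.
    rows, cols = linear_sum_assignment(-weights)
    score = weights[rows, cols].sum()
    norm = np.sqrt((int1 ** 2).sum() * (int2 ** 2).sum())
    return score / norm if norm > 0 else 0.0

This is the exact weighted bipartite matching the docstring contrasts with 'greedy', which pairs peaks one by one and can miss the global optimum.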